text
stringlengths
11
4.05M
package worker import ( "testing" ) func Test_Resolve_1(t *testing.T) { array := [6][2]string{ {"126.com", "ns"}, {"126.com", "mx"}, {"126.com", "txt"}, {"8.8.8.8", "ptr"}, {"mail.126.com", "a"}, {"mail.126.com", "cname"}, } for _, v := range array { addr, rtype := v[0], v[1] result := Resolve(&addr, &rtype, true) if result.Error != "" { t.Error(result.Error) continue } if !(len(result.Body) > 0) { t.Errorf("resolve %s (%s) result empty", addr, rtype) continue } t.Logf("pass resolve in %f, %s (%s) = %s ", result.TimeDur, addr, rtype, result.Body) } }
/* Copyright © 2021 Joe Kralicky Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kit import ( "github.com/kralicky/kit/pkg/machinery" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) var FetchCmd = &cobra.Command{ Use: "fetch", Short: "Fetch the contents of the remote store", Run: func(cmd *cobra.Command, args []string) { var config *machinery.KitConfig var err error if config, err = machinery.ReadConfig(); err != nil { log.Fatal(err) } var client *machinery.RemoteClient if client, err = machinery.NewRemoteClient(config); err != nil { log.Fatal(err) } if cache, err := client.LoadRemoteData(); err != nil { log.Fatal(err) } else { if err := cache.WriteToDisk(); err != nil { log.Fatal(err) } } log.Info("Done.") }, }
package launchpad import ( "gitlab.com/gomidi/midi" ) type scrollingTextBuilderMK2 struct { Seq []byte outputStream midi.Out } func (l *LaunchpadMK2) Text(color Color) ScrollingTextBuilder { return l.text(color, false) } func (l *LaunchpadMK2) TextLoop(color Color) ScrollingTextBuilder { return l.text(color, true) } func (l *LaunchpadMK2) text(color Color, loop bool) ScrollingTextBuilder { colorCode := color.AsBytes() bLoop := byte(0x00) if loop { bLoop = 0x01 } return &scrollingTextBuilderMK2{ Seq: []byte{0xF0, 0x00, 0x20, 0x29, 0x02, 0x18, 0x14, colorCode[0], bLoop}, outputStream: l.outputStream, } } func (s *scrollingTextBuilderMK2) Add(speed byte, text string) ScrollingTextBuilder { if speed > 7 { speed = 7 } else if speed < 1 { speed = 1 } s.Seq = append(s.Seq, speed) s.Seq = append(s.Seq, []byte(text)...) return s } func (s *scrollingTextBuilderMK2) Perform() error { s.Seq = append(s.Seq, 0xF7) // the syntax of the scrolling text message: // F0 00 20 29 02 18 14h <Colour> <Loop> <Text> F7 _, err := s.outputStream.Write(s.Seq) return err }
// +build !js package main import ( "flag" "fmt" "io" "log" "net/http" "os" "os/exec" "path/filepath" "strings" "time" "github.com/fsnotify/fsnotify" "github.com/gobwas/glob" ) //arrayFlags is an array of string flags. type arrayFlags []string func (i *arrayFlags) String() string { return "ArrayFlags" } func (i *arrayFlags) Set(value string) error { *i = append(*i, value) return nil } var ( cmd string filter string resourceDirectory string port int globFilter glob.Glob dirs arrayFlags args arrayFlags current *exec.Cmd lastTime time.Time watcher *fsnotify.Watcher client *wsclient ) func watchFiles() { for { select { case event, ok := <-watcher.Events: if !ok { return } now := time.Now() diff := now.Sub(lastTime) if diff.Milliseconds() >= 5000 { if event.Op&fsnotify.Write == fsnotify.Write { absFile, _ := filepath.Abs(event.Name) absFile = strings.Replace(absFile, "\\", "/", -1) if globFilter.Match(absFile) { log.Println("Starting Build", absFile) cmd, err := runCommand(cmd, args...) if err == nil { lastTime = now current = cmd current.Wait() client.Broadcast(payloadEvent{Event: "BuildSuccess", Asset: absFile}) log.Println("Build Complete") } else { client.Broadcast(payloadEvent{Event: "BuildFailure", Asset: absFile}) log.Println("Build Failed", err) } //Clear the current out current = nil } else { client.Broadcast(payloadEvent{Event: "AssetUpdated", Asset: absFile}) } } } case err, ok := <-watcher.Errors: if !ok { return } log.Println("error:", err) } } } func main() { //Parse the flag flag.StringVar(&cmd, "cmd", "go build", "The command that will be executed when a change has been discovered") flag.StringVar(&filter, "filter", "*.go", "Filters the files that are modified") flag.StringVar(&resourceDirectory, "resources", "./resources/", "Resource Directory") flag.IntVar(&port, "port", 8090, "Port to host the webserver on") flag.Var(&args, "args", "Arguments for the command") flag.Var(&dirs, "dir", "Folders to listen for changes.") flag.Parse() //Setup the file 
watcher globFilter = glob.MustCompile(filter) fileWatcher, err := fsnotify.NewWatcher() if err != nil { log.Fatal(err) } defer fileWatcher.Close() watcher = fileWatcher for _, f := range dirs { log.Println("Watching: ", f) err = watcher.Add(f) if err != nil { log.Fatal(err) } } go watchFiles() //SErve the files baseFileServe := http.FileServer(http.Dir("./")) http.Handle("/", http.StripPrefix("/", baseFileServe)) resourceFileServe := http.FileServer(http.Dir(resourceDirectory)) http.Handle("/resources/", http.StripPrefix("/resources/", resourceFileServe)) //Listens client = &wsclient{} log.Println("Serving on: ", port) http.HandleFunc("/listen", client.handle) http.ListenAndServe(fmt.Sprintf(":%d", port), nil) } // runCommand runs the command with given name and arguments. It copies the // logs to standard output func runCommand(name string, args ...string) (*exec.Cmd, error) { cmd := exec.Command(name, args...) stderr, err := cmd.StderrPipe() if err != nil { return cmd, err } stdout, err := cmd.StdoutPipe() if err != nil { return cmd, err } if err := cmd.Start(); err != nil { return cmd, err } go io.Copy(os.Stdout, stdout) go io.Copy(os.Stderr, stderr) return cmd, nil }
package tunnel

import (
	"fmt"

	"github.com/asppj/cnbs/net-bridge/options"
)

// init sanity-checks at startup that the buffer produced by
// NewBuffWithPrefix actually has the configured prefix length; a
// mismatch is a programming error, so it panics immediately.
func init() {
	// Validation: prefix length must match the declared constant.
	if p, _ := NewBuffWithPrefix(options.HeartbeatNet, 0); len(p) != options.PrefixLen {
		panic(fmt.Errorf("前缀长度与规定不匹配:PrefixLen(%d)!=NewBuffWithPrefix(%d)", options.PrefixLen, len(p)))
	}
}
package getui_model import ( "fmt" "testing" ) func TestSingle(t *testing.T) { var p Single p.Cid = "2326fbc31ef63427a82e667b74353341" p.Message.Appkey = "al0zZ6nvSO9tvxPPrTVHD9" p.Message.IsOffline = false p.Message.Msgtype = "notification" p.Message.OfflineExpireTime = 1000000 p.Notification.Style.Type = 0 p.Notification.Style.Title = "Title" p.Notification.Style.Text = "Text" p.Notification.TransmissionContent = "" p.Notification.TransmissionType = false p.Requestid = "123456" re, e := PushSingel(p) fmt.Println("re", re) fmt.Println("e", e) }
package response import ( "time" "github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/entity" "gorm.io/gorm" ) type ResponseTransaction struct { ID uint `json:"id"` ParticipantId int `json:"participant_id"` CreatorId int `json:"creator_id"` EventId int `json:"event_id"` Amount float32 `json:"amount"` StatusPayment entity.StatusPayment `json:"status_payment"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` DeletedAt gorm.DeletedAt `json:"deleted_at"` } func ResponseTransactionFormatter(trans entity.Transaction) ResponseTransaction { formatter := ResponseTransaction{} formatter.ID = trans.ID formatter.ParticipantId = trans.ParticipantId formatter.CreatorId = trans.CreatorId formatter.EventId = trans.EventId formatter.Amount = trans.Amount formatter.StatusPayment = trans.StatusPayment formatter.CreatedAt = trans.CreatedAt formatter.UpdatedAt = trans.UpdatedAt formatter.DeletedAt = trans.DeletedAt return formatter }
package main import "testing" func TestGetJumps(t *testing.T){ pairStrings := []string{"0", "0", "1", "0", "0", "1", "0"} returnValue := GetJumps(pairStrings, 7) if (!(returnValue == 4)){ t.Error() } pairStrings = []string{"0", "0", "0", "0", "1", "0"} returnValue = GetJumps(pairStrings, 6) if (!(returnValue == 3)){ t.Error() } }
package worker import ( "testing" ) func Test_HttpRequest_1(t *testing.T) { array := [...][2]string{ {"http://www.baidu.com", "GET"}, {"https://www.alipay.com/", "HEAD"}, // {"https://mail.163.com", "HEAD"}, // {"http://www.sina.com", "HEAD"}, // {"https://mail.126.com", "GET"}, // {"https://mail.qq.com", "GET"}, } for _, v := range array { url, method := v[0], v[1] result := HttpRequest(&url, &method, true) if result.Error != "" { t.Error(result.Error) continue } if result.Code != 200 { t.Errorf("%s(%s) response %s, expect 200", url, method, result.Code) continue } t.Logf("pass check: %s(%s) in %f response %d(%s)", url, method, result.TimeDur, result.Code, result.Status) } } func Test_HttpRequest_2(t *testing.T) { url, method := "https://passport.baidu.com/?q=login", "GET" result := HttpRequest(&url, &method, true) if result.Error != "" { t.Error(result.Error) return } if result.Code != 200 { t.Errorf("redirect url %s(%s) response %s, expect 200", url, method, result.Code) return } t.Logf("pass check: redirect url %s(%s) in %f response %d(%s)", url, method, result.TimeDur, result.Code, result.Status) }
package domain // Geolocation holds latitude and longtitude type Geolocation struct { Latitude float32 Longitude float32 }
package goSolution

import "strings"

// findDuplicate groups file paths by identical file content.
//
// Each element of paths has the form "dir name1(content1) name2(content2) ...".
// The result contains one group per content string that occurs in more
// than one file; each group lists the full paths "dir/name" of those files.
func findDuplicate(paths []string) [][]string {
	// content -> full paths of the files carrying that content.
	contents := make(map[string][]string)
	for _, line := range paths {
		seg := strings.Split(line, " ")
		dir := seg[0]
		for _, file := range seg[1:] {
			// file looks like "name(content)"; strip the trailing ")".
			p := strings.Index(file, "(")
			name, content := file[:p], file[p:len(file)-1]
			// IDIOM: append on a nil map value allocates the slice, so the
			// original's explicit nil check was unnecessary. Also renamed the
			// inner variable, which used to shadow the outer `path`.
			contents[content] = append(contents[content], dir+"/"+name)
		}
	}
	ret := make([][]string, 0)
	for _, values := range contents {
		if len(values) > 1 {
			ret = append(ret, values)
		}
	}
	return ret
}
package redis import ( "strings" "github.com/go-redis/redis" ) type redisClient struct { client *redis.Client } func NewRedisClient(config Config) RedisClient { r := redisClient{} r.client = redis.NewClient(&redis.Options{ Addr: "localhost:32768", }) return &r } func (r *redisClient) GetTranslation(t GetTranslation) (string, error) { keyMap := map[string]string{ Key: t.Key, Locale: t.Locale, } key := GetKey(TranslationKey, keyMap) rs := r.client.Get(key) if rs.Err() != nil { return t.Key, rs.Err() } else { return rs.Val(), nil } } func (r *redisClient) SetTranslation(t SetTranslation) error { keyMap := map[string]string{ Key: t.Key, Locale: t.Locale, } key := GetKey(TranslationKey, keyMap) err := r.client.Set(key, t.Value, 0).Err() if err != nil { return err } if t.IsDefault { key := GetKey(TranslationKeyDefault, keyMap) err := r.client.Set(key, t.Value, 0).Err() if err != nil { return err } } return nil } func (r *redisClient) Close() error { return r.client.Close() } func GetKey(rawKey string, valueMap map[string]string) string { if rawKey == "" || len(valueMap) < 1 { return rawKey } actualKey := rawKey for placeHolder, actualValue := range valueMap { actualKey = strings.Replace(actualKey, "{"+placeHolder+"}", actualValue, 1) } return actualKey }
package gsettings import ( "errors" "sync" ) var ( mu = &sync.RWMutex{} s = make(map[string]string) ErrNotFound = errors.New("Key not found") ) func Reset() { s = make(map[string]string) } func Set(key, value string) { mu.Lock() s[key] = value mu.Unlock() } func Get(key string) (string, error) { mu.RLock() value, exists := s[key] mu.RUnlock() if !exists { return "", ErrNotFound } return value, nil }
package translator import ( "github.com/sirupsen/logrus" "io/ioutil" "net/http" "net/url" "regexp" "strings" ) const translateURL = "http://translate.googleapis.com/translate_a/single?client=gtx&sl=en&tl=uk&dt=t&q=" func TranslateText(text string) string { resp, err := http.Get(translateURL + url.QueryEscape(text)) if err != nil { logrus.Fatal(err) } defer func() { err := resp.Body.Close() if err != nil { logrus.Fatal(err) } }() t, _ := ioutil.ReadAll(resp.Body) translated := string(t) r, _ := regexp.Compile("\".+?\"") match := r.FindAllString(translated, -1) var res string for i, m := range match { if i%4 == 0 { res += m } } return strings.Replace(strings.TrimSuffix(strings.ReplaceAll(res, "\"", ""), "en"), `\n`, "\n", -1) }
package main import ( "github.com/danitw/api-em-go/database" "github.com/danitw/api-em-go/routes" "gopkg.in/macaron.v1" ) func main() { db := database.Connection() defer db.Close() database.Migrate(db) m := macaron.Classic() routes.Routes(m) m.Run() }
package mwords

import (
	"crypto/sha256"
	"errors"
	"math/big"
	"strings"
)

// MnemonicSentence is an ordered list of mnemonic words.
type MnemonicSentence []string

// String joins the words with single spaces.
func (ms MnemonicSentence) String() string {
	return strings.Join(ms, " ")
}

// IsValid reports whether the sentence has an allowed word count and
// every word belongs to the mnemonic word list.
func (ms MnemonicSentence) IsValid() bool {
	if len(ms)%sentenceMultiple != 0 || len(ms) < sentenceMinWords || len(ms) > sentenceMaxWords {
		return false
	}
	for _, word := range ms {
		if !IsValidWord(word) {
			return false
		}
	}
	return true
}

// MnemonicFromString splits a whitespace-separated sentence into words
// and validates the result.
func MnemonicFromString(sentence string) (MnemonicSentence, error) {
	var split MnemonicSentence = strings.Fields(sentence)
	if !split.IsValid() {
		return nil, errors.New("invalid mnemonic sentence")
	}
	return split, nil
}

// EntropyToMnemonic encodes entropy bytes into a mnemonic sentence:
// a SHA-256 checksum fragment is appended to the entropy and the result
// is cut into 11-bit indices into the word list.
func EntropyToMnemonic(entropy []byte) (MnemonicSentence, error) {
	bitsEntropy := len(entropy) * 8
	if !isValidEntropy(uint(bitsEntropy)) {
		return nil, errors.New("invalid entropy bit count")
	}
	h := sha256.New()
	if _, err := h.Write(entropy); err != nil {
		return nil, err
	}
	hSum := h.Sum(nil)
	// One checksum bit per entropyMultiple bits of entropy.
	bitsChecksum := bitsEntropy / entropyMultiple
	// Append the checksum bits (taken from the top of the hash's first
	// byte) to the low end of the entropy.
	bigEntropy := new(big.Int).SetBytes(entropy)
	bigEntropy.Lsh(bigEntropy, uint(bitsChecksum))
	bigEntropy.Or(bigEntropy, big.NewInt(int64(hSum[0]>>uint(8-bitsChecksum))))
	// Each word encodes 11 bits; peel them off least-significant-first,
	// filling the sentence back to front.
	wordCount := (bitsEntropy + bitsChecksum) / 11
	words := make(MnemonicSentence, wordCount)
	wordIndex := big.NewInt(0)
	for iter := wordCount - 1; iter >= 0; iter-- {
		wordIndex.And(bigEntropy, bits0to11)
		bigEntropy.Rsh(bigEntropy, 11)
		words[iter] = mnemonicWords[wordIndex.Uint64()]
	}
	return words, nil
}

// EntropyFromString decodes a sentence string back to entropy bytes.
func EntropyFromString(sentence string) ([]byte, error) {
	ms, err := MnemonicFromString(sentence)
	if err != nil {
		return nil, err
	}
	return EntropyFromMnemonic(ms)
}

// EntropyFromMnemonic reverses EntropyToMnemonic: it rebuilds the bit
// string from the word indices, strips the checksum bits, and verifies
// them against a fresh SHA-256 of the decoded entropy.
func EntropyFromMnemonic(ms MnemonicSentence) ([]byte, error) {
	if !ms.IsValid() {
		return nil, errors.New("invalid mnemonic sentence")
	}
	bitsEntropy := wordCountToEntropyBits[len(ms)]
	bitsChecksum := bitsEntropy / entropyMultiple
	// Re-assemble the full (entropy + checksum) integer, 11 bits per word.
	decoder := big.NewInt(0)
	for _, word := range ms {
		index := mnemonicLookup[word]
		decoder.Lsh(decoder, 11)
		decoder.Or(decoder, big.NewInt(int64(index)))
	}
	// Split the checksum bits off the low end.
	checksumBits := big.NewInt(0)
	checksumBits.And(decoder, checksumMaskMap[bitsEntropy])
	decoder.Rsh(decoder, uint(bitsChecksum))
	decoded := decoder.Bytes()
	// big.Int drops leading zero bytes; left-pad back to the fixed width.
	if len(decoded) != bitsEntropy/8 {
		padding := make([]byte, (bitsEntropy/8)-len(decoded))
		decoded = append(padding, decoded...)
	}
	h := sha256.New()
	if _, err := h.Write(decoded); err != nil {
		return nil, err
	}
	hSum := h.Sum(nil)
	if checksumBits.Cmp(big.NewInt(int64(hSum[0]>>uint(8-bitsChecksum)))) != 0 {
		return nil, errors.New("failed to validate checksum bits")
	}
	return decoded, nil
}
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package metric

import (
	"context"
	"fmt"
	"math"
	"math/rand"
	"net/http"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/quipo/statsd"
)

//TIMEBUCKETS Internal tuning: number of reservoir slots used to sample request times.
const TIMEBUCKETS = 10000

//Maximume max 10
const Maximume = 10

//MapKey is the context key type used to stash request/response timestamps.
type MapKey string

// httpMetricStore aggregates HTTP request metrics for one service port and
// periodically flushes them to the event-log monitor and to statsd.
// Maps and the reservoir array are guarded by lock.
type httpMetricStore struct {
	methodRequestSize  map[string]uint64
	unusualRequestSize map[string]uint64
	requestTimes       [TIMEBUCKETS]uint64
	// Cleared after each message flush (stale entries evicted by clear).
	PathCache map[string]*cache
	// Cleared after each message flush.
	IndependentIP        map[string]*cache
	ServiceID            string
	HostName             string
	Port                 string
	ctx                  context.Context
	cancel               context.CancelFunc
	lock                 sync.Mutex
	monitorMessageManage *MonitorMessageManage
	statsdclient         *statsd.StatsdClient
}

// show dumps the current counters to stdout for debugging.
func (h *httpMetricStore) show() {
	h.lock.Lock()
	defer h.lock.Unlock()
	fmt.Println("-------methodRequest--------------")
	for k, v := range h.methodRequestSize {
		fmt.Printf("Path:%s Count %d \n", k, v)
	}
	fmt.Println("-------unusualRequestSize---------")
	for k, v := range h.unusualRequestSize {
		fmt.Printf("Path:%s Count %d \n", k, v)
	}
	fmt.Println("-------requestTimes---------------")
	min, avg, max := calculate(&h.requestTimes)
	fmt.Printf("Min %f Avg %f Max %f \n", min, avg, max)
	fmt.Println("-------cacheRequest---------------")
	for k, v := range h.PathCache {
		min, avg, max := calculate(&v.ResTime)
		fmt.Printf("Path:%s Count %d UnusualCount %d timemin %f timeavg %f timemax %f \n", k, v.Count, v.UnusualCount, min, avg, max)
	}
	fmt.Println("-------IndependentIP---------")
	for k, v := range h.IndependentIP {
		fmt.Printf("IndependentIP:%s Count %d \n", k, v.Count)
	}
}

//sendmessage send message to eventlog monitor message chan.
// Per-path metrics are sorted and capped at the top 20 entries.
func (h *httpMetricStore) sendmessage() {
	h.lock.Lock()
	defer h.lock.Unlock()
	var caches = new(MonitorMessageList)
	for _, v := range h.PathCache {
		_, avg, max := calculate(&v.ResTime)
		mm := MonitorMessage{
			ServiceID:      h.ServiceID,
			Port:           h.Port,
			HostName:       h.HostName,
			MessageType:    "http",
			Key:            v.Key,
			Count:          v.Count,
			AbnormalCount:  v.UnusualCount,
			AverageTime:    Round(avg, 2),
			MaxTime:        Round(max, 2),
			CumulativeTime: Round(avg*float64(v.Count), 2),
		}
		caches.Add(&mm)
	}
	sort.Sort(caches)
	if caches.Len() > 20 {
		h.monitorMessageManage.Send(caches.Pop(20))
		return
	}
	h.monitorMessageManage.Send(caches)
}

//sendstatsd send metric to statsd. Counters are deltas, so each one is
// zeroed after it is flushed.
func (h *httpMetricStore) sendstatsd() {
	h.lock.Lock()
	defer h.lock.Unlock()
	var total int64
	for k, v := range h.methodRequestSize {
		h.statsdclient.Incr("request."+k, int64(v))
		total += int64(v)
		h.methodRequestSize[k] = 0
	}
	h.statsdclient.Incr("request.total", int64(total))
	var errtotal int64
	for k, v := range h.unusualRequestSize {
		h.statsdclient.Incr("request.unusual."+k, int64(v))
		errtotal += int64(v)
		h.unusualRequestSize[k] = 0
	}
	h.statsdclient.Incr("request.unusual.total", int64(errtotal))
	min, avg, max := calculate(&h.requestTimes)
	h.statsdclient.FGauge("requesttime.min", min)
	h.statsdclient.FGauge("requesttime.avg", avg)
	h.statsdclient.FGauge("requesttime.max", max)
	h.statsdclient.Gauge("requestclient", int64(len(h.IndependentIP)))
}

// clear evicts path and client-IP cache entries idle for over 5 minutes.
// NOTE(review): this runs from Start's ticker loop without holding h.lock,
// while Input mutates the same maps under the lock — confirm whether this
// unguarded access is intentional.
func (h *httpMetricStore) clear() {
	var clearKey []string
	for k, v := range h.PathCache {
		if v.updateTime.Add(5 * time.Minute).Before(time.Now()) {
			clearKey = append(clearKey, k)
		}
	}
	for _, key := range clearKey {
		delete(h.PathCache, key)
	}
	clearKey = clearKey[:0]
	for k, v := range h.IndependentIP {
		if v.updateTime.Add(5 * time.Minute).Before(time.Now()) {
			clearKey = append(clearKey, k)
		}
	}
	for _, key := range clearKey {
		delete(h.IndependentIP, key)
	}
}

//Input ingests one parsed HTTP message: per-method counters, 4xx/5xx
// counters, a random reservoir slot for the response time, and the
// per-path and per-client-IP caches. (Translated from Chinese: "data input".)
func (h *httpMetricStore) Input(message interface{}) {
	h.lock.Lock()
	defer h.lock.Unlock()
	if httpms, ok := message.(*HTTPMessage); ok {
		//request method
		h.methodRequestSize[httpms.Method]++
		//request unusual
		if httpms.StatusCode >= 400 && httpms.StatusCode < 500 {
			h.unusualRequestSize["4xx"]++
		}
		if httpms.StatusCode >= 500 {
			h.unusualRequestSize["5xx"]++
		}
		//requestTimes: store this duration in a random reservoir bucket.
		randn := rand.Intn(TIMEBUCKETS)
		h.requestTimes[randn] = uint64(httpms.TimeConsum)
		//cache
		if c, ok := h.PathCache[httpms.URI]; ok {
			c.Count++
			if httpms.StatusCode >= 400 {
				c.UnusualCount++
			}
			c.ResTime[randn] = uint64(httpms.TimeConsum)
			c.updateTime = time.Now()
		} else {
			c := &cache{
				Key: httpms.URI,
			}
			c.Count++
			if httpms.StatusCode >= 400 {
				c.UnusualCount++
			}
			c.ResTime[randn] = uint64(httpms.TimeConsum)
			c.updateTime = time.Now()
			h.PathCache[httpms.URI] = c
		}
		//remote addr
		if c, ok := h.IndependentIP[httpms.RemoteAddr]; ok {
			c.Count++
			c.updateTime = time.Now()
		} else {
			c := &cache{
				Key: httpms.RemoteAddr,
			}
			c.Count++
			c.updateTime = time.Now()
			h.IndependentIP[httpms.RemoteAddr] = c
		}
	}
}

//Start runs the 5-second flush loop until the store's context is
// cancelled. (Translated from Chinese: "start".)
func (h *httpMetricStore) Start() {
	tickMessage := time.NewTicker(time.Second * 5)
	for {
		select {
		case <-h.ctx.Done():
			tickMessage.Stop()
			return
		case <-tickMessage.C:
			h.sendmessage()
			h.sendstatsd()
			h.clear()
		}
	}
}

//Stop cancels the store's context, ending Start's loop.
// (Translated from Chinese: "stop".)
func (h *httpMetricStore) Stop() {
	h.cancel()
}

//HTTPMessage http protocol zeromq message
type HTTPMessage struct {
	Method        string `json:"method"`
	URI           string `json:"uri"`
	StatusCode    int    `json:"statusCode"`
	ContentLength int    `json:"contentLength"`
	TimeConsum    int64  `json:"timeConsum"`
	RemoteAddr    string
}

//CreateHTTPMessage builds an HTTPMessage from an *http.Response,
// stripping the query string from the URI and computing the elapsed time
// from the ReqTime/ResTime values stashed in the request context.
// (Translated from Chinese: "construct message from response".)
func CreateHTTPMessage(rs *http.Response) *HTTPMessage {
	m := &HTTPMessage{
		Method:     rs.Request.Method,
		StatusCode: rs.StatusCode,
		RemoteAddr: rs.Request.RemoteAddr,
	}
	in := strings.Index(rs.Request.RequestURI, "?")
	if in > -1 {
		m.URI = rs.Request.RequestURI[:in]
	} else {
		m.URI = rs.Request.RequestURI
	}
	var ReqTime, ResTime time.Time
	if t, ok := rs.Request.Context().Value(MapKey("ReqTime")).(time.Time); ok {
		ReqTime = t
	}
	if t, ok := rs.Request.Context().Value(MapKey("ResTime")).(time.Time); ok {
		ResTime = t
	}
	if !ReqTime.IsZero() && !ResTime.IsZero() {
		m.TimeConsum = ResTime.Sub(ReqTime).Nanoseconds()
	}
	return m
}

//Round rounds f to n decimal places (half away from zero for positive f).
func Round(f float64, n int) float64 {
	pow10n := math.Pow10(n)
	return math.Trunc((f+0.5/pow10n)*pow10n) / pow10n
}
// Copyright (c) 2019-2021 Leonid Kneller. All rights reserved. // Licensed under the MIT license. // See the LICENSE file for full license information. package rnames import ( "errors" "github.com/reconditematter/mym" "math" "math/rand" "time" ) // HumanName -- the name and gender of a person. type HumanName struct { Family string `json:"family"` Given string `json:"given"` Gender string `json:"gender"` } const ( GenBoth = iota // specifies both genders GenF // specifies the female gender GenM // specifies the male gender ) // Gen -- generates `count` random names. // `gengen` specifies the names gender. // This function returns an error when count∉{1,...,1000}. func Gen(count int, gengen int) ([]HumanName, error) { if !(1 <= count && count <= 1000) { return nil, errors.New("bad count") } // names := make(map[[2]string]string) src := mym.MT19937() src.Seed(time.Now().UnixNano()) rng := rand.New(src) // for len(names) < count { i := rng.Intn(1000) j := rng.Intn(1000) switch gengen { case GenF: name := [2]string{family[i], givenf[j]} names[name] = "female" case GenM: name := [2]string{family[i], givenm[j]} names[name] = "male" default: k := rng.Uint64() < math.MaxUint64/2 if k { name := [2]string{family[i], givenf[j]} names[name] = "female" } else { name := [2]string{family[i], givenm[j]} names[name] = "male" } } } // hnames := make([]HumanName, 0, count) for name, gender := range names { hn := HumanName{Family: name[0], Given: name[1], Gender: gender} hnames = append(hnames, hn) } // return hnames, nil }
package main import ( "log" "net/http" "strconv" "onikur.com/text-to-img-api/api" "onikur.com/text-to-img-api/utils" "onikur.com/text-to-img-api/conf" ) func main() { conf.Init() utils.Fonts().CacheFonts() mux := http.NewServeMux() mth := &api.MakeTextHandler{} mux.Handle("/-/", mth) mux.Handle("/api/text/", mth) mux.Handle("/api/fonts", &api.ListFontsHandler{}) log.Println("Server started") log.Fatal(http.ListenAndServe(":"+strconv.Itoa(conf.Get().Server.Port), mux)) }
package parser

import (
	"bytes"

	"github.com/bouncepaw/mycomarkup/v2/blocks"
	"github.com/bouncepaw/mycomarkup/v2/mycocontext"
)

// nextList parses a complete (possibly nested) list from the input.
// Call only if there is a list item on the line.
func nextList(ctx mycocontext.Context) (list blocks.List, eof bool) {
	var contents []blocks.Block
	rootMarker, rootLevel, _ := markerOnNextLine(ctx)
	list = blocks.List{
		Items:  make([]blocks.ListItem, 0),
		Marker: rootMarker,
	}
	for !eof {
		marker, level, found := markerOnNextLine(ctx)
		// Stop at a non-item line, or at a same-level item whose marker
		// kind differs from the list's (that starts a new list).
		if !found || (!marker.SameAs(list.Marker) && rootLevel == level) {
			break
		}
		// Drop the "*... " marker prefix before reading the item body.
		_ = mycocontext.EatUntilSpace(ctx)
		contents, eof = nextListItem(ctx, rootLevel)
		item := blocks.ListItem{
			Marker:   marker,
			Level:    level,
			Contents: contents,
		}
		list.Items = append(list.Items, item)
	}
	return list, eof
}

// readNextListItemsContents reads the raw text of one list item, honoring
// backslash escapes and {…} multi-line groups: a newline ends the item
// unless it is escaped or the reader is inside curly braces.
func readNextListItemsContents(ctx mycocontext.Context) (text bytes.Buffer, eof bool) {
	var (
		onNewLine       = true
		escaping        = false
		curlyBracesOpen = 0
		b               byte
	)
walker: // Read all item's contents
	for !eof {
		b, eof = mycocontext.NextByte(ctx)
	stateMachine: // I'm extremely sorry
		switch {
		case onNewLine && b != ' ':
			onNewLine = false
			goto stateMachine // re-dispatch the same byte with the flag cleared
		case onNewLine:
			// We just ignore spaces on line beginnings
		case escaping:
			escaping = false
			if b == '\n' && curlyBracesOpen == 0 {
				break walker
			}
			text.WriteByte(b)
		case b == '\\':
			escaping = true
			text.WriteByte('\\')
		case b == '{':
			// The brace opening the outermost group is not emitted.
			if curlyBracesOpen > 0 {
				text.WriteByte('{')
			}
			curlyBracesOpen++
		case b == '}':
			// The brace closing the outermost group is not emitted.
			if curlyBracesOpen != 1 {
				text.WriteByte('}')
			}
			if curlyBracesOpen >= 0 {
				curlyBracesOpen--
			}
		case b == '\n' && curlyBracesOpen == 0:
			break walker
		case b == '\n':
			text.WriteByte(b)
			onNewLine = true
		default:
			text.WriteByte(b)
		}
	}
	return text, eof
}

// nextListItem parses one item's contents, gathering any nested sub-list
// (with one asterisk stripped per bullet) and re-parsing the whole item
// text as a sub-document.
func nextListItem(
	ctx mycocontext.Context,
	rootLevel uint, // They have to have a level higher than this, though
) (contents []blocks.Block, eof bool) {
	// Parse the text as a separate mycodoc
	var (
		text    bytes.Buffer
		ast     = make([]blocks.Block, 0)
		subText bytes.Buffer
	)
	text, eof = readNextListItemsContents(ctx)
	// Grab the sublist text, if there is one. Each bullet is decremented
	// by one asterisk.
	for !eof {
		_, level, found := markerOnNextLine(ctx)
		// We are not interested in same level or less-nested list items.
		// Screw them! Forget them!
		if !found || level <= rootLevel {
			break
		}
		// I am so sure there is an asterisk we can simply drop.
		// Add a newline for proper parsing later on.
		// The space is left by EatUntilSpace at the end of the string.
		disnestedBullet := "\n" + mycocontext.EatUntilSpace(ctx)[1:]
		text.WriteString(disnestedBullet)
		subText, eof = readNextListItemsContents(ctx)
		// Let's just hope it never fails. We are confident people.
		_, _ = subText.WriteTo(&text)
	}
	parseSubdocumentForEachBlock(ctx, &text, func(block blocks.Block) {
		ast = append(ast, block)
	})
	return ast, eof
}

// looksLikeList reports whether the next line starts a top-level list item.
func looksLikeList(ctx mycocontext.Context) bool {
	_, level, found := markerOnNextLine(ctx)
	return found && level == 1
}

// markerOnNextLine scans the upcoming line for a list marker of the form
// "*", "**", … optionally followed by 'v' (todo done), 'x' (todo) or '.'
// (ordered), then a space. It returns the marker kind, the nesting level
// (number of asterisks), and whether a marker was found at all.
func markerOnNextLine(ctx mycocontext.Context) (m blocks.ListMarker, level uint, found bool) {
	var (
		onStart            = true
		onAsterisk         = false
		onSpecialCharacter = false
	)
	for _, b := range ctx.Input().Bytes() {
		switch {
		case onStart && b != '*':
			return blocks.MarkerUnordered, 0, false
		case onStart:
			level = 1
			onStart = false
			onAsterisk = true
		case onAsterisk && b == '*':
			level++
		case onAsterisk && b == ' ':
			return m, level, true
		case onAsterisk && (b == 'v' || b == 'x' || b == '.'):
			onAsterisk = false
			onSpecialCharacter = true
			switch b {
			case 'v':
				m = blocks.MarkerTodoDone
			case 'x':
				m = blocks.MarkerTodo
			case '.':
				m = blocks.MarkerOrdered
			}
		case onAsterisk:
			return blocks.MarkerUnordered, 0, false
		case onSpecialCharacter && b != ' ':
			return blocks.MarkerUnordered, 0, false
		case onSpecialCharacter:
			return m, level, true
		}
	}
	return blocks.MarkerUnordered, 0, false
}
package twch

import (
	"fmt"
	"net/http"
	"reflect"
	"testing"
)

// TestListBlocks stubs the blocks-list endpoint with a canned JSON
// response and checks that it is decoded into the expected []Block.
func TestListBlocks(t *testing.T) {
	setup()
	defer teardown()

	mux.HandleFunc("/users/test_user1/blocks", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "GET")
		fmt.Fprint(w, `{ "_links": { "next": "https://api.twitch.tv/kraken/users/test_user1/test_user1?limit=25&offset=25", "self": "https://api.twitch.tv/kraken/users/test_user1/test_user1?limit=25&offset=0" }, "blocks": [ { "_links": { "self": "s" }, "updated_at": "2013-02-07T01:04:43Z", "user": { "_links": { "self": "s" }, "updated_at": "2013-02-06T22:44:19Z", "display_name": "d", "staff": false, "name": "n", "_id": 1, "logo": "l", "created_at": "2010-06-30T08:26:49Z" }, "_id": 1 } ] }`)
	})

	want := []Block{
		Block{
			ID:        intPtr(1),
			UpdatedAt: stringPtr("2013-02-07T01:04:43Z"),
			User: &User{
				ID:          intPtr(1),
				DisplayName: stringPtr("d"),
				Name:        stringPtr("n"),
				Logo:        stringPtr("l"),
				Staff:       boolPtr(false),
				CreatedAt:   stringPtr("2010-06-30T08:26:49Z"),
				UpdatedAt:   stringPtr("2013-02-06T22:44:19Z"),
			},
		},
	}

	opts := &ListOptions{Limit: 25, Offset: 0}
	got, resp, err := client.Blocks.ListBlocks("test_user1", opts)
	if err != nil {
		t.Errorf("Blocks.ListBlocks: returned an error: %+v\n", err)
	}
	testListResponse(t, resp, nil, intPtr(25), nil)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Block.ListBlocks: result did not match expecation\nwant: %+v\n got: %+v", want, got)
	}
}

// TestAddBlock stubs the add-block endpoint (PUT) and verifies the
// returned *Block is decoded correctly.
func TestAddBlock(t *testing.T) {
	setup()
	defer teardown()

	mux.HandleFunc("/users/test_user1/blocks/test_user2", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "PUT")
		fmt.Fprint(w, `{ "_links": { "self": "h" }, "updated_at": "2013-02-07T01:04:43Z", "user": { "_links": { "self": "h" }, "updated_at": "2013-01-18T22:33:55Z", "logo": "l", "staff": false, "display_name": "d", "name": "n", "_id": 1, "created_at": "2011-05-01T14:50:12Z" }, "_id": 1 }`)
	})

	want := &Block{
		ID:        intPtr(1),
		UpdatedAt: stringPtr("2013-02-07T01:04:43Z"),
		User: &User{
			ID:          intPtr(1),
			DisplayName: stringPtr("d"),
			Name:        stringPtr("n"),
			Logo:        stringPtr("l"),
			Staff:       boolPtr(false),
			CreatedAt:   stringPtr("2011-05-01T14:50:12Z"),
			UpdatedAt:   stringPtr("2013-01-18T22:33:55Z"),
		},
	}

	got, _, err := client.Blocks.AddBlock("test_user1", "test_user2")
	if err != nil {
		t.Errorf("Blocks.AddBlock: returned error %+v", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Block.AddBlocks: result did not match expecation\nwant: %+v\n got: %+v", want, got)
	}
}

// TestRemoveBlock stubs the remove-block endpoint (DELETE, 204) and
// verifies no error is returned.
func TestRemoveBlock(t *testing.T) {
	setup()
	defer teardown()

	mux.HandleFunc("/users/test_user1/blocks/test_user2", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "DELETE")
		w.WriteHeader(http.StatusNoContent)
	})

	err := client.Blocks.RemoveBlock("test_user1", "test_user2")
	if err != nil {
		t.Errorf("Blocks.RemoveBlock: returned error %+v", err)
	}
}
package main

import "fmt"

// normalFunction takes no arguments and prints a fixed message.
func normalFunction() {
	println("normalFunction")
}

// argument prints the text it receives.
func argument(text string) {
	// BUG FIX: the original printed the literal string "text" instead of
	// the parameter's value.
	println(text)
}

// arguments demonstrates multiple parameters of mixed types.
func arguments(a, b int, c string) {
	fmt.Printf("%d mas %d es igual a %s\n", a, b, c)
}

// returned doubles its input.
func returned(number int) int {
	return number * 2
}

// multiReturn returns its input and the two following integers.
func multiReturn(a int) (b, c, d int) {
	return a, a + 1, a + 2
}

// main exercises each demo function.
func main() {
	normalFunction()
	argument("hola mundo")
	arguments(5, 5, "10")

	value := returned(5)
	fmt.Println("Value:", value)

	a, b, _ := multiReturn(5)
	fmt.Println("a:", a)
	fmt.Println("b:", b)
	// fmt.Println("c:", c)
}
/* * Copyright (c) 2018 Juniper Networks, Inc. All rights reserved. * * file: main.go * details: Entry point for Query API Server, the binary creates Command * Line Interface (CLI) utility to run the application. * */ package main import ( "net/http" "os" dbhandler "github.com/Juniper/collector/query-api/db-handler" opts "github.com/Juniper/collector/query-api/options" "github.com/urfave/cli" ) var ( version string ) func main() { app := cli.NewApp() app.Version = version app.Flags = []cli.Flag{ cli.StringFlag{ Name: "config-file", Value: "/etc/query-api/query-api.conf", Usage: "Load configuration from `FILE`", }, } app.Action = RunQueryAPIServer err := app.Run(os.Args) if err != nil { opts.Logger.Fatalf("Application error: %s", err) } } func RunQueryAPIServer(c *cli.Context) error { opts.ParseArgs(c) StartQueryAPIServer() return nil } func StartQueryAPIServer() { mux := http.NewServeMux() registerDBHandlers(mux) opts.Logger.Println("Starting Web Server on :", opts.ListenPort) http.ListenAndServe(":"+opts.ListenPort, mux) } func registerDBHandlers(mux *http.ServeMux) { dbHandlersLen := len(opts.DataBaseList) for i := 0; i < dbHandlersLen; i++ { go func(i int) { dbH := dbhandler.NewDBHandler(opts.DataBaseList[i]) if err := dbH.Run(mux); err != nil { opts.Logger.Fatalf("dbHandler %v run error %v ", opts.DataBaseList[i], err) } }(i) } }
package imageutil

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"testing"
)

// TestReadRgbaPng decodes the reference image and verifies both its bounds
// and a SHA-256 digest of the raw RGBA pixel data.
func TestReadRgbaPng(t *testing.T) {
	// SHA-256 of the expected raw RGBA pixels of test_data/fruits.png.
	goldImageHash := "d333fb91aa05709483df2d00f62e3caa91db0be1b30ff72e8e829f3264cb30b9"

	image, err := ReadRgbaPng("test_data/fruits.png")
	if err != nil {
		t.Fatal(err)
	}
	if image.Rect.Min.X != 0 || image.Rect.Max.X != 512 ||
		image.Rect.Min.Y != 0 || image.Rect.Max.Y != 512 {
		t.Errorf("Incorrect image rectangle: %v\n", image.Rect)
	}

	hash := sha256.Sum256(image.Pix)
	imageHash := hex.EncodeToString(hash[:])
	if imageHash != goldImageHash {
		t.Error("Pixel data hash mismatch. Got :", imageHash)
	}
}

// TestRgbaToPng round-trips the reference image through RgbaToPng and
// checks the re-decoded pixels match the original byte-for-byte.
func TestRgbaToPng(t *testing.T) {
	image, err := ReadRgbaPng("test_data/fruits.png")
	if err != nil {
		t.Fatal(err)
	}
	err = RgbaToPng(image.Pix, image.Bounds().Dx(), image.Bounds().Dy(),
		"test_tmp/fruits_RgbaToPng.png")
	if err != nil {
		t.Fatal(err)
	}
	recodedImage, err := ReadRgbaPng("test_tmp/fruits_RgbaToPng.png")
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(image.Pix, recodedImage.Pix) {
		t.Error("Pixel data mismatch")
	}
}
package urlmanipulations

import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
	"os"

	"github.com/go-chi/chi/v5"
	"github.com/rs/zerolog"
)

//go:generate mockgen --source=./url_manipulations.go --destination=./url_manipulations_mocks_test.go --package=urlmanipulations_test

// CallerURL abstracts a component that performs a call against a URL.
type CallerURL interface {
	Call(url *url.URL) error
}

// ExampleServer shows how to model a server with multiple HTTP paths for a
// test — not the most robust approach, but it works: the server is started
// in a goroutine and a channel signals when the listener is ready.
func ExampleServer() {
	mux := chi.NewMux()
	mux.Get("/aaaa", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Hello, client aaaa")
	})
	mux.Get("/bbbb", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Hello, client bbbb")
	})

	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

	listenStartChan := make(chan struct{})
	go func() {
		l, err := net.Listen("tcp", ":8000")
		if err != nil {
			// BUG FIX: the error was previously swallowed behind an empty
			// "handle error" comment, leaving l nil and panicking below.
			logger.Fatal().Err(err).Msg("listen error")
		}
		// Signal readiness only after the listener is bound.
		listenStartChan <- struct{}{}
		if err := http.Serve(l, mux); err != nil {
			logger.Err(err).Msgf("serve error")
		}
	}()

	ctx := context.Background()
	ctx = logger.WithContext(ctx)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost:8000/vvvv", nil)
	if err != nil {
		// BUG FIX: this error was previously ignored; a malformed request
		// would have surfaced only as a confusing nil-request failure.
		log.Fatal(err)
	}
	// NOTE(review): /vvvv matches neither registered route, so the body
	// will be chi's 404 response rather than "Hello, client" — confirm
	// the intended path and Output comment.
	httpClient := http.Client{}
	<-listenStartChan
	logger.Info().Msgf("server started and we can khow it")
	res, err := httpClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	greeting, err := io.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%s", greeting)
	// Output: Hello, client
}
package main

import (
	"bufio"
	"flag"
	"fmt"
	"math"
	"os"
)

// maximumSum is the part-two threshold: a grid cell belongs to the "safe"
// region when the sum of its Manhattan distances to every coordinate is
// below this value.
const maximumSum = 10000

// coordinate is a point on the grid (w = column/x, h = row/y).
type coordinate struct {
	w, h float64
}

func main() {
	filePath := flag.String("p", "input.txt", "Input's file path")
	flag.Parse()

	f, err := os.Open(*filePath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Opening input file: %v\n", err)
		os.Exit(1)
	}
	defer f.Close()

	// Parse one "x, y" pair per line, tracking the bounding box.
	scanner := bufio.NewScanner(f)
	var width, height float64
	var coordinates []coordinate
	for scanner.Scan() {
		coord := coordinate{}
		// FIX: use the conventional %g float verb; the original %b is an
		// unusual choice (all floating-point scan verbs behave the same).
		if _, err := fmt.Sscanf(scanner.Text(), "%g, %g", &coord.w, &coord.h); err != nil {
			fmt.Fprintf(os.Stderr, "Error parsing coordinates: %v\n", err)
			os.Exit(1)
		}
		if coord.w > width {
			width = coord.w
		}
		if coord.h > height {
			height = coord.h
		}
		coordinates = append(coordinates, coord)
	}
	// FIX: report scanner failures instead of silently truncating input.
	if err := scanner.Err(); err != nil {
		fmt.Fprintf(os.Stderr, "Reading input: %v\n", err)
		os.Exit(1)
	}

	infinite := make(map[coordinate]bool)
	areas := make(map[coordinate]int)
	regions := 0
	// BUG FIX: the loops previously ran with strict < width / < height
	// while the border test below compared against == width / == height,
	// so those conditions were unreachable: the far edges were never
	// visited and areas extending past the right/bottom border were never
	// marked infinite. Iterate the bounding box inclusively.
	for w := float64(0); w <= width; w++ {
		for h := float64(0); h <= height; h++ {
			var closest coordinate
			var total float64
			minimum := float64(-1)
			for _, coord := range coordinates {
				distance := math.Abs(w-coord.w) + math.Abs(h-coord.h)
				total += distance
				if distance < minimum || minimum == -1 {
					minimum = distance
					closest = coord
				} else if distance == minimum {
					// Ties belong to no coordinate; use a sentinel.
					closest = coordinate{-1, -1}
				}
			}
			// A closest coordinate on the bounding-box border implies its
			// area extends to infinity.
			if w == 0 || h == 0 || w == width || h == height {
				infinite[closest] = true
			}
			areas[closest]++
			if total < maximumSum {
				regions++
			}
		}
	}

	// Part one: largest finite area.
	maximum := 0
	for coord, count := range areas {
		if _, ok := infinite[coord]; count > maximum && !ok {
			maximum = count
		}
	}

	fmt.Printf("Largest area: %d\n", maximum)
	fmt.Printf("Size of regions with area less than %d: %d\n", maximumSum, regions)
}
package main

import "fmt"

// IPhone is the product interface every phone model produced by the
// factory must satisfy.
type IPhone interface {
	LoginQQ()
}

// IPhone7 is the iPhone 7 model.
type IPhone7 struct{}

// LoginQQ logs into QQ from an iPhone 7.
func (p *IPhone7) LoginQQ() {
	fmt.Println("正在使用 IPhone7 登陆QQ")
}

// IPhone13 is the iPhone 13 model.
type IPhone13 struct{}

// LoginQQ logs into QQ from an iPhone 13.
func (p *IPhone13) LoginQQ() {
	fmt.Println("正在使用 IPhone13 登陆QQ")
}

// IPhone110 is the iPhone 110 model.
type IPhone110 struct{}

// LoginQQ logs into QQ from an iPhone 110.
func (p *IPhone110) LoginQQ() {
	fmt.Println("正在使用 IPhone110 登陆QQ")
}

// NewIphone is the iPhone factory: it builds a new phone for the requested
// version number, or returns nil when the factory does not support that
// version.
func NewIphone(version int) IPhone {
	builders := map[int]func() IPhone{
		7:   func() IPhone { return &IPhone7{} },
		13:  func() IPhone { return &IPhone13{} },
		110: func() IPhone { return &IPhone110{} },
	}
	build, supported := builders[version]
	if !supported {
		return nil
	}
	return build()
}

func main() {
	phone := NewIphone(110)
	if phone == nil {
		fmt.Println("创建新IPhone失败")
		return
	}
	phone.LoginQQ()
}
package kuu

import (
	"testing"
)

// TestGetXAxisNames checks the spreadsheet-style column names produced by
// GetXAxisNames at a few boundary sizes: the last name for 10 columns is
// "J", for 27 it is "AA", and for 52 it is "AZ".
func TestGetXAxisNames(t *testing.T) {
	cases := []struct {
		count int
		last  string
	}{
		{10, "J"},
		{27, "AA"},
		{52, "AZ"},
	}
	for _, c := range cases {
		names := GetXAxisNames(c.count)
		if names[len(names)-1] != c.last {
			t.Errorf("wrong x-axis name: %v\n", names)
		} else {
			t.Log(names)
		}
	}
}
package main import ( "fmt" "log" "os" "github.com/godbus/dbus" "github.com/godbus/dbus/introspect" "pault.ag/go/config" "pault.ag/go/wmata" ) var wifiMetroMap = map[string][]string{ "Dolcezza Dupont - Guest": []string{"A03"}, "Pretty Fly for a WiFi": []string{"B35"}, } type WMATADbusInterface struct{} func (w WMATADbusInterface) NextLocalTrains() ([]map[string]string, *dbus.Error) { ssids, err := GetVisibleNetworks() if err != nil { return []map[string]string{}, dbus.NewError( "org.anized.wmata.Rail.NetworkError", []interface{}{err.Error()}, ) } stops := []string{} for _, ssid := range ssids { if wifiStops, ok := wifiMetroMap[ssid]; ok { stops = append(stops, wifiStops...) } } if len(stops) == 0 { return []map[string]string{}, nil } return w.NextTrains(stops) } func (w WMATADbusInterface) NextTrains(stops []string) ([]map[string]string, *dbus.Error) { if len(stops) == 0 { return []map[string]string{}, dbus.NewError( "org.anized.wmata.Rail.NoStopsGiven", []interface{}{fmt.Errorf("No stops given").Error()}, ) } log.Printf("Getting info") predictions, err := wmata.GetPredictionsByCodes(stops...) 
if err != nil { return []map[string]string{}, dbus.NewError( "org.anized.wmata.Rail.NotFound", []interface{}{err.Error()}, ) } log.Printf("Building map") ret := []map[string]string{} for _, prediction := range predictions { ret = append(ret, map[string]string{ "cars": prediction.Cars, "group": prediction.Group, "line": prediction.Line.Code, "minutes": prediction.Minutes, "desitnation": prediction.Destination, "desitnation_name": prediction.DesitnationName, "desitnation_code": prediction.DesitnationCode, "location_name": prediction.LocationName, "location_code": prediction.LocationCode, }) } return ret, nil } type WMATADbus struct { APIKey string `flag:"apikey" description:"API Key to use"` } func main() { conf := WMATADbus{} if err := config.Load("wmatadbusd", &conf); err != nil { panic(err) } wmata.SetAPIKey(conf.APIKey) conn, err := dbus.SessionBus() if err != nil { panic(err) } reply, err := conn.RequestName("org.anized.wmata.Rail", dbus.NameFlagDoNotQueue) if err != nil { panic(err) } if reply != dbus.RequestNameReplyPrimaryOwner { fmt.Fprintln(os.Stderr, "name already taken") os.Exit(1) } wmata := WMATADbusInterface{} introspectedMethods := introspect.Methods(wmata) node := introspect.Node{ Name: "/org/anized/wmata", Interfaces: []introspect.Interface{ introspect.Interface{ Name: "org.anized.wmata.Rail", Methods: introspectedMethods, }, }, } export := introspect.NewIntrospectable(&node) // str, err := export.Introspect() // fmt.Printf("%s %s\n", str, err) conn.Export(wmata, "/org/anized/wmata/Rail", "org.anized.wmata.Rail") conn.Export( export, "/org/anized/wmata/Rail", "org.freedesktop.DBus.Introspectable", ) select {} }
package main import ( ".." // pulse-simple "fmt" ) func main() { fmt.Printf("Channel Map Test\n") fmt.Printf("================\n") fmt.Printf("CHANNELS_MAX: %v\n", pulse.CHANNELS_MAX) fmt.Printf("\nMono\n") fmt.Printf("----\n") mono := &pulse.ChannelMap{} mono.InitMono() print_info(mono) fmt.Printf("\nStereo\n") fmt.Printf("------\n") stereo := &pulse.ChannelMap{} stereo.InitStereo() print_info(stereo) spec := &pulse.SampleSpec{pulse.SAMPLE_S16LE, 44100, 2} fmt.Printf("\nspec := &SampleSpec{SAMPLE_S16LE, 44100, 2}\n") fmt.Printf("mono.Compatible(spec): %v\n", mono.Compatible(spec)) fmt.Printf("stereo.Compatible(spec): %v\n", stereo.Compatible(spec)) fmt.Printf("\n9 Channel AIFF (should fail)\n") fmt.Printf("----------------------------\n") cmap := &pulse.ChannelMap{} err := cmap.InitAuto(9, pulse.CHANNEL_MAP_AIFF) if err != nil { fmt.Printf("Error: %s\n", err) } else { print_info(cmap) } fmt.Printf("\n9 Channel AIFF (should succeed)\n") fmt.Printf("-------------------------------\n") cmap.InitExtend(9, pulse.CHANNEL_MAP_AIFF) print_info(cmap) fmt.Printf("\naiff9.Superset(stereo): %v\n", cmap.Superset(stereo)) fmt.Printf("aiff9.Superset(mono): %v\n", cmap.Superset(mono)) name, err := stereo.Name() if err != nil { fmt.Printf("\nstereo.Name(): %v\n", err) } else { fmt.Printf("\nstereo.Name(): %v\n", name) } name, err = stereo.PrettyName() if err != nil { fmt.Printf("stereo.PrettyName(): %v\n", err) } else { fmt.Printf("stereo.PrettyName(): %v\n", name) } fmt.Printf("\n7.1 Surround\n") fmt.Printf("------------\n") err = cmap.InitAuto(8, pulse.CHANNEL_MAP_ALSA) if err != nil { fmt.Printf("Error: %s\n", err) } else { print_info(cmap) name, err = cmap.PrettyName() if err != nil { fmt.Println(err) } else { fmt.Printf("PrettyName: %v\n", name) } } fmt.Printf("\nsurround has subwoofer: %v\n", cmap.HasPosition(pulse.CHANNEL_POSITION_SUBWOOFER)) fmt.Printf("stereo has subwoofer: %v\n", stereo.HasPosition(pulse.CHANNEL_POSITION_SUBWOOFER)) fmt.Printf("subwoofer channel mask: 
0x%x\n", pulse.CHANNEL_POSITION_SUBWOOFER.Mask()) fmt.Printf("\nsurround.String(): %v\n", cmap.String()) } func print_info(cmap *pulse.ChannelMap) { fmt.Printf("Channels: %v\n", cmap.Channels) for i := 0; i < int(cmap.Channels); i++ { fmt.Printf("Map[%d]: %d (%v)\n", i, cmap.Map[i], cmap.Map[i]) } fmt.Printf("ChannelPositionMask: %b\n", cmap.Mask()) }
package group

import (
	"Open_IM/pkg/common/config"
	"Open_IM/pkg/common/db"
	"Open_IM/pkg/common/db/mysql_model/im_mysql_model"
	"Open_IM/pkg/common/log"
	pbGroup "Open_IM/pkg/proto/group"
	"Open_IM/pkg/utils"
	"context"
)

// QuitGroup removes the calling user (identified by the token in req) from
// the group req.GroupID. Application-level failures are reported through
// CommonResp.ErrorCode/ErrorMsg with a nil Go error, so RPC transport
// errors and business errors stay distinct.
//
// NOTE(review): membership is deleted from MySQL first and then from the
// mongo-backed store; if the second delete fails the MySQL delete is not
// rolled back — confirm whether that partial state is acceptable.
func (s *groupServer) QuitGroup(ctx context.Context, req *pbGroup.QuitGroupReq) (*pbGroup.CommonResp, error) {
	log.InfoByArgs("rpc quit group is server,args:", req.String())
	//Parse token, to find current user information
	claims, err := utils.ParseToken(req.Token)
	if err != nil {
		log.Error(req.Token, req.OperationID, "err=%s,parse token failed", err.Error())
		return &pbGroup.CommonResp{ErrorCode: config.ErrParseToken.ErrCode, ErrorMsg: config.ErrParseToken.ErrMsg}, nil
	}
	log.InfoByKv("args:", req.OperationID, req.GetGroupID(), claims.UID)
	//Check to see whether there is a user in the group.
	// Only the membership's existence matters here; the record itself is discarded.
	_, err = im_mysql_model.FindGroupMemberInfoByGroupIdAndUserId(req.GroupID, claims.UID)
	if err != nil {
		log.Error(req.Token, req.OperationID, "no such group or you are not in the group,err=%s", err.Error(), req.OperationID, req.GroupID, claims.UID)
		return &pbGroup.CommonResp{ErrorCode: config.ErrQuitGroup.ErrCode, ErrorMsg: config.ErrQuitGroup.ErrMsg}, nil
	}
	//After the user's verification is successful, user will quit the group chat.
	// Remove the membership row from MySQL (primary store).
	err = im_mysql_model.DeleteGroupMemberByGroupIdAndUserId(req.GroupID, claims.UID)
	if err != nil {
		log.ErrorByArgs("this user exit the group failed,err=%s", err.Error(), req.OperationID, req.GroupID, claims.UID)
		return &pbGroup.CommonResp{ErrorCode: config.ErrQuitGroup.ErrCode, ErrorMsg: config.ErrQuitGroup.ErrMsg}, nil
	}
	// Mirror the removal in the secondary (db.DB, presumably mongo per the
	// log text — confirm) store.
	err = db.DB.DelGroupMember(req.GroupID, claims.UID)
	if err != nil {
		log.Error("", "", "delete mongo group member failed, db.DB.DelGroupMember fail [err: %s]", err.Error())
		return &pbGroup.CommonResp{ErrorCode: config.ErrQuitGroup.ErrCode, ErrorMsg: config.ErrQuitGroup.ErrMsg}, nil
	}
	// The group-notification push below is intentionally disabled; kept for
	// reference.
	////Push message when quit group chat
	//jsonInfo, _ := json.Marshal(req)
	//logic.SendMsgByWS(&pbChat.WSToMsgSvrChatMsg{
	//	SendID:      claims.UID,
	//	RecvID:      req.GroupID,
	//	Content:     string(jsonInfo),
	//	SendTime:    utils.GetCurrentTimestampBySecond(),
	//	MsgFrom:     constant.SysMsgType,
	//	ContentType: constant.QuitGroupTip,
	//	SessionType: constant.GroupChatType,
	//	OperationID: req.OperationID,
	//})
	log.Info(req.Token, req.OperationID, "rpc quit group is success return")
	// Success: zero-valued CommonResp (ErrorCode 0).
	return &pbGroup.CommonResp{}, nil
}
package model

// ResourceLimit describes resource limits (original comment: 资源限制)
// applied to a workload.
type ResourceLimit struct {
	// Memory is the memory limit; units are not specified here —
	// presumably bytes, confirm with callers.
	Memory int64 `json:"memory"`
	// CpuShare is the relative CPU-share weight (serialized as
	// "cpuShares", matching the cgroup-style naming).
	CpuShare uint64 `json:"cpuShares"`
	// Timeout is the timeout duration (original comment: 超时时间);
	// units are not specified here — presumably seconds, confirm.
	Timeout int `json:"timeout"`
}
package routes

import (
	"fmt"
	"log"
	"net/http"
	"sort"
	"strconv"

	"github.com/codegangsta/martini"
	"github.com/coopernurse/gorp"
	"github.com/zachlatta/southbayfession/misc"
	"github.com/zachlatta/southbayfession/models"
)

// School is the JSON representation of a school and (optionally) its tweets.
type School struct {
	Id     int            `json:"id"`
	Name   string         `json:"name"`
	Tweets []models.Tweet `json:"tweets,omitempty"`
}

// sortedSchoolNames returns the names of all known schools in sorted order,
// so a school's index in the slice is its stable numeric id. Extracted to
// remove the duplicated map-key extraction in GetSchools and GetSchool.
func sortedSchoolNames() []string {
	names := make([]string, 0, len(misc.Schools))
	for k := range misc.Schools {
		names = append(names, k)
	}
	sort.Strings(names)
	return names
}

// GetSchools lists every known school with its id (tweets omitted).
func GetSchools(enc Encoder, db gorp.SqlExecutor) (int, string) {
	names := sortedSchoolNames()
	schools := make([]School, len(names))
	for i, name := range names {
		schools[i].Id = i
		schools[i].Name = name
	}
	return http.StatusOK, Must(enc.EncodeOne(schools))
}

// GetSchool returns one school, identified by its index into the sorted
// school-name list, together with its tweets (newest first).
func GetSchool(enc Encoder, db gorp.SqlExecutor, parms martini.Params) (int, string) {
	id, err := strconv.Atoi(parms["id"])
	if err != nil {
		log.Println(err)
		return http.StatusConflict, ""
	}

	names := sortedSchoolNames()
	// BUG FIX: an out-of-range id previously caused an index-out-of-range
	// panic; reject it explicitly instead.
	if id < 0 || id >= len(names) {
		return http.StatusNotFound, ""
	}

	var school School
	school.Id = id
	school.Name = names[id]

	// NOTE(review): the school name is interpolated directly into SQL.
	// Names come from the internal misc.Schools map, not from the request,
	// but a parameterized query would still be safer (and required if a
	// name can ever contain a quote) — confirm the gorp dialect placeholder
	// and switch to bind parameters.
	_, err = db.Select(&school.Tweets,
		fmt.Sprintf("select * from Tweet where school = '%s' order by Id desc",
			school.Name))
	if err != nil {
		log.Println(err)
		return http.StatusConflict, ""
	}

	return http.StatusOK, Must(enc.EncodeOne(school))
}
// vi:nu:et:sts=4 ts=4 sw=4 // See License.txt in main repository directory (Public Domain) // Test Go's object capabilities and how it works. // Actually, inheritacne works pretty much the same as in other // Object Oriented Programming languages. We have upward in- // heritance as normal. However, there is no way for a lower- // level object to execute a upper-level function from a lower- // level function. This can be done in other OOPs. // To accomplish the lower-level doing upper-level function, we // must careate a func within the lower-level object. // Generated: 2019-04-24 11:09:33.44631 -0400 EDT m=+0.001906926 package main import ( "flag" "fmt" "log" "os" ) var ( debug bool force bool noop bool quiet bool ) //--------------------------------------------------------------------- // Object A //--------------------------------------------------------------------- type objA struct { } func (o *objA) DoA() { log.Printf("objA::DoA\n") } func (o *objA) DoB() { log.Printf("objA::DoB\n") } func (o *objA) DoC() { log.Printf("objA::DoC\n") } func NewObjA() *objA { return &objA{} } //--------------------------------------------------------------------- // Object B //--------------------------------------------------------------------- type objB struct { objA Abc func () } func (o *objB) DoA() { log.Printf("objB::DoA\n") o.objA.DoA() } func (o *objB) DoD() { log.Printf("objB::DoD\n") if o.Abc != nil { o.Abc() } } func NewObjB() *objB { return &objB{} } //--------------------------------------------------------------------- // Object C //--------------------------------------------------------------------- type objC struct { objB } func (o *objC) DoD() { log.Printf("objC::DoD\n") o.objB.DoD() } func (o *objC) DoE() { log.Printf("objC::DoE\n") } func NewObjC() *objC { oc := &objC{} oc.Abc = func () { // This works because oc is remembered since this is an inline function. 
oc.DoE() } return oc } //===================================================================== // Main Support Functions //===================================================================== func usage() { fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s:\n", os.Args[0]) fmt.Fprintf(flag.CommandLine.Output(), "\nOptions:\n") flag.PrintDefaults() fmt.Fprintf(flag.CommandLine.Output(), "\nNotes:\n") } func main() { var oa *objA var ob *objB var oc *objC // Set up flag variables flag.Usage = usage flag.BoolVar(&debug, "debug", true, "enable debugging") flag.BoolVar(&force, "force", true, "enable over-writes and deletions") flag.BoolVar(&force, "f", true, "enable over-writes and deletions") flag.BoolVar(&noop, "noop", true, "execute program, but do not make real changes") flag.BoolVar(&quiet, "quiet", true, "enable quiet mode") flag.BoolVar(&quiet, "q", true, "enable quiet mode") // Parse the flags and check them flag.Parse() if debug { log.Println("\tIn Debug Mode...") } //--------------------------------------------------------------------- // Various Tests //--------------------------------------------------------------------- oa = NewObjA() ob = NewObjB() oc = NewObjC() log.Println("You should see: objA::DoA") oa.DoA() log.Println("You should see: objB::DoA then objA::DoA") ob.DoA() log.Println("You should see: objB::DoA then objA::DoA") oc.DoA() log.Println("You should see: objC::DoD then objB::DoD then objC::DoE") oc.DoD() }
package esclient

import (
	"log"

	elastigo "github.com/mattbaird/elastigo/lib"
)

// CreateESClient builds an ElasticSearch connection for the given address
// and port, installs a request tracer, and verifies connectivity with a
// health check before handing the connection back.
func CreateESClient(esAddress, esPort string) (*elastigo.Conn, error) {
	conn := elastigo.NewConn()
	conn.Domain = esAddress
	conn.Port = esPort

	// Log every outgoing request (method, URL, body) for debugging.
	conn.RequestTracer = func(method, url, body string) {
		log.Printf("Requesting %s %s", method, url)
		log.Printf("Request body: %s", body)
	}

	// Fail fast when the cluster is unreachable.
	if _, err := conn.Health(); err != nil {
		return nil, err
	}
	return conn, nil
}
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // Code generated from the elasticsearch-specification DO NOT EDIT. // https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2 package types import ( "bytes" "encoding/json" "errors" "io" "strconv" ) // BucketSummary type. // // https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/ml/_types/Bucket.ts#L31-L78 type BucketSummary struct { // AnomalyScore The maximum anomaly score, between 0-100, for any of the bucket influencers. // This is an overall, rate-limited // score for the job. All the anomaly records in the bucket contribute to this // score. This value might be updated as // new data is analyzed. AnomalyScore Float64 `json:"anomaly_score"` BucketInfluencers []BucketInfluencer `json:"bucket_influencers"` // BucketSpan The length of the bucket in seconds. This value matches the bucket span that // is specified in the job. BucketSpan int64 `json:"bucket_span"` // EventCount The number of input data records processed in this bucket. EventCount int64 `json:"event_count"` // InitialAnomalyScore The maximum anomaly score for any of the bucket influencers. 
This is the // initial value that was calculated at the // time the bucket was processed. InitialAnomalyScore Float64 `json:"initial_anomaly_score"` // IsInterim If true, this is an interim result. In other words, the results are // calculated based on partial input data. IsInterim bool `json:"is_interim"` // JobId Identifier for the anomaly detection job. JobId string `json:"job_id"` // ProcessingTimeMs The amount of time, in milliseconds, that it took to analyze the bucket // contents and calculate results. ProcessingTimeMs int64 `json:"processing_time_ms"` // ResultType Internal. This value is always set to bucket. ResultType string `json:"result_type"` // Timestamp The start time of the bucket. This timestamp uniquely identifies the bucket. // Events that occur exactly at the // timestamp of the bucket are included in the results for the bucket. Timestamp int64 `json:"timestamp"` // TimestampString The start time of the bucket. This timestamp uniquely identifies the bucket. // Events that occur exactly at the // timestamp of the bucket are included in the results for the bucket. 
TimestampString DateTime `json:"timestamp_string,omitempty"` } func (s *BucketSummary) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) for { t, err := dec.Token() if err != nil { if errors.Is(err, io.EOF) { break } return err } switch t { case "anomaly_score": var tmp interface{} dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseFloat(v, 64) if err != nil { return err } f := Float64(value) s.AnomalyScore = f case float64: f := Float64(v) s.AnomalyScore = f } case "bucket_influencers": if err := dec.Decode(&s.BucketInfluencers); err != nil { return err } case "bucket_span": if err := dec.Decode(&s.BucketSpan); err != nil { return err } case "event_count": var tmp interface{} dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseInt(v, 10, 64) if err != nil { return err } s.EventCount = value case float64: f := int64(v) s.EventCount = f } case "initial_anomaly_score": var tmp interface{} dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseFloat(v, 64) if err != nil { return err } f := Float64(value) s.InitialAnomalyScore = f case float64: f := Float64(v) s.InitialAnomalyScore = f } case "is_interim": var tmp interface{} dec.Decode(&tmp) switch v := tmp.(type) { case string: value, err := strconv.ParseBool(v) if err != nil { return err } s.IsInterim = value case bool: s.IsInterim = v } case "job_id": if err := dec.Decode(&s.JobId); err != nil { return err } case "processing_time_ms": if err := dec.Decode(&s.ProcessingTimeMs); err != nil { return err } case "result_type": var tmp json.RawMessage if err := dec.Decode(&tmp); err != nil { return err } o := string(tmp[:]) o, err = strconv.Unquote(o) if err != nil { o = string(tmp[:]) } s.ResultType = o case "timestamp": if err := dec.Decode(&s.Timestamp); err != nil { return err } case "timestamp_string": if err := dec.Decode(&s.TimestampString); err != nil { return err } } } return nil } // 
NewBucketSummary returns a BucketSummary. func NewBucketSummary() *BucketSummary { r := &BucketSummary{} return r }
package utils

import (
	"bytes"
	"encoding/binary"
)

// Str2Byte converts a string to its UTF-8 byte-slice representation.
func Str2Byte(str string) []byte {
	return []byte(str)
}

// Byte2Str converts a byte slice back into a string.
func Byte2Str(data []byte) string {
	return string(data)
}

// MergeSlice returns a new slice holding s1 followed by s2; neither input
// is modified.
func MergeSlice(s1 []byte, s2 []byte) []byte {
	merged := make([]byte, 0, len(s1)+len(s2))
	merged = append(merged, s1...)
	merged = append(merged, s2...)
	return merged
}

// IntToBytes encodes n as an 8-byte big-endian integer.
func IntToBytes(n int) []byte {
	var buf bytes.Buffer
	// binary.Write to a bytes.Buffer cannot fail for a fixed-size value.
	binary.Write(&buf, binary.BigEndian, int64(n))
	return buf.Bytes()
}
package aoc2020 // day17_hypercube is a helper for day17 to isolate the "hypercube" component (part 2). import ( "strings" "github.com/pkg/errors" ) // conwaHypercube represents a Conway hypercube type conwayHypercube [][][][]bool func (hcube conwayHypercube) String() string { var sb strings.Builder for ww := range hcube { for zz := range hcube[ww] { for yy := range hcube[ww][zz] { for xx := range hcube[ww][zz][yy] { if hcube[ww][zz][yy][xx] { sb.WriteByte('#') } else { sb.WriteByte('.') } } sb.WriteByte('\n') } sb.WriteByte('\n') } } return sb.String() } // width returns the width (x-dim) of a hcube. func (hcube conwayHypercube) width() int { if hcube.height() == 0 { return 0 } return len(hcube[0][0][0]) } // height returns the height (y-dim) of a hcube. func (hcube conwayHypercube) height() int { if hcube.depth() == 0 { return 0 } return len(hcube[0][0]) } // depth returns the depth (z-dim) of a hcube. func (hcube conwayHypercube) depth() int { if hcube.hyperdepth() == 0 { return 0 } return len(hcube[0]) } // hyperdepth returns the hyperdepth (w-dim) of a hcube func (hcube conwayHypercube) hyperdepth() int { return len(hcube) } // volume returns the volume of the hcube. func (hcube conwayHypercube) volume() int { return hcube.hyperdepth() * hcube.width() * hcube.height() * hcube.depth() } // countActive returns how many active hcubes are there func (hcube conwayHypercube) countActive() int { result := 0 for xx := 0; xx < hcube.width(); xx++ { for yy := 0; yy < hcube.height(); yy++ { for zz := 0; zz < hcube.depth(); zz++ { for ww := 0; ww < hcube.hyperdepth(); ww++ { if hcube.at(xx, yy, zz, ww) { result++ } } } } } return result } // newConwayHypercube returns a new Conway Cube from a string // whose width each line is equal and only contains `#` and `.`. func newConwayHypercube(input string) (conwayHypercube, error) { lines := strings.Split(input, "\n") if len(lines) == 0 { // Well... we have an empty hcube! 
return blankConwayHypercube(0, 0, 0, 0), nil } hcube := make([][][][]bool, 1) hcube[0] = make([][][]bool, 1) hcube[0][0] = make([][]bool, len(lines)) width := len(lines[0]) for ii, line := range lines { if len(line) != width { return nil, errors.Errorf("line %s is length %d, expected %d", line, len(line), width) } hcube[0][0][ii] = make([]bool, width) for jj, pt := range line { switch pt { case '#': hcube[0][0][ii][jj] = true case '.': hcube[0][0][ii][jj] = false default: return nil, errors.Errorf("invalid character %c in line %s", pt, line) } } } return hcube, nil } // blankConwayCube makes a blank conwayCube with some width, height, and depth func blankConwayHypercube(w, h, d, hd int) conwayHypercube { result := make(conwayHypercube, hd) for ww := range result { result[ww] = make([][][]bool, d) for zz := range result[ww] { result[ww][zz] = make([][]bool, h) for yy := range result[ww][zz] { result[ww][zz][yy] = make([]bool, w) } } } return result } // reduce reduces the hcube and returns a new hcube func (hcube conwayHypercube) reduce() conwayHypercube { // every element in the hcube is checked // and {min|max}{X|Y|Z} is narrowed down minX, minY, minZ, minW := hcube.width(), hcube.height(), hcube.depth(), hcube.hyperdepth() maxX, maxY, maxZ, maxW := 0, 0, 0, 0 for ww := 0; ww < hcube.hyperdepth(); ww++ { for zz := 0; zz < hcube.depth(); zz++ { for yy := 0; yy < hcube.height(); yy++ { for xx := 0; xx < hcube.width(); xx++ { if !hcube.at(xx, yy, zz, ww) { continue } // update mins and maxs if xx < minX { minX = xx } if xx > maxX { maxX = xx } if yy < minY { minY = yy } if yy > maxY { maxY = yy } if zz < minZ { minZ = zz } if zz > maxZ { maxZ = zz } if ww < minW { minW = ww } if ww > maxW { maxW = ww } } } } } // -1 since maxX goes from 0 to hcube.width()-1, and similarly maxY and maxZ if maxX-minX == hcube.width()-1 && maxY-minY == hcube.height()-1 && maxZ-minZ == hcube.depth()-1 && maxW-minW == hcube.hyperdepth()-1 { return hcube } newCube := 
blankConwayHypercube(maxX-minX+1, maxY-minY+1, maxZ-minZ+1, maxW-minW+1) for xx := minX; xx <= maxX; xx++ { for yy := minY; yy <= maxY; yy++ { for zz := minZ; zz <= maxZ; zz++ { for ww := minW; ww <= maxW; ww++ { newCube.set(xx-minX, yy-minY, zz-minZ, ww-minW, hcube.at(xx, yy, zz, ww)) } } } } return newCube } // at represents if there is a value at some point in the hcube. // If x, y, or z is out of bounds, return false. func (hcube conwayHypercube) at(x, y, z, w int) bool { if w < 0 || w >= hcube.hyperdepth() || z < 0 || z >= hcube.depth() || y < 0 || y >= hcube.height() || x < 0 || x >= hcube.width() { return false } return hcube[w][z][y][x] } // set sets the value of some position to val. // If x, y, or z is out of bounds, do nothing. func (hcube *conwayHypercube) set(x, y, z, w int, val bool) { if w < 0 || w >= hcube.hyperdepth() || z < 0 || z >= hcube.depth() || y < 0 || y >= hcube.height() || x < 0 || x >= hcube.width() { return } (*hcube)[w][z][y][x] = val } // surrounds counts the number of active hcubes that surround (x,y,z) func (hcube conwayHypercube) surrounds(x, y, z, w int) int { result := 0 offsets := []int{-1, 0, 1} boolToInt := func(b bool) int { if b { return 1 } return 0 } for _, ox := range offsets { for _, oy := range offsets { for _, oz := range offsets { for _, ow := range offsets { if ox == 0 && oy == 0 && oz == 0 && ow == 0 { continue } result = result + boolToInt(hcube.at(x+ox, y+oy, z+oz, w+ow)) } } } } return result } // iterate returns a new conwayCube based on the original after iterating based on the rules func (hcube conwayHypercube) iterate() conwayHypercube { newCube := blankConwayHypercube(hcube.width()+2, hcube.height()+2, hcube.depth()+2, hcube.hyperdepth()+2) // newCube.at(x+1,y+1,z+1,w+1) <-> hcube.at(x,y,z,w) for ww := 0; ww < newCube.hyperdepth(); ww++ { for zz := 0; zz < newCube.depth(); zz++ { for yy := 0; yy < newCube.height(); yy++ { for xx := 0; xx < newCube.width(); xx++ { val := hcube.at(xx-1, yy-1, zz-1, ww-1) sur 
:= hcube.surrounds(xx-1, yy-1, zz-1, ww-1) if (val && (sur == 2 || sur == 3)) || (!val && sur == 3) { newCube.set(xx, yy, zz, ww, true) } else { newCube.set(xx, yy, zz, ww, false) // unneeded really since default is false } } } } } return newCube.reduce() }
/* # -*- coding: utf-8 -*- # @Author : joker # @Time : 2021/9/1 8:43 上午 # @File : lt_53_最大子序和.go # @Description : # @Attention : */ package offer func maxSubArray(nums []int) int { if len(nums) == 0 { return 0 } max := nums[0] for index := 1; index < len(nums); index++ { if nums[index]+nums[index-1] > nums[index] { nums[index] = nums[index] + nums[index-1] } if nums[index] > max { max = nums[index] } } return max }
package player

import (
	"errors"
	"fmt"

	"goslib/gen_server"
	"goslib/logger"
	"goslib/scene_utils"
	"goslib/session_utils"
)

// SERVER is the registered name of the singleton player-manager gen_server.
const SERVER = "__player_manager_server__"

/*
   GenServer Callbacks
*/

// PlayerManager serializes player-process startup through a single
// gen_server so that concurrent StartPlayer calls cannot race on the
// exists-then-start check.
type PlayerManager struct {
}

// StartPlayerManager registers and starts the singleton manager process.
func StartPlayerManager() {
	gen_server.Start(SERVER, new(PlayerManager))
}

// StartPlayer ensures a player gen_server exists for accountId. It looks
// up the session, validates that the player's scene exists and belongs to
// this game server, then delegates the actual start to the manager process.
func StartPlayer(accountId string) error {
	session, err := session_utils.Find(accountId)
	if err != nil {
		logger.ERR("StartPlayer failed: ", err)
		return err
	}
	scene, err := scene_utils.FindScene(session.SceneId)
	if err != nil {
		logger.ERR("StartPlayer failed: ", err)
		return err
	}
	if scene == nil {
		// FIX: use stdlib fmt.Errorf instead of errors.New(fmt.Sprintf(...))
		// from the iris web framework's errors package — no reason for a
		// web-framework dependency here.
		err = fmt.Errorf("scene: %s not found", session.SceneId)
		logger.ERR("StartPlayer failed: ", err)
		return err
	}
	if scene.GameAppId != CurrentGameAppId {
		err = errors.New("player not belongs to this server!")
		logger.ERR("StartPlayer failed: ", err)
		return err
	}
	_, err = gen_server.Call(SERVER, "StartPlayer", accountId)
	return err
}

// Init implements the gen_server Init callback; no state to set up.
func (m *PlayerManager) Init(args []interface{}) (err error) {
	return nil
}

// HandleCast implements the gen_server cast callback; no casts supported.
func (m *PlayerManager) HandleCast(args []interface{}) {
}

// HandleCall starts the per-account player process on demand. Running
// inside the manager makes the Exists/Start pair atomic.
// NOTE(review): args[0]/args[1] type assertions panic on malformed calls —
// confirm gen_server recovers from panics in callbacks.
func (m *PlayerManager) HandleCall(args []interface{}) (interface{}, error) {
	handle := args[0].(string)
	if handle == "StartPlayer" {
		accountId := args[1].(string)
		if !gen_server.Exists(accountId) {
			gen_server.Start(accountId, new(Player), accountId)
		}
	}
	return nil, nil
}

// Terminate implements the gen_server shutdown callback; nothing to clean up.
func (m *PlayerManager) Terminate(reason string) (err error) {
	return nil
}
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package hostinfo import ( "testing" ) func TestSHostInfo_Start(t *testing.T) { type fields struct { isRegistered bool kvmModuleSupport string nestStatus string Cpu *SCPUInfo Mem *SMemory sysinfo *SSysInfo } tests := []struct { name string fields fields wantErr bool }{ { "HostInfo Test", fields{}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { h := &SHostInfo{ isRegistered: tt.fields.isRegistered, kvmModuleSupport: tt.fields.kvmModuleSupport, nestStatus: tt.fields.nestStatus, Cpu: tt.fields.Cpu, Mem: tt.fields.Mem, sysinfo: tt.fields.sysinfo, } if err := h.Start(); (err != nil) != tt.wantErr { t.Errorf("SHostInfo.Start() error = %v, wantErr %v", err, tt.wantErr) } }) } }
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // ====CHAINCODE EXECUTION SAMPLES (BCS REST API) ================== /* #TEST transaction / Init ledger curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initLedgerB","args":["ser1234"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initLedgerC","args":["ser1234"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initLedgerD","args":["ser1234"],"chaincodeVer":"v1"}' # TEST transaction / Add Car Part curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehiclePart","args":["ser1234", "tata", "1502688979", "airbag 2020", "mazda", "false", "1502688979"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d 
'{"channel":"channel1","chaincode":"vehiclenet","method":"initVehiclePart","args":["ser1235", "tata", "1502688979", "airbag 2020", "mercedes", "false", "1502688979"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehiclePart","args":["ser1236", "tata", "1502688979", "airbag 2020", "toyota", "false", "15026889790"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehiclePart","args":["ser1237", "tata", "1502688979", "airbag 5000", "mazda", "false", "1502688979"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehiclePart","args":["ser1238", "tata", "1502688979", "airbag 5000", "mercedes", "false", "1502688979"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehiclePart","args":["ser1239", "tata", "1502688979", "airbag 5000", "toyota", "false", "15026889790"],"chaincodeVer":"v1"}' # TEST transaction / Add Car curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehicle","args":["mer1000001", "mercedes", "c class", "1502688979", "ser1234", "mercedes", "false", "1502688979"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehicle","args":["maz1000001", "mazda", "mazda 6", "1502688979", "ser1235", "mazda", "false", 
"1502688979"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehicle","args":["ren1000001", "renault", "megan", "1502688979", "ser1236", "renault", "false", "1502688979"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"initVehicle","args":["ford1000001", "ford", "mustang", "1502688979", "ser1237", "ford", "false", "1502688979"],"chaincodeVer":"v1"}' # TEST query / Populated database curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/query -d '{"channel":"channel1","chaincode":"vehiclenet","method":"readVehiclePart","args":["ser1234"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/query -d '{"channel":"channel1","chaincode":"vehiclenet","method":"readVehicle","args":["mer1000001"],"chaincodeVer":"v1"}' # TEST transaction / Transfer ownership curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"transferVehiclePart","args":["ser1234", "mercedes"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"transferVehicle","args":["mer1000001", "mercedes los angeles"],"chaincodeVer":"v1"}' # TEST query / Get History curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/query -d '{"channel":"channel1","chaincode":"vehiclenet","method":"getHistoryForRecord","args":["ser1234"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST 
http://localhost:3100/bcsgw/rest/v1/transaction/query -d '{"channel":"channel1","chaincode":"vehiclenet","method":"getHistoryForRecord","args":["mer1000001"],"chaincodeVer":"v1"}' # TEST transaction / delete records curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"deleteVehiclePart","args":["ser1235"],"chaincodeVer":"v1"}' curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/rest/v1/transaction/invocation -d '{"channel":"channel1","chaincode":"vehiclenet","method":"deleteVehicle","args":["maz1000001"],"chaincodeVer":"v1"}' # TEST transaction / Recall Part curl -H "Content-type:application/json" -X POST http://localhost:3100/bcsgw/{"channel":"channel1","chaincode":"vehiclenet","method":"setPartRecallState","args":["abg1234",true],"chaincodeVer":"v3"}' */ package main import ( // "bytes" "encoding/json" "fmt" "strings" "strconv" // "time" "github.com/hyperledger/fabric/core/chaincode/shim" "github.com/hyperledger/fabric/protos/peer" ) //ClaimProcessChaincode example simple Chaincode implementation type ClaimProcessChaincode struct { } // @claimPart JSON object type claimInfo struct { ObjectType string `json:"docType"` //docType is used to distinguish the various types of objects in state database SerialNumber string `json:"serialNumber"` InsuredName string `json:"insuredName"` PolicyID string `json:"policyID"` ClaimID string `json:"claimID"` InsuranceStartDate int `json:"insuranceStartDate"` InsuranceEndDate int `json:"insuranceEndDate"` AdmissionDate int `json:"admissionDate"` DischargeDate int `json:"dischargeDate"` PatientName string `json:"patientName"` PatientGender string `json:"patientGender"` DOB int `json:"dob"` Relationship string `json:"relationship"` ClaimType string `json:"claimType"` HospitalName string `json:"hospitalName"` DoctorName string `json:"doctorName"` NatureOfIllness string `json:"natureOfIllness"` 
Diagnosis string `json:"diagnosis"` Accident bool `json:"accident"` FIR bool `json:"fir"` FIRNumber string `json:"firNumber"` PoliceStation string `json:"policeStation"` OtherPolicy bool `json:"otherPolicy"` OtherPolicyId string `json:"otherPolicyId"` ClaimStatus string `json:"claimStatus"` } // @the bill information type is below type billInfo struct { ObjectType string `json:"docType"` //docType is used to distinguish the various types of objects in state database ClaimID string `json:"claimID"` ClaimItemId string `json:"claimItemId"` Description string `json:"description"` BillNo string `json:"billNo"` BillDate int `json:"billDate"` BillType string `json:"billType"` Amount string `json:"amount"` } // =================================================================================== // Main // =================================================================================== func main1() { err := shim.Start(new(ClaimProcessChaincode)) if err != nil { fmt.Printf("Error starting claim process chaincode: %s", err) } } // Init initializes chaincode // =========================== func (t *ClaimProcessChaincode) Init(stub shim.ChaincodeStubInterface) peer.Response { return shim.Success(nil) } // Invoke - Our entry point for Invocations // ======================================== func (t *ClaimProcessChaincode) Invoke(stub shim.ChaincodeStubInterface) peer.Response { function, args := stub.GetFunctionAndParameters() fmt.Println("invoke is running " + function) // Handle different functions if function == "initclaimProcess" { //create a new Claim return t.initclaimProcess(stub, args) } /*else if function == "addDiagnosisInfo" { //update illness information return t.addDiagnosisInfo(stub, args) } else if function == "updateBillInfo" { //update bill information return t.updateBillInfo(stub, args) }else if function == "deleteClaim" { //delete a Claim return t.deleteClaim(stub, args) } else if function == "readClaim" { //read a Claim return t.readClaim(stub, args) } else 
if function == "getHistoryForClaim" { //get history of values for a claim
		return t.getHistoryForPolicy(stub, args)
	} else if function == "gethistoryForPolicy" { //get history of values for a Policy
		return t.getBillsByClaim(stub, args)
	} else if function == "getClaimStatus" { //get claim Status
		return t.getClaimStatus(stub, args)
	} else if function == "updateClaimStatus" { //update Claim Status
		return t.updateClaimStatus(stub, args)
	} */

	fmt.Println("invoke did not find func: " + function) //error
	return shim.Error("Received unknown function invocation")
}

// ============================================================
// initclaimProcess - create a new insurance claim record and store it
// into chaincode state under its claim ID.
//
// Expects 24 positional string arguments:
//   0  serialNumber          12 claimType
//   1  insuredName           13 hospitalName
//   2  policyID              14 doctorName
//   3  claimID               15 natureOfIllness
//   4  insuranceStartDate    16 diagnosis
//   5  insuranceEndDate      17 accident (bool)
//   6  admissionDate         18 fir (bool)
//   7  dischargeDate         19 firNumber
//   8  patientName           20 policeStation
//   9  patientGender         21 otherPolicy (bool)
//   10 dob                   22 otherPolicyId
//   11 relationship          23 claimStatus
// ============================================================
func (t *ClaimProcessChaincode) initclaimProcess(stub shim.ChaincodeStubInterface, args []string) peer.Response {
	var err error

	// The function reads args[0] through args[23]; the previous guard only
	// required 4 arguments, so a short argument list caused an
	// index-out-of-range panic. Validate the full count up front.
	if len(args) < 24 {
		return shim.Error("Incorrect number of arguments. Expecting at least 24")
	}

	// ==== Input sanitation ====
	fmt.Println("- start init claim process")
	if len(args[0]) <= 0 {
		return shim.Error("1st argument must be a non-empty string")
	}
	if len(args[1]) <= 0 {
		return shim.Error("2nd argument must be a non-empty string")
	}
	if len(args[2]) <= 0 {
		return shim.Error("3rd argument must be a non-empty string")
	}
	if len(args[3]) <= 0 {
		return shim.Error("4th argument must be a non-empty string")
	}
	if len(args[4]) <= 0 {
		return shim.Error("5th argument must be a non-empty string")
	}
	if len(args[5]) <= 0 {
		return shim.Error("6th argument must be a non-empty string")
	}
	if len(args[6]) <= 0 {
		return shim.Error("7th argument must be a non-empty string")
	}
	if len(args[7]) <= 0 {
		return shim.Error("8th argument must be a non-empty string")
	}
	if len(args[8]) <= 0 {
		return shim.Error("9th argument must be a non-empty string")
	}
	if len(args[9]) <= 0 {
		return shim.Error("10th argument must be a non-empty string")
	}
	if len(args[10]) <= 0 {
		return shim.Error("11th argument must be a non-empty string")
	}

	serialNumber := args[0]
	insuredName := strings.ToLower(args[1])
	policyID := strings.ToLower(args[2])
	claimID := strings.ToLower(args[3])
	insuranceStartDate, err := strconv.Atoi(args[4])
	if err != nil {
		return shim.Error("5th argument must be a numeric string")
	}
	insuranceEndDate, err := strconv.Atoi(args[5])
	if err != nil {
		return shim.Error("6th argument must be a numeric string")
	}
	admissionDate, err := strconv.Atoi(args[6])
	if err != nil {
		return shim.Error("7th argument must be a numeric string")
	}
	dischargeDate, err := strconv.Atoi(args[7])
	if err != nil {
		return shim.Error("8th argument must be a numeric string")
	}
	patientName := strings.ToLower(args[8])
	patientGender := strings.ToLower(args[9])
	dob, err := strconv.Atoi(args[10])
	if err != nil {
		return shim.Error("11th argument must be a numeric string")
	}
	relationship := strings.ToLower(args[11])
	claimType := strings.ToLower(args[12])
	hospitalName := strings.ToLower(args[13])
	doctorName := strings.ToLower(args[14])
	natureOfIllness := strings.ToLower(args[15])
	diagnosis := strings.ToLower(args[16])
	accident, err := strconv.ParseBool(args[17])
	if err != nil {
		return shim.Error("18th argument must be a boolean string")
	}
	fir, err := strconv.ParseBool(args[18])
	if err != nil {
		return shim.Error("19th argument must be a boolean string")
	}
	firNumber := strings.ToLower(args[19])
	policeStation := strings.ToLower(args[20])
	otherPolicy, err := strconv.ParseBool(args[21])
	if err != nil {
		return shim.Error("22nd argument must be a boolean string")
	}
	otherPolicyId := args[22]
	claimStatus := strings.ToLower(args[23])

	// ==== Check if claim already exists ====
	claimAsBytes, err := stub.GetState(claimID)
	if err != nil {
		return shim.Error("Failed to get claim " + err.Error())
	} else if claimAsBytes != nil {
		fmt.Println("This claim already exists: " + claimID)
		return shim.Error("This claim already exists: " + claimID)
	}

	// ==== Create claimInfo object and marshal to JSON ====
	// Field-keyed literal (instead of 25 positional values) so a struct
	// change cannot silently shuffle values; `claim` also avoids shadowing
	// the claimInfo type name.
	claim := &claimInfo{
		ObjectType:         "claimInfo",
		SerialNumber:       serialNumber,
		InsuredName:        insuredName,
		PolicyID:           policyID,
		ClaimID:            claimID,
		InsuranceStartDate: insuranceStartDate,
		InsuranceEndDate:   insuranceEndDate,
		AdmissionDate:      admissionDate,
		DischargeDate:      dischargeDate,
		PatientName:        patientName,
		PatientGender:      patientGender,
		DOB:                dob,
		Relationship:       relationship,
		ClaimType:          claimType,
		HospitalName:       hospitalName,
		DoctorName:         doctorName,
		NatureOfIllness:    natureOfIllness,
		Diagnosis:          diagnosis,
		Accident:           accident,
		FIR:                fir,
		FIRNumber:          firNumber,
		PoliceStation:      policeStation,
		OtherPolicy:        otherPolicy,
		OtherPolicyId:      otherPolicyId,
		ClaimStatus:        claimStatus,
	}
	claimJSONasBytes, err := json.Marshal(claim)
	if err != nil {
		return shim.Error(err.Error())
	}

	// === Save claim to state ===
	err = stub.PutState(claimID, claimJSONasBytes)
	if err != nil {
		return shim.Error(err.Error())
	}

	// ==== claim saved. Return success ====
	fmt.Println("- end init claim part")
	return shim.Success(nil)
}

/*
// ============================================================
// initVehicle - create a new bill Info , store into chaincode state
// ============================================================
func (t *ClaimProcessChaincode) initVehicle(stub shim.ChaincodeStubInterface, args []string) peer.Response {
	var err error
	//
data model with recall fields // 0 1 2 3 4 5 6 7 // "mer1000001", "mercedes", "c class", "1502688979", "ser1234", "mercedes", "false", "1502688979" // @MODIFY_HERE extend to expect 8 arguements, up from 6 if len(args) != 7 { return shim.Error("Incorrect number of arguments. Expecting 7") } // ==== Input sanitation ==== fmt.Println("- start init bill ") if len(args[0]) <= 0 { return shim.Error("1st argument must be a non-empty string") } if len(args[1]) <= 0 { return shim.Error("2nd argument must be a non-empty string") } if len(args[2]) <= 0 { return shim.Error("3rd argument must be a non-empty string") } if len(args[3]) <= 0 { return shim.Error("4th argument must be a non-empty string") } if len(args[4]) <= 0 { return shim.Error("5th argument must be a non-empty string") } if len(args[5]) <= 0 { return shim.Error("6th argument must be a non-empty string") } if len(args[6]) <= 0 { return shim.Error("7h argument must be a non-empty string") } claimID := args[0] claimItemId:=args[1] description:=args[2] billNo:=args[3] billDate:= strconv.Atoi(args[4]) if err != nil { return shim.Error("5th argument must be a numeric string") } billType:=args[5] amount:=args[6] // ==== Check if claim already exists ==== billInfoAsBytes, err := stub.GetState(billID) if err != nil { return shim.Error("Failed to get claim: " + err.Error()) } else if claimAsBytes != nil { return shim.Error("This claim already exists: " + claimID) } // ==== Create claim object and marshal to JSON ==== objectType := "billInfo" billInfo := &billInfo{objectType,claimID,claimItemId,description,billNo,billDate,billType,amount} vehicleJSONasBytes, err := json.Marshal(vehicle) if err != nil { return shim.Error(err.Error()) } // === Save bill to state === err = stub.PutState(claimBill, billInfoJSONasBytes) if err != nil { return shim.Error(err.Error()) } // ==== billinfo saved and indexed. 
Return success ==== fmt.Println("- end init bill Info") return shim.Success(nil) } // =============================================== // createIndex - create search index for ledger // =============================================== func (t *ClaimProcessChaincode) createIndex(stub shim.ChaincodeStubInterface, indexName string, attributes []string) error { fmt.Println("- start create index") var err error // ==== Index the object to enable range queries, e.g. return all claims made by policy ==== // An 'index' is a normal key/value entry in state. // The key is a composite key, with the elements that you want to range query on listed first. // This will enable very efficient state range queries based on composite keys matching indexName~color~* indexKey, err := stub.CreateCompositeKey(indexName, attributes) if err != nil { return err } // Save index entry to state. Only the key name is needed, no need to store a duplicate copy of object. // Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value value := []byte{0x00} stub.PutState(indexKey, value) fmt.Println("- end create index") return nil } // =============================================== // deleteIndex - remove search index for ledger // =============================================== func (t *ClaimProcessChaincode) deleteIndex(stub shim.ChaincodeStubInterface, indexName string, attributes []string) error { fmt.Println("- start delete index") var err error // ==== Index the object to enable range queries, e.g. return all parts made by supplier b ==== // An 'index' is a normal key/value entry in state. // The key is a composite key, with the elements that you want to range query on listed first. 
// This will enable very efficient state range queries based on composite keys matching indexName~color~* indexKey, err := stub.CreateCompositeKey(indexName, attributes) if err != nil { return err } // Delete index by key stub.DelState(indexKey) fmt.Println("- end delete index") return nil } // =============================================== // readClaim - read a Claim from chaincode state // =============================================== func (t *ClaimProcessChaincode) readClaim(stub shim.ChaincodeStubInterface, args []string) peer.Response { var claimID, jsonResp string var err error if len(args) != 1 { return shim.Error("Incorrect number of arguments. Expecting Claim ID of the Claim to query") } claimID = args[0] valAsbytes, err := stub.GetState(claimID) //get the claim from chaincode state if err != nil { jsonResp = "{\"Error\":\"Failed to get state for " + claimID + "\"}" fmt.Println(jsonResp) return shim.Error(jsonResp) } else if valAsbytes == nil { jsonResp = "{\"Error\":\"Claim does not exist: " + claimID + "\"}" fmt.Println(jsonResp) return shim.Error(jsonResp) } return shim.Success(valAsbytes) } // =============================================== // BillsByClaim - read a vehicle from chaincode state // =============================================== func (t *ClaimProcessChaincode) readClaim(stub shim.ChaincodeStubInterface, args []string) peer.Response { var claimID, billNo, jsonResp string var err error if len(args) != 2 { return shim.Error("Incorrect number of arguments. 
Expecting chassis number of the vehicle to query") } claimID = args[0] billNo = args[1] valAsbytes, err := stub.GetState(claimID,billNo) //get the bill from chaincode state if err != nil { jsonResp = "{\"Error\":\"Failed to get state for " + billNo + "\"}" return shim.Error(jsonResp) } else if valAsbytes == nil { jsonResp = "{\"Error\":\"Bill does not exist: " + billNo + "\"}" return shim.Error(jsonResp) } return shim.Success(valAsbytes) } // ================================================== // deleteClaim - remove a Claim key/value pair from state // ================================================== func (t *ClaimProcessChaincode) deleteClaim(stub shim.ChaincodeStubInterface, args []string) peer.Response { var jsonResp string var claimInfo claimInfo if len(args) != 1 { return shim.Error("Incorrect number of arguments. Expecting 1") } claimID := args[0] // to maintain the claimID index, we need to read the claim valAsbytes, err := stub.GetState(claimID) //get the claim from chaincode state if err != nil { jsonResp = "{\"Error\":\"Failed to get state for " + claimID + "\"}" return shim.Error(jsonResp) } else if valAsbytes == nil { jsonResp = "{\"Error\":\"Claim does not exist: " + claimID + "\"}" return shim.Error(jsonResp) } err = json.Unmarshal([]byte(valAsbytes), &ClaimInfoJSON) if err != nil { jsonResp = "{\"Error\":\"Failed to decode JSON of: " + serialNumber + "\"}" return shim.Error(jsonResp) } err = stub.DelState(claimID) //remove the claim from chaincode state if err != nil { return shim.Error("Failed to delete state:" + err.Error()) } return shim.Success(nil) } // =========================================================== // transfer a vehicle part by setting a new owner name on the vehiclePart // =========================================================== func (t *ClaimProcessChaincode) addDiagnosisInfo(stub shim.ChaincodeStubInterface, args []string) peer.Response { // 0 1 3 // "name", "from", "to" if len(args) < 3 { return shim.Error("Incorrect 
number of arguments. Expecting 3") } claimID := args[0] diagnosis := strings.ToLower(args[1]) natureOfIllness := strings.ToLower(args[2]) fmt.Println("- start update Diagnosis Info ", claimID, diagnosis, natureOfIllness) message, err := t.addDiagnosisHelper(stub, claimID, diagnosis, natureOfIllness) if err != nil { return shim.Error(message + err.Error()) } else if message != "" { return shim.Error(message) } fmt.Println("- end addDiagnosis (success)") return shim.Success(nil) } // =========================================================== // addDiagnosis : helper method for addDiagnosis // =========================================================== /func (t *ClaimProcessChaincode) addDiagnosisHelper(stub shim.ChaincodeStubInterface, ClaimID string, diagnosis string, natureOfIllness string) (string, error) { // attempt to get the current claim object by serial number. // if sucessful, returns us a byte array we can then us JSON.parse to unmarshal fmt.Println("Transfering claim with Claim ID: " + claimID ) claimInfoAsBytes, err := stub.GetState(claimID) if err != nil { return "Failed to get claim: " + claimID, err } else if vehiclePartAsBytes == nil { return "Claim does not exist: " + claimID, nil } claimInfoToTransfer := claimInfo{} err = json.Unmarshal(claimInfoAsBytes, &claimInfoToTransfer := claimInfo{} ) //unmarshal it aka JSON.parse() if err != nil { return "", err } claimInfoToTransfer.diagnosis = diagnosis claimInfoToTransfer.natureOfIllness = natureOfIllness claimInfoJSONBytes, _ := json.Marshal(claimInfoToTransfer) err = stub.PutState(claimID, claimInfoJSONBytes) //rewrite the claimInfo if err != nil { return "", err } return "", nil } // =========================================================================================== // getClaimByRange performs a range query based on the start and end keys provided. // Read-only function results are not typically submitted to ordering. 
If the read-only // results are submitted to ordering, or if the query is used in an update transaction // and submitted to ordering, then the committing peers will re-execute to guarantee that // result sets are stable between endorsement time and commit time. The transaction is // invalidated by the committing peers if the result set has changed between endorsement // time and commit time. // Therefore, range queries are a safe option for performing update transactions based on query results. // =========================================================================================== func (t *ClaimProcessChaincode) getClaimByRange(stub shim.ChaincodeStubInterface, args []string) peer.Response { if len(args) < 2 { return shim.Error("Incorrect number of arguments. Expecting 2") } startKey := args[0] endKey := args[1] resultsIterator, err := stub.GetStateByRange(startKey, endKey) if err != nil { return shim.Error(err.Error()) } defer resultsIterator.Close() // buffer is a JSON array containing QueryResults var buffer bytes.Buffer buffer.WriteString("[") bArrayMemberAlreadyWritten := false for resultsIterator.HasNext() { queryResponse, err := resultsIterator.Next() if err != nil { return shim.Error(err.Error()) } // Add a comma before array members, suppress it for the first array member if bArrayMemberAlreadyWritten == true { buffer.WriteString(",") } buffer.WriteString("{\"Key\":") buffer.WriteString("\"") buffer.WriteString(queryResponse.Key) buffer.WriteString("\"") buffer.WriteString(", \"Record\":") // Record is a JSON object, so we write as-is buffer.WriteString(string(queryResponse.Value)) buffer.WriteString("}") bArrayMemberAlreadyWritten = true } buffer.WriteString("]") fmt.Printf("- getClaimByRange queryResult:\n%s\n", buffer.String()) return shim.Success(buffer.Bytes()) } // =========================================================================================== // getHistoryForRecord returns the historical state transitions for a given key of a 
record // =========================================================================================== func (t *ProcessClaimChaincode) getHistoryForRecord(stub shim.ChaincodeStubInterface, args []string) peer.Response { if len(args) < 1 { return shim.Error("Incorrect number of arguments. Expecting 1") } recordKey := args[0] fmt.Printf("- start getHistoryForRecord: %s\n", recordKey) resultsIterator, err := stub.GetHistoryForKey(recordKey) if err != nil { return shim.Error(err.Error()) } defer resultsIterator.Close() // buffer is a JSON array containing historic values for the key/value pair var buffer bytes.Buffer buffer.WriteString("[") bArrayMemberAlreadyWritten := false for resultsIterator.HasNext() { response, err := resultsIterator.Next() if err != nil { return shim.Error(err.Error()) } // Add a comma before array members, suppress it for the first array member if bArrayMemberAlreadyWritten == true { buffer.WriteString(",") } buffer.WriteString("{\"TxId\":") buffer.WriteString("\"") buffer.WriteString(response.TxId) buffer.WriteString("\"") buffer.WriteString(", \"Value\":") // if it was a delete operation on given key, then we need to set the //corresponding value null. 
Else, we will write the response.Value //as-is (as the Value itself a JSON vehiclePart) if response.IsDelete { buffer.WriteString("null") } else { buffer.WriteString(string(response.Value)) } buffer.WriteString(", \"Timestamp\":") buffer.WriteString("\"") buffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String()) buffer.WriteString("\"") buffer.WriteString(", \"IsDelete\":") buffer.WriteString("\"") buffer.WriteString(strconv.FormatBool(response.IsDelete)) buffer.WriteString("\"") buffer.WriteString("}") bArrayMemberAlreadyWritten = true } buffer.WriteString("]") fmt.Printf("- getHistoryForRecord returning:\n%s\n", buffer.String()) return shim.Success(buffer.Bytes()) } // ============================================================ // setPartRecallState - sets recall field of a vehicle // ============================================================ func (t *AutoTraceChaincode) setPartRecallState(stub shim.ChaincodeStubInterface, args []string) peer.Response { var err error // expects following arguements // 0 1 // "serialNumber", "status (boolean)" if len(args) != 2 { return shim.Error("Incorrect number of arguments. 
Expecting 2") } if len(args[0]) <= 0 { return shim.Error("1st argument must be a non-empty string") } serialNumber := args[0] recall, err := strconv.ParseBool(args[1]) if err != nil { return shim.Error("2nd argument must be a boolean string") } // ==== Check if vehicle part already exists ==== vehiclePartAsBytes, err := stub.GetState(serialNumber) if err != nil { return shim.Error("Failed to get vehicle part: " + err.Error()) } else if vehiclePartAsBytes == nil { fmt.Println("This vehicle part does not exist: " + serialNumber) return shim.Error("This vehicle part does not exist:: " + serialNumber) } vehiclePartJSON := vehiclePart{} err = json.Unmarshal(vehiclePartAsBytes, &vehiclePartJSON) //unmarshal it aka JSON.parse() if err != nil { fmt.Println("Unable to unmarshall vehicle part from byte to JSON object: " + serialNumber) return shim.Error("Unable to unmarshall vehicle part from byte to JSON object: " + serialNumber) } // ==== Create vehiclePart object and marshal to JSON ==== objectType := "vehiclePart" // Set now as the time of recall var recallTime int if recall { recallTime = int(time.Now().UTC().Unix()) }else{ recallTime = 0 } vehiclePart := &vehiclePart{objectType, serialNumber, vehiclePartJSON.Assembler, vehiclePartJSON.AssemblyDate, vehiclePartJSON.Name, vehiclePartJSON.Owner, recall, recallTime} vehiclePartJSONasBytes, err := json.Marshal(vehiclePart) if err != nil { return shim.Error(err.Error()) } // === Save vehiclePart to state === err = stub.PutState(serialNumber, vehiclePartJSONasBytes) // ==== Vehicle part saved. Return success ==== fmt.Println("- end setPartRecallState") return shim.Success(nil) } */
/** *@Author: haoxiongxiao *@Date: 2018/10/5 *@Description: CREATE GO FILE queue */ package queue import ( "testing" "fmt" ) func TestArraryQueue_EnQueue(t *testing.T) { arraryQueue := InitNoParam() for i := 0; i < 10; i++ { arraryQueue.EnQueue(i) } fmt.Println(arraryQueue.ToString()) arraryQueue.DeQueue() fmt.Println(arraryQueue.ToString()) } func TestLinkedQueue_EnQueue(t *testing.T) { listQueue := Init() for i := 0; i < 10; i++ { listQueue.EnQueue(i) } fmt.Println(listQueue.ToString()) listQueue.DeQueue() fmt.Println(listQueue.ToString()) }
/* Copyright 2020 The Qmgo Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package qmgo import ( "fmt" "testing" "time" "github.com/stretchr/testify/require" ) func TestNow(t *testing.T) { t1 := time.Unix(0, time.Now().UnixNano()/1e6*1e6) t2 := Now() fmt.Println(t1, t2) } func TestNewObjectID(t *testing.T) { objId := NewObjectID() objId.Hex() } func TestCompareVersions(t *testing.T) { ast := require.New(t) i, err := CompareVersions("4.4.0", "3.0") ast.NoError(err) ast.True(i > 0) i, err = CompareVersions("3.0.1", "3.0") ast.NoError(err) ast.True(i == 0) i, err = CompareVersions("3.1.5", "4.0") ast.NoError(err) ast.True(i < 0) }
/* # -*- coding: utf-8 -*- # @Author : joker # @Time : 2021/10/15 9:10 上午 # @File : lt_134_加油站.go # @Description : # @Attention : */ package offer // 解题关键: // 遍历所有的加油站,判断是否都能够到达 func canCompleteCircuit(gas []int, cost []int) int { n := len(cost) for i := 0; i < n; { count := 0 // 当前走过的加油站 sumOfCas := 0 // 总共加的油 sumOfCost := 0 // 总共要使用的油 for count < n { index := (i + count) % n // 因为是环形,所以要取余 sumOfCas += gas[index] sumOfCost += cost[index] if sumOfCost > sumOfCas { break // 代表的是油不够,所以直接break即可 } count++ // 满足,则行驶到下一个站点 } if count == n { // 代表着,这个加油站出发,能够回到原来位置 return i } // 则从下一个未探测的站点出发,这一步就是可以避免重复计算的 i = i + count + 1 } // 都不满足 return -1 }
package main

import (
	"fmt"
	log "github.com/sirupsen/logrus"
	"io"
	"os"
	"sync"
	"time"
)

// buildVersion should be populated at build time by build ldflags
var buildVersion string

func init() {
	Apps = make(map[Application]Metric)
}

// main reads a host list, queries each host's status concurrently, and
// prints a per-application success-rate report to stdout.
func main() {
	start := time.Now()

	var flag Flag
	flag.Version = buildVersion
	flag.Parse()

	SetLogger(os.Stderr, flag.LogLevel, "text", false)

	f, err := os.Open(flag.HostsFile)
	if err != nil {
		log.WithError(err).Fatalf("unable to open file '%s'", flag.HostsFile)
	}
	defer f.Close() // fix: the hosts file was never closed

	hosts, err := ReadAllHosts(f)
	if err != nil {
		log.WithError(err).Fatal("unable to read in hosts")
	}

	var (
		wg sync.WaitGroup
		// mu guards Apps: every worker goroutine below calls
		// IncrementCounters(Apps, ...), and concurrent writes to a plain
		// Go map are a data race (fix for the original unsynchronized code).
		mu sync.Mutex
	)
	for _, h := range hosts {
		wg.Add(1)
		go func(h string) {
			defer wg.Done()
			statusURL, err := HostStatusURL(flag.RootURL, h)
			if err != nil {
				log.WithError(err).Errorf("could not create URL for host '%s'", h)
				return // fix: do not continue with an empty URL
			}
			host := Host{URL: statusURL}
			status, err := host.RequestHostStatus()
			if err != nil {
				log.WithError(err).Errorf("could not get status for host '%s'", h)
				return // fix: do not record a zero-value status
			}
			mu.Lock()
			IncrementCounters(Apps, status)
			mu.Unlock()
		}(h)
	}
	wg.Wait()

	writeReport(os.Stdout)
	fmt.Printf("\ncompleted in %s\n", time.Since(start).Truncate(time.Millisecond))
}

// writeReport writes one CSV line per application: name, version and the
// fraction of successful requests (0 when no requests were recorded).
func writeReport(w io.Writer) {
	for app, metrics := range Apps {
		var successRate float32
		// Only the denominator needs guarding; a zero numerator already
		// yields a zero rate.
		if metrics.TotalRequestsCount != 0 {
			successRate = float32(metrics.TotalSuccessCount) / float32(metrics.TotalRequestsCount)
		}
		if _, err := fmt.Fprintf(w, "%s,%s,%.2f\n", app.Name, app.Version, successRate); err != nil {
			log.WithError(err).Error("invalid printer format")
		}
	}
}
package dto

import (
	"bytes"
	"encoding/binary"
)

// AggregationData describes extra payload that may accompany some
// aggregation results: it can encode itself, report its encoded size and
// expose its bucket count.
type AggregationData interface {
	Encoder
	Size() int
	GetBucketCount() int32
}

// HistogramValueData carries the extra data of a histogram-by-value
// aggregation.
type HistogramValueData struct {
	Min         float32
	Max         float32
	BucketCount int32
}

// HistogramTimeData carries the extra data of a histogram-by-time
// aggregation.
type HistogramTimeData struct {
	Min         int64
	Max         int64
	BucketCount int32
}

// InterpolatedData carries the extra data of a series aggregation with
// interpolation; the value itself is the bucket count.
type InterpolatedData int32

// Encode serializes the value as a little-endian int32.
func (d InterpolatedData) Encode() ([]byte, error) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, d); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// GetBucketCount returns the bucket count (the value itself).
func (d InterpolatedData) GetBucketCount() int32 {
	return int32(d)
}

// Size returns the encoded size in bytes (one int32).
func (d InterpolatedData) Size() int {
	return 4
}

// Encode serializes the struct fields as little-endian binary.
func (h HistogramTimeData) Encode() ([]byte, error) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, h); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// GetBucketCount returns the configured bucket count.
func (h HistogramTimeData) GetBucketCount() int32 {
	return h.BucketCount
}

// Size returns the encoded size in bytes.
func (h HistogramTimeData) Size() int {
	return binary.Size(h)
}

// Encode serializes the struct fields as little-endian binary.
func (v HistogramValueData) Encode() ([]byte, error) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// GetBucketCount returns the configured bucket count.
func (v HistogramValueData) GetBucketCount() int32 {
	return v.BucketCount
}

// Size returns the encoded size in bytes.
func (v HistogramValueData) Size() int {
	return binary.Size(v)
}
package connlib

import (
	"bufio"
	"errors"
	"io"
	"regexp"
	"strconv"
	"strings"
)

var fieldSeparator = regexp.MustCompile(`\s+`)

// ParseEndpoint parses a hexadecimal IPv4 endpoint (eg.: "0100007F:1F90").
// The address half is little-endian, as written by the kernel in
// /proc/net/tcp, so the low byte of the parsed value is the first octet.
func ParseEndpoint(endpoint string) (*Endpoint, error) {
	parts := strings.Split(endpoint, ":")
	// fix: the original indexed parts[1] unconditionally and panicked on
	// input without a ':'.
	if len(parts) != 2 {
		return nil, errors.New("malformed endpoint: " + endpoint)
	}
	ip, err := strconv.ParseUint(parts[0], 16, 32)
	if err != nil {
		return nil, err
	}
	port, err := strconv.ParseUint(parts[1], 16, 16)
	if err != nil {
		return nil, err
	}
	return &Endpoint{
		IP:   IPv4Address{byte(ip & 0xff), byte(ip >> 8), byte(ip >> 16), byte(ip >> 24)},
		Port: uint16(port),
	}, nil
}

// ParseTCPFile parses a file that's in the format of /proc/net/tcp.
// The first line (column headers) is skipped; every following line yields
// one Connection.
func ParseTCPFile(r io.Reader) (ConnectionList, error) {
	reader := bufio.NewReader(r)
	// Skip the header line.
	if _, _, err := reader.ReadLine(); err != nil {
		return nil, err
	}
	connections := ConnectionList{}
	for {
		line, _, err := reader.ReadLine()
		if err == io.EOF {
			return connections, nil
		}
		if err != nil {
			return nil, err
		}
		lineStr := string(line)
		// Each entry looks like "  0: <local> <remote> ..."; drop the
		// slot-number prefix before splitting the fields.
		split := strings.SplitN(lineStr, ":", 2)
		// fix: guard the unchecked [1] index and field accesses that
		// previously panicked on malformed lines.
		if len(split) < 2 {
			return nil, errors.New("malformed line: " + lineStr)
		}
		fields := fieldSeparator.Split(split[1], 15)
		if len(fields) < 3 {
			return nil, errors.New("malformed line: " + lineStr)
		}
		src, err := ParseEndpoint(fields[1])
		if err != nil {
			return nil, err
		}
		dst, err := ParseEndpoint(fields[2])
		if err != nil {
			return nil, err
		}
		connections = append(connections, Connection{Local: *src, Remote: *dst})
	}
}

// CalculateDirection guesses which side of conn initiated it, using the
// given list of listening sockets, and returns the connection with
// source/destination assigned accordingly.
func CalculateDirection(listeners ConnectionList, conn Connection) DirectionalConnection {
	// Let's see if we have an exact hit first
	for _, l := range listeners {
		if l.Local == conn.Remote {
			return DirectionalConnection{Source: conn.Local, Destination: conn.Remote}
		}
		if l.Local == conn.Local {
			return DirectionalConnection{Source: conn.Remote, Destination: conn.Local}
		}
	}
	// Check if there's a listener on 0.0.0.0 for the same port
	// Note: this is totally not right, since we don't have any
	// information about what IP addresses this specific node has.
	// TODO: parse it out from /proc/net/fib_trie and use that
	// list instead of IP.IsUnspecified()!
	for _, l := range listeners {
		if l.Local.IP.IsUnspecified() && l.Local.Port == conn.Remote.Port {
			return DirectionalConnection{Source: conn.Local, Destination: conn.Remote}
		}
		if l.Local.IP.IsUnspecified() && l.Local.Port == conn.Local.Port {
			return DirectionalConnection{Source: conn.Remote, Destination: conn.Local}
		}
	}
	// Assuming outgoing connection if the local port is ephemeral
	if isEphemeralPort(conn.Local.Port) {
		return DirectionalConnection{Source: conn.Local, Destination: conn.Remote}
	}
	// Assuming incoming connection if nothing else matches
	// TODO: return an error and print a warning about it
	return DirectionalConnection{Source: conn.Remote, Destination: conn.Local}
}
package main

import (
	"fmt"
	_ "image/jpeg"
	_ "image/png"
	"os"
	"strconv"
	"time"

	"github.com/deluan/bring"
	"github.com/faiface/pixel"
	"github.com/faiface/pixel/pixelgl"
	"github.com/sirupsen/logrus"
	"golang.org/x/image/colornames"
)

const (
	windowTitle   = "Bring it on!"
	defaultWidth  = 1024
	defaultHeight = 768
	guacdAddress  = "localhost:4822" // address of the local guacd daemon
)

// stateNames maps Bring session states to the labels shown in the window title.
var stateNames = map[bring.SessionState]string{
	bring.SessionActive:    "Active",
	bring.SessionClosed:    "Closed",
	bring.SessionHandshake: "Handshake",
}

// Creates and initialize Bring's Session and Client, pointing it at guacd
// with the requested protocol/host/port, and starts the client loop in a
// background goroutine. Panics on connection setup failure.
// NOTE(review): the "password" entry is hard-coded to "vncpassword" — demo only.
func createBringClient(protocol, hostname, port string) *bring.Client {
	logger := logrus.New()
	logger.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true, ForceColors: true})
	logger.SetLevel(logrus.DebugLevel)

	client, err := bring.NewClient(guacdAddress, protocol, map[string]string{
		"hostname": hostname,
		"port":     port,
		"password": "vncpassword",
		"width":    strconv.Itoa(defaultWidth),
		"height":   strconv.Itoa(defaultHeight),
	}, logger)
	if err != nil {
		panic(err)
	}
	go client.Start()
	return client
}

// mainLoop runs until the window is closed: it copies the remote screen to
// the window when it changes, forwards mouse and keyboard input to the
// remote session, and shows session state and FPS in the title once a second.
func mainLoop(win *pixelgl.Window, client *bring.Client) {
	frames := 0
	second := time.Tick(time.Second) // 1 Hz tick used for the FPS counter
	var lastRefresh int64            // timestamp of the last frame we drew
	for !win.Closed() {
		// Get an updated image from the Bring Client
		img, lastUpdate := client.Screen()
		imgWidth := img.Bounds().Dx()
		imgHeight := img.Bounds().Dy()

		// If the image is not empty
		if imgWidth > 0 && imgHeight > 0 {
			// Process screen updates if there were any updates in the image
			if lastRefresh != lastUpdate {
				updateScreen(win, img)
				lastRefresh = lastUpdate
			}

			// Handle mouse events (nil means nothing new to send)
			mouseInfo := collectNewMouseInfo(win, imgWidth, imgHeight)
			if mouseInfo != nil {
				if err := client.SendMouse(mouseInfo.pos, mouseInfo.pressedButtons...); err != nil {
					fmt.Printf("Error: %s", err)
				}
			}

			// Handle keyboard events: presses and releases are sent separately
			pressed, released := collectKeyStrokes(win)
			for _, k := range pressed {
				if err := client.SendKey(k, true); err != nil {
					fmt.Printf("Error: %s", err)
				}
			}
			for _, k := range released {
				if err := client.SendKey(k, false); err != nil {
					fmt.Printf("Error: %s", err)
				}
			}
		}
		win.Update()

		// Measure FPS and update title (non-blocking check of the ticker)
		frames++
		select {
		case <-second:
			win.SetTitle(fmt.Sprintf("%s | %s | FPS: %d", windowTitle, stateNames[client.State()], frames))
			frames = 0
		default:
		}
	}
}

// Create the App's main window
func createAppWindow() *pixelgl.Window {
	cfg := pixelgl.WindowConfig{
		Title:     windowTitle,
		Bounds:    pixel.R(0, 0, defaultWidth, defaultHeight),
		VSync:     true,
		Resizable: true,
	}
	win, err := pixelgl.NewWindow(cfg)
	if err != nil {
		panic(err)
	}
	win.Clear(colornames.Skyblue)
	win.SetCursorVisible(false)
	return win
}

// Pixel library requires the main to be run inside pixelgl.Run, to guarantee it is run in the main thread
func Main() {
	if len(os.Args) < 4 {
		println("Usage: app <vnc|rdp> address port")
		return
	}
	client := createBringClient(os.Args[1], os.Args[2], os.Args[3])
	win := createAppWindow()
	mainLoop(win, client)
}

func main() {
	pixelgl.Run(Main)
}
/*
Tencent is pleased to support the open source community by making Basic Service Configuration Platform available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions and
limitations under the License.
*/

package dao

import (
	"errors"
	"fmt"

	rawgen "gorm.io/gen"

	"bscp.io/pkg/cc"
	"bscp.io/pkg/criteria/errf"
	"bscp.io/pkg/dal/gen"
	"bscp.io/pkg/dal/table"
	"bscp.io/pkg/kit"
	"bscp.io/pkg/tools"
	"bscp.io/pkg/types"
)

// Credential supplies all the Credential related operations.
type Credential interface {
	// Get get credential
	Get(kit *kit.Kit, bizID, id uint32) (*table.Credential, error)
	// GetByCredentialString get credential by credential string
	GetByCredentialString(kit *kit.Kit, bizID uint32, credential string) (*table.Credential, error)
	// Create one credential instance.
	Create(kit *kit.Kit, credential *table.Credential) (uint32, error)
	// List get credentials
	List(kit *kit.Kit, bizID uint32, searchKey string, opt *types.BasePage) ([]*table.Credential, int64, error)
	// Delete delete credential
	Delete(kit *kit.Kit, strategy *table.Credential) error
	// Update update credential
	Update(kit *kit.Kit, credential *table.Credential) error
	// UpdateRevisionWithTx update credential revision with transaction
	UpdateRevisionWithTx(kit *kit.Kit, tx *gen.QueryTx, bizID, id uint32) error
}

// compile-time check that credentialDao satisfies the Credential interface.
var _ Credential = new(credentialDao)

// credentialDao implements Credential on top of the generated gorm query
// layer, with id generation, auditing and the credential crypto settings
// injected as collaborators.
type credentialDao struct {
	genQ              *gen.Query     // generated query entry point
	idGen             IDGenInterface // allocates new credential ids
	auditDao          AuditDao       // records audit entries alongside writes
	credentialSetting *cc.Credential // encryption algorithm + master key
}

// Get returns the credential identified by bizID and id.
func (dao *credentialDao) Get(kit *kit.Kit, bizID, id uint32) (*table.Credential, error) { if bizID == 0 { return nil, errors.New("bizID is empty") } if id == 0 { return nil, errors.New("credential id is empty") } m := dao.genQ.Credential q := dao.genQ.Credential.WithContext(kit.Ctx) credential, err := q.Where(m.BizID.Eq(bizID), m.ID.Eq(id)).Take() if err != nil { return nil, fmt.Errorf("get credential failed, err: %v", err) } return credential, nil } // Get Credential by encoded credential string. func (dao *credentialDao) GetByCredentialString(kit *kit.Kit, bizID uint32, str string) (*table.Credential, error) { if bizID == 0 { return nil, errors.New("bizID is empty") } if str == "" { return nil, errors.New("credential string is empty") } // encode credential string encryptionAlgorithm := dao.credentialSetting.EncryptionAlgorithm masterKey := dao.credentialSetting.MasterKey encrypted, err := tools.EncryptCredential(str, masterKey, encryptionAlgorithm) if err != nil { return nil, errf.ErrCredentialInvalid } m := dao.genQ.Credential q := dao.genQ.Credential.WithContext(kit.Ctx) credential, err := q.Where(m.BizID.Eq(bizID), m.EncCredential.Eq(encrypted)).Take() if err != nil { return nil, fmt.Errorf("get credential failed, err: %v", err) } return credential, nil } // Create create credential func (dao *credentialDao) Create(kit *kit.Kit, g *table.Credential) (uint32, error) { if err := g.ValidateCreate(); err != nil { return 0, err } // generate a credential id and update to credential. 
id, err := dao.idGen.One(kit, table.Name(g.TableName())) if err != nil { return 0, err } g.ID = id ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareCreate(g) // 多个使用事务处理 createTx := func(tx *gen.Query) error { q := tx.Credential.WithContext(kit.Ctx) if err := q.Create(g); err != nil { return err } if err := ad.Do(tx); err != nil { return err } return nil } if err := dao.genQ.Transaction(createTx); err != nil { return 0, nil } return g.ID, nil } // List get credentials func (dao *credentialDao) List(kit *kit.Kit, bizID uint32, searchKey string, opt *types.BasePage) ( []*table.Credential, int64, error) { m := dao.genQ.Credential q := dao.genQ.Credential.WithContext(kit.Ctx) var conds []rawgen.Condition if searchKey != "" { conds = append(conds, q.Where(m.Memo.Regexp("(?i)"+searchKey)).Or(m.Reviser.Regexp("(?i)"+searchKey))) } result, count, err := q.Where(m.BizID.Eq(bizID)). Where(conds...). Order(m.ID.Desc()). FindByPage(opt.Offset(), opt.LimitInt()) if err != nil { return nil, 0, err } return result, count, nil } // Delete delete credential func (dao *credentialDao) Delete(kit *kit.Kit, g *table.Credential) error { // 参数校验 if err := g.ValidateDelete(); err != nil { return err } // 删除操作, 获取当前记录做审计 m := dao.genQ.Credential q := dao.genQ.Credential.WithContext(kit.Ctx) oldOne, err := q.Where(m.ID.Eq(g.ID), m.BizID.Eq(g.Attachment.BizID)).Take() if err != nil { return err } ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareDelete(oldOne) // 多个使用事务处理 deleteTx := func(tx *gen.Query) error { q = tx.Credential.WithContext(kit.Ctx) if _, err := q.Where(m.BizID.Eq(g.Attachment.BizID)).Delete(g); err != nil { return err } if err := ad.Do(tx); err != nil { return err } return nil } if err := dao.genQ.Transaction(deleteTx); err != nil { return err } return nil } // Update update credential // Note: only update name, description, enable func (dao *credentialDao) Update(kit *kit.Kit, g *table.Credential) error { if err := g.ValidateUpdate(); err != nil 
{ return err } // 更新操作, 获取当前记录做审计 m := dao.genQ.Credential q := dao.genQ.Credential.WithContext(kit.Ctx) oldOne, err := q.Where(m.ID.Eq(g.ID), m.BizID.Eq(g.Attachment.BizID)).Take() if err != nil { return err } ad := dao.auditDao.DecoratorV2(kit, g.Attachment.BizID).PrepareUpdate(g, oldOne) // 多个使用事务处理 updateTx := func(tx *gen.Query) error { q = tx.Credential.WithContext(kit.Ctx) if _, err := q.Where(m.BizID.Eq(g.Attachment.BizID), m.ID.Eq(g.ID)). Select(m.Memo, m.Enable, m.Reviser).Updates(g); err != nil { return err } if err := ad.Do(tx); err != nil { return err } return nil } if err := dao.genQ.Transaction(updateTx); err != nil { return err } return nil } // UpdateRevisionWithTx update credential revision with transaction func (dao *credentialDao) UpdateRevisionWithTx(kit *kit.Kit, tx *gen.QueryTx, bizID uint32, id uint32) error { if bizID == 0 || id == 0 { return errors.New("credential bizID or id is zero") } m := tx.Credential q := tx.Credential.WithContext(kit.Ctx) if _, err := q.Where(m.BizID.Eq(bizID), m.ID.Eq(id)). Select(m.Reviser).Update(m.Reviser, kit.User); err != nil { return err } return nil }
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package perfschema // perfSchemaTables is a shortcut to involve all table names. var perfSchemaTables = []string{ tableGlobalStatus, tableSessionConnectAttrs, tableSessionStatus, tableSetupActors, tableSetupObjects, tableSetupInstruments, tableSetupConsumers, tableStmtsCurrent, tableStmtsHistory, tableStmtsHistoryLong, tablePreparedStmtsInstances, tableTransCurrent, tableTransHistory, tableTransHistoryLong, tableSessionVariables, tableStagesCurrent, tableStagesHistory, tableStagesHistoryLong, tableEventsStatementsSummaryByDigest, tableTiDBProfileCPU, tableTiDBProfileMemory, tableTiDBProfileMutex, tableTiDBProfileAllocs, tableTiDBProfileBlock, tableTiDBProfileGoroutines, tableTiKVProfileCPU, tablePDProfileCPU, tablePDProfileMemory, tablePDProfileMutex, tablePDProfileAllocs, tablePDProfileBlock, tablePDProfileGoroutines, } // tableGlobalStatus contains the column name definitions for table global_status, same as MySQL. const tableGlobalStatus = "CREATE TABLE performance_schema." + tableNameGlobalStatus + " (" + "VARIABLE_NAME VARCHAR(64) not null," + "VARIABLE_VALUE VARCHAR(1024));" // tableSessionStatus contains the column name definitions for table session_status, same as MySQL. const tableSessionStatus = "CREATE TABLE performance_schema." 
+ tableNameSessionStatus + " (" + "VARIABLE_NAME VARCHAR(64) not null," + "VARIABLE_VALUE VARCHAR(1024));" // tableSetupActors contains the column name definitions for table setup_actors, same as MySQL. const tableSetupActors = "CREATE TABLE if not exists performance_schema." + tableNameSetupActors + " (" + "HOST CHAR(60) NOT NULL DEFAULT '%'," + "USER CHAR(32) NOT NULL DEFAULT '%'," + "ROLE CHAR(16) NOT NULL DEFAULT '%'," + "ENABLED ENUM('YES','NO') NOT NULL DEFAULT 'YES'," + "HISTORY ENUM('YES','NO') NOT NULL DEFAULT 'YES');" // tableSetupObjects contains the column name definitions for table setup_objects, same as MySQL. const tableSetupObjects = "CREATE TABLE if not exists performance_schema." + tableNameSetupObjects + " (" + "OBJECT_TYPE ENUM('EVENT','FUNCTION','TABLE') NOT NULL DEFAULT 'TABLE'," + "OBJECT_SCHEMA VARCHAR(64) DEFAULT '%'," + "OBJECT_NAME VARCHAR(64) NOT NULL DEFAULT '%'," + "ENABLED ENUM('YES','NO') NOT NULL DEFAULT 'YES'," + "TIMED ENUM('YES','NO') NOT NULL DEFAULT 'YES');" // tableSetupInstruments contains the column name definitions for table setup_instruments, same as MySQL. const tableSetupInstruments = "CREATE TABLE if not exists performance_schema." + tableNameSetupInstruments + " (" + "NAME VARCHAR(128) NOT NULL," + "ENABLED ENUM('YES','NO') NOT NULL," + "TIMED ENUM('YES','NO') NOT NULL);" // tableSetupConsumers contains the column name definitions for table setup_consumers, same as MySQL. const tableSetupConsumers = "CREATE TABLE if not exists performance_schema." + tableNameSetupConsumers + " (" + "NAME VARCHAR(64) NOT NULL," + "ENABLED ENUM('YES','NO') NOT NULL);" // tableStmtsCurrent contains the column name definitions for table events_statements_current, same as MySQL. const tableStmtsCurrent = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsStatementsCurrent + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + "SQL_TEXT LONGTEXT," + "DIGEST VARCHAR(32)," + "DIGEST_TEXT LONGTEXT," + "CURRENT_SCHEMA VARCHAR(64)," + "OBJECT_TYPE VARCHAR(64)," + "OBJECT_SCHEMA VARCHAR(64)," + "OBJECT_NAME VARCHAR(64)," + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED," + "MYSQL_ERRNO INT(11)," + "RETURNED_SQLSTATE VARCHAR(5)," + "MESSAGE_TEXT VARCHAR(128)," + "ERRORS BIGINT(20) UNSIGNED NOT NULL," + "WARNINGS BIGINT(20) UNSIGNED NOT NULL," + "ROWS_AFFECTED BIGINT(20) UNSIGNED NOT NULL," + "ROWS_SENT BIGINT(20) UNSIGNED NOT NULL," + "ROWS_EXAMINED BIGINT(20) UNSIGNED NOT NULL," + "CREATED_TMP_DISK_TABLES BIGINT(20) UNSIGNED NOT NULL," + "CREATED_TMP_TABLES BIGINT(20) UNSIGNED NOT NULL," + "SELECT_FULL_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SELECT_FULL_RANGE_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SELECT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SELECT_RANGE_CHECK BIGINT(20) UNSIGNED NOT NULL," + "SELECT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "SORT_MERGE_PASSES BIGINT(20) UNSIGNED NOT NULL," + "SORT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SORT_ROWS BIGINT(20) UNSIGNED NOT NULL," + "SORT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "NO_INDEX_USED BIGINT(20) UNSIGNED NOT NULL," + "NO_GOOD_INDEX_USED BIGINT(20) UNSIGNED NOT NULL," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE')," + "NESTING_EVENT_LEVEL INT(11));" // tableStmtsHistory contains the column name definitions for table events_statements_history, same as MySQL. const tableStmtsHistory = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsStatementsHistory + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + "SQL_TEXT LONGTEXT," + "DIGEST VARCHAR(32)," + "DIGEST_TEXT LONGTEXT," + "CURRENT_SCHEMA VARCHAR(64)," + "OBJECT_TYPE VARCHAR(64)," + "OBJECT_SCHEMA VARCHAR(64)," + "OBJECT_NAME VARCHAR(64)," + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED," + "MYSQL_ERRNO INT(11)," + "RETURNED_SQLSTATE VARCHAR(5)," + "MESSAGE_TEXT VARCHAR(128)," + "ERRORS BIGINT(20) UNSIGNED NOT NULL," + "WARNINGS BIGINT(20) UNSIGNED NOT NULL," + "ROWS_AFFECTED BIGINT(20) UNSIGNED NOT NULL," + "ROWS_SENT BIGINT(20) UNSIGNED NOT NULL," + "ROWS_EXAMINED BIGINT(20) UNSIGNED NOT NULL," + "CREATED_TMP_DISK_TABLES BIGINT(20) UNSIGNED NOT NULL," + "CREATED_TMP_TABLES BIGINT(20) UNSIGNED NOT NULL," + "SELECT_FULL_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SELECT_FULL_RANGE_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SELECT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SELECT_RANGE_CHECK BIGINT(20) UNSIGNED NOT NULL," + "SELECT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "SORT_MERGE_PASSES BIGINT(20) UNSIGNED NOT NULL," + "SORT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SORT_ROWS BIGINT(20) UNSIGNED NOT NULL," + "SORT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "NO_INDEX_USED BIGINT(20) UNSIGNED NOT NULL," + "NO_GOOD_INDEX_USED BIGINT(20) UNSIGNED NOT NULL," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE')," + "NESTING_EVENT_LEVEL INT(11));" // tableStmtsHistoryLong contains the column name definitions for table events_statements_history_long, same as MySQL. const tableStmtsHistoryLong = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsStatementsHistoryLong + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + "SQL_TEXT LONGTEXT," + "DIGEST VARCHAR(32)," + "DIGEST_TEXT LONGTEXT," + "CURRENT_SCHEMA VARCHAR(64)," + "OBJECT_TYPE VARCHAR(64)," + "OBJECT_SCHEMA VARCHAR(64)," + "OBJECT_NAME VARCHAR(64)," + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED," + "MYSQL_ERRNO INT(11)," + "RETURNED_SQLSTATE VARCHAR(5)," + "MESSAGE_TEXT VARCHAR(128)," + "ERRORS BIGINT(20) UNSIGNED NOT NULL," + "WARNINGS BIGINT(20) UNSIGNED NOT NULL," + "ROWS_AFFECTED BIGINT(20) UNSIGNED NOT NULL," + "ROWS_SENT BIGINT(20) UNSIGNED NOT NULL," + "ROWS_EXAMINED BIGINT(20) UNSIGNED NOT NULL," + "CREATED_TMP_DISK_TABLES BIGINT(20) UNSIGNED NOT NULL," + "CREATED_TMP_TABLES BIGINT(20) UNSIGNED NOT NULL," + "SELECT_FULL_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SELECT_FULL_RANGE_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SELECT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SELECT_RANGE_CHECK BIGINT(20) UNSIGNED NOT NULL," + "SELECT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "SORT_MERGE_PASSES BIGINT(20) UNSIGNED NOT NULL," + "SORT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SORT_ROWS BIGINT(20) UNSIGNED NOT NULL," + "SORT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "NO_INDEX_USED BIGINT(20) UNSIGNED NOT NULL," + "NO_GOOD_INDEX_USED BIGINT(20) UNSIGNED NOT NULL," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE')," + "NESTING_EVENT_LEVEL INT(11));" // tablePreparedStmtsInstances contains the column name definitions for table prepared_statements_instances, same as MySQL. const tablePreparedStmtsInstances = "CREATE TABLE if not exists performance_schema." 
+ tableNamePreparedStatementsInstances + " (" + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED NOT NULL," + "STATEMENT_ID BIGINT(20) UNSIGNED NOT NULL," + "STATEMENT_NAME VARCHAR(64)," + "SQL_TEXT LONGTEXT NOT NULL," + "OWNER_THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "OWNER_EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "OWNER_OBJECT_TYPE ENUM('EVENT','FUNCTION','TABLE')," + "OWNER_OBJECT_SCHEMA VARCHAR(64)," + "OWNER_OBJECT_NAME VARCHAR(64)," + "TIMER_PREPARE BIGINT(20) UNSIGNED NOT NULL," + "COUNT_REPREPARE BIGINT(20) UNSIGNED NOT NULL," + "COUNT_EXECUTE BIGINT(20) UNSIGNED NOT NULL," + "SUM_TIMER_EXECUTE BIGINT(20) UNSIGNED NOT NULL," + "MIN_TIMER_EXECUTE BIGINT(20) UNSIGNED NOT NULL," + "AVG_TIMER_EXECUTE BIGINT(20) UNSIGNED NOT NULL," + "MAX_TIMER_EXECUTE BIGINT(20) UNSIGNED NOT NULL," + "SUM_LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + "SUM_ERRORS BIGINT(20) UNSIGNED NOT NULL," + "SUM_WARNINGS BIGINT(20) UNSIGNED NOT NULL," + " SUM_ROWS_AFFECTED BIGINT(20) UNSIGNED NOT NULL," + "SUM_ROWS_SENT BIGINT(20) UNSIGNED NOT NULL," + "SUM_ROWS_EXAMINED BIGINT(20) UNSIGNED NOT NULL," + "SUM_CREATED_TMP_DISK_TABLES BIGINT(20) UNSIGNED NOT NULL," + "SUM_CREATED_TMP_TABLES BIGINT(20) UNSIGNED NOT NULL," + "SUM_SELECT_FULL_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SUM_SELECT_FULL_RANGE_JOIN BIGINT(20) UNSIGNED NOT NULL," + "SUM_SELECT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SUM_SELECT_RANGE_CHECK BIGINT(20) UNSIGNED NOT NULL," + "SUM_SELECT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "SUM_SORT_MERGE_PASSES BIGINT(20) UNSIGNED NOT NULL," + "SUM_SORT_RANGE BIGINT(20) UNSIGNED NOT NULL," + "SUM_SORT_ROWS BIGINT(20) UNSIGNED NOT NULL," + "SUM_SORT_SCAN BIGINT(20) UNSIGNED NOT NULL," + "SUM_NO_INDEX_USED BIGINT(20) UNSIGNED NOT NULL," + "SUM_NO_GOOD_INDEX_USED BIGINT(20) UNSIGNED NOT NULL);" // tableTransCurrent contains the column name definitions for table events_transactions_current, same as MySQL. const tableTransCurrent = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsTransactionsCurrent + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "STATE ENUM('ACTIVE','COMMITTED','ROLLED BACK')," + "TRX_ID BIGINT(20) UNSIGNED," + "GTID VARCHAR(64)," + "XID_FORMAT_ID INT(11)," + "XID_GTRID VARCHAR(130)," + "XID_BQUAL VARCHAR(130)," + "XA_STATE VARCHAR(64)," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "ACCESS_MODE ENUM('READ ONLY','READ WRITE')," + "ISOLATION_LEVEL VARCHAR(64)," + "AUTOCOMMIT ENUM('YES','NO') NOT NULL," + "NUMBER_OF_SAVEPOINTS BIGINT(20) UNSIGNED," + "NUMBER_OF_ROLLBACK_TO_SAVEPOINT BIGINT(20) UNSIGNED," + "NUMBER_OF_RELEASE_SAVEPOINT BIGINT(20) UNSIGNED," + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableTransHistory contains the column name definitions for table events_transactions_history, same as MySQL. const tableTransHistory = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsTransactionsHistory + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "STATE ENUM('ACTIVE','COMMITTED','ROLLED BACK')," + "TRX_ID BIGINT(20) UNSIGNED," + "GTID VARCHAR(64)," + "XID_FORMAT_ID INT(11)," + "XID_GTRID VARCHAR(130)," + "XID_BQUAL VARCHAR(130)," + "XA_STATE VARCHAR(64)," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "ACCESS_MODE ENUM('READ ONLY','READ WRITE')," + "ISOLATION_LEVEL VARCHAR(64)," + "AUTOCOMMIT ENUM('YES','NO') NOT NULL," + "NUMBER_OF_SAVEPOINTS BIGINT(20) UNSIGNED," + "NUMBER_OF_ROLLBACK_TO_SAVEPOINT BIGINT(20) UNSIGNED," + "NUMBER_OF_RELEASE_SAVEPOINT BIGINT(20) UNSIGNED," + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableTransHistoryLong contains the column name definitions for table events_transactions_history_long, same as MySQL. const tableTransHistoryLong = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsTransactionsHistoryLong + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "STATE ENUM('ACTIVE','COMMITTED','ROLLED BACK')," + "TRX_ID BIGINT(20) UNSIGNED," + "GTID VARCHAR(64)," + "XID_FORMAT_ID INT(11)," + "XID_GTRID VARCHAR(130)," + "XID_BQUAL VARCHAR(130)," + "XA_STATE VARCHAR(64)," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "ACCESS_MODE ENUM('READ ONLY','READ WRITE')," + "ISOLATION_LEVEL VARCHAR(64)," + "AUTOCOMMIT ENUM('YES','NO') NOT NULL," + "NUMBER_OF_SAVEPOINTS BIGINT(20) UNSIGNED," + "NUMBER_OF_ROLLBACK_TO_SAVEPOINT BIGINT(20) UNSIGNED," + "NUMBER_OF_RELEASE_SAVEPOINT BIGINT(20) UNSIGNED," + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableStagesCurrent contains the column name definitions for table events_stages_current, same as MySQL. const tableStagesCurrent = "CREATE TABLE if not exists performance_schema." + tableNameEventsStagesCurrent + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "WORK_COMPLETED BIGINT(20) UNSIGNED," + "WORK_ESTIMATED BIGINT(20) UNSIGNED," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableStagesHistory contains the column name definitions for table events_stages_history, same as MySQL. const tableStagesHistory = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsStagesHistory + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "WORK_COMPLETED BIGINT(20) UNSIGNED," + "WORK_ESTIMATED BIGINT(20) UNSIGNED," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableStagesHistoryLong contains the column name definitions for table events_stages_history_long, same as MySQL. const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema." + tableNameEventsStagesHistoryLong + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + "EVENT_NAME VARCHAR(128) NOT NULL," + "SOURCE VARCHAR(64)," + "TIMER_START BIGINT(20) UNSIGNED," + "TIMER_END BIGINT(20) UNSIGNED," + "TIMER_WAIT BIGINT(20) UNSIGNED," + "WORK_COMPLETED BIGINT(20) UNSIGNED," + "WORK_ESTIMATED BIGINT(20) UNSIGNED," + "NESTING_EVENT_ID BIGINT(20) UNSIGNED," + "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableEventsStatementsSummaryByDigest contains the column name definitions for table // events_statements_summary_by_digest, same as MySQL. const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists performance_schema." 
+ tableNameEventsStatementsSummaryByDigest + " (" + "SCHEMA_NAME varchar(64) DEFAULT NULL," + "DIGEST varchar(64) DEFAULT NULL," + "DIGEST_TEXT longtext," + "COUNT_STAR bigint unsigned NOT NULL," + "SUM_TIMER_WAIT bigint unsigned NOT NULL," + "MIN_TIMER_WAIT bigint unsigned NOT NULL," + "AVG_TIMER_WAIT bigint unsigned NOT NULL," + "MAX_TIMER_WAIT bigint unsigned NOT NULL," + "SUM_LOCK_TIME bigint unsigned NOT NULL," + "SUM_ERRORS bigint unsigned NOT NULL," + "SUM_WARNINGS bigint unsigned NOT NULL," + "SUM_ROWS_AFFECTED bigint unsigned NOT NULL," + "SUM_ROWS_SENT bigint unsigned NOT NULL," + "SUM_ROWS_EXAMINED bigint unsigned NOT NULL," + "SUM_CREATED_TMP_DISK_TABLES bigint unsigned NOT NULL," + "SUM_CREATED_TMP_TABLES bigint unsigned NOT NULL," + "SUM_SELECT_FULL_JOIN bigint unsigned NOT NULL," + "SUM_SELECT_FULL_RANGE_JOIN bigint unsigned NOT NULL," + "SUM_SELECT_RANGE bigint unsigned NOT NULL," + "SUM_SELECT_RANGE_CHECK bigint unsigned NOT NULL," + "SUM_SELECT_SCAN bigint unsigned NOT NULL," + "SUM_SORT_MERGE_PASSES bigint unsigned NOT NULL," + "SUM_SORT_RANGE bigint unsigned NOT NULL," + "SUM_SORT_ROWS bigint unsigned NOT NULL," + "SUM_SORT_SCAN bigint unsigned NOT NULL," + "SUM_NO_INDEX_USED bigint unsigned NOT NULL," + "SUM_NO_GOOD_INDEX_USED bigint unsigned NOT NULL," + "FIRST_SEEN timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000'," + "LAST_SEEN timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000'," + "PLAN_IN_CACHE bool NOT NULL," + "PLAN_CACHE_HITS bigint unsigned NOT NULL," + "PLAN_IN_BINDING bool NOT NULL," + "QUANTILE_95 bigint unsigned NOT NULL," + "QUANTILE_99 bigint unsigned NOT NULL," + "QUANTILE_999 bigint unsigned NOT NULL," + "QUERY_SAMPLE_TEXT longtext," + "QUERY_SAMPLE_SEEN timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000'," + "QUERY_SAMPLE_TIMER_WAIT bigint unsigned NOT NULL," + "UNIQUE KEY `SCHEMA_NAME` (`SCHEMA_NAME`,`DIGEST`));" // tableTiDBProfileCPU contains the columns name definitions for table tidb_profile_cpu 
const tableTiDBProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileCPU + " (" +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tableTiDBProfileMemory contains the columns name definitions for table tidb_profile_memory
const tableTiDBProfileMemory = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileMemory + " (" +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tableTiDBProfileMutex contains the columns name definitions for table tidb_profile_mutex
const tableTiDBProfileMutex = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileMutex + " (" +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tableTiDBProfileAllocs contains the columns name definitions for table tidb_profile_allocs
const tableTiDBProfileAllocs = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileAllocs + " (" +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tableTiDBProfileBlock contains the columns name definitions for table tidb_profile_block
const tableTiDBProfileBlock = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileBlock + " (" +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tableTiDBProfileGoroutines contains the columns name definitions for table tidb_profile_goroutines
const tableTiDBProfileGoroutines = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileGoroutines + " (" +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"ID INT(8) NOT NULL," +
	"STATE VARCHAR(16) NOT NULL," +
	"LOCATION VARCHAR(512) NOT NULL);"

// tableTiKVProfileCPU contains the columns name definitions for table tikv_profile_cpu.
// Unlike the tidb_profile_* tables, remote-component tables carry an ADDRESS column
// identifying the instance the profile came from.
const tableTiKVProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNameTiKVProfileCPU + " (" +
	"ADDRESS VARCHAR(64) NOT NULL," +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tablePDProfileCPU contains the columns name definitions for table pd_profile_cpu
const tablePDProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileCPU + " (" +
	"ADDRESS VARCHAR(64) NOT NULL," +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tablePDProfileMemory contains the columns name definitions for table pd_profile_memory
const tablePDProfileMemory = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileMemory + " (" +
	"ADDRESS VARCHAR(64) NOT NULL," +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tablePDProfileMutex contains the columns name definitions for table pd_profile_mutex
const tablePDProfileMutex = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileMutex + " (" +
	"ADDRESS VARCHAR(64) NOT NULL," +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tablePDProfileAllocs contains the columns name definitions for table pd_profile_allocs
const tablePDProfileAllocs = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileAllocs + " (" +
	"ADDRESS VARCHAR(64) NOT NULL," +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tablePDProfileBlock contains the columns name definitions for table pd_profile_block
const tablePDProfileBlock = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileBlock + " (" +
	"ADDRESS VARCHAR(64) NOT NULL," +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"PERCENT_ABS VARCHAR(8) NOT NULL," +
	"PERCENT_REL VARCHAR(8) NOT NULL," +
	"ROOT_CHILD INT(8) NOT NULL," +
	"DEPTH INT(8) NOT NULL," +
	"FILE VARCHAR(512) NOT NULL);"

// tablePDProfileGoroutines contains the columns name definitions for table pd_profile_goroutines
const tablePDProfileGoroutines = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileGoroutines + " (" +
	"ADDRESS VARCHAR(64) NOT NULL," +
	"FUNCTION VARCHAR(512) NOT NULL," +
	"ID INT(8) NOT NULL," +
	"STATE VARCHAR(16) NOT NULL," +
	"LOCATION VARCHAR(512) NOT NULL);"

// tableSessionVariables contains the column name definitions for table session_variables
const tableSessionVariables = "CREATE TABLE IF NOT EXISTS " + tableNameSessionVariables + " (" +
	"VARIABLE_NAME VARCHAR(64) NOT NULL," +
	"VARIABLE_VALUE VARCHAR(1024) NOT NULL);"

// tableSessionConnectAttrs contains the column name definitions for the table session_connect_attrs
const tableSessionConnectAttrs = "CREATE TABLE IF NOT EXISTS " + tableNameSessionConnectAttrs + " (" +
	"PROCESSLIST_ID bigint unsigned NOT NULL," +
	"ATTR_NAME varchar(32) COLLATE utf8mb4_bin NOT NULL," +
	"ATTR_VALUE varchar(1024) COLLATE utf8mb4_bin DEFAULT NULL," +
	"ORDINAL_POSITION int DEFAULT NULL);"
package main

import (
	"flag"
	"fmt"
	"log"
	"os"

	"github.com/fatih/color"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

// flagInit parses command-line flags and returns the pull request number
// (-1 when not supplied) and the local branch name to fetch it into.
func flagInit() (int, string) {
	color.Set(color.Bold)
	prID := flag.Int("n", -1, color.HiRedString("number of pull request you want to fetch for review - mandatory"))
	branch := flag.String("b", "review", color.HiBlueString("name of branch you want PR fetched to"))
	color.Unset()

	flag.Parse()

	return *prID, *branch
}

// checkError aborts the program with a logged message when err is non-nil.
func checkError(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

// main fetches the given pull request from the "upstream" remote into a local
// branch and checks that branch out.
func main() {
	number, branch := flagInit()
	if number < 0 {
		// Pass the message straight to log.Fatal. The previous
		// log.Fatal(fmt.Errorf(color.RedString(...))) used a non-constant
		// format string (flagged by go vet's printf check) and wrapped the
		// message in an error only to stringify it again.
		log.Fatal(color.RedString(`use "-n" flag to set number of pull request you want to fetch`))
	}

	r, err := git.PlainOpen(".")
	checkError(err)

	// Map GitHub's refs/pull/<n>/head ref onto a local branch.
	externalRefs := config.RefSpec(fmt.Sprintf("refs/pull/%d/head:refs/heads/%s", number, branch))
	err = r.Fetch(&git.FetchOptions{Progress: os.Stdout, RemoteName: "upstream", RefSpecs: []config.RefSpec{externalRefs}})
	checkError(err)

	w, err := r.Worktree()
	checkError(err)

	branchAsPlmbRef := plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", branch))
	err = w.Checkout(&git.CheckoutOptions{Branch: branchAsPlmbRef})
	checkError(err)

	color.HiGreen("Done!")
}
package handler

import (
	"log"
	"net/http"

	"github.com/google/uuid"
	"github.com/gorilla/mux"

	. "2019_2_IBAT/pkg/pkg/models"
)

// CreateFavorite adds the vacancy identified by the "id" path variable to the
// authenticated user's favorites. Responds 401 without auth context, 400 on a
// malformed id, 403 when the service rejects the call; on success nothing is
// written, so the client gets 200 with an empty JSON body.
func (h *Handler) CreateFavorite(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")

	authInfo, ok := FromContext(r.Context())
	if !ok {
		SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
		return
	}

	vacId, err := uuid.Parse(mux.Vars(r)["id"])
	if err != nil {
		log.Printf("Handle CreateFavorite: invalid id - %s", err)
		SetError(w, http.StatusBadRequest, InvalidIdMsg)
		return
	}

	err = h.UserService.CreateFavorite(vacId, authInfo)
	if err != nil {
		// NOTE(review): every service failure maps to 403 here — confirm the
		// service cannot fail for non-permission reasons (e.g. internal error).
		SetError(w, http.StatusForbidden, ForbiddenMsg)
		return
	}
}

// GetFavoriteVacancies writes the authenticated user's favorite vacancies as
// a JSON array. Responds 401 when the request carries no auth context.
func (h *Handler) GetFavoriteVacancies(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")

	authInfo, ok := FromContext(r.Context())
	if !ok {
		SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
		return
	}

	var vacancies VacancySlice
	// NOTE(review): the service error is deliberately dropped; an empty slice
	// is served on failure. Confirm this best-effort behavior is intended.
	vacancies, _ = h.UserService.GetFavoriteVacancies(authInfo)

	respondsJSON, _ := vacancies.MarshalJSON()
	w.Write(respondsJSON)
}

// DeleteFavoriteVacancy removes the vacancy identified by the "id" path
// variable from the authenticated user's favorites. The service error message
// is mapped onto an HTTP status code; unknown messages become 400.
func (h *Handler) DeleteFavoriteVacancy(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")

	authInfo, ok := FromContext(r.Context())
	if !ok {
		SetError(w, http.StatusUnauthorized, UnauthorizedMsg)
		return
	}

	vacId, err := uuid.Parse(mux.Vars(r)["id"])
	if err != nil {
		SetError(w, http.StatusBadRequest, InvalidIdMsg)
		return
	}

	err = h.UserService.DeleteFavoriteVacancy(vacId, authInfo)
	if err != nil {
		// Translate the service's error message into a status code.
		var code int
		switch err.Error() {
		case ForbiddenMsg:
			code = http.StatusForbidden
		case UnauthorizedMsg:
			code = http.StatusUnauthorized
		case InternalErrorMsg:
			code = http.StatusInternalServerError
		default:
			code = http.StatusBadRequest
		}
		SetError(w, code, err.Error())
		return
	}
}
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"

	"github.com/actions-go/toolkit/core"
)

// now is an indirection over time.Now so tests can substitute a fixed clock.
var now = func() time.Time { return time.Now() }

// runMain reads the INPUT_MILLISECONDS action input, sleeps for that many
// milliseconds, and publishes the completion time as the "time" output.
// A non-numeric input is reported via core.Error and aborts the run.
func runMain() {
	msInput := os.Getenv("INPUT_MILLISECONDS")

	core.Debug(fmt.Sprintf("Waiting %s milliseconds", msInput))
	core.Debug(now().String())

	ms, convErr := strconv.Atoi(msInput)
	if convErr != nil {
		core.Error(convErr.Error())
		return
	}

	time.Sleep(time.Duration(ms) * time.Millisecond)

	core.Debug(now().String())
	core.SetOutput("time", now().String())
}

func main() {
	runMain()
}
package ionic

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/ion-channel/ionic/risk"
)

// GetScores takes one or more purl or other software ids, then performs a request for scores
// against the Ion API, returning a set of scores based on the ids.
func (ic *IonClient) GetScores(ids []string, token string) ([]Scores, error) {
	body, err := json.Marshal(ids)
	if err != nil {
		// Error messages previously referenced "session" and "product search"
		// (copy-paste from another endpoint); they now describe this call.
		return nil, fmt.Errorf("scores: failed to marshal request body: %v", err)
	}

	buff := bytes.NewBuffer(body)

	b, err := ic.Post(risk.GetScoresEnpoint, token, nil, *buff, nil)
	if err != nil {
		return nil, fmt.Errorf("scores: failed to request scores: %v", err)
	}

	var results []Scores
	err = json.Unmarshal(b, &results)
	if err != nil {
		return nil, fmt.Errorf("scores: failed to unmarshal scores response: %v", err)
	}

	return results, nil
}
/*
 * VMaaS Webapp
 *
 * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 *
 * API version: 1.3.2
 * Generated by: OpenAPI Generator (https://openapi-generator.tech)
 */

// NOTE: this file is machine-generated; regenerate it rather than hand-editing.

package vmaas

import (
	_context "context"
	_ioutil "io/ioutil"
	_nethttp "net/http"
	_neturl "net/url"
	"strings"

	"github.com/antihax/optional"
)

// Linger please
var (
	_ _context.Context
)

// VulnerabilitiesApiService VulnerabilitiesApi service
type VulnerabilitiesApiService service

/*
AppVulnerabilitiesHandlerGetGet Method for AppVulnerabilitiesHandlerGetGet
List of applicable CVEs for a single package NEVRA
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param nevra Package NEVRA
@return VulnerabilitiesResponse
*/
func (a *VulnerabilitiesApiService) AppVulnerabilitiesHandlerGetGet(ctx _context.Context, nevra string) (VulnerabilitiesResponse, *_nethttp.Response, error) {
	var (
		localVarHTTPMethod   = _nethttp.MethodGet
		localVarPostBody     interface{}
		localVarFormFileName string
		localVarFileName     string
		localVarFileBytes    []byte
		localVarReturnValue  VulnerabilitiesResponse
	)

	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/v1/vulnerabilities/{nevra}"
	localVarPath = strings.Replace(localVarPath, "{"+"nevra"+"}", _neturl.QueryEscape(parameterToString(nevra, "")), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := _neturl.Values{}
	localVarFormParams := _neturl.Values{}

	// to determine the Content-Type header
	localVarHTTPContentTypes := []string{}

	// set Content-Type header
	localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
	if localVarHTTPContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHTTPContentType
	}

	// to determine the Accept header
	localVarHTTPHeaderAccepts := []string{"application/json"}

	// set Accept header
	localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
	if localVarHTTPHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
	}

	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
	if err != nil {
		return localVarReturnValue, nil, err
	}

	localVarHTTPResponse, err := a.client.callAPI(r)
	if err != nil || localVarHTTPResponse == nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
	localVarHTTPResponse.Body.Close()
	if err != nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	// Non-2xx statuses become a GenericOpenAPIError carrying the raw body.
	if localVarHTTPResponse.StatusCode >= 300 {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: localVarHTTPResponse.Status,
		}
		if localVarHTTPResponse.StatusCode == 200 {
			var v VulnerabilitiesResponse
			err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHTTPResponse, newErr
			}
			newErr.model = v
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
	if err != nil {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: err.Error(),
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	return localVarReturnValue, localVarHTTPResponse, nil
}

// AppVulnerabilitiesHandlerPostPostOpts Optional parameters for the method 'AppVulnerabilitiesHandlerPostPost'
type AppVulnerabilitiesHandlerPostPostOpts struct {
	VulnerabilitiesRequest optional.Interface
}

/*
AppVulnerabilitiesHandlerPostPost Method for AppVulnerabilitiesHandlerPostPost
List of applicable CVEs to a package list.
 * @param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
 * @param optional nil or *AppVulnerabilitiesHandlerPostPostOpts - Optional Parameters:
 * @param "VulnerabilitiesRequest" (optional.Interface of VulnerabilitiesRequest) -
@return VulnerabilitiesResponse
*/
func (a *VulnerabilitiesApiService) AppVulnerabilitiesHandlerPostPost(ctx _context.Context, localVarOptionals *AppVulnerabilitiesHandlerPostPostOpts) (VulnerabilitiesResponse, *_nethttp.Response, error) {
	var (
		localVarHTTPMethod   = _nethttp.MethodPost
		localVarPostBody     interface{}
		localVarFormFileName string
		localVarFileName     string
		localVarFileBytes    []byte
		localVarReturnValue  VulnerabilitiesResponse
	)

	// create path and map variables
	localVarPath := a.client.cfg.BasePath + "/v1/vulnerabilities"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := _neturl.Values{}
	localVarFormParams := _neturl.Values{}

	// to determine the Content-Type header
	localVarHTTPContentTypes := []string{"application/json"}

	// set Content-Type header
	localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
	if localVarHTTPContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHTTPContentType
	}

	// to determine the Accept header
	localVarHTTPHeaderAccepts := []string{"application/json"}

	// set Accept header
	localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
	if localVarHTTPHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
	}

	// body params
	if localVarOptionals != nil && localVarOptionals.VulnerabilitiesRequest.IsSet() {
		localVarOptionalVulnerabilitiesRequest, localVarOptionalVulnerabilitiesRequestok := localVarOptionals.VulnerabilitiesRequest.Value().(VulnerabilitiesRequest)
		if !localVarOptionalVulnerabilitiesRequestok {
			return localVarReturnValue, nil, reportError("vulnerabilitiesRequest should be VulnerabilitiesRequest")
		}
		localVarPostBody = &localVarOptionalVulnerabilitiesRequest
	}

	r, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
	if err != nil {
		return localVarReturnValue, nil, err
	}

	localVarHTTPResponse, err := a.client.callAPI(r)
	if err != nil || localVarHTTPResponse == nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
	localVarHTTPResponse.Body.Close()
	if err != nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	// Non-2xx statuses become a GenericOpenAPIError carrying the raw body.
	if localVarHTTPResponse.StatusCode >= 300 {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: localVarHTTPResponse.Status,
		}
		if localVarHTTPResponse.StatusCode == 200 {
			var v VulnerabilitiesResponse
			err = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHTTPResponse, newErr
			}
			newErr.model = v
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	err = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
	if err != nil {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: err.Error(),
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	return localVarReturnValue, localVarHTTPResponse, nil
}
package middlewares

import (
	"fmt"
	"encoding/hex"
	"net/http"

	"github.com/bitly/go-simplejson"
	"github.com/pkg/errors"
)

// stringToHex encodes a string as its hex representation.
func stringToHex(input string) string {
	result := hex.EncodeToString([]byte(input))
	return result
}

// getlength renders a byte length as a two-digit hex string (length is the
// character count of an already-hex-encoded value, hence the /2).
func getlength(length int) string {
	// Guard against returning 0.
	// NOTE(review): this branch returns with a "0x" prefix while the general
	// case does not — confirm the intended wire format.
	if length == 1 {
		return "0x01"
	} else {
		result := fmt.Sprintf("%02x", length/2)
		return result
	}
}

// getRequest builds the length-prefixed key/value insert transaction payload
// ("0x01" opcode followed by the encoded key and hex-encoded value).
func getRequest(inputKey string, inputValue string) string {
	// Encode: string to HEX.
	stringValue := stringToHex(inputValue)

	partM := getlength(len(inputKey))
	partL := getlength(len(partM))
	fmt.Println("lenK: ", partL, " ", partM)
	retultK := partL + partM + inputKey

	partM = getlength(len(stringValue))
	partL = getlength(len(partM))
	fmt.Println("lenM: ", len(stringValue), partM, " ", partL)
	retultV := partL + partM + stringValue

	reault := "0x01" + retultK + retultV

	return reault
}

// InsertIntoMT submits a key/value pair to the Tendermint node at url via
// broadcast_tx_commit. Returns an error when the node reports one.
func InsertIntoMT(url string, inputKey string, inputValue string) error {
	request := url + "/broadcast_tx_commit?tx=" + getRequest(inputKey, inputValue)
	fmt.Println("url, ", request)
	res, err := http.Get(request)
	if err != nil {
		fmt.Println(err)
		return err
	}

	// Deserialize the JSON response.
	js, err := simplejson.NewFromReader(res.Body)
	if err != nil {
		panic(err.Error())
	}

	info := js.Get("error").MustString()
	fmt.Println("data ", info)
	if info == "" {
		return nil
	} else {
		return errors.New(info)
	}
}

// getFileRequest builds the file-comparison transaction payload ("0x02"
// opcode followed by the encoded project and student keys).
// Encode: string to HEX. Example transaction:
// stringValue := stringToHex(inputValue) tx=0x020102null0117QmPiGJvbS6Dq8BzV5wP2vQBrzHZDZsyARidEFkbv2noF9u
func getFileRequest(projectKey string, studentKey string) string {
	partM := getlength(len(projectKey))
	partL := getlength(len(partM))
	fmt.Println("lenK: ", partL, " ", partM)
	retultK := partL + partM + projectKey

	partM = getlength(len(studentKey))
	partL = getlength(len(partM))
	fmt.Println("lenM: ", partM, " ", partL)
	retultV := partL + partM + studentKey

	reault := "0x02" + retultK + retultV

	return reault
}

// Comparefiles asks the node to compare the files referenced by the two keys
// and returns "success" when the node answers "Matched", "failure" otherwise.
func Comparefiles(url string, projectKey string, studentKey string) string {
	request := url + "/broadcast_tx_commit?tx=" + getFileRequest(projectKey, studentKey)
	fmt.Println("url, ", request)
	res, err := http.Get(request)
	// NOTE(review): the error from http.Get is overwritten unchecked; a failed
	// request panics below on the nil body instead of being handled.
	js, err := simplejson.NewFromReader(res.Body)
	if err != nil {
		panic(err.Error())
	}

	info := js.Get("result").Get("deliver_tx").Get("data").MustString()
	fmt.Println("compare result ", info)

	result, err := hex.DecodeString(info)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", result)

	if string(result) == "Matched" {
		return "success"
	} else {
		return "failure"
	}
}

// searchValue queries the node for a key and returns the decoded value, or
// "error" when the key does not exist.
func searchValue(url string, inputKey string) string {
	//request := url+"/abci_query?data=0x"+inputKey+"&path=\"\"&prove=false"
	// NOTE(review): the url and inputKey parameters are ignored — the request
	// below is a hard-coded debug leftover; confirm before using in production.
	request := "http://localhost:46657/abci_query?data=0x44CBAE1AC3FC5B5BE5A6C4626147BDFE8BEDB837&prove=false"
	fmt.Println("url, ", request)
	res, err := http.Get(request)
	// NOTE(review): the error from http.Get is not checked here either.
	js, err := simplejson.NewFromReader(res.Body)
	if err != nil {
		panic(err.Error())
	}

	info := js.Get("result").Get("response").Get("log").MustString()
	fmt.Println("log ", info)

	if info == "exists" {
		value := js.Get("result").Get("response").Get("value").MustString()
		decoded, err := hex.DecodeString(value)
		if err != nil {
			panic(err.Error())
		}
		return string(decoded)
	} else {
		return "error"
	}
}

//func main() {
//
//	url := "http://localhost:46657"
//
//	//project
//	key := "44CBAE1AC3FC5B5BE5A6C4626147BDFE8BEDB837"
//
//	value := "Qmeza6kXi6kd3962Yb1biD2juqGNN62JrYwhra3PyeuyEs"
//
//	fmt.Println(insertIntoMT(url,key,value))
//
//	key = "44CBAE1AC3FC5B5BE5A6C4626147BDFE8BEDB836"
//
//	value = "QmWY5RMFpwPkdmAs6tDmUKdyXgE9BYEYzBqGwVk9o1DVcK"
//	fmt.Println(insertIntoMT(url,key,value))
//	fmt.Println(searchValue(url,key))
//}
package main

import (
	"context"
	"os"
	"os/signal"
	"time"

	"github.com/BukkitAPI-Translation-Group/docsbox/conf"
	"github.com/BukkitAPI-Translation-Group/docsbox/router"
	"github.com/BukkitAPI-Translation-Group/docsbox/updater"
	"github.com/BukkitAPI-Translation-Group/docsbox/util"
	"github.com/labstack/echo/middleware"
	"github.com/labstack/gommon/log"
)

// main loads configuration, prepares working directories, starts the HTTP
// (and optionally HTTPS) server, and shuts down gracefully on interrupt.
func main() {
	println("DocsBox - version:0.3")

	// Git and Java are hard requirements (cloning sources, running javadoc).
	if !util.IsCommandExist("git") || !util.IsCommandExist("java") || !util.IsCommandExist("javadoc") {
		panic("该软件的正常运行依赖Git(用于克隆源代码)和Java(用于执行Java代码),请先安装Git和Java后重试.")
	}

	if err := conf.LoadConfig(""); err != nil {
		panic(err)
	}

	Init()

	e := router.Load()
	e.Debug = conf.Conf.Server.Debug
	e.Use(middleware.Recover())

	if conf.Conf.Server.GzipLevel > 0 {
		e.Use(middleware.GzipWithConfig(middleware.GzipConfig{
			Level: conf.Conf.Server.GzipLevel,
		}))
	}

	e.HideBanner = true

	// Start the TLS listener only when all three TLS settings are present.
	if len(conf.Conf.Server.TLSKey) != 0 && len(conf.Conf.Server.TLSCert) != 0 && len(conf.Conf.Server.TLSAddr) != 0 {
		go func() {
			err := e.StartTLS(conf.Conf.Server.TLSAddr, conf.Conf.Server.TLSCert, conf.Conf.Server.TLSKey)
			if err != nil {
				e.Logger.Info("shutting down the https server")
			}
		}()
	}

	go func() {
		err := e.Start(conf.Conf.Server.Addr)
		if err != nil {
			e.Logger.Info("shutting down the http server")
		}
	}()

	// Graceful shutdown. The channel must be buffered: signal.Notify does a
	// non-blocking send, so an unbuffered channel can drop the signal
	// (go vet flags this).
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt)
	<-quit

	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	if err := e.Shutdown(ctx); err != nil {
		e.Logger.Fatal(err)
	}
}

// Init creates the working directory layout (best-effort; Mkdir errors are
// ignored because the later document update reports failures itself), primes
// the commit cache, and triggers an initial documentation build.
func Init() {
	_, err := os.Stat("web")
	if os.IsNotExist(err) {
		os.Mkdir("web", 0755)
		os.Mkdir("web/docs", 0755)
		os.Mkdir("web/bukkit", 0755)
	}

	_, err = os.Stat("buildlog")
	if os.IsNotExist(err) {
		os.Mkdir("buildlog", 0755)
	}

	if _, err := os.Stat("data"); os.IsNotExist(err) {
		os.Mkdir("data", 0755)
	}

	err = updater.FetchLatestCommitIDs()
	if err != nil {
		if err == updater.ErrorNotRepository {
			// Fixed typo in the original message ("haven't benn initialized").
			log.Info("git repository hasn't been initialized, it may be initialized later.")
		} else {
			log.Errorf("Unable to fetch the latest commit ids: %s", err)
		}
	}

	updater.UpdateDocument(updater.PushPayload{
		Ref: "refs/heads/1.12.2",
	})
	updater.UpdateDocument(updater.PushPayload{
		Ref: "refs/heads/master",
	})
}
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/google/uuid"
)

const machineIDFilePath = "/etc/insights-client/machine-id"

// getMachineID returns the machine's insights-client UUID as a string,
// creating and persisting a new one when the id file does not exist yet.
// Any failure (generation, file I/O, parsing) aborts the program.
func getMachineID() string {
	if _, statErr := os.Stat(machineIDFilePath); os.IsNotExist(statErr) {
		return createMachineID()
	}

	raw, err := ioutil.ReadFile(machineIDFilePath)
	if err != nil {
		log.Fatal(err)
	}

	parsed, err := uuid.Parse(string(raw))
	if err != nil {
		log.Fatal(err)
	}

	return parsed.String()
}

// createMachineID generates a fresh UUID, writes it to machineIDFilePath,
// and returns its canonical string form.
func createMachineID() string {
	id, err := uuid.NewUUID()
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Create(machineIDFilePath)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fmt.Fprintf(f, "%s", id)

	return id.String()
}
package craft

import (
	"archive/tar"
	"archive/zip"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/danhale-git/craft/internal/files"

	"github.com/danhale-git/craft/mcworld"

	docker "github.com/docker/docker/api/types"
	"github.com/docker/docker/client"

	"github.com/danhale-git/craft/internal/backup"

	"github.com/danhale-git/craft/internal/logger"

	"github.com/danhale-git/craft/internal/configure"

	"github.com/danhale-git/craft/server"
)

// DockerClient returns a docker API client configured from the environment.
// Failure to create the client is fatal.
func DockerClient() *client.Client {
	c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		logger.Error.Fatalf("Error: Failed to create new docker client: %s", err)
	}

	return c
}

// GetServerOrExit is a convenience function for attempting to find an existing docker container with the given name.
// If not found, a helpful error message is printed and the program exits without error.
func GetServerOrExit(containerName string) *server.Server {
	s, err := server.Get(DockerClient(), containerName)

	if err != nil {
		// Container was not found
		if errors.Is(err, &server.NotFoundError{}) {
			logger.Info.Println(err)
			os.Exit(0)
		} else if errors.Is(err, &server.NotCraftError{}) {
			logger.Info.Println(err)
			os.Exit(0)
		} else if !s.IsRunning() {
			// NOTE(review): s is the value returned alongside a non-nil err;
			// calling s.IsRunning() here assumes server.Get returns a usable
			// value on error — confirm this cannot be nil.
			logger.Info.Println(err)
			os.Exit(0)
		}

		// Something else went wrong
		logger.Error.Panic(err)
	}

	return s
}

// NewServer spawns a new craft server. Only the name is required. Full path to a .mcworld file, port and a slice of
// "property=newvalue" strings may also be provided.
func NewServer(name string, port int, props []string, mcw mcworld.ZipOpener, useVolume bool) (*server.Server, error) { // Check the server doesn't already exist if backupExists(name) { return nil, fmt.Errorf("server name '%s' is in use by a backup, run 'craft list -a'", name) } // Create a container for the server c, err := server.New(port, name, useVolume) if err != nil { return nil, fmt.Errorf("creating new container: %s", err) } // Copy world files to the server if mcw != nil { zr, err := mcw.Open() if err != nil { c.StopOrPanic() return nil, fmt.Errorf("inavlid world file: %s", err) } if err = backup.RestoreMCWorld(&zr.Reader, c.ContainerID, DockerClient()); err != nil { c.StopOrPanic() return nil, fmt.Errorf("restoring backup: %s", err) } if err = zr.Close(); err != nil { logger.Panic(err) } } // Set the properties if err := SetServerProperties(props, c); err != nil { return nil, fmt.Errorf("setting server properties: %s", err) } return c, nil } // StartServer sorts all available backup files by date and starts a server from the latest backup. 
// StartServer starts the named server. A stopped container is restarted in
// place; when no container exists, the server is recreated from its most
// recent backup. Returns an error if the server is already running or no
// backup exists.
func StartServer(name string, port int) (*server.Server, error) {
	s, err := server.Get(DockerClient(), name)
	if err != nil {
		if errors.Is(err, &server.NotFoundError{}) {
			// No container: fall back to the latest backup, if any.
			if !backupExists(name) {
				return nil, fmt.Errorf("stopped server with name '%s' doesn't exist", name)
			}

			s, err = startServerFromBackup(name, port)
			if err != nil {
				return nil, fmt.Errorf("starting server from backup: %w", err)
			}

			return s, nil
		} else {
			return nil, err
		}
	}

	if s.IsRunning() {
		return nil, fmt.Errorf("server '%s' is already running (run 'craft list')", name)
	}

	err = s.ContainerStart(
		context.Background(),
		s.ContainerID,
		docker.ContainerStartOptions{},
	)
	if err != nil {
		return nil, fmt.Errorf("%s: starting docker container: %s", s.ContainerName, err)
	}

	return s, nil
}

// startServerFromBackup creates a fresh container and restores the latest
// backup zip for the given name into it. The container is stopped again on
// any failure.
func startServerFromBackup(name string, port int) (*server.Server, error) {
	s, err := server.New(port, name, false)
	if err != nil {
		return nil, fmt.Errorf("%s: running server: %s", name, err)
	}

	f, err := latestBackupFile(name)
	if err != nil {
		s.StopOrPanic()
		return nil, err
	}

	backupPath := filepath.Join(backupDirectory(), s.ContainerName)

	// Open backup zip
	zr, err := zip.OpenReader(filepath.Join(backupPath, f.Name()))
	if err != nil {
		s.StopOrPanic()
		return nil, err
	}

	if err = backup.Restore(&zr.Reader, s.ContainerID, DockerClient()); err != nil {
		s.StopOrPanic()
		return nil, err
	}

	if err = zr.Close(); err != nil {
		s.StopOrPanic()
		return nil, fmt.Errorf("closing zip: %s", err)
	}

	return s, nil
}

// SetServerProperties takes a slice of key=value strings and applies them to the server.properties configuration
// file. If a key is missing, an error will be returned and no changes will be made.
func SetServerProperties(propFlags []string, s *server.Server) error { if len(propFlags) > 0 { k := make([]string, len(propFlags)) v := make([]string, len(propFlags)) for i, p := range propFlags { s := strings.Split(p, "=") if !strings.ContainsRune(p, '=') || len(s[0]) == 0 || len(s[1]) == 0 { return fmt.Errorf("invalid property '%s' should be 'key=value'", p) } k[i] = s[0] v[i] = s[1] } containerPath := files.FullPaths.ServerProperties data, _, err := s.CopyFromContainer( context.Background(), s.ContainerID, containerPath, ) if err != nil { return fmt.Errorf("copying data from server at '%s': %s", containerPath, err) } tr := tar.NewReader(data) _, err = tr.Next() if err == io.EOF { return fmt.Errorf("no file was found at '%s', got EOF reading tar archive", files.FullPaths.ServerProperties) } if err != nil { return fmt.Errorf("reading tar archive: %s", err) } b, err := ioutil.ReadAll(tr) if err != nil { return err } b, err = configure.SetProperties(k, v, b) if err != nil { return err } var buf bytes.Buffer tw := tar.NewWriter(&buf) hdr := &tar.Header{ Name: filepath.Base(containerPath), Size: int64(len(b)), } if err := tw.WriteHeader(hdr); err != nil { return fmt.Errorf("writing header: %s", err) } if _, err := tw.Write(b); err != nil { return fmt.Errorf("writing body: %s", err) } err = s.CopyToContainer( context.Background(), s.ContainerID, filepath.Dir(containerPath), &buf, docker.CopyToContainerOptions{}, ) if err != nil { return fmt.Errorf("copying files to '%s': %s", filepath.Dir(containerPath), err) } } return nil } // PrintServers prints a list of servers. If all is true then stopped servers will be printed. Running servers show the // port players should connect on and stopped servers show the date and time at which they were stopped. 
func PrintServers(all bool) error {
	w := tabwriter.NewWriter(os.Stdout, 3, 3, 3, ' ', tabwriter.TabIndent)

	stoppedContainers := make([]*server.Server, 0)

	servers, err := server.All(DockerClient())
	if err != nil {
		return fmt.Errorf("getting server clients: %s", err)
	}

	// Print running servers
	for _, s := range servers {
		// Re-fetch each server by name; the inner s shadows the loop variable.
		s, err := server.Get(DockerClient(), s.ContainerName)
		if err != nil {
			return fmt.Errorf("creating docker client: %s", err)
		}

		// Stopped containers are collected and printed later.
		if !s.IsRunning() {
			stoppedContainers = append(stoppedContainers, s)
			continue
		}

		port, err := s.Port()
		if err != nil {
			return fmt.Errorf("getting port for container '%s': '%s'", s.ContainerName, err)
		}

		if _, err := fmt.Fprintf(w, "%s\trunning - port %d\n", s.ContainerName, port); err != nil {
			return fmt.Errorf("writing to table: %s", err)
		}
	}

	if !all {
		if err = w.Flush(); err != nil {
			return fmt.Errorf("writing output to console: %s", err)
		}

		return nil
	}

	// Print stopped servers without mounted volumes
	for _, n := range stoppedServerNames() {
		// Skip names that belong to an active (listed) server.
		if func() bool {
			// if n is an active server
			for _, s := range servers {
				if s.ContainerName == n {
					return true
				}
			}
			return false
		}() {
			continue
		}

		// Servers without a backup file are silently skipped.
		f, err := latestBackupFile(n)
		if err != nil {
			continue
		}

		t, err := backup.FileTime(f.Name())
		if err != nil {
			// NOTE(review): this panics while most other failures here return
			// an error or Fatalf — confirm the inconsistency is intended.
			panic(err)
		}

		if _, err := fmt.Fprintf(w, "%s\tstopped - %s\n", n, t.Format("02 Jan 2006 3:04PM")); err != nil {
			logger.Error.Fatalf("Error writing to table: %s", err)
		}
	}

	// Print stopped servers with mounted volumes
	for _, s := range stoppedContainers {
		inspect, err := s.ContainerInspect(context.Background(), s.ContainerID)
		if err != nil {
			return err
		}

		// Docker reports FinishedAt with sub-second precision; strip it
		// before parsing with a second-granularity layout.
		layout := "2006-01-02T15:04:05"

		t, err := time.Parse(layout, strings.Split(inspect.State.FinishedAt, ".")[0])
		if err != nil {
			return fmt.Errorf("failed to pass stopped time for server '%s': %w", s.ContainerName, err)
		}

		p, err := s.Port()
		if err != nil {
			return err
		}

		if _, err := fmt.Fprintf(w, "%s\tstopped (volume) - port %d - %s\n",
			s.ContainerName, p, t.Format("02 Jan 2006 3:04PM")); err != nil {
			logger.Error.Fatalf("Error writing to table: %s", err)
		}
	}

	if err = w.Flush(); err != nil {
		logger.Error.Fatalf("Error writing output to console: %s", err)
	}

	return nil
}
package main

import (
	"fmt"
	"time"
)

/**
 * author: will fan
 * created: 2020/4/14 11:15
 * description: switch-statement examples — value switch, expression-less
 * switch, case value lists, fallthrough, and case-expression evaluation order.
 */

func main() {
	// Value switch on the current weekday.
	switch today := time.Now().Weekday(); today {
	case time.Saturday:
		fmt.Println("Today is Saturday")
	case time.Sunday:
		fmt.Println("Today is Sunday")
	default:
		fmt.Println("Today is weekday")
	}

	// An expression-less switch behaves like an if/else-if chain.
	h := time.Now().Hour()
	switch {
	case h < 12:
		fmt.Println("Good morning!")
	case h < 17:
		fmt.Println("Good afternoon")
	default:
		fmt.Println("Good evening")
	}

	// A single case may list several candidate values.
	isSpace := func(r rune) bool {
		switch r {
		case ' ', '\t', '\n', '\f', '\r':
			return true
		}
		return false
	}
	fmt.Println(isSpace(' '))

	// fallthrough transfers control to the next case unconditionally.
	switch n := 2; n {
	case 1:
		fmt.Println("1")
		fallthrough
	case 2:
		fmt.Println("2")
		fallthrough
	case 3:
		fmt.Println("3")
	}

	// Case expressions are evaluated left to right, top to bottom, until one
	// matches the switch value; trace makes that order visible.
	trace := func(n int) int {
		fmt.Println(n)
		return n
	}
	fmt.Println("////////////////")
	switch trace(1) {
	case trace(0), trace(1), trace(2):
		fmt.Println("First case")
		fallthrough
	case trace(3):
		fmt.Println("Second case ")
	}
}
package mmcore // #cgo CFLAGS: -I../MMCoreC // // #include "MMCoreC.h" import "C" import ( "fmt" ) type Error int func statusToError(status C.MM_Status) error { if int(C.int(status)) == 0 { return nil } return Error(int(C.int(status))) } func (e Error) Error() string { s := errText[e] if s == "s" { return fmt.Sprintf("error %d", int(e)) } return s } var ( ErrGENERIC Error = 1 ErrNoDevice Error = 2 ErrSetPropertyFailed Error = 3 ErrLibraryFunctionNotFound Error = 4 ErrModuleVersionMismatch Error = 5 ErrDeviceVersionMismatch Error = 6 ErrUnknownModule Error = 7 ErrLoadLibraryFailed Error = 8 ErrCreateFailed Error = 9 ErrCreateNotFound Error = 10 ErrDeleteNotFound Error = 11 ErrDeleteFailed Error = 12 ErrUnexpectedDevice Error = 13 ErrDeviceUnloadFailed Error = 14 ErrCameraNotAvailable Error = 15 ErrDuplicateLabel Error = 16 ErrInvalidLabel Error = 17 ErrInvalidStateDevice Error = 19 ErrNoConfiguration Error = 20 ErrInvalidConfigurationIndex Error = 21 ErrDEVICE_GENERIC Error = 22 ErrInvalidPropertyBlock Error = 23 ErrUnhandledException Error = 24 ErrDevicePollingTimeout Error = 25 ErrInvalidShutterDevice Error = 26 ErrInvalidSerialDevice Error = 27 ErrInvalidStageDevice Error = 28 ErrInvalidSpecificDevice Error = 29 ErrInvalidXYStageDevice Error = 30 ErrFileOpenFailed Error = 31 ErrInvalidCFGEntry Error = 32 ErrInvalidContents Error = 33 ErrInvalidCoreProperty Error = 34 ErrInvalidCoreValue Error = 35 ErrNoConfigGroup Error = 36 ErrCameraBufferReadFailed Error = 37 ErrDuplicateConfigGroup Error = 38 ErrInvalidConfigurationFile Error = 39 ErrCircularBufferFailedToInitialize Error = 40 ErrCircularBufferEmpty Error = 41 ErrContFocusNotAvailable Error = 42 ErrAutoFocusNotAvailable Error = 43 ErrBadConfigName Error = 44 ErrCircularBufferIncompatibleImage Error = 45 ErrNotAllowedDuringSequenceAcquisition Error = 46 ErrOutOfMemory Error = 47 ErrInvalidImageSequence Error = 48 ErrNullPointerException Error = 49 ErrCreatePeripheralFailed Error = 50 ErrPropertyNotInCache Error 
= 51 ErrBadAffineTransform Error = 52 ) var errText = map[Error]string{ ErrGENERIC: "generic (unspecified) error", ErrNoDevice: "no device", ErrSetPropertyFailed: "set property failed", ErrLibraryFunctionNotFound: "library function not found", ErrModuleVersionMismatch: "module version mismatch", ErrDeviceVersionMismatch: "device version mismatch", ErrUnknownModule: "unknown module", ErrLoadLibraryFailed: "load library failed", ErrCreateFailed: "create failed", ErrCreateNotFound: "create not found", ErrDeleteNotFound: "delete not found", ErrDeleteFailed: "delete failed", ErrUnexpectedDevice: "unexpected device", ErrDeviceUnloadFailed: "device unload failed", ErrCameraNotAvailable: "camera not available", ErrDuplicateLabel: "duplicated label", ErrInvalidLabel: "invalid label", ErrInvalidStateDevice: "invalid state device", ErrNoConfiguration: "no configuration", ErrInvalidConfigurationIndex: "invalid configuration index", ErrDEVICE_GENERIC: "device generic (unspecified) error", ErrInvalidPropertyBlock: "invalid property block", ErrUnhandledException: "unhandled exception", ErrDevicePollingTimeout: "device polling timeout", ErrInvalidShutterDevice: "invalid shutter device", ErrInvalidSerialDevice: "invalid serial device", ErrInvalidStageDevice: "invalid stage device", ErrInvalidSpecificDevice: "invalid specific device", ErrInvalidXYStageDevice: "invalid XY stage device", ErrFileOpenFailed: "file open failed", ErrInvalidCFGEntry: "invalid CFG entry", ErrInvalidContents: "invalid contents", ErrInvalidCoreProperty: "invalid core property", ErrInvalidCoreValue: "invalid core value", ErrNoConfigGroup: "no config group", ErrCameraBufferReadFailed: "camera buffer read failed", ErrDuplicateConfigGroup: "duplicated config group", ErrInvalidConfigurationFile: "invalid configuration file", ErrCircularBufferFailedToInitialize: "circular buffer failed to initialize", ErrCircularBufferEmpty: "circular buffer empty", ErrContFocusNotAvailable: "continuous focus not available", 
ErrAutoFocusNotAvailable: "auto focus not available", ErrBadConfigName: "bad config name", ErrCircularBufferIncompatibleImage: "circular buffer incompatible image", ErrNotAllowedDuringSequenceAcquisition: "not allowed during sequence acquisition", ErrOutOfMemory: "out of memory", ErrInvalidImageSequence: "invalid image sequence", ErrNullPointerException: "null pointer exception", ErrCreatePeripheralFailed: "create peripheral failed", ErrPropertyNotInCache: "property not in cache", ErrBadAffineTransform: "bad affine transform", }
package soapboxd import ( "database/sql" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/adhocteam/soapbox/proto" gpb "github.com/golang/protobuf/ptypes/timestamp" "github.com/pkg/errors" "golang.org/x/net/context" ) func (s *Server) ListConfigurations(ctx context.Context, req *proto.ListConfigurationRequest) (*proto.ListConfigurationResponse, error) { // TODO(paulsmith): maybe embed actual config vars in response payload query := `SELECT version, created_at FROM configurations WHERE environment_id = $1` var configs []*proto.Configuration envID := req.GetEnvironmentId() rows, err := s.db.Query(query, envID) if err != nil { return nil, errors.Wrap(err, "querying configurations table") } for rows.Next() { c := &proto.Configuration{ EnvironmentId: envID, } var createdAt time.Time if err := rows.Scan(&c.Version, &createdAt); err != nil { return nil, errors.Wrap(err, "scanning db row") } setPbTimestamp(c.CreatedAt, createdAt) configs = append(configs, c) } if err := rows.Err(); err != nil { return nil, errors.Wrap(err, "db iteration") } resp := &proto.ListConfigurationResponse{ Configs: configs, } return resp, nil } func (s *Server) GetLatestConfiguration(ctx context.Context, req *proto.GetLatestConfigurationRequest) (*proto.Configuration, error) { // TODO(paulsmith): FIXME return error or message with error // semantics if we get nothing back from the db, instead of // returning a zero-value configuration (in the case where an // environment doesn't have any configurations) query := ` SELECT version, created_at FROM configurations WHERE environment_id = $1 ORDER BY version DESC LIMIT 1 ` config := &proto.Configuration{ CreatedAt: new(gpb.Timestamp), } envID := req.GetEnvironmentId() var createdAt time.Time if err := s.db.QueryRow(query, envID).Scan(&config.Version, &createdAt); err != nil { if err == sql.ErrNoRows { return nil, status.Error(codes.NotFound, "no configuration found for environment") } return nil, errors.Wrap(err, 
"querying configurations table") } setPbTimestamp(config.CreatedAt, createdAt) appSlug, envSlug, err := s.getSlugs(ctx, envID) if err != nil { return nil, errors.Wrap(err, "getting env and app slugs") } config.ConfigVars, err = s.configurationStore.GetConfigVars(appSlug, envSlug, config.Version) if err != nil { return nil, errors.Wrap(err, "getting config variables") } return config, nil } func (s *Server) CreateConfiguration(ctx context.Context, req *proto.CreateConfigurationRequest) (*proto.Configuration, error) { envID := req.GetEnvironmentId() tx, err := s.db.Begin() if err != nil { return nil, errors.Wrap(err, "beginning transaction") } defer tx.Rollback() config := &proto.Configuration{ EnvironmentId: envID, ConfigVars: req.ConfigVars, CreatedAt: new(gpb.Timestamp), } configQuery := ` INSERT INTO configurations (environment_id) VALUES ($1) RETURNING created_at, version` var createdAt time.Time if err := tx.QueryRow(configQuery, envID).Scan(&createdAt, &config.Version); err != nil { return nil, errors.Wrap(err, "inserting into configurations table") } setPbTimestamp(config.CreatedAt, createdAt) env, err := s.GetEnvironment(ctx, &proto.GetEnvironmentRequest{Id: envID}) if err != nil { return nil, errors.Wrap(err, "getting environment") } app, err := s.GetApplication(ctx, &proto.GetApplicationRequest{Id: env.GetApplicationId()}) if err != nil { return nil, errors.Wrap(err, "getting application") } kmsKeyARN := app.GetAwsEncryptionKeyArn() appSlug := app.GetSlug() envSlug := env.GetSlug() err = s.configurationStore.SaveConfigVars(appSlug, envSlug, config.Version, config.ConfigVars, kmsKeyARN) if err != nil { return nil, errors.Wrap(err, "saving config variables") } if err := tx.Commit(); err != nil { return nil, errors.Wrap(err, "committing transaction") } return config, nil } func (s *Server) DeleteConfiguration(ctx context.Context, req *proto.DeleteConfigurationRequest) (*proto.Empty, error) { envID := req.GetEnvironmentId() version := req.GetVersion() tx, err 
:= s.db.Begin() if err != nil { return nil, errors.Wrap(err, "beginning transaction") } defer tx.Rollback() query := `DELETE FROM configurations WHERE environment_id = $1 AND version = $2` if _, err := tx.Exec(query, envID, version); err != nil { return nil, errors.Wrap(err, "deleting configuration from local database") } appSlug, envSlug, err := s.getSlugs(ctx, envID) if err != nil { return nil, errors.Wrap(err, "getting env and app slugs") } err = s.configurationStore.DeleteConfigVars(appSlug, envSlug, version) if err != nil { return nil, errors.Wrap(err, "deleting config variables") } if err := tx.Commit(); err != nil { return nil, errors.Wrap(err, "committing transaction") } return &proto.Empty{}, nil } func (s *Server) getSlugs(ctx context.Context, envID int32) (string, string, error) { env, err := s.GetEnvironment(ctx, &proto.GetEnvironmentRequest{Id: envID}) if err != nil { return "", "", errors.Wrap(err, "getting environment") } app, err := s.GetApplication(ctx, &proto.GetApplicationRequest{Id: env.GetApplicationId()}) if err != nil { return "", "", errors.Wrap(err, "getting application") } return app.GetSlug(), env.GetSlug(), nil }
// Code generated from /Users/renyunyi/go_project/gengine/internal/iantlr/gengine.g4 by ANTLR 4.9. DO NOT EDIT.

package parser // gengine

import "github.com/antlr/antlr4/runtime/Go/antlr"

// BasegengineVisitor is the generated default visitor for the gengine
// grammar: every Visit* method simply delegates to VisitChildren, so a
// concrete visitor only needs to override the rules it cares about.
// NOTE: this file is ANTLR-generated; regenerate from gengine.g4 rather
// than editing the method bodies by hand.
type BasegengineVisitor struct {
	*antlr.BaseParseTreeVisitor
}

func (v *BasegengineVisitor) VisitPrimary(ctx *PrimaryContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitRuleEntity(ctx *RuleEntityContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitRuleName(ctx *RuleNameContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitRuleDescription(ctx *RuleDescriptionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitSalience(ctx *SalienceContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitRuleContent(ctx *RuleContentContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitStatements(ctx *StatementsContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitStatement(ctx *StatementContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitConcStatement(ctx *ConcStatementContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitExpression(ctx *ExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitMathExpression(ctx *MathExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitExpressionAtom(ctx *ExpressionAtomContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitAssignment(ctx *AssignmentContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitReturnStmt(ctx *ReturnStmtContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitIfStmt(ctx *IfStmtContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitElseIfStmt(ctx *ElseIfStmtContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitElseStmt(ctx *ElseStmtContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitConstant(ctx *ConstantContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitFunctionArgs(ctx *FunctionArgsContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitInteger(ctx *IntegerContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitRealLiteral(ctx *RealLiteralContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitStringLiteral(ctx *StringLiteralContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitBooleanLiteral(ctx *BooleanLiteralContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitFunctionCall(ctx *FunctionCallContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitMethodCall(ctx *MethodCallContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitVariable(ctx *VariableContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitMathPmOperator(ctx *MathPmOperatorContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitMathMdOperator(ctx *MathMdOperatorContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitComparisonOperator(ctx *ComparisonOperatorContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitLogicalOperator(ctx *LogicalOperatorContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitAssignOperator(ctx *AssignOperatorContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitNotOperator(ctx *NotOperatorContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitMapVar(ctx *MapVarContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitAtName(ctx *AtNameContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitAtId(ctx *AtIdContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitAtDesc(ctx *AtDescContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BasegengineVisitor) VisitAtSal(ctx *AtSalContext) interface{} {
	return v.VisitChildren(ctx)
}
package problem0155 // MinStack 最小栈 type MinStack struct { data []int size int capacity int min int } // Constructor  构造器 func Constructor() MinStack { return MinStack{ data: make([]int, 10), capacity: 10, size: 0, min: 0, } } // Push 入栈 func (stack *MinStack) Push(x int) { if stack.size >= stack.capacity { stack.capacity *= 2 tmp := make([]int, stack.capacity) copy(tmp, stack.data) stack.data = tmp } if stack.size == 0 || stack.min > x { stack.min = x } stack.data[stack.size] = x stack.size++ } // Pop 出栈 func (stack *MinStack) Pop() { top := stack.Top() stack.size-- if top <= stack.min { min := stack.data[0] for i := 1; i < stack.size; i++ { if min > stack.data[i] { min = stack.data[i] } } stack.min = min } } // Top 栈顶 func (stack *MinStack) Top() int { return stack.data[stack.size-1] } // GetMin 获取栈中最小元素 func (stack *MinStack) GetMin() int { return stack.min }
package qingstor import ( "context" "strings" "github.com/pengsrc/go-shared/convert" "github.com/sirupsen/logrus" "github.com/yunify/qingstor-sdk-go/v3/service" "github.com/yunify/qscamel/model" "github.com/yunify/qscamel/utils" ) // List implement source.List func (c *Client) List(ctx context.Context, j *model.DirectoryObject, fn func(o model.Object)) (err error) { cp := utils.Join(c.Path, j.Key) + "/" if cp == "/" { cp = "" } marker := j.Marker for { resp, err := c.client.ListObjects(&service.ListObjectsInput{ Prefix: convert.String(cp), Marker: convert.String(marker), Limit: convert.Int(MaxListObjectsLimit), }) if err != nil { logrus.Errorf("List objects failed for %v.", err) return err } for _, v := range resp.Keys { if *v.MimeType == DirectoryContentType { continue } object := &model.SingleObject{ Key: utils.Relative(*v.Key, c.Path), Size: *v.Size, LastModified: int64(*v.Modified), MD5: strings.Trim(*v.Etag, "\""), } fn(object) } marker = *resp.NextMarker // Update task content. j.Marker = marker err = model.CreateObject(ctx, j) if err != nil { logrus.Errorf("Save task failed for %v.", err) return err } if marker == "" { break } } return } // Reach implement source.Fetch func (c *Client) Reach(ctx context.Context, p string) (url string, err error) { cp := utils.Join(c.Path, p) r, _, err := c.client.GetObjectRequest(cp, nil) if err != nil { return } err = r.Build() if err != nil { return } err = r.SignQuery(3600) if err != nil { return } url = r.HTTPRequest.URL.String() return } // Reachable implement source.Reachable func (c *Client) Reachable() bool { return true }
package handler import ( "testing" "github.com/stretchr/testify/assert" ) func TestHelper_getPageQueryParam(t *testing.T) { page := getPageQueryParam("10") assert.Equal(t, 10, page) page = getPageQueryParam("no number") assert.Equal(t, 1, page) page = getPageQueryParam("") assert.Equal(t, 1, page) }
package main import ( "encoding/json" "fmt" "io/ioutil" "log" "net/http" "os" "strconv" "github.com/gorilla/mux" ) type image struct { Name string `json:"name"` ID int `json:"id"` } type images []image type album struct { Name string `json:"name"` ID int `json:"id"` List images `json:"list"` } type albums []album var gallery = albums{} func CreateAlbum(w http.ResponseWriter, r *http.Request) { var a album a.Name = mux.Vars(r)["name"] a.ID = len(gallery) for _,b := range gallery{ if b.Name == a.Name{ fmt.Fprintf(w, "Album %v already exists", a.Name) json.NewEncoder(w).Encode(gallery) w.WriteHeader(http.StatusAlreadyReported) return } } gallery = append(gallery, a) w.WriteHeader(http.StatusCreated) json.NewEncoder(w).Encode(gallery) fmt.Fprintf(w, "Album %v Created", a) } func AddImage(w http.ResponseWriter, r *http.Request) { //name := mux.Vars(r)["name"] name := r.FormValue("name") r.ParseMultipartForm(10 << 20) file, handler, err := r.FormFile("myFile") if err != nil { fmt.Println("Error Retrieving the File") fmt.Println(err) return } defer file.Close() fmt.Printf("Uploaded File: %+v\n", handler.Filename) fmt.Printf("File Size: %+v\n", handler.Size) fmt.Printf("MIME Header: %+v\n", handler.Header) path, err := os.Getwd() if err != nil { log.Println(err) } tempFile, err := ioutil.TempFile(fmt.Sprintf("%v/gallery/", path), "image-*"+handler.Filename) if err != nil { fmt.Println(err) } defer tempFile.Close() // read all of the contents of our uploaded file into a // byte array fileBytes, err := ioutil.ReadAll(file) if err != nil { fmt.Println(err) } // write this byte array to our temporary file tempFile.Write(fileBytes) // return that we have successfully uploaded our file! 
for i, a := range gallery { if a.Name == name { n := image{ Name: tempFile.Name(), ID: len(a.List), } gallery[i].List = append(gallery[i].List, n) } } fmt.Fprintf(w, "Successfully Uploaded Image\n") } func Deleteimage(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] id, _ := strconv.Atoi(mux.Vars(r)["id"]) for _, a := range gallery { if a.Name == name { for _, b := range a.List { if b.ID == id { path, err := os.Getwd() if err != nil { log.Println(err) } err = os.Remove(path + "/" + b.Name) if err != nil { log.Fatal(err) } fmt.Fprintf(w, "Image %v Deleted", id) } } } } } func DeleteAlbum(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] for i, a := range gallery { if a.Name == name { path, err := os.Getwd() if err != nil { log.Println(err) } for _, b := range a.List { err = os.Remove(path + "/" + b.Name) if err != nil { log.Fatal(err) } } gallery = append(gallery[:i], gallery[i+1:]...) } } fmt.Fprintf(w, "Album %v Deleted", name) } func GetallAlbum(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(gallery) } func Listimagesinalbum(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] for _, a := range gallery { if a.Name == name { json.NewEncoder(w).Encode(a) } } } func Listallimages(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(gallery) }
package server import( "golang.org/x/net/context" empty "github.com/golang/protobuf/ptypes/empty" vending "github.com/dalin-williams/shoppersshop-protoc-dalinwilliams-com/vending" ) func (s *vendingServer) SessionCreateUser(ctx context.Context, msg *vending.SessionCreateUserRequest) (*vending.SessionCreateUserResponse, error){ return &vending.SessionCreateUserResponse{}, nil } func (s *vendingServer) SessionCreateSession(ctx context.Context, msg *empty.Empty)(*vending.SessionCreateSessionResponse, error){ return &vending.SessionCreateSessionResponse{}, nil } func (s *vendingServer) SessionUpdateUser(ctx context.Context, msg *vending.SessionUpdateUserRequest) (*empty.Empty, error){ return &empty.Empty{}, nil } func (s *vendingServer) SessionGetUserByUserId(ctx context.Context, msg *vending.SessionGetUserByUserIdRequest) (*vending.User, error){ return &vending.User{}, nil } func (s *vendingServer) SessionDeleteUser(ctx context.Context, msg *vending.SessionDeleteUserRequest) (*empty.Empty, error){ return &empty.Empty{}, nil } func (s *vendingServer) SessionLoginUser(ctx context.Context, msg *vending.SessionLoginUserRequest) (*empty.Empty, error) { return &empty.Empty{}, nil } func (s *vendingServer) SessionLogoutUser(ctx context.Context, msg *empty.Empty)(*empty.Empty, error){ return &empty.Empty{}, nil } func (s *vendingServer) SessionGetCurrentSession(ctx context.Context, msg *empty.Empty) (*vending.SessionGetCurrentSessionResponse, error) { return &vending.SessionGetCurrentSessionResponse{}, nil }
package consensus import ( "io" "github.com/hashicorp/raft" ) // FSM implements the raft FSM interface // and holds a state type FSM struct { state state } // NewFSM creates a new FSM with // start state of "first" func NewFSM() *FSM { return &FSM{state: first} } // Apply updates our FSM func (f *FSM) Apply(r *raft.Log) interface{} { f.state.Transition(state(r.Data)) return string(f.state) } // Snapshot needed to satisfy the raft FSM interface func (f *FSM) Snapshot() (raft.FSMSnapshot, error) { return nil, nil } // Restore needed to satisfy the raft FSM interface func (f *FSM) Restore(io.ReadCloser) error { return nil }
package main import ( "flag" "fmt" "log" "net" "os" "sync" "syscall" "time" dd "github.com/yawn/doubledash" ) const app = "sw" var ( build = "undefined" version = "unreleased" ) var ( sleep time.Duration timeout time.Duration ) func init() { flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage of %s %s (%s):\n", app, version, build) flag.PrintDefaults() } flag.DurationVar(&timeout, "timeout", 5000*time.Millisecond, "maximum waiting time") flag.DurationVar(&sleep, "sleep", 100*time.Millisecond, "time to sleep in between attempts") flag.Parse() } func main() { var ( args, extra = dd.Split(flag.Args()) urls []string wait sync.WaitGroup ) for _, e := range args[0:] { urls = append(urls, e) wait.Add(1) } if len(urls) < 1 { log.Fatalf("no hosts passed to wait for") } for _, e := range urls { go func(host string) { var started = time.Now() defer wait.Done() for { conn, err := net.Dial("tcp", host) if err != nil { time.Sleep(sleep) if time.Now().Sub(started) > timeout { log.Fatalf("timeout (%d ms) reached waiting for (at least) %q, giving up", timeout/time.Millisecond, host) } } else { conn.Close() break } } }(e) } wait.Wait() if len(extra) > 0 { if err := syscall.Exec(extra[0], extra[0:], os.Environ()); err != nil { log.Fatalf("failed to execute %q: %s", extra, err) } } }
package main import ( "fmt" // "strings" ) func conta_vogais (s string, c chan int) { soma := 0 for i := 0; i < len(s); i++ { if s[i] == 'a' || s[i] == 'e' || s[i] == 'o' || s[i] == 'i' || s[i] == 'u'{ soma++ } } c <- soma } func main () { s := "yuri oliveira franco" c := make(chan int) meio := len(s) / 2 go conta_vogais(s[:meio], c) go conta_vogais(s[meio:], c) x, y := <- c, <- c fmt.Printf("%d + %d = %d\n", x, y, x + y) }
package gcalbot

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/url"
	"strings"

	"github.com/malware-unicorn/go-keybase-chat-bot/kbchat"
	"github.com/malware-unicorn/go-keybase-chat-bot/kbchat/types/chat1"
	"github.com/malware-unicorn/managed-bots/base"
	"golang.org/x/oauth2"
)

// Handler dispatches Keybase chat commands and reactions for the Google
// Calendar bot.
type Handler struct {
	*base.DebugOutput

	stats             *base.StatsRegistry
	kbc               *kbchat.API
	db                *DB
	oauth             *oauth2.Config
	reminderScheduler ReminderScheduler
	tokenSecret       string // secret used to derive login tokens in LoginToken
	httpPrefix        string // base URL of the bot's HTTP configuration UI
}

// Compile-time assertion that Handler implements base.Handler.
var _ base.Handler = (*Handler)(nil)

// NewHandler wires the handler's dependencies together; stats are
// namespaced under the "Handler" prefix.
func NewHandler(
	stats *base.StatsRegistry,
	kbc *kbchat.API,
	debugConfig *base.ChatDebugOutputConfig,
	db *DB,
	oauth *oauth2.Config,
	reminderScheduler ReminderScheduler,
	tokenSecret string,
	httpPrefix string,
) *Handler {
	return &Handler{
		DebugOutput:       base.NewDebugOutput("Handler", debugConfig),
		stats:             stats.SetPrefix("Handler"),
		kbc:               kbc,
		db:                db,
		oauth:             oauth,
		reminderScheduler: reminderScheduler,
		tokenSecret:       tokenSecret,
		httpPrefix:        httpPrefix,
	}
}

// HandleNewConv greets a conversation the bot has just been added to.
func (h *Handler) HandleNewConv(conv chat1.ConvSummary) error {
	welcomeMsg := "Hello! I can get you set up with Google Calendar anytime, just send me `!gcal accounts connect <account nickname>`."
	return base.HandleNewTeam(h.stats, h.DebugOutput, h.kbc, conv, welcomeMsg)
}

// HandleCommand routes an incoming message: reactions (from anyone but the
// bot itself) go to handleReaction, text starting with "!gcal" is matched
// by prefix against the known subcommands, everything else is ignored.
func (h *Handler) HandleCommand(msg chat1.MsgSummary) error {
	if msg.Content.Reaction != nil && msg.Sender.Username != h.kbc.GetUsername() {
		return h.handleReaction(msg)
	}

	if msg.Content.Text == nil {
		return nil
	}

	cmd := strings.TrimSpace(msg.Content.Text.Body)
	if !strings.HasPrefix(cmd, "!gcal") {
		return nil
	}

	// Split the command into shell-like tokens; userErr is a message to
	// echo back to the user (e.g. unbalanced quotes).
	tokens, userErr, err := base.SplitTokens(cmd)
	if err != nil {
		return err
	} else if userErr != "" {
		h.ChatEcho(msg.ConvID, userErr)
		return nil
	}

	switch {
	case strings.HasPrefix(cmd, "!gcal accounts list"):
		h.stats.Count("accounts list")
		return h.handleAccountsList(msg)
	case strings.HasPrefix(cmd, "!gcal accounts connect"):
		h.stats.Count("accounts connect")
		return h.handleAccountsConnect(msg, tokens[3:])
	case strings.HasPrefix(cmd, "!gcal accounts disconnect"):
		h.stats.Count("accounts disconnect")
		return h.handleAccountsDisconnect(msg, tokens[3:])
	case strings.HasPrefix(cmd, "!gcal calendars list"):
		h.stats.Count("calendars list")
		return h.handleCalendarsList(msg, tokens[3:])
	case strings.HasPrefix(cmd, "!gcal configure"):
		h.stats.Count("configure")
		return h.handleConfigure(msg)
	default:
		h.ChatEcho(msg.ConvID, "Unknown command %q", cmd)
		return nil
	}
}

// handleReaction treats a reaction on one of the bot's event-invite
// messages as an RSVP and forwards it as a response-status update. A
// reaction that doesn't map to a known invite is ignored.
func (h *Handler) handleReaction(msg chat1.MsgSummary) error {
	username := msg.Sender.Username
	messageID := msg.Content.Reaction.MessageID
	reaction := msg.Content.Reaction.Body

	invite, account, err := h.db.GetInviteAndAccountByUserMessage(username, messageID)
	if err != nil {
		return err
	} else if invite != nil && account != nil {
		return h.updateEventResponseStatus(invite, account, InviteReaction(reaction))
	}

	return nil
}

// handleConfigure sends an admin a personal link to the bot's web UI for
// configuring calendar notifications in the current channel. The link
// carries a login token derived from the sender's username.
func (h *Handler) handleConfigure(msg chat1.MsgSummary) error {
	isAdmin, err := base.IsAdmin(h.kbc, msg.Sender.Username, msg.Channel)
	if err != nil {
		return err
	}
	if !isAdmin {
		h.ChatEcho(msg.ConvID, "Sorry, but you need to be an admin in order to configure Google Calendar notifications for this channel.")
		return nil
	}
	keybaseUsername := msg.Sender.Username
	token := h.LoginToken(keybaseUsername)
	query := url.Values{}
	query.Add("token", token)
	query.Add("username", keybaseUsername)
	query.Add("conv_id", string(msg.ConvID))
	link := fmt.Sprintf("%s/%s?%s", h.httpPrefix, "gcalbot", query.Encode())
	body := fmt.Sprintf("%s: %s", GetConvHelpText(msg.Channel, true), link)
	if _, err := h.kbc.SendMessageByTlfName(keybaseUsername, body); err != nil {
		h.Debug("failed to send login attempt: %s", err)
	}

	// If we are in a 1-1 conv directly or as a bot user with the sender, skip this message.
	if !base.IsDirectPrivateMessage(h.kbc.GetUsername(), msg.Sender.Username, msg.Channel) {
		h.ChatEcho(msg.ConvID, "OK! I've sent a message to @%s to configure me.", msg.Sender.Username)
	}
	return nil
}

// LoginToken derives a hex token for username from the handler's secret.
// NOTE(review): hmac.New(...).Sum([]byte(username)) appends the MAC of the
// EMPTY message to the username bytes — it does not MAC the username
// (that would be Write(username) then Sum(nil)). This looks unintended,
// but changing it would invalidate tokens unless the verifying side is
// updated in lockstep — confirm against the HTTP server's check before
// touching it.
func (h *Handler) LoginToken(username string) string {
	return hex.EncodeToString(hmac.New(sha256.New, []byte(h.tokenSecret)).Sum([]byte(username)))
}
package strings import ( "testing" ) func TestAllCharsUnique(t *testing.T) { tests := []struct { in string expected bool }{ {"abcdefg", true}, {"abcdefgfedcba", false}, } for _, test := range tests { actual := allCharsUnique(test.in) if actual != test.expected { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } } func TestAllCharsUniqueNoStructures(t *testing.T) { tests := []struct { in string expected bool }{ {"abcdefg", true}, {"abcdefgfedcba", false}, } for _, test := range tests { actual := allCharsUniqueNoStructures(test.in) if actual != test.expected { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } } func TestReverseCString(t *testing.T) { tests := []struct { in string expected string }{ {"abcd\000", "dcba\000"}, {"1234\000", "4321\000"}, {"∂∑ƒå\000", "åƒ∑∂\000"}, {"b∂∑ƒå\000", "åƒ∑∂b\000"}, } for _, test := range tests { actual := reverseCString(test.in) if actual != test.expected { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } } func TestRemoveDuplicateChars(t *testing.T) { tests := []struct { in string expected string }{ {"aabbcc", "abc"}, {"abccbaab", "abc"}, {"abccbaaaabddggeba", "abcdge"}, } for _, test := range tests { actual := removeDuplicateChars(test.in) if actual != test.expected { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } } func TestAreAnagrams(t *testing.T) { tests := []struct { in1 string in2 string expected bool }{ {"b∂∑ƒå", "åƒ∑∂b", true}, {"tar", "rat", true}, {"elbow", "below", true}, {"state", "taste", true}, {"rar", "rat", false}, {"elbow", "bellow", false}, {"cider", "cries", false}, } for _, test := range tests { actual := areAnagrams(test.in1, test.in2) actual2 := areAnagramsOptimized(test.in1, test.in2) // t.Logf("Str1: %v, Str2: %v", test.in1, test.in2) if actual != test.expected != actual2 { t.Errorf("Expected: %v, got: %v", test.expected, actual) t.Errorf("Expected: %v, got: %v", test.expected, actual2) } } } func BenchmarkAreAnagrams(b *testing.B) { str1 := 
"hydroxydeoxycorticosterones" str2 := "hydroxydesoxycorticosterone" for n := 0; n < b.N; n++ { areAnagrams(str1, str2) } } func BenchmarkAreAnagramsOptimized(b *testing.B) { str1 := "hydroxydeoxycorticosterones" str2 := "hydroxydesoxycorticosterone" for n := 0; n < b.N; n++ { areAnagramsOptimized(str1, str2) } } func TestEncodeSpaces(t *testing.T) { tests := []struct { in string expected string }{ {"this is a sentence with spaces", "this%20is%20a%20sentence%20with%20spaces"}, {"nospaces", "nospaces"}, } for _, test := range tests { actual := encodeSpaces(test.in) actual2 := encodeSpacesInPlace(test.in) if actual != test.expected || actual2 != test.expected { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } } func BenchmarkEncodeSpaces(b *testing.B) { str := "this is a sentence with spaces" for n := 0; n < b.N; n++ { encodeSpaces(str) } } func BenchmarkEncodeSpacesInPlace(b *testing.B) { str := "this is a sentence with spaces" for n := 0; n < b.N; n++ { encodeSpacesInPlace(str) } } func TestRotateSquareMatrix(t *testing.T) { tests := []struct { in [][]int expected [][]int }{ { [][]int{[]int{1, 2}, []int{3, 4}}, [][]int{[]int{3, 1}, []int{4, 2}}, }, } for _, test := range tests { actual := rotateSquareMatrix(test.in) for r := range actual { for c := range actual[r] { if actual[r][c] != test.expected[r][c] { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } } } } func TestRotateSquareMatrixInPlace(t *testing.T) { tests := []struct { in [][]int expected [][]int }{ { [][]int{[]int{1, 2}, []int{3, 4}}, [][]int{[]int{3, 1}, []int{4, 2}}, }, } for _, test := range tests { actual := rotateSquareMatrixInPlace(test.in) for r := range actual { for c := range actual[r] { if actual[r][c] != test.expected[r][c] { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } } } } func TestIsRotation(t *testing.T) { tests := []struct { in1 string in2 string expected bool }{ {"waterbottle", "erbottlewat", true}, {"waterbottle", "erbotlewat", false}, } for _, 
test := range tests { actual := isRotatation(test.in1, test.in2) if actual != test.expected { t.Errorf("Expected: %v, got: %v", test.expected, actual) } } }
package models

import (
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"time"

	"app/services"

	validator "github.com/asaskevich/govalidator"
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/mysql"
	"github.com/spf13/viper"
)

// DbMethods wraps the gorm connection so query helpers can hang off it.
type DbMethods struct {
	DB *gorm.DB
}

var instance *gorm.DB
var dmInstance *DbMethods

// DatabaseConnect opens the MySQL connection described by the
// "database.*" viper keys and stores it in the package singletons.
// It panics when the connection cannot be established.
func DatabaseConnect() {
	params := viper.GetString("database.username") + ":" +
		viper.GetString("database.password") + "@/" +
		viper.GetString("database.dbname") +
		"?charset=utf8&parseTime=True&loc=Local"

	db, err := gorm.Open("mysql", params)
	if err != nil {
		panic("Failed to connect database")
	}
	db.LogMode(true)
	fmt.Println("Database connected")

	instance = db
	dmInstance = &DbMethods{DB: instance}
}

// DatabaseMigrate auto-migrates the schema and seeds the admin user
// (only when no "mr.admin" row exists yet).
func DatabaseMigrate() {
	instance.Set("gorm:table_options", "charset=utf8")
	instance.AutoMigrate(&User{}) // Create User table

	salt := services.GenerateRandomString(10)
	uniqKey := services.GenerateRandomString(10)
	hash, err := services.GetPasswordHash("pRCek5iFYm" + salt)
	if err != nil {
		panic(err)
	}

	user := User{}
	instance.Where(&User{Username: "mr.admin"}).Find(&user) // look up existing admin

	if user.ID < 1 {
		admin := &User{Username: "mr.admin", Password: hash, Salt: salt, LastLogin: time.Now(), PasswordChanged: time.Now(), UniqUserKey: uniqKey}
		// BUG FIX: gorm's Create returns *gorm.DB, which is never nil, so
		// the old `if err := instance.Create(admin); err != nil` branch
		// fired on every run. The real failure lives in the .Error field.
		if err := instance.Create(admin).Error; err != nil {
			fmt.Println("An error occurred while creating the \"Admin\" entry")
			fmt.Println(err)
		}
	}

	instance.AutoMigrate(&Notification{}) // Create Notification table
	instance.AutoMigrate(&Company{})      // Create Company table
	instance.Model(&Notification{}).AddForeignKey("company_id", "companies(id)", "SET NULL", "CASCADE")
}

// GetConnection returns the shared gorm connection.
func GetConnection() *gorm.DB {
	return instance
}

// GetDmInstance returns the shared DbMethods wrapper.
func GetDmInstance() *DbMethods {
	return dmInstance
}

// ValidateModel reports whether the struct passes its govalidator tags.
func ValidateModel(modelInstance interface{}) bool {
	_, err := validator.ValidateStruct(modelInstance)
	return err == nil
}

// LoadFixtures seeds a few companies and 101 demo notifications.
func LoadFixtures() {
	db := GetConnection()

	publishersRaw := []string{"Alibaba.com", "Flickr", "Instagram"}
	for _, publisher := range publishersRaw {
		db.Create(&Company{Name: publisher})
	}

	publishers := []Company{}
	db.Find(&publishers)

	for a := 0; a <= 100; a++ {
		_a := strconv.Itoa(a)
		db.Create(&Notification{
			Message:  "Message " + _a,
			Image:    "Image" + _a,
			Header:   "Header" + _a,
			Priority: 2,
			Expired:  time.Now(),
			Button:   "Button " + _a,
			Link:     "http://link" + _a,
			// NOTE(review): indexes the fetched companies by the raw seed
			// count; assumes the table held no other rows — verify.
			CompanyID: publishers[rand.Intn(len(publishersRaw))].ID,
		})
	}
}

// HasError reports whether errors contains at least one real failure.
// gorm's "record not found" is an expected outcome and does not count;
// every other error is logged and counts.
// BUG FIX: the old version returned false as soon as it saw a
// "record not found", hiding any real errors that followed it.
func HasError(errors []error) bool {
	hasReal := false
	for _, err := range errors {
		if strings.ToLower(err.Error()) == "record not found" {
			continue
		}
		fmt.Println(err.Error())
		hasReal = true
	}
	return hasReal
}
// Copyright © 2022 IN2P3 Computing Centre, IN2P3, CNRS // Copyright © 2018 Philippe Voinov // // Contributor(s): Remi Ferrand <remi.ferrand_at_cc.in2p3.fr>, 2021 // // This software is governed by the CeCILL license under French law and // abiding by the rules of distribution of free software. You can use, // modify and/ or redistribute the software under the terms of the CeCILL // license as circulated by CEA, CNRS and INRIA at the following URL // "http://www.cecill.info". // // As a counterpart to the access to the source code and rights to copy, // modify and redistribute granted by the license, users are provided only // with a limited warranty and the software's author, the holder of the // economic rights, and the successive licensors have only limited // liability. // // In this respect, the user's attention is drawn to the risks associated // with loading, using, modifying and/or developing or reproducing the // software by the user in light of its specific status of free software, // that may mean that it is complicated to manipulate, and that also // therefore means that it is reserved for developers and experienced // professionals having in-depth computer knowledge. Users are therefore // encouraged to load and test the software's suitability as regards their // requirements in conditions enabling the security of their systems and/or // data to be ensured and, more generally, to use and operate it in the // same conditions as regards security. // // The fact that you are presently reading this means that you have had // knowledge of the CeCILL license and that you accept its terms. package freeipa import "io" type KerberosConnectOptions struct { Krb5ConfigReader io.Reader KeytabReader io.Reader Username string Realm string }
package printer

import (
	"strconv"

	"github.com/davyxu/tabtoy/util"
	"github.com/davyxu/tabtoy/v2/i18n"
	"github.com/davyxu/tabtoy/v2/model"
)

// valueWrapperJson renders a single cell value as JSON text:
// strings are escaped and quoted, enums are emitted as their numeric
// value, everything else passes through verbatim.
func valueWrapperJson(t model.FieldType, node *model.Node) string {

	switch t {
	case model.FieldType_String:
		return util.StringEscape(node.Value)
	case model.FieldType_Enum:
		return strconv.Itoa(int(node.EnumValue))
	}

	return node.Value
}

// jsonPrinter emits every exported table as one JSON document.
type jsonPrinter struct {
}

// Run writes the JSON envelope (tool name, version) and then each table
// whose output tag matches ".json". Returns nil when a table fails.
func (self *jsonPrinter) Run(g *Globals, outputClass int) *Stream {

	bf := NewStream()

	bf.Printf("{\n")
	bf.Printf("	\"Tool\": \"github.com/davyxu/tabtoy\",\n")
	bf.Printf("	\"Version\": \"%s\",\n", g.Version)

	for tabIndex, tab := range g.Tables {

		if !tab.LocalFD.MatchTag(".json") {
			log.Infof("%s: %s", i18n.String(i18n.Printer_IgnoredByOutputTag), tab.Name())
			continue
		}

		// Comma between table entries.
		// NOTE(review): uses the table's index rather than a
		// "wrote-one-already" flag, so a skipped first table would leave a
		// leading comma — confirm against real multi-table output.
		if tabIndex > 0 {
			bf.Printf(", \n")
		}

		if !printTableJson(bf, tab) {
			return nil
		}

	}

	bf.Printf("}")

	return bf
}

// printTableJson writes one table as a JSON array of row objects.
func printTableJson(bf *Stream, tab *model.Table) bool {

	bf.Printf("	\"%s\":[\n", tab.LocalFD.Name)

	// Iterate over each row.
	for rIndex, r := range tab.Recs {

		bf.Printf("		{ ")

		var hasWriteColumn bool

		// Iterate over each column of the row.
		for rootFieldIndex, node := range r.Nodes {

			if node.SugguestIgnore {
				continue
			}

			// Comma before this column when a previous column was written.
			// NOTE(review): the flag is reset here and set again at the end
			// of the loop body; the exact interplay with skipped columns is
			// order-sensitive — left untouched.
			if hasWriteColumn && rootFieldIndex > 0 {
				bf.Printf(", ")
				hasWriteColumn = false
			}

			if node.IsRepeated {
				bf.Printf("\"%s\":[ ", node.Name)
			} else {
				bf.Printf("\"%s\": ", node.Name)
			}

			// Plain (non-struct) value.
			if node.Type != model.FieldType_Struct {

				if node.IsRepeated {

					// Repeated value sequence.
					for arrIndex, valueNode := range node.Child {

						bf.Printf("%s", valueWrapperJson(node.Type, valueNode))

						// Separator between multiple values.
						if arrIndex < len(node.Child)-1 {
							bf.Printf(", ")
						}

					}

				} else {
					// Single value: the value node is the only child.
					valueNode := node.Child[0]

					bf.Printf("%s", valueWrapperJson(node.Type, valueNode))

				}

			} else {

				// Iterate over the repeated structs.
				for structIndex, structNode := range node.Child {

					// Struct begins.
					bf.Printf("{ ")

					var hasWriteField bool

					// Iterate over one struct's fields.
					for structFieldIndex, fieldNode := range structNode.Child {

						if fieldNode.SugguestIgnore {
							continue
						}

						if hasWriteField && structFieldIndex > 0 {
							bf.Printf(", ")
							hasWriteField = false
						}

						// The value node is always the first child.
						valueNode := fieldNode.Child[0]

						bf.Printf("\"%s\": %s", fieldNode.Name, valueWrapperJson(fieldNode.Type, valueNode))

						hasWriteField = true
					}

					// Struct ends.
					bf.Printf(" }")

					// Separator between multiple structs.
					if structIndex < len(node.Child)-1 {
						bf.Printf(", ")
					}
				}

			}

			if node.IsRepeated {
				bf.Printf(" ]")
			}

			// Mark that a root field has been written (drives the comma logic).
			hasWriteColumn = true

		}

		bf.Printf(" }")

		// Comma between rows.
		if rIndex < len(tab.Recs)-1 {
			bf.Printf(",")
		}

		bf.Printf("\n")

	}

	bf.Printf("	]")

	return true
}

func init() {

	RegisterPrinter("json", &jsonPrinter{})
}
package main import ( "fmt" ) func main() { number := 10 if number == 10 { fmt.Println("true") } else { fmt.Println("false") } fmt.Println("==================") // khởi tạo kết hợp so sánh if a := 100; a > 100 { fmt.Println("a>100") } else { fmt.Println("a <= 100") } fmt.Println("==================") // lưu ý, golang ko nhận else xuống dòng, phải viết else ngay sau } của if number1 := 10 switch number1 { case 1: fmt.Println("1") case 4, 5, 9: // bắt nhiều giá trị fmt.Println("4") case 10: fmt.Println("10") case 20: fmt.Println("high") default: fmt.Println("unknow") } fmt.Println("==================") switch { case number1 > 100: fmt.Println("high") case number1 <= 10: fmt.Println("low") } fmt.Println("==================") // switch : fallthrough switch number1 { case 1: fmt.Println("1") fallthrough case 4: fmt.Println("4") fallthrough case 10: fmt.Println("10") fallthrough case 20: fmt.Println("high") fallthrough default: fmt.Println("unknow") } // fallthrough chạy hết các giá trị kể từ case đúng }
package main type Config struct { MongoDB struct { Host string `yaml:"host"` } `yaml:"mongodb"` WWW struct { Home string `yaml:"home"` } Url struct { Port int `yaml:"port"` Base string `yaml:"base"` } }
package main import ( "context" "fmt" "os/exec" "time" ) type result struct { output []byte err error } func main() { // 基于context实现协程通信 var ( ctx context.Context cancel context.CancelFunc cmd *exec.Cmd resultChan chan *result // 下面利用channel实现协程之间通信 res *result ) ctx, cancel = context.WithCancel(context.TODO()) // channel-- 结果队列 resultChan = make(chan *result, 1000) go func() { var ( output []byte err error ) cmd = exec.CommandContext(ctx, "E:\\Git\\bin\\bash.exe", "-c", "sleep 10;echo hello;") // 基于select函数,去监听ctx.done() done()会在被取消或关闭时返回 // 于是我们可以在主函数中,执行cancel函数,此时select就会监听到ctx.done() // kill 杀死子进程 output, err = cmd.CombinedOutput() // 将子协程的结果输出到channel中 resultChan <- &result{ err: err, output: output, } }() time.Sleep(1 * time.Second) // 1s之后中断掉 // 执行取消函数,变量接收取消函数 cancel() // 在main协程中,等待子协程的退出,并打印任务执行结果 res = <-resultChan fmt.Println(string(res.output), "错误信息", res.err) }
package goose import ( "fmt" "time" "github.com/evelritual/goose/graphics" "github.com/evelritual/goose/input" ) const ( defaultImage = "../../logo.png" ) // Game declares all methods required to run a game type Game interface { Close() error Draw() error FixedUpdate(time.Duration) error Init() error Update() error } type defaultGame struct { keyboard input.Keyboard tex graphics.Texture shouldDraw bool texX float64 texY float64 speedX float64 speedY float64 } // Init ... func (d *defaultGame) Init() error { SetBackgroundColor(&graphics.ColorWhite) d.keyboard = NewKeyboard() t, err := NewTexture(defaultImage) if err != nil { return fmt.Errorf("error loading default image: %v", err) } d.tex = t d.shouldDraw = true d.texX = float64((windowX / 2) - (d.tex.W() / 16)) d.texY = float64((windowY / 2) - (d.tex.H() / 16)) d.speedX = 60 d.speedY = 40 return nil } // Draw ... func (d *defaultGame) Draw() error { if d.tex == nil { return nil } if !d.shouldDraw { return nil } err := d.tex.Draw(int32(d.texX), int32(d.texY), 0.125, 0.125, 0.0) if err != nil { return fmt.Errorf("error drawing default image: %v", err) } return nil } // FixedUpdate .. func (d *defaultGame) FixedUpdate(elapsedTime time.Duration) error { if d.texX <= 0 || int32(d.texX)+(d.tex.W()/8) >= windowX { d.speedX = 0 - d.speedX } if d.texY <= 0 || int32(d.texY)+(d.tex.H()/8) >= windowY { d.speedY = 0 - d.speedY } d.texX += d.speedX * elapsedTime.Seconds() d.texY += d.speedY * elapsedTime.Seconds() return nil } // Update ... func (d *defaultGame) Update() error { if d.keyboard.IsKeyPress(input.KeySpace) { d.shouldDraw = false } if d.keyboard.IsKeyRelease(input.KeySpace) { d.shouldDraw = true } return nil } // Close ... func (d *defaultGame) Close() error { err := d.tex.Close() if err != nil { return fmt.Errorf("error closing game: %v", err) } return nil }
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"sync"

	"github.com/gordonrehling2/web-scrape-go/webscraper"
)

// USER STORY
//
// As a software developer
// I want to consume product item data from a web page and recompose it in JSON
// So that it can be more easily re-purposed

// GOL was updated on Mon 27-Mar-17 and target URL changed
//var targetURL = "http://www.sainsburys.co.uk/webapp/wcs/stores/servlet/CategoryDisplay?listView=tr ue&orderBy=FAVOURITES_FIRST&parent_category_rn=12518&top_category=125 18&langId=44&beginIndex=0&pageSize=20&catalogId=10137&searchTerm=&categ oryId=185749&listId=&storeId=10151&promotionId=#langId=44&storeId=10151&cat alogId=10137&categoryId=185749&parent_category_rn=12518&top_category=1251 8&pageSize=20&orderBy=FAVOURITES_FIRST&searchTerm=&beginIndex=0&hide Filters=true"
var targetURL = "http://www.sainsburys.co.uk/shop/gb/groceries/fruit-veg/ripe---ready#langId=44&storeId=10151&catalogId=10241&categoryId=185749&parent_category_rn=12518&top_category=12518&pageSize=20&orderBy=FAVOURITES_ONLY%7CTOP_SELLERS&searchTerm=&beginIndex=0"

// Result is JSON output for the test
type Result struct {
	Results []webscraper.ProductData `json:"results"`
	Total   float64                  `json:"total"`
}

// fatalBadURL logs the failing URL and terminates the program.
func fatalBadURL(url string, err error) {
	log.Printf("can't open %s due to error '%s'\n", url, err.Error())
	log.Fatal("giving up...")
}

// urlWorker fetches one product page and sends its data on productData.
// NOTE(review): log.Fatal inside a goroutine exits the whole process
// without running other workers' deferred wg.Done calls — acceptable
// here since the program is giving up anyway.
func urlWorker(productData chan webscraper.ProductData, wg *sync.WaitGroup, url string) {
	// mark as done when func exits
	defer wg.Done()

	// get product page
	page, err := webscraper.GetWebPage(url)
	if err != nil {
		fatalBadURL(url, err)
	}

	// return the required product data from the product page
	productData <- webscraper.GetPageProductData(page)
}

// processURLs fetches every product URL concurrently and collects the
// product data into a Result.
func processURLs(urls []string) Result {
	// Create result structure
	result := Result{}

	// Channel buffer: up to chanBuffLen results can queue without a
	// waiting receiver.
	chanBuffLen := 16

	// Create channel to receive product back from goroutine
	productData := make(chan webscraper.ProductData, chanBuffLen)

	// create waitgroup to sync data collection, before returning overall result
	var wg sync.WaitGroup

	// get the workers going
	for _, url := range urls {
		// tell the waitgroup that we have another worker
		wg.Add(1)
		// spin off the worker
		go urlWorker(productData, &wg, url)
	}

	// Collect exactly one result per URL.
	// FIX: the old single-case `select` wrapper added nothing — a plain
	// channel receive blocks identically.
	for i := 0; i < len(urls); i++ {
		product := <-productData
		result.Results = append(result.Results, product)
	}

	// wait for all workers to finish before delivering result
	wg.Wait()

	return result
}

// postProcess sums the unit prices into result.Total.
func postProcess(result *Result) {
	// Use int to sum unit prices, to avoid rounding errors
	pence := 0

	for _, product := range result.Results {
		// FIX: plain truncation turned e.g. 0.29*100 = 28.999… into 28
		// pence; round to the nearest penny instead. Assumes prices are
		// non-negative.
		pence += int(product.UnitPrice*100 + 0.5)
	}

	// convert pence to pounds
	result.Total = float64(pence) / 100
}

func main() {
	// get the target page
	page, err := webscraper.GetWebPage(targetURL)
	if err != nil {
		fatalBadURL(targetURL, err)
	}

	// get all the product URLs from the page
	urls := webscraper.GetLinksWithDivClass(page, "productInfo")

	// process the URLs to get product data
	result := processURLs(urls)

	// sum the unit prices and update the total in the result
	postProcess(&result)

	// Create the actual JSON
	// actualJSON, err := json.Marshal(result)

	// Create pretty JSON, with 3 space indent
	prettyJSON, err := json.MarshalIndent(result, "", "   ")
	if err != nil {
		log.Fatal(err)
	}

	// Output pretty JSON
	fmt.Println(string(prettyJSON))
}
package main import "fmt" func use(fruits ...string) { allFruits := append(fruits, fruits...) for idx, fruit := range allFruits { fmt.Println(idx, " => ", fruit) } } func main() { var fruits = []string { "apple", "banana", "cherry", } use(fruits...) } //OUTPUT: //0 => apple //1 => banana //2 => cherry //3 => apple //4 => banana //5 => cherry
// time: O(n) (O(100n)), space: O(n) func numSquares(n int) int { psq := make([]int, 0, 100) for i := 1; i * i <= 10000; i++ { psq = append(psq, i*i) } m := make([]int, n+1) for i := 1; i <= n; i++ { step := m[i-psq[0]] + 1 for j := 1; j < len(psq); j++ { if psq[j] > i { break } cand := m[i-psq[j]] + 1 if step > cand { step = cand } } m[i] = step } return m[n] }
package main import ( "fmt" "math/rand" "time" ) func main() { var name string fmt.Println("Welcome to rock, paper, scissor!") options := [3]string{"rock", "paper", "scissor"} fmt.Println("The options are:", options) fmt.Printf("Please choose an option: ") fmt.Scanf("%s", &name) fmt.Println("You chose: ", name) switch name { case options[0]: c := 0 solve(c, options[:]) case options[1]: c := 1 solve(c, options[:]) case options[2]: c := 2 solve(c, options[:]) default: fmt.Println("Not a valid option") } } func solve(c int, options []string) { rand.Seed(time.Now().UnixNano()) var a = rand.Intn(3) if c == a { fmt.Println("Tie") } else { if c == 0 { switch { case a == 2: fmt.Println("Bot chooses", options[a]) fmt.Println("You win") case a == 1: fmt.Println("Bot chooses", options[a]) fmt.Println("You lose") } } else if c == 1 { switch { case a == 0: fmt.Println("Bot chooses", options[a]) fmt.Println("You win") case a == 2: fmt.Println("Bot chooses", options[a]) fmt.Println("You lose") } } else if c == 2 { switch { case a == 1: fmt.Println("Bot chooses", options[a]) fmt.Println("You win") case a == 0: fmt.Println("Bot chooses", options[a]) fmt.Println("You lose") } } } }
package config

import (
	"errors"
	"io"
	"os"
	"path/filepath"

	"github.com/mitchellh/go-homedir"
	"github.com/sirupsen/logrus"
)

var errNotFound = errors.New("cannot find config file")

// triedPaths accumulates every path probed, for error reporting.
var triedPaths []string

var (
	// ClientConfigFile Filename of client configuration
	ClientConfigFile string = "client-config.yml"
	// ServerConfigFile Filename of server configuration
	ServerConfigFile string = "server-config.yml"
)

// LoadClientConfig loads client configuration from given path or panic if cannot load
func LoadClientConfig(configPath string) {
	err := LoadClientConfigOrPass(configPath)
	if err != nil {
		for _, p := range triedPaths {
			logrus.Errorf("Tried path: %s", p)
		}
		logrus.Fatalf("could not load client configuration")
	}
}

// LoadServerConfig loads server configuration from given path or panic if cannot load
func LoadServerConfig(configPath string) {
	err := LoadServerConfigOrPass(configPath)
	if err != nil {
		for _, p := range triedPaths {
			logrus.Errorf("Tried path: %s", p)
		}
		logrus.Fatalf("could not load server configuration")
	}
}

// LoadClientConfigOrPass loads client configuration, but return false if not able
func LoadClientConfigOrPass(configPath string) error {
	return loadConfigOrPass(configPath, "BITMAELUM_CLIENT_CONFIG", ClientConfigFile, Client.LoadConfig)
}

// LoadServerConfigOrPass loads server configuration, but return false if not able
func LoadServerConfigOrPass(configPath string) error {
	return loadConfigOrPass(configPath, "BITMAELUM_SERVER_CONFIG", ServerConfigFile, Server.LoadConfig)
}

// loadConfigOrPass implements the search order shared by the client and
// server loaders (previously two copy-pasted functions): an explicitly
// given path first, then the environment variable, then each standard
// search path. Any error other than errNotFound stops the search.
func loadConfigOrPass(configPath, envName, fileName string, loader func(r io.Reader) error) error {
	var err error

	// Try custom path first
	if configPath != "" {
		err = readConfigPath(configPath, loader)
		if err == nil || err != errNotFound {
			return err
		}
	}

	configPath = os.Getenv(envName)
	if configPath != "" {
		err = readConfigPath(configPath, loader)
		if err == nil || err != errNotFound {
			return err
		}
	}

	// try on our search paths
	for _, p := range getSearchPaths() {
		p = filepath.Join(p, fileName)
		err = readConfigPath(p, loader)
		if err == nil || err != errNotFound {
			return err
		}
	}

	return errors.New("cannot find " + fileName)
}

// Expands the given path and loads the configuration. Returns
// errNotFound when the file cannot be opened, so callers can keep
// probing other locations.
func readConfigPath(p string, loader func(r io.Reader) error) error {
	p, _ = homedir.Expand(p)
	triedPaths = append(triedPaths, p)

	f, err := os.Open(p)
	if err != nil {
		return errNotFound
	}

	return loader(f)
}
package utilz

import (
	"encoding/csv"
	"github.com/faiface/pixel"
	"github.com/faiface/pixel/imdraw"
	"github.com/golang/freetype/truetype"
	"golang.org/x/image/font"
	"image"
	"image/color"
	"image/png"
	_ "image/png"
	"io"
	"io/ioutil"
	"math"
	"math/rand"
	"os"
	"strconv"
	"time"
)

/*
e.g. usage :
	heroFrames := pictures.LoadAsFrames(imgSprite, 16)
	heroSprite := pixel.NewSprite(imgSprite, heroFrames[10])
	scaledMatrix := pixel.IM.Scaled(pixel.ZV, 16)
	heroSprite.Draw(win, scaledMatrix.Moved(win.Bounds().Center()))

	*******************************OR********************************
	*** below will render sprite everytime you click on Window    ***
	*****************************************************************

	heroFrames := pictures.LoadAsFrames(imgSprite, 16)
	if win.JustPressed(pixelgl.MouseButtonLeft) {
		tree := pixel.NewSprite(imgSprite, heroFrames[rand.Intn(len(heroFrames))])
		trees = append(trees, tree)
		matrices = append(matrices, pixel.IM.Scaled(pixel.ZV, 4).Moved(win.MousePosition()))
	}
	for i, tree := range trees {
		tree.Draw(win, matrices[i])
	}
*/

// LoadPicture decodes the image at path into a pixel.Picture.
func LoadPicture(path string) (pixel.Picture, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	img, _, err := image.Decode(file)
	if err != nil {
		return nil, err
	}
	return pixel.PictureDataFromImage(img), nil
}

// LoadAsFrames slices a spritesheet into w-by-h frames, scanning rows
// bottom-up (pixel's Y axis grows upward).
func LoadAsFrames(imgSprite pixel.Picture, w, h float64) []pixel.Rect {
	var spriteFrames []pixel.Rect
	for y := imgSprite.Bounds().Min.Y; y < imgSprite.Bounds().Max.Y; y += h {
		for x := imgSprite.Bounds().Min.X; x < imgSprite.Bounds().Max.X; x += w {
			spriteFrames = append(spriteFrames, pixel.R(x, y, x+w, y+h))
		}
	}
	//e.g. pixel.NewSprite(imgSprite, spriteFrames[frameIndex])
	return spriteFrames
}

// LoadAsFramesFromTop slices a spritesheet into w-by-h frames starting
// from the top row (useful when the sheet's frame order is top-down).
func LoadAsFramesFromTop(imgSprite pixel.Picture, w, h float64) []pixel.Rect {
	var spriteFrames []pixel.Rect
	minY := math.Floor(imgSprite.Bounds().Min.Y)
	maxY := math.Floor(imgSprite.Bounds().Max.Y)
	for y := maxY - h; y >= minY; y -= h {
		for x := imgSprite.Bounds().Min.X; x < imgSprite.Bounds().Max.X; x += w {
			spriteFrames = append(spriteFrames, pixel.R(x, y, x+w, y+h))
		}
	}
	//e.g. pixel.NewSprite(imgSprite, spriteFrames[frameIndex])
	return spriteFrames
}

//LoadAnimationFromCSV to set image sprite frames to good use,
// load them as set of animations
/*csv file:
Front,0,0
FrontBlink,1,1
LookUp,2,2
Left,3,7
LeftRight,4,6
LeftBlink,7,7
Walk,8,15
Run,16,23
Jump,24,26
*/
// e.g. animations = LoadAnimationFromCSV("./animations.csv", LoadAsFrames())
// Each CSV row is: name, first frame index, last frame index (inclusive).
// Returns nil on any file or parse error.
func LoadAnimationsFromCSV(descPath string, spriteFrames []pixel.Rect) map[string][]pixel.Rect {
	descFile, err := os.Open(descPath)
	if err != nil {
		return nil
	}
	defer descFile.Close()

	// load the animation information, Name and interval inside the spritesheet
	desc := csv.NewReader(descFile)
	var animations = make(map[string][]pixel.Rect)
	for {
		anim, err := desc.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil
		}
		name := anim[0]
		// NOTE(review): Atoi errors are discarded — a malformed index
		// silently becomes frame 0.
		start, _ := strconv.Atoi(anim[1])
		end, _ := strconv.Atoi(anim[2])
		animations[name] = spriteFrames[start : end+1]
	}
	return animations
}

// GenerateUVs computes normalized texture coordinates for every
// tileWidth-by-tileHeight tile in the texture, row by row.
func GenerateUVs(tileWidth, tileHeight float64, texture pixel.Picture) []UV {
	// This is the table we'll fill with uvs and return.
	var uvs []UV

	textureWidth := texture.Bounds().W()
	textureHeight := texture.Bounds().H()

	width := tileWidth / textureWidth
	height := tileHeight / textureHeight
	cols := textureWidth / tileWidth
	rows := textureHeight / tileHeight

	var ux, uy float64
	var vx, vy float64 = width, height

	for rows > 0 {
		for cols > 0 {
			uvs = append(uvs, UV{ux, uy, vx, vy})

			//Advance the UVs to the next column
			ux = ux + width
			vx = vx + width
			cols -= 1
		}
		// Put the UVs back to the start of the next row
		ux = 0
		vx = width
		uy = uy + height
		vy = vy + height
		rows -= 1
	}

	return uvs
}

//LoadSprite load TMX tile image source
func LoadSprite(path string) (*pixel.Sprite, *pixel.PictureData) {
	f, err := os.Open(path)
	PanicIfErr(err)

	img, err := png.Decode(f)
	PanicIfErr(err)

	pd := pixel.PictureDataFromImage(img)
	return pixel.NewSprite(pd, pd.Bounds()), pd
}

// PanicIfErr panics when err is non-nil; used by the Must-style loaders above.
func PanicIfErr(err error) {
	if err != nil {
		panic(err)
	}
}

// RandInt returns a random int in [min, max).
// NOTE(review): seeds a fresh rand source on every call — fine for
// occasional use, poor for tight loops.
func RandInt(min, max int) int {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return r.Intn(max-min) + min
}

// RandFloat returns a random float64 in [min, max).
func RandFloat(min, max float64) float64 {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return min + r.Float64()*(max-min)
}

// MinInt returns the smaller of a and b.
func MinInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// MaxInt returns the larger of a and b.
func MaxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

//Clamp restricts a number to a certain range. If a value is too high,
// it’s reduced to the maximum.
// If it’s too low, it’s increased to the minimum.
func Clamp(value, min, max float64) float64 { return math.Max(min, math.Min(value, max)) } func LoadTTF(path string, size float64) (font.Face, error) { file, err := os.Open(path) if err != nil { return nil, err } defer file.Close() bytes, err := ioutil.ReadAll(file) if err != nil { return nil, err } font, err := truetype.Parse(bytes) if err != nil { return nil, err } return truetype.NewFace(font, &truetype.Options{ Size: size, GlyphCacheEntries: 1, }), nil } //HexToColor("#E53935") func HexToColor(hex string) (c color.RGBA) { c.A = 0xff errInvalidFormat := color.RGBA{255, 255, 255, 255} if hex[0] != '#' { return errInvalidFormat } hexToByte := func(b byte) byte { switch { case b >= '0' && b <= '9': return b - '0' case b >= 'a' && b <= 'f': return b - 'a' + 10 case b >= 'A' && b <= 'F': return b - 'A' + 10 } return 0 } switch len(hex) { case 7: c.R = hexToByte(hex[1])<<4 + hexToByte(hex[2]) c.G = hexToByte(hex[3])<<4 + hexToByte(hex[4]) c.B = hexToByte(hex[5])<<4 + hexToByte(hex[6]) case 4: c.R = hexToByte(hex[1]) * 17 c.G = hexToByte(hex[2]) * 17 c.B = hexToByte(hex[3]) * 17 default: return errInvalidFormat } return } func GetAlpha(f float64) uint8 { if f >= 1 { return 255 } return uint8(f * 256) } func DebugPxPoint(x, y float64, renderer pixel.Target) { imd := imdraw.New(nil) imd.Color = HexToColor("#ff00ff") imd.Push(pixel.V(x, y)) imd.Circle(3, 0) imd.Draw(renderer) }
package False_Sharing import "sync/atomic" type MyAtomic interface { Increase() } type NoPad struct { a uint64 b uint64 c uint64 } func (atm *NoPad) Increase() { atomic.AddUint64(&atm.a,1) atomic.AddUint64(&atm.b,1) atomic.AddUint64(&atm.c,1) } type Pad struct { a uint64 _p1 [8]uint64 b uint64 _p2 [8]uint64 c uint64 _p3 [8]uint64 } func (atm *Pad) Increase() { atomic.AddUint64(&atm.a,1) atomic.AddUint64(&atm.b,1) atomic.AddUint64(&atm.c,1) }
package router import ( "time" "github.com/go-chi/chi" "github.com/go-chi/chi/middleware" // "github.com/spf13/viper" "github.com/corentindeboisset/golang-api/app/controller" ) func GetRouter() (*chi.Mux, error) { r := chi.NewRouter() // Use some middleware r.Use(middleware.RequestID) r.Use(middleware.RealIP) r.Use(middleware.Logger) r.Use(middleware.Recoverer) r.Use(middleware.NoCache) r.Use(middleware.Timeout(60 * time.Second)) // if viper.GetBool("profiler.enable") { // r.Mount(viper.GetString(), middleware.Profiler) // } userController, err := controller.InitializeUserController() if err != nil { return nil, err } settingController, err := controller.InitializeSettingController() if err != nil { return nil, err } r.Get("/", userController.UserPage) r.Get("/setting", settingController.SettingPage) return r, nil }
package game var ( explosionWidth float32 = 150 explosionHeight float32 = explosionWidth explosionMaxPushSpeed float32 = 300 ) func newExplosion(grenade *Grenade) { x, y := grenade.GetCenter() l, t, w, h := x-explosionWidth/2, y-explosionHeight/2, explosionWidth, explosionHeight gameMap := grenade.parent.gameMap world := gameMap.world gameMap.camera.Shake(6) for _, item := range world.QueryRect(l, t, w, h) { object := gameMap.Get(item) tag := object.tag() if tag == "player" || tag == "guardian" || tag == "block" { object.damage(0.7) } } radius := float32(50) for _, item := range world.QueryRect(l-radius, t-radius, w+radius+radius, h+radius+radius) { object := gameMap.Get(item) tag := object.tag() if tag == "player" || tag == "grenade" || tag == "debris" || tag == "puff" { object.push(300) } } for i := float32(0); i < randRange(15, 30); i++ { newPuff( grenade.parent.gameMap, randRange(l, l+w), randRange(t, t+h), 0, -10, 2, 10, ) } }
package inmemory import ( "github.com/Tanibox/tania-core/src/assets/repository" "github.com/Tanibox/tania-core/src/assets/storage" "github.com/gofrs/uuid" ) type FarmEventRepositoryInMemory struct { Storage *storage.FarmEventStorage } func NewFarmEventRepositoryInMemory(s *storage.FarmEventStorage) repository.FarmEventRepository { return &FarmEventRepositoryInMemory{Storage: s} } // Save is to save func (f *FarmEventRepositoryInMemory) Save(uid uuid.UUID, latestVersion int, events []interface{}) <-chan error { result := make(chan error) go func() { f.Storage.Lock.Lock() defer f.Storage.Lock.Unlock() for _, v := range events { latestVersion++ f.Storage.FarmEvents = append(f.Storage.FarmEvents, storage.FarmEvent{ FarmUID: uid, Version: latestVersion, Event: v, }) } result <- nil close(result) }() return result }
package writer

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"

	"github.com/goldeneggg/ipcl/lib/parser"
)

var (
	// Out is the destination stream; swappable for tests.
	Out io.Writer = os.Stdout

	fpf = fmt.Fprintf

	// headers names the seven emitted fields, in output order.
	headers = []string{"source_cidr", "network", "mask", "host_num", "min_address", "max_address", "broadcast"}
)

// Writer emits parsed CIDR information in some textual format.
type Writer interface {
	Write(cidrs []parser.CIDRInfo)
}

// DefaultWriter prints one "key : value" line per field, blocks
// separated by a blank line.
type DefaultWriter struct {
	w io.Writer
}

// SepWriter prints a header row plus one separator-joined line per CIDR
// (CSV/TSV).
type SepWriter struct {
	*DefaultWriter
	sep string
}

// Write prints every CIDR in the verbose key/value format.
func (dw *DefaultWriter) Write(cidrs []parser.CIDRInfo) {
	for _, cidr := range cidrs {
		dw.writeSingle(cidr)
	}
}

// writeSingle prints one CIDR as seven labelled lines plus a blank line.
func (dw *DefaultWriter) writeSingle(cidr parser.CIDRInfo) {
	fpf(dw.w, "%s : %s\n", headers[0], cidr.SrcCIDR)
	fpf(dw.w, "%s : %s\n", headers[1], cidr.Network)
	fpf(dw.w, "%s : %s\n", headers[2], mask2string(cidr.Mask))
	fpf(dw.w, "%s : %d\n", headers[3], cidr.HostNum)
	fpf(dw.w, "%s : %s\n", headers[4], cidr.Min)
	fpf(dw.w, "%s : %s\n", headers[5], cidr.Max)
	fpf(dw.w, "%s : %s\n", headers[6], cidr.Broadcast)
	fpf(dw.w, "\n")
}

// Write prints the header row followed by one line per CIDR.
func (sw *SepWriter) Write(cidrs []parser.CIDRInfo) {
	sw.writeHeader()
	for _, cidr := range cidrs {
		sw.writeLine(cidr)
	}
}

func (sw *SepWriter) writeHeader() {
	fpf(sw.w, "%s\n", strings.Join(headers, sw.sep))
}

func (sw *SepWriter) writeLine(cidr parser.CIDRInfo) {
	s := []string{cidr.SrcCIDR, cidr.Network.String(), mask2string(cidr.Mask), strconv.Itoa(cidr.HostNum), cidr.Min.String(), cidr.Max.String(), cidr.Broadcast.String()}
	fpf(sw.w, "%s\n", strings.Join(s, sw.sep))
}

// NewWriter selects the output format: CSV, TSV, or the verbose default.
func NewWriter(isCsv bool, isTsv bool) Writer {
	defWriter := &DefaultWriter{Out}
	if isCsv {
		return &SepWriter{defWriter, ","}
	} else if isTsv {
		return &SepWriter{defWriter, "\t"}
	} else {
		return defWriter
	}
}

// mask2string renders a netmask as dotted decimal (e.g. "255.255.255.0").
func mask2string(mask []byte) string {
	var buf bytes.Buffer
	for i, m := range mask {
		buf.WriteString(itod(uint(m)))
		if i < len(mask)-1 {
			buf.WriteString(".")
		}
	}
	return buf.String()
}

// itod formats an unsigned integer in decimal.
// IDIOM FIX: the original hand-assembled digits into a reversed byte
// buffer; strconv.FormatUint does exactly this (including the 0 -> "0"
// case) and is the standard-library way.
func itod(i uint) string {
	return strconv.FormatUint(uint64(i), 10)
}
package service import ( "2019_2_IBAT/pkg/app/notifs/notifsproto" // . "2019_2_IBAT/pkg/pkg/models" "context" "testing" "github.com/google/uuid" ) func TestUserService_SendNotification(t *testing.T) { h := Service{ NotifChan: make(chan NotifStruct, 5), } ctx := context.Background() msg := notifsproto.SendNotificationMessage{ VacancyID: uuid.New().String(), TagIDs: []string{ uuid.New().String(), uuid.New().String(), }, } _, err := h.SendNotification(ctx, &msg) if err != nil { t.Errorf("Unexpected error %s\n", err.Error()) return } }
package types // DONTCOVER import ( sdk "github.com/cosmos/cosmos-sdk/types" ) // query endpoints supported by the NFT Querier const ( QuerySupply = "supply" QueryOwner = "owner" QueryCollection = "collection" QueryDenoms = "denoms" QueryDenom = "denom" QueryNFT = "nft" ) // QuerySupplyParams defines the params for queries: type QuerySupplyParams struct { Denom string Owner sdk.AccAddress } // NewQuerySupplyParams creates a new instance of QuerySupplyParams func NewQuerySupplyParams(denom string, owner sdk.AccAddress) QuerySupplyParams { return QuerySupplyParams{ Denom: denom, Owner: owner, } } // Bytes exports the Denom as bytes func (q QuerySupplyParams) Bytes() []byte { return []byte(q.Denom) } // QueryOwnerParams defines the params for queries: type QueryOwnerParams struct { Denom string Owner sdk.AccAddress } // NewQuerySupplyParams creates a new instance of QuerySupplyParams func NewQueryOwnerParams(denom string, owner sdk.AccAddress) QueryOwnerParams { return QueryOwnerParams{ Denom: denom, Owner: owner, } } // QuerySupplyParams defines the params for queries: type QueryCollectionParams struct { Denom string } // NewQueryCollectionParams creates a new instance of QueryCollectionParams func NewQueryCollectionParams(denom string) QueryCollectionParams { return QueryCollectionParams{ Denom: denom, } } // QueryDenomParams defines the params for queries: type QueryDenomParams struct { ID string } // NewQueryDenomParams creates a new instance of QueryDenomParams func NewQueryDenomParams(id string) QueryDenomParams { return QueryDenomParams{ ID: id, } } // QueryNFTParams params for query 'custom/nfts/nft' type QueryNFTParams struct { Denom string TokenID string } // NewQueryNFTParams creates a new instance of QueryNFTParams func NewQueryNFTParams(denom, id string) QueryNFTParams { return QueryNFTParams{ Denom: denom, TokenID: id, } }
package gasprice

import (
	"crypto/rand"
	"log"
	"math/big"
	"sync"
	"time"
)

// Randomizer randomly calculates a new gas price within a range at regular intervals
type Randomizer struct {
	randomizeInterval time.Duration
	maxGasPrice       *big.Int
	minGasPrice       *big.Int

	// running guards against starting/stopping the loop twice.
	// NOTE(review): it is read and written without synchronization, so
	// concurrent Start/Stop calls race — confirm callers are single-threaded.
	running bool

	mu       sync.RWMutex // protects gasPrice
	gasPrice *big.Int

	quit chan struct{} // closed by Stop to terminate the loop
}

// NewRandomizer returns a Randomizer instance whose initial gas price is min.
func NewRandomizer(interval time.Duration, max *big.Int, min *big.Int) *Randomizer {
	return &Randomizer{
		randomizeInterval: interval,
		maxGasPrice:       max,
		minGasPrice:       min,
		gasPrice:          min,
		quit:              make(chan struct{}),
	}
}

// Start initiates the gas price randomize loop
func (r *Randomizer) Start() {
	if r.running {
		return
	}
	go r.startRandomizeLoop()
	r.running = true
}

// Stop signals the gas price randomize loop to exit gracefully
func (r *Randomizer) Stop() {
	if !r.running {
		return
	}
	close(r.quit)
}

// GasPrice returns the current gas price
func (r *Randomizer) GasPrice() *big.Int {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.gasPrice
}

// randomizeGasPrice picks a uniformly random price in [minGasPrice, maxGasPrice)
// using crypto/rand and stores it under the write lock.
func (r *Randomizer) randomizeGasPrice() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	diff := new(big.Int).Sub(r.maxGasPrice, r.minGasPrice)
	// rand.Int returns a value in [0, diff), so the sum is in [min, max).
	n, err := rand.Int(rand.Reader, diff)
	if err != nil {
		return err
	}
	r.gasPrice = new(big.Int).Add(n, r.minGasPrice)
	log.Printf("new gas price = %v", r.gasPrice)
	return nil
}

// startRandomizeLoop re-randomizes the price every randomizeInterval until
// Stop closes the quit channel.
func (r *Randomizer) startRandomizeLoop() {
	ticker := time.NewTicker(r.randomizeInterval)
	// BUGFIX: the ticker was never stopped, leaking its timer after Stop.
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := r.randomizeGasPrice(); err != nil {
				log.Printf("error randomizing gas price: %v", err)
			}
		case <-r.quit:
			return
		}
	}
}
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
//     http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.

package network

import (
	"net"

	"github.com/aws/amazon-vpc-cni-plugins/network/eni"
	"github.com/aws/amazon-vpc-cni-plugins/network/vpc"
)

// Builder knows how to build container networks and connect container network interfaces.
// Find-or-create methods are expected to be idempotent; Delete methods tear
// down what their counterparts created.
type Builder interface {
	FindOrCreateNetwork(nw *Network) error
	DeleteNetwork(nw *Network) error
	FindOrCreateEndpoint(nw *Network, ep *Endpoint) error
	DeleteEndpoint(nw *Network, ep *Endpoint) error
}

// Network represents a container network.
type Network struct {
	Name                string
	NetworkID           string
	BridgeType          string
	BridgeNetNSPath     string // presumably the netns path hosting the bridge — confirm against Builder impls
	BridgeIndex         int
	SharedENI           *eni.ENI     // the ENI shared by endpoints on this network
	ENIIPAddresses      []net.IPNet  // IP addresses assigned to the shared ENI
	GatewayIPAddress    net.IP
	VPCCIDRs            []net.IPNet
	DNSServers          []string
	DNSSuffixSearchList []string
	ServiceCIDR         string
}

// Endpoint represents a container network interface.
type Endpoint struct {
	ContainerID  string
	NetNSName    string
	IfName       string
	IfType       string
	TapUserID    int // NOTE(review): looks like the UID owning a tap device — confirm against Builder impls
	MACAddress   net.HardwareAddr
	IPAddresses  []net.IPNet
	PortMappings []vpc.PortMapping
}
/* Copyright 2021 The KubeVela Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "embed" "encoding/xml" "fmt" "html/template" "io/fs" "log" "net/http" "os" "path" "strings" "github.com/oam-dev/kubevela/e2e/addon/mock/utils" "github.com/oam-dev/kubevela/pkg/addon" ) var ( //go:embed testdata testData embed.FS paths []struct { path string length int64 } ) func main() { err := utils.ApplyMockServerConfig() if err != nil { log.Fatal(err) } http.HandleFunc("/", ossHandler) http.HandleFunc("/helm/", helmHandler) err = http.ListenAndServe(fmt.Sprintf(":%d", utils.Port), nil) if err != nil { log.Fatal("ListenAndServe: ", err) } } var ossHandler http.HandlerFunc = func(rw http.ResponseWriter, req *http.Request) { queryPath := strings.TrimPrefix(req.URL.Path, "/") if strings.Contains(req.URL.RawQuery, "prefix") { prefix := req.URL.Query().Get("prefix") res := addon.ListBucketResult{ Files: []addon.File{}, Count: 0, } for _, p := range paths { if strings.HasPrefix(p.path, prefix) { res.Files = append(res.Files, addon.File{Name: p.path, Size: int(p.length)}) res.Count++ } } data, err := xml.Marshal(res) error := map[string]error{"error": err} // Make and parse the data t, err := template.New("").Parse(string(data)) if err != nil { // Render the data t.Execute(rw, error) } // Render the data t.Execute(rw, data) } else { found := false for _, p := range paths { if queryPath == p.path { file, err := testData.ReadFile(path.Join("testdata", queryPath)) error := map[string]error{"error": 
err} // Make and parse the data t, err := template.New("").Parse(string(file)) if err != nil { // Render the data t.Execute(rw, error) } found = true t.Execute(rw, file) break } } if !found { nf := "not found" t, _ := template.New("").Parse(nf) t.Execute(rw, nf) } } } var helmHandler http.HandlerFunc = func(rw http.ResponseWriter, req *http.Request) { switch { case strings.Contains(req.URL.Path, "index.yaml"): file, err := os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/index.yaml") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) case strings.Contains(req.URL.Path, "fluxcd-test-version-1.0.0.tgz"): file, err := os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/fluxcd-test-version-1.0.0.tgz") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) case strings.Contains(req.URL.Path, "fluxcd-test-version-2.0.0.tgz"): file, err := os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/fluxcd-test-version-2.0.0.tgz") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) case strings.Contains(req.URL.Path, "vela-workflow-v0.3.5.tgz"): file, err := os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/vela-workflow-v0.3.5.tgz") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) case strings.Contains(req.URL.Path, "foo-v1.0.0.tgz"): file, err := os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/foo-v1.0.0.tgz") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) case strings.Contains(req.URL.Path, "bar-v1.0.0.tgz"): file, err := os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/bar-v1.0.0.tgz") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) case strings.Contains(req.URL.Path, "bar-v2.0.0.tgz"): file, err := os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/bar-v2.0.0.tgz") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) case strings.Contains(req.URL.Path, "mock-be-dep-addon-v1.0.0.tgz"): file, err := 
os.ReadFile("./e2e/addon/mock/testrepo/helm-repo/mock-be-dep-addon-v1.0.0.tgz") if err != nil { _, _ = rw.Write([]byte(err.Error())) } rw.Write(file) } } func init() { _ = fs.WalkDir(testData, "testdata", func(path string, d fs.DirEntry, err error) error { path = strings.TrimPrefix(path, "testdata/") path = strings.TrimPrefix(path, "testdata") info, _ := d.Info() size := info.Size() if path == "" { return nil } if size == 0 { path += "/" } paths = append(paths, struct { path string length int64 }{path: path, length: size}) return nil }) }
package forkexec

import (
	"os"
	"syscall"
	"testing"

	"github.com/criyle/go-sandbox/pkg/mount"
)

// TestFork_DropCaps verifies a run in a new user namespace with capabilities
// dropped starts without error.
func TestFork_DropCaps(t *testing.T) {
	t.Parallel()
	r := Runner{
		Args:       []string{"/bin/echo"},
		CloneFlags: syscall.CLONE_NEWUSER,
		DropCaps:   true,
	}
	if _, err := r.Start(); err != nil {
		t.Fatal(err)
	}
}

// TestFork_ETXTBSY verifies that execve of a file still held open for writing
// fails with ETXTBSY reported as a ChildError at the execve step.
func TestFork_ETXTBSY(t *testing.T) {
	f, err := os.CreateTemp("", "")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		os.Remove(f.Name())
		f.Close()
	})
	if err := f.Chmod(0777); err != nil {
		t.Fatal(err)
	}
	echo, err := os.Open("/bin/echo")
	if err != nil {
		t.Fatal(err)
	}
	defer echo.Close()
	// Copy /bin/echo into the temp file; f stays open for writing so the
	// subsequent execve must fail with ETXTBSY.
	if _, err = f.ReadFrom(echo); err != nil {
		t.Fatal(err)
	}

	r := Runner{
		Args:     []string{f.Name()},
		ExecFile: f.Fd(),
	}
	_, err = r.Start()
	e, ok := err.(ChildError)
	if !ok {
		t.Fatalf("not a child error")
	}
	// BUGFIX: the original joined these conditions with &&, so the check only
	// failed when ALL of them were wrong; every field must match.
	if e.Err != syscall.ETXTBSY || e.Location != LocExecve || e.Index != 0 {
		t.Fatal(err)
	}
}

// TestFork_OK verifies a plain run of a copied executable succeeds once the
// file has been closed.
func TestFork_OK(t *testing.T) {
	t.Parallel()
	f, err := os.CreateTemp("", "")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())
	if err := f.Chmod(0777); err != nil {
		t.Fatal(err)
	}
	echo, err := os.Open("/bin/echo")
	if err != nil {
		t.Fatal(err)
	}
	defer echo.Close()
	if _, err = f.ReadFrom(echo); err != nil {
		t.Fatal(err)
	}
	// Close before exec so ETXTBSY cannot occur.
	f.Close()

	r := Runner{
		Args: []string{f.Name()},
	}
	if _, err = r.Start(); err != nil {
		t.Fatal(err)
	}
}

// TestFork_ENOENT verifies that a mount of a non-existent source surfaces
// ENOENT as a ChildError.
func TestFork_ENOENT(t *testing.T) {
	t.Parallel()
	m, err := mount.NewBuilder().
		WithMount(
			mount.Mount{
				Source: "NOT_EXISTS",
			}).Build()
	if err != nil {
		t.Fatal(err)
	}
	r := Runner{
		Args:       []string{"/bin/echo"},
		CloneFlags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUSER,
		Mounts:     m,
	}
	_, err = r.Start()
	e, ok := err.(ChildError)
	if !ok {
		t.Fatalf("not a child error")
	}
	// BUGFIX: && -> || as above; both fields must match the expectation.
	if e.Err != syscall.ENOENT || e.Location != LocExecve {
		t.Fatal(err)
	}
}
package db

import (
	"encoding/json"
	"html/template"
	"strconv"
	"sync"
	"time"

	"github.com/Jleagle/steam-go/steam"
	"github.com/gosimple/slug"
	"github.com/steam-authority/steam-authority/helpers"
	"github.com/steam-authority/steam-authority/logging"
	"github.com/steam-authority/steam-authority/memcache"
)

// todo, make column meta match table names

// Package is the GORM model for a Steam package (a purchasable bundle of apps).
type Package struct {
	ID              int        `gorm:"not null;primary_key"`                    //
	CreatedAt       *time.Time `gorm:"not null"`                                //
	UpdatedAt       *time.Time `gorm:"not null"`                                //
	PICSName        string     `gorm:"not null"`                                //
	PICSChangeID    int        `gorm:"not null"`                                //
	PICSBillingType int8       `gorm:"not null;column:billing_type"`            //
	PICSLicenseType int8       `gorm:"not null;column:license_type"`            //
	PICSStatus      int8       `gorm:"not null;column:status"`                  //
	PICSExtended    string     `gorm:"not null;default:'{}'"`                   // JSON (TEXT)
	PICSAppIDs      string     `gorm:"not null;default:'[]';column:apps"`       // JSON
	PICSAppItems    string     `gorm:"not null;default:'{}'"`                   // JSON (TEXT)
	PICSDepotIDs    string     `gorm:"not null;default:'[]'"`                   // JSON
	PICSRaw         string     `gorm:"not null;default:'{}'"`                   // JSON (TEXT)
	AppsCount       int        `gorm:"not null"`                                //
	ImagePage       string     `gorm:"not null"`                                //
	ImageHeader     string     `gorm:"not null"`                                //
	ImageLogo       string     `gorm:"not null"`                                //
	PurchaseText    string     `gorm:"not null"`                                //
	PriceInitial    int        `gorm:"not null"`                                //
	PriceFinal      int        `gorm:"not null"`                                //
	PriceDiscount   int        `gorm:"not null"`                                //
	PriceIndividual int        `gorm:"not null"`                                //
	Controller      string     `gorm:"not null;default:'{}'"`                   // JSON (TEXT)
	ComingSoon      bool       `gorm:"not null"`                                //
	ReleaseDate     string     `gorm:"not null"`                                //
	Platforms       string     `gorm:"not null;default:'[]'"`                   // JSON
}

// GetDefaultPackageJSON returns a zero Package (JSON field defaults are
// currently handled in Update instead).
func GetDefaultPackageJSON() Package {
	return Package{
		//PICSAppIDs:   "[]",
		//PICSExtended: "{}",
		//Controller:   "{}",
		//Platforms:    "[]",
	}
}

// GetPath returns the site URL path for this package, with a slugged name
// when one is known.
func (pack Package) GetPath() string {
	s := "/packages/" + strconv.Itoa(pack.ID)
	if pack.PICSName != "" {
		s = s + "/" + slug.Make(pack.GetName())
	}
	return s
}

// GetName returns the PICS name, falling back to "Package <id>".
func (pack Package) GetName() (name string) {
	if pack.PICSName == "" {
		// Value receiver: this only changes the local copy.
		pack.PICSName = "Package " + strconv.FormatInt(int64(pack.ID), 10)
	}
	return pack.PICSName
}

// GetDefaultAvatar returns the placeholder image path.
func (pack Package) GetDefaultAvatar() string {
	return "/assets/img/no-app-image-square.jpg"
}

// GetCreatedNice formats the creation time for display.
func (pack Package) GetCreatedNice() string {
	return pack.CreatedAt.Format(helpers.DateYearTime)
}

// GetCreatedUnix returns the creation time as a Unix timestamp.
func (pack Package) GetCreatedUnix() int64 {
	return pack.CreatedAt.Unix()
}

// GetUpdatedNice formats the update time for display.
func (pack Package) GetUpdatedNice() string {
	return pack.UpdatedAt.Format(helpers.DateYearTime)
}

// GetUpdatedUnix returns the update time as a Unix timestamp.
func (pack Package) GetUpdatedUnix() int64 {
	return pack.UpdatedAt.Unix()
}

// GetReleaseDateNice formats the release date for display.
func (pack Package) GetReleaseDateNice() string {
	return helpers.GetReleaseDateNice(pack.ReleaseDate)
}

// GetReleaseDateUnix returns the release date as a Unix timestamp.
func (pack Package) GetReleaseDateUnix() int64 {
	return helpers.GetReleaseDateUnix(pack.ReleaseDate)
}

// GetBillingType maps the PICS billing type code to a display string.
func (pack Package) GetBillingType() string {
	switch pack.PICSBillingType {
	case 0:
		return "No Cost"
	case 1:
		return "Store"
	case 2:
		return "Bill Monthly"
	case 3:
		return "CD Key"
	case 4:
		return "Guest Pass"
	case 5:
		return "Hardware Promo"
	case 6:
		return "Gift"
	case 7:
		return "Free Weekend"
	case 8:
		return "OEM Ticket"
	case 9:
		return "Recurring Option"
	case 10:
		return "Store or CD Key"
	case 11:
		return "Repurchaseable"
	case 12:
		return "Free on Demand"
	case 13:
		return "Rental"
	case 14:
		return "Commercial License"
	case 15:
		return "Free Commercial License"
	default:
		return "Unknown"
	}
}

// GetLicenseType maps the PICS license type code to a display string.
func (pack Package) GetLicenseType() string {
	switch pack.PICSLicenseType {
	case 0:
		return "No License"
	case 1:
		return "Single Purchase"
	case 2:
		return "Single Purchase (Limited Use)"
	case 3:
		return "Recurring Charge"
	case 6:
		return "Recurring"
	case 7:
		return "Limited Use Delayed Activation"
	default:
		return "Unknown"
	}
}

// GetStatus maps the PICS status code to a display string.
func (pack Package) GetStatus() string {
	switch pack.PICSStatus {
	case 0:
		return "Available"
	case 2:
		return "Unavailable"
	default:
		return "Unknown"
	}
}

// GetComingSoon formats the coming-soon flag for display.
func (pack Package) GetComingSoon() string {
	// The original switch had an unreachable default for a bool.
	if pack.ComingSoon {
		return "Yes"
	}
	return "No"
}

// GetAppsCountString formats the app count, showing "Unknown" for zero.
func (pack Package) GetAppsCountString() string {
	if pack.AppsCount == 0 {
		return "Unknown"
	}
	return strconv.Itoa(pack.AppsCount)
}

// GetAppIDs decodes the JSON list of app IDs in this package.
func (pack Package) GetAppIDs() (apps []int, err error) {
	err = helpers.Unmarshal([]byte(pack.PICSAppIDs), &apps)
	return apps, err
}

// SetAppIDs stores the app ID list as JSON and updates AppsCount.
func (pack *Package) SetAppIDs(apps []int) (err error) {
	bytes, err := json.Marshal(apps)
	// BUGFIX: the original inverted this check, assigning the fields only
	// when Marshal FAILED and never on success.
	if err != nil {
		return err
	}
	pack.PICSAppIDs = string(bytes)
	pack.AppsCount = len(apps)
	return nil
}

// SetDepotIDs stores the depot ID list as JSON.
func (pack *Package) SetDepotIDs(apps []int) (err error) {
	bytes, err := json.Marshal(apps)
	if err != nil {
		return err
	}
	pack.PICSDepotIDs = string(bytes)
	return nil
}

// SetAppItems stores the app item map as JSON.
func (pack *Package) SetAppItems(items map[string]string) (err error) {
	bytes, err := json.Marshal(items)
	if err != nil {
		return err
	}
	pack.PICSAppItems = string(bytes)
	return nil
}

// GetPriceInitial returns the initial price in units of currency.
func (pack Package) GetPriceInitial() float64 {
	return helpers.CentsInt(pack.PriceInitial)
}

// GetPriceFinal returns the final price in units of currency.
func (pack Package) GetPriceFinal() float64 {
	return helpers.CentsInt(pack.PriceFinal)
}

// GetPriceDiscount returns the discount in units of currency.
func (pack Package) GetPriceDiscount() float64 {
	return helpers.CentsInt(pack.PriceDiscount)
}

// GetPriceIndividual returns the individual price in units of currency.
func (pack Package) GetPriceIndividual() float64 {
	// BUGFIX: the original returned PriceInitial here.
	return helpers.CentsInt(pack.PriceIndividual)
}

// Extended is the decoded form of the PICSExtended JSON blob.
type Extended map[string]string

// SetExtended stores the extended key/values as JSON.
func (pack *Package) SetExtended(extended Extended) (err error) {
	bytes, err := json.Marshal(extended)
	if err != nil {
		return err
	}
	pack.PICSExtended = string(bytes)
	return nil
}

// GetExtended decodes the extended JSON blob.
func (pack Package) GetExtended() (extended map[string]interface{}, err error) {
	extended = make(map[string]interface{})
	err = helpers.Unmarshal([]byte(pack.PICSExtended), &extended)
	return extended, err
}

// GetExtendedNice translates extended keys to display names. Used in template.
func (pack Package) GetExtendedNice() (ret map[string]interface{}) {
	ret = make(map[string]interface{})
	extended, err := pack.GetExtended()
	if err != nil {
		logging.Error(err)
		return ret
	}
	for k, v := range extended {
		if val, ok := PackageExtendedKeys[k]; ok {
			ret[val] = v
		} else {
			logging.Info("Need to add " + k + " to extended map")
			ret[k] = v
		}
	}
	return ret
}

// GetController decodes the controller JSON blob.
func (pack Package) GetController() (controller map[string]interface{}, err error) {
	controller = make(map[string]interface{})
	err = helpers.Unmarshal([]byte(pack.Controller), &controller)
	return controller, err
}

// GetControllerNice translates controller keys to display names. Used in template.
func (pack Package) GetControllerNice() (ret map[string]interface{}) {
	ret = map[string]interface{}{}
	controller, err := pack.GetController()
	if err != nil {
		logging.Error(err)
		return ret
	}
	for k, v := range controller {
		if val, ok := PackageControllerKeys[k]; ok {
			ret[val] = v
		} else {
			logging.Info("Need to add " + k + " to controller map")
			ret[k] = v
		}
	}
	return ret
}

// GetPlatforms decodes the platform list JSON blob.
func (pack Package) GetPlatforms() (platforms []string, err error) {
	err = helpers.Unmarshal([]byte(pack.Platforms), &platforms)
	return platforms, err
}

// GetPlatformImages renders a FontAwesome icon for each platform.
func (pack Package) GetPlatformImages() (ret template.HTML, err error) {
	platforms, err := pack.GetPlatforms()
	if err != nil {
		return ret, err
	}
	for _, v := range platforms {
		switch v {
		case "macos":
			ret = ret + `<i class="fab fa-apple"></i>`
		case "windows":
			ret = ret + `<i class="fab fa-windows"></i>`
		case "linux":
			ret = ret + `<i class="fab fa-linux"></i>`
		}
	}
	return ret, nil
}

// GetPackage fetches one package by ID, returning ErrNotFound when missing.
func GetPackage(id int) (pack Package, err error) {
	db, err := GetMySQLClient()
	if err != nil {
		return pack, err
	}
	db.First(&pack, id)
	if db.Error != nil {
		return pack, db.Error
	}
	if pack.ID == 0 {
		return pack, ErrNotFound
	}
	return pack, nil
}

// GetPackages fetches packages by ID, optionally restricting columns.
func GetPackages(ids []int, columns []string) (packages []Package, err error) {
	if len(ids) == 0 {
		return packages, err
	}
	db, err := GetMySQLClient()
	if err != nil {
		return packages, err
	}
	if len(columns) > 0 {
		db = db.Select(columns)
	}
	db.Where("id IN (?)", ids).Find(&packages)
	return packages, db.Error
}

// GetPackagesAppIsIn fetches every package whose apps JSON contains appID.
func GetPackagesAppIsIn(appID int) (packages []Package, err error) {
	db, err := GetMySQLClient()
	if err != nil {
		return packages, err
	}
	// appID is an int so inlining it is injection-safe.
	db = db.Where("JSON_CONTAINS(apps, '[" + strconv.Itoa(appID) + "]')").Order("id DESC").Find(&packages)
	if db.Error != nil {
		return packages, db.Error
	}
	return packages, nil
}

// CountPackages counts rows, memoized in memcache.
func CountPackages() (count int, err error) {
	return memcache.GetSetInt(memcache.PackagesCount, func() (count int, err error) {
		db, err := GetMySQLClient()
		if err != nil {
			return count, err
		}
		db.Model(&Package{}).Count(&count)
		return count, db.Error
	})
}

// Update fills the package in from the Steam store API.
// GORM callback.
func (pack *Package) Update() (errs []error) {

	var wg sync.WaitGroup

	// Get package details
	wg.Add(1)
	go func(pack *Package) {

		defer wg.Done()

		// Get data
		response, _, err := helpers.GetSteam().GetPackageDetails(pack.ID)
		if err != nil {
			// BUGFIX: the original only recorded steam.ErrNullResponse and
			// silently dropped every other error; record them all. We still
			// fall through so behavior on a null response is unchanged.
			errs = append(errs, err)
		}

		// Controller
		controllerString, err := json.Marshal(response.Data.Controller)
		if err != nil {
			errs = append(errs, err)
		}

		// Platforms
		var platforms []string
		if response.Data.Platforms.Linux {
			platforms = append(platforms, "linux")
		}
		if response.Data.Platforms.Windows {
			platforms = append(platforms, "windows")
		}
		// BUGFIX: the original tested Platforms.Windows a second time before
		// appending "macos"; field name per steam-go — confirm it is Mac.
		if response.Data.Platforms.Mac {
			platforms = append(platforms, "macos")
		}

		platformsString, err := json.Marshal(platforms)
		if err != nil {
			errs = append(errs, err)
		}

		pack.ImageLogo = response.Data.SmallLogo
		pack.ImageHeader = response.Data.HeaderImage
		// pack.PICSAppIDs is filled from PICS elsewhere.
		pack.PriceInitial = response.Data.Price.Initial
		pack.PriceFinal = response.Data.Price.Final
		pack.PriceDiscount = response.Data.Price.DiscountPercent
		pack.PriceIndividual = response.Data.Price.Individual
		pack.Platforms = string(platformsString)
		pack.Controller = string(controllerString)
		pack.ReleaseDate = response.Data.ReleaseDate.Date
		pack.ComingSoon = response.Data.ReleaseDate.ComingSoon
	}(pack)

	// BUGFIX: the original never waited, returning errs and writing the
	// default JSON values below while the goroutine was still mutating pack.
	wg.Wait()

	// Default JSON values
	if pack.PICSAppIDs == "" || pack.PICSAppIDs == "null" {
		pack.PICSAppIDs = "[]"
	}
	if pack.PICSExtended == "" || pack.PICSExtended == "null" {
		pack.PICSExtended = "{}"
	}
	if pack.Controller == "" || pack.Controller == "null" {
		pack.Controller = "{}"
	}
	if pack.Platforms == "" || pack.Platforms == "null" {
		pack.Platforms = "[]"
	}

	return errs
}

// PackageExtendedKeys maps raw PICS extended keys to display names.
var PackageExtendedKeys = map[string]string{
	"allowcrossregiontradingandgifting":     "Allow Cross Region Trading & Gifting",
	"allowpurchasefromretrictedcountries":   "Allow Purchase From Restricted Countries",
	"allowpurchasefromrestrictedcountries":  "Allow Purchase From Restricted Countries",
	"allowpurchaseinrestrictedcountries":    "Allow Purchase In Restricted Countries",
	"allowpurchaserestrictedcountries":      "Allow Purchase Restricted Countries",
	"allowrunincountries":                   "Allow Run In Countries", // BUGFIX: was "Allow Run Inc Cuntries"
	"alwayscountasowned":                    "Always Count As Owned",
	"alwayscountsasowned":                   "Always Counts As Owned",
	"alwayscountsasunowned":                 "Always Counts As Unowned",
	"appid":                                 "App ID",
	"appidownedrequired":                    "App ID Owned Required",
	"billingagreementtype":                  "Billing Agreement Type",
	"blah":                                  "Blah",
	"canbegrantedfromexternal":              "Can Be Granted From External",
	"cantownapptopurchase":                  "Cant Own App To Purchase",
	"complimentarypackagegrant":             "Complimentary Package Grant",
	"complimentarypackagegrants":            "Complimentary Package Grants",
	"curatorconnect":                        "Curator Connect",
	"devcomp":                               "Devcomp",
	"dontallowrunincountries":               "Dont Allow Run In Countries",
	"dontgrantifappidowned":                 "Dont Grant If App ID Owned",
	"enforceintraeeaactivationrestrictions": "Enforce Intraeeaactivation Restrictions",
	"excludefromsharing":                    "Exclude From Sharing",
	"exfgls":                                "Exclude From Game Library Sharing",
	"expirytime":                            "Expiry Time",
	"extended":                              "Extended",
	"fakechange":                            "Fake Change",
	"foo":                                   "Foo",
	"freeondemand":                          "Free On Demand",
	"freeweekend":                           "Free Weekend",
	"full_gamepad":                          "Full Gamepad",
	"giftsaredeletable":                     "Gifts Are Deletable",
	"giftsaremarketable":                    "Gifts Are Marketable",
	"giftsaretradable":                      "Gifts Are Tradable",
	"grantexpirationdays":                   "Grant Expiration Days",
	"grantguestpasspackage":                 "Grant Guest Pass Package",
	"grantpassescount":                      "Grant Passes Count",
	"hardwarepromotype":                     "Hardware Promo Type",
	"ignorepurchasedateforrefunds":          "Ignore Purchase Date For Refunds",
	"initialperiod":                         "Initial Period",
	"initialtimeunit":                       "Initial Time Unit",
	"iploginrestriction":                    "IP Login Restriction",
	"languages":                             "Languages",
	"launcheula":                            "Launch EULA",
	"legacygamekeyappid":                    "Legacy Game Key App ID",
	"lowviolenceinrestrictedcountries":      "Low Violence In Restricted Countries",
	"martinotest":                           "Martino Test",
	"mustownapptopurchase":                  "Must Own App To Purchase",
	"onactivateguestpassmsg":                "On Activate Guest Pass Message",
	"onexpiredmsg":                          "On Expired Message",
	"ongrantguestpassmsg":                   "On Grant Guest Pass Message",
	"onlyallowincountries":                  "Only Allow In Countries",
	"onlyallowrestrictedcountries":          "Only Allow Restricted Countries",
	"onlyallowrunincountries":               "Only Allow Run In Countries",
	"onpurchasegrantguestpasspackage":       "On Purchase Grant Guest Pass Package",
	"onpurchasegrantguestpasspackage0":      "On Purchase Grant Guest Pass Package 0",
	"onpurchasegrantguestpasspackage1":      "On Purchase Grant Guest Pass Package 1",
	"onpurchasegrantguestpasspackage2":      "On Purchase Grant Guest Pass Package 2",
	"onpurchasegrantguestpasspackage3":      "On Purchase Grant Guest Pass Package 3",
	"onpurchasegrantguestpasspackage4":      "On Purchase Grant Guest Pass Package 4",
	"onpurchasegrantguestpasspackage5":      "On Purchase Grant Guest Pass Package 5",
	"onpurchasegrantguestpasspackage6":      "On Purchase Grant Guest Pass Package 6",
	"onpurchasegrantguestpasspackage7":      "On Purchase Grant Guest Pass Package 7",
	"onpurchasegrantguestpasspackage8":      "On Purchase Grant Guest Pass Package 8",
	"onpurchasegrantguestpasspackage9":      "On Purchase Grant Guest Pass Package 9",
	"onpurchasegrantguestpasspackage10":     "On Purchase Grant Guest Pass Package 10",
	"onpurchasegrantguestpasspackage11":     "On Purchase Grant Guest Pass Package 11",
	"onpurchasegrantguestpasspackage12":     "On Purchase Grant Guest Pass Package 12",
	"onpurchasegrantguestpasspackage13":     "On Purchase Grant Guest Pass Package 13",
	"onpurchasegrantguestpasspackage14":     "On Purchase Grant Guest Pass Package 14",
	"onpurchasegrantguestpasspackage15":     "On Purchase Grant Guest Pass Package 15",
	"onpurchasegrantguestpasspackage16":     "On Purchase Grant Guest Pass Package 16",
	"onpurchasegrantguestpasspackage17":     "On Purchase Grant Guest Pass Package 17",
	"onpurchasegrantguestpasspackage18":     "On Purchase Grant Guest Pass Package 18",
	"onpurchasegrantguestpasspackage19":     "On Purchase Grant Guest Pass Package 19",
	"onpurchasegrantguestpasspackage20":     "On Purchase Grant Guest Pass Package 20",
	"onpurchasegrantguestpasspackage21":     "On Purchase Grant Guest Pass Package 21",
	"onpurchasegrantguestpasspackage22":     "On Purchase Grant Guest Pass Package 22",
	"onquitguestpassmsg":                    "On Quit Guest Pass Message",
	"overridetaxtype":                       "Override Tax Type",
	"permitrunincountries":                  "Permit Run In Countries",
	"prohibitrunincountries":                "Prohibit Run In Countries",
	"purchaserestrictedcountries":           "Purchase Restricted Countries",
	"purchaseretrictedcountries":            "Purchase Restricted Countries",
	"recurringoptions":                      "Recurring Options",
	"recurringpackageoption":                "Recurring Package Option",
	"releaseoverride":                       "Release Override",
	"releasestatecountries":                 "Release State Countries",
	"releasestateoverride":                  "Release State Override",
	"releasestateoverridecountries":         "Release State Override Countries",
	"relesestateoverride":                   "Release State Override",
	"renewalperiod":                         "Renewal Period",
	"renewaltimeunit":                       "Renewal Time Unit",
	"requiredps3apploginforpurchase":        "Required PS3 App Login For Purchase",
	"requirespreapproval":                   "Requires Preapproval",
	"restrictedcountries":                   "Restricted Countries",
	"runrestrictedcountries":                "Run Restricted Countries",
	"shippableitem":                         "Shippable Item",
	"skipownsallappsinpackagecheck":         "Skip Owns All Apps In Package Check",
	"starttime":                             "Start Time",
	"state":                                 "State",
	"test":                                  "Test",
	"testchange":                            "Test Change",
	"trading_card_drops":                    "Trading Card Drops",
	"violencerestrictedcountries":           "Violence Restricted Countries",
	"violencerestrictedterritorycodes":      "Violence Restricted Territory Codes",
	"virtualitemreward":                     "Virtual Item Reward",
}

// PackageControllerKeys maps raw controller keys to display names.
var PackageControllerKeys = map[string]string{
	"full_gamepad":                         "Full Gamepad",
	"allowpurchasefromrestrictedcountries": "Allow Purchase From Restricted Countries",
}
package env

import (
	"fmt"
	"os"
	"strconv"
)

// GetUint32 extracts uint32 value from env. if not set, returns default value.
func GetUint32(key string, def uint32) uint32 {
	raw, found := os.LookupEnv(key)
	if !found {
		return def
	}
	parsed, err := strconv.ParseUint(raw, decimalBase, bitSize32)
	if err != nil {
		// Malformed values fall back to the default rather than erroring.
		return def
	}
	return uint32(parsed)
}

// MustGetUint32 extracts uint32 value from env. if not set, it panics.
func MustGetUint32(key string) uint32 {
	raw, found := os.LookupEnv(key)
	if !found {
		panic(fmt.Sprintf("environment variable '%s' not set", key))
	}
	parsed, err := strconv.ParseUint(raw, decimalBase, bitSize32)
	if err != nil {
		panic(fmt.Sprintf("invalid environment variable '%s' has been set: %s", key, raw))
	}
	return uint32(parsed)
}