text
stringlengths
11
4.05M
package builder

import (
	"errors"
	"io/ioutil"
	"os"
	"sync"
	"syscall"
	"time"
)

var (
	// calibrateOnce guards the one-time calibration in getCtimeResolution.
	calibrateOnce sync.Once
	// calibrationError caches the calibration failure, if any.
	calibrationError error
	// _ctimeResolution caches the measured ctime resolution.
	_ctimeResolution time.Duration
)

// calibrateCtime will calibrate the resolution of inode change times for
// temporary files. It will return the minimum resolution or an error.
//
// It writes to a temp file until the file's ctime visibly changes, sleeping
// between probes with exponentially growing intervals (1ns, 10ns, ...), and
// returns the elapsed wall time at the first observed change. It gives up
// after one second.
//
// NOTE(review): this relies on syscall.Stat_t.Ctim, which exists only on
// Unix-like platforms (and the field name varies by OS) — confirm the
// intended build targets.
func calibrateCtime() (time.Duration, error) {
	buffer := []byte("data\n")

	file, err := ioutil.TempFile("", "ctimeCalibration")
	if err != nil {
		return 0, err
	}
	defer file.Close()
	defer os.Remove(file.Name())

	fd := int(file.Fd())

	// Bypass os.File overheads.
	if _, err := syscall.Write(fd, buffer); err != nil {
		return 0, err
	}

	// Baseline ctime to compare subsequent writes against.
	var firstStat syscall.Stat_t
	if err := syscall.Stat(file.Name(), &firstStat); err != nil {
		return 0, err
	}

	interval := time.Nanosecond
	startTime := time.Now()
	for ; time.Since(startTime) < time.Second; interval *= 10 {
		if _, err := syscall.Write(fd, buffer); err != nil {
			return 0, err
		}

		var newStat syscall.Stat_t
		if err := syscall.Stat(file.Name(), &newStat); err != nil {
			return 0, err
		}

		// First observed ctime change: the elapsed time bounds the
		// filesystem's timestamp resolution.
		if newStat.Ctim != firstStat.Ctim {
			return time.Since(startTime), nil
		}

		time.Sleep(interval)
	}

	return 0, errors.New("timed out calibrating Ctime changes")
}

// getCtimeResolution returns the lazily calibrated, cached ctime resolution
// (or the cached calibration error). Safe for concurrent use via sync.Once.
func getCtimeResolution() (time.Duration, error) {
	calibrateOnce.Do(func() {
		_ctimeResolution, calibrationError = calibrateCtime()
	})

	return _ctimeResolution, calibrationError
}
package utils

import (
	"fmt"
	"log"
	"os"
	"strings"
	"time"

	"github.com/netclave/common/networkutils"
	"github.com/netclave/common/storage"
)

// Storage table names for fail2ban-style tracking.
const FAILED_EVENTS_TABLE = "failedEvents"
const FAILED_IPS_TABLE = "failedIPs"

// LAST_TIME_LOGGED remembers, per IP, when a failure was last written to
// stderr (unix milliseconds) so LogBannedIPs can rate-limit its output.
var LAST_TIME_LOGGED = map[string]int64{}

// Event is a single failed-access event attributed to an IP.
type Event struct {
	ID       string
	IP       string
	Priority string
}

// Fail2BanData bundles the inputs needed to record a failed request.
type Fail2BanData struct {
	DataStorage   *storage.GenericStorage
	RemoteAddress string
	TTL           int64
}

// CreateSimpleEvent builds a priority-1 Event for the given remote address.
func CreateSimpleEvent(remoteAddress string) (*Event, error) {
	uuid, err := GenerateUUID()
	if err != nil {
		return nil, err
	}

	return &Event{
		ID:       uuid,
		IP:       networkutils.ParseIP(remoteAddress),
		Priority: "1",
	}, nil
}

// StoreBannedIP stores the IP with a TTL (milliseconds) and appends the event
// to the per-IP event map.
func StoreBannedIP(dataStorage *storage.GenericStorage, event *Event, ttl int64) error {
	err := dataStorage.SetKey(FAILED_IPS_TABLE, event.IP, event.IP, time.Duration(ttl)*time.Millisecond)
	if err != nil {
		return err
	}

	return dataStorage.AddToMap(FAILED_EVENTS_TABLE, event.IP, event.ID, event)
}

// LogBannedIPs prints (at most once per minute per IP) the IPs that still
// have recorded failures, and garbage-collects events whose ban key expired.
func LogBannedIPs(dataStorage *storage.GenericStorage) error {
	eventsPerIpsKeys, err := dataStorage.GetKeys(FAILED_EVENTS_TABLE, "*")
	if err != nil {
		log.Println(err.Error())
		return err
	}

	for _, key := range eventsPerIpsKeys {
		ip := strings.ReplaceAll(key, FAILED_EVENTS_TABLE+"/", "")

		res, err := dataStorage.GetKey(FAILED_IPS_TABLE, ip)
		if err != nil {
			return err
		}

		var events map[string]*Event
		err = dataStorage.GetMap(FAILED_EVENTS_TABLE, ip, &events)
		if err != nil {
			return err
		}

		// The ban key expired: drop the stale events and move on.
		if res == "" {
			for eventKey := range events {
				err = dataStorage.DelFromMap(FAILED_EVENTS_TABLE, ip, eventKey)
				if err != nil {
					return err
				}
			}
			continue
		}

		timestamp, ok := LAST_TIME_LOGGED[ip]
		if !ok { // fix: idiomatic negation instead of `ok == false`
			timestamp = int64(0)
		}

		now := time.Now().UnixNano() / int64(time.Millisecond)

		// Rate-limit: log each IP at most once per 60 seconds.
		if now-timestamp > 60000 {
			fmt.Fprintf(os.Stderr, "Failed request for ip: %s\n", ip)
			LAST_TIME_LOGGED[ip] = now
		}
	}

	return nil
}

// RetrieveIPs lists the distinct IPs that currently have recorded events.
func RetrieveIPs(dataStorage *storage.GenericStorage) ([]string, error) {
	result := make(map[string]struct{}, 0)

	eventsPerIpsKeys, err := dataStorage.GetKeys(FAILED_EVENTS_TABLE, "*")
	if err != nil {
		log.Println(err.Error())
		return make([]string, 0), err
	}

	for _, key := range eventsPerIpsKeys {
		ip := strings.ReplaceAll(key, FAILED_EVENTS_TABLE+"/", "")
		result[ip] = struct{}{}
	}

	// fix: pre-size the slice and drop the redundant `, _` in the range.
	resultArr := make([]string, 0, len(result))
	for ip := range result {
		resultArr = append(resultArr, ip)
	}

	return resultArr, nil
}

// RetrieveEventsForIP returns the events for ip, or a nil map when the ban
// key has expired or a storage error occurs.
func RetrieveEventsForIP(dataStorage *storage.GenericStorage, ip string) (map[string]*Event, error) {
	var result map[string]*Event

	res, err := dataStorage.GetKey(FAILED_IPS_TABLE, ip)
	if err != nil {
		return result, err
	}

	var events map[string]*Event
	err = dataStorage.GetMap(FAILED_EVENTS_TABLE, ip, &events)
	if err != nil || res == "" {
		return result, err
	}

	// fix: no else after a terminating return.
	return events, nil
}
package middlewares

import (
	"regexp"

	"github.com/trustelem/zxcvbn"

	"github.com/authelia/authelia/v4/internal/configuration/schema"
)

// PasswordPolicyProvider represents an implementation of a password policy provider.
type PasswordPolicyProvider interface {
	Check(password string) (err error)
}

// NewPasswordPolicyProvider returns a new password policy provider.
func NewPasswordPolicyProvider(config schema.PasswordPolicy) (provider PasswordPolicyProvider) {
	// Neither policy enabled: the zero-value standard provider accepts
	// everything.
	if !config.Standard.Enabled && !config.ZXCVBN.Enabled {
		return &StandardPasswordPolicyProvider{}
	}

	if config.Standard.Enabled {
		p := &StandardPasswordPolicyProvider{}
		p.min, p.max = config.Standard.MinLength, config.Standard.MaxLength

		if config.Standard.RequireLowercase {
			p.patterns = append(p.patterns, regexp.MustCompile(`[a-z]+`))
		}

		if config.Standard.RequireUppercase {
			p.patterns = append(p.patterns, regexp.MustCompile(`[A-Z]+`))
		}

		if config.Standard.RequireNumber {
			p.patterns = append(p.patterns, regexp.MustCompile(`[0-9]+`))
		}

		if config.Standard.RequireSpecial {
			p.patterns = append(p.patterns, regexp.MustCompile(`[^a-zA-Z0-9]+`))
		}

		return p
	}

	if config.ZXCVBN.Enabled {
		return &ZXCVBNPasswordPolicyProvider{minScore: config.ZXCVBN.MinScore}
	}

	return &StandardPasswordPolicyProvider{}
}

// ZXCVBNPasswordPolicyProvider handles zxcvbn password policy checking.
type ZXCVBNPasswordPolicyProvider struct {
	minScore int
}

// Check returns an error when the zxcvbn score of password is below the
// configured minimum.
func (p ZXCVBNPasswordPolicyProvider) Check(password string) (err error) {
	result := zxcvbn.PasswordStrength(password, nil)

	if result.Score < p.minScore {
		return errPasswordPolicyNoMet
	}

	return nil
}

// StandardPasswordPolicyProvider handles standard password policy checking.
//
// Fix: patterns is now a slice of *regexp.Regexp rather than regexp.Regexp
// values — the regexp package documents Regexp as safe for concurrent use
// through a pointer, and storing values forced a struct copy of every
// compiled pattern (previously built by dereferencing MustCompile).
type StandardPasswordPolicyProvider struct {
	patterns []*regexp.Regexp
	min, max int
}

// Check validates the password's length bounds and requires every configured
// character-class pattern to match.
func (p StandardPasswordPolicyProvider) Check(password string) (err error) {
	patterns := len(p.patterns)

	if (p.min > 0 && len(password) < p.min) || (p.max > 0 && len(password) > p.max) {
		return errPasswordPolicyNoMet
	}

	if patterns == 0 {
		return nil
	}

	for i := 0; i < patterns; i++ {
		if !p.patterns[i].MatchString(password) {
			return errPasswordPolicyNoMet
		}
	}

	return nil
}
package main

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"testing"
)

// JSONResp mirrors the get-token endpoint's JSON response.
type JSONResp struct {
	Token string `json:"token"`
	Value `json:"value"`
}

// Value is the nested "value" object holding the access key.
type Value struct {
	AccessKey string `json:"access_key"`
}

// TestGet checks the status codes returned for valid and invalid keys.
//
// Fixes vs. the previous version: request errors now fail the test instead of
// being printed (a nil response would have panicked on the next line),
// response bodies are closed inside the loop rather than deferred (deferring
// in a loop held every body open until the test returned), and failures
// report the actual status code, not the expected one.
func TestGet(t *testing.T) {
	var tests = []struct {
		key  string
		code int
	}{
		{"1key", 200},
		{"2key", 200},
		{"asdf", 401},
		{"12345", 401},
	}
	client := &http.Client{}
	url := "http://localhost:8000/get-token/"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range tests {
		req.Header.Set("Key", test.key)
		resp, err := client.Do(req)
		if err != nil {
			t.Fatal(err)
		}
		if resp.StatusCode != test.code {
			t.Errorf("GetToken(%s) returned %d, want %d", test.key, resp.StatusCode, test.code)
		}
		resp.Body.Close()
	}
}

// TestGetJSON checks the token/access-key pairs in the JSON body.
func TestGetJSON(t *testing.T) {
	var tests = []struct {
		key   string
		token string
	}{
		{"1key", "1token"},
		{"2key", "2token"},
		{"3key", "3token"},
		{"4key", "4token"},
		{"asfdg", ""},
		{"4kee", ""},
		{"wrong", ""},
		{"lkbdsf", ""},
	}
	client := &http.Client{}
	url := "http://localhost:8000/get-token/"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range tests {
		jsonResp := JSONResp{}
		req.Header.Set("Key", test.key)
		resp, err := client.Do(req)
		if err != nil {
			t.Fatal(err)
		}
		r, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			t.Fatal(err)
		}
		// Fix: the unmarshal error is now actually checked — the old code
		// re-tested the stale error from client.Do instead.
		if err := json.Unmarshal(r, &jsonResp); err != nil {
			t.Error(err)
		}
		if jsonResp.AccessKey != test.key && jsonResp.AccessKey != "" {
			t.Errorf("Sent access key (%s) got %s", test.key, jsonResp.AccessKey)
		}
		if jsonResp.Token != test.token {
			t.Errorf("Sent token (%s) got %s", test.token, jsonResp.Token)
		}
	}
}

// TestVer checks verification status codes for known and unknown tokens.
func TestVer(t *testing.T) {
	var tests = []struct {
		token string
		code  int
	}{
		{"1token", 200},
		{"2token", 200},
		{"asdf", 400},
		{"12345", 400},
	}
	client := &http.Client{}
	url := "http://localhost:8000/ver-token/"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range tests {
		req.Header.Set("Token", test.token)
		resp, err := client.Do(req)
		if err != nil {
			t.Fatal(err)
		}
		if resp.StatusCode != test.code {
			t.Errorf("VerToken(%s) returned %d, want %d", test.token, resp.StatusCode, test.code)
		}
		resp.Body.Close()
	}
}

// TestVerJSON checks that verifying a token returns its access key.
func TestVerJSON(t *testing.T) {
	var tests = map[string]string{
		"1token":    "1key",
		"2token":    "2key",
		"3token":    "3key",
		"4token":    "4key",
		"dsfbd":     "",
		"4to124ken": "",
		"4toweken":  "",
	}
	client := &http.Client{}
	url := "http://localhost:8000/ver-token/"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	for token, key := range tests {
		val := Value{}
		req.Header.Set("Token", token)
		resp, err := client.Do(req)
		if err != nil {
			t.Fatal(err)
		}
		r, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			t.Fatal(err)
		}
		if err := json.Unmarshal(r, &val); err != nil {
			t.Error(err)
		}
		if val.AccessKey != key {
			t.Errorf("Sent token %s, expected key %s got %s", token, key, val.AccessKey)
		}
	}
}

// TestDel deletes tokens and verifies that repeat deletions fail.
func TestDel(t *testing.T) {
	var tests = []struct {
		token string
		code  int
	}{
		{"1token", 200},
		{"2token", 200},
		{"asdf", 400},
		{"12345", 400},
		{"2key", 400},
		// Deleting previously deleted token
		{"1key", 400},
	}
	client := &http.Client{}
	url := "http://localhost:8000/del-token/"
	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range tests {
		req.Header.Set("Token", test.token)
		resp, err := client.Do(req)
		if err != nil {
			t.Fatal(err)
		}
		if resp.StatusCode != test.code {
			t.Errorf("DelToken(%s) returned %d, not %d", test.token, resp.StatusCode, test.code)
		}
		resp.Body.Close()
	}
}

// Shared fixtures for the benchmarks below.
var client = &http.Client{}
var url = "http://localhost:8000/get-token/"
var req = func() *http.Request {
	r, _ := http.NewRequest("GET", url, nil)
	r.Header.Set("Key", "123")
	return r
}()

// BenchmarkGet measures the get-token round trip. Fix: bodies are closed in
// the loop; the previous `defer` accumulated one open body per iteration and
// would dereference a nil response on error.
func BenchmarkGet(b *testing.B) {
	for i := 0; i < b.N; i++ {
		resp, err := client.Do(req)
		if err != nil {
			b.Fatal(err)
		}
		resp.Body.Close()
	}
}

var url1 = "http://localhost:8000/ver-token/"
var req1 = func() *http.Request {
	r, _ := http.NewRequest("GET", url1, nil)
	r.Header.Set("Token", "321")
	return r
}()

// BenchmarkVer measures the ver-token round trip.
func BenchmarkVer(b *testing.B) {
	for i := 0; i < b.N; i++ {
		resp, err := client.Do(req1)
		if err != nil {
			b.Fatal(err)
		}
		resp.Body.Close()
	}
}
package main

import (
	"fmt"

	"github.com/shurcooL/githubv4"
)

// CreateMessage populates the webhook payload (speaker, avatar, and embed)
// based on today's commit count q. The switch cases are ordered, so each
// upper bound implies every previous bound was already exceeded.
func (dw *DiscordWebhook) CreateMessage(q githubv4.Int) {
	title := fmt.Sprintf("今日のコミット数は%v回です!!", q)

	switch {
	case q <= 3:
		dw.UserName = "中野五月"
		dw.AvatarURL = "https://cdn-ak.f.st-hatena.com/images/fotolife/m/magazine_pocket/20171213/20171213201322.jpg"
		dw.Embeds = []DiscordEmbed{{
			Title: title,
			Image: DiscordImage{URL: "https://media.Discordapp.net/attachments/567985071701753857/639018077589078016/S__27058352.jpg?width=1090&height=1141"},
			Color: 0xff0000,
		}}
	case q <= 5:
		dw.UserName = "中野一花"
		dw.AvatarURL = "https://cdn-ak.f.st-hatena.com/images/fotolife/m/magazine_pocket/20171213/20171213200413.jpg"
		dw.Embeds = []DiscordEmbed{{
			Title: title,
			Image: DiscordImage{URL: "https://media.Discordapp.net/attachments/567985071701753857/639018284901203968/S__27058782.jpg"},
			Color: 0xffff00,
		}}
	case q <= 8:
		dw.UserName = "中野四葉"
		dw.AvatarURL = "http://chomanga.org/wp-content/uploads/2019/12/a6516e3f616a117ed66a7af940fdfed6.png"
		dw.Embeds = []DiscordEmbed{{
			Title: title,
			Image: DiscordImage{URL: "https://imasoku.com/wp-content/uploads/2019/02/yZoTi8u.jpg"},
			Color: 0x008000,
		}}
	case q <= 12:
		dw.UserName = "中野三玖"
		dw.AvatarURL = "https://cdn-ak.f.st-hatena.com/images/fotolife/m/magazine_pocket/20171213/20171213200842.jpg"
		dw.Embeds = []DiscordEmbed{{
			Title: title,
			Image: DiscordImage{URL: "http://phoenix-wind.com/common/img/OGP/word/gotoubun_miku_03.jpg"},
			Color: 0x0000ff,
		}}
	default: // q > 12
		dw.UserName = "中野二乃"
		dw.AvatarURL = "https://pbs.twimg.com/media/DyehWWfVsAA6JWV.jpg"
		dw.Embeds = []DiscordEmbed{{
			Title: title,
			Image: DiscordImage{URL: "https://pbs.twimg.com/media/Dgngye7U8AI9f-3?format=jpg&name=900x900"},
			Color: 0x000000,
		}}
	}
}
package main

import (
	"fmt"
	"time"
)

// pc and pc1 are 8-bit population-count lookup tables: pc[i] is the number of
// set bits in i. Two identical tables are kept so that each implementation
// pays its own cache costs when timed.
var pc [256]byte
var pc1 [256]byte

func init() {
	// pc[i] = pc[i/2] + (i & 1): shifting right drops the low bit, which is
	// added back explicitly.
	for i := range pc {
		pc[i] = pc[i/2] + byte(i&1)
		pc1[i] = pc1[i/2] + byte(i&1)
	}
}

// main times both popcount implementations on the same input and prints the
// difference in nanoseconds.
func main() {
	start1 := time.Now()
	fmt.Printf("Result : %d\n", popcount(10))
	pcSec := time.Since(start1).Nanoseconds()

	start2 := time.Now()
	fmt.Printf("Result by loop: %d\n", popcountByLoop(10))
	pcByLoopSec := time.Since(start2).Nanoseconds()

	elapsed := pcSec - pcByLoopSec
	fmt.Println()
	fmt.Printf("%dnano sec elapsed.", elapsed)
}

// popcount counts set bits in x using one table lookup per byte.
//
// Fix: the old version summed only bytes 0-6 and then ran a pointless
// 64-iteration loop that repeatedly re-read byte 7 into the same variable;
// the result was correct but the loop was dead weight. All eight bytes are
// now summed directly.
func popcount(x uint64) int {
	return int(pc[byte(x>>(0*8))] +
		pc[byte(x>>(1*8))] +
		pc[byte(x>>(2*8))] +
		pc[byte(x>>(3*8))] +
		pc[byte(x>>(4*8))] +
		pc[byte(x>>(5*8))] +
		pc[byte(x>>(6*8))] +
		pc[byte(x>>(7*8))])
}

// popcountByLoop counts set bits in x by looping over the eight bytes.
//
// Fix: removed the dead `if i != 8` guard (always true inside `i < 8`) and
// the unreachable busy-loop in its else branch.
func popcountByLoop(x uint64) int {
	var n int
	for i := uint(0); i < 8; i++ {
		n += int(pc1[byte(x>>(i*8))])
	}
	return n
}
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
)

// test_case holds one round of the "save the prisoner" puzzle.
type test_case struct {
	n int // number of prisoners
	m int // number of sweets
	s int // id of prisoner where distribution begins (0 index)
}

type input struct {
	t []test_case
}

// savePrisonerId returns the 1-based id of the prisoner who receives the last
// sweet. Distribution wraps around the circle, so the answer is
// (m + s - 1) mod n, with a remainder of 0 meaning the last prisoner, n.
//
// Fix: removed the unreachable `return p` that followed the if/else (both
// branches already returned).
func savePrisonerId(t test_case) int {
	p := (t.m + t.s - 1) % t.n
	if p == 0 {
		return t.n
	}
	return p
}

func main() {
	input := getInput()
	for x := range input.t {
		fmt.Println(savePrisonerId(input.t[x]))
	}
}

// getInput reads the number of test cases followed by n, m, s triples from
// stdin, whitespace-separated.
func getInput() (i input) {
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Split(bufio.ScanWords)

	scanner.Scan()
	numTestCases, _ := strconv.Atoi(scanner.Text())
	i.t = make([]test_case, numTestCases)

	for x := 0; x < numTestCases; x++ {
		scanner.Scan()
		i.t[x].n, _ = strconv.Atoi(scanner.Text())
		scanner.Scan()
		i.t[x].m, _ = strconv.Atoi(scanner.Text())
		scanner.Scan()
		i.t[x].s, _ = strconv.Atoi(scanner.Text())
	}
	return i
}
package main

import (
	"testing"

	shared "github.com/corymurphy/adventofcode/shared"
)

// Test_Part1 checks part1 against the sample puzzle input in "input_test".
func Test_Part1(t *testing.T) {
	expected := 26
	input := shared.ReadInput("input_test")
	actual := part1(input)
	shared.AssertEqual(t, expected, actual)
}

// The cases below are kept for reference; re-enable Test_Part2 once part2 is
// implemented, and the *_Completed tests when the full puzzle input file is
// present.
// func Test_Part2(t *testing.T) {
// 	expected := 93
// 	input := shared.ReadInput("input_test")
// 	actual := part2(input)
// 	shared.AssertEqual(t, expected, actual)
// }

// func Test_Part1_Completed(t *testing.T) {
// 	expected := 793
// 	input := shared.ReadInput("input")
// 	actual := part1(input)
// 	shared.AssertEqual(t, expected, actual)
// }

// func Test_Part2_Completed(t *testing.T) {
// 	expected := 24165
// 	input := shared.ReadInput("input")
// 	actual := part2(input)
// 	shared.AssertEqual(t, expected, actual)
// }
package main

import (
	"database/sql"
	"fmt"
	"log"
	"net/http"
	"unicode/utf8"

	_ "github.com/go-sql-driver/mysql"

	"my.localhost/funny/gotools/badcharsdb/models"
)

// Build/deployment configuration.
const (
	PRODMODE       = false
	DIRSEP         = "/"
	DSN            = "myhouse:pass_to_myhouse@/myhouse"
	DSN_INFOSCHEMA = "myhouse:pass_to_myhouse@/information_schema"
	DRIVER         = "mysql"
	DBNAME         = "myhouse"
)

var (
	err error
	db  *sql.DB
	// httpAddr is where the progress web UI listens.
	httpAddr string = "0.0.0.0:3000"
)

type (
	// Env bundles the two database handles the handlers need.
	Env struct {
		db, infoschema *sql.DB
	}
	// RecordError identifies one row/column value containing invalid UTF-8.
	RecordError struct {
		Tab string
		Col string
		Id  int64
		Val string
	}
)

func init() {
	models.PRODMODE = PRODMODE
}

func main() {
	// Init the connections pool to database.
	db := models.InitDB(DRIVER, DSN)
	dbinf := models.InitDB(DRIVER, DSN_INFOSCHEMA)
	env := &Env{
		db:         db,
		infoschema: dbinf,
	}
	// Run web server for observing current progress results.
	fmt.Println("Web server start on " + httpAddr)
	http.HandleFunc("/", env.homeHandler)
	// Fix: the error from ListenAndServe was silently discarded before.
	log.Fatal(http.ListenAndServe(httpAddr, nil))
}

// homeHandler scans every varchar column in DBNAME for invalid UTF-8 and
// writes the offending records to the response.
func (env *Env) homeHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		// Fix: named status constant instead of the magic number 405.
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}

	// Gather all tables and columns with a varchar type.
	allStringColumns, err := models.GatherAllVarcharTablesColumns(env.infoschema, DBNAME)
	if err != nil {
		log.Panic(err)
	}

	var badRecords []RecordError
	for _, value := range allStringColumns {
		badRecords = utfValidationTable(value.Table, value.Column, env.db)
		if len(badRecords) > 0 {
			fmt.Println(value.Table, value.Column)
			fmt.Fprintln(w, badRecords)
		} else {
			fmt.Fprintln(w, value.Table+"all normal utf-8 strings are")
		}
	}
	// Fix: dropped the redundant trailing `return`.
}

// utfValidationTable returns one RecordError per invalid rune found in the
// given table column. Note: a record containing several bad runes yields
// several identical entries — preserved from the original behavior.
func utfValidationTable(table, column string, db *sql.DB) []RecordError {
	var badResults []RecordError
	var datas []*models.Record

	// Tables without an `id` column are fetched through a different query.
	if tableHasFieldId(db, table) {
		datas = models.GetColumnRecordsWithId(db, table, column)
	} else {
		datas = models.GetColumnRecordsWithoutId(db, table, column)
	}

	for _, v := range datas {
		for _, s := range v.Val {
			// Ranging over a string yields utf8.RuneError for each invalid
			// byte sequence (it also matches a literal U+FFFD in the data).
			if s == utf8.RuneError {
				badResults = append(badResults, RecordError{
					Tab: table,
					Col: column,
					Id:  v.Id,
					Val: v.Val,
				})
			}
		}
	}
	return badResults
}

// tableHasFieldId reports whether the table has a column literally named "id".
func tableHasFieldId(db *sql.DB, table string) bool {
	for _, col := range models.GetTableColumns(db, table) {
		if col == "id" {
			// Fix: return as soon as the column is found instead of
			// continuing to scan and carrying a result flag.
			return true
		}
	}
	return false
}
/* Introduction Each Unicode codepoint can be represented as a sequence of up to 4 bytes. Because of this, it is possible to interpret some 2, 3, or 4-byte characters as multiple 1-byte characters. (See here for a UTF-8 to bytes converter). Challenge Given a UTF-8 character, output it split into a sequence of 1-byte characters. If the character is 1-byte already, return it unchanged. Your program must take one, and exactly one, UTF-8 character as input. You may use any input method you wish, as long as it has been decided on meta that is is a valid method. You cannot take input as a bytearray or series of bytes; then the challenge would just be converting hex to ASCII. Your program must output one or more ASCII 1-byte characters. Again, any output method is allowed as long as it has been marked valid on meta. Edit: As per the conversation in the comments, output should be in Code Page 850. Note: see this post for valid I/O methods. Example I/O ܀ (0x700) ▄Ç (0xdc 0x80) a (0x61) a (0x61) 聂 (0x8042) Þüé (0xe8 0x81 0x82) Rules This is code-golf, so shortest answer in bytes wins! */ package main import ( "bytes" "fmt" "unicode/utf8" "golang.org/x/text/encoding/charmap" ) func main() { assert(chop('܀') == "▄Ç") assert(chop('a') == "a") assert(chop('聂') == "Þüé") } func assert(x bool) { if !x { panic("assertion failed") } } func chop(r rune) string { var ( b [utf8.UTFMax]byte w = new(bytes.Buffer) ) utf8.EncodeRune(b[:], r) for i := range b { if b[i] == 0 { break } fmt.Fprintf(w, "%c", charmap.CodePage850.DecodeByte(b[i])) } return w.String() }
package main

import (
	"fmt"
	"sync"
	"time"
)

// wg4 lets main wait for the final runner's goroutine to finish.
var wg4 sync.WaitGroup //wait for a collection goroutine to finish

func main() {
	wg4.Add(1)           // WaitGroup count +1: main waits for the last runner (goroutine) to finish
	ch := make(chan int) // unbuffered int channel (make returns T, not *T)
	go run(ch)           // start the first runner's goroutine
	ch <- 1              // hand runner #1 the baton over the channel
	wg4.Wait()           // block until the WaitGroup count reaches 0, i.e. the race is over
}

// run models one relay runner: it blocks until it receives the baton on ch,
// "runs" for two seconds, then either finishes the race (runner 4) or passes
// the baton to the next runner's goroutine.
func run(ch chan int) {
	var newRunner int
	runner := <-ch
	fmt.Printf("选手 %d 领跑...\n", runner)
	if runner != 4 {
		newRunner = runner + 1
		fmt.Printf("选手 %d 上场准备--->\n", newRunner)
		// Spawn the next runner; it blocks on the empty channel — simulating
		// warming up on the track — until the handoff below.
		go run(ch)
	}
	time.Sleep(2 * time.Second) // running time
	if runner == 4 {
		fmt.Printf("选手 %d 到达终点, 比赛结束", runner)
		wg4.Done()
	} else {
		// Baton handoff to the next runner.
		fmt.Printf("选手 %d --->交接接力棒---> %d\n", runner, newRunner)
		ch <- newRunner
	}
}
package invoice

import (
	"fmt"

	"github.com/imrenagi/go-payment"
)

// LineItemError wraps a line-item domain error code.
type LineItemError struct {
	Code int
}

const (
	// LineItemErrInvalidQty signals a quantity below the allowed minimum.
	LineItemErrInvalidQty = iota
)

// Error implements the error interface for LineItemError.
func (l LineItemError) Error() string {
	if l.Code == LineItemErrInvalidQty {
		return "Invalid minimum quantity of the items"
	}
	return "Unrecognized error code"
}

// Unwrap maps the domain code onto the shared payment error taxonomy so that
// callers can use errors.Is against payment.ErrBadRequest / ErrInternal.
func (l LineItemError) Unwrap() error {
	if l.Code == LineItemErrInvalidQty {
		return fmt.Errorf("%s: %w", l.Error(), payment.ErrBadRequest)
	}
	return fmt.Errorf("%s: %w", l.Error(), payment.ErrInternal)
}

// NewLineItem builds a single invoice line item.
func NewLineItem(
	name,
	category,
	merchant,
	description string,
	unitPrice float64,
	qty int,
	currency string,
) *LineItem {
	item := &LineItem{
		Name:         name,
		Description:  description,
		Category:     category,
		MerchantName: merchant,
		Currency:     currency,
		UnitPrice:    unitPrice,
		Qty:          qty,
	}
	return item
}

// LineItem is one purchasable row on an invoice.
type LineItem struct {
	payment.Model
	InvoiceID    uint64  `json:"-" gorm:"index:line_item_invoice_id_k"`
	Name         string  `json:"name"`
	Description  string  `json:"description" gorm:"not null;type:text"`
	Category     string  `json:"category"`
	MerchantName string  `json:"merchant_name"`
	Currency     string  `json:"currency"`
	UnitPrice    float64 `json:"unit_price"`
	Qty          int     `json:"qty"`
}

// TableName tells gorm which table stores line items.
func (LineItem) TableName() string {
	return "invoice_line_items"
}

// IncreaseQty adds one unit to the line item.
func (i *LineItem) IncreaseQty() error {
	i.Qty++
	return nil
}

// DecreaseQty removes one unit, refusing to drop the quantity below zero.
func (i *LineItem) DecreaseQty() error {
	if i.Qty < 1 {
		return LineItemError{LineItemErrInvalidQty}
	}
	i.Qty--
	return nil
}

// SubTotal is the unit price multiplied by the quantity.
func (i LineItem) SubTotal() float64 {
	return float64(i.Qty) * i.UnitPrice
}
package controllers

import (
	"net/http"

	"github.com/labstack/echo"
)

// GetHomePageHandler renders the static home-page markup with a 200 status.
func GetHomePageHandler(c echo.Context) error {
	const home = "<div><h2>Golang Blog</h2></div>"
	return c.HTML(http.StatusOK, home)
}
package stringutil

import (
	"fmt"

	"golang.org/x/exp/maps"
)

// unit is a zero-sized placeholder used as the map value type.
type unit = struct{}

// Set is a set of strings.
type Set struct {
	m map[string]unit
}

// NewSet returns a new string set containing strs.
func NewSet(strs ...string) (set *Set) {
	set = &Set{
		m: make(map[string]unit, len(strs)),
	}
	for _, s := range strs {
		set.m[s] = unit{}
	}
	return set
}

// Add adds s to the set. Like writing to a nil map, calling Add on a nil set
// panics.
func (set *Set) Add(s string) {
	set.m[s] = unit{}
}

// Clone returns a deep clone of set; a nil set yields a nil clone.
func (set *Set) Clone() (clone *Set) {
	if set == nil {
		return nil
	}
	clone = &Set{m: maps.Clone(set.m)}
	return clone
}

// Del deletes s from the set. On a nil set it is a no-op, mirroring delete on
// an empty map.
func (set *Set) Del(s string) {
	if set == nil {
		return
	}
	delete(set.m, s)
}

// Equal returns true if set and other contain exactly the same strings. Two
// nil sets are equal; a nil set never equals a non-nil one.
func (set *Set) Equal(other *Set) (ok bool) {
	switch {
	case set == nil || other == nil:
		return set == other
	case set.Len() != other.Len():
		return false
	}
	for s := range set.m {
		if _, found := other.m[s]; !found {
			return false
		}
	}
	return true
}

// Has returns true if s is in the set; a nil set contains nothing.
func (set *Set) Has(s string) (ok bool) {
	if set == nil {
		return false
	}
	_, ok = set.m[s]
	return ok
}

// Len returns the number of strings in the set; a nil set has length zero.
func (set *Set) Len() (n int) {
	if set == nil {
		return 0
	}
	return len(set.m)
}

// Range calls f with each value of the set in an undefined order, stopping
// early when f returns false. Calling Range on a nil *Set has no effect.
func (set *Set) Range(f func(s string) (cont bool)) {
	if set == nil {
		return
	}
	for s := range set.m {
		if cont := f(s); !cont {
			break
		}
	}
}

// String implements the fmt.Stringer interface for *Set.
func (set *Set) String() (s string) {
	return fmt.Sprintf("%q", set.Values())
}

// Values returns the set's contents as a slice in an undefined order. It is
// nil for a nil set.
func (set *Set) Values() (strs []string) {
	if set == nil {
		return nil
	}
	strs = make([]string, 0, len(set.m))
	for s := range set.m {
		strs = append(strs, s)
	}
	return strs
}
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/rsa"
	"io"
	"os"

	"crypto/sha256"
)

// decrypt reverses the file encryption: it reads the RSA-OAEP-encrypted
// header (AES session key + IV) from the start of `file`, decrypts it with
// the private key, then streams the rest of the file through AES-CFB into a
// new file whose name has the LockedExtension suffix stripped.
//
// Any failure panics. The constants LockedExtension, EncryptedHeaderSize and
// KeySize are declared elsewhere in this package.
func decrypt(file string, priv *rsa.PrivateKey) {
	inFile, err := os.Open(file)
	if err != nil {
		panic(err)
	}
	defer inFile.Close()

	// Output path = input path minus the locked extension.
	// NOTE(review): 0777 is a very permissive mode for restored files —
	// confirm intent.
	outFile, err := os.OpenFile(file[:len(file)-len(LockedExtension)], os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
	if err != nil {
		panic(err)
	}
	defer outFile.Close()

	// The fixed-size header holds the RSA-encrypted AES key and IV.
	header := make([]byte, EncryptedHeaderSize)
	_, err = io.ReadFull(inFile, header)
	if err != nil {
		panic(err)
	}

	label := []byte("") // the OAEP label must match the one used at encryption time
	header, err = rsa.DecryptOAEP(sha256.New(), rand.Reader, priv, header, label)
	if err != nil {
		panic(err)
	}

	// First KeySize bytes are the AES key, followed by one block of IV.
	key := header[:KeySize]
	iv := header[KeySize : KeySize+aes.BlockSize]

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// Stream-decrypt everything after the header with CFB.
	// NOTE(review): CFB provides no integrity/authentication — corrupted
	// ciphertext decrypts to garbage silently.
	stream := cipher.NewCFBDecrypter(block, iv)
	reader := &cipher.StreamReader{S: stream, R: inFile}
	_, err = io.Copy(outFile, reader)
	if err != nil {
		panic(err)
	}
}
package main

import (
	"context"
	"flag"
	"fmt"
	"github.com/go-kit/kit/endpoint"
	kitlog "github.com/go-kit/kit/log"
	"github.com/go-kit/kit/sd"
	consulsd "github.com/go-kit/kit/sd/consul"
	httptransport "github.com/go-kit/kit/transport/http"
	"github.com/hashicorp/consul/api"
	stdopentracing "github.com/opentracing/opentracing-go"
	stdzipkin "github.com/openzipkin/zipkin-go"
	"io"
	"log"
	"net/url"
	"os"
	"strings"
	"time"

	"github.com/go-kit/kit/sd/lb"
)

// Command-line configuration for the gateway.
var (
	// httpAddr = flag.String("http.addr", ":8000", "Address for HTTP (JSON) server")
	// consulAddr = flag.String("consul.addr", "", "Consul agent address")
	consulAddr = flag.String("consuladdr", "", "Consul agent address")
	retryMax   = flag.Int("retry.max", 3, "per-request retries to different instances")
	//retryTimeout = flag.Duration("retry.timeout", 500*time.Millisecond, "per-request timeout, including retries")
	retryTimeout = flag.Duration("retry.timeout", 2*time.Second, "per-request timeout, including retries")
)

// init builds the consul service-discovery client and the structured logger.
//
// NOTE(review): *consulAddr is dereferenced here, but init runs before any
// flag.Parse() call, so the flag still holds its default ("") at this point
// and -consuladdr can never take effect — confirm; consider moving this
// setup into main, after flag.Parse().
func init() {
	consulConfig := api.DefaultConfig()
	if len(*consulAddr) > 0 {
		consulConfig.Address = *consulAddr
	}
	consulClient, err := api.NewClient(consulConfig)
	if err != nil {
		log.Fatal(err)
	}
	client = consulsd.NewClient(consulClient)
	{
		logger = kitlog.NewLogfmtLogger(os.Stderr)
		logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC)
		logger = kitlog.With(logger, "caller", kitlog.DefaultCaller)
	}
}

// Package-level collaborators populated by init.
var logger kitlog.Logger
var client consulsd.Client

// Transport domain.
var tracer = stdopentracing.GlobalTracer() //no-op
var zipkinTracer, _ = stdzipkin.NewTracer(nil, stdzipkin.WithNoopTracer(true))
var ctx = context.Background()

// GatewayService exposes the downstream endpoints the gateway proxies to.
type GatewayService interface {
	GetAccount() endpoint.Endpoint
}

type GatewaySvc struct{}

// SvcMiddleware decorates a GatewayService.
type SvcMiddleware func(GatewayService) GatewayService

// GetAccount discovers "appserver" instances via consul and wraps them in a
// round-robin, retrying, load-balanced endpoint.
func (GatewaySvc) GetAccount() endpoint.Endpoint {
	var (
		tags        = []string{"appserver"}
		passingOnly = true
		getAccount  endpoint.Endpoint
		instancer   = consulsd.NewInstancer(client, logger, "appserver", tags, passingOnly)
	)
	{
		factory := appsvcFactory(ctx, "POST", "/appserver/getaccount")
		endpointer := sd.NewEndpointer(instancer, factory, logger)
		balancer := lb.NewRoundRobin(endpointer)
		retry := lb.Retry(*retryMax, *retryTimeout, balancer)
		getAccount = retry
	}
	return getAccount
}

// appsvcFactory converts a discovered instance address into a go-kit HTTP
// client endpoint for the given method and path.
func appsvcFactory(ctx context.Context, method, path string) sd.Factory {
	return func(instance string) (endpoint.Endpoint, io.Closer, error) {
		fmt.Println("11111111111111", instance, path) // debug trace of each discovered instance
		if !strings.HasPrefix(instance, "http") {
			instance = "http://" + instance
		}
		tgt, err := url.Parse(instance)
		if err != nil {
			return nil, nil, err
		}
		tgt.Path = path

		var (
			enc httptransport.EncodeRequestFunc
			dec httptransport.DecodeResponseFunc
		)
		// Each supported path gets its own encode/decode pair.
		switch path {
		case "/appserver/getaccount":
			//enc, dec = encodeJSONRequest, decodeGetAccountResponse
			enc, dec = httptransport.EncodeJSONRequest, decodeGetAccountResponse
		default:
			return nil, nil, fmt.Errorf("unknown appsvc path %q", path)
		}

		return httptransport.NewClient(method, tgt, enc, dec).Endpoint(), nil, nil
	}
}
/* Copyright The Helm Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "bufio" "bytes" "fmt" "io" "os" "path/filepath" "strings" "github.com/Masterminds/semver/v3" "github.com/gosuri/uitable" "github.com/pkg/errors" "github.com/spf13/cobra" "helm.sh/helm/v3/cmd/helm/search" "helm.sh/helm/v3/pkg/cli/output" "helm.sh/helm/v3/pkg/helmpath" "helm.sh/helm/v3/pkg/repo" ) const searchRepoDesc = ` Search reads through all of the repositories configured on the system, and looks for matches. Search of these repositories uses the metadata stored on the system. It will display the latest stable versions of the charts found. If you specify the --devel flag, the output will include pre-release versions. If you want to search using a version constraint, use --version. Examples: # Search for stable release versions matching the keyword "nginx" $ helm search repo nginx # Search for release versions matching the keyword "nginx", including pre-release versions $ helm search repo nginx --devel # Search for the latest stable release for nginx-ingress with a major version of 1 $ helm search repo nginx-ingress --version ^1.0.0 Repositories are managed with 'helm repo' commands. ` // searchMaxScore suggests that any score higher than this is not considered a match. 
const searchMaxScore = 25

// searchRepoOptions carries the flag values for `helm search repo`.
type searchRepoOptions struct {
	versions     bool
	regexp       bool
	devel        bool
	version      string
	maxColWidth  uint
	repoFile     string
	repoCacheDir string
	outputFormat output.Format
}

// newSearchRepoCmd wires the `helm search repo` cobra command and its flags.
func newSearchRepoCmd(out io.Writer) *cobra.Command {
	o := &searchRepoOptions{}

	cmd := &cobra.Command{
		Use:   "repo [keyword]",
		Short: "search repositories for a keyword in charts",
		Long:  searchRepoDesc,
		RunE: func(cmd *cobra.Command, args []string) error {
			o.repoFile = settings.RepositoryConfig
			o.repoCacheDir = settings.RepositoryCache
			return o.run(out, args)
		},
	}

	f := cmd.Flags()
	f.BoolVarP(&o.regexp, "regexp", "r", false, "use regular expressions for searching repositories you have added")
	f.BoolVarP(&o.versions, "versions", "l", false, "show the long listing, with each version of each chart on its own line, for repositories you have added")
	f.BoolVar(&o.devel, "devel", false, "use development versions (alpha, beta, and release candidate releases), too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored")
	f.StringVar(&o.version, "version", "", "search using semantic versioning constraints on repositories you have added")
	f.UintVar(&o.maxColWidth, "max-col-width", 50, "maximum column width for output table")

	bindOutputFlag(cmd, &o.outputFormat)

	return cmd
}

// run searches the cached repo indexes for args (or returns everything when
// no keyword is given), filters by the version constraint, and writes the
// results in the configured output format.
func (o *searchRepoOptions) run(out io.Writer, args []string) error {
	o.setupSearchedVersion()

	index, err := o.buildIndex()
	if err != nil {
		return err
	}

	var res []*search.Result
	if len(args) == 0 {
		res = index.All()
	} else {
		q := strings.Join(args, " ")
		res, err = index.Search(q, searchMaxScore, o.regexp)
		if err != nil {
			return err
		}
	}

	search.SortScore(res)
	data, err := o.applyConstraint(res)
	if err != nil {
		return err
	}

	return o.outputFormat.Write(out, &repoSearchWriter{data, o.maxColWidth})
}

// setupSearchedVersion defaults the version constraint when the user did not
// pass --version: any prerelease with --devel, stable-only otherwise.
func (o *searchRepoOptions) setupSearchedVersion() {
	debug("Original chart version: %q", o.version)

	if o.version != "" {
		return
	}

	if o.devel { // search for releases and prereleases (alpha, beta, and release candidate releases).
		debug("setting version to >0.0.0-0")
		o.version = ">0.0.0-0"
	} else { // search only for stable releases, prerelease versions will be skip
		debug("setting version to >0.0.0")
		o.version = ">0.0.0"
	}
}

// applyConstraint filters res down to chart versions satisfying o.version,
// keeping only the first (highest-scored) version per chart name unless
// --versions was given. Charts with unparseable versions are skipped.
func (o *searchRepoOptions) applyConstraint(res []*search.Result) ([]*search.Result, error) {
	if o.version == "" {
		return res, nil
	}

	constraint, err := semver.NewConstraint(o.version)
	if err != nil {
		return res, errors.Wrap(err, "an invalid version/constraint format")
	}

	// Filter in place, reusing res's backing array.
	data := res[:0]
	foundNames := map[string]bool{}
	for _, r := range res {
		// if not returning all versions and already have found a result,
		// you're done!
		if !o.versions && foundNames[r.Name] {
			continue
		}
		v, err := semver.NewVersion(r.Chart.Version)
		if err != nil {
			continue
		}
		if constraint.Check(v) {
			data = append(data, r)
			foundNames[r.Name] = true
		}
	}

	return data, nil
}

// buildIndex loads every configured repository's cached index file into one
// searchable index, warning about (and skipping) corrupt or missing caches.
func (o *searchRepoOptions) buildIndex() (*search.Index, error) {
	// Load the repositories.yaml
	// NOTE(review): a LoadFile error other than not-exist falls through to
	// len(rf.Repositories) — confirm LoadFile returns a usable rf in that
	// case, otherwise this can dereference nil.
	rf, err := repo.LoadFile(o.repoFile)
	if isNotExist(err) || len(rf.Repositories) == 0 {
		return nil, errors.New("no repositories configured")
	}

	i := search.NewIndex()
	for _, re := range rf.Repositories {
		n := re.Name
		f := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))
		ind, err := repo.LoadIndexFile(f)
		if err != nil {
			warning("Repo %q is corrupt or missing. Try 'helm repo update'.", n)
			warning("%s", err)
			continue
		}

		i.AddRepo(n, ind, o.versions || len(o.version) > 0)
	}
	return i, nil
}

// repoChartElement is one row of machine-readable (JSON/YAML) search output.
type repoChartElement struct {
	Name        string `json:"name"`
	Version     string `json:"version"`
	AppVersion  string `json:"app_version"`
	Description string `json:"description"`
}

// repoSearchWriter renders search results in table, JSON, or YAML form.
type repoSearchWriter struct {
	results     []*search.Result
	columnWidth uint
}

// WriteTable renders the results as a human-readable table.
func (r *repoSearchWriter) WriteTable(out io.Writer) error {
	if len(r.results) == 0 {
		_, err := out.Write([]byte("No results found\n"))
		if err != nil {
			return fmt.Errorf("unable to write results: %s", err)
		}
		return nil
	}
	table := uitable.New()
	table.MaxColWidth = r.columnWidth
	table.AddRow("NAME", "CHART VERSION", "APP VERSION", "DESCRIPTION")
	for _, r := range r.results {
		table.AddRow(r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description)
	}
	return output.EncodeTable(out, table)
}

// WriteJSON renders the results as a JSON array.
func (r *repoSearchWriter) WriteJSON(out io.Writer) error {
	return r.encodeByFormat(out, output.JSON)
}

// WriteYAML renders the results as a YAML list.
func (r *repoSearchWriter) WriteYAML(out io.Writer) error {
	return r.encodeByFormat(out, output.YAML)
}

func (r *repoSearchWriter) encodeByFormat(out io.Writer, format output.Format) error {
	// Initialize the array so no results returns an empty array instead of null
	chartList := make([]repoChartElement, 0, len(r.results))

	for _, r := range r.results {
		chartList = append(chartList, repoChartElement{r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description})
	}

	switch format {
	case output.JSON:
		return output.EncodeJSON(out, chartList)
	case output.YAML:
		return output.EncodeYAML(out, chartList)
	}

	// Because this is a non-exported function and only called internally by
	// WriteJSON and WriteYAML, we shouldn't get invalid types
	return nil
}

// Provides the list of charts that are part of the specified repo, and that starts with 'prefix'.
func compListChartsOfRepo(repoName string, prefix string) []string { var charts []string path := filepath.Join(settings.RepositoryCache, helmpath.CacheChartsFile(repoName)) content, err := os.ReadFile(path) if err == nil { scanner := bufio.NewScanner(bytes.NewReader(content)) for scanner.Scan() { fullName := fmt.Sprintf("%s/%s", repoName, scanner.Text()) if strings.HasPrefix(fullName, prefix) { charts = append(charts, fullName) } } return charts } if isNotExist(err) { // If there is no cached charts file, fallback to the full index file. // This is much slower but can happen after the caching feature is first // installed but before the user does a 'helm repo update' to generate the // first cached charts file. path = filepath.Join(settings.RepositoryCache, helmpath.CacheIndexFile(repoName)) if indexFile, err := repo.LoadIndexFile(path); err == nil { for name := range indexFile.Entries { fullName := fmt.Sprintf("%s/%s", repoName, name) if strings.HasPrefix(fullName, prefix) { charts = append(charts, fullName) } } return charts } } return []string{} } // Provide dynamic auto-completion for commands that operate on charts (e.g., helm show) // When true, the includeFiles argument indicates that completion should include local files (e.g., local charts) func compListCharts(toComplete string, includeFiles bool) ([]string, cobra.ShellCompDirective) { cobra.CompDebugln(fmt.Sprintf("compListCharts with toComplete %s", toComplete), settings.Debug) noSpace := false noFile := false var completions []string // First check completions for repos repos := compListRepos("", nil) for _, repoInfo := range repos { // Split name from description repoInfo := strings.Split(repoInfo, "\t") repo := repoInfo[0] repoDesc := "" if len(repoInfo) > 1 { repoDesc = repoInfo[1] } repoWithSlash := fmt.Sprintf("%s/", repo) if strings.HasPrefix(toComplete, repoWithSlash) { // Must complete with charts within the specified repo. 
// Don't filter on toComplete to allow for shell fuzzy matching completions = append(completions, compListChartsOfRepo(repo, "")...) noSpace = false break } else if strings.HasPrefix(repo, toComplete) { // Must complete the repo name with the slash, followed by the description completions = append(completions, fmt.Sprintf("%s\t%s", repoWithSlash, repoDesc)) noSpace = true } } cobra.CompDebugln(fmt.Sprintf("Completions after repos: %v", completions), settings.Debug) // Now handle completions for url prefixes for _, url := range []string{"oci://\tChart OCI prefix", "https://\tChart URL prefix", "http://\tChart URL prefix", "file://\tChart local URL prefix"} { if strings.HasPrefix(toComplete, url) { // The user already put in the full url prefix; we don't have // anything to add, but make sure the shell does not default // to file completion since we could be returning an empty array. noFile = true noSpace = true } else if strings.HasPrefix(url, toComplete) { // We are completing a url prefix completions = append(completions, url) noSpace = true } } cobra.CompDebugln(fmt.Sprintf("Completions after urls: %v", completions), settings.Debug) // Finally, provide file completion if we need to. 
// We only do this if: // 1- There are other completions found (if there are no completions, // the shell will do file completion itself) // 2- If there is some input from the user (or else we will end up // listing the entire content of the current directory which will // be too many choices for the user to find the real repos) if includeFiles && len(completions) > 0 && len(toComplete) > 0 { if files, err := os.ReadDir("."); err == nil { for _, file := range files { if strings.HasPrefix(file.Name(), toComplete) { // We are completing a file prefix completions = append(completions, file.Name()) } } } } cobra.CompDebugln(fmt.Sprintf("Completions after files: %v", completions), settings.Debug) // If the user didn't provide any input to completion, // we provide a hint that a path can also be used if includeFiles && len(toComplete) == 0 { completions = append(completions, "./\tRelative path prefix to local chart", "/\tAbsolute path prefix to local chart") } cobra.CompDebugln(fmt.Sprintf("Completions after checking empty input: %v", completions), settings.Debug) directive := cobra.ShellCompDirectiveDefault if noFile { directive = directive | cobra.ShellCompDirectiveNoFileComp } if noSpace { directive = directive | cobra.ShellCompDirectiveNoSpace } if !includeFiles { // If we should not include files in the completions, // we should disable file completion directive = directive | cobra.ShellCompDirectiveNoFileComp } return completions, directive }
package main

import (
	DBProvider "beaver/db"
	"beaver/user"
	"log"
	"net/http"

	"github.com/gin-gonic/gin"
)

// r is the shared gin engine, configured once in init.
var r *gin.Engine

// init wires up middleware, the database connection, and the /user routes.
func init() {
	r = gin.New()
	r.Use(gin.Logger())
	r.Use(gin.Recovery())

	db := DBProvider.InitDBConnection()
	user.UserRouter(r.Group("/user"), db)
}

func main() {
	r.LoadHTMLGlob("templates/*")
	r.GET("/", func(c *gin.Context) {
		c.HTML(http.StatusOK, "index.html", gin.H{})
	})
	// Run blocks until the server stops. FIX: the returned error was
	// previously discarded, so startup failures (e.g. port already in
	// use) exited silently with status 0.
	if err := r.Run(":8080"); err != nil {
		log.Fatal(err)
	}
}
// Copyright (c) 2012-2014 Jeremy Latt // Copyright (c) 2016-2017 Daniel Oaks <daniel@danieloaks.net> // released under the MIT license package irc import ( "bufio" "crypto/sha256" "crypto/tls" "encoding/hex" "errors" "io" "net" "strings" "sync" "time" ) var ( handshakeTimeout, _ = time.ParseDuration("5s") errSendQExceeded = errors.New("SendQ exceeded") ) // Socket represents an IRC socket. type Socket struct { sync.Mutex conn net.Conn reader *bufio.Reader maxSendQBytes int // coordination system for asynchronous writes buffer []byte lineToSendExists chan bool closed bool sendQExceeded bool finalData string // what to send when we die } // NewSocket returns a new Socket. func NewSocket(conn net.Conn, maxReadQBytes int, maxSendQBytes int) Socket { return Socket{ conn: conn, reader: bufio.NewReaderSize(conn, maxReadQBytes), maxSendQBytes: maxSendQBytes, lineToSendExists: make(chan bool, 1), } } // Close stops a Socket from being able to send/receive any more data. func (socket *Socket) Close() { socket.Lock() socket.closed = true socket.Unlock() socket.wakeWriter() } // CertFP returns the fingerprint of the certificate provided by the client. func (socket *Socket) CertFP() (string, error) { var tlsConn, isTLS = socket.conn.(*tls.Conn) if !isTLS { return "", errNotTLS } // ensure handehake is performed, and timeout after a few seconds tlsConn.SetDeadline(time.Now().Add(handshakeTimeout)) err := tlsConn.Handshake() tlsConn.SetDeadline(time.Time{}) if err != nil { return "", err } peerCerts := tlsConn.ConnectionState().PeerCertificates if len(peerCerts) < 1 { return "", errNoPeerCerts } rawCert := sha256.Sum256(peerCerts[0].Raw) fingerprint := hex.EncodeToString(rawCert[:]) return fingerprint, nil } // Read returns a single IRC line from a Socket. 
func (socket *Socket) Read() (string, error) {
	if socket.IsClosed() {
		return "", io.EOF
	}

	lineBytes, isPrefix, err := socket.reader.ReadLine()
	// isPrefix means the line overflowed the reader buffer (maxReadQBytes);
	// reject it rather than silently truncating.
	if isPrefix {
		return "", errReadQ
	}

	// convert bytes to string
	line := string(lineBytes)

	// read last message properly (such as ERROR/QUIT/etc), just fail next reads/writes
	if err == io.EOF {
		socket.Close()
	}

	if err == io.EOF && strings.TrimSpace(line) != "" {
		// don't do anything — deliver the final non-blank line; EOF will
		// surface on the next Read via the IsClosed check above
	} else if err != nil {
		return "", err
	}

	return line, nil
}

// Write sends the given string out of Socket.
// It only appends to the in-memory buffer and wakes the writer goroutine;
// the actual network write happens in RunSocketWriter. Returns io.EOF if
// the socket is closed and errSendQExceeded if the buffer would exceed
// maxSendQBytes.
func (socket *Socket) Write(data string) (err error) {
	socket.Lock()
	if socket.closed {
		err = io.EOF
	} else if len(data)+len(socket.buffer) > socket.maxSendQBytes {
		socket.sendQExceeded = true
		err = errSendQExceeded
	} else {
		socket.buffer = append(socket.buffer, data...)
	}
	socket.Unlock()

	socket.wakeWriter()
	return
}

// wakeWriter wakes up the goroutine that actually performs the write, without blocking
func (socket *Socket) wakeWriter() {
	// nonblocking send to the channel, no-op if it's full
	// (capacity 1, so one pending wakeup is always enough)
	select {
	case socket.lineToSendExists <- true:
	default:
	}
}

// SetFinalData sets the final data to send when the SocketWriter closes.
func (socket *Socket) SetFinalData(data string) {
	socket.Lock()
	defer socket.Unlock()
	socket.finalData = data
}

// IsClosed returns whether the socket is closed.
func (socket *Socket) IsClosed() bool {
	socket.Lock()
	defer socket.Unlock()
	return socket.closed
}

// RunSocketWriter starts writing messages to the outgoing socket.
// It is the single goroutine that touches conn for writes: it drains the
// shared buffer under the lock, writes it out unlocked, and exits on write
// error, Close, or SendQ overflow — then emits finalData and closes conn.
func (socket *Socket) RunSocketWriter() {
	localBuffer := make([]byte, 0)
	shouldStop := false
	for !shouldStop {
		// wait for new lines
		select {
		case <-socket.lineToSendExists:
			// retrieve the buffered data, clear the buffer
			socket.Lock()
			localBuffer = append(localBuffer, socket.buffer...)
			socket.buffer = socket.buffer[:0]
			socket.Unlock()

			// network write happens outside the lock so slow peers
			// don't block Write callers
			_, err := socket.conn.Write(localBuffer)
			localBuffer = localBuffer[:0]

			socket.Lock()
			shouldStop = (err != nil) || socket.closed || socket.sendQExceeded
			socket.Unlock()
		}
	}

	// mark the socket closed (if someone hasn't already), then write error lines
	socket.Lock()
	socket.closed = true
	finalData := socket.finalData
	if socket.sendQExceeded {
		// overflow takes priority over any caller-provided final data
		finalData = "\r\nERROR :SendQ Exceeded\r\n"
	}
	socket.Unlock()
	if finalData != "" {
		socket.conn.Write([]byte(finalData))
	}

	// close the connection
	socket.conn.Close()
}
package main

import "fmt"

// fibonacciSequence returns the first `index` Fibonacci numbers,
// starting F(0)=0, F(1)=1. A non-positive index yields an empty slice.
//
// NOTE: values are computed in uint64 and wrap around past F(93)
// (12200160415121876738, the largest Fibonacci number below 2^64).
// This matches the original behavior, which wrapped identically.
func fibonacciSequence(index int) []uint64 {
	if index < 0 {
		index = 0
	}
	seq := make([]uint64, 0, index)
	var a, b uint64 = 0, 1
	for i := 0; i < index; i++ {
		seq = append(seq, a)
		// Idiomatic simultaneous update; equivalent (mod 2^64) to the
		// old `a = b - a; b = a + b` trick, which tracked (F(i), F(i+2)).
		a, b = b, a+b
	}
	return seq
}

// fibonacci prints the first `index` Fibonacci numbers, one per line.
func fibonacci(index int) {
	for _, v := range fibonacciSequence(index) {
		fmt.Println(v)
	}
}

func main() {
	fibonacci(100)
}
package controllers import ( "github.com/danielkrainas/shrugmud/logging" "github.com/danielkrainas/shrugmud/server" ) type nannyState struct { } type nannyCtrl struct { } func Nanny() server.Ctrl { return &nannyCtrl{} } func (ctrl *nannyCtrl) Do(input string, d *server.Descriptor) error { logging.Trace.Printf("Ctrl.Nanny: processing %s", input) return nil } func (ctrl *nannyCtrl) ValidateState(state server.CtrlState) bool { _, ok := state.(nannyState) return ok } func (ctrl *nannyCtrl) NewState(d *server.Descriptor) server.CtrlState { return &nannyState{} }
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"regexp"
	"strings"
)

// letterOrDigit matches the characters we decode: digits pass through,
// letters a-j map onto digits 0-9.
var letterOrDigit = regexp.MustCompile(`[a-j0-9]`)

func main() {
	flag.Parse()
	if len(flag.Args()) != 1 {
		return
	}

	contents := MustOpenTextFile(flag.Args()[0])
	for _, line := range strings.Split(contents, "\n") {
		if line == "" {
			continue
		}

		var out strings.Builder
		for _, match := range letterOrDigit.FindAllString(line, -1) {
			c := match[0]
			if c >= '0' && c <= '9' {
				out.WriteByte(c)
			} else {
				// 'a' (97) - 49 == '0' (48), so a..j decode to 0..9.
				out.WriteByte(c - 49)
			}
		}

		if out.Len() == 0 {
			fmt.Println("NONE")
		} else {
			fmt.Println(out.String())
		}
	}
}

// MustOpenTextFile reads a text file or exits the program on failure.
func MustOpenTextFile(filename string) string {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatal(err)
	}
	return string(data)
}
package network

import (
	"context"
	"errors"
	"net"
	"reflect"
	"testing"

	"github.com/giantswarm/aws-operator/service/locker"
	"github.com/giantswarm/microerror"
	"github.com/giantswarm/micrologger/microloggertest"
)

// errArtificial is returned from stubbed callbacks to exercise error paths.
var errArtificial = errors.New("artificial error")

// mustParseCIDR parses a CIDR string into a net.IPNet, panicking on bad
// input. Test-fixture helper only.
func mustParseCIDR(val string) net.IPNet {
	_, n, err := net.ParseCIDR(val)
	if err != nil {
		panic(err)
	}
	return *n
}

// Test_Allocator covers subnet allocation: picking the first free subnet,
// filling the first gap among reserved subnets, and propagating errors
// from both allocation callbacks.
func Test_Allocator(t *testing.T) {
	testCases := []struct {
		name           string
		callbacks      AllocationCallbacks
		networkRange   net.IPNet
		subnetSize     net.IPMask
		expectedSubnet net.IPNet
		errorMatcher   func(error) bool
	}{
		{
			name: "case 0: allocate first subnet",
			callbacks: AllocationCallbacks{
				GetReservedNetworks:     func(_ context.Context) ([]net.IPNet, error) { return []net.IPNet{}, nil },
				PersistAllocatedNetwork: func(_ context.Context, _ net.IPNet) error { return nil },
			},
			networkRange:   mustParseCIDR("10.100.0.0/16"),
			subnetSize:     net.CIDRMask(24, 32),
			expectedSubnet: mustParseCIDR("10.100.0.0/24"),
			errorMatcher:   nil,
		},
		{
			// .2.0 is the first gap: .0, .1 and .3 are taken.
			name: "case 1: allocate fourth subnet",
			callbacks: AllocationCallbacks{
				GetReservedNetworks: func(_ context.Context) ([]net.IPNet, error) {
					reservedNetworks := []net.IPNet{
						mustParseCIDR("10.100.0.0/24"),
						mustParseCIDR("10.100.1.0/24"),
						mustParseCIDR("10.100.3.0/24"),
					}
					return reservedNetworks, nil
				},
				PersistAllocatedNetwork: func(_ context.Context, _ net.IPNet) error { return nil },
			},
			networkRange:   mustParseCIDR("10.100.0.0/16"),
			subnetSize:     net.CIDRMask(24, 32),
			expectedSubnet: mustParseCIDR("10.100.2.0/24"),
			errorMatcher:   nil,
		},
		{
			name: "case 2: handle error from getting reserved networks",
			callbacks: AllocationCallbacks{
				GetReservedNetworks:     func(_ context.Context) ([]net.IPNet, error) { return nil, errArtificial },
				PersistAllocatedNetwork: func(_ context.Context, _ net.IPNet) error { return nil },
			},
			networkRange:   mustParseCIDR("10.100.0.0/16"),
			subnetSize:     net.CIDRMask(24, 32),
			expectedSubnet: net.IPNet{},
			errorMatcher:   func(err error) bool { return microerror.Cause(err) == errArtificial },
		},
		{
			name: "case 3: handle error from persisting allocated network",
			callbacks: AllocationCallbacks{
				GetReservedNetworks:     func(_ context.Context) ([]net.IPNet, error) { return []net.IPNet{}, nil },
				PersistAllocatedNetwork: func(_ context.Context, _ net.IPNet) error { return errArtificial },
			},
			networkRange:   mustParseCIDR("10.100.0.0/16"),
			subnetSize:     net.CIDRMask(24, 32),
			expectedSubnet: net.IPNet{},
			errorMatcher:   func(err error) bool { return microerror.Cause(err) == errArtificial },
		},
	}

	var err error

	// Shared mutex-based locker used by every case; built once.
	var mutexLocker locker.Interface
	{
		c := locker.MutexLockerConfig{
			Logger: microloggertest.New(),
		}
		mutexLocker, err = locker.NewMutexLocker(c)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Single allocator under test, shared across all cases.
	var a Allocator
	{
		c := Config{
			Locker: mutexLocker,
			Logger: microloggertest.New(),
		}
		a, err = New(c)
		if err != nil {
			t.Fatal(err)
		}
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// NOTE(review): `net` shadows the stdlib net package inside
			// this subtest; a different variable name would be clearer.
			net, err := a.Allocate(context.Background(), tc.networkRange, tc.subnetSize, tc.callbacks)

			switch {
			case err == nil && tc.errorMatcher == nil:
				// correct; carry on
			case err != nil && tc.errorMatcher == nil:
				t.Fatalf("error == %#v, want nil", err)
			case err == nil && tc.errorMatcher != nil:
				t.Fatalf("error == nil, want non-nil")
			case !tc.errorMatcher(err):
				t.Fatalf("error == %#v, want matching", err)
			}

			if !reflect.DeepEqual(net, tc.expectedSubnet) {
				t.Fatalf("Allocated subnet == %q, want %q", net.String(), tc.expectedSubnet.String())
			}
		})
	}
}
/* Euler discovered the remarkable quadratic formula: n^2 + n + 41 It turns out that the formula will produce 40 primes for the consecutive integer values 0 <= n < 39. However, when n=40 40^2 + 40 + 41=40(40+1) + 41 is divisible by 41, and certainly when n=41, 41^2 + 41 + 41 is clearly divisible by 41. The incredible formula n^2 - 79n + 1601 was discovered, which produces 80 primes for the consecutive values 0 <= n <= 79. The product of the coefficients, −79 and 1601, is −126479. Considering quadratics of the form: n^2 + an + b, where |a| < 1000 and |b| <= 1000 where |n| is the modulus/absolute value of n e.g. |11 = 11 and |-4| = 4 Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n=0. */ package main import ( "fmt" "math/big" ) func main() { fmt.Println(solve(1000)) } func solve(n int) int { r := 0 m := 0 for a := -n; a < n; a++ { for b := -n; b <= n; b++ { v := chain(a, b) if m < v { r = a * b m = v } } } return r } func chain(a, b int) int { n := 0 for { v := n*n + a*n + b if !isprime(v) { break } n++ } return n } func isprime(n int) bool { x := big.NewInt(int64(n)) return x.ProbablyPrime(2) }
package order

import (
	"fmt"
	"time"

	"github.com/tppgit/we_service/log"
	"github.com/tppgit/we_service/log/field"

	"strings"

	"github.com/satori/go.uuid"

	"github.com/tppgit/we_service/core"
	"github.com/tppgit/we_service/database"
	"github.com/tppgit/we_service/dto/worder"
)

// commentRepository persists Comment rows via the injected DB engine.
type commentRepository struct {
	DB database.DBEngine `inject:"db_engine"`
}

// CreateCommentRepository returns an empty repository; DB is injected later.
func CreateCommentRepository() *commentRepository {
	return &commentRepository{}
}

// InsertComment stamps CreatedAt (UTC) and inserts the comment.
func (c *commentRepository) InsertComment(model *Comment) error {
	// TODO: Add UserID for comment chat.
	model.CreatedAt = time.Now().UTC()
	// NOTE(review): passes &model (**Comment) to Create while sibling
	// methods pass the pointer directly — confirm the engine handles both.
	if err := c.DB.Create(&model).Error(); err != nil {
		log.Error("InsertComment error, ", field.Error(err))
		return err
	}
	return nil
}

// UpdateComment writes the comment back to the store.
// NOTE(review): this calls Create, not Save/Update — looks like a
// copy-paste slip; confirm whether an insert is really intended here.
func (c *commentRepository) UpdateComment(model *Comment) error {
	result := c.DB.Create(model)
	return result.Error()
}

// DeleteComment removes the comment.
func (c *commentRepository) DeleteComment(model *Comment) error {
	if err := c.DB.Delete(model).Error(); err != nil {
		log.Error("DeleteComment error, ", field.Error(err))
		return err
	}
	return nil
}

// GetCommentById loads a comment by primary key.
func (c *commentRepository) GetCommentById(id uuid.UUID) (*Comment, error) {
	model := &Comment{ID: id}
	if err := c.DB.Find(&model).Error(); err != nil {
		log.Error("GetCommentById error, ", field.Error(err))
		return nil, err
	}
	return model, nil
}

// GetListComment returns all comments of an order with their User preloaded,
// ordered by creation time.
func (c *commentRepository) GetListComment(orderId uuid.UUID) (comments []*Comment, err error) {
	if err := c.DB.Model(comments).Preload("User").Where("fk_order = ? ", orderId.String()).Order("created_at").
		Find(&comments).Error(); err != nil {
		log.Error("GetListComment error, ", field.Error(err))
		return nil, err
	}
	return comments, nil
}

// residentGetAllCommentAppointment returns a page of an order's comments
// (with total count) for the resident view; SortType defaults to "desc".
func (this *commentRepository) residentGetAllCommentAppointment(query *worder.ResidentGetCommentAppointment) ([]*Comment, int32, error) {
	var order Order
	var comments []*Comment
	var totalItems int32
	//get order by resident first
	if err := this.DB.Model(&Order{}).Where(&Order{ID: query.OrderId}).Find(&order).Error(); err != nil {
		log.Error("residentGetAllCommentAppointment error, ", field.Error(err))
		return nil, 0, err
	}
	// normalize sort direction; anything other than "asc" becomes "desc"
	var sortType = strings.ToLower(strings.TrimSpace(query.SortType))
	if sortType != "asc" {
		sortType = "desc"
	}
	//then get comment by that orderId
	if err := this.DB.Model(&Comment{}).Where("fk_order = ?", query.OrderId).Count(&totalItems).
		Offset(int((query.Page - 1) * query.PageSize)).
		Limit(int(query.PageSize)).Preload("User").
		Order(fmt.Sprintf("created_at %s", sortType)).
		Find(&comments).Error(); err != nil {
		log.Error("residentGetAllCommentAppointment error, ", field.Error(err))
		return nil, 0, err
	}
	return comments, totalItems, nil
}

// historyRepository persists History rows.
type historyRepository struct {
	DB database.DBEngine `inject:"db_engine"`
}

func CreateHistoryRepository() *historyRepository {
	return &historyRepository{}
}

// InsertHistory stamps CreatedAt (UTC) and inserts the record.
func (c *historyRepository) InsertHistory(model *History) error {
	model.CreatedAt = time.Now().UTC()
	if err := c.DB.Create(&model).Error(); err != nil {
		log.Error("InsertHistory error, ", field.Error(err))
		return err
	}
	return nil
}

// UpdateHistory stamps UpdatedAt (UTC) and saves the record.
func (c *historyRepository) UpdateHistory(model *History) error {
	model.UpdatedAt = time.Now().UTC()
	if err := c.DB.Save(&model).Error(); err != nil {
		log.Error("UpdateHistory error, ", field.Error(err))
		return err
	}
	return nil
}

// DeleteHistory removes the record.
func (c *historyRepository) DeleteHistory(model *History) error {
	if err := c.DB.Delete(&model).Error(); err != nil {
		log.Error("DeleteHistory error, ", field.Error(err))
		return err
	}
	return nil
}

// GetHistoryById loads a history record by primary key.
func (c *historyRepository) GetHistoryById(id uuid.UUID) (*History, error) {
	model := History{ID: id}
	if err := c.DB.Find(&model).Error(); err != nil {
		log.Error("GetHistoryById error, ", field.Error(err))
		return nil, err
	}
	return &model, nil
}

// orderRepository persists Order rows.
type orderRepository struct {
	DB database.DBEngine `inject:"db_engine"`
}

func CreateOrderRepository() *orderRepository {
	return &orderRepository{}
}

// GetOrdersByResidentId returns a page of a resident's active orders
// (not Completed/Cancel), optionally filtered by staff-name search.
// NOTE(review): query.Paging is nil-checked for the search filter but
// dereferenced unconditionally below — confirm callers always set it.
func (c *orderRepository) GetOrdersByResidentId(query *core.GetOrderByResident, id string) (totalItem int32, orders []*Order, err error) {
	result := c.DB.Model(orders).
		Preload("Company").
		Preload("Staff").
		Where("fk_resident = ?", id).
		Not("state", []string{string(Completed), string(Cancel)})
	if query.Paging != nil && query.Paging.TextSearch != "" {
		result = result.Joins("JOIN users ON users.id = orders.fk_staff")
		result = result.Where("users.name LIKE ? ", "%"+query.Paging.GetTextSearch()+"%")
	}
	if err = result.
		Limit(int(query.Paging.GetPageSize())).
		Offset((int(query.Paging.GetCurrentPage()) - 1) * int(query.Paging.GetPageSize())).
		Count(&totalItem).
		Find(&orders).Error(); err != nil {
		log.Error("GetOrdersByResidentId error, ", field.Error(err))
		return 0, nil, err
	}
	return totalItem, orders, result.Error()
}

// GetOrdersByCompanyId returns all of a company's orders with their
// relations preloaded.
func (c *orderRepository) GetOrdersByCompanyId(id uuid.UUID) ([]*Order, error) {
	var orders []*Order
	if err := c.DB.Model(&orders).
		Preload("Service").
		Preload("Company").
		Preload("Resident").
		Preload("Staff").
		Preload("Comments").
		Where("fk_company = ?", id.String()).
		Find(&orders).Error(); err != nil {
		log.Error("GetOrdersByCompanyId error, ", field.Error(err))
		return nil, err
	}
	return orders, nil
}

// GetStatusOrdersByStaffId returns per-group order counts (new, pending,
// accepted, payment) for one staff member.
func (c *orderRepository) GetStatusOrdersByStaffId(query *core.GetOrdersByStaffId, id string) (list []*CountOrderByState, err error) {
	newRequest, err := c.queryListStatusOrderOfStaffFollowGroup(query, id, NewRequest)
	if err != nil {
		return list, err
	}
	pending, err := c.queryListStatusOrderOfStaffFollowGroup(query, id, string(ResidentPending), string(SPPending))
	if err != nil {
		return list, err
	}
	accepted, err := c.queryListStatusOrderOfStaffFollowGroup(query, id, string(RequestConfirmed), string(InProgress))
	if err != nil {
		return list, err
	}
	payment, err := c.queryListStatusOrderOfStaffFollowGroup(query, id, string(PaymentPending))
	if err != nil {
		return list, err
	}
	list = append(list, &CountOrderByState{Label: LabelNew, Total: newRequest},
		&CountOrderByState{Label: LabelPending, Total: pending},
		&CountOrderByState{Label: LabelAccepted, Total: accepted},
		&CountOrderByState{Label: LabelPayment, Total: payment})
	return list, err
}

// GetListDataOrderByStaffId returns a staff member's active orders,
// optionally filtered by comma-separated states and a date range,
// ordered by start date/time.
func (c *orderRepository) GetListDataOrderByStaffId(query *core.GetOrdersByStaffId, id string) (data []*Order, err error) {
	result := c.DB.Model(&Order{}).
		Preload("Resident").
		Where(&Order{StaffID: uuid.FromStringOrNil(id)}).
		Where("state not in (?)", []string{string(Completed), string(Cancel)}).Order("date_start , time_start asc")
	if query.Status != "" {
		result = result.Where("state in (?)", strings.Split(query.Status, ","))
	}
	if query.DateStart != "" && query.DateEnd != "" {
		result = result.Where("date_start BETWEEN ? AND ?", query.DateStart, query.DateEnd)
	}
	if err := result.Find(&data).Error(); err != nil {
		log.Error("GetListDataOrderByStaffId error, ", field.Error(err))
		return nil, err
	}
	return data, nil
}

// InsertOrder inserts an order.
func (c *orderRepository) InsertOrder(model *Order) error {
	if err := c.DB.Create(model).Error(); err != nil {
		log.Error("InsertOrder error, ", field.Error(err))
		return err
	}
	return nil
}

// UpdateOrder updates an order from the given model.
func (c *orderRepository) UpdateOrder(data *Order) error {
	// TODO: Add state machine for booking.
	if err := c.DB.Model(data).Update(data).Error(); err != nil {
		log.Error("UpdateOrder error, ", field.Error(err))
		return err
	}
	return nil
}

// UpdateNotificationOrder sets the flag_notification column for one order.
func (c *orderRepository) UpdateNotificationOrder(id string, flag bool) error {
	if err := c.DB.Model(&Order{ID: uuid.FromStringOrNil(id)}).Update("flag_notification", flag).Error(); err != nil {
		log.Error("UpdateNotificationOrder error, ", field.Error(err))
		return err
	}
	return nil
}

// DeleteOrder removes an order.
func (c *orderRepository) DeleteOrder(model *Order) error {
	if err := c.DB.Delete(model).Error(); err != nil {
		log.Error("DeleteOrder error, ", field.Error(err))
		return err
	}
	return nil
}

// GetOrderById loads a single order with its relations preloaded.
func (c *orderRepository) GetOrderById(id uuid.UUID) (*Order, error) {
	model := &Order{}
	if err := c.DB.
		Preload("Service").
		Preload("Company").
		Preload("Resident").
		Preload("Staff").
		Where("id = ?", id.String()).First(model).Error(); err != nil {
		log.Error("GetOrderById error, ", field.Error(err))
		return nil, err
	}
	return model, nil
}

// GetHistoryOrderByStaffId returns a page of a staff member's finished
// (Cancel/Completed) orders.
func (c *orderRepository) GetHistoryOrderByStaffId(id string, req *core.StaffGetListHistoryOrderRequest) (orders []*Order, err error) {
	if err := c.DB.Model(&Order{}).
		Preload("Resident").
		Where("fk_staff = ? AND state in (?)", id, []string{string(Cancel), string(Completed)}).
		Limit(int(req.Paging.GetPageSize())).
		Offset((int(req.Paging.GetCurrentPage()) - 1) * int(req.Paging.GetPageSize())).
		Find(&orders).Error(); err != nil {
		log.Error("GetHistoryOrderByStaffId error, ", field.Error(err))
		return nil, err
	}
	return orders, nil
}

// GetAppointmentHistory returns a page of a company's finished orders,
// filtered by staff-name search and optional date range, with sorting on
// an arbitrary column or the computed "earning" (total_cost + extra_cost).
func (c *orderRepository) GetAppointmentHistory(q *worder.AppointmentHistory) ([]*Order, *int32, error) {
	var totalItems int32
	var models []*Order
	result := c.DB.Model(&Order{}).
		Preload("Staff").
		Where(&Order{CompanyID: q.CompanyId}).Where("state in (?)", []string{string(Cancel), string(Completed)}).
		Joins("JOIN users ON users.id = orders.fk_staff").
		Where("users.name LIKE ?", "%"+q.TextSearch+"%")
	if q.StartDate != "" && q.EndDate != "" {
		result = result.Where("date_start BETWEEN ? AND ?", q.StartDate, q.EndDate)
	}
	// NOTE(review): SortColumn/SortType are interpolated into SQL —
	// confirm they are validated upstream against a whitelist.
	if q.SortColumn == "earning" {
		result = result.Order(fmt.Sprintf("(total_cost + extra_cost) %s", q.SortType))
	}
	if q.SortColumn != "" && q.SortColumn != "earning" {
		result = result.Order(q.SortColumn + " " + q.SortType)
	}
	if err := result.Count(&totalItems).
		Offset(int((q.Page - 1) * q.PageSize)).
		Limit(int(q.PageSize)).
		Find(&models).Error(); err != nil {
		log.Error("GetAppointmentHistory error, ", field.Error(err))
		return nil, nil, err
	}
	return models, &totalItems, nil
}

// GetListHistoryOrderByResident returns a page of a resident's finished
// orders, optionally filtered by name search.
func (c *orderRepository) GetListHistoryOrderByResident(query *core.ResidentGetListHistoryOrderRequest, id string) (totalItem int32, orders []*Order, err error) {
	result := c.DB.Model(orders).
		Preload("Company").
		Preload("Staff").
		Where("fk_resident = ?", id).
		Where("state in (?)", []string{string(Completed), string(Cancel)})
	if query.Paging != nil && query.Paging.TextSearch != "" {
		result = result.Joins("JOIN users ON users.id = orders.fk_resident")
		result = result.Where("users.name LIKE ? ", "%"+query.Paging.GetTextSearch()+"%")
	}
	if err := result.
		Limit(int(query.Paging.GetPageSize())).
		Offset((int(query.Paging.GetCurrentPage()) - 1) * int(query.Paging.GetPageSize())).
		Count(&totalItem).
		Find(&orders).Error(); err != nil {
		log.Error("GetListHistoryOrderByResident error, ", field.Error(err))
		return 0, nil, err
	}
	return totalItem, orders, nil
}

// GetAllOrderForResident returns a page of all of a resident's orders
// sorted by start date/time; SortType defaults to "desc".
// NOTE(review): Count runs before the State/DateStart filters are added,
// so `total` counts the unfiltered set — confirm this is intended.
func (c *orderRepository) GetAllOrderForResident(id string, query *core.ResidentGetAllOrderRequest) (data []*Order, total int64, err error) {
	var sortType = strings.ToLower(strings.TrimSpace(query.SortType))
	if sortType != "asc" {
		sortType = "desc"
	}
	result := c.DB.Model(&Order{}).
		Preload("Staff").
		Preload("Service").
		Preload("Company").
		Where(&Order{ResidentID: uuid.FromStringOrNil(id)}).Order(fmt.Sprintf("date_start %s , time_start %s", sortType, sortType)).Count(&total)
	if query.State != nil {
		result = result.Where("state in (?)", strings.Split(query.State[0], ","))
	}
	if query.DateStart != "" {
		result = result.Where("date_start >= ? ", query.DateStart)
	}
	result = result.Limit(int(query.Paging.GetPageSize())).Offset((int(query.Paging.GetCurrentPage()) - 1) * int(query.Paging.GetPageSize())).Find(&data)
	if result.Error() != nil {
		log.Error("GetAllOrderForResident Error", field.Error(result.Error()))
		return nil, 0, result.Error()
	}
	return data, total, nil
}

// GetAllOrderReminder returns in-flight orders whose updated_at is in the
// past (candidates for reminder notifications).
func (c *orderRepository) GetAllOrderReminder() (data []*Order, err error) {
	result := c.DB.Model(&Order{}).
		Where("state in (?)", []string{string(InProgress), string(NewRequest), string(ResidentPending)}).
		Where("DATEDIFF(updated_at, CURRENT_TIMESTAMP()) < 0").
		Find(&data)
	return data, result.Error()
}

// GetNotificationByOrderIdAndState counts unread notifications for an
// order in a given state; returns 0 on query error.
func (c *orderRepository) GetNotificationByOrderIdAndState(id, state string) int32 {
	var notification NotificationForOrderMobile
	if err := c.DB.Raw("select count(*) as total from order_notifications where state = ? and order_id = ? and is_read = ?", state, id, false).
		Scan(&notification).Error(); err != nil {
		log.Error("Error", field.Error(err))
		return 0
	}
	return notification.Total
}

// paymentRepository persists Payment rows.
type paymentRepository struct {
	DB database.DBEngine `inject:"db_engine"`
}

func CreatePaymentRepository() *paymentRepository {
	return &paymentRepository{}
}

// InsertPayment stamps CreatedAt (UTC) and inserts the payment.
func (c *paymentRepository) InsertPayment(model *Payment) error {
	model.CreatedAt = time.Now().UTC()
	if err := c.DB.Create(model).Error(); err != nil {
		log.Error("InsertPayment error, ", field.Error(err))
		return err
	}
	return nil
}

// UpdatePayment saves the payment.
// NOTE(review): this re-stamps CreatedAt rather than UpdatedAt — looks
// like a copy-paste from InsertPayment; confirm intended.
func (c *paymentRepository) UpdatePayment(model *Payment) error {
	model.CreatedAt = time.Now().UTC()
	if err := c.DB.Save(model).Error(); err != nil {
		log.Error("UpdatePayment error, ", field.Error(err))
		return err
	}
	return nil
}

// DeletePayment removes the payment.
func (c *paymentRepository) DeletePayment(model *Payment) error {
	if err := c.DB.Delete(model).Error(); err != nil {
		log.Error("DeletePayment error, ", field.Error(err))
		return err
	}
	return nil
}

// GetPaymentById loads a payment by primary key.
func (c *paymentRepository) GetPaymentById(id uuid.UUID) (*Payment, error) {
	model := &Payment{ID: id}
	if err := c.DB.Find(model).Error(); err != nil {
		log.Error("GetPaymentById error, ", field.Error(err))
		return nil, err
	}
	return model, nil
}

// statusRepository persists Status rows.
type statusRepository struct {
	DB database.DBEngine `inject:"db_engine"`
}

func CreateStatusRepository() *statusRepository {
	return &statusRepository{}
}

// InsertStatus stamps CreatedAt (UTC) and inserts the status.
// NOTE(review): the log tag "GetPayInsertStatusmentById" is garbled
// (copy-paste of GetPaymentById); left untouched as it is a runtime string.
func (c *statusRepository) InsertStatus(model *Status) error {
	model.CreatedAt = time.Now().UTC()
	if err := c.DB.Create(model).Error(); err != nil {
		log.Error("GetPayInsertStatusmentById error, ", field.Error(err))
		return err
	}
	return nil
}

// UpdateStatus stamps UpdatedAt (UTC) and saves the status.
func (c *statusRepository) UpdateStatus(model *Status) error {
	model.UpdatedAt = time.Now().UTC()
	if err := c.DB.Save(model).Error(); err != nil {
		log.Error("UpdateStatus error, ", field.Error(err))
		return err
	}
	return nil
}

// DeleteStatus removes the status.
func (c *statusRepository) DeleteStatus(model *Status) error {
	if err := c.DB.Delete(&model).Error(); err != nil {
		log.Error("DeleteStatus error, ", field.Error(err))
		return err
	}
	return nil
}

// GetStatusById loads a status by primary key.
func (c *statusRepository) GetStatusById(id int32) (*Status, error) {
	model := Status{ID: id}
	if err := c.DB.Find(&model).Error(); err != nil {
		log.Error("GetStatusById error, ", field.Error(err))
		return nil, err
	}
	return &model, nil
}

// queryListStatusOrderOfStaffFollowGroup counts a staff member's orders
// whose state is in st, optionally bounded by the query's date range.
func (c *orderRepository) queryListStatusOrderOfStaffFollowGroup(query *core.GetOrdersByStaffId, id string, st ...string) (total int32, err error) {
	result := c.DB.Model(&Order{}).Where("fk_staff =? and state in (?) ", id, st)
	if query.DateStart != "" && query.DateEnd != "" {
		result = result.Where("date_start BETWEEN ? AND ?", query.DateStart, query.DateEnd)
	}
	result = result.Group("id").Count(&total)
	return total, result.Error()
}

// OrderVersionRepository provides read/write access to order versions.
type OrderVersionRepository interface {
	GetOrderVersionsByOrderId(orderId uuid.UUID) ([]*OrderVersion, error)
	GetOrderChangesHistoryByOrderId(orderId uuid.UUID) ([]*OrderVersion, error)
	Insert(c *OrderVersion) error
}

type orderVersionRepository struct {
	DB database.DBEngine `inject:"db_engine"`
}

func CreateOrderVersionRepository() *orderVersionRepository {
	return &orderVersionRepository{}
}

// GetOrderVersionsByOrderId returns all versions of an order with their
// relations preloaded.
func (o *orderVersionRepository) GetOrderVersionsByOrderId(orderId uuid.UUID) ([]*OrderVersion, error) {
	var orderVersions []*OrderVersion
	result := o.DB.Model(&orderVersions).
		Preload("Service").
		Preload("Company").
		Preload("Resident").
		Preload("Staff").
		Preload("Comments").
		Preload("Status").
		Where("fk_order = ?", orderId.String()).
		Find(&orderVersions)
	return orderVersions, result.Error()
}

// GetOrderChangesHistoryByOrderId returns an order's change history,
// newest first, with the user who made each change preloaded.
func (o *orderVersionRepository) GetOrderChangesHistoryByOrderId(orderId uuid.UUID) ([]*OrderVersion, error) {
	var orderVersions []*OrderVersion
	result := o.DB.Model(&orderVersions).
		Where("fk_order = ?", orderId.String()).
		Preload("WhoChangedUser").
		Order("created_at desc").
		Find(&orderVersions)
	return orderVersions, result.Error()
}

// Insert saves a new order version.
func (o *orderVersionRepository) Insert(c *OrderVersion) error {
	result := o.DB.Save(c)
	return result.Error()
}
package route

import (
	"github.com/nokamoto/grpc-proxy/descriptor"
	"github.com/nokamoto/grpc-proxy/yaml"
	"testing"
)

// TestNewRoutes_ping_method_prefix checks that a prefix-matched method
// route builds without error.
func TestNewRoutes_ping_method_prefix(t *testing.T) {
	_, cleanup, err := testRoutes(t, "../testdata/yaml/ping.yaml")
	defer cleanup()
	if err != nil {
		t.Fatal(err)
	}
}

// TestNewRoutes_ping_method_equal checks that an exact-matched method
// route builds without error.
func TestNewRoutes_ping_method_equal(t *testing.T) {
	_, cleanup, err := testRoutes(t, "../testdata/yaml/ping_method_equal.yaml")
	defer cleanup()
	if err != nil {
		t.Fatal(err)
	}
}

// TestNewRoutes_ping_method_missing expects a *missingRoutesError when a
// method has no matching route.
func TestNewRoutes_ping_method_missing(t *testing.T) {
	_, cleanup, err := testRoutes(t, "../testdata/yaml/ping_method_missing.yaml")
	defer cleanup()
	if err == nil {
		t.Fatal()
	}
	if _, ok := err.(*missingRoutesError); !ok {
		t.Fatal(err)
	}
}

// TestNewRoutes_ping_method_ambiguous expects an *ambiguousRoutesError
// when more than one route matches a method.
func TestNewRoutes_ping_method_ambiguous(t *testing.T) {
	_, cleanup, err := testRoutes(t, "../testdata/yaml/ping_method_ambiguous.yaml")
	defer cleanup()
	if err == nil {
		t.Fatal()
	}
	if _, ok := err.(*ambiguousRoutesError); !ok {
		t.Fatal(err)
	}
}

// testRoutes builds Routes from the shared ping descriptor and the given
// yaml file, returning the routes, a cleanup func (safe when routes are
// nil), and the construction error.
func testRoutes(t *testing.T, y string) (*Routes, func(), error) {
	t.Helper()

	pb, err := descriptor.NewDescriptor("../testdata/protobuf/ping/ping.pb")
	if err != nil {
		t.Fatal(err)
	}

	yml, err := yaml.NewYaml(y)
	if err != nil {
		t.Fatal(err)
	}

	routes, err := NewRoutes(pb, yml)
	cleanup := func() {
		if routes != nil {
			routes.Destroy()
		}
	}
	return routes, cleanup, err
}
// +build !windows,!linux

package main

import (
	"os"
	"syscall"

	"github.com/nsf/termbox-go"
)

// handleSpecialKeys implements shell job control for Ctrl-Z: the termbox UI
// is shut down, the process stops itself with SIGSTOP, and termbox is
// re-initialized once the process is resumed in the foreground.
func handleSpecialKeys(key termbox.Key) {
	if key == termbox.KeyCtrlZ {
		process, _ := os.FindProcess(os.Getpid())
		termbox.Close()
		process.Signal(syscall.SIGSTOP)
		termbox.Init()
	}
}

// outputMode selects termbox's 256-color output; all style values below are
// indices into that palette.
const outputMode = termbox.Output256

// defaultStyle returns the default 256-color theme. Each field is a termbox
// palette index (termbox.Attribute); the trailing runes are the glyphs used
// to render spaces and bits.
func defaultStyle() Style {
	var style Style
	style.default_bg = termbox.Attribute(1)
	style.default_fg = termbox.Attribute(256)
	style.rune_fg = termbox.Attribute(248)
	style.int_fg = termbox.Attribute(154)
	style.bit_fg = termbox.Attribute(154)
	style.space_rune_fg = termbox.Attribute(240)
	style.selected_option_bg = termbox.Attribute(240)
	style.search_progress_fg = termbox.Attribute(76)
	style.text_cursor_hex_bg = termbox.Attribute(167)
	style.bit_cursor_hex_bg = termbox.Attribute(26)
	style.int_cursor_hex_bg = termbox.Attribute(63)
	style.fp_cursor_hex_bg = termbox.Attribute(127)
	style.hilite_hex_fg = termbox.Attribute(231)
	style.hilite_rune_fg = termbox.Attribute(256)
	style.about_logo_bg = termbox.Attribute(125)
	// The field editor reuses the default colors with fg/bg swapped.
	style.field_editor_bg = style.default_fg
	style.field_editor_fg = style.default_bg
	style.field_editor_last_bg = style.rune_fg
	style.field_editor_last_fg = style.default_fg
	style.field_editor_invalid_bg = termbox.Attribute(125)
	style.field_editor_invalid_fg = style.rune_fg
	style.space_rune = '•'
	style.filled_bit_rune = '●'
	style.empty_bit_rune = '○'
	return style
}
package expvar import ( "strings" "github.com/gofiber/fiber/v2" "github.com/valyala/fasthttp/expvarhandler" ) // New creates a new middleware handler func New() fiber.Handler { // Return new handler return func(c *fiber.Ctx) error { path := c.Path() // We are only interested in /debug/vars routes if len(path) < 11 || !strings.HasPrefix(path, "/debug/vars") { return c.Next() } if path == "/debug/vars" { expvarhandler.ExpvarHandler(c.Context()) return nil } return c.Redirect("/debug/vars", 302) } }
/*
Resistor-approximation task: given a list of stocked resistor values and a
target resistance, enumerate every configuration of one resistor, two
resistors in series ("+") and two resistors in parallel ("|") — including a
pair of the same value — and print them ordered by how well they
approximate the target, using true logarithmic distance
dist = |log(Rapprox / Rtarget)|.

Series:   R1 + R2
Parallel: 1 / (1/R1 + 1/R2)

Example: stock [100, 150, 220, 330, 470, 680, 1000, 1500, 2200, 3300, 4700]
with target 510 yields 143 configurations, the best being 680 | 2200
(≈ 519.444 ohms).
*/
package main

import (
	"fmt"
	"math"
	"sort"
)

// resistance is one configuration: op is 'u' (single resistor, r2 unused),
// '+' (series) or '|' (parallel); value is the net resistance in ohms.
type resistance struct {
	op     rune
	r1, r2 float64
	value  float64
}

func main() {
	configs := gen([]float64{100, 150, 220, 330, 470, 680, 1000, 1500, 2200, 3300, 4700}, 510)
	dump(configs)
}

// gen builds every 1- and 2-resistor configuration from v and returns them
// sorted by |log(value/t)| ascending, i.e. best approximation of t first.
func gen(v []float64, t float64) []resistance {
	out := []resistance{}
	for i, a := range v {
		out = append(out, resistance{op: 'u', r1: a, value: a})
		// Pairs start at index i so each unordered pair (including a
		// value with itself) is generated exactly once.
		for _, b := range v[i:] {
			out = append(out,
				resistance{op: '+', r1: a, r2: b, value: a + b},
				resistance{op: '|', r1: a, r2: b, value: 1 / (1/a + 1/b)})
		}
	}
	dist := func(x float64) float64 { return math.Abs(math.Log(x / t)) }
	sort.Slice(out, func(i, j int) bool {
		return dist(out[i].value) < dist(out[j].value)
	})
	return out
}

// dump prints one configuration per line: "R = V" for singles and
// "R1 op R2 = V" for pairs.
func dump(r []resistance) {
	for _, c := range r {
		if c.op == 'u' {
			fmt.Printf("%4.0f = %.3f\n", c.r1, c.value)
		} else {
			fmt.Printf("%4.0f %c %4.0f = %.3f\n", c.r1, c.op, c.r2, c.value)
		}
	}
}
//+build .
// NOTE(review): malformed build constraint — the canonical form is
// "// +build tag" with a space and a real tag. As written, together with
// the stubs below, this file does not compile and reads as an unfinished
// sketch.

package shmallocator

import (
	"os"
	"syscall"
	"unsafe"
)

// SegmentManager is intended to hand out allocations from a shared-memory
// mapped region. Only the mapping half (MappedRegion) is implemented.
type SegmentManager struct {
	region MappedRegion
}

// NewSegmentManager — FIXME: truncated. "syscall.Mm" is an incomplete
// identifier (presumably syscall.Mmap was intended) and the function never
// returns a *SegmentManager.
func NewSegmentManager(fd int) *SegmentManager {
	syscall.Mm
}

// Allocate — FIXME: empty stub; a function with a result type must return.
func (s *SegmentManager) Allocate(size uintptr) unsafe.Pointer {}

// DeAllocate — FIXME: empty stub, no implementation yet.
func (s *SegmentManager) DeAllocate(ptr unsafe.Pointer) {}

// GetPointer — FIXME: stub missing its return statement.
func (s *SegmentManager) GetPointer() unsafe.Pointer {
}

// MappedRegion records the base address of a file-backed shared mapping.
type MappedRegion struct {
	base uintptr
}

// NewMappedRegion opens (creating if necessary) path, extends it to size
// bytes by writing zeros, maps it PROT_WRITE/MAP_SHARED, closes the file
// descriptor (the mapping survives the close) and returns a region whose
// base is the mapping's start address. Any failure panics.
func NewMappedRegion(path string, size uintptr) *MappedRegion {
	fd, err := syscall.Open(path, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	if _, err = syscall.Write(fd, make([]byte, size)); err != nil {
		panic(err)
	}
	data, err := syscall.Mmap(fd, 0, int(size), syscall.PROT_WRITE, syscall.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	if err = syscall.Close(fd); err != nil {
		panic(err)
	}
	region := &MappedRegion{}
	region.base = uintptr(unsafe.Pointer(&data[0]))
	return region
}
package goproxy

import (
	"net/http"
)

// Plugin gives a way to get a source object for a request
type Plugin interface {
	// Module returns a Module for the given request and route prefix.
	Module(req *http.Request, prefix string) (Module, error)
	// Leave releases a Module previously obtained from Module.
	Leave(source Module) error
	// Close shuts the plugin down.
	Close() error
	// String returns a human-readable description of the plugin.
	String() string
}
package common

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestNewUpload checks that a fresh upload gets an ID and an upload token.
func TestNewUpload(t *testing.T) {
	upload := NewUpload()
	require.NotNil(t, upload)
	require.NotZero(t, upload.ID, "missing upload id")
	require.NotZero(t, upload.UploadToken, "missing upload token")
}

// TestUploadNewFile checks that NewFile attaches a file to the upload.
func TestUploadNewFile(t *testing.T) {
	upload := &Upload{}
	upload.NewFile()
	require.NotZero(t, len(upload.Files), "invalid file count")
}

// TestUploadSanitize checks that Sanitize strips every private field and
// copies the download domain from the configuration.
func TestUploadSanitize(t *testing.T) {
	upload := &Upload{}
	upload.NewFile()
	upload.RemoteIP = "ip"
	upload.Login = "login"
	upload.Password = "password"
	upload.UploadToken = "token"
	upload.Token = "token"
	upload.User = "user"
	config := NewConfiguration()
	config.DownloadDomain = "download.domain"
	upload.Sanitize(config)
	require.Zero(t, upload.RemoteIP, "invalid sanitized upload")
	require.Zero(t, upload.Login, "invalid sanitized upload")
	require.Zero(t, upload.Password, "invalid sanitized upload")
	require.Zero(t, upload.UploadToken, "invalid sanitized upload")
	require.Zero(t, upload.Token, "invalid sanitized upload")
	// NOTE(review): duplicated assertion — UploadToken is already checked
	// two lines up; upload.User is set above but never verified. This line
	// was probably meant to be require.Zero(t, upload.User, ...). Confirm
	// Sanitize's contract before changing.
	require.Zero(t, upload.UploadToken, "invalid sanitized upload")
	require.Equal(t, config.DownloadDomain, upload.DownloadDomain, "invalid download domain")
}

// TestUploadSanitizeAdmin checks that admin uploads keep their token.
func TestUploadSanitizeAdmin(t *testing.T) {
	upload := &Upload{}
	upload.NewFile()
	upload.UploadToken = "token"
	upload.IsAdmin = true
	upload.Sanitize(NewConfiguration())
	require.Equal(t, "token", upload.UploadToken, "invalid sanitized upload")
}

// TestUpload_GetFile checks file lookup by ID.
func TestUpload_GetFile(t *testing.T) {
	upload := &Upload{}
	file1 := upload.NewFile()
	file1.ID = "id_1"
	file2 := upload.NewFile()
	file2.ID = "id_2"
	f := upload.GetFile(file1.ID)
	require.NotNil(t, f)
	require.Equal(t, file1, f)
}

// TestUpload_GetFileByReference checks file lookup by reference.
func TestUpload_GetFileByReference(t *testing.T) {
	upload := &Upload{}
	file1 := upload.NewFile()
	file1.Reference = "1"
	file2 := upload.NewFile()
	file2.Reference = "2"
	f := upload.GetFileByReference(file1.Reference)
	require.NotNil(t, f)
	require.Equal(t, file1, f)
}

// TestUpload_PrepareInsertForTests checks that InitializeForTests assigns
// IDs and links files back to the upload.
func TestUpload_PrepareInsertForTests(t *testing.T) {
	upload := &Upload{}
	upload.NewFile().Name = "file"
	upload.InitializeForTests()
	require.NotZero(t, upload.ID)
	require.NotZero(t, upload.Files[0].ID)
	require.Equal(t, upload.ID, upload.Files[0].UploadID)
}

// TestUpload_IsExpired checks expiry on both sides of the deadline.
func TestUpload_IsExpired(t *testing.T) {
	upload := &Upload{}
	deadline := time.Now().Add(time.Hour)
	upload.ExpireAt = &deadline
	require.False(t, upload.IsExpired())
	deadline = time.Now().Add(-time.Hour)
	upload.ExpireAt = &deadline
	require.True(t, upload.IsExpired())
}

// TestUpload_ExtendExpirationDate checks that extending a 1-second TTL
// pushes the deadline forward each time. Note: the two time.Sleep calls
// make this test take ~2s of wall clock.
func TestUpload_ExtendExpirationDate(t *testing.T) {
	upload := &Upload{}
	upload.TTL = 1
	upload.ExtendExpirationDate()
	require.NotNil(t, upload.ExpireAt)
	require.False(t, upload.IsExpired())
	time.Sleep(time.Second)
	require.True(t, upload.IsExpired())
	upload.ExtendExpirationDate()
	require.NotNil(t, upload.ExpireAt)
	require.False(t, upload.IsExpired())
	time.Sleep(time.Second)
	require.True(t, upload.IsExpired())
}
package network

import (
	"data"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// getWeiChatCodeUrl builds the WeChat jscode2session URL used to exchange a
// login code for an openid/session_key pair.
func getWeiChatCodeUrl(code string) string {
	// BUG FIX: the original emitted "...&&grant_type=..." — the doubled
	// '&' inserted an empty query parameter before grant_type.
	return data.WEI_CHAT_CODE_URL + "?appid=" + data.APP_ID + "&secret=" + data.APP_SECRET +
		"&js_code=" + code + "&grant_type=authorization_code"
}

/**
 * Login handles e.g. http://www.nbin01.com/dannu/login?code=111:
 * it exchanges the WeChat code for openid/session_key, merges in the
 * profile fields passed as query parameters, stores the user, reloads it
 * by openid and writes the serialized user back. On any failure nothing is
 * written (kept from the original behavior).
 */
func Login(w http.ResponseWriter, r *http.Request) {
	fmt.Printf("login")
	code := r.URL.Query().Get("code")
	if code == "" {
		return
	}
	url := getWeiChatCodeUrl(code)
	println("url=" + url)
	reps, err := http.Get(url)
	if err != nil {
		return
	}
	defer reps.Body.Close()
	print(reps)
	bodyBytes, err := ioutil.ReadAll(reps.Body)
	if err != nil {
		return
	}
	fmt.Println(string(bodyBytes))
	var userInfo data.UserInfo
	var weichat_auth map[string]string
	if err_json := json.Unmarshal(bodyBytes, &weichat_auth); err_json == nil {
		userInfo = parseUser(r)
	}
	userInfo.OpenId = weichat_auth["openid"]
	// BUG FIX: the original assigned weichat_auth["session_key"] to OpenId
	// a second time, clobbering the openid just stored. Keep the session
	// key in its own field instead.
	// NOTE(review): field name SessionKey assumed from the "session_key"
	// JSON key — confirm against data.UserInfo's declaration.
	userInfo.SessionKey = weichat_auth["session_key"]
	data.InsertUser(userInfo)
	userInfo = data.Find_with_openid(userInfo)
	if userInfo.ID > 0 {
		w.Write([]byte(userInfo.ToBaseUser()))
	}
}

// parseUser fills a UserInfo from the request's query parameters
// (nickName, avatarUrl, gender, city, province, country, lang).
func parseUser(r *http.Request) data.UserInfo {
	var userInfo data.UserInfo
	userInfo.Nickname = parseString(r, "nickName")
	userInfo.AvatarUrl = parseString(r, "avatarUrl")
	userInfo.Gender = parseString(r, "gender")
	userInfo.City = parseString(r, "city")
	userInfo.Province = parseString(r, "province")
	userInfo.Country = parseString(r, "country")
	userInfo.Lang = parseString(r, "lang")
	return userInfo
}

// parseString returns the named query parameter ("" when absent).
func parseString(r *http.Request, key string) string {
	return r.URL.Query().Get(key)
}
package leetcode

import (
	"reflect"
	"testing"
)

// TestRemoveDuplicates drives removeDuplicates (LeetCode 26, in-place
// dedup of a sorted slice) through a table of cases: the returned length n
// must delimit the deduplicated prefix of nums.
func TestRemoveDuplicates(t *testing.T) {
	tests := []struct {
		nums    []int
		results []int
	}{
		{
			nums:    []int{},
			results: []int{},
		},
		{
			nums:    []int{1},
			results: []int{1},
		},
		{
			nums:    []int{1, 1},
			results: []int{1},
		},
		{
			nums:    []int{1, 1, 2},
			results: []int{1, 2},
		},
		{
			nums:    []int{1, 1, 2, 2},
			results: []int{1, 2},
		},
		{
			nums:    []int{0, 0, 1, 1, 1, 2, 2, 3, 3, 4},
			results: []int{0, 1, 2, 3, 4},
		},
	}
	for i, tt := range tests {
		n := removeDuplicates(tt.nums)
		// Only the first n elements are meaningful after the call.
		if got, want := tt.nums[:n], tt.results; !reflect.DeepEqual(got, want) {
			t.Fatalf("%d: got %v, want %v", i, got, want)
		} else {
			t.Logf("%d: got %v", i, got)
		}
	}
}
package company_repository

import (
	"github.com/jinzhu/gorm"
	"gitlab.com/username/online-service-and-customer-care/company"
	"gitlab.com/username/online-service-and-customer-care/entity"
)

// CompanyGormRepo implements company repository interface
type CompanyGormRepo struct {
	conn *gorm.DB
}

// NewCompanyGormRepo returns new object of CompanyGormRepo
func NewCompanyGormRepo(db *gorm.DB) company.CompanyRepository {
	return &CompanyGormRepo{conn: db}
}

// Companies returns all companies stored in the database
func (compRepo *CompanyGormRepo) Companies() ([]entity.Company, []error) {
	comps := []entity.Company{}
	//fmt.Println(cmnts)
	errs := compRepo.conn.Find(&comps).GetErrors()
	if len(errs) > 0 {
		return nil, errs
	}
	return comps, errs
}

// Company retrieves a company from the database by its id
func (compRepo *CompanyGormRepo) Company(id uint) (*entity.Company, []error) {
	comps := entity.Company{}
	errs := compRepo.conn.First(&comps, id).GetErrors()
	if len(errs) > 0 {
		return nil, errs
	}
	return &comps, errs
}

// UpdateCompany updates a given company in the database
func (compRepo *CompanyGormRepo) UpdateCompany(company *entity.Company) (*entity.Company, []error) {
	comp := company
	errs := compRepo.conn.Save(comp).GetErrors()
	if len(errs) > 0 {
		return nil, errs
	}
	return comp, errs
}

// DeleteCompany deletes a given company from the database.
// It first loads the record so the deleted entity can be returned.
func (compRepo *CompanyGormRepo) DeleteCompany(id uint) (*entity.Company, []error) {
	comp, errs := compRepo.Company(id)
	if len(errs) > 0 {
		return nil, errs
	}
	errs = compRepo.conn.Delete(comp, id).GetErrors()
	if len(errs) > 0 {
		return nil, errs
	}
	return comp, errs
}

// StoreCompany stores a given company in the database
func (compRepo *CompanyGormRepo) StoreCompany(company *entity.Company) (*entity.Company, []error) {
	comp := company
	errs := compRepo.conn.Create(comp).GetErrors()
	if len(errs) > 0 {
		return nil, errs
	}
	return comp, errs
}
package goxtremio

import (
	"os"
	"testing"
)

// c is the shared client used by every test in this package.
var c *Client

// TestMain constructs the package-wide client before running the suite.
// A construction failure aborts the whole test binary via panic.
func TestMain(m *testing.M) {
	var err error
	c, err = NewClient()
	if err != nil {
		panic(err)
	}
	os.Exit(m.Run())
}
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package queue

import (
	"github.com/adeven/redismq"
	"launchpad.net/gocheck"
	"time"
)

// RedismqSuite exercises the redismq-backed queue implementation against a
// local redis instance, sharing one queue and one consumer across tests.
type RedismqSuite struct {
	queue    *redismq.Queue
	consumer *redismq.Consumer
}

var _ = gocheck.Suite(&RedismqSuite{})

// SetUpSuite connects to redis on localhost:6379 (db 3), wipes the test
// queue and registers the consumer used by every test.
func (s *RedismqSuite) SetUpSuite(c *gocheck.C) {
	s.queue = redismq.CreateQueue("localhost", "6379", "", 3, "redismq_tests")
	err := s.queue.Delete()
	c.Assert(err, gocheck.IsNil)
	s.consumer, err = s.queue.AddConsumer("redismq_tests")
	c.Assert(err, gocheck.IsNil)
}

// TestPut puts a message with no delay and expects to get it right back.
// Durations are in nanoseconds: 1e6 = 1ms timeout.
func (s *RedismqSuite) TestPut(c *gocheck.C) {
	msg := Message{
		Action: "regenerate-apprc",
		Args:   []string{"myapp"},
	}
	q := redismqQ{name: "default", queue: s.queue, consumer: s.consumer}
	err := q.Put(&msg, 0)
	c.Assert(err, gocheck.IsNil)
	got, err := q.Get(1e6)
	c.Assert(err, gocheck.IsNil)
	c.Assert(*got, gocheck.DeepEquals, msg)
}

// TestPutWithDelay puts a message with a 1s delay (1e9 ns): an immediate
// Get must fail, and a Get after 1.5s (15e8 ns sleep) must succeed.
func (s *RedismqSuite) TestPutWithDelay(c *gocheck.C) {
	msg := Message{
		Action: "regenerate-apprc",
		Args:   []string{"myapp"},
	}
	q := redismqQ{name: "default", queue: s.queue, consumer: s.consumer}
	err := q.Put(&msg, 1e9)
	c.Assert(err, gocheck.IsNil)
	_, err = q.Get(1e6)
	c.Assert(err, gocheck.NotNil)
	time.Sleep(15e8)
	got, err := q.Get(1e6)
	c.Assert(err, gocheck.IsNil)
	c.Assert(*got, gocheck.DeepEquals, msg)
}

// TestGet checks the round trip of a single message.
func (s *RedismqSuite) TestGet(c *gocheck.C) {
	msg := Message{
		Action: "regenerate-apprc",
		Args:   []string{"myapp"},
	}
	q := redismqQ{name: "default", queue: s.queue, consumer: s.consumer}
	err := q.Put(&msg, 0)
	c.Assert(err, gocheck.IsNil)
	got, err := q.Get(1e6)
	c.Assert(err, gocheck.IsNil)
	c.Assert(*got, gocheck.DeepEquals, msg)
}

// TestGetTimeout checks that Get on an empty queue returns a *timeoutError
// carrying the requested timeout.
func (s *RedismqSuite) TestGetTimeout(c *gocheck.C) {
	q := redismqQ{name: "default", queue: s.queue, consumer: s.consumer}
	got, err := q.Get(1e6)
	c.Assert(err, gocheck.NotNil)
	c.Assert(got, gocheck.IsNil)
	e, ok := err.(*timeoutError)
	c.Assert(ok, gocheck.Equals, true)
	c.Assert(e.timeout, gocheck.Equals, time.Duration(1e6))
}
package main

import (
	"fmt"
	"math"
)

// LeetCode 8 — string to integer (atoi): skip leading spaces, read an
// optional single '+'/'-' sign, then consume consecutive digits, clamping
// the result to the 32-bit signed range [math.MinInt32, math.MaxInt32].
// Any other leading character, or no digits at all, yields 0.
// https://leetcode-cn.com/problems/string-to-integer-atoi/
func main() {
	fmt.Println(myAtoi(" "))
	fmt.Println(myAtoi("42"))
	fmt.Println(myAtoi(" -42"))
	fmt.Println(myAtoi("4193 with words"))
	fmt.Println(myAtoi("words 987"))
	fmt.Println(myAtoi("-91283472332")) // -2147483648
}

// myAtoi converts s to a 32-bit signed integer following the atoi rules.
func myAtoi(s string) int {
	i, n := 0, len(s)

	// Skip leading spaces (the only whitespace the problem allows).
	for i < n && s[i] == ' ' {
		i++
	}

	// Optional sign.
	sign := 1
	if i < n && (s[i] == '+' || s[i] == '-') {
		if s[i] == '-' {
			sign = -1
		}
		i++
	}

	// A non-digit right after the optional sign (including a second sign)
	// means no valid conversion is possible.
	if i < n && (s[i] < '0' || s[i] > '9') {
		return 0
	}

	num := 0
	for ; i < n && s[i] >= '0' && s[i] <= '9'; i++ {
		d := int(s[i] - '0')
		// Clamp before the multiply-add would exceed the int32 range.
		if num > (math.MaxInt32-d)/10 {
			if sign > 0 {
				return math.MaxInt32
			}
			return math.MinInt32
		}
		num = num*10 + d
	}
	return sign * num
}
/*
Copyright 2021 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package types

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// ComponentManifest contains resources rendered from an application component.
type ComponentManifest struct {
	// Name/Namespace identify the component; RevisionName/RevisionHash
	// identify the rendered revision.
	Name         string
	Namespace    string
	RevisionName string
	RevisionHash string
	// StandardWorkload contains K8s resource generated from "output" block of ComponentDefinition
	StandardWorkload *unstructured.Unstructured
	// Traits contains both resources generated from "outputs" block of ComponentDefinition and resources generated from TraitDefinition
	Traits []*unstructured.Unstructured
	// PackagedWorkloadResources contain all the workload related resources. It could be a Helm
	// Release, Git Repo or anything that can package and run a workload.
	PackagedWorkloadResources []*unstructured.Unstructured
	// PackagedTraitResources maps a trait name to its packaged resources.
	PackagedTraitResources map[string][]*unstructured.Unstructured
}
package Problem0024 // ListNode ListNode type ListNode struct { Val int Next *ListNode } func swapPairs(head *ListNode) *ListNode { if head == nil || head.Next == nil { return head } // 让temp指向head.Next节点 temp := head.Next // 让head.Next指向转换好了temp.Next节点 head.Next = swapPairs(temp.Next) // 让temp.Next指向head节点 temp.Next = head // temp成为新的head节点 return temp }
package main

// NOTE(review): relative dot-imports ("./queue", "./message") predate Go
// modules; they pull PriorityQueue and Message into this file's namespace.
import . "./queue"
import . "./message"
import "container/heap"
import "fmt"

// main exercises the PriorityQueue: one message is placed directly into the
// backing slice, heap.Init establishes the heap invariant, three more are
// pushed, then the highest-priority message ("yolo", priority 37000) is
// popped and its text printed.
func main() {
	msg := Message{}
	msg.CreateMessage("husadhusaid", 7)
	msg2 := Message{}
	msg3 := Message{}
	msg4 := Message{}
	msg2.CreateMessage("yolo", 37000)
	msg3.CreateMessage("olol", 5)
	msg4.CreateMessage("kkkraiolaser", 96)
	pq := make(PriorityQueue, 1)
	pq[0] = &msg
	//pq[1] = &msg2
	//pq[2] = &msg3
	//pq[3] = &msg4
	heap.Init(&pq)
	//heap.Push(&pq, &msg)
	heap.Push(&pq, &msg2)
	heap.Push(&pq, &msg3)
	heap.Push(&pq, &msg4)
	fmt.Println("precisa ser yolo")
	msgPop := heap.Pop(&pq).(*Message)
	fmt.Println(msgPop.Msgtext)
}
/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2019-2020 Intel Corporation
 */

package ngcnef

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"path/filepath"
)

// closeReqBody closes the request body, logging (but otherwise ignoring) a
// close failure.
func closeReqBody(r *http.Request) {
	err := r.Body.Close()
	if err != nil {
		log.Errf("response body was not closed properly")
	}
}

// sendCustomeErrorRspToAF sends an error response with a custom problem
// title to the AF. NOTE(review): "Custome" is a typo kept for
// compatibility — other files in this package may call it by this name.
func sendCustomeErrorRspToAF(w http.ResponseWriter, eCode int, custTitleString string) {
	eRsp := nefSBRspData{errorCode: eCode, pd: ProblemDetails{Title: custTitleString}}
	sendErrorResponseToAF(w, eRsp)
}

// sendErrorResponseToAF serializes rsp as a JSON ProblemDetails and writes
// it with the mapped HTTP status code.
func sendErrorResponseToAF(w http.ResponseWriter, rsp nefSBRspData) {
	mdata, eCode := createErrorJSON(rsp)
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(eCode)
	_, err := w.Write(mdata)
	if err != nil {
		log.Err("NEF ERROR : Failed to send response to AF !!!")
	}
	log.Infof("HTTP Response sent: %d", eCode)
}

// createErrorJSON maps rsp to (body, status): supported error codes keep
// their code and marshal rsp.pd; anything else falls back to 404 with a
// generic " NEF Error " problem body.
func createErrorJSON(rsp nefSBRspData) (mdata []byte, statusCode int) {
	var err error
	statusCode = 404
	/* TBD for future: Removed check for 401, 403, 413 and 429 due
	cyclometrix complexity lint warning. Once a better mechanism is found
	to be added back. Anyhow currently these errors are not supported */
	if rsp.errorCode == 400 || rsp.errorCode == 404 ||
		rsp.errorCode == 411 || rsp.errorCode == 415 ||
		rsp.errorCode == 500 || rsp.errorCode == 503 {
		statusCode = rsp.errorCode
		mdata, err = json.Marshal(rsp.pd)
		if err == nil {
			/*No return */
			log.Info(statusCode)
			return mdata, statusCode
		}
	}
	/*Send default error */
	pd := ProblemDetails{Title: " NEF Error "}
	mdata, err = json.Marshal(pd)
	if err != nil {
		return mdata, statusCode
	}
	/*Any case return mdata */
	return mdata, statusCode
}

// logNef dumps the registered AFs, their subscriptions and service IDs at
// info level — debugging aid only.
func logNef(nef *nefData) {
	log.Infof("AF count %+v", len(nef.afs))
	if len(nef.afs) > 0 {
		for key, value := range nef.afs {
			log.Infof(" AF ID : %+v, Sub Registered Count %+v", key, len(value.subs))
			for _, vs := range value.subs {
				log.Infof("   SubId : %+v, ServiceId: %+v", vs.subid, vs.ti.AfServiceID)
			}
		}
	}
}

// LoadJSONConfig reads a file located at configPath and unmarshals it to
// config structure
func loadJSONConfig(configPath string, config interface{}) error {
	cfgData, err := ioutil.ReadFile(filepath.Clean(configPath))
	if err != nil {
		return err
	}
	return json.Unmarshal(cfgData, config)
}
package igo

import "testing"

// TestMd5 checks GetMd5String against the well-known MD5 digest of "1234".
func TestMd5(t *testing.T) {
	if GetMd5String("1234") != "81dc9bdb52d04dc20036dbd8313ed055" {
		t.Fatal("failed.")
	}
}
package clusteragent

import (
	"github.com/devopstoday11/tarian/pkg/tarianpb"
	falcoclient "github.com/falcosecurity/client-go/pkg/client"
	"google.golang.org/grpc"
)

// ClusterAgentConfig carries the server address/dial options and feature
// toggles used to assemble a ClusterAgent.
type ClusterAgentConfig struct {
	ServerAddress          string
	ServerGrpcDialOptions  []grpc.DialOption
	EnableFalcoIntegration bool
	EnableAddConstraint    bool
	FalcoClientConfig      *falcoclient.Config
}

// ClusterAgent bundles the gRPC server with the config/event services and
// the optional Falco alert subscriber.
// NOTE(review): "Subsriber" (missing 'c') is a typo in the field name;
// left as-is since sibling files in this package may reference it.
type ClusterAgent struct {
	grpcServer           *grpc.Server
	configServer         *ConfigServer
	eventServer          *EventServer
	falcoAlertsSubsriber *FalcoAlertsSubscriber
}

// NewClusterAgent wires up the gRPC server, registers the config and event
// services, and — when enabled — connects the Falco alerts subscriber.
// A Falco connection failure is fatal.
func NewClusterAgent(config *ClusterAgentConfig) *ClusterAgent {
	grpcServer := grpc.NewServer()
	configServer := NewConfigServer(config.ServerAddress, config.ServerGrpcDialOptions)
	configServer.EnableAddConstraint(config.EnableAddConstraint)
	eventServer := NewEventServer(config.ServerAddress, config.ServerGrpcDialOptions)
	tarianpb.RegisterConfigServer(grpcServer, configServer)
	tarianpb.RegisterEventServer(grpcServer, eventServer)
	ca := &ClusterAgent{
		grpcServer:   grpcServer,
		configServer: configServer,
		eventServer:  eventServer,
	}
	if config.EnableFalcoIntegration {
		var err error
		ca.falcoAlertsSubsriber, err = NewFalcoAlertsSubscriber(config.ServerAddress, config.ServerGrpcDialOptions, config.FalcoClientConfig)
		if err != nil {
			logger.Fatalw("falco: unable to connect to falco grpc server", "err", err)
		}
	}
	return ca
}

// Close shuts down the servers and, when present, the Falco subscriber.
func (ca *ClusterAgent) Close() {
	ca.configServer.Close()
	ca.eventServer.Close()
	if ca.falcoAlertsSubsriber != nil {
		ca.falcoAlertsSubsriber.Close()
	}
}

// GetGrpcServer exposes the underlying gRPC server (e.g. to Serve on a
// listener).
func (ca *ClusterAgent) GetGrpcServer() *grpc.Server {
	return ca.grpcServer
}

// GetFalcoAlertsSubscriber returns the Falco subscriber, or nil when Falco
// integration is disabled.
func (ca *ClusterAgent) GetFalcoAlertsSubscriber() *FalcoAlertsSubscriber {
	return ca.falcoAlertsSubsriber
}
package i18n

import (
	"encoding/json"
	"fmt"
	"path"
	"strings"

	"golang.org/x/text/language"
	"golang.org/x/text/message"
	"golang.org/x/text/message/catalog"

	"github.com/toolkits/pkg/file"
	"github.com/toolkits/pkg/runner"
)

var (
	// catalogs holds the per-language message catalogs; printers holds the
	// corresponding formatters, keyed by the same language codes (plus ""
	// as the default-English fallback key).
	catalogs = make(map[string]*catalog.Builder)
	printers = make(map[string]*message.Printer)
)

// Init will init i18n support via input language.
func Init(dictPath ...string) {
	// Default dictionary location: <cwd>/etc/i18n.json, overridable via
	// the first argument.
	dp := path.Join(runner.Cwd, "etc", "i18n.json")
	if len(dictPath) > 0 && dictPath[0] != "" {
		dp = dictPath[0]
	}

	DictFileRegister(dp)

	// en for default
	printers[""] = message.NewPrinter(langTag("en"))
	printers["en"] = message.NewPrinter(langTag("en"))
}

// DictFileRegister loads a JSON dictionary file of the form
// {"lang": {"key": "translation", ...}, ...} and registers it. A missing
// or malformed file is logged (or silently skipped) — not fatal.
func DictFileRegister(filePath string) {
	if !file.IsExist(filePath) {
		// fmt.Printf("i18n config file %s not found. donot worry, we'll use default configuration\n", filePath)
		return
	}

	content, err := file.ToTrimString(filePath)
	if err != nil {
		fmt.Printf("read i18n config file %s fail: %s\n", filePath, err)
		return
	}

	m := make(map[string]map[string]string)
	err = json.Unmarshal([]byte(content), &m)
	if err != nil {
		fmt.Printf("parse i18n config file %s fail: %s\n", filePath, err)
		return
	}

	DictRegister(m)
}

// DictRegister builds a catalog and printer for every language in m,
// replacing any previously registered entry for the same language code.
func DictRegister(m map[string]map[string]string) {
	for lang, dict := range m {
		tag := langTag(lang)
		cata := catalog.NewBuilder()
		for k, v := range dict {
			cata.SetString(tag, k, v)
		}
		catalogs[lang] = cata
		printers[lang] = message.NewPrinter(tag, message.Catalog(cata))
	}
}

// langTag maps a language code to an x/text tag: "zh"/"cn" → Chinese,
// everything else → English.
func langTag(l string) language.Tag {
	switch strings.ToLower(l) {
	case "zh", "cn":
		return language.Chinese
	default:
		return language.English
	}
}

// Sprintf formats according to a format specifier and returns the resulting string.
func Sprintf(lang, format string, a ...interface{}) string {
	// Unknown languages fall back to plain fmt formatting (no lookup).
	if _, exists := printers[lang]; !exists {
		return fmt.Sprintf(format, a...)
	}

	return printers[lang].Sprintf(format, a...)
}
//Package main represents the main package for the client package main import ( "bufio" "flag" "fmt" "net" "strings" "github.com/TomOrth/go-chat/lists" "github.com/gizak/termui" ) //Type Client represents the client to connect to the server type Client struct { MsgList *lists.MsgList //list of messages conn net.Conn //connection to server text string //message to be sent name string //name of client } //Add takes a message and adds it to the list of messages func (c *Client) Add(msg string) { c.MsgList.Append(msg) } var ( input *termui.Par //input for user ls *termui.List //list to show msgs ) //Kbd handles majority of the keyboard input for the user to display a message func (c *Client) Kbd() { termui.Handle("/sys/kbd", func(e termui.Event) { c.text += e.Data.(termui.EvtKbd).KeyStr input.Text = ">" + c.text termui.Render(termui.Body) }) } //Entr handles the enter click by the user, sending the message to the server func (c *Client) Entr() { termui.Handle("/sys/kbd/<enter>", func(e termui.Event) { fmt.Fprintf(c.conn, c.name+": "+c.text+"\n") c.text = "" input.Text = ">" + c.text termui.Render(termui.Body) }) } //BackSp handles the backspace click by the user, deleting a character in the message func (c *Client) BackSp() { termui.Handle("/sys/kbd/<backspace>", func(e termui.Event) { sz := len(c.text) if sz > 0 { c.text = c.text[:sz-1] input.Text = ">" + c.text termui.Render(termui.Body) } }) } //Spce handles the space click by the user, adding a space func (c *Client) Spce() { termui.Handle("/sys/kbd/<space>", func(e termui.Event) { c.text += " " input.Text = ">" + c.text termui.Render(termui.Body) }) } //Close handles the ctrl-c click by the user, terminating the client and letting the client know func (c *Client) Close() { termui.Handle("/sys/kbd/C-c", func(termui.Event) { c.conn.Write([]byte("/kill/" + c.name)) termui.StopLoop() }) } //Listen handles listening for incoming messages from the server func (c *Client) Listen() { for { // listen for reply message, err 
:= bufio.NewReader(c.conn).ReadString('\n') if err != nil { panic(err) } c.Add(strings.Replace(message, "\n", "", -1)) //odd thing where theres an extra \n occasionally. This needs to be fixed if c.MsgList.Size > 20 { c.MsgList.DeleteHead() } ls.Items = c.MsgList.MessageArr() termui.Render(termui.Body) } } //Run starts up the client and creates the terminal UI, passing a username, host and port func (c *Client) Run(name, host, port string) { // connect to this socket conn, err := net.Dial("tcp", host+":"+port) if err != nil { panic(err) } conn.Write([]byte("un:" + name + "\n")) c.conn = conn errTer := termui.Init() if errTer != nil { panic(err) } defer termui.Close() input = termui.NewPar(">") input.Height = 1 input.Border = false ls = termui.NewList() ls.Items = c.MsgList.MessageArr() ls.Overflow = "wrap" ls.ItemFgColor = termui.ColorYellow ls.Height = 22 termui.Body.AddRows( termui.NewRow(termui.NewCol(12, 0, ls)), termui.NewRow(termui.NewCol(12, 0, input))) termui.Body.Align() termui.Render(termui.Body) c.Kbd() c.Entr() c.BackSp() c.Spce() c.Close() termui.Handle("/sys/wnd/resize", func(e termui.Event) { termui.Body.Width = termui.TermWidth() termui.Body.Align() termui.Clear() termui.Render(termui.Body) }) go c.Listen() termui.Loop() } func main() { name := flag.String("name", "tom", "username") host := flag.String("host", "localhost", "host name") port := flag.String("port", "8000", "port number") flag.Parse() messages := &lists.MsgList{nil, nil, 0} c := &Client{messages, nil, "", *name} c.Run(*name, *host, *port) }
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"strconv"
	"strings"
)

// Advent of Code 2017 day 5: starting at index 0, repeatedly jump by the
// offset stored at the current index, counting steps until the pointer
// leaves the list. After each jump, part 1 increments the offset just used;
// part 2 decrements it instead when it was three or more.
func main() {
	// part is defined as cmd argument; part 1 is the default.
	part := 1
	if len(os.Args) > 1 && os.Args[1] == "part2" {
		part = 2
	}

	input, err := ioutil.ReadFile("input.txt")
	if err != nil {
		log.Fatal(err)
	}

	// strings.Fields handles both "\n" and "\r\n" line endings as well as
	// a trailing newline (the original split only on "\r\n").
	fields := strings.Fields(string(input))
	list := make([]int, len(fields))
	for i, s := range fields {
		v, err := strconv.Atoi(s)
		if err != nil {
			log.Fatalf("bad offset %q: %v", s, err)
		}
		list[i] = v
	}

	fmt.Println(run(list, part))
}

// run executes the jump maze over list (mutating it) and returns the number
// of jumps taken before the pointer exits the list on either side.
func run(list []int, part int) int {
	steps := 0
	// Exiting below index 0 also terminates (the original would have
	// panicked on a negative index).
	for i := 0; i >= 0 && i < len(list); steps++ {
		offset := list[i]
		if part == 2 && offset >= 3 {
			list[i]--
		} else {
			list[i]++
		}
		i += offset
	}
	return steps
}
//
// Created by Nick on 18-11-2019
//
// Main.go
// CSV to VCF converter, my first golang program

package main

import (
	"bufio"
	"encoding/csv"
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)

// getVCFDataFrom renders one VCARD 3.0 entry for name/mobile. mobile is
// normalized in place: "NA" or "" becomes the empty string, and when two
// numbers are joined with '/', only the first is kept.
func getVCFDataFrom(name string, mobile *string) string {
	// Check cases where it might be possible that the mobile number is blank
	if *mobile == "NA" || *mobile == "" {
		*mobile = ""
		fmt.Printf("Ignored empty mobile number for: %v\n", name)
	} else if strings.Contains(*mobile, "/") {
		// Check cases where there might be more than 1 mobile number, delimited by a slash
		fmt.Printf("Found 2 mobile numbers for %v, using the first one\n", name)
		*mobile = strings.Split(*mobile, "/")[0]
	}
	return fmt.Sprintf("BEGIN:VCARD\nVERSION:3.0\nREV:2019-11-18 12:00:07\nTEL;TYPE=cell,voice:%v\nN:;%v;;;;\nFN:%v\nEND:VCARD\n", *mobile, name, name)
}

//noinspection GoNilness
func main() {
	csvFile, err := os.Open("file.csv")
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: defer only after the error check — the original deferred
	// Close on a possibly-failed Open before looking at err.
	defer csvFile.Close()

	// Create a new reader for CSV
	reader := csv.NewReader(bufio.NewReader(csvFile))

	// Collect one VCARD string per CSV row (column 0: name, column 1: mobile).
	var contacts []string
	for {
		line, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		contacts = append(contacts, getVCFDataFrom(line[0], &line[1]))
	}

	// Create the output file in the pwd
	file, err := os.Create("contact.vcf")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	// BUG FIX: write and flush errors were silently discarded; a full disk
	// would have reported success.
	writer := bufio.NewWriter(file)
	for _, contact := range contacts {
		if _, err := writer.WriteString(contact); err != nil {
			log.Fatal(err)
		}
	}
	if err := writer.Flush(); err != nil {
		log.Fatal(err)
	}

	// BUG FIX: the success message claimed "contacts.vcf" while the file
	// actually created is "contact.vcf".
	fmt.Println("Successfully created the VCF file named: contact.vcf")
}
package main

import "fmt"

// exploreBehindK returns the element k positions behind the last element of
// v. The length is discovered by walking forward node-by-node via
// _getOrNil, mimicking traversal of a singly linked list.
func exploreBehindK(k int, v []interface{}) interface{} {
	length := 0
	for _getOrNil(v, length) != nil {
		length++
	}
	return v[length-k-1]
}

// _getOrNil simulates node access in a singly linked list: the element at
// index, or nil once the index runs past the end.
func _getOrNil(v []interface{}, index int) interface{} {
	if index >= len(v) {
		return nil
	}
	return v[index]
}

func main() {
	letters := []interface{}{
		"a",
		"b",
		"c",
		"d",
		"e",
	}
	fmt.Println(exploreBehindK(1, letters))
	fmt.Println(exploreBehindK(3, letters))
}

// a demonstrates named result parameters: the explicit return yields
// (10, 10); the naked return (unreachable here) would yield the zero values.
func a() (x, y int) {
	if true {
		return 10, 10
	}
	return
}
// Package main is a minimal demo program for the gomultimod multi-module
// repository: it only exercises the nested log module.
package main

import "github.com/gganley/gomultimod/log"

func main() {
	// Log's behavior is defined in github.com/gganley/gomultimod/log.
	log.Log("Hello world")
}
// Package Services contains the business-logic layer of the gin demo app.
package Services

import (
	"github.com/kylesliu/gin-demo/App/Repositories/MySQL"
	//"github.com/kylesliu/gin-demo/Bootstrap"
)

// GetAllArticleGroup returns every article group together with the number
// of articles in each group (aliased as "count" via the LEFT JOIN below).
// Groups without any articles still appear because of the LEFT JOIN.
//
// NOTE(review): `db` is not defined in this file; presumably it is a
// package-level ORM handle initialized elsewhere (the commented-out
// Bootstrap import hints at that) — confirm before relying on this.
func GetAllArticleGroup() *[]MySQL.ArticleGroup {
	groups := []MySQL.ArticleGroup{}
	db.Table("blog_article_group").
		Select("" +
			"blog_article_group.id, " +
			"blog_article_group.name," +
			"count(blog_article.id) AS count, " +
			"blog_article_group.created_at, " +
			"blog_article_group.updated_at").
		Joins("left join blog_article on blog_article_group.id = blog_article.g_id").
		Group("blog_article_group.id").
		Find(&groups)
	return &groups
}
// Package tests hosts the Ginkgo test suite for the themis service. The
// BeforeSuite below boots a throwaway MongoDB instance, seeds fixture data,
// and launches the full HTTP API so specs can exercise it end to end.
package tests

import (
	"testing"
	"os"
	"io"
	"io/ioutil"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"gopkg.in/mgo.v2"
	"github.com/manyminds/api2go"
	"github.com/manyminds/api2go-adapter/gingonic"
	"gopkg.in/gin-gonic/gin.v1"
	"themis/schema"
	"themis/resources"
	"themis/models"
	"themis/database"
	"themis/mockdb"
	"themis/routes"
	"themis/utils"
)

// M is shorthand for loosely-typed JSON-like documents used in specs.
type M map[string]interface{}

// Suite-wide state shared between BeforeSuite, the specs, and AfterSuite.
// NOTE(review): dbServer is declared as dbserver.DBServer but no "dbserver"
// package is imported here — it presumably should come from themis/mockdb
// (or mgo's dbtest package); confirm this file compiles as-is.
var dbServer dbserver.DBServer
var session *mgo.Session
var configuration utils.Configuration
var SpaceID string

// TestModels is the go-test entry point that hands control to Ginkgo.
func TestModels(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Service Suite")
}

var _ = BeforeSuite(func() {
	// setup logger
	utils.InitLogger()
	utils.SetLogFile("test.log")
	// test configuration
	configuration = utils.Configuration{
		ServiceURL:       "http://localhost:8080",
		ServicePort:      ":8080",
		ServiceMode:      "production",
		DatabaseHost:     "localhost",
		DatabasePort:     27017,
		DatabaseDatabase: "themis",
		DatabaseUser:     "themis",
		DatabasePassword: "themis",
	}
	// launch test database instance
	os.Setenv("CHECK_SESSIONS", "1")
	// error from TempDir deliberately ignored (test setup only)
	dir, _ := ioutil.TempDir("", "themis_test")
	dbServer.SetPath(dir)
	session = dbServer.Session()
	db := session.DB(configuration.DatabaseDatabase)
	// creating all storage backends
	storageBackends := database.StorageBackends{
		Space:        database.NewSpaceStorage(db),
		WorkItem:     database.NewWorkItemStorage(db),
		WorkItemType: database.NewWorkItemTypeStorage(db),
		Area:         database.NewAreaStorage(db),
		Comment:      database.NewCommentStorage(db),
		Iteration:    database.NewIterationStorage(db),
		LinkCategory: database.NewLinkCategoryStorage(db),
		Link:         database.NewLinkStorage(db),
		LinkType:     database.NewLinkTypeStorage(db),
		User:         database.NewUserStorage(db),
	}
	// Create the meta storage backend
	utils.NewDatabaseMeta(db)
	// setup test fixtures
	SpaceID = schema.SetupFixtureData(storageBackends)
	// launch service
	gin.SetMode(gin.ReleaseMode)
	gin.DefaultWriter = io.MultiWriter(utils.LogFile)
	r := gin.Default()
	r.Use(gin.Logger())
	api := api2go.NewAPIWithRouting(
		"api",
		api2go.NewStaticResolver(configuration.ServiceURL),
		gingonic.New(r),
	)
	r.StaticFile("/", "./static/index.html")
	// register one JSON:API resource per model, each wired to its backend
	api.AddResource(models.Space{}, resources.SpaceResource{SpaceStorage: storageBackends.Space})
	api.AddResource(models.WorkItem{}, resources.WorkItemResource{WorkItemStorage: storageBackends.WorkItem})
	api.AddResource(models.Area{}, resources.AreaResource{AreaStorage: storageBackends.Area})
	api.AddResource(models.Comment{}, resources.CommentResource{CommentStorage: storageBackends.Comment})
	api.AddResource(models.Iteration{}, resources.IterationResource{IterationStorage: storageBackends.Iteration})
	api.AddResource(models.Link{}, resources.LinkResource{LinkStorage: storageBackends.Link})
	api.AddResource(models.LinkCategory{}, resources.LinkCategoryResource{LinkCategoryStorage: storageBackends.LinkCategory})
	api.AddResource(models.LinkType{}, resources.LinkTypeResource{LinkTypeStorage: storageBackends.LinkType})
	api.AddResource(models.User{}, resources.UserResource{UserStorage: storageBackends.User, SpaceStorage: storageBackends.Space})
	api.AddResource(models.WorkItemType{}, resources.WorkItemTypeResource{WorkItemTypeStorage: storageBackends.WorkItemType})
	routes.Init(r)
	// serve in the background; specs hit the running endpoint
	go r.Run(configuration.ServicePort)
})

var _ = AfterSuite(func() {
	// tear down: close the session, wipe and stop the temp DB, close logs
	session.Close()
	dbServer.Wipe()
	dbServer.Stop()
	utils.CloseLogfile()
})
package pkce import "fmt" const ( CodeChallengeMethodPlain = "plain" CodeChallengeMethodS256 = "S256" ) type ( Verifier interface { Verify(challenge, verifier string) bool } plainVerifier struct{} s256Verifier struct{} ) var plain = &plainVerifier{} var s256 = &s256Verifier{} func FindVerifierByMethod(method string) (Verifier, error) { switch method { case CodeChallengeMethodPlain: return plain, nil case CodeChallengeMethodS256: return s256, nil default: return nil, fmt.Errorf("unknown code_challange_method: %s", method) } } func (v *plainVerifier) Verify(challenge, verifier string) bool { return challenge == verifier } func (v *s256Verifier) Verify(challenge, verifier string) bool { return S256Encode(verifier) == challenge }
package xolphin import ( "encoding/json" "fmt" "net/url" "strings" ) type DCVRequest struct { Domain string `json:"domain"` DCVType string `json:"dcvType"` ApproverEmail string `json:"approverEmail"` } type CertificateCreationRequest struct { Product int Years int CSR string DCVType string SubjectAlternativeNames []string DCV []DCVRequest Company string Department string Address string ZIPCode string City string ApproverFirstName string ApproverLastName string ApproverEmail string ApproverPhone string KVK string Reference string Language string } func (self CertificateCreationRequest) ToURLValues() url.Values { values := url.Values{} values.Set("product", fmt.Sprintf("%d", self.Product)) values.Set("years", fmt.Sprintf("%d", self.Years)) values.Set("csr", self.CSR) values.Set("dcvType", self.DCVType) if len(self.SubjectAlternativeNames) > 0 { values.Set("subjectAlternativeNames", strings.Join(self.SubjectAlternativeNames, ",")) } if len(self.DCV) > 0 { d, e := json.Marshal(self.DCV) if e == nil { values.Set("dcv", string(d)) } } if self.Company != "" { values.Set("company", self.Company) } if self.Department != "" { values.Set("department", self.Department) } if self.Address != "" { values.Set("address", self.Address) } if self.ZIPCode != "" { values.Set("zipcode", self.ZIPCode) } if self.City != "" { values.Set("city", self.City) } if self.ApproverFirstName != "" { values.Set("approverFirstName", self.ApproverFirstName) } if self.ApproverLastName != "" { values.Set("approverLastName", self.ApproverLastName) } if self.ApproverEmail != "" { values.Set("approverEmail", self.ApproverEmail) } if self.ApproverPhone != "" { values.Set("approverPhone", self.ApproverPhone) } if self.KVK != "" { values.Set("kvk", self.KVK) } if self.Reference != "" { values.Set("reference", self.Reference) } if self.Language != "" { values.Set("language", self.Language) } return values } type EECreationRequest struct { CSR string DCVType string SubjectAlternativeNames []string 
ApproverFirstName string ApproverLastName string ApproverEmail string ApproverPhone string Validate bool } func (self EECreationRequest) ToURLValues() url.Values { values := url.Values{} values.Set("csr", self.CSR) values.Set("dcvType", self.DCVType) values.Set("approverFirstName", self.ApproverFirstName) values.Set("approverLastName", self.ApproverLastName) values.Set("approverEmail", self.ApproverEmail) values.Set("approverPhone", self.ApproverPhone) if len(self.SubjectAlternativeNames) > 0 { values.Set("subjectAlternativeNames", strings.Join(self.SubjectAlternativeNames, ",")) } return values } type CertificateReissueRequest struct { CSR string DCVType string SubjectAlternativeNames []string DCV []DCVRequest Company string Department string Address string ZIPCode string City string ApproverFirstName string ApproverLastName string ApproverEmail string ApproverPhone string KVK string Reference string Language string } func (self CertificateReissueRequest) ToURLValues() url.Values { values := url.Values{} values.Set("csr", self.CSR) values.Set("dcvType", self.DCVType) if len(self.SubjectAlternativeNames) > 0 { values.Set("subjectAlternativeNames", strings.Join(self.SubjectAlternativeNames, ",")) } if len(self.DCV) > 0 { d, e := json.Marshal(self.DCV) if e == nil { values.Set("dcv", string(d)) } } if self.Company != "" { values.Set("company", self.Company) } if self.Department != "" { values.Set("department", self.Department) } if self.Address != "" { values.Set("address", self.Address) } if self.ZIPCode != "" { values.Set("zipcode", self.ZIPCode) } if self.City != "" { values.Set("city", self.City) } if self.ApproverFirstName != "" { values.Set("approverFirstName", self.ApproverFirstName) } if self.ApproverLastName != "" { values.Set("approverLastName", self.ApproverLastName) } if self.ApproverEmail != "" { values.Set("approverEmail", self.ApproverEmail) } if self.ApproverPhone != "" { values.Set("approverPhone", self.ApproverPhone) } if self.KVK != "" { 
values.Set("kvk", self.KVK) } if self.Reference != "" { values.Set("reference", self.Reference) } if self.Language != "" { values.Set("language", self.Language) } return values } type CertificateRenewRequest struct { Product int Years int CSR string DCVType string SubjectAlternativeNames []string DCV []DCVRequest Company string Department string Address string ZIPCode string City string ApproverFirstName string ApproverLastName string ApproverEmail string ApproverPhone string KVK string Reference string Language string } func (self CertificateRenewRequest) ToURLValues() url.Values { values := url.Values{} values.Set("product", fmt.Sprintf("%d", self.Product)) values.Set("years", fmt.Sprintf("%d", self.Years)) values.Set("csr", self.CSR) values.Set("dcvType", self.DCVType) if len(self.SubjectAlternativeNames) > 0 { values.Set("subjectAlternativeNames", strings.Join(self.SubjectAlternativeNames, ",")) } if len(self.DCV) > 0 { d, e := json.Marshal(self.DCV) if e == nil { values.Set("dcv", string(d)) } } if self.Company != "" { values.Set("company", self.Company) } if self.Department != "" { values.Set("department", self.Department) } if self.Address != "" { values.Set("address", self.Address) } if self.ZIPCode != "" { values.Set("zipcode", self.ZIPCode) } if self.City != "" { values.Set("city", self.City) } if self.ApproverFirstName != "" { values.Set("approverFirstName", self.ApproverFirstName) } if self.ApproverLastName != "" { values.Set("approverLastName", self.ApproverLastName) } if self.ApproverEmail != "" { values.Set("approverEmail", self.ApproverEmail) } if self.ApproverPhone != "" { values.Set("approverPhone", self.ApproverPhone) } if self.KVK != "" { values.Set("kvk", self.KVK) } if self.Reference != "" { values.Set("reference", self.Reference) } if self.Language != "" { values.Set("language", self.Language) } return values }
// Package main is a manual smoke-test client for the gRPC redis service:
// it checks out one pooled connection and exercises SET/GET/expire/delete
// for both string and sorted-set keys, printing every reply.
package main

import (
	"fmt"
	pb "redis/message"
	"strconv"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"redis/test/pool"
)

// RedisClient bundles a gRPC connection with its generated client stub so
// the pair can live in the connection pool as a single resource.
type RedisClient struct {
	Conn   *grpc.ClientConn
	Client pb.RedisClient
}

// Close releases the underlying gRPC connection (pool resource contract).
func (rc RedisClient) Close() error {
	return rc.Conn.Close()
}

func main() {
	fmt.Println("Client")
	// Pool sized 4..10 with a 10s timeout; the factory dials the fixed
	// test-server address without TLS.
	cp, errPool := pool.NewConnPool(func() (pool.ConnRes, error) {
		conn, err := grpc.Dial("192.168.1.181:50012", grpc.WithInsecure())
		if err != nil {
			return RedisClient{}, err
		}
		return RedisClient{Conn: conn, Client: pb.NewRedisClient(conn)}, nil
	}, 4, 10, time.Second*10)
	if errPool != nil {
		fmt.Println("NewConnPool: ", errPool)
		return
	}
	c, errC := cp.Get()
	if errC != nil {
		fmt.Println("GET: ", errC)
		return
	}
	// The pool returns the ConnRes interface; recover the concrete type.
	v, ok := c.(RedisClient)
	if !ok {
		fmt.Println("type error")
		return
	}
	begin := time.Now()
	i := 1010
	// --- string key lifecycle: SET, GET, SET_EXPIRE, GET_EXPIRE, DEL ---
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:   pb.CmdOption_SET,
		Type:  pb.KeyType_STRING,
		Key:   strconv.Itoa(i),
		Value: []string{"ssssss"},
	}); err != nil {
		fmt.Println("failed : ", err)
		// fmt.Println(v.GetState())
	} else {
		fmt.Println(*reply)
		// fmt.Println(v.GetState())
	}
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:  pb.CmdOption_GET,
		Type: pb.KeyType_STRING,
		Key:  strconv.Itoa(i),
	}); err != nil {
		// fmt.Println(conn.GetState())
		fmt.Println("failed : ", err)
	} else {
		fmt.Println(*reply)
	}
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:    pb.CmdOption_SET_EXPIRE,
		Type:   pb.KeyType_STRING,
		Key:    strconv.Itoa(i),
		Expire: 2018,
	}); err != nil {
		fmt.Println("failed : ", err)
	} else {
		fmt.Println(*reply)
	}
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:  pb.CmdOption_GET_EXPIRE,
		Type: pb.KeyType_STRING,
		Key:  strconv.Itoa(i),
	}); err != nil {
		fmt.Println("failed : ", err)
	} else {
		fmt.Println(*reply)
	}
	// time.Sleep(20 * time.Second)
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:  pb.CmdOption_DEL,
		Type: pb.KeyType_STRING,
		Key:  strconv.Itoa(i),
	}); err != nil {
		fmt.Println("failed : ", err)
	} else {
		fmt.Println(*reply)
	}
	// --- sorted-set lifecycle on a different key: SET, range GET, REM ---
	i = 999
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:  pb.CmdOption_SET,
		Type: pb.KeyType_SORTSET,
		Key:  strconv.Itoa(i),
		Zset: []*pb.Z{&pb.Z{Score: 1, Member: "haha"}, &pb.Z{Score: 2, Member: "22222222"}, &pb.Z{Score: 4, Member: "ssssssss"}},
	}); err != nil {
		fmt.Println("failed : ", err)
	} else {
		fmt.Println(*reply)
	}
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:  pb.CmdOption_GET,
		Type: pb.KeyType_SORTSET,
		Key:  strconv.Itoa(i),
		Data: map[string]string{"min": "1", "max": "7", "offset": "0", "count": "10"},
	}); err != nil {
		fmt.Println("failed : ", err)
	} else {
		fmt.Println(*reply)
	}
	// time.Sleep(2 * time.Second)
	if reply, err := v.Client.Cmd(context.Background(), &pb.RequestCmd{
		Cmd:  pb.CmdOption_ZSET_REM,
		Type: pb.KeyType_SORTSET,
		Key:  strconv.Itoa(i),
		Data: map[string]string{"min": "2", "max": "4"},
	}); err != nil {
		fmt.Println("failed : ", err)
	} else {
		fmt.Println(*reply)
	}
	// report wall-clock time for the whole command sequence
	fmt.Println(time.Now().Sub(begin).Seconds())
	cp.Put(c)
	// If the connection errored or was closed, return it like this instead:
	// cp.PutWithError(c)
	cp.Close()
}
// Package orders implements godip order types; this file defines the
// disband order, which removes a unit either during a retreat phase (a
// dislodged unit gives up) or during an adjustment phase (a nation with a
// supply deficit removes units).
package orders

import (
	"fmt"
	"time"

	cla "github.com/zond/godip/variants/classical/common"
	dip "github.com/zond/godip/common"
)

// Register a prototype so the generic order machinery can produce disbands.
func init() {
	generators = append(generators, func() dip.Order {
		return &disband{}
	})
}

// Disband builds a disband order for the unit in source; `at` timestamps
// the order so competing disbands can be ranked chronologically.
func Disband(source dip.Province, at time.Time) *disband {
	return &disband{
		targets: []dip.Province{source},
		at:      at,
	}
}

// disband removes a unit from the board. targets always holds exactly one
// province (the unit's location).
type disband struct {
	targets []dip.Province
	at      time.Time
}

func (self *disband) String() string {
	return fmt.Sprintf("%v %v", self.targets[0], cla.Disband)
}

func (self *disband) Type() dip.OrderType {
	return cla.Disband
}

func (self *disband) DisplayType() dip.OrderType {
	return cla.Disband
}

// Flags: a disband has no special flags.
func (self *disband) Flags() map[dip.Flag]bool {
	return nil
}

func (self *disband) Targets() []dip.Province {
	return self.targets
}

func (self *disband) At() time.Time {
	return self.at
}

// adjudicateBuildPhase accepts this disband only if it is among the
// earliest-timestamped disbands the nation is required to make; a disband
// submitted later than the last required one is illegal.
func (self *disband) adjudicateBuildPhase(r dip.Resolver) error {
	unit, _, _ := r.Unit(self.targets[0])
	_, disbands, _ := cla.AdjustmentStatus(r, unit.Nation)
	if len(disbands) == 0 || self.at.After(disbands[len(disbands)-1].At()) {
		return cla.ErrIllegalDisband
	}
	return nil
}

// adjudicateRetreatPhase: disbanding a dislodged unit always succeeds.
func (self *disband) adjudicateRetreatPhase(r dip.Resolver) error {
	return nil
}

// Adjudicate dispatches on the phase type: adjustment-phase disbands are
// checked against the deficit; retreat-phase disbands always succeed.
func (self *disband) Adjudicate(r dip.Resolver) error {
	if r.Phase().Type() == cla.Adjustment {
		return self.adjudicateBuildPhase(r)
	}
	return self.adjudicateRetreatPhase(r)
}

// validateRetreatPhase requires a dislodged unit at the target and returns
// its owner. Note: the target province is canonicalized in place by the
// Dislodged lookup.
func (self *disband) validateRetreatPhase(v dip.Validator) (dip.Nation, error) {
	if !v.Graph().Has(self.targets[0]) {
		return "", cla.ErrInvalidTarget
	}
	var ok bool
	var dislodged dip.Unit
	dislodged, self.targets[0], ok = v.Dislodged(self.targets[0])
	if !ok {
		return "", cla.ErrMissingUnit
	}
	return dislodged.Nation, nil
}

// validateBuildPhase requires a unit at the target and a supply deficit
// (negative balance) for its nation; otherwise a disband is not allowed.
func (self *disband) validateBuildPhase(v dip.Validator) (dip.Nation, error) {
	if !v.Graph().Has(self.targets[0]) {
		return "", cla.ErrInvalidTarget
	}
	var unit dip.Unit
	var ok bool
	if unit, self.targets[0], ok = v.Unit(self.targets[0]); !ok {
		return "", cla.ErrMissingUnit
	}
	if _, _, balance := cla.AdjustmentStatus(v, unit.Nation); balance > -1 {
		return "", cla.ErrMissingDeficit
	}
	return unit.Nation, nil
}

// Options reports whether a disband can originate from src for nation:
// adjustment phase requires an owned unit and a deficit; retreat phase
// requires an owned dislodged unit. Only super-provinces are considered.
func (self *disband) Options(v dip.Validator, nation dip.Nation, src dip.Province) (result dip.Options) {
	if src.Super() != src {
		return
	}
	if v.Phase().Type() == cla.Adjustment {
		if v.Graph().Has(src) {
			if unit, actualSrc, ok := v.Unit(src); ok {
				if unit.Nation == nation {
					if _, _, balance := cla.AdjustmentStatus(v, unit.Nation); balance < 0 {
						result = dip.Options{
							dip.SrcProvince(actualSrc): nil,
						}
					}
				}
			}
		}
	} else if v.Phase().Type() == cla.Retreat {
		if v.Graph().Has(src) {
			if unit, actualSrc, ok := v.Dislodged(src); ok {
				if unit.Nation == nation {
					result = dip.Options{
						dip.SrcProvince(actualSrc): nil,
					}
				}
			}
		}
	}
	return
}

// Validate dispatches phase-specific validation; disbands are only valid
// in adjustment and retreat phases.
func (self *disband) Validate(v dip.Validator) (dip.Nation, error) {
	if v.Phase().Type() == cla.Adjustment {
		return self.validateBuildPhase(v)
	} else if v.Phase().Type() == cla.Retreat {
		return self.validateRetreatPhase(v)
	}
	return "", cla.ErrInvalidPhase
}

// Execute removes the (possibly dislodged) unit from the board.
func (self *disband) Execute(state dip.State) {
	if state.Phase().Type() == cla.Adjustment {
		state.RemoveUnit(self.targets[0])
	} else {
		state.RemoveDislodged(self.targets[0])
	}
}
// isStrobogrammatic reports whether num reads the same after a 180-degree
// rotation. Digits that map onto each other under rotation are 0<->0,
// 1<->1, 8<->8, and 6<->9; any other digit disqualifies the number.
func isStrobogrammatic(num string) bool {
	rotated := map[byte]byte{
		'0': '0',
		'1': '1',
		'6': '9',
		'8': '8',
		'9': '6',
	}
	for left, right := 0, len(num)-1; left <= right; left, right = left+1, right-1 {
		counterpart, ok := rotated[num[left]]
		if !ok {
			return false
		}
		if counterpart != num[right] {
			return false
		}
	}
	return true
}
// Package validation_test exercises the bag-validation configuration
// loader against JSON fixtures under testdata/json_objects.
package validation_test

import (
	"github.com/APTrust/exchange/constants"
	"github.com/APTrust/exchange/validation"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"path"
	"strings"
	"testing"
)

// TestNewBagValidationConfig checks the zero-configuration defaults.
func TestNewBagValidationConfig(t *testing.T) {
	conf := validation.NewBagValidationConfig()
	assert.NotNil(t, conf.FileSpecs)
	assert.NotNil(t, conf.TagSpecs)
	assert.False(t, conf.AllowMiscTopLevelFiles)
	assert.False(t, conf.AllowMiscDirectories)
	assert.False(t, conf.TopLevelDirMustMatchBagName)
}

// TestLoadBagValidationConfig loads the reference fixture and spot-checks
// the parsed file specs, tag specs, and fixity algorithms.
func TestLoadBagValidationConfig(t *testing.T) {
	configFilePath := path.Join("testdata", "json_objects", "bag_validation_config.json")
	conf, errors := validation.LoadBagValidationConfig(configFilePath)
	if errors != nil && len(errors) > 0 {
		assert.Fail(t, errors[0].Error())
	}
	assert.False(t, conf.AllowFetchTxt)
	assert.True(t, conf.AllowMiscTopLevelFiles)
	assert.True(t, conf.AllowMiscDirectories)
	assert.True(t, conf.TopLevelDirMustMatchBagName)
	assert.Equal(t, 7, len(conf.FileSpecs))
	assert.Equal(t, 4, len(conf.TagSpecs))
	assert.Equal(t, 2, len(conf.FixityAlgorithms))

	// Spot checks
	if _, ok := conf.FileSpecs["manifest-md5.txt"]; !ok {
		assert.Fail(t, "FileSpec for manifest-md5.txt is missing")
	}
	if _, ok := conf.FileSpecs["manifest-sha256.txt"]; !ok {
		assert.Fail(t, "FileSpec for manifest-sha256.txt is missing")
	}
	if _, ok := conf.TagSpecs["Title"]; !ok {
		assert.Fail(t, "TagSpec for Title is missing")
	}
	if len(conf.FixityAlgorithms) > 1 {
		assert.Equal(t, "md5", conf.FixityAlgorithms[0])
		assert.Equal(t, "sha256", conf.FixityAlgorithms[1])
	}
	assert.Equal(t, validation.REQUIRED, conf.FileSpecs["manifest-md5.txt"].Presence)
	assert.Equal(t, validation.OPTIONAL, conf.FileSpecs["manifest-sha256.txt"].Presence)
	assert.Equal(t, "aptrust-info.txt", conf.TagSpecs["Title"].FilePath)
	assert.Equal(t, validation.REQUIRED, conf.TagSpecs["Title"].Presence)
	assert.False(t, conf.TagSpecs["Title"].EmptyOK)
	assert.Equal(t, 3, len(conf.TagSpecs["Access"].AllowedValues))
}

// Make sure we get an error and not a panic.
func TestLoadBagValidationConfigBadFiles(t *testing.T) {
	// Missing file
	configFilePath := path.Join("testdata", "json_objects", "file_does_not_exist.json")
	_, err := validation.LoadBagValidationConfig(configFilePath)
	assert.NotNil(t, err)
	// Unparseable JSON (a tarball is deliberately fed to the JSON parser)
	configFilePath = path.Join("testdata", "json_objects", "virginia.edu.uva-lib_2278801.tar")
	_, err = validation.LoadBagValidationConfig(configFilePath)
	assert.NotNil(t, err)
}

// TestValidPresenceValue checks the three legal presence keywords.
func TestValidPresenceValue(t *testing.T) {
	assert.True(t, validation.ValidPresenceValue("required"))
	assert.True(t, validation.ValidPresenceValue("optional"))
	assert.True(t, validation.ValidPresenceValue("forbidden"))
	assert.False(t, validation.ValidPresenceValue("naugahyde"))
}

// TestFileSpecValid: a FileSpec is valid iff its presence keyword is legal.
func TestFileSpecValid(t *testing.T) {
	filespec := &validation.FileSpec{
		Presence: "required",
	}
	assert.True(t, filespec.Valid())
	filespec.Presence = "elastic"
	assert.False(t, filespec.Valid())
}

// TestTagSpecValid: a TagSpec needs both a legal presence and a file path.
func TestTagSpecValid(t *testing.T) {
	tagspec := &validation.TagSpec{
		Presence: "verisimilitude",
	}
	assert.False(t, tagspec.Valid())
	tagspec.Presence = "optional"
	assert.False(t, tagspec.Valid())
	tagspec.FilePath = "data/blah/blah/blah.txt"
	assert.True(t, tagspec.Valid())
}

// TestValidateConfig verifies that the fixture validates cleanly and that
// injected bad specs each produce one error.
func TestValidateConfig(t *testing.T) {
	configFilePath := path.Join("testdata", "json_objects", "bag_validation_config.json")
	conf, errors := validation.LoadBagValidationConfig(configFilePath)
	if errors != nil && len(errors) > 0 {
		assert.Fail(t, errors[0].Error())
	}
	errors = conf.ValidateConfig()
	assert.Empty(t, errors)

	badPathSpec := validation.TagSpec{
		FilePath: "",
		Presence: "REQUIRED",
		EmptyOK:  true,
	}
	badPresenceSpec := validation.TagSpec{
		FilePath: "orangina",
		Presence: "orangina",
		EmptyOK:  true,
	}
	conf.TagSpecs["bad_path_spec"] = badPathSpec
	conf.TagSpecs["bad_presence"] = badPresenceSpec
	errors = conf.ValidateConfig()
	assert.Equal(t, 2, len(errors))
}

// TestCompileFileNameRegex checks the named pattern shortcuts (aptrust,
// posix — case-insensitive), custom regexes, and invalid-pattern errors.
func TestCompileFileNameRegex(t *testing.T) {
	configFilePath := path.Join("testdata", "json_objects", "bag_validation_config.json")
	conf, errors := validation.LoadBagValidationConfig(configFilePath)
	if errors != nil && len(errors) > 0 {
		assert.Fail(t, errors[0].Error())
	}
	err := conf.CompileFileNameRegex()
	assert.Nil(t, err)

	conf.FileNamePattern = "ThisPatternIsInvalid[-"
	err = conf.CompileFileNameRegex()
	require.NotNil(t, err)
	assert.True(t, strings.HasPrefix(err.Error(), "Cannot compile regex"))

	conf.FileNamePattern = "aptrust"
	err = conf.CompileFileNameRegex()
	assert.Nil(t, err)
	assert.Equal(t, constants.APTrustFileNamePattern, conf.FileNameRegex)

	conf.FileNamePattern = "APTRUST"
	err = conf.CompileFileNameRegex()
	assert.Nil(t, err)
	assert.Equal(t, constants.APTrustFileNamePattern, conf.FileNameRegex)

	conf.FileNamePattern = "posix"
	err = conf.CompileFileNameRegex()
	assert.Nil(t, err)
	assert.Equal(t, constants.PosixFileNamePattern, conf.FileNameRegex)

	conf.FileNamePattern = "POSIX"
	err = conf.CompileFileNameRegex()
	assert.Nil(t, err)
	assert.Equal(t, constants.PosixFileNamePattern, conf.FileNameRegex)
}
package app import ( "github.com/btnguyen2k/prom" "github.com/btnguyen2k/henge" "main/src/gvabe/bo" ) // NewAppDaoMultitenantCosmosdb is helper method to create CosmosDB-implementation (multi-tenant table) of AppDao. func NewAppDaoMultitenantCosmosdb(sqlc *prom.SqlConnect, tableName string) AppDao { spec := &henge.CosmosdbDaoSpec{PkName: bo.CosmosdbMultitenantPkName, PkValue: bo.CosmosdbMultitenantPkValueApp, TxModeOnWrite: true} innerDao := AppDaoSql{UniversalDao: henge.NewUniversalDaoCosmosdbSql(sqlc, tableName, spec)} dao := &AppDaoCosmosdb{AppDaoSql: innerDao, spec: spec} return dao }
package lv2_rectangular import ( "github.com/stretchr/testify/assert" "testing" ) func Test_solution(t *testing.T) { tcs := []struct { w int h int expectedValue int64 }{ {8, 12, 80}, } for _, tc := range tcs { res := solution(tc.w, tc.h) assert.Equal(t, res, tc.expectedValue) } }
// Advent of Code 2019, day 3: crossed wires. Two wire paths are read from
// day03/input.txt (one comma-separated instruction list per line).
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"sort"
	"strconv"
	"strings"
)

func main() {
	f, err := ioutil.ReadFile("day03/input.txt")
	if err != nil {
		log.Fatal(err)
	}
	// Fix (idiom): string(f) instead of fmt.Sprintf("%s", f).
	strs := strings.Split(string(f), "\n")
	fmt.Println(PartOne(strs[0], strs[1]))
	fmt.Println(PartTwo(strs[0], strs[1]))
}

// Coord is a point on the grid; both wires start at the origin {0, 0}.
type Coord struct {
	X int
	Y int
}

// PartOne returns the Manhattan distance from the origin to the closest
// point (other than the origin) where the two wires cross.
func PartOne(str1 string, str2 string) int {
	coordsMap1 := make(map[Coord]bool)
	for _, coord := range DrawFullPath(str1) {
		coordsMap1[coord] = true
	}
	coordsMap2 := make(map[Coord]bool)
	for _, coord := range DrawFullPath(str2) {
		coordsMap2[coord] = true
	}
	origin := Coord{X: 0, Y: 0}
	var distances []int
	for coord := range coordsMap1 {
		if _, hasKey := coordsMap2[coord]; hasKey && (coord != origin) {
			distances = append(distances, ManhattanDistance(coord, origin))
		}
	}
	sort.Ints(distances)
	return distances[0]
}

// DrawPath expands one instruction (e.g. "R8") into the coordinates
// visited, in order, starting one step away from origin.
func DrawPath(origin Coord, instruction string) (coords []Coord) {
	direction := instruction[0]
	distance, err := strconv.Atoi(instruction[1:])
	if err != nil {
		log.Fatal(err)
	}
	// Unit vector per direction; an unknown direction draws nothing,
	// matching the original switch behavior.
	var dx, dy int
	switch direction {
	case 'U':
		dy = 1
	case 'D':
		dy = -1
	case 'L':
		dx = -1
	case 'R':
		dx = 1
	default:
		return
	}
	for i := 1; i <= distance; i++ {
		coords = append(coords, Coord{X: origin.X + i*dx, Y: origin.Y + i*dy})
	}
	return
}

// DrawFullPath expands a full comma-separated instruction list into every
// coordinate the wire visits, in order.
func DrawFullPath(fullInstruction string) []Coord {
	instructions := strings.Split(fullInstruction, ",")
	origin := Coord{X: 0, Y: 0}
	var coords []Coord
	for _, instruction := range instructions {
		crds := DrawPath(origin, instruction)
		coords = append(coords, crds...)
		origin = crds[len(crds)-1]
	}
	return coords
}

// DrawFullPathWithSteps maps each visited coordinate to the number of steps
// taken to FIRST reach it.
func DrawFullPathWithSteps(fullInstruction string) map[Coord]int {
	instructions := strings.Split(fullInstruction, ",")
	origin := Coord{X: 0, Y: 0}
	coordsMap := make(map[Coord]int)
	var steps int
	for _, instruction := range instructions {
		crds := DrawPath(origin, instruction)
		for _, crd := range crds {
			steps++
			// Fix: keep the first visit's step count. The original
			// overwrote it on revisits, inflating the combined-steps
			// answer for self-crossing wires (the puzzle asks for the
			// fewest steps to each intersection).
			if _, seen := coordsMap[crd]; !seen {
				coordsMap[crd] = steps
			}
		}
		origin = crds[len(crds)-1]
	}
	return coordsMap
}

// ManhattanDistance is |i.X-j.X| + |i.Y-j.Y|.
func ManhattanDistance(i, j Coord) int {
	return int(math.Abs(float64(i.X-j.X)) + math.Abs(float64(i.Y-j.Y)))
}

// PartTwo returns the minimal combined number of steps both wires take to
// reach a common intersection (excluding the origin).
func PartTwo(str1 string, str2 string) int {
	coordsMap1 := DrawFullPathWithSteps(str1)
	coordsMap2 := DrawFullPathWithSteps(str2)
	origin := Coord{X: 0, Y: 0}
	var steps []int
	for coord := range coordsMap1 {
		if step, hasKey := coordsMap2[coord]; hasKey && (coord != origin) {
			steps = append(steps, step+coordsMap1[coord])
		}
	}
	sort.Ints(steps)
	return steps[0]
}
// Package cmd: the "filter" subcommand family (list/add/update/delete) for
// managing Todoist filters through the synced client.
package cmd

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"github.com/kobtea/go-todoist/cmd/util"
	"github.com/kobtea/go-todoist/todoist"
	"github.com/spf13/cobra"
	"os"
	"strconv"
	"strings"
)

// filterCmd represents the filter command
var filterCmd = &cobra.Command{
	Use:   "filter",
	Short: "subcommand for filter",
}

// filterListCmd prints all locally-synced filters as a table.
var filterListCmd = &cobra.Command{
	Use:   "list",
	Short: "list filters",
	RunE: func(cmd *cobra.Command, args []string) error {
		client, err := util.NewClient()
		if err != nil {
			return err
		}
		filters := client.Filter.GetAll()
		fmt.Println(util.FilterTableString(filters))
		return nil
	},
}

// filterAddCmd creates a filter from positional args (name) and the
// --query/--color flags, commits it, re-syncs, and prints the result.
var filterAddCmd = &cobra.Command{
	Use:   "add",
	Short: "add filter",
	RunE: func(cmd *cobra.Command, args []string) error {
		client, err := util.NewClient()
		if err != nil {
			return err
		}
		// all positional args joined become the filter name
		name := strings.Join(args, " ")
		query, err := cmd.Flags().GetString("query")
		if err != nil {
			return err
		}
		filter := todoist.Filter{
			Name:  name,
			Query: query,
		}
		colorStr, err := cmd.Flags().GetString("color")
		if err != nil {
			return errors.New("Invalid filter color")
		}
		if len(colorStr) > 0 {
			color, err := strconv.Atoi(colorStr)
			if err != nil {
				return fmt.Errorf("Invalid filter color: %s", colorStr)
			}
			filter.Color = color
		}
		if _, err = client.Filter.Add(filter); err != nil {
			return err
		}
		// push the pending command, then pull fresh server state
		ctx := context.Background()
		if err = client.Commit(ctx); err != nil {
			return err
		}
		if err = client.FullSync(ctx, []todoist.Command{}); err != nil {
			return err
		}
		// re-resolve by name to show the server-assigned record
		filters := client.Filter.FindByName(name)
		if len(filters) == 0 {
			return errors.New("Failed to add this filter. It may be failed to sync.")
		}
		syncedFilter := filters[len(filters)-1]
		fmt.Println("Successful addition of a filter.")
		fmt.Println(util.FilterTableString([]todoist.Filter{syncedFilter}))
		return nil
	},
}

// filterUpdateCmd renames and/or changes the query/color of the filter
// identified by its ID, then commits and re-syncs.
var filterUpdateCmd = &cobra.Command{
	Use:   "update id [new_name]",
	Short: "update filter",
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) < 1 {
			return errors.New("Require filter ID to update")
		}
		id, err := todoist.NewID(args[0])
		if err != nil {
			return fmt.Errorf("Invalid ID: %s", args[0])
		}
		client, err := util.NewClient()
		if err != nil {
			return err
		}
		filter := client.Filter.Resolve(id)
		if filter == nil {
			return fmt.Errorf("No such filter id: %s", id)
		}
		// remaining args (if any) become the new name
		if len(args) > 1 {
			filter.Name = strings.Join(args[1:], " ")
		}
		query, err := cmd.Flags().GetString("query")
		if err != nil {
			return err
		}
		if len(query) > 0 {
			filter.Query = query
		}
		colorStr, err := cmd.Flags().GetString("color")
		if err != nil {
			return errors.New("Invalid filter color")
		}
		if len(colorStr) > 0 {
			color, err := strconv.Atoi(colorStr)
			if err != nil {
				return fmt.Errorf("Invalid filter color: %s", colorStr)
			}
			filter.Color = color
		}
		if _, err = client.Filter.Update(*filter); err != nil {
			return err
		}
		ctx := context.Background()
		if err = client.Commit(ctx); err != nil {
			return err
		}
		if err = client.FullSync(ctx, []todoist.Command{}); err != nil {
			return err
		}
		syncedFilter := client.Filter.Resolve(id)
		if syncedFilter == nil {
			// NOTE(review): message says "add" but this is the update
			// path — likely copy-pasted from filterAddCmd.
			return errors.New("Failed to add this filter. It may be failed to sync.")
		}
		fmt.Println("Successful updating filter.")
		fmt.Println(util.FilterTableString([]todoist.Filter{*syncedFilter}))
		return nil
	},
}

// filterDeleteCmd resolves each given ID, shows the filters, asks for
// interactive confirmation on stdin, and deletes them inside AutoCommit.
var filterDeleteCmd = &cobra.Command{
	Use:   "delete id [...]",
	Short: "delete filters",
	RunE: func(cmd *cobra.Command, args []string) error {
		if err := util.AutoCommit(func(client todoist.Client, ctx context.Context) error {
			return util.ProcessIDs(
				args,
				func(ids []todoist.ID) error {
					var filters []todoist.Filter
					for _, id := range ids {
						filter := client.Filter.Resolve(id)
						if filter == nil {
							return fmt.Errorf("invalid id: %s", id)
						}
						filters = append(filters, *filter)
					}
					fmt.Println(util.FilterTableString(filters))
					// anything but a plain "y" aborts
					reader := bufio.NewReader(os.Stdin)
					fmt.Print("are you sure to delete above filter(s)? (y/[n]): ")
					ans, err := reader.ReadString('\n')
					if ans != "y\n" || err != nil {
						fmt.Println("abort")
						return errors.New("abort")
					}
					for _, id := range ids {
						if err := client.Filter.Delete(id); err != nil {
							return err
						}
					}
					return nil
				})
		}); err != nil {
			// the sentinel "abort" is a user choice, not a failure
			if err.Error() == "abort" {
				return nil
			}
			return err
		}
		fmt.Println("Successful deleting of filter(s).")
		return nil
	},
}

// init wires the subcommands and their flags into the root command.
func init() {
	RootCmd.AddCommand(filterCmd)
	filterCmd.AddCommand(filterListCmd)
	filterAddCmd.Flags().StringP("query", "q", "", "query")
	filterAddCmd.Flags().StringP("color", "c", "12", "color")
	filterCmd.AddCommand(filterAddCmd)
	filterUpdateCmd.Flags().StringP("query", "q", "", "query")
	filterUpdateCmd.Flags().StringP("color", "c", "", "color")
	filterCmd.AddCommand(filterUpdateCmd)
	filterCmd.AddCommand(filterDeleteCmd)
}
package main import ( "fmt" ) var nextId chan string func init() { nextId = make(chan string) go func() { var counter int64 = 0 for { s := fmt.Sprintf("%x", counter) nextId <- s counter += 1 } }() } func main() { fmt.Println("OpenBrain Version: xxx") pb := peaBrain() fmt.Printf("PeaBrain: %s\n", pb) }
package main import ( "reflect" "testing" . "github.com/dave/jennifer/jen" "github.com/karantin2020/csvgen/parser" ) //go:generate ./csvgen -p tests -s tests -f tests/fixture/test.go -o test //go:generate ./csvgen -f tests/fixture func Test_main(t *testing.T) { tests := []struct { name string }{ // TODO: Add test cases. } for range tests { t.Run(tt.name, func(t *testing.T) { main() }) } } func TestWriteString(t *testing.T) { type args struct { pkgCnt string } tests := []struct { name string args args }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { WriteString(tt.args.pkgCnt) }) } } func TestGenerateCode(t *testing.T) { tests := []struct { name string }{ // TODO: Add test cases. } for range tests { t.Run(tt.name, func(t *testing.T) { GenerateCode() }) } } func TestGenerateFuncs(t *testing.T) { type args struct { vstr parser.StructInfo } tests := []struct { name string args args }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { GenerateFuncs(tt.args.vstr) }) } } func Test_genReturn(t *testing.T) { type args struct { star string fieldName string fieldType string } tests := []struct { name string args args want *Statement }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := genReturn(tt.args.star, tt.args.fieldName, tt.args.fieldType); !reflect.DeepEqual(got, tt.want) { t.Errorf("genReturn() = %v, want %v", got, tt.want) } }) } } func Test_marshalBody(t *testing.T) { type args struct { typeRes *Statement } tests := []struct { name string args args want *Statement }{ // TODO: Add test cases. 
} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := marshalBody(tt.args.typeRes); !reflect.DeepEqual(got, tt.want) { t.Errorf("marshalBody() = %v, want %v", got, tt.want) } }) } } func Test_nilCheck(t *testing.T) { type args struct { star string iname string itype string marshall bool } tests := []struct { name string args args want *Statement }{ // TODO: Add test cases. } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := nilCheck(tt.args.star, tt.args.iname, tt.args.itype, tt.args.marshall); !reflect.DeepEqual(got, tt.want) { t.Errorf("nilCheck() = %v, want %v", got, tt.want) } }) } }
package main import ( "fmt" "strings" "jvmgo_c/ch2/classpath" "jvmgo_c/ch2/cmd" "os" ) func main() { cmd := cmd.ParseCmd() if cmd.VersionFlag { fmt.Println("version 0.0.1") }else if cmd.HelpFlag { fmt.Printf("Usage: %s [-option] class [args...]\n",os.Args[0]) } else{ startJVM(cmd) } } func startJVM(cmd *cmd.Cmd) { cp := classpath.Parse(cmd.XjreOption,cmd.CpOption) fmt.Printf("classpath:%v class:%v args:%v\n", cp,cmd.Class,cmd.Args) className := strings.Replace(cmd.Class,".","/",-1) classData,_,err := cp.ReadClass(className) if err != nil { fmt.Printf("Could not find or load main class %s\n",cmd.Class) } fmt.Printf("class data:%v\n",classData) }
package aememcache import ( "bytes" "context" "encoding/gob" "time" "go.mercari.io/datastore/v2" "go.mercari.io/datastore/v2/dsmiddleware/storagecache" "google.golang.org/appengine/v2" "google.golang.org/appengine/v2/memcache" ) var _ storagecache.Storage = &cacheHandler{} var _ datastore.Middleware = &cacheHandler{} // New AE Memcache middleware creates & returns. func New(opts ...CacheOption) interface { datastore.Middleware storagecache.Storage } { ch := &cacheHandler{ stOpts: &storagecache.Options{}, } for _, opt := range opts { opt.Apply(ch) } s := storagecache.New(ch, ch.stOpts) ch.Middleware = s if ch.logf == nil { ch.logf = func(ctx context.Context, format string, args ...interface{}) {} } if ch.cacheKey == nil { ch.cacheKey = func(key datastore.Key) string { return "mercari:aememcache:" + key.Encode() } } return ch } type cacheHandler struct { datastore.Middleware stOpts *storagecache.Options raiseMemcacheError bool expireDuration time.Duration logf func(ctx context.Context, format string, args ...interface{}) cacheKey func(key datastore.Key) string } // A CacheOption is an cache option for a AE Memcache middleware. 
type CacheOption interface { Apply(*cacheHandler) } func (ch *cacheHandler) SetMulti(ctx context.Context, cis []*storagecache.CacheItem) error { ch.logf(ctx, "dsmiddleware/aememcache.SetMulti: incoming len=%d", len(cis)) itemList := make([]*memcache.Item, 0, len(cis)) for _, ci := range cis { if ci.Key.Incomplete() { panic("incomplete key incoming") } var buf bytes.Buffer enc := gob.NewEncoder(&buf) err := enc.Encode(ci.PropertyList) if err != nil { ch.logf(ctx, "dsmiddleware/aememcache.SetMulti: gob.Encode error key=%s err=%s", ci.Key.String(), err.Error()) continue } itemList = append(itemList, &memcache.Item{ Key: ch.cacheKey(ci.Key), Value: buf.Bytes(), Expiration: ch.expireDuration, }) } ch.logf(ctx, "dsmiddleware/aememcache.SetMulti: len=%d", len(itemList)) err := memcache.SetMulti(ctx, itemList) if err != nil { ch.logf(ctx, "dsmiddleware/aememcache: error on memcache.SetMulti %s", err.Error()) if ch.raiseMemcacheError { if merr, ok := err.(appengine.MultiError); ok { for _, err := range merr { if err == nil || err == memcache.ErrCacheMiss { continue } return merr } } else { return err } } keys := make([]string, 0, len(cis)) for _, ci := range cis { keys = append(keys, ci.Key.Encode()) } err = memcache.DeleteMulti(ctx, keys) if err != nil { ch.logf(ctx, "dsmiddleware/aememcache: error on memcache.DeleteMulti %s", err.Error()) if ch.raiseMemcacheError { if merr, ok := err.(appengine.MultiError); ok { for _, err := range merr { if err == nil || err == memcache.ErrCacheMiss { continue } return merr } } else { return err } } } } return nil } func (ch *cacheHandler) GetMulti(ctx context.Context, keys []datastore.Key) ([]*storagecache.CacheItem, error) { ch.logf(ctx, "dsmiddleware/aememcache.GetMulti: incoming len=%d", len(keys)) resultList := make([]*storagecache.CacheItem, len(keys)) cacheKeys := make([]string, 0, len(keys)) for _, key := range keys { cacheKeys = append(cacheKeys, ch.cacheKey(key)) } itemMap, err := memcache.GetMulti(ctx, cacheKeys) if err != nil 
{ ch.logf(ctx, "dsmiddleware/aememcache: error on memcache.GetMulti %s", err.Error()) if ch.raiseMemcacheError { if merr, ok := err.(appengine.MultiError); ok { for _, err := range merr { if err == nil || err == memcache.ErrCacheMiss { continue } return nil, datastore.MultiError(merr) } } else { return nil, err } } return resultList, nil } hit, miss := 0, 0 for idx, key := range keys { item, ok := itemMap[ch.cacheKey(key)] if !ok { resultList[idx] = nil miss++ continue } buf := bytes.NewBuffer(item.Value) dec := gob.NewDecoder(buf) var ps datastore.PropertyList err = dec.Decode(&ps) if err != nil { resultList[idx] = nil ch.logf(ctx, "dsmiddleware/aememcache.GetMulti: gob.Decode error key=%s err=%s", key.String(), err.Error()) miss++ continue } resultList[idx] = &storagecache.CacheItem{ Key: key, PropertyList: ps, } hit++ } ch.logf(ctx, "dsmiddleware/aememcache.GetMulti: hit=%d miss=%d", hit, miss) return resultList, nil } func (ch *cacheHandler) DeleteMulti(ctx context.Context, keys []datastore.Key) error { ch.logf(ctx, "dsmiddleware/aememcache.DeleteMulti: incoming len=%d", len(keys)) cacheKeys := make([]string, 0, len(keys)) for _, key := range keys { cacheKeys = append(cacheKeys, ch.cacheKey(key)) } err := memcache.DeleteMulti(ctx, cacheKeys) if err != nil { ch.logf(ctx, "dsmiddleware/aememcache: error on memcache.DeleteMulti %s", err.Error()) if ch.raiseMemcacheError { if merr, ok := err.(appengine.MultiError); ok { for _, err := range merr { if err == nil || err == memcache.ErrCacheMiss { continue } return merr } } else { return err } } } return nil }
package controller import ( "fmt" "time" "github.com/therecipe/qt/core" "github.com/therecipe/qt/gui" "github.com/therecipe/qt/internal/examples/showcases/wallet/controller" ) type ProgressBarController struct { core.QObject _ func() `constructor:"init"` _ string `property:"text"` _ float64 `property:"value"` _ func() `signal:"clicked,auto"` _ func(uint64) `signal:"heightChanged,<-(controller.Controller)"` } func (c *ProgressBarController) init() { if controller.DEMO { c.heightChanged(uint64(0)) } } func (c *ProgressBarController) clicked() { gui.QDesktopServices_OpenUrl(core.NewQUrl3("https://example.com/", 0)) } func (c *ProgressBarController) heightChanged(height uint64) { if controller.Controller.IsSynced() { c.SetText(fmt.Sprintf("BH: %v", controller.Controller.Height())) c.SetValue(100) } else if controller.DEMO { c.SetText("BH: DEMO") c.SetValue(80) } else { estimatedHeight := estimatedHeightAt(time.Now()) estimatedProgress := float64(height) / float64(estimatedHeight) * 100 if estimatedProgress > 100 { estimatedProgress = 100 } c.SetText(fmt.Sprintf("%.1f%%", estimatedProgress)) c.SetValue(estimatedProgress) } } // estimatedHeightAt returns the estimated block height for the given time. // Block height is estimated by calculating the minutes since a known block in // the past and dividing by 10 minutes (the block time). func estimatedHeightAt(t time.Time) uint64 { block100kTimestamp := time.Date(2017, time.April, 13, 23, 29, 49, 0, time.UTC) blockTime := float64(9) // overestimate block time for better UX diff := t.Sub(block100kTimestamp) estimatedHeight := 100e3 + (diff.Minutes() / blockTime) return uint64(estimatedHeight + 0.5) // round to the nearest block }
package main import "github.com/chhkay/Answer" func main() { Answer.Run() }
package output import ( "fmt" "log" "sort" "../movi" ) func DumpGroupsAsIPTVSimple(groups map[int]*movi.ChannelGroup, prefix string) []byte{ var keys []int data := []byte("#EXTM3U\n") for k := range groups{ keys = append(keys, k) } sort.Ints(keys) for _, k := range keys{ group := groups[k] if len(group.HD) > 0{ data = append(data, dumpIPTVSimpleChannel(group.HD[0], prefix)...) }else if len(group.SD) > 0{ data = append(data, dumpIPTVSimpleChannel(group.SD[0], prefix)...) }else{ log.Println("WARNING: No SD or HD channels in group", group) } } return data } func dumpIPTVSimpleChannel(c *movi.LogicalChannel, prefix string) []byte{ extinf := fmt.Sprintf("#EXTINF:-1 tvg-id=\"%d\" tvg-name=\"%s\" tvg-logo=\"%s\" tvg-chno=\"%d\" group-title=\"%s\", %s\n", c.Id, c.Name, c.GetLogoPath(), c.Number, "MovistarTV", // c.FromPackage, c.Name) url := fmt.Sprintf("%s%s\n", prefix, c.Url.Raw()) return append([]byte(extinf), []byte(url)...) } func DumpIPTVSimple(channels []*movi.LogicalChannel, prefix string) []byte{ data := []byte("#EXTM3U\n") for _, c := range channels{ data = append(data, dumpIPTVSimpleChannel(c, prefix)...) } return data }
package main import ( "fmt" pb "github.com/gautamrege/gochat/api" ) func addFakeHandles() { for i := 0; i < 10; i++ { h := pb.Handle{ Name: fmt.Sprintf("test+%d", i), Port: int32(i * 23), Host: "fake IP", } HANDLES.Insert(h) } }
package cmd import ( "errors" "io/ioutil" "os" "path/filepath" "regexp" "testing" "github.com/balaji-dongare/gophercises/CLI/task/dbrepository" "github.com/spf13/cobra" ) // initdb initalize db for test environment func initdb() { dir, _ := os.Getwd() databasepath := filepath.Join(dir, "tasks.db") dbrepository.InitDatabase(databasepath) } // TestAddCMD testcase for test task add command func TestAddCMD(t *testing.T) { initdb() testSuit := []struct { args []string expected string }{ {args: []string{"do", "testing"}, expected: `Task:"do testing" is Added in todo list`}, {args: []string{"do", "development"}, expected: `Task:"do development" is Added in todo list`}, {args: []string{"do", "deployment"}, expected: `Task:"do deployment" is Added in todo list`}, {args: []string{"do", "release"}, expected: `Task:"do release" is Added in todo list`}, {args: []string{}, expected: `Please provide task`}, } file, _ := os.Create("./testresult.txt") file.Truncate(0) defer file.Close() defer os.Remove(file.Name()) old := os.Stdout os.Stdout = file for _, testcase := range testSuit { AddTask.Run(AddTask, testcase.args) file.Seek(0, 0) fp, _ := ioutil.ReadFile(file.Name()) match, err := regexp.Match(testcase.expected, fp) if err != nil { t.Error("Error in expected result regex") } if match { t.Log("Result is as Expected") } else { t.Error("Result is not as Expected") } } os.Stdout = old } func TestAddCMDError(t *testing.T) { initdb() testdef := addTask defer func() { addTask = testdef }() addTask = func(task string) (bool, error) { return false, errors.New("Got Error in add task") } AddTask.Run(&cobra.Command{}, []string{"Got", "Error"}) }
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/bsm/sarama-cluster"
	"gopkg.in/Shopify/sarama.v1"

	"github.com/sk8sio/function-sidecar/pkg/dispatcher/http"
	"github.com/sk8sio/function-sidecar/pkg/dispatcher/stdio"

	"github.com/sk8sio/function-sidecar/pkg/dispatcher"
	"github.com/sk8sio/function-sidecar/pkg/message"

	"github.com/sk8sio/function-sidecar/pkg/dispatcher/grpc"
)

// main reads Spring-style configuration from SPRING_APPLICATION_JSON,
// consumes messages from the bound input Kafka topic, hands each payload to
// the protocol-specific dispatcher, and — when an output topic is bound —
// publishes the dispatcher's result back to Kafka. It runs until a signal
// arrives.
func main() {
	var saj map[string]interface{}
	err := json.Unmarshal([]byte(os.Getenv("SPRING_APPLICATION_JSON")), &saj)
	if err != nil {
		panic(err)
	}

	consumerConfig := makeConsumerConfig()
	consumerConfig.Consumer.Offsets.Initial = sarama.OffsetOldest

	brokers := []string{saj["spring.cloud.stream.kafka.binder.brokers"].(string)}
	input := saj["spring.cloud.stream.bindings.input.destination"].(string)
	// output stays nil (interface{}) when the function has no output binding.
	output := saj["spring.cloud.stream.bindings.output.destination"]
	group := saj["spring.cloud.stream.bindings.input.group"].(string)
	protocol := saj["spring.profiles.active"].(string)

	fmt.Printf("Sidecar for function '%v' (%v->%v) using %v dispatcher starting\n", group, input, output, protocol)

	dispatcher := createDispatcher(protocol)

	var producer sarama.AsyncProducer
	if output != nil {
		producer, err = sarama.NewAsyncProducer(brokers, nil)
		if err != nil {
			panic(err)
		}
		defer producer.Close()
	}

	consumer, err := cluster.NewConsumer(brokers, group, []string{input}, consumerConfig)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	// trap SIGINT, SIGTERM, and SIGKILL to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGTERM, os.Kill)

	if consumerConfig.Consumer.Return.Errors {
		go consumeErrors(consumer)
	}
	if consumerConfig.Group.Return.Notifications {
		go consumeNotifications(consumer)
	}

	// consume messages, watch signals
	for {
		select {
		case msg, ok := <-consumer.Messages():
			if ok {
				messageIn, err := message.ExtractMessage(msg.Value)
				fmt.Fprintf(os.Stdout, "<<< %s\n", messageIn)
				if err != nil {
					// break leaves only the select; the message is skipped
					// (and not marked) but consumption continues.
					log.Printf("Error receiving message from Kafka: %v", err)
					break
				}
				// NOTE(review): assumes the payload is always []byte and the
				// dispatcher result is always string — TODO confirm.
				strPayload := string(messageIn.Payload.([]byte))
				dispatched, err := dispatcher.Dispatch(strPayload)
				if err != nil {
					log.Printf("Error dispatching message: %v", err)
					break
				}
				if output != nil {
					messageOut := message.Message{Payload: []byte(dispatched.(string)), Headers: messageIn.Headers}
					bytesOut, err := message.EncodeMessage(messageOut)
					fmt.Fprintf(os.Stdout, ">>> %s\n", messageOut)
					if err != nil {
						log.Printf("Error encoding message: %v", err)
						break
					}
					outMessage := &sarama.ProducerMessage{Topic: output.(string), Value: sarama.ByteEncoder(bytesOut)}
					producer.Input() <- outMessage
				} else {
					fmt.Fprintf(os.Stdout, "=== Not sending function return value as function did not provide an output channel. Raw result = %s\n", dispatched)
				}
				consumer.MarkOffset(msg, "") // mark message as processed
			}
		case <-signals:
			return
		}
	}
}

// createDispatcher selects the dispatcher implementation matching the
// active Spring profile; unknown protocols are a fatal configuration error.
func createDispatcher(protocol string) dispatcher.Dispatcher {
	switch protocol {
	case "http":
		return http.NewHttpDispatcher()
	case "stdio":
		return stdio.NewStdioDispatcher()
	case "grpc":
		return grpc.NewGrpcDispatcher()
	default:
		panic("Unsupported Dispatcher " + protocol)
	}
}

// consumeNotifications logs consumer-group rebalance notifications.
func consumeNotifications(consumer *cluster.Consumer) {
	for ntf := range consumer.Notifications() {
		log.Printf("Rebalanced: %+v\n", ntf)
	}
}

// consumeErrors logs asynchronous consumer errors.
func consumeErrors(consumer *cluster.Consumer) {
	for err := range consumer.Errors() {
		log.Printf("Error: %s\n", err.Error())
	}
}

// makeConsumerConfig builds a cluster config that surfaces errors and
// rebalance notifications on channels (consumed by the goroutines above).
func makeConsumerConfig() *cluster.Config {
	consumerConfig := cluster.NewConfig()
	consumerConfig.Consumer.Return.Errors = true
	consumerConfig.Group.Return.Notifications = true
	return consumerConfig
}
// Application entities.
//
// @author TSS
package domain

// Config holds user preferences persisted between runs.
type Config struct {
	Timeout      int
	UpdateNotify bool
	UpdatePeriod int
	Vault        string
}

// Item is a fully decoded vault item.
type Item struct {
	Category *ItemCategory
	Created  int64
	Notes    string
	Title    string
	Trashed  bool
	Sections []*ItemSection
	Uid      string
	Url      string
	Updated  int64
}

// ItemField is a single name/value pair inside an item section.
type ItemField struct {
	Name  string
	Value string
}

// ItemSection groups related fields under a title.
type ItemSection struct {
	Title  string
	Fields []*ItemField
}

// Keys bundles the derived, master and overview key/MAC pairs.
type Keys struct {
	DerivedKey  []byte
	DerivedMac  []byte
	MasterKey   []byte
	MasterMac   []byte
	OverviewKey []byte
	OverviewMac []byte
}

// RawItem is an item in its stored (still encoded) form.
type RawItem struct {
	Category string
	Created  int64
	Details  string
	Hmac     string
	Keys     string
	Overview string
	Trashed  bool
	Uid      string
	Updated  int64
}

// SimpleItem is a lightweight projection of Item used for listings.
type SimpleItem struct {
	Category *ItemCategory
	Title    string
	Trashed  bool
	Uid      string
}

// UpdateInfo describes an available application update.
type UpdateInfo struct {
	ArchiveUrl  string
	Changelog   string
	ChecksumUrl string
	Newer       bool
	Version     string
}

// Vault points at a vault directory on disk.
type Vault struct {
	Path string
}

// NewConfig builds a Config from its parts.
func NewConfig(timeout, updatePeriod int, updateNotify bool, vault string) *Config {
	return &Config{
		Timeout:      timeout,
		UpdateNotify: updateNotify,
		UpdatePeriod: updatePeriod,
		Vault:        vault,
	}
}

// NewItem builds an Item from its parts.
func NewItem(uid, title, url, notes string, trashed bool, category *ItemCategory, sections []*ItemSection, created, updated int64) *Item {
	return &Item{
		Category: category,
		Created:  created,
		Notes:    notes,
		Title:    title,
		Trashed:  trashed,
		Sections: sections,
		Uid:      uid,
		Url:      url,
		Updated:  updated,
	}
}

// NewItemSection builds an ItemSection from its parts.
func NewItemSection(title string, fields []*ItemField) *ItemSection {
	return &ItemSection{
		Title:  title,
		Fields: fields,
	}
}

// NewItemField builds an ItemField from its parts.
func NewItemField(name, value string) *ItemField {
	return &ItemField{
		Name:  name,
		Value: value,
	}
}

// NewKeys builds a Keys bundle from its parts.
func NewKeys(derivedKey, derivedMac, masterKey, masterMac, overviewKey, overviewMac []byte) *Keys {
	return &Keys{
		DerivedKey:  derivedKey,
		DerivedMac:  derivedMac,
		MasterKey:   masterKey,
		MasterMac:   masterMac,
		OverviewKey: overviewKey,
		OverviewMac: overviewMac,
	}
}

// NewRawItem builds a RawItem from its parts.
func NewRawItem(category, details, hmac, keys, overview, uid string, created, updated int64, trashed bool) *RawItem {
	return &RawItem{
		Category: category,
		Created:  created,
		Details:  details,
		Hmac:     hmac,
		Keys:     keys,
		Overview: overview,
		Trashed:  trashed,
		Uid:      uid,
		Updated:  updated,
	}
}

// NewSimpleItem builds a SimpleItem from its parts.
func NewSimpleItem(category *ItemCategory, title string, trashed bool, uid string) *SimpleItem {
	return &SimpleItem{
		Category: category,
		Title:    title,
		Trashed:  trashed,
		Uid:      uid,
	}
}

// NewUpdateInfo builds an UpdateInfo from its parts.
func NewUpdateInfo(archiveUrl, checksumUrl, changelog, version string, newer bool) *UpdateInfo {
	return &UpdateInfo{
		ArchiveUrl:  archiveUrl,
		Changelog:   changelog,
		ChecksumUrl: checksumUrl,
		Newer:       newer,
		Version:     version,
	}
}

// NewVault builds a Vault for the given path.
func NewVault(path string) *Vault {
	return &Vault{
		Path: path,
	}
}
package format import ( "testing" ) func TestHasExporter(t *testing.T) { if !HasExporter("text") { t.Error("Incorrect, the IS text exporter") } if HasExporter("unknown1") { t.Error("Incorrect, the ISN'T unknown1 exporter") } } func TestNewExporter(t *testing.T) { ex := NewExporter("text") if ex == nil { t.Error("Exporter was not created") } defer func() { e := recover() if e == nil { t.Error("Did not panic") } if e != "Unknown format name: unknown1" { panic(e) } }() ex = NewExporter("unknown1") if ex == nil { t.Error("Unknown exporter return nil") } t.Error("Unknown exporter created") }
package mounts import ( "bufio" "fmt" "os" "strings" ) const ( procMounts = "/proc/mounts" ) func getMountTable() (*MountTable, error) { file, err := os.Open(procMounts) if err != nil { return nil, err } defer file.Close() scanner := bufio.NewScanner(file) table := &MountTable{} for scanner.Scan() { line := scanner.Text() var junk string var entry MountEntry nScanned, err := fmt.Sscanf(line, "%s %s %s %s %s", &entry.Device, &entry.MountPoint, &entry.Type, &entry.Options, &junk) if err != nil { return nil, err } if nScanned < 4 { return nil, fmt.Errorf("only read %d values from %s", nScanned, line) } table.Entries = append(table.Entries, &entry) } if err := scanner.Err(); err != nil { return nil, err } return table, nil } func (mt *MountTable) findEntry(path string) *MountEntry { var lastMatch *MountEntry var lastLength int for _, entry := range mt.Entries { length := len(entry.MountPoint) if strings.HasPrefix(path, entry.MountPoint) && length > lastLength { lastMatch = entry lastLength = length } } return lastMatch }
package consul

import (
	"fmt"
	_consul "github.com/hashicorp/consul/api"
	"github.com/tornadoyi/viking/http"
	"github.com/tornadoyi/viking/log"
	"github.com/tornadoyi/viking/task"
	"net/url"
	"strings"
	"sync"
	"time"
)

// Package-level registry of named clients, guarded by mutex.
var (
	clients = map[string]*Client{}
	mutex   = sync.RWMutex{}
)

// CreateClient builds a consul client from cfg and registers it under name.
// It fails when the name is already taken.
func CreateClient(name string, cfg *_consul.Config) (*Client, error) {
	mutex.Lock()
	defer mutex.Unlock()

	// check
	if _, ok := clients[name]; ok {
		return nil, fmt.Errorf("Repeated client %v", name)
	}

	// create client
	c, err := _consul.NewClient(cfg)
	if err != nil {
		return nil, err
	}

	// save
	client := &Client{name, c, nil, nil, make(map[string]*_consul.AgentService), &sync.RWMutex{}}
	clients[name] = client
	return client, nil
}

// GetClient looks a previously created client up by name.
func GetClient(name string) (*Client, bool) {
	mutex.RLock()
	defer mutex.RUnlock()
	c, ok := clients[name]
	return c, ok
}

// Client wraps a consul API client together with its registration state,
// an optional refresh timer, and a cached agent-service table.
type Client struct {
	name         string
	client       *_consul.Client
	registration *_consul.AgentServiceRegistration
	timer        *time.Timer
	services     map[string]*_consul.AgentService
	mutex        *sync.RWMutex
}

// RegisterServer registers this process as a consul service and starts an
// HTTP health-check endpoint on the port taken from the check URL (":80"
// when the URL carries no explicit port). Only one registration per client
// is allowed.
func (h *Client) RegisterServer(regCfg *AgentServiceRegistrationConfig) error {
	h.mutex.Lock()
	defer h.mutex.Unlock()

	// check
	if h.registration != nil {
		return fmt.Errorf("Repeated server register by client %v", h.name)
	}
	if regCfg == nil {
		return fmt.Errorf("Empty registration config")
	}
	registration := regCfg.AgentServiceRegistration()
	h.registration = registration

	// register
	err := h.client.Agent().ServiceRegister(registration)
	if err != nil {
		return err
	}

	// start health checking server
	// NOTE(review): assumes registration.Check (and its HTTP URL) is always
	// set by the config — a nil Check would panic here; TODO confirm.
	u, err := url.Parse(registration.Check.HTTP)
	if err != nil {
		return fmt.Errorf("Heath checking url parse error, %v", err)
	}
	s := strings.Split(u.Host, ":")
	address := ":80"
	if len(s) >= 2 {
		address = fmt.Sprintf(":%v", s[1])
	}
	checkHandler := func(ctx *http.RequestCtx) {
		fmt.Fprintf(ctx, "check")
	}
	// The health-check HTTP server runs for the life of the process.
	t := task.NewTask(func() {
		if err := http.ListenAndServe(address, checkHandler); err != nil {
			log.Error(err)
		}
	})
	t.Start()

	return nil
}

// SetFetchInterval (re)schedules a periodic background refresh of the agent
// service table into h.services; any previously scheduled timer is stopped.
func (h *Client) SetFetchInterval(interval time.Duration) {
	h.mutex.Lock()
	defer h.mutex.Unlock()
	if h.timer != nil {
		h.timer.Stop()
	}
	h.timer = time.AfterFunc(interval, func() {
		h.mutex.Lock()
		defer h.mutex.Unlock()
		// Deferred Reset re-arms the timer after each run (defers run LIFO,
		// so Reset fires before the mutex is released).
		defer h.timer.Reset(interval)
		services, err := h.client.Agent().Services()
		if err != nil {
			log.Error(err)
			return
		}
		h.services = services
	})
}

// FetchServices synchronously refreshes and returns the agent service table.
func (h *Client) FetchServices() (map[string]*AgentService, error) {
	h.mutex.Lock()
	defer h.mutex.Unlock()
	services, err := h.client.Agent().Services()
	if err != nil {
		return nil, err
	}
	h.services = services
	return services, nil
}

// AgentService aliases the consul API type for callers of this package.
type AgentService = _consul.AgentService
package modconfig import ( "github.com/foxcpp/maddy/internal/config" "github.com/foxcpp/maddy/internal/module" ) func StorageDirective(m *config.Map, node *config.Node) (interface{}, error) { var backend module.Storage if err := ModuleFromNode(node.Args, node, m.Globals, &backend); err != nil { return nil, err } return backend, nil }
package main //2回以上現れる行を出現回数とともに表示する。 import( "bufio" "fmt" "os" ) func main() { counts:=make(map[string]int) input:=bufio.NewScanner(os.Stdin) for input.Scan(){ counts[input.Text()]++ } for line,n := range counts { if n>1{ fmt.Printf("%d\t%s\n",n,line) } } }
package db import ( "github.com/nektro/mantle/pkg/idata" dbstorage "github.com/nektro/go.dbstorage" etc "github.com/nektro/go.etc" ) const ( cTableSettings = "server_settings" cTableUsers = "users" cTableChannels = "channels" cTableRoles = "roles" cTableChannelPerms = "channel_perms" cTableMessagesPrefix = "channel_messages_" cTableInvites = "invites" ) var ( db dbstorage.Database pa = PermAllow pi = PermIgnore ) var ( Props = Properties{} BuiltInRoles = map[string]*Role{ "o": &Role{ 0, "o", 0, "Owner", "", pa, pa, false, pa, pa, Time(epoch), }, } ) // Init sets up db tables and properties func Init() { db = etc.Database // table init db.CreateTableStruct(cTableSettings, Setting{}) db.CreateTableStruct(cTableUsers, User{}) db.CreateTableStruct(cTableChannels, Channel{}) db.CreateTableStruct(cTableRoles, Role{}) db.CreateTableStruct(cTableInvites, Invite{}) db.CreateTableStruct(cTableChannelPerms, ChannelPerm{}) // load server properties Props.SetDefault("name", idata.Name) Props.SetDefault("owner", "") Props.SetDefault("public", "1") Props.SetDefault("description", "The new easy and effective communication platform for any successful team or community that's independently hosted and puts users, privacy, and effiecency first.") Props.SetDefault("cover_photo", "https://www.transparenttextures.com/patterns/gplay.png") Props.SetDefault("profile_photo", "https://avatars.discourse.org/v4/letter/m/ec9cab/90.png") Props.Init() Props.Set("version", idata.Version) // for loop create channel message tables _chans := (Channel{}.All()) for _, item := range _chans { item.AssertMessageTableExists() } // add default channel, if none exist if len(_chans) == 0 { CreateChannel("general") } } // Close db func Close() { db.Close() }
// Copyright 2019 The OpenSDS Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oceanstor import ( "fmt" "strings" ) type NFS struct { *Client } func (c *NFS) getShareID(share interface{}) string { return share.(*NFSShareData).ID } func (c *NFS) createShare(shareName, fsID string) (interface{}, error) { sharePath := getSharePath(shareName) data := map[string]string{ "DESCRIPTION": "", "FSID": fsID, "SHAREPATH": sharePath, } url := "/NFSHARE" resp, err := c.request(url, "POST", data) if err != nil { return nil, fmt.Errorf("create nfs share %s failed: %v", sharePath, err) } var nfsShare NFSShare if err := handleReponse(resp, &nfsShare); err != nil { return nil, fmt.Errorf("create nfs share %s failed: %v", sharePath, err) } return &nfsShare.Data, nil } func (c *NFS) getShare(shareName string) (interface{}, error) { url := fmt.Sprintf("/NFSHARE?filter=SHAREPATH::%s&range=[0-100]", getSharePath(shareName)) resp, err := c.request(url, "GET", nil) if err != nil { return nil, err } var nfsShareList NFSShareList if err := handleReponse(resp, &nfsShareList); err != nil { return nil, err } if len(nfsShareList.Data) > 0 { return &nfsShareList.Data[0], nil } return nil, nil } func (c *NFS) deleteShare(shareID string) error { url := "/nfshare/" + shareID resp, err := c.request(url, "DELETE", nil) if err != nil { return err } var errDelete DeleteError if err := handleReponse(resp, &errDelete); err != nil { return err } return nil } func (c *NFS) getShareByID(shareID 
string) (interface{}, error) { url := "/NFSHARE/" + shareID resp, err := c.request(url, "GET", nil) if err != nil { return nil, err } var nfsShare NFSShare if err := handleReponse(resp, &nfsShare); err != nil { return nil, err } if nfsShare.Data.ID == "" { return nil, nil } return &nfsShare.Data, nil } func (c *NFS) allowAccess(shareID, accessTo, accessLevel string) (interface{}, error) { url := "/NFS_SHARE_AUTH_CLIENT" data := map[string]string{ "TYPE": "16409", "NAME": accessTo, "PARENTID": shareID, "ACCESSVAL": accessLevel, "SYNC": "0", "ALLSQUASH": "1", "ROOTSQUASH": "0", } resp, err := c.request(url, "Post", data) if err != nil { return nil, err } var nfsClient NFSShareClient if err := handleReponse(resp, &nfsClient); err != nil { return nil, err } return &nfsClient, nil } // getLocation func (c *NFS) getLocation(sharePath, ipAddr string) string { path := strings.Replace(sharePath, "-", "_", -1) return fmt.Sprintf("%s:/%s", ipAddr, path) } func (c *NFS) getAccessLevel(accessLevel string) string { if accessLevel == AccessLevelRW { return AccessNFSRw } return AccessNFSRo }
// Definition of repositories. // // @author TSS package out import ( "github.com/mashmb/1pass/1pass-core/core/domain" ) type ConfigRepo interface { IsAvailable() bool GetDefaultVault() string GetTimeout() int GetUpdateNotification() bool GetUpdatePeriod() int Save(config *domain.Config) } type ItemRepo interface { CountByCategoryAndTrashed(category *domain.ItemCategory, trashed bool) int FindByCategoryAndTitleAndTrashed(category *domain.ItemCategory, title string, trashed bool) []*domain.Item FindFirstByUidAndTrashed(uid string, trashed bool) *domain.Item LoadItems(vault *domain.Vault) []*domain.RawItem RemoveItems() StoreItems(items []*domain.Item) } type ProfileRepo interface { GetIterations() int GetMasterKey() string GetOverviewKey() string GetSalt() string LoadProfile(vault *domain.Vault) }
package graph type Filter func(*Node, *Arc, *Node) bool func Any() Filter { return func(from *Node, a *Arc, to *Node) bool { return true } } func HasRelationship(relationship string) Filter { return func(from *Node, a *Arc, to *Node) bool { for _, r := range a.relationships { if r == relationship { return true } } return false } }
// Package parser provides a way to take a string set of ingredients and turn them into // an array of Ingredient. package parser import ( "github.com/chvck/ingredients-parser/pkg/ingredient" "fmt" "encoding/json" ) // IParser is the interface that wraps the basic Parse method. type IParser interface { isConfigured() bool Parse(ingredients string) ([]ingredient.Ingredient, error) } type parserConfig struct { ParserType string `json:"parsertype"` } func NewParser(data []byte) (IParser, error) { var cf parserConfig if err := json.Unmarshal(data, &cf); err != nil { return nil, err } parser, err := stringToStruct(cf.ParserType, data) if err != nil { return nil, err } return parser, nil } func stringToStruct(name string, data []byte) (IParser, error) { switch name { case "crfppParser": parser := crfppParser{} if err := parser.setConfig(data); err != nil { return nil, err } return parser, nil default: return nil, fmt.Errorf("%s is not a known struct name", name) } }
package tool import ( "log" "path/filepath" "github.com/pkg/errors" "github.com/spf13/afero" ) // Config contains configurations to manage development tools. type Config struct { FS afero.Fs WorkingDir string RootDir string ManifestName string BinDirName string Verbose bool Log *log.Logger } // RequireManifest returns an error if the manifest file does not exist. func (c *Config) RequireManifest() error { if ok, err := afero.Exists(c.FS, c.ManifestPath()); err != nil { return errors.WithStack(err) } else if !ok { return errors.Errorf("could not find %s", c.ManifestPath()) } return nil } func (c *Config) ManifestPath() string { return filepath.Join(c.baseDir(), c.ManifestName) } func (c *Config) BinDir() string { return filepath.Join(c.baseDir(), c.BinDirName) } func (c *Config) BinPath(bin string) string { return filepath.Join(c.BinDir(), bin) } func (c *Config) baseDir() (dir string) { dir = c.RootDir if dir == "" { dir = c.WorkingDir } return }
package myList

import (
	"container/list"
	"sync"
)

// MyList is a mutex-guarded wrapper around container/list.List, safe for
// concurrent use. A nil value is returned by accessors when the list is
// empty, so nil elements are rejected by PushBack to keep that signal
// unambiguous.
type MyList struct {
	lock sync.Mutex
	l    *list.List
	name string
}

// NewList returns an empty, ready-to-use MyList with the given name.
func NewList(name string) *MyList {
	return &MyList{
		l:    list.New(),
		name: name}
}

// Front returns the value at the front of the list without removing it,
// or nil when the list is empty.
func (l *MyList) Front() interface{} {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.l.Len() == 0 {
		return nil
	}
	return l.l.Front().Value
}

// PopFront removes and returns the value at the front of the list, or nil
// when the list is empty.
func (l *MyList) PopFront() interface{} {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.l.Len() == 0 {
		return nil
	}
	front := l.l.Front()
	l.l.Remove(front)
	return front.Value
}

// PushBack appends s to the list. It reports false (and stores nothing)
// when s is nil, because nil is reserved as the "empty" sentinel.
func (l *MyList) PushBack(s interface{}) bool {
	l.lock.Lock()
	defer l.lock.Unlock()
	if s == nil {
		return false
	}
	l.l.PushBack(s)
	return true
}

// FrontMoveToBack rotates the list: the front element is moved to the back
// and its value returned. Returns nil when the list is empty.
func (l *MyList) FrontMoveToBack() interface{} {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.l.Len() == 0 {
		return nil
	}
	front := l.l.Front()
	l.l.MoveToBack(front)
	return front.Value
}

// Len returns the number of elements currently in the list.
func (l *MyList) Len() int {
	l.lock.Lock()
	defer l.lock.Unlock()
	return l.l.Len()
}

// Clean removes every element from the list.
func (l *MyList) Clean() {
	l.lock.Lock()
	defer l.lock.Unlock()
	// list.Init reinitializes the list in O(1) instead of removing
	// elements one at a time.
	l.l.Init()
}
package db

import (
	"fmt"
	"log"

	"github.com/Maymomo/Switch-Harmony/common"
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/mysql"
)

// DataBaseInit opens a temporary connection and runs the schema
// auto-migrations for all game tables, terminating the process on failure.
// The connection is closed before returning; later queries should obtain
// their own connection via dbConnection.
func DataBaseInit() {
	// Reuse dbConnection instead of duplicating the DSN construction.
	db, err := dbConnection()
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.DB().SetMaxOpenConns(10)

	db.AutoMigrate(&GameDetailDBTemp{})
	db.AutoMigrate(&GameSummaryDBTemp{})
	db.AutoMigrate(&GameDetailDB{})
	db.AutoMigrate(&GameSummaryDB{})
}

// dbConnection opens a new MySQL-backed gorm connection using the
// credentials from the application configuration. The caller owns the
// returned connection and must close it.
func dbConnection() (*gorm.DB, error) {
	config := common.GetConfig().MysqlConfig
	argsStr := fmt.Sprintf("%s:%s@(%s:%d)/%s?charset=utf8&parseTime=true&loc=Local",
		config.User,
		config.Password,
		config.Address,
		config.Port,
		config.DB)
	return gorm.Open("mysql", argsStr)
}
package mails

import (
	"fmt"
	"log"
	"net/smtp"
)

// Setup mail to works
func Setup() error {
	err := loadConfig()
	if err != nil {
		return err
	}
	log.Println("Mail service set up!")
	return nil
}

// SendMessageToContactTeam send a mail message to contact team
func SendMessageToContactTeam(msg string) error {
	from := config.ContactTeamEmail
	body := fmt.Sprintf("From: %s\nTo: %s\nSubject: Contact us\n\n%s", from, from, msg)
	addr := fmt.Sprintf("%s:%d", config.SmtpServerHost, config.SmtpServerPort)
	auth := smtp.PlainAuth("", from, config.ContactTeamPassword, config.SmtpServerHost)
	return smtp.SendMail(addr, auth, from, []string{from}, []byte(body))
}
package testutils

import (
	"context"
	"encoding/hex"
	"math/rand"
	"testing"

	dopts "github.com/libp2p/go-libp2p-kad-dht/opts"
	routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"

	datastore "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	keystore "github.com/ipfs/go-ipfs-keystore"
	"github.com/ipfs/go-ipns"
	"github.com/libp2p/go-libp2p"
	crypto "github.com/libp2p/go-libp2p-core/crypto"
	host "github.com/libp2p/go-libp2p-core/host"
	peerstore "github.com/libp2p/go-libp2p-core/peerstore"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p-peerstore/pstoremem"
	pnet "github.com/libp2p/go-libp2p-pnet"
	record "github.com/libp2p/go-libp2p-record"
	"github.com/multiformats/go-multiaddr"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
)

var (
	// EncodedPK is a hex encoded key
	// to be reused across tests
	EncodedPK = "0801124018c93db89bc9614d463003dab59eb9f8028b27835d4b42abe0b707770cbfc6bd9873de48ab48d753e6be17bc50e821e09f50959da17e45448074fdecccf3e7c0"
)

// NewLibp2pHostAndDHT is used to create a new libp2p host
// and an unbootstrapped dht. When secret is non-empty, the host is placed
// in a private network protected by that secret.
func NewLibp2pHostAndDHT(
	ctx context.Context,
	t *testing.T,
	logger *zap.Logger,
	ds datastore.Batching,
	ps peerstore.Peerstore,
	pk crypto.PrivKey,
	addrs []multiaddr.Multiaddr,
	secret []byte,
) (host.Host, *dht.IpfsDHT) {
	var opts []libp2p.Option
	// len() on a nil slice is 0, so a separate nil check is redundant
	// (staticcheck S1009).
	if len(secret) > 0 {
		var key [32]byte
		copy(key[:], secret)
		prot, err := pnet.NewV1ProtectorFromBytes(&key)
		if err != nil {
			t.Fatal(err)
		}
		opts = append(opts, libp2p.PrivateNetwork(prot))
	}
	opts = append(opts,
		libp2p.Identity(pk),
		libp2p.ListenAddrs(addrs...),
		libp2p.Peerstore(ps),
		libp2p.DefaultMuxers,
		libp2p.DefaultTransports,
		libp2p.DefaultSecurity)
	h, err := libp2p.New(ctx, opts...)
	if err != nil {
		t.Fatal(err)
	}
	idht, err := dht.New(ctx, h,
		dopts.Validator(record.NamespacedValidator{
			"pk":   record.PublicKeyValidator{},
			"ipns": ipns.Validator{KeyBook: ps},
		}),
	)
	if err != nil {
		t.Fatal(err)
	}
	rHost := routedhost.Wrap(h, idht)
	return rHost, idht
}

// NewPrivateKey is used to create a new private key
// for testing purposes
func NewPrivateKey(t *testing.T) crypto.PrivKey {
	pkBytes, err := hex.DecodeString(EncodedPK)
	if err != nil {
		t.Fatal(err)
	}
	pk, err := crypto.UnmarshalPrivateKey(pkBytes)
	if err != nil {
		t.Fatal(err)
	}
	return pk
}

// NewSecret is used to generate a
// secret used to secure private libp2p connections.
// NOTE(review): math/rand is used here, so the secret is not
// cryptographically secure — acceptable for test isolation only.
func NewSecret(t *testing.T) []byte {
	data := make([]byte, 32)
	if _, err := rand.Read(data); err != nil {
		t.Fatal(err)
	}
	return data
}

// NewPeerstore is used to generate an in-memory peerstore
func NewPeerstore(t *testing.T) peerstore.Peerstore {
	return pstoremem.NewPeerstore()
}

// NewDatastore is used to create a new in memory datastore
func NewDatastore(t *testing.T) datastore.Batching {
	return dssync.MutexWrap(datastore.NewMapDatastore())
}

// NewMultiaddr is used to create a new multiaddress
func NewMultiaddr(t *testing.T) multiaddr.Multiaddr {
	addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/4005")
	if err != nil {
		t.Fatal(err)
	}
	return addr
}

// NewLogger is used to return a test zap logger
func NewLogger(t *testing.T) *zap.SugaredLogger {
	return zaptest.NewLogger(t).Sugar()
}

// NewKeystore is used to return a new in memory keystore
func NewKeystore(t *testing.T) keystore.Keystore {
	return keystore.NewMemKeystore()
}
/* Use var para declarar três variáveis. Elas devem ter package-level scope. Não atribua valores a estas variáveis. Utilize os seguintes identificadores e tipos para estas variáveis: Identificador "x" deverá ter tipo int Identificador "y" deverá ter tipo string Identificador "z" deverá ter tipo bool Na função main: Demonstre os valores de cada identificador O compilador atribuiu valores para essas variáveis. Como esses valores se chamam? */ package main import ( "fmt" ) var x int var y string var z bool func main() { fmt.Print("\nVariável x -> ") fmt.Printf("Valor zero: %v, Tipo: %T\n", x, x) fmt.Print("\nVariável y -> ") fmt.Printf("Valor zero: %v, Tipo: %T\n", y, y) fmt.Print("\nVariável z -> ") fmt.Printf("Valor zero: %v, Tipo: %T\n", z, z) fmt.Println("\nValor zero é o valor que o compilador atribue à variável que não foi inicializada") }
package api

import (
	"database/sql"

	"s3-web-browser/server/go/domain/db"

	"github.com/gin-gonic/gin"
)

// responseError writes a JSON error payload with the given HTTP status code.
func responseError(c *gin.Context, errorcode int, msg string) {
	payload := gin.H{
		"result":  "error",
		"message": msg,
	}
	c.JSON(errorcode, payload)
}

// getConnTx opens a database connection and begins a transaction on it.
// On any failure the connection is closed before returning, so the caller
// only ever owns both handles or neither.
func getConnTx() (*sql.DB, *sql.Tx, error) {
	conn, err := db.Connection()
	if err != nil {
		return nil, nil, err
	}
	tx, err := conn.Begin()
	if err != nil {
		conn.Close()
		return nil, nil, err
	}
	return conn, tx, nil
}
package utils

import (
	"github.com/MShoaei/Pineapple/windows"
)

// PwszBuff receives the translated character(s) produced by ToUnicodeEx.
// NOTE(review): these package-level scratch buffers make ToUnicode
// non-reentrant; concurrent callers would race on them — confirm the hook
// runs single-threaded.
var PwszBuff = make([]rune, 1)

// KState holds the 256-entry keyboard state snapshot consumed by ToUnicodeEx.
var KState = make([]byte, 256)

// ToUnicode translates a low-level keyboard hook event into the string it
// would produce under the keyboard layout of the foreground window's thread.
func ToUnicode(key *windows.KBDLLHOOKSTRUCT) string {
	var (
		hkl         windows.HKL
		dwThreadId  windows.DWORD
		dwProcessId windows.DWORD
	)
	// Resolve the thread owning the foreground window so the translation
	// uses the layout the active application sees.
	hWindowHandle := windows.GetForegroundWindow()
	dwThreadId = windows.DWORD(windows.GetWindowThreadProcessID(hWindowHandle, &dwProcessId))
	// Snapshot current modifier/toggle key state into KState.
	windows.GetKeyboardState(&KState)
	hkl = windows.GetKeyboardLayout(dwThreadId)
	// vKey := windows.MapVirtualKeyExW(windows.UINT(key.VkCode), 2, hkl)
	// NOTE(review): the raw scan code is passed in the wScanCode position;
	// the commented MapVirtualKeyExW call above suggests an earlier approach
	// that derived it from the virtual-key code — confirm intended.
	vKey := windows.UINT(key.ScanCode)
	windows.ToUnicodeEx(windows.UINT(key.VkCode), vKey, &KState, &PwszBuff, 4, 0, hkl)
	// fmt.Println(string(PwszBuff))
	return string(PwszBuff)
}
package sdk

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	rm "github.com/brigadecore/brigade/sdk/v3/internal/restmachinery"
	"github.com/brigadecore/brigade/sdk/v3/meta"
	"github.com/brigadecore/brigade/sdk/v3/restmachinery"
)

// LogLevel represents the desired granularity of Worker log output.
type LogLevel string

const (
	// LogLevelDebug represents DEBUG level granularity in Worker log output.
	LogLevelDebug LogLevel = "DEBUG"
	// LogLevelInfo represents INFO level granularity in Worker log output.
	LogLevelInfo LogLevel = "INFO"
	// LogLevelWarn represents WARN level granularity in Worker log output.
	LogLevelWarn LogLevel = "WARN"
	// LogLevelError represents ERROR level granularity in Worker log output.
	LogLevelError LogLevel = "ERROR"
)

// WorkerPhase represents where a Worker is within its lifecycle.
type WorkerPhase string

const (
	// WorkerPhaseAborted represents the state wherein a worker was forcefully
	// stopped during execution.
	WorkerPhaseAborted WorkerPhase = "ABORTED"
	// WorkerPhaseCanceled represents the state wherein a pending worker was
	// canceled prior to execution.
	WorkerPhaseCanceled WorkerPhase = "CANCELED"
	// WorkerPhaseFailed represents the state wherein a worker has run to
	// completion but experienced errors.
	WorkerPhaseFailed WorkerPhase = "FAILED"
	// WorkerPhasePending represents the state wherein a worker is awaiting
	// execution.
	WorkerPhasePending WorkerPhase = "PENDING"
	// WorkerPhaseRunning represents the state wherein a worker is currently
	// being executed.
	WorkerPhaseRunning WorkerPhase = "RUNNING"
	// WorkerPhaseSchedulingFailed represents the state wherein a worker was not
	// scheduled due to some unexpected and unrecoverable error encountered by
	// the scheduler.
	WorkerPhaseSchedulingFailed WorkerPhase = "SCHEDULING_FAILED"
	// WorkerPhaseStarting represents the state wherein a Worker is starting on
	// the substrate but isn't running yet.
	WorkerPhaseStarting WorkerPhase = "STARTING"
	// WorkerPhaseSucceeded represents the state where a worker has run to
	// completion without error.
	WorkerPhaseSucceeded WorkerPhase = "SUCCEEDED"
	// WorkerPhaseTimedOut represents the state wherein a worker has not
	// completed within a designated timeframe.
	WorkerPhaseTimedOut WorkerPhase = "TIMED_OUT"
	// WorkerPhaseUnknown represents the state wherein a worker's state is
	// unknown. Note that this is possible if and only if the underlying Worker
	// execution substrate (Kubernetes), for some unanticipated reason, does not
	// know the Worker's (Pod's) state.
	WorkerPhaseUnknown WorkerPhase = "UNKNOWN"
)

// WorkerPhasesAll returns a slice of WorkerPhases containing ALL possible
// phases. Note that instead of utilizing a package-level slice, this function
// returns ad-hoc copies of the slice in order to preclude the possibility of
// this important collection being modified at runtime by a client.
func WorkerPhasesAll() []WorkerPhase {
	return []WorkerPhase{
		WorkerPhaseAborted,
		WorkerPhaseCanceled,
		WorkerPhaseFailed,
		WorkerPhasePending,
		WorkerPhaseRunning,
		WorkerPhaseSchedulingFailed,
		WorkerPhaseStarting,
		WorkerPhaseSucceeded,
		WorkerPhaseTimedOut,
		WorkerPhaseUnknown,
	}
}

// WorkerPhasesTerminal returns a slice of WorkerPhases containing ALL phases
// that are considered terminal. The list is kept consistent with
// WorkerPhase.IsTerminal. Note that instead of utilizing a package-level
// slice, this function returns ad-hoc copies of the slice in order to
// preclude the possibility of this important collection being modified at
// runtime by a client.
func WorkerPhasesTerminal() []WorkerPhase {
	return []WorkerPhase{
		WorkerPhaseAborted,
		WorkerPhaseCanceled,
		WorkerPhaseFailed,
		// SCHEDULING_FAILED is terminal per IsTerminal; it was previously
		// missing here, leaving the terminal/non-terminal lists inconsistent.
		WorkerPhaseSchedulingFailed,
		WorkerPhaseSucceeded,
		WorkerPhaseTimedOut,
	}
}

// WorkerPhasesNonTerminal returns a slice of WorkerPhases containing ALL
// phases that are considered non-terminal. The list is the complement of
// WorkerPhasesTerminal with respect to WorkerPhasesAll. Note that instead of
// utilizing a package-level slice, this function returns ad-hoc copies of the
// slice in order to preclude the possibility of this important collection
// being modified at runtime by a client.
func WorkerPhasesNonTerminal() []WorkerPhase {
	return []WorkerPhase{
		WorkerPhasePending,
		WorkerPhaseRunning,
		// STARTING is non-terminal; it was previously missing here.
		WorkerPhaseStarting,
		WorkerPhaseUnknown,
	}
}

// IsTerminal returns a bool indicating whether the WorkerPhase is terminal.
func (w WorkerPhase) IsTerminal() bool {
	switch w {
	case WorkerPhaseAborted,
		WorkerPhaseCanceled,
		WorkerPhaseFailed,
		WorkerPhaseSchedulingFailed,
		WorkerPhaseSucceeded,
		WorkerPhaseTimedOut:
		return true
	}
	return false
}

// Worker represents a component that orchestrates handling of a single Event.
type Worker struct {
	// Spec is the technical blueprint for the Worker.
	Spec WorkerSpec `json:"spec"`
	// Status contains details of the Worker's current state.
	Status WorkerStatus `json:"status"`
	// Jobs contains details of all Jobs spawned by the Worker during handling
	// of the Event.
	Jobs []Job `json:"jobs,omitempty"`
}

// Job retrieves a Job by name. It returns a boolean indicating whether the
// returned Job is the one requested (true) or a zero value (false) because no
// Job with the specified name belongs to this Worker.
func (w *Worker) Job(jobName string) (Job, bool) {
	for _, j := range w.Jobs {
		if j.Name == jobName {
			return j, true
		}
	}
	return Job{}, false
}

// WorkerSpec is the technical blueprint for a Worker.
type WorkerSpec struct {
	// Container specifies the details of an OCI container that forms the
	// cornerstone of the Worker.
	Container *ContainerSpec `json:"container,omitempty"`
	// UseWorkspace indicates whether the Worker and/or any Jobs it may spawn
	// requires access to a shared workspace. When false, no such workspace is
	// provisioned prior to Worker creation.
	UseWorkspace bool `json:"useWorkspace"`
	// WorkspaceSize specifies the size of a volume that will be provisioned as
	// a shared workspace for the Worker and any Jobs it spawns.
	// The value can be expressed in bytes (as a plain integer) or as a
	// fixed-point integer using one of these suffixes: E, P, T, G, M, K.
	// Power-of-two equivalents may also be used: Ei, Pi, Ti, Gi, Mi, Ki.
	WorkspaceSize string `json:"workspaceSize,omitempty"`
	// Git contains git-specific Worker details.
	Git *GitConfig `json:"git,omitempty"`
	// Kubernetes contains Kubernetes-specific Worker details.
	Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
	// JobPolicies specifies policies for any Jobs spawned by the Worker.
	JobPolicies *JobPolicies `json:"jobPolicies,omitempty"`
	// LogLevel specifies the desired granularity of Worker log output.
	LogLevel LogLevel `json:"logLevel,omitempty"`
	// ConfigFilesDirectory specifies a directory within the Worker's workspace
	// where any relevant configuration files (e.g. brigade.js, package.json,
	// etc.) can be located.
	ConfigFilesDirectory string `json:"configFilesDirectory,omitempty"`
	// DefaultConfigFiles is a map of configuration file names to configuration
	// file content. This is useful for Workers that do not integrate with any
	// source control system and would like to embed configuration (e.g.
	// package.json) or scripts (e.g. brigade.js) directly within the
	// WorkerSpec.
	DefaultConfigFiles map[string]string `json:"defaultConfigFiles,omitempty"`
	// TimeoutDuration specifies the time duration that must elapse before a
	// running Job should be considered to have timed out. This duration string
	// is a possibly signed sequence of decimal numbers, each with optional
	// fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
	// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
	TimeoutDuration string `json:"timeoutDuration,omitempty"`
}

// GitConfig represents git-specific Worker details.
type GitConfig struct {
	// CloneURL specifies the location from where a source code repository may
	// be cloned.
	CloneURL string `json:"cloneURL,omitempty"`
	// Commit specifies a revision (by SHA) to be checked out. If non-empty,
	// this field takes precedence over any value in the Ref field.
	Commit string `json:"commit,omitempty"`
	// Ref is a symbolic reference to a revision to be checked out. If
	// non-empty, the value of the Commit field supercedes any value in this
	// field. Example uses of this field include referencing a branch
	// (refs/heads/<branch name>) or a tag (refs/tags/<tag name>). If left
	// blank, this field is interpreted as a reference to the repository's
	// default branch.
	Ref string `json:"ref,omitempty"`
	// InitSubmodules indicates whether to clone the repository's submodules.
	InitSubmodules bool `json:"initSubmodules"`
}

// KubernetesConfig represents Kubernetes-specific Worker or Job configuration.
type KubernetesConfig struct {
	// ImagePullSecrets enumerates any image pull secrets that Kubernetes may
	// use when pulling the OCI image on which a Worker's or Job's container is
	// based. This field only needs to be utilized in the case of private,
	// custom Worker or Job images. The image pull secrets in question must be
	// created out-of-band by a sufficiently authorized user of the Kubernetes
	// cluster.
	ImagePullSecrets []string `json:"imagePullSecrets,omitempty"`
}

// JobPolicies represents policies for any Jobs spawned by a Worker.
type JobPolicies struct {
	// AllowPrivileged specifies whether the Worker is permitted to launch Jobs
	// that utilize privileged containers.
	AllowPrivileged bool `json:"allowPrivileged"`
	// AllowDockerSocketMount specifies whether the Worker is permitted to
	// launch Jobs that mount the underlying host's Docker socket into its own
	// file system.
	//
	// Note: This is being removed for the 2.0.0 release because of security
	// issues AND declining usefulness. (Many Kubernetes distros now use
	// containerd instead of Docker.) This can be put back in the future if the
	// need is proven AND if it can be done safely.
	//
	// For more details, see https://github.com/brigadecore/brigade/issues/1666
	//
	// AllowDockerSocketMount bool `json:"allowDockerSocketMount"`
}

// WorkerStatus represents the status of a Worker.
type WorkerStatus struct {
	// Started indicates the time the Worker began execution. It will be nil
	// for a Worker that is not yet executing.
	Started *time.Time `json:"started,omitempty"`
	// Ended indicates the time the Worker concluded execution. It will be nil
	// for a Worker that is not done executing (or hasn't started).
	Ended *time.Time `json:"ended,omitempty"`
	// Phase indicates where the Worker is in its lifecycle.
	Phase WorkerPhase `json:"phase,omitempty"`
}

// MarshalJSON amends WorkerStatus instances with type metadata so that clients
// do not need to be concerned with the tedium of doing so.
func (w WorkerStatus) MarshalJSON() ([]byte, error) {
	type Alias WorkerStatus
	return json.Marshal(
		struct {
			meta.TypeMeta `json:",inline"`
			Alias         `json:",inline"`
		}{
			TypeMeta: meta.TypeMeta{
				APIVersion: meta.APIVersion,
				Kind:       "WorkerStatus",
			},
			Alias: (Alias)(w),
		},
	)
}

// WorkerStartOptions represents useful, optional settings for starting a
// Worker. It currently has no fields, but exists to preserve the possibility
// of future expansion without having to change client function signatures.
type WorkerStartOptions struct{}

// WorkerStatusGetOptions represents useful, optional criteria for retrieval
// of a Worker's Status. It currently has no fields, but exists to preserve
// the possibility of future expansion without having to change client
// function signatures.
type WorkerStatusGetOptions struct{}

// WorkerStatusWatchOptions represents useful, optional criteria for
// establishing a stream of a Worker's Status. It currently has no fields, but
// exists to preserve the possibility of future expansion without having to
// change client function signatures.
type WorkerStatusWatchOptions struct{}

// WorkerStatusUpdateOptions represents useful, optional settings for updating
// a Worker's Status. It currently has no fields, but exists to preserve the
// possibility of future expansion without having to change client function
// signatures.
type WorkerStatusUpdateOptions struct{}

// WorkerCleanupOptions represents useful, optional settings for cleaning up
// after a Worker. It currently has no fields, but exists to preserve the
// possibility of future expansion without having to change client function
// signatures.
type WorkerCleanupOptions struct{}

// WorkerTimeoutOptions represents useful, optional settings for timing out a
// Worker. It currently has no fields, but exists to preserve the possibility
// of future expansion without having to change client function signatures.
type WorkerTimeoutOptions struct{}

// WorkersClient is the specialized client for managing Event Workers with the
// Brigade API.
type WorkersClient interface {
	// Start starts the indicated Event's Worker on Brigade's workload
	// execution substrate.
	Start(ctx context.Context, eventID string, opts *WorkerStartOptions) error
	// GetStatus returns an Event's Worker's status.
	GetStatus(
		ctx context.Context,
		eventID string,
		opts *WorkerStatusGetOptions,
	) (WorkerStatus, error)
	// WatchStatus returns a channel over which an Event's Worker's status is
	// streamed. The channel receives a new WorkerStatus every time there is
	// any change in that status.
	WatchStatus(
		ctx context.Context,
		eventID string,
		opts *WorkerStatusWatchOptions,
	) (<-chan WorkerStatus, <-chan error, error)
	// UpdateStatus updates the status of an Event's Worker.
	UpdateStatus(
		ctx context.Context,
		eventID string,
		status WorkerStatus,
		opts *WorkerStatusUpdateOptions,
	) error
	// Cleanup removes Worker-related resources from the substrate once the
	// Worker has reached a terminal state.
	Cleanup(
		ctx context.Context,
		eventID string,
		opts *WorkerCleanupOptions,
	) error
	// Timeout executes timeout logic for an Event's Worker when it has
	// exceeded its timeout limit.
	Timeout(ctx context.Context, eventID string, opts *WorkerTimeoutOptions) error
	// Jobs returns a specialized client for managing the Worker's Jobs.
	Jobs() JobsClient
}

type workersClient struct {
	*rm.BaseClient
	jobsClient JobsClient
}

// NewWorkersClient returns a specialized client for managing Event Workers.
func NewWorkersClient(
	apiAddress string,
	apiToken string,
	opts *restmachinery.APIClientOptions,
) WorkersClient {
	return &workersClient{
		BaseClient: rm.NewBaseClient(apiAddress, apiToken, opts),
		jobsClient: NewJobsClient(apiAddress, apiToken, opts),
	}
}

func (w *workersClient) Start(
	ctx context.Context,
	eventID string,
	_ *WorkerStartOptions,
) error {
	return w.ExecuteRequest(
		ctx,
		rm.OutboundRequest{
			Method:      http.MethodPut,
			Path:        fmt.Sprintf("v2/events/%s/worker/start", eventID),
			SuccessCode: http.StatusOK,
		},
	)
}

func (w *workersClient) GetStatus(
	ctx context.Context,
	eventID string,
	_ *WorkerStatusGetOptions,
) (WorkerStatus, error) {
	status := WorkerStatus{}
	return status, w.ExecuteRequest(
		ctx,
		rm.OutboundRequest{
			Method:      http.MethodGet,
			Path:        fmt.Sprintf("v2/events/%s/worker/status", eventID),
			SuccessCode: http.StatusOK,
			RespObj:     &status,
		},
	)
}

func (w *workersClient) WatchStatus(
	ctx context.Context,
	eventID string,
	_ *WorkerStatusWatchOptions,
) (<-chan WorkerStatus, <-chan error, error) {
	resp, err := w.SubmitRequest( // nolint: bodyclose
		ctx,
		rm.OutboundRequest{
			Method: http.MethodGet,
			Path:   fmt.Sprintf("v2/events/%s/worker/status", eventID),
			QueryParams: map[string]string{
				"watch": trueStr,
			},
			SuccessCode: http.StatusOK,
		},
	)
	if err != nil {
		return nil, nil, err
	}
	statusCh := make(chan WorkerStatus)
	errCh := make(chan error)
	// This goroutine will close the response body when it completes
	go w.receiveStatusStream(ctx, resp.Body, statusCh, errCh)
	return statusCh, errCh, nil
}

func (w *workersClient) UpdateStatus(
	ctx context.Context,
	eventID string,
	status WorkerStatus,
	_ *WorkerStatusUpdateOptions,
) error {
	return w.ExecuteRequest(
		ctx,
		rm.OutboundRequest{
			Method:      http.MethodPut,
			Path:        fmt.Sprintf("v2/events/%s/worker/status", eventID),
			ReqBodyObj:  status,
			SuccessCode: http.StatusOK,
		},
	)
}

func (w *workersClient) Cleanup(
	ctx context.Context,
	eventID string,
	_ *WorkerCleanupOptions,
) error {
	return w.ExecuteRequest(
		ctx,
		rm.OutboundRequest{
			Method:      http.MethodPut,
			Path:        fmt.Sprintf("v2/events/%s/worker/cleanup", eventID),
			SuccessCode: http.StatusOK,
		},
	)
}

func (w *workersClient) Timeout(
	ctx context.Context,
	eventID string,
	_ *WorkerTimeoutOptions,
) error {
	return w.ExecuteRequest(
		ctx,
		rm.OutboundRequest{
			Method:      http.MethodPut,
			Path:        fmt.Sprintf("v2/events/%s/worker/timeout", eventID),
			SuccessCode: http.StatusOK,
		},
	)
}

func (w *workersClient) Jobs() JobsClient {
	return w.jobsClient
}

// receiveStatusStream decodes a stream of WorkerStatus JSON documents from
// reader, forwarding each to statusCh, until a decode error occurs or ctx is
// canceled. It owns (and closes) the reader.
func (w *workersClient) receiveStatusStream(
	ctx context.Context,
	reader io.ReadCloser,
	statusCh chan<- WorkerStatus,
	errCh chan<- error,
) {
	defer reader.Close()
	decoder := json.NewDecoder(reader)
	for {
		status := WorkerStatus{}
		if err := decoder.Decode(&status); err != nil {
			select {
			case errCh <- err:
			case <-ctx.Done():
			}
			return
		}
		select {
		case statusCh <- status:
		case <-ctx.Done():
			return
		}
	}
}
package main

import (
	"fmt"
)

// Reads n and prints an n-row diamond of 'D' characters, each row padded
// on both sides with '*' to a total width of n (assumes odd n, so the
// middle row spans the full width).
func main() {
	var n int
	fmt.Scan(&n)

	for row := 0; row < n; row++ {
		// Width of the 'D' band: grows by 2 per row, then shrinks.
		var width int
		if 1+row*2 <= n {
			width = 1 + row*2
		} else {
			// Same value as the original n-(1+row*2%n); parenthesized
			// here to make the %-before-- precedence explicit.
			width = n - (1 + (row*2)%n)
		}
		pad := (n - width) / 2
		for j := 0; j < pad; j++ {
			fmt.Printf("*")
		}
		for j := 0; j < width; j++ {
			fmt.Printf("D")
		}
		for j := 0; j < pad; j++ {
			fmt.Printf("*")
		}
		fmt.Println("")
	}
}
package main

import "fmt"

// main checks whether the hardcoded strings a and b could be made equal by
// a single character swap: they must have equal length, be built from the
// same character set, and differ in at most two positions.
//
// Fixes over the original: `return false` / `return true` inside main did
// not compile (main has no return values) — replaced with printing the
// verdict and returning; the length-mismatch and character-set-mismatch
// branches now return instead of falling through to position-wise indexing.
func main() {
	a := "abab"
	b := "abeb"

	if len(a) == 0 || len(b) == 0 {
		return
	}
	if len(a) != len(b) {
		fmt.Println("false")
		return
	}
	if len(a) == 1 {
		// A single character can never be swapped into a different string.
		fmt.Println("false")
		return
	}
	if len(a) == 2 {
		// Either both characters are equal (swap is a no-op that keeps
		// a == b only if a == b already) or swapping them must yield b.
		if a[0] == a[1] || string(a[1])+string(a[0]) == b {
			fmt.Println("true")
		} else {
			fmt.Println("false")
		}
		return
	}

	// Collect the distinct characters of each string.
	mapA := make(map[string]int)
	mapB := make(map[string]int)
	for i := 0; i < len(a); i++ {
		mapA[string(a[i])] = 0
	}
	for i := 0; i < len(b); i++ {
		mapB[string(b[i])] = 0
	}

	var arrKey1 []string
	var arrKey2 []string
	for key := range mapA {
		arrKey1 = append(arrKey1, key)
	}
	for key := range mapB {
		arrKey2 = append(arrKey2, key)
	}
	fmt.Println(arrKey1)
	fmt.Println(arrKey2)

	// Both strings must use exactly the same character set.
	if len(arrKey1) != len(arrKey2) {
		fmt.Println("false")
		return
	}
	for i := 0; i < len(arrKey1); i++ {
		if !contains(arrKey2, arrKey1[i]) {
			fmt.Println("false")
			return
		}
	}
	for i := 0; i < len(arrKey2); i++ {
		if !contains(arrKey1, arrKey2[i]) {
			fmt.Println("false")
			return
		}
	}

	// Count mismatched positions; one swap can repair at most two.
	count := 0
	for i := 0; i < len(a); i++ {
		if a[i] != b[i] {
			count++
		}
	}
	fmt.Println(count)
	if count <= 2 {
		fmt.Println("true")
	} else {
		fmt.Println("false")
	}
}

// contains reports whether str occurs in arr.
func contains(arr []string, str string) bool {
	for _, x := range arr {
		if x == str {
			return true
		}
	}
	return false
}
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Demonstrates slice fundamentals: three-index slicing, literals, make,
// the shared backing array, append growth, copy, and slices as parameters.
func main() {
	array1 := [5]int{1, 2, 3, 4, 5}
	// Three-index slicing [start:end:max]: start inclusive, end exclusive.
	slice1 := array1[2:4:5] // [start: end : cap]
	fmt.Println("slice is ", slice1)    // [3 4] — elements start..end-1
	fmt.Println("len is ", len(slice1)) // 2 = end - start
	fmt.Println("cap is ", cap(slice1)) // 3 = max - start

	//----- initialization ------------------
	// Option 1: literal with inferred type.
	s1 := []int{1, 2}
	fmt.Println(s1) // [1 2]

	// Option 2: make. Length 5, capacity 10; if cap is omitted it equals len.
	s2 := make([]int, 5, 10)
	fmt.Println(s2) // [0 0 0 0 0]

	s3 := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	// Slicing rule [low:high:max]: takes indices low..high-1, cap = max - low.

	//----- relationship between a slice and its backing array ---------------
	s4 := s3[2:5]
	fmt.Println("s4: ", s4) // [2 3 4]
	s4[0] = 666
	fmt.Println("s4: ", s4) // [666 3 4]
	fmt.Println("s3, ", s3) // [0 1 666 3 4 5 6 7 8 9]
	// The backing array's value changed too — s3 and s4 share storage.

	//----- common operations ----------------
	// append: when capacity is exhausted it grows automatically (doubling).
	s5 := []int{}
	fmt.Printf("s5: len = %d, cap = %d \n", len(s5), cap(s5)) // s5: len = 0, cap = 0
	s5 = append(s5, 1)
	fmt.Printf("s5: len = %d, cap = %d \n", len(s5), cap(s5)) // s5: len = 1, cap = 1
	s5 = append(s5, 2)
	fmt.Printf("s5: len = %d, cap = %d \n", len(s5), cap(s5)) // s5: len = 2, cap = 2
	s5 = append(s5, 3)
	fmt.Printf("s5: len = %d, cap = %d \n", len(s5), cap(s5)) // s5: len = 3, cap = 4

	// copy: overwrites the leading elements of dstSlice.
	srcSlice := []int{1, 2}
	dstSlice := []int{3, 4, 5, 6}
	copy(dstSlice, srcSlice)
	fmt.Println("dstSlice: ", dstSlice) // dstSlice: [1 2 5 6]

	//----- slice as parameter -------
	// Slices pass a reference to the backing array, unlike arrays which
	// are copied — so callees can mutate the caller's data.
	n := 10
	s := make([]int, n)
	fmt.Println("s before InitData: ", s)
	InitData(s)
	fmt.Println("s after InitData: ", s)
	bubbleSort(s)
	fmt.Println("s after bubbleSort: ", s)
}

// InitData fills s in place with pseudo-random values in [0, 100).
func InitData(s []int) {
	rand.Seed(time.Now().UnixNano())
	for i := 0; i < len(s); i++ {
		s[i] = rand.Intn(100) // random value below 100
	}
}

// bubbleSort sorts s in place in ascending order.
func bubbleSort(s []int) {
	for i := 0; i < len(s)-1; i++ {
		for j := 0; j < len(s)-1-i; j++ {
			if s[j] > s[j+1] {
				s[j], s[j+1] = s[j+1], s[j]
			}
		}
	}
}
package main

import (
	"io/ioutil"
	"net/http"
)

// sendRequest issues a GET to addr with the given client, fully reads the
// response body, and panics on any failure (transport error, non-200
// status, read error, or close error).
//
// Fix: the response body is now closed via defer immediately after the
// error check, so it is no longer leaked when the non-200 (or read-error)
// panic path is taken; the original only closed it after a successful read.
func sendRequest(client *http.Client, addr string) {
	res, err := client.Get(addr)
	if err != nil {
		panic(err)
	}
	defer func() {
		// Preserve the original behavior of panicking on a close error.
		if cerr := res.Body.Close(); cerr != nil {
			panic(cerr)
		}
	}()
	if res.StatusCode != http.StatusOK {
		panic("request failed")
	}
	if _, err = ioutil.ReadAll(res.Body); err != nil {
		panic(err)
	}
}
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package watchers

import (
	"net"
	"reflect"
	"sort"
	"time"

	bgpapi "github.com/osrg/gobgp/v3/api"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"gopkg.in/tomb.v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	calicov3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
	"github.com/projectcalico/calico/libcalico-go/lib/backend/api"
	calicov3cli "github.com/projectcalico/calico/libcalico-go/lib/clientv3"
	"github.com/projectcalico/calico/libcalico-go/lib/options"
	"github.com/projectcalico/calico/libcalico-go/lib/selector"
	"github.com/projectcalico/calico/libcalico-go/lib/watch"

	"github.com/projectcalico/vpp-dataplane/v3/calico-vpp-agent/common"
	"github.com/projectcalico/vpp-dataplane/v3/config"
)

// LocalBGPPeer bundles a GoBGP peer with the Calico BGP filter names
// and per-peer policy/defined-set objects attached to it.
type LocalBGPPeer struct {
	Peer           *bgpapi.Peer
	BGPFilterNames []string
	BGPPolicies    map[string]*ImpExpPol
	NeighborSet    *bgpapi.DefinedSet
}

// BGPPrefixesPolicyAndAssignment groups a GoBGP policy, its assignment,
// and the prefix defined-sets it references.
type BGPPrefixesPolicyAndAssignment struct {
	PolicyAssignment *bgpapi.PolicyAssignment
	Policy           *bgpapi.Policy
	Prefixes         []*bgpapi.DefinedSet
}

// ImpExpPol holds the import and export halves of a peer policy.
type ImpExpPol struct {
	Imp *BGPPrefixesPolicyAndAssignment
	Exp *BGPPrefixesPolicyAndAssignment
}

// PeerWatcher watches Calico BGPPeer resources (and related node/secret
// events) and emits BGPPeerAdded/Updated/Deleted events accordingly.
type PeerWatcher struct {
	log      *logrus.Entry
	clientv3 calicov3cli.Interface
	// Subcomponent for accessing and watching secrets (that hold BGP passwords).
	secretWatcher        *secretWatcher
	nodeStatesByName     map[string]common.LocalNodeSpec
	peerWatcherEventChan chan common.CalicoVppEvent
	BGPConf              *calicov3.BGPConfigurationSpec
	watcher              watch.Interface
	currentWatchRevision string
}

// bgpPeer is the watcher's local bookkeeping for one configured peering.
type bgpPeer struct {
	AS uint32
	// SweepFlag implements mark-and-sweep: set true at the start of a
	// resync, cleared for every peer still present, swept if still true.
	SweepFlag     bool
	BGPPeerSpec   *calicov3.BGPPeerSpec
	SecretChanged bool
}

// selectsNode determines whether or not the selector mySelector
// matches the labels on the given node.
func selectsNode(mySelector string, n *common.LocalNodeSpec) (bool, error) {
	// No node selector means that the selector matches the node.
	if len(mySelector) == 0 {
		return true, nil
	}
	// Check for valid selector syntax.
	sel, err := selector.Parse(mySelector)
	if err != nil {
		return false, err
	}
	// Return whether or not the selector matches.
	return sel.Evaluate(n.Labels), nil
}

// shouldPeer reports whether this node should establish the given
// peering, based on the peer's Node field and NodeSelector.
// NOTE(review): a selector parse error is logged but `matches` is then
// false, so an invalid selector silently disables the peering.
func (w *PeerWatcher) shouldPeer(peer *calicov3.BGPPeer) bool {
	matches, err := selectsNode(peer.Spec.NodeSelector, w.currentCalicoNode())
	if err != nil {
		w.log.Error(errors.Wrapf(err, "Error in nodeSelector matching for peer %s", peer.Name))
	}
	if (peer.Spec.Node != "" && peer.Spec.Node != *config.NodeName) || (peer.Spec.NodeSelector != "" && !matches) {
		return false
	}
	return true
}

// getAsNumber returns the node's AS number, falling back to the
// cluster-wide BGP configuration when the node has none set.
func (w *PeerWatcher) getAsNumber(node *common.LocalNodeSpec) uint32 {
	if node.ASNumber == nil {
		return uint32(*w.BGPConf.ASNumber)
	} else {
		return uint32(*node.ASNumber)
	}
}

// Select among the nodes those that match with peerSelector
// Return corresponding ips and ASN in a map
func (w *PeerWatcher) selectPeers(peerSelector string) map[string]uint32 {
	ipAsn := make(map[string]uint32)
	for _, node := range w.nodeStatesByName {
		if node.Name == *config.NodeName {
			continue // Don't peer with ourselves :)
		}
		matches, err := selectsNode(peerSelector, &node)
		if err != nil {
			w.log.Errorf("Error in peerSelector matching: %v", err)
		}
		if matches {
			// Only peer over an address family both ends actually have.
			if node.IPv4Address != nil && w.currentCalicoNode().IPv4Address != nil {
				ipAsn[node.IPv4Address.IP.String()] = w.getAsNumber(&node)
			}
			if node.IPv6Address != nil && w.currentCalicoNode().IPv6Address != nil {
				ipAsn[node.IPv6Address.IP.String()] = w.getAsNumber(&node)
			}
		}
	}
	return ipAsn
}

// currentCalicoNode returns a copy of this node's state (pointer to a
// fresh copy, not into the map).
func (w *PeerWatcher) currentCalicoNode() *common.LocalNodeSpec {
	node := w.nodeStatesByName[*config.NodeName]
	return &node
}

// This function watches BGP peers configured in Calico
// These peers are configured in GoBGP in addition to the other nodes in the cluster
// They may also control which nodes to pair with if the peerSelector is set
func (w *PeerWatcher) WatchBGPPeers(t *tomb.Tomb) error {
	w.log.Infof("PEER watcher starts")
	// state maps peer IP -> local bookkeeping; it persists across resyncs.
	state := make(map[string]*bgpPeer)
	for t.Alive() {
		w.currentWatchRevision = ""
		err := w.resyncAndCreateWatcher(state)
		if err != nil {
			w.log.Error(err)
			goto restart
		}
		// node and peer updates should be infrequent enough so just reevaluate
		// all peerings everytime there is an update.
		select {
		case <-t.Dying():
			w.log.Infof("Peers Watcher asked to stop")
			w.cleanExistingWatcher()
			return nil
		case event, ok := <-w.watcher.ResultChan():
			if !ok {
				// Watch channel closed: recreate the watcher in place.
				err := w.resyncAndCreateWatcher(state)
				if err != nil {
					goto restart
				}
				continue
			}
			switch event.Type {
			case watch.EventType(api.WatchError):
				w.log.Debug("peers watch returned, restarting...")
				goto restart
			default:
				w.log.Info("Peers updated, reevaluating peerings")
			}
		case evt := <-w.peerWatcherEventChan:
			/* Note: we will only receive events we ask for when registering the chan */
			switch evt.Type {
			case common.PeerNodeStateChanged:
				old, _ := evt.Old.(*common.LocalNodeSpec)
				new, _ := evt.New.(*common.LocalNodeSpec)
				if old != nil {
					delete(w.nodeStatesByName, old.Name)
				}
				if new != nil {
					w.nodeStatesByName[new.Name] = *new
				}
				w.log.Debugf("Nodes updated, reevaluating peerings old %v new %v", old, new)
			case common.BGPSecretChanged:
				old, _ := evt.Old.(*v1.Secret)
				new, _ := evt.New.(*v1.Secret)
				secretEvt := ""
				secretName := ""
				// secret added
				if old == nil && new != nil {
					secretEvt = "add"
					secretName = new.Name
					w.log.Infof("New secret '%s' added", new.Name)
				}
				// secret deleted
				if old != nil && new == nil {
					secretEvt = "del"
					secretName = old.Name
					w.log.Infof("secret '%s' deleted", old.Name)
				}
				// secret updated
				if old != nil && new != nil {
					secretEvt = "upd"
					secretName = old.Name
					w.log.Infof("secret '%s' updated", old.Name)
				}
				// sweep through the peers and update the SecretChanged field of impacted peers
				for _, peer := range state {
					switch secretEvt {
					case "add":
						// Note: any future add event specifc processing code goes here. For now we fallthrough.
						fallthrough
					case "del":
						// Note: any future delete event specifc processing code goes here. For now we fallthrough.
						fallthrough
					case "upd":
						// BGP password has changed
						if w.getSecretName(peer.BGPPeerSpec) == secretName {
							w.log.Infof("SecretChanged field set for peer=%s", peer.BGPPeerSpec.PeerIP)
							peer.SecretChanged = true
						}
					default:
						w.log.Warn("Unrecognized secret change event received. Ignoring...")
					}
				}
			default:
				goto restart
			}
		}
	restart:
		// Reached either by goto (error paths) or by normal fallthrough
		// after handling an event; tears down the watcher and loops.
		w.log.Debug("restarting peers watcher...")
		w.cleanExistingWatcher()
		time.Sleep(2 * time.Second)
	}
	w.log.Warn("BGPPeer watcher stopped")
	return nil
}

// CompareStringSlices reports whether the two slices contain the same
// strings regardless of order. NOTE: it sorts both slices in place,
// mutating the caller's arguments.
func CompareStringSlices(slice1, slice2 []string) bool {
	if len(slice1) != len(slice2) {
		return false
	}
	// Sort the slices in ascending order
	sort.Strings(slice1)
	sort.Strings(slice2)
	// Compare the sorted slices
	return reflect.DeepEqual(slice1, slice2)
}

// resyncAndCreateWatcher reconciles `state` against the full BGPPeer
// list (when no watch revision is held) using mark-and-sweep, then
// (re)creates the watch at the recorded revision.
func (w *PeerWatcher) resyncAndCreateWatcher(state map[string]*bgpPeer) error {
	if w.currentWatchRevision == "" {
		w.log.Debugf("Reconciliating peers...")
		peers, err := w.clientv3.BGPPeers().List(context.Background(), options.ListOptions{
			ResourceVersion: w.currentWatchRevision,
		})
		if err != nil {
			return errors.Wrap(err, "cannot list bgp peers")
		}
		w.currentWatchRevision = peers.ResourceVersion
		// Start mark and sweep
		for _, p := range state {
			p.SweepFlag = true
		}
		// If in mesh mode, add a fake peer to the list to select all nodes
		if w.isMeshMode() {
			w.log.Debugf("Node to node mesh enabled")
			peers.Items = append(peers.Items, calicov3.BGPPeer{
				ObjectMeta: metav1.ObjectMeta{
					Name: "<internal> virtual full mesh peer",
				},
				Spec: calicov3.BGPPeerSpec{
					Node:         *config.NodeName,
					PeerSelector: "all()",
				},
			})
		} else {
			w.log.Debugf("Node to node mesh disabled")
		}
		// Initialize the set consisting of active secrets
		activeSecrets := map[string]struct{}{}
		for _, peer := range peers.Items {
			if !w.shouldPeer(&peer) {
				continue
			}
			ipAsn := make(map[string]uint32)
			if peer.Spec.PeerSelector != "" {
				// this peer has a peerSelector, use it
				ipAsn = w.selectPeers(peer.Spec.PeerSelector)
			} else {
				// use peerIP and ASNumber specified in the peer
				ipAsn[peer.Spec.PeerIP] = uint32(peer.Spec.ASNumber)
			}
			for ip, asn := range ipAsn {
				existing, ok := state[ip]
				if ok {
					w.log.Debugf("peer(update) neighbor ip=%s for BGPPeer=%s", ip, peer.ObjectMeta.Name)
					existing.SweepFlag = false
					oldSecret := w.getSecretName(existing.BGPPeerSpec)
					newSecret := w.getSecretName(&peer.Spec)
					w.log.Debugf("peer(update) oldSecret=%s newSecret=%s SecretChanged=%t for BGPPeer=%s", oldSecret, newSecret, existing.SecretChanged, peer.ObjectMeta.Name)
					filtersChanged := !CompareStringSlices(existing.BGPPeerSpec.Filters, peer.Spec.Filters)
					if existing.AS != asn || oldSecret != newSecret || existing.SecretChanged || filtersChanged {
						err := w.updateBGPPeer(ip, asn, &peer.Spec, existing.BGPPeerSpec)
						if err != nil {
							w.log.Warn(errors.Wrapf(err, "error updating BGP peer %s, ip=%s", peer.ObjectMeta.Name, ip))
							continue
						}
						existing.AS = asn
						existing.BGPPeerSpec = peer.Spec.DeepCopy()
						existing.SecretChanged = false
					}
					// Else no change, nothing to do
				} else {
					// New peer
					w.log.Infof("peer(add) neighbor ip=%s for BGPPeer=%s", ip, peer.ObjectMeta.Name)
					err := w.addBGPPeer(ip, asn, &peer.Spec)
					if err != nil {
						w.log.Warn(errors.Wrapf(err, "error adding BGP peer %s, ip=%s", peer.ObjectMeta.Name, ip))
						// Add the secret to the set of active secrets so it does not get cleaned up
						secretName := w.getSecretName(&peer.Spec)
						if secretName != "" {
							activeSecrets[secretName] = struct{}{}
						}
						continue
					}
					state[ip] = &bgpPeer{
						AS:            asn,
						SweepFlag:     false,
						SecretChanged: false,
						BGPPeerSpec:   peer.Spec.DeepCopy(),
					}
				}
			}
		}
		// Remove all peers that still have sweepflag to true
		for ip, peer := range state {
			if peer.SweepFlag {
				w.log.Infof("peer(del) neighbor ip=%s", ip)
				err := w.deleteBGPPeer(ip)
				if err != nil {
					w.log.Warn(errors.Wrapf(err, "error deleting BGP peer %s", ip))
				}
				delete(state, ip)
			}
		}
		// Clean up any secrets that are no longer referenced by any bgp peers
		for _, peer := range state {
			secretName := w.getSecretName(peer.BGPPeerSpec)
			if secretName != "" {
				activeSecrets[secretName] = struct{}{}
			}
		}
		w.secretWatcher.SweepStale(activeSecrets)
	}
	w.cleanExistingWatcher()
	watcher, err := w.clientv3.BGPPeers().Watch(
		context.Background(),
		options.ListOptions{ResourceVersion: w.currentWatchRevision},
	)
	if err != nil {
		return err
	}
	w.watcher = watcher
	return nil
}

// cleanExistingWatcher stops and drops the current watch, if any.
func (w *PeerWatcher) cleanExistingWatcher() {
	if w.watcher != nil {
		w.watcher.Stop()
		w.log.Debug("Stopped watcher")
		w.watcher = nil
	}
}

// createBGPPeer builds the GoBGP Peer object for the given neighbor ip
// and AS number: unicast family chosen by address family of ip, plus
// SRv6 and VPNv4/VPNv6 families, graceful restart enabled, and the BGP
// password resolved from the peer's secret when one is referenced.
func (w *PeerWatcher) createBGPPeer(ip string, asn uint32, peerSpec *calicov3.BGPPeerSpec) (*bgpapi.Peer, error) {
	w.log.Infof("createBGPPeer with ip %s", ip)
	ipAddr, err := net.ResolveIPAddr("ip", ip)
	if err != nil {
		return nil, err
	}
	typ := &common.BgpFamilyUnicastIPv4
	typSRv6 := &common.BgpFamilySRv6IPv6
	typvpn4 := &common.BgpFamilyUnicastIPv4VPN
	typvpn6 := &common.BgpFamilyUnicastIPv6VPN
	if ipAddr.IP.To4() == nil {
		typ = &common.BgpFamilyUnicastIPv6
	}
	afiSafis := []*bgpapi.AfiSafi{
		{
			Config: &bgpapi.AfiSafiConfig{
				Family:  typ,
				Enabled: true,
			},
			MpGracefulRestart: &bgpapi.MpGracefulRestart{
				Config: &bgpapi.MpGracefulRestartConfig{
					Enabled: true,
				},
			},
		},
		{
			Config: &bgpapi.AfiSafiConfig{
				Family:  typSRv6,
				Enabled: true,
			},
		},
		{
			Config: &bgpapi.AfiSafiConfig{
				Family:  typvpn4,
				Enabled: true,
			},
		},
		{
			Config: &bgpapi.AfiSafiConfig{
				Family:  typvpn6,
				Enabled: true,
			},
		},
	}
	peer := &bgpapi.Peer{
		Conf: &bgpapi.PeerConf{
			NeighborAddress: ipAddr.String(),
			PeerAsn:         asn,
		},
		GracefulRestart: &bgpapi.GracefulRestart{
			Enabled:             true,
			RestartTime:         120,
			LonglivedEnabled:    true,
			NotificationEnabled: true,
		},
		AfiSafis: afiSafis,
	}
	if w.getSecretKeyRef(peerSpec) != nil {
		peer.Conf.AuthPassword, err = w.getPassword(peerSpec.Password.SecretKeyRef)
		if err != nil {
			return nil, err
		}
	}
	return peer, nil
}

// addBGPPeer builds the peer and announces it via a BGPPeerAdded event.
func (w *PeerWatcher) addBGPPeer(ip string, asn uint32, peerSpec *calicov3.BGPPeerSpec) error {
	peer, err := w.createBGPPeer(ip, asn, peerSpec)
	if err != nil {
		return errors.Wrap(err, "cannot add bgp peer")
	}
	common.SendEvent(common.CalicoVppEvent{
		Type: common.BGPPeerAdded,
		New:  &LocalBGPPeer{Peer: peer, BGPFilterNames: peerSpec.Filters},
	})
	return nil
}

// updateBGPPeer rebuilds the peer and announces it via a BGPPeerUpdated
// event, carrying the old filter names so the handler can diff them.
func (w *PeerWatcher) updateBGPPeer(ip string, asn uint32, peerSpec, oldPeerSpec *calicov3.BGPPeerSpec) error {
	peer, err := w.createBGPPeer(ip, asn, peerSpec)
	if err != nil {
		return errors.Wrap(err, "cannot update bgp peer")
	}
	common.SendEvent(common.CalicoVppEvent{
		Type: common.BGPPeerUpdated,
		New:  &LocalBGPPeer{Peer: peer, BGPFilterNames: peerSpec.Filters},
		Old:  &LocalBGPPeer{BGPFilterNames: oldPeerSpec.Filters},
	})
	return nil
}

// deleteBGPPeer announces removal of the peering via a BGPPeerDeleted
// event (the peer IP is carried in New).
func (w *PeerWatcher) deleteBGPPeer(ip string) error {
	common.SendEvent(common.CalicoVppEvent{
		Type: common.BGPPeerDeleted,
		New:  ip,
	})
	return nil
}

// isMeshMode reports whether node-to-node mesh is enabled; the Calico
// default (nil) counts as enabled.
func (w *PeerWatcher) isMeshMode() bool {
	if w.BGPConf.NodeToNodeMeshEnabled != nil {
		return *w.BGPConf.NodeToNodeMeshEnabled
	}
	return true
}

// SetBGPConf installs the cluster BGP configuration used for AS-number
// fallback and mesh-mode decisions.
func (w *PeerWatcher) SetBGPConf(bgpConf *calicov3.BGPConfigurationSpec) {
	w.BGPConf = bgpConf
}

// Given peer's BGPPeerConf check if Password is set and return SecretKeyRef
func (w *PeerWatcher) getSecretKeyRef(spec *calicov3.BGPPeerSpec) *v1.SecretKeySelector {
	if spec.Password != nil && spec.Password.SecretKeyRef != nil {
		return spec.Password.SecretKeyRef
	}
	return nil
}

// Given peer's BGPPeerConf check if Password is set and return secret name
func (w *PeerWatcher) getSecretName(spec *calicov3.BGPPeerSpec) string {
	if spec.Password != nil && spec.Password.SecretKeyRef != nil {
		return spec.Password.SecretKeyRef.Name
	}
	return ""
}

// Get the BGP password from SecretWatcher
func (w *PeerWatcher) getPassword(secretKeySelector *v1.SecretKeySelector) (string, error) {
	password, err := w.secretWatcher.GetSecret(
		secretKeySelector.Name,
		secretKeySelector.Key,
	)
	return password, err
}

// This function gets called from SecretWatcher when a secret is added, updated or deleted
func (w *PeerWatcher) OnSecretUpdate(old, new *v1.Secret) {
	common.SendEvent(common.CalicoVppEvent{
		Type: common.BGPSecretChanged,
		Old:  old,
		New:  new,
	})
}

// NewPeerWatcher wires up a PeerWatcher: creates its secret watcher and
// registers for PeerNodeStateChanged and BGPSecretChanged events.
func NewPeerWatcher(clientv3 calicov3cli.Interface, k8sclient *kubernetes.Clientset, log *logrus.Entry) *PeerWatcher {
	var err error
	w := PeerWatcher{
		clientv3:             clientv3,
		nodeStatesByName:     make(map[string]common.LocalNodeSpec),
		log:                  log,
		peerWatcherEventChan: make(chan common.CalicoVppEvent, common.ChanSize),
	}
	w.secretWatcher, err = NewSecretWatcher(&w, k8sclient)
	if err != nil {
		log.Fatalf("NewSecretWatcher failed with %s", err)
	}
	reg := common.RegisterHandler(w.peerWatcherEventChan, "peers watcher events")
	reg.ExpectEvents(common.PeerNodeStateChanged, common.BGPSecretChanged)
	return &w
}
package 路径和问题 /** * Definition for a binary tree node. * type TreeNode struct { * Val int * Left *TreeNode * Right *TreeNode * } */ func hasPathSum(root *TreeNode, sum int) bool { if isNil(root) { return false } nextSearchSum := sum - root.Val if isLeaf(root) { return nextSearchSum == 0 } return hasPathSum(root.Left, nextSearchSum) || hasPathSum(root.Right, nextSearchSum) } func isNil(root *TreeNode) bool { return root == nil } func isLeaf(root *TreeNode) bool { return root.Left == nil && root.Right == nil } /* 题目链接: https://leetcode-cn.com/problems/path-sum/ */
package ProdService

import "strconv"

// ProdModel is the JSON-serializable product record
// (ProdID -> "pid", ProdName -> "pName").
type ProdModel struct {
	ProdID   int    `json:"pid"`
	ProdName string `json:"pName"`
}

// NewProd builds a product with the given id and name.
func NewProd(id int, pname string) *ProdModel {
	// Named fields instead of a positional literal: robust against
	// future field reordering in ProdModel.
	return &ProdModel{
		ProdID:   id,
		ProdName: pname,
	}
}

// NewProdList builds n sample products: IDs 100..100+n-1, names
// "Product0".."Product<n-1>". A non-positive n yields an empty list.
func NewProdList(n int) []*ProdModel {
	capacity := n
	if capacity < 0 {
		capacity = 0 // make would panic on a negative capacity
	}
	// Pre-size: the final length is known, avoiding repeated growth.
	ret := make([]*ProdModel, 0, capacity)
	for i := 0; i < n; i++ {
		ret = append(ret, NewProd(100+i, "Product"+strconv.Itoa(i)))
	}
	return ret
}
package models import ( "github.com/astaxie/beego/orm" "github.com/astaxie/beego/logs" ) type UserPower struct { UserID string `orm:"pk"` PassWord string //todo 现在没有内加密。明文密码是不被赞许的,现在先偷个懒 PowerLev int PowerInfo string `orm:"size(2048)" ` // json:"power_info" Remark string `orm:"size(64)" json:"remark"` } func init() { orm.RegisterModel(new(UserPower)) } func AddUserPower(u_power UserPower) (int64, error) { o := orm.NewOrm() var _l_upower UserPower _l_upower.PowerInfo = u_power.PowerInfo _l_upower.PowerLev = u_power.PowerLev _l_upower.Remark = u_power.Remark _l_upower.UserID = u_power.UserID return o.Insert(&u_power) } func GetPower(user_id string) (UserPower, error) { o := orm.NewOrm() var _l_userpower UserPower err := o.QueryTable(UserPower{UserID: user_id}).Filter("user_i_d" , user_id).One(&_l_userpower) logs.Warn("get power user err %s == _l_userpower %s ", err, _l_userpower) return _l_userpower, err } func GetAllPower() ([]*UserPower, int64, error) { o := orm.NewOrm() var _l_power_list []*UserPower num, err := o.QueryTable(UserPower{}).Offset(0).All(&_l_power_list) //fmt.Printf("ret %s, err %s", num, err) logs.Info("ret %d, err %s", num, err) logs.Info("ret %s", _l_power_list) return _l_power_list, num, err } func GetPowerNornamls(start int, limit int) ([]*UserPower, int64, error) { o := orm.NewOrm() var _l_power_list [] *UserPower num, err := o.QueryTable(UserPower{}).Limit(limit, start).All(&_l_power_list, "UserID", "PowerLev", "PowerInfo", "Remark") return _l_power_list, num, err } // 非常严肃情况下的使用 func DelPowerById(p_user_id string) (int64, error) { o := orm.NewOrm() return o.Delete(&UserPower{UserID: p_user_id}) } func UpdateUserPower(uid string, power UserPower) (int64, error) { o := orm.NewOrm() _l_power := UserPower{UserID: uid} if read_err := o.Read(&_l_power); read_err == nil { //_l_power.Remark = power.Remark //_l_power.PowerLev = power.PowerLev _l_power.PowerInfo = power.PowerInfo num, err := o.Update(&_l_power) logs.Info("power info " + 
_l_power.PowerInfo) return num, err } else { return 0, read_err } }