text
stringlengths
11
4.05M
// Command main demonstrates the errlog package by printing the raw
// call stack of the current goroutine.
package main

import (
	"github.com/snwfdhmp/errlog"
)

func main() {
	// Dump the raw stack trace to errlog's output.
	errlog.PrintRawStack()
}
package testdata

import "github.com/jackc/pgtype"

// 3 Checking incoming native parameters; checking outgoing composite parameters.
// input parameters: 1 native parameter
// output parameters: template.QueryRow mode: 1 composite parameter, besides the error
// GoDao: generate
//
// NOTE(review): this looks like generator test data — the struct tag below is
// the SQL template consumed by the DAO generator; its exact bytes are
// significant and must not be reformatted.
type GoDao3 struct {
	// language=PostgreSQL
	GetSettings func(id int64) (json pgtype.JSON, err error) ` with "tmp"("k", "v") as (values (0::int8, '{"dark_theme": true}'::json), (1::int8, '{"cookies": false}'::json) ) select "v" from "tmp" where "k" = $1 limit 1;`
}
package avatar

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Test_RandomImage checks the dimensions of generated avatars and the
// failure mode for a zero-sized avatar.
func Test_RandomImage(t *testing.T) {
	a := Avatar{290, 290, 4096, 4096}

	img, err := a.RandomImage([]byte("gogs@local"))
	require.NoError(t, err)
	assert.Equal(t, 290, img.Bounds().Max.X)
	assert.Equal(t, 290, img.Bounds().Max.Y)

	a.Height = 280
	img, err = a.RandomImage([]byte("gogs@local"))
	require.NoError(t, err)
	assert.Equal(t, 280, img.Bounds().Max.X)
	assert.Equal(t, 280, img.Bounds().Max.Y)

	// A zero-sized avatar must be rejected.
	a.Width = 0
	a.Height = 0
	_, err = a.RandomImage([]byte("gogs@local"))
	require.Error(t, err)
}

// Test_PrepareWithPNG checks preparing an avatar from a PNG fixture.
func Test_PrepareWithPNG(t *testing.T) {
	a := Avatar{290, 290, 4096, 4096}

	raw, err := ioutil.ReadFile("testdata/avatar.png")
	require.NoError(t, err)

	img, err := a.Prepare(raw)
	require.NoError(t, err)
	assert.Equal(t, 290, img.Bounds().Max.X)
	assert.Equal(t, 290, img.Bounds().Max.Y)
}

// Test_PrepareWithJPEG checks preparing an avatar from a JPEG fixture.
func Test_PrepareWithJPEG(t *testing.T) {
	a := Avatar{290, 290, 4096, 4096}

	raw, err := ioutil.ReadFile("testdata/avatar.jpeg")
	require.NoError(t, err)

	img, err := a.Prepare(raw)
	require.NoError(t, err)
	assert.Equal(t, 290, img.Bounds().Max.X)
	assert.Equal(t, 290, img.Bounds().Max.Y)
}

// Test_PrepareWithInvalidImage checks that undecodable data is rejected.
func Test_PrepareWithInvalidImage(t *testing.T) {
	a := Avatar{290, 290, 5, 5}

	_, err := a.Prepare([]byte{})
	require.EqualError(t, err, "DecodeConfig: image: unknown format")
}

// Test_PrepareWithInvalidImageSize checks the width and height limits.
func Test_PrepareWithInvalidImageSize(t *testing.T) {
	a := Avatar{290, 290, 5, 5}

	raw, err := ioutil.ReadFile("testdata/avatar.png")
	require.NoError(t, err)

	_, err = a.Prepare(raw)
	assert.EqualError(t, err, "Image width is too large: 10 > 5")

	// Relax the width limit so the height check fires instead.
	a.MaxWidth = 4095
	_, err = a.Prepare(raw)
	assert.EqualError(t, err, "Image height is too large: 10 > 5")
}
package gocube

import "errors"

// A Rotation is a way of rotating the entire cube around the x, y, or
// z axes. There are 9 total rotations, 0 through 8. The first three
// rotation values are x, y, and z. The next three are x', y', and z'.
// The final three are x2, y2, and z2.
type Rotation int

// NewRotation creates a Rotation around a given axis (0=x, 1=y, 2=z),
// for a given number of turns, where "x" has 1 turn, "x'" has -1 turn,
// and "x2" has 2 turns.
func NewRotation(axis, turns int) Rotation {
	switch turns {
	case 1:
		return Rotation(axis)
	case -1:
		return Rotation(axis + 3)
	case 2:
		return Rotation(axis + 6)
	}
	panic("unsupported turns value")
}

// Axis returns a number 0, 1, or 2, indicating the x, y, or z axis
// respectively.
func (r Rotation) Axis() int {
	return int(r) % 3
}

// Turns returns the number of "turns". This is 1 for regular
// rotations, -1 for inverse rotations, and 2 for double rotations.
func (r Rotation) Turns() int {
	// Rotations are grouped in threes: [0,3) plain, [3,6) inverse,
	// [6,9) double.
	groupTurns := [3]int{1, -1, 2}
	return groupTurns[int(r)/3]
}

// String returns the string representation of this rotation, in WCA
// notation.
func (r Rotation) String() string {
	axisNames := [3]string{"x", "y", "z"}
	suffix := ""
	switch r.Turns() {
	case -1:
		suffix = "'"
	case 2:
		suffix = "2"
	}
	return axisNames[r.Axis()] + suffix
}

// Inverse returns the inverse of this rotation.
func (r Rotation) Inverse() Rotation {
	// Double rotations are their own inverse.
	if r.Turns() != 2 {
		return NewRotation(r.Axis(), -r.Turns())
	}
	return r
}

// ParseRotation parses a WCA rotation string.
func ParseRotation(s string) (Rotation, error) {
	switch len(s) {
	case 1:
		axes := map[string]int{"x": 0, "y": 1, "z": 2}
		axis, ok := axes[s]
		if !ok {
			return 0, errors.New("unknown axis: " + s)
		}
		return Rotation(axis), nil
	case 2:
		// Parse the axis, then apply the modifier character.
		base, err := ParseRotation(s[:1])
		if err != nil {
			return 0, err
		}
		if s[1] == '\'' {
			return NewRotation(base.Axis(), -1), nil
		}
		if s[1] == '2' {
			return NewRotation(base.Axis(), 2), nil
		}
		return 0, errors.New("invalid rotation: " + s)
	default:
		return 0, errors.New("invalid rotation: " + s)
	}
}
package dnslb

import (
	crand "crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/Cloud-Foundations/golib/pkg/log"
)

// blockedType is the parsed form of the "_blocked" TXT record used as
// a distributed lock for rolling replacements.
type blockedType struct {
	IP           string    // the specific IP being replaced ("" when only the lock is held)
	IpExpires    time.Time // when the per-IP block lapses
	OwnerId      string    // random ID of the lock holder
	OwnerExpires time.Time // when the lock itself lapses
}

// parseBlocked decodes "key=value" TXT strings into a blockedType.
// It returns (nil, nil) when no TXT records exist, and an error when
// the records are malformed, incomplete, or the owner has expired.
func parseBlocked(txts []string) (*blockedType, error) {
	if len(txts) < 1 {
		return nil, nil
	} else if len(txts) < 2 {
		return nil, errors.New("wrong number of values")
	}
	var blocked blockedType
	for _, txt := range txts {
		txt = strings.TrimSpace(txt)
		splitTxt := strings.Split(txt, "=")
		if len(splitTxt) != 2 {
			return nil, fmt.Errorf("bad split for: %s", txt)
		}
		splitTxt[0] = strings.TrimSpace(splitTxt[0])
		splitTxt[1] = strings.TrimSpace(splitTxt[1])
		switch splitTxt[0] {
		case "IP":
			blocked.IP = splitTxt[1]
		case "IpExpires":
			expires, err := time.Parse(time.RFC3339, splitTxt[1])
			if err != nil {
				return nil, err
			}
			blocked.IpExpires = expires
		case "OwnerId":
			blocked.OwnerId = splitTxt[1]
		case "OwnerExpires":
			expires, err := time.Parse(time.RFC3339, splitTxt[1])
			if err != nil {
				return nil, err
			}
			blocked.OwnerExpires = expires
		}
		// Unknown keys are silently ignored.
	}
	if blocked.OwnerId == "" {
		return nil, errors.New("no OwnerId specified")
	}
	if blocked.OwnerExpires.IsZero() {
		return nil, errors.New("no owner expiration time specified")
	}
	if time.Until(blocked.OwnerExpires) <= 0 {
		return nil, errors.New("expired owner")
	}
	return &blocked, nil
}

// rollingReplace destroys and waits for replacement of each regional
// instance in turn, guarding the whole operation with the "_blocked"
// TXT-record lock so that only one replacement runs at a time.
// NOTE(review): the region parameter is unused here — presumably the
// regional scoping happens inside lb.getRegionalIPs via
// lb.p.RegionFilter; confirm before removing it.
func rollingReplace(config Config, params Params, region string,
	logger log.DebugLogger) error {
	lb := &LoadBalancer{
		config: config,
		p:      params,
	}
	regionalIPs, ttl, err := lb.getRegionalIPs()
	if err != nil {
		return err
	}
	if lb.config.CheckInterval < time.Second {
		lb.config.CheckInterval = ttl
	}
	regionalIpList := make([]string, 0, len(regionalIPs))
	anyBlocked := false
	for ip := range regionalIPs {
		blocked, err := lb.checkBlocked(ip)
		if err != nil {
			return err
		}
		if blocked > 0 {
			anyBlocked = true
			logger.Printf("%s is blocked\n", ip)
		}
		regionalIpList = append(regionalIpList, ip)
	}
	if anyBlocked {
		return errors.New(
			"some IP(s) are blocked: another rolling replace is active")
	}
	logger.Debugf(0, "%s: regional IPs: %v\n", config.FQDN, regionalIpList)
	// With fewer than 2 IPs, removing one would leave the FQDN empty.
	if len(regionalIpList) < 2 {
		return fmt.Errorf("need 2+ regional IPs, have: %v\n", regionalIpList)
	}
	// Random 4-byte owner ID identifies this replacer in the lock record.
	crandData := make([]byte, 4)
	if _, err := crand.Read(crandData); err != nil {
		return err
	}
	myId := hex.EncodeToString(crandData)
	for _, ip := range regionalIpList {
		if err := lb.replaceOne(myId, ip, ttl, len(regionalIPs)); err != nil {
			return err
		}
	}
	if err := lb.cleanupBlock(); err != nil {
		return err
	}
	return nil
}

// block writes (or refreshes) the "_blocked" TXT lock record. With a
// non-empty ip it also blocks that specific IP from re-registering.
// It fails if another owner currently holds the lock.
func (lb *LoadBalancer) block(myId, ip string, ttl time.Duration) error {
	fqdn := lb.generateBlockedFqdn()
	blocked, err := lb.getBlockedData(fqdn)
	if err != nil {
		return err
	}
	if blocked != nil && blocked.OwnerId != myId {
		return fmt.Errorf("blocked by another owner: %s", blocked.OwnerId)
	}
	var txts []string
	// The per-IP block outlives the record TTL by 2x; the lock itself
	// by 5x, so it survives several refresh intervals.
	interval := ttl * 2
	if ip != "" {
		txts = append(txts, "IP="+ip,
			"IpExpires="+time.Now().Add(interval).Format(time.RFC3339))
	}
	txts = append(txts, "OwnerId="+myId,
		"OwnerExpires="+time.Now().Add(ttl*5).Format(time.RFC3339))
	rrw := lb.p.RecordReadWriter
	if err := rrw.WriteRecords(fqdn, "TXT", txts, ttl, false); err != nil {
		// NOTE(review): the underlying err is dropped here; consider
		// wrapping it with %w so the cause is not lost.
		return fmt.Errorf("error writing: %s: TXT=%v", fqdn, txts)
	}
	if ip == "" {
		lb.p.Logger.Printf("locked for: %s\n", ttl*5)
	} else {
		lb.p.Logger.Printf("blocked: %s for: %s\n", ip, interval)
	}
	return nil
}

// Returns duration blocked, else <= 0.
func (lb *LoadBalancer) checkBlocked(ip string) (time.Duration, error) {
	fqdn := lb.generateBlockedFqdn()
	blocked, err := lb.getBlockedData(fqdn)
	if err != nil {
		return 0, err
	}
	if blocked == nil {
		return 0, nil
	}
	if ip != blocked.IP {
		return 0, nil
	}
	if blocked.IpExpires.IsZero() {
		return 0, nil
	}
	return time.Until(blocked.IpExpires), nil
}

// cleanupBlock deletes the "_blocked" TXT record, releasing the lock.
func (lb *LoadBalancer) cleanupBlock() error {
	fqdn := lb.generateBlockedFqdn()
	rrw := lb.p.RecordReadWriter
	if err := rrw.DeleteRecords(fqdn, "TXT"); err != nil {
		return err
	}
	lb.p.Logger.Printf("cleaned up: %s\n", fqdn)
	return nil
}

// generateBlockedFqdn returns the FQDN used for the lock TXT record.
func (lb *LoadBalancer) generateBlockedFqdn() string {
	return "_blocked." + lb.config.FQDN
}

// getBlockedData reads and parses the lock record. Malformed or
// expired records are deleted and reported as "no lock" (nil, nil).
func (lb *LoadBalancer) getBlockedData(fqdn string) (*blockedType, error) {
	rrw := lb.p.RecordReadWriter
	txts, _, err := rrw.ReadRecords(fqdn, "TXT")
	if err != nil {
		return nil, err
	}
	blocked, err := parseBlocked(txts)
	if err != nil {
		// Self-heal: drop the bad/stale record rather than failing.
		if err := rrw.DeleteRecords(fqdn, "TXT"); err != nil {
			return nil, err
		}
		lb.p.Logger.Printf("deleted: %s: %s\n", fqdn, err)
		return nil, nil
	}
	return blocked, nil
}

// getRegionalIPs reads the A records for the FQDN and filters them
// down to this region via the configured RegionFilter. It also
// returns the record TTL.
func (lb *LoadBalancer) getRegionalIPs() (
	map[string]struct{}, time.Duration, error) {
	ipList, ttl, err := lb.p.RecordReadWriter.ReadRecords(lb.config.FQDN, "A")
	if err != nil {
		return nil, 0, err
	}
	ips := make(map[string]struct{}, len(ipList))
	for _, ip := range ipList {
		ips[ip] = struct{}{}
	}
	regionalIPs, err := lb.p.RegionFilter.Filter(ips)
	if err != nil {
		return nil, 0, err
	}
	return regionalIPs, ttl, nil
}

// replaceOne removes one IP from DNS, destroys the instance behind it
// and waits until numRequired healthy instances have registered,
// refreshing the lock while it waits.
func (lb *LoadBalancer) replaceOne(myId, ip string, ttl time.Duration,
	numRequired int) error {
	// Shorten the TTL on the rewritten A record so the removal
	// propagates quickly, but never beyond the original TTL.
	newTtl := time.Second * 5
	if newTtl > ttl {
		newTtl = ttl
	}
	// Grab lock and block the instance from adding itself to DNS.
	if err := lb.block(myId, ip, ttl); err != nil {
		return err
	}
	// Remove instance from DNS.
	oldList, _, err := lb.p.RecordReadWriter.ReadRecords(lb.config.FQDN, "A")
	if err != nil {
		return err
	}
	newList := make([]string, 0, len(oldList)-1)
	for _, oldIP := range oldList {
		if oldIP != ip {
			newList = append(newList, oldIP)
		}
	}
	err = lb.p.RecordReadWriter.WriteRecords(lb.config.FQDN, "A", newList,
		newTtl, true)
	if err != nil {
		return err
	}
	lb.p.Logger.Printf("removed: %s from: %s\n", ip, lb.config.FQDN)
	// Wait for TTL to expire.
	lb.p.Logger.Printf("sleeping for: %s before destroying: %s\n", ttl, ip)
	time.Sleep(ttl)
	// Destroy instance which should no longer be visible via DNS.
	ipMap := map[string]struct{}{ip: struct{}{}}
	if err := lb.p.Destroyer.Destroy(ipMap); err != nil {
		return err
	}
	lb.p.Logger.Printf("destroyed: %s, now waiting for replacement\n", ip)
	// Wait for required number of healthy instances, keeping the lock fresh.
	for {
		// Poll at a quarter of the TTL.
		time.Sleep(ttl >> 2)
		if err := lb.block(myId, "", ttl); err != nil {
			return err
		}
		ips, _, err := lb.getRegionalIPs()
		if err != nil {
			return err
		}
		if len(ips) < numRequired {
			lb.p.Logger.Printf("only %d instances registered, need %d\n",
				len(ips), numRequired)
			continue
		}
		badIPs := lb.checkIPs(ips)
		if len(badIPs) < 1 {
			break
		}
		lb.p.Logger.Printf("unhealthy instances: %v\n", badIPs)
	}
	return nil
}
package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// Character is the common contract for every combatant: report hit
// points, deal damage, absorb damage, shout a catch-phrase, and
// initialise stats.
type Character interface {
	getHp() int
	attack() int
	defend(dmg int)
	scream()
	init()
}

// Player is the human-controlled combatant.
type Player struct {
	hp             int
	damage         int
	defense        int
	phrase         string
	healingPotions int
}

// init is a no-op: the player's stats are set via the composite
// literal in main. It exists so *Player satisfies Character like the
// enemy types do (the original type was missing it).
func (p *Player) init() {}

func (p *Player) scream() {
	fmt.Println(p.phrase)
}

func (p *Player) getHp() int {
	return p.hp
}

func (p *Player) attack() int {
	return p.damage
}

// defend absorbs an incoming hit (defense soaks part of the damage)
// and quaffs a healing potion (+7 hp) when badly hurt.
func (p *Player) defend(dmg int) {
	if p.defense <= dmg {
		p.hp -= dmg - p.defense
	}
	if p.hp <= 10 && p.healingPotions > 0 {
		p.hp += 7
		p.healingPotions--
	}
}

// WeakEnemy is the easiest opponent: low stats, no defence.
type WeakEnemy struct {
	hp     int
	damage int
	defence int
	phrase string
}

func (we *WeakEnemy) init() {
	we.hp = 5
	we.damage = 2
	we.defence = 0
	we.phrase = "Have mercy !!"
	we.scream()
}

func (we *WeakEnemy) getHp() int {
	return we.hp
}

func (we *WeakEnemy) attack() int {
	return we.damage
}

func (we *WeakEnemy) defend(damage int) {
	if damage > we.defence {
		we.hp -= damage - we.defence
	}
}

func (we *WeakEnemy) scream() {
	fmt.Println(we.phrase)
}

// AvgEnemy is a mid-tier opponent that surrenders when almost dead.
type AvgEnemy struct {
	hp     int
	damage int
	defence int
	phrase string
}

func (avg *AvgEnemy) init() {
	avg.hp = 10
	avg.damage = 4
	avg.defence = 3
	avg.phrase = "Ha Ha Ha ... I'm not as weak as you think ! Die !"
	avg.scream()
}

func (avg *AvgEnemy) getHp() int {
	return avg.hp
}

func (avg *AvgEnemy) attack() int {
	return avg.damage
}

// defend absorbs a hit; at 3 hp or fewer the enemy gives up and dies.
func (avg *AvgEnemy) defend(damage int) {
	if damage > avg.defence {
		avg.hp -= damage - avg.defence
	}
	if avg.hp <= 3 {
		fmt.Println("Alright ... I give up !!")
		avg.hp = 0
	}
}

func (avg *AvgEnemy) scream() {
	fmt.Println(avg.phrase)
}

// StrongEnemy is the hardest opponent; at 7 hp or fewer it enters a
// one-time frenzy that raises its damage by 3.
type StrongEnemy struct {
	hp     int
	damage int
	defence int
	phrase string
	frenzy bool
}

func (s *StrongEnemy) init() {
	s.hp = 15
	s.damage = 7
	s.defence = 5
	s.phrase = "You are dead ..."
	s.frenzy = false
	s.scream()
}

func (s *StrongEnemy) getHp() int {
	return s.hp
}

func (s *StrongEnemy) attack() int {
	return s.damage
}

func (s *StrongEnemy) defend(damage int) {
	if damage > s.defence {
		s.hp -= damage - s.defence
	}
	if s.hp <= 7 && !s.frenzy {
		s.damage += 3
		s.frenzy = true
		fmt.Println("YOU ARE MAKING ME ANGRY !! GRRRwaaa !!")
	}
}

func (s *StrongEnemy) scream() {
	fmt.Println(s.phrase)
}

// Enemy type constants accepted by enemyFactory.
const (
	WEAK = iota
	AVG
	STRONG
)

// enemyFactory is the factory function. Note that it returns the
// Character interface rather than a concrete struct type, which is
// handy when dealing with multiple implementations of the same
// interface. The returned enemy still needs init() called on it.
func enemyFactory(t int) (Character, error) {
	switch t {
	case WEAK:
		return new(WeakEnemy), nil
	case AVG:
		return new(AvgEnemy), nil
	case STRONG:
		return new(StrongEnemy), nil
	default:
		// Lowercase per Go error-string convention.
		return nil, errors.New("invalid enemy type")
	}
}

func main() {
	player := Player{hp: 20, damage: 7, defense: 5, healingPotions: 3,
		phrase: "I'm the new player"}
	// The original discarded this error; rand.Intn(3) is always a
	// valid type, but handle it anyway rather than risking a nil
	// interface dereference.
	myEnemy, err := enemyFactory(rand.Intn(3))
	if err != nil {
		fmt.Println(err)
		return
	}
	myEnemy.init()
	// Fight until the player dies, spawning a fresh enemy each time
	// the current one falls.
	for player.getHp() > 0 {
		myEnemy.defend(player.attack())
		if myEnemy.getHp() <= 0 {
			fmt.Println("Enemy is dead !")
			myEnemy, err = enemyFactory(rand.Intn(3))
			if err != nil {
				fmt.Println(err)
				return
			}
			myEnemy.init()
			continue
		}
		player.defend(myEnemy.attack())
	}
	fmt.Println("Game Over")
}
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Microsoft Compound Document File
// Reference: https://www.openoffice.org/sc/compdocfileformat.pdf
// ERRATA: The above document says the 0th sector is always 512 bytes into the
// file. This is not correct. If SectorSize > 512 bytes then the 0th sector is
// SectorSize bytes into the file.
package comdoc

import (
	"bytes"
	"encoding/binary"
	"errors"
	"io"
	"os"
)

// CDF file open for reading or writing
type ComDoc struct {
	File            io.ReaderAt // underlying file being parsed
	Header          *Header     // raw 512-byte file header
	SectorSize      int         // regular sector size in bytes (1 << Header.SectorSize)
	ShortSectorSize int         // short sector size in bytes
	FirstSector     int64       // file offset of sector 0 (see ERRATA above)
	// MSAT is a list of sector IDs holding a SAT
	MSAT []SecID
	// SAT is a table where the index is the sector ID and the value is a pointer to the next sector ID in the same stream
	SAT  []SecID
	SSAT []SecID  // short-sector allocation table
	Files []DirEnt // directory entries
	sectorBuf []byte // scratch buffer of one sector
	changed   bool   // set when the in-memory state diverges from disk
	rootStorage int   // index into files
	rootFiles   []int // index into Files
	msatList    []SecID // list of sector IDs holding a MSAT
	writer *os.File  // nil when opened read-only
	closer io.Closer // closed on Close; nil for caller-owned readers
}

// Open a CDF file for reading
func ReadPath(path string) (*ComDoc, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	// The file handle doubles as the closer so Close releases it.
	return openFile(f, nil, f)
}

// Open a CDF file for reading and writing
func WritePath(path string) (*ComDoc, error) {
	f, err := os.OpenFile(path, os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}
	return openFile(f, f, f)
}

// Parse an already-open CDF file for reading
func ReadFile(reader io.ReaderAt) (*ComDoc, error) {
	// No writer, no closer: the caller retains ownership of reader.
	return openFile(reader, nil, nil)
}

// Parse an already-open CDF file for reading and writing
func WriteFile(f *os.File) (*ComDoc, error) {
	return openFile(f, f, nil)
}

// openFile reads and validates the file header, then loads the
// master/regular/short sector allocation tables and the directory.
func openFile(reader io.ReaderAt, writer *os.File, closer io.Closer) (*ComDoc, error) {
	header := new(Header)
	r := &ComDoc{
		File:   reader,
		Header: header,
		writer: writer,
		closer: closer,
	}
	// The header always occupies the first 512 bytes regardless of
	// the sector size.
	sr := io.NewSectionReader(reader, 0, 512)
	if err := binary.Read(sr, binary.LittleEndian, header); err != nil {
		return nil, err
	}
	if !bytes.Equal(header.Magic[:], fileMagic) {
		return nil, errors.New("not a compound document file")
	}
	if header.ByteOrder != byteOrderMarker {
		return nil, errors.New("incorrect byte order marker")
	}
	// Sector sizes are stored as powers of two; reject absurd shifts
	// and a short-sector size that is not smaller than the regular one.
	if header.SectorSize < 5 || header.SectorSize > 28 || header.ShortSectorSize >= header.SectorSize {
		return nil, errors.New("unreasonable header values")
	}
	r.SectorSize = 1 << header.SectorSize
	r.ShortSectorSize = 1 << header.ShortSectorSize
	// See ERRATA: sector 0 begins at max(512, SectorSize).
	if r.SectorSize < 512 {
		r.FirstSector = 512
	} else {
		r.FirstSector = int64(r.SectorSize)
	}
	r.sectorBuf = make([]byte, r.SectorSize)
	if err := r.readMSAT(); err != nil {
		return nil, err
	}
	if err := r.readSAT(); err != nil {
		return nil, err
	}
	if err := r.readShortSAT(); err != nil {
		return nil, err
	}
	if err := r.readDir(); err != nil {
		return nil, err
	}
	return r, nil
}
package game

import (
	"c-server/model"
	"fmt"
	"reflect"

	uuid "github.com/satori/go.uuid"
)

// Webcenter is the user-center controller handling account actions.
type Webcenter struct {
	Name string
}

// WebcenterActions is the action set a user-center controller exposes.
type WebcenterActions interface {
	Login(interface{}) []byte
	Register(interface{}) interface{}
}

// Login authenticates a player by email or phone, then responds with
// the player's id, balances and a freshly minted session token.
//
// BUG FIX: the original used unchecked type assertions on
// loginfo["data"] and the password fields, so a malformed client
// payload would panic the handler; all assertions now use the ok-form
// and answer with the existing error messages instead.
func (wb Webcenter) Login(req ClientMessage) {
	res := ResponseData{}
	fmt.Println("loging ..............")
	res.Data = make(map[string]interface{})

	loginfo, ok := req.Data["msg"].(map[string]interface{})
	if !ok {
		req.error("草拟吗啊,数据错误")
		return
	}

	var p model.Player
	if loginfo["type"] == "cm" {
		loginData, ok := loginfo["data"].(map[string]interface{})
		if !ok {
			req.error("草拟吗啊,数据错误")
			return
		}
		// Select the lookup column once instead of duplicating the
		// whole query-and-check branch per account type.
		query, value := "phone = ?", loginData["phone"]
		if loginData["accountType"] == "email" {
			query, value = "email = ?", loginData["email"]
		}
		if err := model.DB.Where(query, value).First(&p).Error; err != nil {
			req.error("草拟吗啊,账户或密码错误")
			return
		}
		password, ok := loginData["password"].(string)
		if !ok || !p.CheckPassword(password) {
			req.error("草拟吗啊,账户或密码错误")
			return
		}
	} else {
		if err := model.DB.Where("email = ?", loginfo["email"]).First(&p).Error; err != nil {
			fmt.Println(err)
			req.error("草拟吗啊,账户或密码错误")
			return
		}
		password, ok := loginfo["password"].(string)
		if !ok || !p.CheckPassword(password) {
			req.error("草拟吗啊,账户或密码错误")
			return
		}
	}

	// Authenticated: assemble the response payload.
	b := p.GetNeoBalance()
	res.Put("uid", p.Id)
	res.Put("balance", b)
	jwt := uuid.NewV4()
	token := Token{token: jwt.String()}
	token.saveToken(p.Id)
	cake := p.GetCoin("coin1")
	res.Put("token", jwt.String())
	res.Put("ethChargeAddress", p.NeoChargeAddress)
	res.Put("cake", cake)
	req.response(res)
}

// Register is not implemented yet.
func (wb Webcenter) Register(b interface{}) interface{} {
	return nil
}

// RegisterController builds a name-to-method table for the given
// controller pointer via reflection, used for dispatching actions.
func RegisterController(wb interface{}) map[string]reflect.Value {
	cont := make(map[string]reflect.Value)
	v := reflect.ValueOf(wb).Elem()
	t := reflect.TypeOf(wb).Elem()
	for i := 0; i < v.NumMethod(); i++ {
		action := v.Method(i)
		actionName := t.Method(i).Name
		cont[actionName] = action
	}
	return cont
}
package feed

import (
	"net/http"
	"time"
)

// timeout is the default HTTP client timeout applied to every client
// this package constructs.
const timeout = 3 * time.Second

// newHTTPClient returns a fresh *http.Client that aborts requests
// after the package-default timeout.
func newHTTPClient() *http.Client {
	client := &http.Client{
		Timeout: timeout,
	}
	return client
}
//nolint:scopelint,gosec // we don't care about these linters in test cases
package serializer_test

import (
	"errors"
	"fmt"
	"math/rand"
	"sort"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/iotaledger/hive.go/serializer/v2"
)

const (
	// Type-denotation bytes for the two dummy serializable types.
	TypeA byte = 0
	TypeB byte = 1

	aKeyLength  = 16
	bNameLength = 32

	// Serialized size: one small type-denotation byte plus the payload.
	typeALength = serializer.SmallTypeDenotationByteSize + aKeyLength
	typeBLength = serializer.SmallTypeDenotationByteSize + bNameLength
)

var (
	ErrUnknownDummyType = errors.New("unknown example type")

	dummyTypeArrayRules = &serializer.ArrayRules{
		Guards: serializer.SerializableGuard{
			ReadGuard: DummyTypeSelector,
			// NOTE(review): as written this guard accepts *A but
			// rejects *B with ErrUnknownDummyType — verify that
			// rejecting B is intentional (a missing `default:` case
			// may have been meant instead).
			WriteGuard: func(seri serializer.Serializable) error {
				switch seri.(type) {
				case *A:
				case *B:
					return ErrUnknownDummyType
				}
				return nil
			},
		},
	}
)

// DummyTypeSelector maps a type byte to a fresh, empty instance of
// the corresponding dummy serializable.
func DummyTypeSelector(dummyType uint32) (serializer.Serializable, error) {
	var seri serializer.Serializable
	switch byte(dummyType) {
	case TypeA:
		seri = &A{}
	case TypeB:
		seri = &B{}
	default:
		return nil, ErrUnknownDummyType
	}
	return seri, nil
}

// Keyer exposes the key of an A.
type Keyer interface {
	GetKey() [aKeyLength]byte
}

// A is a dummy serializable carrying a fixed-size key.
type A struct {
	Key [aKeyLength]byte
}

func (a *A) String() string {
	return "A"
}

func (a *A) GetKey() [16]byte {
	return a.Key
}

func (a *A) MarshalJSON() ([]byte, error) {
	panic("implement me")
}

func (a *A) UnmarshalJSON(i []byte) error {
	panic("implement me")
}

// Deserialize reads the key that follows the type byte.
func (a *A) Deserialize(data []byte, deSeriMode serializer.DeSerializationMode, deSeriCtx interface{}) (int, error) {
	data = data[serializer.SmallTypeDenotationByteSize:]
	copy(a.Key[:], data[:aKeyLength])
	return typeALength, nil
}

// Serialize emits the type byte followed by the key.
func (a *A) Serialize(deSeriMode serializer.DeSerializationMode, deSeriCtx interface{}) ([]byte, error) {
	var b [typeALength]byte
	b[0] = TypeA
	copy(b[serializer.SmallTypeDenotationByteSize:], a.Key[:])
	return b[:], nil
}

// As is a slice of *A convertible to and from Serializables.
type As []*A

func (a As) ToSerializables() serializer.Serializables {
	seris := make(serializer.Serializables, len(a))
	for i, x := range a {
		seris[i] = x
	}
	return seris
}

func (a *As) FromSerializables(seris serializer.Serializables) {
	*a = make(As, len(seris))
	for i, seri := range seris {
		(*a)[i] = seri.(*A)
	}
}

// Keyers is a slice of Keyer convertible to and from Serializables.
type Keyers []Keyer

func (k Keyers) ToSerializables() serializer.Serializables {
	seris := make(serializer.Serializables, len(k))
	for i, x := range k {
		seris[i] = x.(serializer.Serializable)
	}
	return seris
}

func (k *Keyers) FromSerializables(seris serializer.Serializables) {
	*k = make(Keyers, len(seris))
	for i, seri := range seris {
		(*k)[i] = seri.(Keyer)
	}
}

// RandBytes returns length amount random bytes.
func RandBytes(length int) []byte {
	var b []byte
	for i := 0; i < length; i++ {
		b = append(b, byte(rand.Intn(256)))
	}
	return b
}

// randSerializedA returns a random A already in serialized form.
func randSerializedA() []byte {
	var b [typeALength]byte
	b[0] = TypeA
	keyData := RandBytes(aKeyLength)
	copy(b[serializer.SmallTypeDenotationByteSize:], keyData)
	return b[:]
}

// randA returns an A with a random key.
func randA() *A {
	var k [aKeyLength]byte
	copy(k[:], RandBytes(aKeyLength))
	return &A{Key: k}
}

// B is a dummy serializable carrying a fixed-size name.
type B struct {
	Name [bNameLength]byte
}

func (b *B) String() string {
	return "B"
}

func (b *B) MarshalJSON() ([]byte, error) {
	panic("implement me")
}

func (b *B) UnmarshalJSON(i []byte) error {
	panic("implement me")
}

func (b *B) Deserialize(data []byte, deSeriMode serializer.DeSerializationMode, deSeriCtx interface{}) (int, error) {
	data = data[serializer.SmallTypeDenotationByteSize:]
	copy(b.Name[:], data[:bNameLength])
	return typeBLength, nil
}

func (b *B) Serialize(deSeriMode serializer.DeSerializationMode, deSeriCtx interface{}) ([]byte, error) {
	var bf [typeBLength]byte
	bf[0] = TypeB
	copy(bf[serializer.SmallTypeDenotationByteSize:], b.Name[:])
	return bf[:], nil
}

// randB returns a B with a random name.
func randB() *B {
	var n [bNameLength]byte
	copy(n[:], RandBytes(bNameLength))
	return &B{Name: n}
}

// TestDeserializeA checks that a serialized A round-trips: correct
// byte count consumed and key recovered.
func TestDeserializeA(t *testing.T) {
	seriA := randSerializedA()
	objA := &A{}
	bytesRead, err := objA.Deserialize(seriA, serializer.DeSeriModePerformValidation, nil)
	assert.NoError(t, err)
	assert.Equal(t, len(seriA), bytesRead)
	assert.Equal(t, seriA[serializer.SmallTypeDenotationByteSize:], objA.Key[:])
}

// TestLexicalOrderedByteSlices checks that sorting orders byte slices
// lexicographically.
func TestLexicalOrderedByteSlices(t *testing.T) {
	type test struct {
		name   string
		source serializer.LexicalOrderedByteSlices
		target serializer.LexicalOrderedByteSlices
	}
	tests := []test{
		{
			name: "ok - order by first ele",
			source: serializer.LexicalOrderedByteSlices{
				{3, 2, 1},
				{2, 3, 1},
				{1, 2, 3},
			},
			target: serializer.LexicalOrderedByteSlices{
				{1, 2, 3},
				{2, 3, 1},
				{3, 2, 1},
			},
		},
		{
			name: "ok - order by last ele",
			source: serializer.LexicalOrderedByteSlices{
				{1, 1, 3},
				{1, 1, 2},
				{1, 1, 1},
			},
			target: serializer.LexicalOrderedByteSlices{
				{1, 1, 1},
				{1, 1, 2},
				{1, 1, 3},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			sort.Sort(tt.source)
			assert.Equal(t, tt.target, tt.source)
		})
	}
}

// TestRemoveDupsAndSortByLexicalOrderArrayOf32Bytes checks that
// duplicate 32-byte arrays are removed and the rest sorted.
func TestRemoveDupsAndSortByLexicalOrderArrayOf32Bytes(t *testing.T) {
	type test struct {
		name   string
		source serializer.LexicalOrdered32ByteArrays
		target serializer.LexicalOrdered32ByteArrays
	}
	tests := []test{
		{
			name: "ok - dups removed and order by first ele",
			source: serializer.LexicalOrdered32ByteArrays{
				{3, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{3, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
			},
			target: serializer.LexicalOrdered32ByteArrays{
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{3, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
			},
		},
		{
			name: "ok - dups removed and order by last ele",
			source: serializer.LexicalOrdered32ByteArrays{
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 34},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 34},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
			},
			target: serializer.LexicalOrdered32ByteArrays{
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33},
				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 34},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.source = serializer.RemoveDupsAndSortByLexicalOrderArrayOf32Bytes(tt.source)
			assert.Equal(t, tt.target, tt.source)
		})
	}
}

// TestSerializationMode_HasMode checks the DeSerializationMode bit test.
func TestSerializationMode_HasMode(t *testing.T) {
	type args struct {
		mode serializer.DeSerializationMode
	}
	tests := []struct {
		name string
		sm   serializer.DeSerializationMode
		args args
		want bool
	}{
		{
			"has no validation",
			serializer.DeSeriModeNoValidation,
			args{mode: serializer.DeSeriModePerformValidation},
			false,
		},
		{
			"has validation",
			serializer.DeSeriModePerformValidation,
			args{mode: serializer.DeSeriModePerformValidation},
			true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.sm.HasMode(tt.args.mode); got != tt.want {
				t.Errorf("HasMode() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestArrayValidationMode_HasMode checks the ArrayValidationMode bit test.
func TestArrayValidationMode_HasMode(t *testing.T) {
	type args struct {
		mode serializer.ArrayValidationMode
	}
	tests := []struct {
		name string
		sm   serializer.ArrayValidationMode
		args args
		want bool
	}{
		{
			"has no validation",
			serializer.ArrayValidationModeNone,
			args{mode: serializer.ArrayValidationModeNoDuplicates},
			false,
		},
		{
			"has mode duplicates",
			serializer.ArrayValidationModeNoDuplicates,
			args{mode: serializer.ArrayValidationModeNoDuplicates},
			true,
		},
		{
			"has mode lexical order",
			serializer.ArrayValidationModeLexicalOrdering,
			args{mode: serializer.ArrayValidationModeLexicalOrdering},
			true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.sm.HasMode(tt.args.mode); got != tt.want {
				t.Errorf("HasMode() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestArrayRules_ElementUniqueValidator checks duplicate detection,
// including via a uniqueness-reduction function.
func TestArrayRules_ElementUniqueValidator(t *testing.T) {
	type test struct {
		name  string
		args  [][]byte
		valid bool
		ar    *serializer.ArrayRules
	}
	tests := []test{
		{
			name: "ok - no dups",
			args: [][]byte{
				{1, 2, 3},
				{2, 3, 1},
				{3, 2, 1},
			},
			ar:    &serializer.ArrayRules{},
			valid: true,
		},
		{
			name: "not ok - dups",
			args: [][]byte{
				{1, 1, 1},
				{1, 1, 2},
				{1, 1, 3},
				{1, 1, 3},
			},
			ar:    &serializer.ArrayRules{},
			valid: false,
		},
		{
			name: "not ok - dups with reduction",
			args: [][]byte{
				{1, 1, 1},
				{1, 1, 2},
				{1, 1, 3},
			},
			ar: &serializer.ArrayRules{
				// Only the first two bytes count for uniqueness, so
				// all three elements collide.
				UniquenessSliceFunc: func(next []byte) []byte { return next[:2] },
			},
			valid: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			arrayElementValidator := tt.ar.ElementUniqueValidator()
			valid := true
			for i := range tt.args {
				element := tt.args[i]
				if err := arrayElementValidator(i, element); err != nil {
					valid = false
				}
			}
			assert.Equal(t, tt.valid, valid)
		})
	}
}

// TestArrayRules_Bounds checks min/max element-count enforcement.
func TestArrayRules_Bounds(t *testing.T) {
	type test struct {
		name  string
		args  [][]byte
		min   int
		max   int
		valid bool
	}
	arrayRules := serializer.ArrayRules{}
	tests := []test{
		{
			name: "ok - min",
			args: [][]byte{
				{1},
			},
			min:   1,
			max:   3,
			valid: true,
		},
		{
			name: "ok - max",
			args: [][]byte{
				{1}, {2}, {3},
			},
			min:   1,
			max:   3,
			valid: true,
		},
		{
			name: "not ok - min",
			args: [][]byte{
				{1}, {2}, {3},
			},
			min:   4,
			max:   5,
			valid: false,
		},
		{
			name: "not ok - max",
			args: [][]byte{
				{1}, {2}, {3},
			},
			min:   1,
			max:   2,
			valid: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			arrayRules.Min = uint(tt.min)
			arrayRules.Max = uint(tt.max)
			err := arrayRules.CheckBounds(uint(len(tt.args)))
			assert.Equal(t, tt.valid, err == nil)
		})
	}
}

// TestArrayRules_LexicalOrderValidator checks ordering enforcement only.
func TestArrayRules_LexicalOrderValidator(t *testing.T) {
	type test struct {
		name  string
		args  [][]byte
		valid bool
		ar    *serializer.ArrayRules
	}
	tests := []test{
		{
			name: "ok - order by first ele",
			args: [][]byte{
				{1, 2, 3},
				{2, 3, 1},
				{3, 2, 1},
			},
			ar:    &serializer.ArrayRules{},
			valid: true,
		},
		{
			name: "ok - order by last ele",
			args: [][]byte{
				{1, 1, 1},
				{1, 1, 2},
				{1, 1, 3},
			},
			ar:    &serializer.ArrayRules{},
			valid: true,
		},
		{
			name: "not ok",
			args: [][]byte{
				{2, 1, 1},
				{1, 1, 2},
				{3, 1, 3},
			},
			ar:    &serializer.ArrayRules{},
			valid: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			arrayElementValidator := tt.ar.LexicalOrderValidator()
			valid := true
			for i := range tt.args {
				element := tt.args[i]
				if err := arrayElementValidator(i, element); err != nil {
					valid = false
				}
			}
			assert.Equal(t, tt.valid, valid)
		})
	}
}

// TestArrayRules_LexicalOrderWithoutDupsValidator checks combined
// ordering-plus-uniqueness enforcement.
func TestArrayRules_LexicalOrderWithoutDupsValidator(t *testing.T) {
	type test struct {
		name  string
		args  [][]byte
		valid bool
		ar    *serializer.ArrayRules
	}
	tests := []test{
		{
			name: "ok - order by first ele - no dups",
			args: [][]byte{
				{1, 2, 3},
				{2, 3, 1},
				{3, 2, 1},
			},
			ar:    &serializer.ArrayRules{},
			valid: true,
		},
		{
			name: "ok - order by last ele - no dups",
			args: [][]byte{
				{1, 1, 1},
				{1, 1, 2},
				{1, 1, 3},
			},
			ar:    &serializer.ArrayRules{},
			valid: true,
		},
		{
			name: "not ok - dups",
			args: [][]byte{
				{1, 1, 1},
				{1, 1, 2},
				{1, 1, 3},
				{1, 1, 3},
			},
			ar:    &serializer.ArrayRules{},
			valid: false,
		},
		{
			name: "not ok - order",
			args: [][]byte{
				{2, 1, 1},
				{1, 1, 2},
				{3, 1, 3},
			},
			ar:    &serializer.ArrayRules{},
			valid: false,
		},
		{
			name: "not ok - dups with reduction",
			args: [][]byte{
				{1, 1, 1},
				{1, 1, 2},
				{1, 1, 3},
			},
			ar: &serializer.ArrayRules{
				UniquenessSliceFunc: func(next []byte) []byte { return next[:2] },
			},
			valid: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			arrayElementValidator := tt.ar.LexicalOrderWithoutDupsValidator()
			valid := true
			for i := range tt.args {
				element := tt.args[i]
				if err := arrayElementValidator(i, element); err != nil {
					valid = false
				}
			}
			assert.Equal(t, tt.valid, valid)
		})
	}
}

// TestArrayRules_AtMostOneOfEachTypeValidatorValidator checks that
// type prefixes (byte or uint32 denotation) are unique per array.
func TestArrayRules_AtMostOneOfEachTypeValidatorValidator(t *testing.T) {
	type test struct {
		name  string
		args  [][]byte
		valid bool
		ar    *serializer.ArrayRules
		ty    serializer.TypeDenotationType
	}
	tests := []test{
		{
			name: "ok - types unique - byte",
			args: [][]byte{
				{1, 1, 1},
				{2, 2, 2},
				{3, 3, 3},
			},
			valid: true,
			ar:    &serializer.ArrayRules{},
			ty:    serializer.TypeDenotationByte,
		},
		{
			name: "ok - types unique - uint32",
			args: [][]byte{
				{1, 1, 1, 1},
				{2, 2, 2, 2},
				{3, 3, 3, 3},
			},
			valid: true,
			ar:    &serializer.ArrayRules{},
			ty:    serializer.TypeDenotationUint32,
		},
		{
			name: "not ok - types not unique - byte",
			args: [][]byte{
				{1, 1, 1},
				{1, 2, 2},
				{3, 3, 3},
			},
			valid: false,
			ar:    &serializer.ArrayRules{},
			ty:    serializer.TypeDenotationByte,
		},
		{
			name: "not ok - types not unique - uint32",
			args: [][]byte{
				{1, 1, 1, 1},
				{2, 2, 2, 2},
				{1, 1, 1, 1},
			},
			valid: false,
			ar:    &serializer.ArrayRules{},
			ty:    serializer.TypeDenotationUint32,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			arrayElementValidator := tt.ar.AtMostOneOfEachTypeValidator(tt.ty)
			valid := true
			for i := range tt.args {
				element := tt.args[i]
				if err := arrayElementValidator(i, element); err != nil {
					valid = false
				}
			}
			assert.Equal(t, tt.valid, valid)
		})
	}
}

// TestSerializableSlice smoke-tests Keyers.FromSerializables.
func TestSerializableSlice(t *testing.T) {
	keyers := make(Keyers, 0)
	seris := make(serializer.Serializables, 5)
	for i := range seris {
		seris[i] = randA()
	}
	keyers.FromSerializables(seris)
	for _, a := range keyers {
		fmt.Println(a.GetKey())
	}
}
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2020 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package main import ( "fmt" "path/filepath" "github.com/bitmark-inc/bitmarkd/zmqutil" "github.com/bitmark-inc/exitwithstatus" ) const ( recorderdPublicKeyFilename = "recorderd.public" recorderdPrivateKeyFilename = "recorderd.private" ) // setup command handler // // commands that run to create key and certificate files these // commands cannot access any internal database or states or the // configuration file func processSetupCommand(program string, arguments []string) bool { command := "help" if len(arguments) > 0 { command = arguments[0] arguments = arguments[1:] } switch command { case "generate-identity", "id": publicKeyFilename := getFilenameWithDirectory(arguments, recorderdPublicKeyFilename) privateKeyFilename := getFilenameWithDirectory(arguments, recorderdPrivateKeyFilename) err := zmqutil.MakeKeyPair(publicKeyFilename, privateKeyFilename) if nil != err { fmt.Printf("cannot generate private key: %q and public key: %q\n", privateKeyFilename, publicKeyFilename) fmt.Printf("error generating server key pair: %v\n", err) exitwithstatus.Exit(1) } fmt.Printf("generated private key: %q and public key: %q\n", privateKeyFilename, publicKeyFilename) case "start", "run": return false // continue processing case "version", "v": fmt.Printf("%s\n", version) default: switch command { case "help", "h", "?": case "", " ": fmt.Printf("error: missing command\n") default: fmt.Printf("error: no such command: %v\n", command) } fmt.Printf("usage: %s [--help] [--verbose] [--quiet] --config-file=FILE [[command|help] arguments...]", program) fmt.Printf("supported commands:\n\n") fmt.Printf(" help (h) - display this message\n\n") fmt.Printf(" version (v) - display version sting\n\n") fmt.Printf(" generate-identity [DIR] (id) - create private key in: %q\n", "DIR/"+recorderdPrivateKeyFilename) fmt.Printf(" and the 
public key in: %q\n", "DIR/"+recorderdPublicKeyFilename) fmt.Printf("\n") fmt.Printf(" start (run) - just run the program, same as no arguments\n") fmt.Printf(" for convienience when passing script arguments\n") fmt.Printf("\n") exitwithstatus.Exit(1) } return true } // get the working directory; if not set in the arguments // it's set to the current directory func getFilenameWithDirectory(arguments []string, name string) string { dir := "." if len(arguments) >= 1 { dir = arguments[0] } return filepath.Join(dir, name) }
package main

import (
	"io"
	"log"
	"net/http"
	"path"
	"strings"
)

// ShiftPath splits off the first component of p, which will be cleaned of
// relative components before processing. `head` will never contain a slash and
// `tail` will always be a rooted path without trailing slash.
// Original: http://blog.merovius.de/2017/06/18/how-not-to-use-an-http-router.html
// ***************************************************************************************
func ShiftPath(p string) (head, tail string) {
	// Clean guarantees p starts with "/" and has no "." / ".." segments.
	p = path.Clean("/" + p)
	// Look for the slash that terminates the first component; Index returns -1
	// when there is none, so i <= 0 means the whole remainder is the head.
	i := strings.Index(p[1:], "/") + 1
	if i <= 0 {
		return p[1:], "/"
	}
	return p[1:i], p[i:]
}

// ***************************************************************************************
// TODO Add a way to track a single request.
// https://joeshaw.org/revisiting-context-and-http-handler-for-go-17/
// https://medium.com/@cep21/how-to-correctly-use-context-context-in-go-1-7-8f2c0fafdf39
// ***************************************************************************************

// AppHandler is a generic type for the top-level application handler instance.
type AppHandler struct{}

// ServeHTTP on the AppHandler will identify the initial URL branch.
// It consumes one path segment and dispatches on it; note that req.URL.Path is
// mutated so downstream handlers see only the remaining tail.
func (a AppHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	log.Print("AppHandler.ServeHTTP(): ", req.URL.String())
	var head string
	head, req.URL.Path = ShiftPath(req.URL.Path)
	switch head {
	case "dev":
		// "dev" is currently an alias for the v1 API.
		APIHandlerv1(res, req)
	case "v1":
		APIHandlerv1(res, req)
	default:
		http.Error(res, "Not Found", http.StatusNotFound)
	}
}

// TODO Split the API versions into separate files

// APIHandlerv1 routes to the version 1 URLs.
func APIHandlerv1(res http.ResponseWriter, req *http.Request) {
	log.Print("APIHandlerv1(): ", req.URL.String())
	var head string
	head, req.URL.Path = ShiftPath(req.URL.Path)
	switch head {
	case "hello":
		HelloHandlerv1(res, req)
	default:
		http.Error(res, "Not Found", http.StatusNotFound)
	}
}

// HelloHandlerv1 is the v1 hello endpoint.
// Greets the caller by the optional "name" query parameter, defaulting to "world".
func HelloHandlerv1(res http.ResponseWriter, req *http.Request) {
	log.Print("HelloHandlerv1(): ", req.URL.String())
	q := req.URL.Query()
	name := q.Get("name")
	if len(name) == 0 {
		name = "world"
	}
	io.WriteString(res, "hello, "+name+"!\n")
}

func main() {
	a := &AppHandler{}
	log.Fatal(http.ListenAndServe(":8080", a))
}
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

package main

import (
	"crypto/x509"
	"flag"
	"log"
	"os"

	"github.com/pivotal-cf/on-demand-service-broker/brokerinitiator"

	"github.com/cloudfoundry/bosh-cli/v7/director"
	boshuaa "github.com/cloudfoundry/bosh-cli/v7/uaa"
	boshlog "github.com/cloudfoundry/bosh-utils/logger"
	"github.com/pivotal-cf/on-demand-service-broker/boshdirector"
	"github.com/pivotal-cf/on-demand-service-broker/boshlinks"
	"github.com/pivotal-cf/on-demand-service-broker/broker"
	"github.com/pivotal-cf/on-demand-service-broker/cf"
	"github.com/pivotal-cf/on-demand-service-broker/config"
	"github.com/pivotal-cf/on-demand-service-broker/loggerfactory"
	"github.com/pivotal-cf/on-demand-service-broker/noopservicescontroller"
	"github.com/pivotal-cf/on-demand-service-broker/serviceadapter"
)

// main wires up logging, config parsing, and the BOSH/CF clients, then hands
// control to brokerinitiator.Initiate.
func main() {
	loggerFactory := loggerfactory.New(os.Stdout, broker.ComponentName, loggerfactory.Flags)
	logger := loggerFactory.New()
	logger.Println("Starting broker")

	// NOTE: the local variable shadows the `config` package from here on;
	// later calls in main only use the parsed value, so this is safe.
	config := configParser(logger)
	boshClient := createBoshClient(logger, config)
	commandRunner := serviceadapter.NewCommandRunner()
	// Buffered so a signal sent before the initiator installs its handler is not lost.
	stopServer := make(chan os.Signal, 1)

	cfClient := createCfClient(config, logger)

	brokerinitiator.Initiate(config, boshClient, boshClient, cfClient, commandRunner, stopServer, loggerFactory)
}

// configParser reads -configFilePath from the command line and parses the
// config file; any failure is fatal.
func configParser(logger *log.Logger) config.Config {
	configFilePath := flag.String("configFilePath", "", "path to config file")
	flag.Parse()
	if *configFilePath == "" {
		logger.Fatal("must supply -configFilePath")
	}
	config, err := config.Parse(*configFilePath)
	if err != nil {
		logger.Fatalf("error parsing config: %s", err)
	}
	return config
}

// createCfClient returns a real CF client, or a no-op stand-in when CF
// startup checks are disabled in the config.
func createCfClient(conf config.Config, logger *log.Logger) broker.CloudFoundryClient {
	var cfClient broker.CloudFoundryClient
	if !conf.Broker.DisableCFStartupChecks {
		cfClient = createRealCfClient(conf, logger, cfClient)
	} else {
		cfClient = noopservicescontroller.New()
	}
	return cfClient
}

// createRealCfClient builds an authenticated Cloud Foundry client; any
// failure is fatal.
func createRealCfClient(conf config.Config, logger *log.Logger, cfClient broker.CloudFoundryClient) broker.CloudFoundryClient {
	cfAuthenticator, err := conf.CF.NewAuthHeaderBuilder(conf.Broker.DisableSSLCertVerification)
	if err != nil {
		logger.Fatalf("error creating CF authorization header builder: %s", err)
	}
	cfClient, err = cf.New(conf.CF.URL, cfAuthenticator, []byte(conf.CF.TrustedCert), conf.Broker.DisableSSLCertVerification, logger)
	if err != nil {
		logger.Fatalf("error creating Cloud Foundry client: %s", err)
	}
	return cfClient
}

// createBoshClient builds a BOSH director client using the system cert pool
// plus the configured trusted cert; any failure is fatal.
func createBoshClient(logger *log.Logger, conf config.Config) *boshdirector.Client {
	certPool, err := x509.SystemCertPool()
	if err != nil {
		logger.Fatalf("error getting a certificate pool to append our trusted cert to: %s", err)
	}
	boshLogger := boshlog.NewLogger(boshlog.LevelError)
	directorFactory := director.NewFactory(boshLogger)
	uaaFactory := boshuaa.NewFactory(boshLogger)

	boshClient, err := boshdirector.New(
		conf.Bosh.URL,
		[]byte(conf.Bosh.TrustedCert),
		certPool,
		directorFactory,
		uaaFactory,
		conf.Bosh.Authentication,
		boshlinks.NewDNSRetriever,
		boshdirector.NewBoshHTTP,
		logger)
	if err != nil {
		logger.Fatalf("error creating bosh client: %s", err)
	}
	return boshClient
}
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package shell import ( "github.com/pkg/errors" "yunion.io/x/onecloud/pkg/multicloud/qcloud" "yunion.io/x/onecloud/pkg/util/shellutils" ) func init() { type RouteTableListOption struct { VpcId string } shellutils.R(&RouteTableListOption{}, "routetable-list", "list routetable", func(cli *qcloud.SRegion, args *RouteTableListOption) error { routetables, err := cli.GetAllRouteTables(args.VpcId, []string{}) if err != nil { return errors.Wrapf(err, "GetAllRouteTables") } printList(routetables, 0, 0, 0, nil) return nil }) type RouteTableShowOption struct { ROUTETABLEID string } shellutils.R(&RouteTableShowOption{}, "routetable-show", "show routetable", func(cli *qcloud.SRegion, args *RouteTableShowOption) error { routetables, err := cli.GetAllRouteTables("", []string{args.ROUTETABLEID}) if err != nil { return errors.Wrapf(err, "GetAllRouteTables") } printList(routetables, 0, 0, 0, nil) return nil }) }
package main

import (
	"fmt"
	"net"
	"os/user"
)

// type User string

// Config holds the runtime configuration: the system user to run as plus the
// transient and storage directories.
type Config struct {
	User          *user.User
	TransientPath string
	StoragePath   string
}

// HostIP returns the first non-loopback IPv4 address found on any local
// interface, or an error if none exists.
func HostIP() (string, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return "", err
	}
	for _, a := range addrs {
		if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
			if ipnet.IP.To4() != nil {
				return ipnet.IP.String(), nil
			}
		}
	}
	return "", fmt.Errorf("No ip found")
}

// UnmarshalYAML decodes the YAML document and resolves the configured user
// name into a *user.User.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var aux struct {
		User          string `yaml:"user"`
		TransientPath string `yaml:"transient-path"`
		StoragePath   string `yaml:"storage-path"`
	}
	if err := unmarshal(&aux); err != nil {
		return err
	}

	chieftainUser, err := user.Lookup(aux.User)
	// BUG FIX: the original only returned when err was a user.UnknownUserError,
	// silently swallowing every other lookup failure and leaving c.User nil.
	// Propagate all lookup errors.
	if err != nil {
		return err
	}

	*c = Config{
		User:          chieftainUser,
		TransientPath: aux.TransientPath,
		StoragePath:   aux.StoragePath,
	}
	return nil
}
package api

import (
	"github.com/cyberark/secretless-broker/bin/juxtaposer/timing"
)

// OutputFormatter renders collected timing results. ProcessResults receives an
// ordered list of backend names, the per-backend timing data, and an integer
// count (presumably the number of test rounds/requests — confirm at call sites).
type OutputFormatter interface {
	ProcessResults([]string, map[string]timing.BackendTiming, int) error
}

// FormatterOptions carries formatter-specific string key/value settings.
type FormatterOptions map[string]string

// FormatterConstructor builds an OutputFormatter from its options.
type FormatterConstructor func(FormatterOptions) (OutputFormatter, error)
// Package provider defines a built-in configuration providers that are // automatically registered by usrv. package provider import ( "os" "regexp" "strings" ) var ( invalidCharRegex = regexp.MustCompile(`[\s-\/]`) ) // EnvVars implements a configuration provider that fetches configuration values // from the environment variables associated with the currently running process. type EnvVars struct{} // NewEnvVars creates a new EnvVars provider instance. func NewEnvVars() *EnvVars { return &EnvVars{} } // Get returns a map containing configuration values associated with a particular path. // // To match the standard envvar form, the path is first normalized by converting // it to uppercase and then replacing any path separators with an underscore while // also trimming any leading separators. // // The provider then scans the process envvars and selects the ones whose names // begin with the normalized path. The configuration map keys are built by stripping // the path prefix from any matched envvars, lowercasing the remaining part of the // envvar name and replacing any underscores with the path separator. 
// // For example, given the path "/redis/MASTER" the following envvars // would be matched as configuration options: // // - REDIS_MASTER_SERVICE_HOST=10.0.0.11 // - REDIS_MASTER_PORT_6379_TCP=tcp://10.0.0.11:6379 // // The above options would yield the following configuration map: // { // "service/host": "10.0.0.11", // "port/6379/tcp": "tcp://10.0.0.11:6379", // } func (p *EnvVars) Get(path string) map[string]string { cfg := make(map[string]string) prefix := strings.Trim(strings.ToUpper(invalidCharRegex.ReplaceAllString(path, "_")), "_") + "_" for _, envvar := range os.Environ() { tokens := strings.SplitN(envvar, "=", 2) if !strings.HasPrefix(tokens[0], prefix) { continue } normalizedName := strings.Replace(strings.ToLower(strings.TrimPrefix(tokens[0], prefix)), "_", "/", -1) cfg[normalizedName] = tokens[1] } return cfg } // Watch installs a monitor for a particular path and invokes the supplied value // changer when its value changes. The method returns a function that should be // used to terminate the watch. // // The func (p *EnvVars) Watch(path string, valueSetter func(string, map[string]string)) func() { // Assume that envvars never change and treat the call as a NOOP return func() {} }
package main import ( "fmt" m "math" "github.com/MaxHalford/gago" ) // Rastrigin minimum is 0 reached in (0, ..., 0) // Recommended search domain is [-5.12, 5.12] func Rastrigin(X []float64) float64 { sum := 10.0 * float64(len(X)) for _, x := range X { sum += m.Pow(x, 2) - 10*m.Cos(2*m.Pi*x) } return sum } func main() { // Instantiate a population ga := gago.Default // Fitness function function := Rastrigin // Number of variables the function takes as input variables := 2 // Initialize the genetic algorithm ga.Initialize(function, variables) // Enhancement for i := 0; i < 40; i++ { fmt.Println(ga.Best) ga.Enhance() } }
package api import ( "github.com/antihax/optional" openapi "github.com/sapphi-red/go-traq" ) var ( allUsersCache []openapi.User currentUsersCache []openapi.User ) type NameUserMap map[string]*openapi.User func GetNameUserMap(includeSuspended bool, canUseCache bool) (NameUserMap, error) { users, err := GetUsers(includeSuspended, canUseCache) if err != nil { return nil, err } ret := make(NameUserMap, len(users)) for _, u := range users { user := u ret[user.Name] = &user } return ret, nil } func GetUsers(includeSuspended bool, canUseCache bool) ([]openapi.User, error) { if canUseCache { if includeSuspended && allUsersCache != nil { return allUsersCache, nil } else if !includeSuspended && currentUsersCache != nil { return currentUsersCache, nil } } users, _, err := client.UserApi.GetUsers(auth, &openapi.UserApiGetUsersOpts{ IncludeSuspended: optional.NewBool(includeSuspended), }) if err != nil { return nil, err } if includeSuspended { allUsersCache = users } else { currentUsersCache = users } return users, err }
/*
 * @lc app=leetcode.cn id=623 lang=golang
 *
 * [623] 在二叉树中增加一行
 */

// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// @lc code=start
/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */

// addOneRow inserts a row of nodes carrying val at the given depth: every node
// at depth-1 gets a new left child (adopting its former left subtree) and a
// new right child (adopting its former right subtree). depth == 1 makes a new
// root whose left child is the whole original tree.
func addOneRow(root *TreeNode, val int, depth int) *TreeNode {
	if depth == 1 {
		return &TreeNode{Val: val, Left: root}
	}

	// BFS down to the nodes at depth-1, one whole level at a time.
	level := []*TreeNode{root}
	for d := 1; d < depth-1 && len(level) > 0; d++ {
		var next []*TreeNode
		for _, node := range level {
			if node.Left != nil {
				next = append(next, node.Left)
			}
			if node.Right != nil {
				next = append(next, node.Right)
			}
		}
		level = next
	}

	// Splice the new row beneath every node of that level.
	for _, node := range level {
		node.Left = &TreeNode{Val: val, Left: node.Left}
		node.Right = &TreeNode{Val: val, Right: node.Right}
	}
	return root
}

// @lc code=end
package app import ( "bytes" "context" "io" "net/http" "os" "path" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/dikaeinstein/godl/internal/pkg/version" "github.com/dikaeinstein/godl/test" ) func TestListRemoteVersions(t *testing.T) { testClient := test.NewTestClient(test.RoundTripFunc(func(req *http.Request) *http.Response { f, err := os.Open(path.Join("..", "..", "test", "testdata", "listbucketresult.xml")) if err != nil { panic(err) } return &http.Response{ StatusCode: http.StatusOK, Body: f, } })) failingTestClient := test.NewTestClient(test.RoundTripFunc(func(req *http.Request) *http.Response { return &http.Response{ StatusCode: http.StatusNotFound, Body: io.NopCloser(bytes.NewBufferString("")), } })) testCases := []struct { name string client *http.Client want string }{ {name: "getBinaryReleases succeeds", client: testClient}, { name: "handles getBinaryReleases error", client: failingTestClient, want: "\nerror fetching list: https://storage.googleapis.com/golang/?prefix=go1: ", }, } for i := range testCases { tC := testCases[i] t.Run(tC.name, func(t *testing.T) { lsRemote := ListRemote{tC.client, 2 * time.Second} err := lsRemote.Run(context.Background(), version.SortAsc) if err != nil { diff := cmp.Diff(tC.want, err.Error()) if diff != "" { t.Errorf(diff) } } }) } }
package main import ( "log" "github.com/LiveSocket/bot/service" "github.com/gammazero/nexus/v3/wamp" "github.com/gempir/go-twitch-irc/v2" ) func UserNoticeHandler(service *service.Service, client *Client) func(twitch.UserNoticeMessage) { return func(message twitch.UserNoticeMessage) { switch message.MsgID { case "resub": case "sub": case "subgift": case "anonsubgift": case "submysterygift": case "anonsubmysterygift": case "primepaidupgrade": broadcast("subscription", service, client, message) return case "giftpaidupgrade": case "anongiftpaidupgrade": case "raid": broadcast("raid", service, client, message) return } } } func broadcast(name string, service *service.Service, client *Client, message twitch.UserNoticeMessage) { if err := service.Publish("event."+name, nil, wamp.List{message}, nil); err != nil { log.Print(err) } }
package bdd_spike_test import ( "encoding/json" "github.com/tebeka/selenium" "io/ioutil" "strings" "time" ) type Page struct { Wd selenium.WebDriver } func (p *Page) Refresh() error { return p.Wd.Refresh() } func (p *Page) WaitForNavigateToUrlContains(keyword string, timeout time.Duration) error { return p.Wd.WaitWithTimeout(func(wd selenium.WebDriver) (bool, error) { currentURL, err := wd.CurrentURL() if err != nil { return false, nil } return strings.Contains(currentURL, keyword), nil }, timeout) } func (p *Page) FindElementWithTimeout(by, value string, timeout time.Duration) (selenium.WebElement, error) { err := p.Wd.WaitWithTimeout(func(wd selenium.WebDriver) (bool, error) { _, err := wd.FindElement(by, value) return err == nil, nil }, timeout) if err != nil { return nil, err } element, err := p.Wd.FindElement(by, value) if err != nil { return nil, err } return element, nil } func (p *Page) AddCookies(cookies []Cookie) error { for _, cookie := range cookies { err := hp.Wd.AddCookie(&selenium.Cookie{ Name: cookie.Name, Value: cookie.Value, Path: cookie.Path, Domain: cookie.Domain, Secure: cookie.Secure, Expiry: uint(cookie.Expiry), }) if err != nil { return err } } return nil } func readLoggedInUserCookies() ([]Cookie, error) { loginUserCookies, err := ioutil.ReadFile("cookie.json") if err != nil { return nil, err } var cookies []Cookie err = json.Unmarshal(loginUserCookies, &cookies) if err != nil { return nil, err } return cookies, nil } func (*Page) HoverElement(element selenium.WebElement, xOffset, yOffset int) error { return element.MoveTo(xOffset, yOffset) } type Cookie struct { Domain string `json:"domain"` Name string `json:"name"` Secure bool `json:"secure"` Expiry float64 `json:"expirationDate"` Path string `json:"path"` Value string `json:"value"` }
package main

// permute returns every permutation of nums (which must contain no duplicate
// values, since the visited set is keyed by value, not index).
func permute(nums []int) [][]int {
	res := make([][]int, 0)
	visited := make(map[int]bool, len(nums)) // value -> already on the path?
	path := make([]int, 0, len(nums))
	backtrace(nums, &path, &visited, &res)
	return res
}

// backtrace extends *path with every unused value in turn, recording a copy of
// the path in *res whenever it reaches full length.
func backtrace(nums []int, path *[]int, visited *map[int]bool, res *[][]int) {
	if len(*path) == len(nums) {
		// Snapshot the path; it will be mutated as we unwind.
		*res = append(*res, append([]int(nil), *path...))
		return
	}
	for _, n := range nums {
		if (*visited)[n] {
			continue
		}
		(*visited)[n] = true
		*path = append(*path, n)
		backtrace(nums, path, visited, res)
		*path = (*path)[:len(*path)-1]
		(*visited)[n] = false
	}
}
package smtpd import ( "fmt" "net" "net/smtp" "strings" "github.com/fitraditya/surelin-smtpd/config" "github.com/fitraditya/surelin-smtpd/data" "github.com/fitraditya/surelin-smtpd/log" ) var ( ports = []int{25, 2525, 587} ) type Mailer struct { Config config.SmtpConfig Store *data.DataStore SendMailChan chan *config.SMTPMessage NotifyMailChan chan interface{} } func NewMailer(ds *data.DataStore) *Mailer { cfg := config.GetSmtpConfig() sendMailChan := make(chan *config.SMTPMessage, 256) notifyMailChan := make(chan interface{}, 256) return &Mailer{Config: cfg, Store: ds, SendMailChan: sendMailChan, NotifyMailChan: notifyMailChan} } func (md *Mailer) Start() { // Start some mailer daemon for i := 0; i < 3; i++ { go md.SendMail(i) } } func (md *Mailer) SendMail(id int) { log.LogTrace("Running Mailer Daemon #<%d>", id) for { mc := <-md.SendMailChan for i := range mc.To { if strings.Contains(mc.To[i], md.Config.Domain) { md.Store.SaveMailChan <- mc } else { if !strings.Contains(mc.To[i], "@") { log.LogError("Invalid recipient address: <%s>", mc.To[i]) return } host := strings.Split(mc.To[i], "@")[1] addr, err := net.LookupMX(host) if err != nil { log.LogError("Cannot not lookup host: <%s>", addr) return } c, err := newClient(addr, ports) if err != nil { log.LogError("Cannot not create SMTP client") return } err = send(c, mc.From, mc.To[i], mc.Data) if err != nil { log.LogError("Cannot not send message") return } mc.Notify <- 1 } } } } func newClient(mx []*net.MX, ports []int) (*smtp.Client, error) { for i := range mx { for j := range ports { server := strings.TrimSuffix(mx[i].Host, ".") hostPort := fmt.Sprintf("%s:%d", server, ports[j]) client, err := smtp.Dial(hostPort) if err != nil { if j == len(ports)-1 { return nil, fmt.Errorf("Couldn't connect to servers %v on port %d", mx, ports[j]) } continue } return client, nil } } return nil, fmt.Errorf("Couldn't connect to servers %v on any common port", mx) } func send(c *smtp.Client, from string, to string, msg string) 
error { if err := c.Mail(from); err != nil { return err } if err := c.Rcpt(to); err != nil { return err } m, err := c.Data() if err != nil { return err } /* if m.Subject != "" { _, err = msg.Write([]byte("Subject: " + m.Subject + "\r\n")) if err != nil { return err } } if m.From != "" { _, err = msg.Write([]byte("From: <" + m.From + ">\r\n")) if err != nil { return err } } if m.To != "" { _, err = msg.Write([]byte("To: <" + m.To + ">\r\n")) if err != nil { return err } } */ _, err = fmt.Fprint(m, msg) if err != nil { return err } err = m.Close() if err != nil { return err } err = c.Quit() if err != nil { return err } return nil }
// Copyright 2019 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package cliccl

import (
	gosql "database/sql"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/cockroachdb/cockroach/pkg/build"
	"github.com/cockroachdb/cockroach/pkg/cli"
	"github.com/cockroachdb/cockroach/pkg/util/envutil"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/uuid"
	"github.com/cockroachdb/errors"
)

// This URL grants a license that is valid for 24 hours.
const licenseDefaultURL = "https://register.cockroachdb.com/api/license"

// We make licenseURL configurable for use in tests.
var licenseURL = envutil.EnvOrDefaultString("COCKROACH_DEMO_LICENSE_URL", licenseDefaultURL)

// getLicense requests a demo license for the given cluster from the licensing
// endpoint and returns the raw response body as the license string.
func getLicense(clusterID uuid.UUID) (string, error) {
	client := &http.Client{
		// Bounded timeout so demo startup cannot hang on the network.
		Timeout: 5 * time.Second,
	}
	req, err := http.NewRequest("GET", licenseURL, nil)
	if err != nil {
		return "", err
	}
	// Send some extra information to the endpoint.
	q := req.URL.Query()
	// Let the endpoint know we are requesting a demo license.
	q.Add("kind", "demo")
	q.Add("version", build.VersionPrefix())
	q.Add("clusterid", clusterID.String())
	req.URL.RawQuery = q.Encode()

	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", errors.New("unable to connect to licensing endpoint")
	}
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(bodyBytes), nil
}

// getAndApplyLicense fetches a demo license and installs it (together with the
// organization name) via cluster settings. A licensing-server failure is
// reported to stderr but returned as (false, nil) so the caller can proceed
// without enterprise features.
func getAndApplyLicense(db *gosql.DB, clusterID uuid.UUID, org string) (bool, error) {
	license, err := getLicense(clusterID)
	if err != nil {
		fmt.Fprintf(log.OrigStderr, "\nerror while contacting licensing server:\n%+v\n", err)
		return false, nil
	}
	if _, err := db.Exec(`SET CLUSTER SETTING cluster.organization = $1`, org); err != nil {
		return false, err
	}
	if _, err := db.Exec(`SET CLUSTER SETTING enterprise.license = $1`, license); err != nil {
		return false, err
	}
	return true, nil
}

func init() {
	// Set the GetAndApplyLicense function within cockroach demo.
	// This separation is done to avoid using enterprise features in an OSS/BSL build.
	cli.GetAndApplyLicense = getAndApplyLicense
}
package model // PathConfig represents the options available for the vault path template type PathConfig struct { Namespace string ContainerName string DeploymentName string }
package config

// GetPlatformDefaultConfig gets the defaults for the platform (Windows:
// open files/links via `cmd /c start`).
func GetPlatformDefaultConfig() []byte {
	// BUG FIX: the openCommand template contained a corrupted placeholder;
	// restored to {{filename}}, matching the {{link}} placeholder below.
	return []byte(
		`os:
  openCommand: 'cmd /c "start "" {{filename}}"'
  openLinkCommand: 'cmd /c "start "" {{link}}"'`)
}
package view

import (
	"encoding/json"
	"fmt"

	"github.com/zput/innodb_view/log"
	"github.com/zput/innodb_view/mysql_define"
	"github.com/zput/innodb_view/print"
	"github.com/zput/ringbuffer"
)

// ----------------- FspHeaderPage ------------------------------------//

// FspHeaderPage models an InnoDB FSP_HDR page: the common FIL header/trailer
// plus the file-space header payload.
type FspHeaderPage struct {
	FileAllPage `yaml:"FileAllPage" self:"FileAllPage"`
	FspHeader   `yaml:"FspHeader" self:"FspHeader"`
}

// FspHeader holds the file-space header fields, in their on-disk order.
type FspHeader struct {
	FspSpaceID    uint32 `yaml:"FspSpaceID" self:"FspSpaceID"`
	FspNotUsed    uint32 `yaml:"FspNotUsed" self:"FspNotUsed"`
	FspSize       uint32 `yaml:"FspSize" self:"FspSize"`
	FspFreeLimit  uint32 `yaml:"FspFreeLimit" self:"FspFreeLimit"`
	FspSpaceFlags uint32 `yaml:"FspSpaceFlags" self:"FspSpaceFlags"`
	FspFragNUsed  uint32 `yaml:"FspFragNUsed" self:"FspFragNUsed"`

	FspFree     *ListBaseNode `yaml:"FspFree" self:"FspFree"`
	FspFreeFrag *ListBaseNode `yaml:"FspFreeFrag" self:"FspFreeFrag"`
	FspFullFrag *ListBaseNode `yaml:"FspFullFrag" self:"FspFullFrag"`

	FspSegID uint64 `yaml:"FspSegID" self:"FspSegID"`

	FspSegInodesFull *ListBaseNode `yaml:"FspSegInodesFull" self:"FspSegInodesFull"`
	FspSegInodesFree *ListBaseNode `yaml:"FspSegInodesFree" self:"FspSegInodesFree"`
}

// GetFileType reports this page's FIL page type.
func (fhp *FspHeaderPage) GetFileType() mysql_define.T_FIL_PAGE_TYPE {
	return mysql_define.T_FIL_PAGE_TYPE(fhp.FileAllPage.PageType)
}

// PageParseFILHeader delegates FIL-header parsing to the embedded FileAllPage.
func (fhp *FspHeaderPage) PageParseFILHeader(buffer *ringbuffer.RingBuffer) error {
	if err := fhp.FileAllPage.PageParseFILHeader(buffer); err != nil {
		return err
	}
	return nil
}

// PageParseFILTailer delegates FIL-trailer parsing to the embedded FileAllPage.
func (fhp *FspHeaderPage) PageParseFILTailer(buffer *ringbuffer.RingBuffer, pageSize mysql_define.PAGE_SIZE) error {
	if err := fhp.FileAllPage.PageParseFILTailer(buffer, pageSize); err != nil {
		return err
	}
	return nil
}

// PageParseBody walks the FSP header fields in on-disk order using the
// buffer's explore cursor. Each ExploreRetrieve advances by the *difference*
// between consecutive mysql_define offsets, so the statement order below must
// match the on-disk layout exactly.
func (fhp *FspHeaderPage) PageParseBody(buffer *ringbuffer.RingBuffer, pageSize mysql_define.PAGE_SIZE) error {
	var isUsingExplore = true

	buffer.ExploreBegin()

	// Skip the FIL header to reach the FSP payload.
	if err := buffer.ExploreRetrieve(mysql_define.FIL_PAGE_DATA); err != nil {
		log.Error(err)
		return err
	}

	fhp.FspSpaceID = buffer.PeekUint32(isUsingExplore)

	log.Debugf("FSP spaceID[%d]", fhp.FspSpaceID)

	if err := buffer.ExploreRetrieve(mysql_define.FSP_NOT_USED); err != nil {
		log.Error(err)
		return err
	}
	fhp.FspNotUsed = buffer.PeekUint32(isUsingExplore)

	if err := buffer.ExploreRetrieve(mysql_define.FSP_SIZE - mysql_define.FSP_NOT_USED); err != nil {
		log.Error(err)
		return err
	}
	fhp.FspSize = buffer.PeekUint32(isUsingExplore)

	if err := buffer.ExploreRetrieve(mysql_define.FSP_FREE_LIMIT - mysql_define.FSP_SIZE); err != nil {
		log.Error(err)
		return err
	}
	fhp.FspFreeLimit = buffer.PeekUint32(isUsingExplore)

	if err := buffer.ExploreRetrieve(mysql_define.FSP_SPACE_FLAGS - mysql_define.FSP_FREE_LIMIT); err != nil {
		log.Error(err)
		return err
	}
	fhp.FspSpaceFlags = buffer.PeekUint32(isUsingExplore)

	if err := buffer.ExploreRetrieve(mysql_define.FSP_FRAG_N_USED - mysql_define.FSP_SPACE_FLAGS); err != nil {
		log.Error(err)
		return err
	}
	fhp.FspFragNUsed = buffer.PeekUint32(isUsingExplore)

	if err := buffer.ExploreRetrieve(mysql_define.FSP_FREE - mysql_define.FSP_FRAG_N_USED); err != nil {
		log.Error(err)
		return err
	}

	// Three list base nodes follow back-to-back; getListBaseNode advances
	// the explore cursor itself.
	var err error
	if fhp.FspFree, err = getListBaseNode(buffer); err != nil {
		log.Error(err)
		return err
	}
	if fhp.FspFreeFrag, err = getListBaseNode(buffer); err != nil {
		log.Error(err)
		return err
	}
	if fhp.FspFullFrag, err = getListBaseNode(buffer); err != nil {
		log.Error(err)
		return err
	}

	fhp.FspSegID = buffer.PeekUint64(isUsingExplore)

	if err := buffer.ExploreRetrieve(mysql_define.FSP_SEG_INODES_FULL - mysql_define.FSP_SEG_ID); err != nil {
		log.Error(err)
		return err
	}

	if fhp.FspSegInodesFull, err = getListBaseNode(buffer); err != nil {
		log.Error(err)
		return err
	}
	if fhp.FspSegInodesFree, err = getListBaseNode(buffer); err != nil {
		log.Error(err)
		return err
	}

	// Discard the explored region in one step.
	buffer.ExploreBreak()

	return nil
}

// --------------- inner method function ----------------- //

// printPageType dumps the parsed page as indented JSON (debug helper).
func (fhp *FspHeaderPage) printPageType() error {
	prettyFormat, err := json.MarshalIndent(fhp, "", "    ")
	if err != nil {
		return err
	}
	fmt.Printf("%s", string(prettyFormat))
	return nil
}

// generateHumanFormat assembles the printable field layout: FIL header, FSP
// header (bit offsets derived from FIL_PAGE_DATA), then FIL trailer.
func (fhp *FspHeaderPage) generateHumanFormat() []print.PrintFormatT {
	var waitPrintT []print.PrintFormatT
	var currentPosition int

	waitPrintT = append(waitPrintT, fhp.FileAllPage.generateHumanFormatHeader()...)

	waitPrintT = append(waitPrintT, *print.NewPrintFormatT(print.PrintDivideSignBlock, "index page:FSP header"))
	currentPosition = mysql_define.FIL_PAGE_DATA
	currentPosition *= 8 // byte offset -> bit offset
	waitPrintT = append(waitPrintT, print.Translate(&currentPosition, fhp.FspHeader)...)

	// TODO
	//waitPrintT = append(waitPrintT, *print.NewPrintFormatT(print.PrintDivideSignBlock, "index page:entry(0-84)"))
	//currentPosition = mysql_define.FIL_PAGE_DATA+mysql_define.FSEG_INODE_PAGE_NODE
	//currentPosition *= 8
	//waitPrintT = append(waitPrintT, print.Translate(&currentPosition, fhp.INodeEntrySlice)...)

	waitPrintT = append(waitPrintT, fhp.FileAllPage.generateHumanFormatTrailer()...)

	return waitPrintT
}

// PrintPageType prints the human-readable layout followed by the common
// FileAllPage output.
func (fhp *FspHeaderPage) PrintPageType() error {
	fmt.Printf("%s\n", print.PrintFun(fhp.generateHumanFormat()))
	fmt.Println()

	//fhp.printPageType()

	if err := fhp.FileAllPage.PrintPageType(); err != nil {
		log.Error(err)
		return err
	}
	return nil
}
package main

import "fmt"

// Demonstrates the three common range forms: slice with index, map with key
// only, and map with key and value.
func main() {
	numbers := []int{1, 2, 3, 4, 5, 6, 7, 8}
	for i, n := range numbers {
		fmt.Println("slice item", i, "is", n)
	}

	countryCapitalMap := map[string]string{"flance": "paris", "italy": "rome", "japan": "tokyo"}

	// key-only range; the value is looked up explicitly
	for country := range countryCapitalMap {
		fmt.Println("capital of", country, "is", countryCapitalMap[country])
	}

	// key-and-value range
	for country, capital := range countryCapitalMap {
		fmt.Println("capital of", country, "is", capital)
	}
}
package main

import "fmt"

// Prompt for a name on stdin and print a greeting to stdout.
func main() {
	name := ""
	fmt.Print("input your name: ")
	fmt.Scan(&name)
	fmt.Printf("Hello %v", name)
}
package orm

import (
	"github.com/jinzhu/gorm"
)

// OrderDataStore is the order data store backed by a gorm DB handle.
type OrderDataStore struct {
	DB *gorm.DB
}

// GetAll returns all the saved orders with their cart items and products
// preloaded. A missing-record condition is reported as (nil, rows, nil)
// rather than an error.
func (store *OrderDataStore) GetAll() (interface{}, int64, error) {
	orders := []Order{}
	connection := store.DB.Preload("CartItems.Product").Find(&orders)
	if connection.RecordNotFound() {
		return nil, connection.RowsAffected, nil
	}
	if connection.Error != nil {
		return nil, connection.RowsAffected, connection.Error
	}
	return orders, connection.RowsAffected, nil
}

// GetByID returns a order based on its ID. The result is wrapped in a
// one-element slice so callers get the same shape as GetAll.
func (store *OrderDataStore) GetByID(id uint) (interface{}, int64, error) {
	order := Order{}
	connection := store.DB.Preload("CartItems.Product").First(&order, id)
	if connection.RecordNotFound() {
		return nil, connection.RowsAffected, nil
	}
	if connection.Error != nil {
		return nil, connection.RowsAffected, connection.Error
	}
	return []Order{order}, connection.RowsAffected, nil
}

// Add creates a new order. item must be a *Order; the type assertion panics
// otherwise.
func (store *OrderDataStore) Add(item interface{}) (interface{}, int64, error) {
	order := item.(*Order)
	connection := store.DB.Create(order)
	if connection.Error != nil {
		return nil, connection.RowsAffected, connection.Error
	}
	return []Order{*order}, connection.RowsAffected, nil
}

// DeleteByID removes a order based in its ID, then clears its CartItems
// association.
func (store *OrderDataStore) DeleteByID(id uint) (int64, error) {
	order := Order{Model: Model{ID: id}}
	connection := store.DB.Delete(&order)
	if connection.Error != nil {
		return connection.RowsAffected, connection.Error
	}
	// NOTE(review): the Clear() return value is dropped and the second
	// connection.Error check re-reads the same field checked above — an
	// association error here likely goes unnoticed. Verify against gorm docs.
	connection.Model(&order).Association("CartItems").Clear()
	if connection.Error != nil {
		return connection.RowsAffected, connection.Error
	}
	return connection.RowsAffected, nil
}

// UpdateByID updates a order based on its ID and replaces its CartItems
// association with the incoming set. item must be a *Order.
func (store *OrderDataStore) UpdateByID(id uint, item interface{}) (interface{}, int64, error) {
	order := item.(*Order)
	order.ID = id
	connection := store.DB.Save(order)
	// NOTE(review): the Replace(...) result is ignored; only the Save error is
	// surfaced below — confirm this is intentional.
	connection.Model(order).Association("CartItems").Replace(order.CartItems)
	if connection.Error != nil {
		return nil, connection.RowsAffected, connection.Error
	}
	return []Order{*order}, connection.RowsAffected, nil
}
package leetcode

import (
	"reflect"
	"testing"
)

// TestBank_Deposit checks that Deposit succeeds for a valid account and fails
// for an out-of-range account number.
func TestBank_Deposit(t *testing.T) {
	type fields struct {
		B []int64
	}
	type args struct {
		account int
		money   int64
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name:   "testDeposit01",
			fields: fields{B: []int64{10, 20, 25}},
			args:   args{account: 1, money: 30},
			want:   true,
		},
		{
			// Fixed: this case was also named "testDeposit01", which made
			// t.Run output ambiguous on failure.
			name:   "testDeposit02",
			fields: fields{B: []int64{10, 20, 25}},
			args:   args{account: 4, money: 30},
			want:   false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			b := &Bank{
				B: tt.fields.B,
			}
			if got := b.Deposit(tt.args.account, tt.args.money); got != tt.want {
				t.Errorf("Deposit() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestBank_Transfer checks a transfer within balance and one exceeding the
// source account's balance.
func TestBank_Transfer(t *testing.T) {
	type fields struct {
		B []int64
	}
	type args struct {
		account1 int
		account2 int
		money    int64
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name:   "testTransfer01",
			fields: fields{B: []int64{10, 20, 30, 40, 50}},
			args:   args{account1: 4, account2: 1, money: 25},
			want:   true,
		},
		{
			name:   "testTransfer02",
			fields: fields{B: []int64{10, 20, 30, 40, 50}},
			args:   args{account1: 4, account2: 1, money: 52},
			want:   false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			b := &Bank{
				B: tt.fields.B,
			}
			if got := b.Transfer(tt.args.account1, tt.args.account2, tt.args.money); got != tt.want {
				t.Errorf("Transfer() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestBank_Withdraw checks a withdrawal within balance and one exceeding it.
func TestBank_Withdraw(t *testing.T) {
	type fields struct {
		B []int64
	}
	type args struct {
		account int
		money   int64
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name:   "testWithdraw01",
			fields: fields{B: []int64{10, 20, 30, 40, 50}},
			args:   args{account: 4, money: 25},
			want:   true,
		},
		{
			name:   "testWithdraw02",
			fields: fields{B: []int64{10, 20, 30, 40, 50}},
			args:   args{account: 1, money: 25},
			want:   false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			b := &Bank{
				B: tt.fields.B,
			}
			if got := b.Withdraw(tt.args.account, tt.args.money); got != tt.want {
				t.Errorf("Withdraw() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestConstructor checks that Constructor stores the initial balances.
func TestConstructor(t *testing.T) {
	type args struct {
		balance []int64
	}
	tests := []struct {
		name string
		args args
		want Bank
	}{
		{
			name: "testConstructor",
			args: args{balance: []int64{10, 20, 30}},
			want: Bank{B: []int64{10, 20, 30}},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := Constructor(tt.args.balance); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Constructor() = %v, want %v", got, tt.want)
			}
		})
	}
}
package main

import (
	"fmt"
	"unicode"
)

// The unicode package provides data and functions to test properties of
// Unicode code points.
func main() {
	// Classification (IsXxx) examples.
	exampleIs()
	// Case-folding example.
	exampleSimpleFold()
	// Case-conversion example.
	exampleTo()
}

func exampleIs() {
	// constant with mixed type runes
	const mixed = "\b5Ὂg̀9! ℃ᾭG"
	for _, c := range mixed {
		fmt.Printf("For %q:\n", c)
		// Reports whether the rune is a control character (mainly category C
		// plus a few others such as surrogates).
		if unicode.IsControl(c) {
			fmt.Println("\tis control rune")
		}
		// Reports whether the rune is a decimal digit.
		if unicode.IsDigit(c) {
			fmt.Println("\tis digit rune")
		}
		// Reports whether the rune is a Unicode graphic: letters, marks,
		// numbers, symbols, punctuation and spaces (categories L, M, N, P, S, Zs).
		if unicode.IsGraphic(c) {
			fmt.Println("\tis graphic rune")
		}
		// Reports whether the rune is a letter.
		if unicode.IsLetter(c) {
			fmt.Println("\tis letter rune")
		}
		// Reports whether the rune is a lower-case letter.
		if unicode.IsLower(c) {
			fmt.Println("\tis lower case rune")
		}
		// Reports whether the rune is an upper-case letter.
		if unicode.IsUpper(c) {
			fmt.Println("\tis upper case rune")
		}
		// Reports whether the rune is a mark character.
		if unicode.IsMark(c) {
			fmt.Println("\tis mark rune")
		}
		// Reports whether the rune is a number character.
		if unicode.IsNumber(c) {
			fmt.Println("\tis number rune")
		}
		// Reports whether the rune is printable by Go. Essentially the same
		// as IsGraphic, except that the ASCII space U+0020 returns false.
		if unicode.IsPrint(c) {
			fmt.Println("\tis printable rune")
		}
		// Reports whether the rune is a Unicode punctuation character.
		if unicode.IsPunct(c) {
			fmt.Println("\tis punct rune")
		}
		// Reports whether the rune is a space character. In Latin-1 these are
		// '\t', '\n', '\v', '\f', '\r', ' ', U+0085 (NEL), U+00A0 (NBSP);
		// other space characters come from category Z and Pattern_White_Space.
		if unicode.IsSpace(c) {
			fmt.Println("\tis space rune")
		}
		// Reports whether the rune is a Unicode symbol character.
		if unicode.IsSymbol(c) {
			fmt.Println("\tis symbol rune")
		}
		// Reports whether the rune is a title-case letter.
		if unicode.IsTitle(c) {
			fmt.Println("\tis title case rune")
		}
	}
}

func exampleSimpleFold() {
	// SimpleFold iterates over the Unicode code points equivalent under
	// simple case folding. Among the code points equivalent to r (including r
	// itself) it returns the smallest one greater than r if any exist;
	// otherwise it returns the smallest code point in the fold set.
	fmt.Printf("%#U\n", unicode.SimpleFold('A')) // 'a'
	fmt.Printf("%#U\n", unicode.SimpleFold('a')) // 'A'
	fmt.Printf("%#U\n", unicode.SimpleFold('K')) // 'k'
	fmt.Printf("%#U\n", unicode.SimpleFold('k')) // '\u212A' (Kelvin symbol, K)
	fmt.Printf("%#U\n", unicode.SimpleFold('\u212A')) // 'K'
	fmt.Printf("%#U\n", unicode.SimpleFold('1')) // '1'
}

func exampleTo() {
	const lcG = 'g'
	// To upper case.
	fmt.Printf("%#U\n", unicode.To(unicode.UpperCase, lcG))
	fmt.Println(unicode.ToUpper(lcG))
	// To lower case.
	fmt.Printf("%#U\n", unicode.To(unicode.LowerCase, lcG))
	fmt.Println(unicode.ToLower(lcG))
	// To title case.
	fmt.Printf("%#U\n", unicode.To(unicode.TitleCase, lcG))
	fmt.Println(unicode.ToTitle(lcG))
}
package perceiving

import (
	"github.com/20zinnm/entity"
	"github.com/20zinnm/spac/client/physics"
	"github.com/20zinnm/spac/common/net/downstream"
	"github.com/faiface/pixel"
	"github.com/faiface/pixel/imdraw"
	"github.com/faiface/pixel/pixelgl"
	"github.com/google/flatbuffers/go"
	"github.com/jakecoffman/cp"
	"image/color"
	"math"
	"sort"
	"time"
)

var (
	// Body outline of the ship in local (unrotated) coordinates.
	shipVertices = []cp.Vector{{0, 51}, {-24, -21}, {0, -9}, {24, -21}}
)

// shipPhysics is a timestamped snapshot of a ship's translational and
// rotational state, used as the unit of the interpolation buffer.
type shipPhysics struct {
	timestamp time.Time
	physics.TranslationalState
	physics.RotationalState
}

// Step advances both state components by dt seconds (extrapolation).
func (s shipPhysics) Step(dt float64) shipPhysics {
	return shipPhysics{
		TranslationalState: s.TranslationalState.Step(dt),
		RotationalState:    s.RotationalState.Step(dt),
	}
}

// Lerp linearly interpolates toward `to` by delta in [0,1].
// Note: the result carries a zero timestamp.
func (s shipPhysics) Lerp(to shipPhysics, delta float64) shipPhysics {
	return shipPhysics{
		TranslationalState: s.TranslationalState.Lerp(to.TranslationalState, delta),
		RotationalState:    s.RotationalState.Lerp(to.RotationalState, delta),
	}
}

// Ship is the client-side view of a ship entity. It keeps a small ring of
// recent server snapshots (`buffer`, newest first) and smooths rendering by
// interpolating/extrapolating in FixedUpdate.
type Ship struct {
	shipPhysics
	ID        entity.ID
	health    int
	bufferLen int
	buffer    [InterpolationBuffer]shipPhysics
	Thrusting bool
	Armed     bool
}

// NewShip creates an empty ship view for the given entity ID.
func NewShip(id entity.ID) *Ship {
	return &Ship{ID: id}
}

// Update ingests one server snapshot (flatbuffer-encoded) into the buffer,
// keeping the buffer sorted newest-first by timestamp.
func (s *Ship) Update(timestamp time.Time, table *flatbuffers.Table) {
	shipUpdate := new(downstream.Ship)
	shipUpdate.Init(table.Bytes, table.Pos)
	posn := tocpv(shipUpdate.Position(new(downstream.Vector)))
	vel := tocpv(shipUpdate.Velocity(new(downstream.Vector)))
	angle := float64(shipUpdate.Angle())
	angularVel := float64(shipUpdate.AngularVelocity())
	phys := shipPhysics{
		timestamp: timestamp,
		TranslationalState: physics.TranslationalState{
			Position: posn,
			Velocity: vel,
		},
		RotationalState: physics.RotationalState{
			Angle:           angle,
			AngularVelocity: angularVel,
		},
	}
	// Shift everything down one slot and put the new snapshot in front.
	copy(s.buffer[1:], s.buffer[:])
	s.buffer[0] = phys
	if s.bufferLen == 0 {
		// First snapshot: adopt it as the rendered state immediately.
		s.shipPhysics = phys
	}
	if s.bufferLen < InterpolationBuffer {
		s.bufferLen++
	}
	// Re-sort in case snapshots arrive out of order (newest first).
	sort.SliceStable(s.buffer[:s.bufferLen], func(i, j int) bool {
		return s.buffer[i].timestamp.After(s.buffer[j].timestamp)
	})
	//// interpolation
	//if posn.DistanceSq(s.lastPos) < 1000 {
	//	fmt.Println("lerping")
	//	s.Physics.SetPosition(s.lastPos.Lerp(posn, delta))
	//	s.Physics.SetVelocityVector(s.lastVel.Lerp(vel, delta))
	//	s.Physics.SetAngle(cp.Lerp(s.lastAngle, angle, delta))
	//	s.Physics.SetAngularVelocity(cp.Lerp(s.lastAngularVel, angularVel, delta))
	//} else {
	//	fmt.Println("jumping")
	//	s.Physics.SetPosition(posn)
	//	s.Physics.SetVelocityVector(vel)
	//	s.Physics.SetAngle(angle)
	//	s.Physics.SetAngularVelocity(angularVel)
	//}
	//s.lastPos = posn
	//s.lastVel = vel
	//s.lastAngle = angle
	//s.lastAngularVel = angularVel
	s.Thrusting = shipUpdate.Thrusting()
	s.Armed = shipUpdate.Armed()
	s.health = int(shipUpdate.Health())
}

// Position returns the ship's current (smoothed) position.
func (s *Ship) Position() pixel.Vec {
	return pixel.Vec(s.TranslationalState.Position)
}

// Health returns the last reported health value.
func (s *Ship) Health() int {
	return s.health
}

var (
	// Exhaust flame triangle and bullet anchor point, local coordinates.
	shipThrusterVertices = []pixel.Vec{{-8, -9}, {8, -9}, {0, -40}}
	shipArmedVertex      = pixel.Vec{Y: 8}
)

// calcLabelY computes the vertical offset for the name label as a function of
// the ship's angle (empirically tuned curve).
func calcLabelY(theta float64) float64 {
	return -12.7096*math.Sin(-2*(theta+3.75912)) + 44
}

// FixedUpdate advances the rendered state: it interpolates between the two
// buffered snapshots straddling (now - InterpolationBackTime), or
// extrapolates from the newest snapshot when the buffer is stale.
func (s *Ship) FixedUpdate() {
	interpolationTime := time.Now().Add(-InterpolationBackTime)
	if s.buffer[0].timestamp.After(interpolationTime) {
		// INTERPOLATE
		// The `i == s.bufferLen-1` clause guarantees the loop returns before
		// i can reach s.bufferLen, so the index stays in range.
		for i := 1; i <= s.bufferLen; i++ {
			if s.buffer[i].timestamp.Before(interpolationTime) || i == s.bufferLen-1 {
				newer := s.buffer[i-1]
				older := s.buffer[i]
				length := newer.timestamp.Sub(older.timestamp).Seconds()
				var t float64
				if length > 0.0001 {
					t = interpolationTime.Sub(older.timestamp).Seconds() / length
				}
				s.shipPhysics = s.shipPhysics.Lerp(older.Lerp(newer, t), InterpolationBackTime.Seconds())
				return
			}
		}
	} else {
		// EXTRAPOLATE (rough)
		dt := time.Now().Sub(s.buffer[0].timestamp).Seconds()
		newer := s.shipPhysics.Step(dt)
		s.shipPhysics = s.shipPhysics.Lerp(s.buffer[0].Lerp(newer, InterpolationBackTime.Seconds()), InterpolationConstant)
	}
}

// Draw renders the ship into imd: thruster flame (when thrusting), hull, and
// a loaded-bullet marker (when armed), all rotated/translated to world space.
func (s *Ship) Draw(_ *pixelgl.Canvas, imd *imdraw.IMDraw) {
	a := s.Angle
	p := pixel.Vec(s.TranslationalState.Position)
	// draw thruster
	if s.Thrusting {
		imd.Color = color.RGBA{
			R: 248,
			G: 196,
			B: 69,
			A: 255,
		}
		for _, v := range shipThrusterVertices {
			imd.Push(v.Rotated(a).Add(p))
		}
		imd.Polygon(0)
	}
	// draw body
	imd.Color = color.RGBA{
		R: 242,
		G: 75,
		B: 105,
		A: 255,
	}
	for _, v := range shipVertices {
		imd.Push(pixel.Vec(v).Rotated(a).Add(p))
	}
	imd.Polygon(0)
	// draw bullet
	if s.Armed {
		imd.Color = color.RGBA{
			R: 74,
			G: 136,
			B: 212,
			A: 255,
		}
		imd.Push(shipArmedVertex.Rotated(a).Add(p))
		imd.Circle(8, 0)
	}
	//// draw name
	//if s.Name != "" {
	//	if s.text == nil {
	//		s.text = text.New(pixel.Vec{}, fonts.Atlas)
	//	}
	//	s.text.Clear()
	//	s.text.Write([]byte(s.Name))
	//	s.text.Draw(canvas, pixel.IM.Moved(p.Sub(pixel.Vec{s.text.Bounds().W() / 2, -calcLabelY(s.Physics.Angle())})))
	//	s.text.Clear()
	//	//fmt.Println(s.Physics.Angle(), calcLabelY(s.Physics.Angle()))
	//}
}
package main

import (
	"flag"
	"fmt"
	"os"
	"bufio"
	"time"
	"github.com/nsf/termbox-go"
	"github.com/inazak/cpu3bit"
)

const (
	fgColor   = termbox.ColorWhite
	bgColor   = termbox.ColorBlack
	fgEmColor = termbox.ColorBlack
	bgEmColor = termbox.ColorWhite
)

// Static screen layout; runtime values are painted over the '#' placeholders.
var display = []string{
	//01234567890123456789012345678901234567890123456789
	/* 0 */ " 3bit CPU Demo ",
	/* 1 */ " ",
	/* 2 */ " Register A [####] Address Memory ",
	/* 3 */ " 0 [#######] ",
	/* 4 */ " Register B [####] 1 [#######] ",
	/* 5 */ " 2 [#######] ",
	/* 6 */ " Carry Flag [#] 3 [#######] ",
	/* 7 */ " 4 [#######] ",
	/* 8 */ " Program Counter 5 [#######] ",
	/* 9 */ " [###] 6 [#######] ",
	/* 10 */ " 7 [#######] ",
	/* 11 */ " q: quit bit6 ... 0 ",
	/* 12 */ " t: tick(manual mode only) ",
	/* 13 */ " ",
	/* 14 */ " ########## 3bit cpu Instructions ########## ",
	/* 15 */ " ",
	/* 16 */ " 6 5 4 3-0 carry | mnemonic ",
	/* 17 */ " ---------------------|---------------- ",
	/* 18 */ " 0 1 0 Imd x | ADD A, Imd ",
	/* 19 */ " 0 1 1 Imd x | MOV A, Imd ",
	/* 20 */ " 1 0 0 Imd x | ADD B, Imd ",
	/* 21 */ " 1 0 1 Imd x | MOV B, Imd ",
	/* 22 */ " 1 1 0 Imd 0 | JNC Imd (jump if carry=0)",
	/* 23 */ " 1 1 1 Imd x | JMP Imd ",
	/* 24 */ " 0 0 0 Imd x | NOP ",
	/* 25 */ " 0 0 1 Imd x | NOP ",
}

// sample image is 3*3=9
// Default program; each row is one word stored LSB-first (bit0 .. bit6).
var memoryimage = [][]int{
	//bit0 .. bit6
	{0,0,0,0,1,0,1}, //0: mov b,0
	{1,0,1,1,1,1,0}, //1: mov a,13
	{1,1,0,0,0,0,1}, //2: add b,3
	{1,0,0,0,0,1,0}, //3: add a,1
	{0,1,0,0,0,1,1}, //4: jnc 2
	{1,0,1,0,1,1,1}, //5: jmp 5
	{0,0,0,0,0,0,0}, // Register B is 1001 (9)
	{0,0,0,0,0,0,0},
}

var load = flag.String("load", "", "textfile representing memory image")
var manual = flag.Bool("manual", false, "ticking by hand")

var info *cpu3bit.CPUInfo

// main wires the demo together: parse flags, optionally load a memory image,
// initialize the CPU and termbox, then run the event loop (auto-ticking once
// per second unless -manual is given).
func main() {
	flag.Parse()

	// load text file
	if *load != "" {
		var err error
		memoryimage, err = loadMemoryImageText(*load)
		if err != nil {
			fmt.Printf("%v", err)
			return
		}
	}

	// initialize
	cpu3bit.Initialize()
	info = cpu3bit.MakeComputer(memoryimage)

	err := termbox.Init()
	if err != nil {
		panic(err)
	}
	defer termbox.Close()

	// Pump termbox events into a channel so they can be selected on.
	eventQueue := make(chan termbox.Event)
	go func(){
		for {
			eventQueue <- termbox.PollEvent()
		}
	}()

	render()

	//auto ticking
	if ! *manual {
		go func(){
			for {
				select {
				case <- time.After(time.Millisecond * 1000):
					cpu3bit.TickTock()
					render()
				}
			}
		}()
	}

	for {
		select {
		case ev := <-eventQueue:
			if ev.Type == termbox.EventKey {
				switch {
				case ev.Ch == 't':
					if *manual {
						cpu3bit.TickTock() // clockdown/up and update
						render()
					}
				case ev.Ch == 'q' || ev.Key == termbox.KeyEsc:
					return
				}
			}
		}
	}
}

// render repaints the whole screen from the static layout plus the current
// CPU state (registers, carry flag, program counter, memory, PC arrow).
func render() {
	termbox.Clear(termbox.ColorBlack, termbox.ColorBlack)

	//title
	setText(0, 0, fgEmColor, bgEmColor, display[0])

	//other
	for i:=1; i<len(display); i++ {
		setText(0, i, fgColor, bgColor, display[i])
	}

	//text update
	setBinaryText(13, 2, ToRunes(cpu3bit.ToString(info.RegisterA())))
	setBinaryText(13, 4, ToRunes(cpu3bit.ToString(info.RegisterB())))
	setBinaryText(13, 6, ToRunes(cpu3bit.ToString(info.CarryFlag())))
	setBinaryText( 2, 9, ToRunes(cpu3bit.ToString(info.ProgramCounter())))
	setMemoryText()
	setAddrArrow()

	//reflesh
	termbox.Flush()
}

// setText writes msg starting at cell (x, y) in the given colors.
func setText(x, y int, fg, bg termbox.Attribute, msg string) {
	for _, c := range msg {
		termbox.SetCell(x, y, c, fg, bg)
		x++
	}
}

// ToRunes converts s to a rune slice in reverse order, so that bit strings
// (LSB-first internally) display with the most significant bit on the left.
func ToRunes(s string) []rune {
	runes := []rune{}
	for _, r := range s {
		runes = append(runes, r)
	}
	//reverse order
	for i,j := 0,len(runes)-1; i<j; i,j = i+1,j-1 {
		runes[i], runes[j] = runes[j], runes[i]
	}
	return runes
}

// setBinaryText renders a bit string, highlighting '1' cells.
func setBinaryText(x, y int, runes []rune) {
	for i, r := range runes {
		if r == '1' {
			termbox.SetCell(x+i, y, r, fgEmColor, bgEmColor)
		} else {
			termbox.SetCell(x+i, y, r, fgColor, bgColor)
		}
	}
}

// setMemoryText renders every memory word into the layout's memory column.
func setMemoryText() {
	for i, w := range info.Memory() {
		runes := ToRunes(cpu3bit.ToString(w))
		setBinaryText(37, 3+i, runes)
	}
}

// setAddrArrow draws a '>' next to the memory row the program counter points
// at. The PC is a bit slice (LSB-first), folded into an integer here.
func setAddrArrow() {
	y := 0
	p := info.ProgramCounter()
	for i:=0; i<len(p); i++ {
		y += p[i] << uint(i)
	}
	termbox.SetCell(28, 3+y, '>', fgColor, bgColor)
}

// load memory image [8][7]int from textfile
func loadMemoryImageText(filename string) (image [][]int, err error) {
	f, err := os.Open(filename)
	if err != nil {
		return image, err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		if a := convert(s.Text()); len(a) != 0 {
			//reverse the order
			for i,j := 0,len(a)-1; i < j; i,j = i+1,j-1 {
				a[i], a[j] = a[j], a[i]
			}
			image = append(image, a)
		}
	}
	if s.Err() != nil {
		return image, s.Err()
	}
	return image, nil
}

// convert parses one text line of '0'/'1' characters into a bit slice.
// A '#' starts a comment: the rest of the line is ignored.
func convert(s string) (result []int) {
	for _, r := range s {
		switch r {
		case '#':
			return result
		case '0':
			result = append(result, 0)
		case '1':
			result = append(result, 1)
		}
	}
	return result
}
package main

import "fmt"

// main prints 0..199 in decimal, binary, hex and as a quoted character, one
// row per value.
func main() {
	fmt.Println("here are more numbers... utF8")
	z := 0
	for z < 200 {
		fmt.Printf("%d \t %b \t %#x \t %q \n", z, z, z, z)
		z++
	}
}
package bufferpool

import (
	"log"
	"os"
	"runtime"
	"sync/atomic"
	"unsafe"
)

// BufferPool is the public allocator contract.
type BufferPool interface {
	Alloc(length int) ([]byte, error)
	Release(buffer []byte)
}

/*
 * array / map ~= 30 : 1
 * integer assignment performace
 * num array map
 * 1000*1000 385us 11788 us
 * 1000*1000*1000 349899us 11776979us
 **/

// TODO: two separate locks are used, which may hurt performance; consider
// merging the locks and streamlining the code path.
//
// bufferpool hands out fixed-size chunks from pre-allocated arenas. Each
// chunk is prefixed by one byte: the size-class index for pooled chunks, or
// 'B' for oversized allocations tracked in memBig. The mtx* fields are
// hand-rolled spinlocks built on atomic CAS.
type bufferpool struct {
	memCache [MEM_ARR_SIZE][]unsafe.Pointer // free lists per size class
	mtxMc    [MEM_ARR_SIZE]*uint32          // per-class spinlock
	memSlice [MEM_ARR_SIZE][]unsafe.Pointer // backing storage for memCache
	memCap   [MEM_ARR_SIZE]int              // chunks allocated per class
	memBig   map[unsafe.Pointer][]byte      // oversized allocations, keyed by header pointer
	mtxMb    *uint32                        // spinlock for memBig
}

var (
	// gpool pins every arena so the GC never reclaims memory that chunks
	// still point into.
	gpool [][]byte
)

func init() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	gpool = make([][]byte, 256)
}

// New builds a pool and pre-populates every size class.
func New() *bufferpool {
	bp := &bufferpool{
		memBig: make(map[unsafe.Pointer][]byte, 8192),
		mtxMb:  new(uint32),
	}
	for idx := 0; idx < MEM_ARR_SIZE; idx++ {
		bp.mtxMc[idx] = new(uint32)
		bp.memSlice[idx] = make([]unsafe.Pointer, memCnt[idx]*5*(idx+1))
		bp.allocMemory(idx)
	}
	return bp
}

// allocMemory carves a fresh arena for size class idx into chunks and appends
// them to the class's free list, growing memSlice when needed. The first byte
// of each chunk stores idx; the returned pointers skip past it.
// Caller must hold mtxMc[idx] (or be in single-threaded init).
func (bp *bufferpool) allocMemory(idx int) {
	size := memSize[idx]
	num := memCnt[idx]
	pool := make([]byte, size*num)
	gpool = append(gpool, pool)

	var list []unsafe.Pointer
	mc := bp.memCache[idx]
	switch {
	case mc == nil:
		list = bp.memSlice[idx][:num]
		bp.memCap[idx] = num
	case mc != nil && cap(bp.memSlice[idx]) >= (bp.memCap[idx]+num)*2:
		// TODO: handle the slice size better
		copy(bp.memSlice[idx], mc)
		list = bp.memSlice[idx][len(mc) : len(mc)+num]
		bp.memCap[idx] += num
	case mc != nil && cap(bp.memSlice[idx]) < (bp.memCap[idx]+num)*2:
		bp.memSlice[idx] = make([]unsafe.Pointer, (bp.memCap[idx]+num)*2)
		copy(bp.memSlice[idx], mc)
		list = bp.memSlice[idx][len(mc) : len(mc)+num]
		bp.memCap[idx] += num
	default:
	}
	bp.memCache[idx] = bp.memSlice[idx][:len(mc)+len(list)]

	// Stamp each chunk's class byte and record the pointer just past it.
	for pre, cur := 0, 1; cur-1 < num; pre, cur = cur*size, cur+1 {
		pool[pre] = uint8(idx)
		list[cur-1] = unsafe.Pointer(&pool[pre+1])
	}
}

// Alloc returns a []byte of exactly `length` viewing pooled memory.
func (bp *bufferpool) Alloc(length int) (buf []byte, e error) {
	ptr, e := bp.AllocPointer(length)
	if e != nil {
		return nil, e
	}
	// Reinterpret the raw pointer as a byte array and slice it to length
	// (capacity clamped so append cannot spill into the next chunk's data).
	buf = (*((*[BUF_MAX_LEN]byte)(unsafe.Pointer(ptr))))[:length:length]
	return buf, nil
}

// Free returns a buffer obtained from Alloc to the pool.
func (bp *bufferpool) Free(buf []byte) {
	bp.FreePointer(unsafe.Pointer(&buf[0]))
}

// AllocPointer allocates `length` usable bytes. Requests of MAX_SIZE or more
// bypass the classes: a fresh slice tagged 'B' is tracked in memBig.
func (bp *bufferpool) AllocPointer(length int) (unsafe.Pointer, error) {
	switch {
	case length >= MAX_SIZE:
		buffer := make([]byte, length+1)
		buffer[0] = 'B'
		bp.mbLock()
		bp.memBig[unsafe.Pointer(&buffer[0])] = buffer
		bp.mbUnLock()
		return unsafe.Pointer(&buffer[1]), nil
	default:
		return bp.allocPointer(length)
	}
}

// FreePointer releases a pointer from AllocPointer. The byte just before ptr
// tells whether it is a big allocation ('B') or a pooled chunk (class index).
func (bp *bufferpool) FreePointer(ptr unsafe.Pointer) {
	switch buffer := *((*[4]byte)(unsafe.Pointer(uintptr(ptr) - 1))); {
	case buffer[0] == 'B':
		bp.mbLock()
		delete(bp.memBig, unsafe.Pointer(&buffer[0]))
		bp.mbUnLock()
	default:
		bp.freePointer(ptr)
	}
}

// allocPointer pops a chunk from the smallest class that fits. When the last
// chunk of a class is taken, a new arena is carved immediately.
func (bp *bufferpool) allocPointer(length int) (p unsafe.Pointer, e error) {
	for idx := 0; idx < MEM_ARR_SIZE; idx++ {
		if length <= memSize[idx]-1 {
			bp.mcLock(idx)
			switch mc := bp.memCache[idx]; {
			case len(mc) > 1:
				p = mc[0]
				bp.memCache[idx] = mc[1:]
			case len(mc) == 1:
				p = mc[0]
				bp.memCache[idx] = mc[1:]
				bp.allocMemory(idx)
			/*case len(mc) == 0:
			return nil, os.ErrInvalid*/
			default:
				bp.mcUnLock(idx)
				return nil, os.ErrInvalid
			}
			bp.mcUnLock(idx)
			//log.Println("alloc ", p)
			return p, nil
		}
	}
	return p, os.ErrInvalid
}

// freePointer pushes a pooled chunk back on its class free list, compacting
// the free list to the front of memSlice when it has no spare capacity.
func (bp *bufferpool) freePointer(ptr unsafe.Pointer) {
	if idx := int(*((*uint8)(unsafe.Pointer((uintptr(ptr) - 1))))); idx < MEM_ARR_SIZE {
		bp.mcLock(idx)
		if cap(bp.memCache[idx])-len(bp.memCache[idx]) == 0 {
			src := bp.memCache[idx]
			dst := bp.memSlice[idx][:len(src)]
			copy(dst, src)
			/*log.Println(idx, cap(bp.memSlice[idx]),*/
			//"src", len(src), cap(src),
			/*"dst", len(dst), cap(dst))*/
			bp.memCache[idx] = dst
		}
		//log.Println("free ", ptr)
		bp.memCache[idx] = append(bp.memCache[idx], ptr)
		bp.mcUnLock(idx)
	}
}

// mcLock spins until the class-idx lock is acquired.
func (bp *bufferpool) mcLock(idx int) {
	for !atomic.CompareAndSwapUint32(bp.mtxMc[idx], 0, 1) {
		runtime.Gosched()
	}
}

func (bp *bufferpool) mcUnLock(idx int) {
	atomic.StoreUint32(bp.mtxMc[idx], 0)
}

// mbLock spins until the big-allocation lock is acquired.
func (bp *bufferpool) mbLock() {
	for !atomic.CompareAndSwapUint32(bp.mtxMb, 0, 1) {
		runtime.Gosched()
	}
}

func (bp *bufferpool) mbUnLock() {
	atomic.StoreUint32(bp.mtxMb, 0)
}
package raft // specify a done channel for cancellation, ensures that only one goroutine that // sends to ch can be effective func sendWithCancellation(ch chan struct{}, done chan struct{}) { // for { // seems meaningless // select { // case <-done: // return // default: // select { // case ch <- struct{}{}: // default: // } // } // } select { case <-ch: // consume anything in channel if exists, hopefully we reduce number of waiting goroutines default: } select { case <-done: // abort case ch <- struct{}{}: // send } } func send(ch chan struct{}) { //send a signal so that some one does not block select { case <-ch: // consume anything in channel if exists default: } ch <- struct{}{} } func consume(ch chan struct{}) { select { case <-ch: // consume anything in channel if exists default: } }
package commands

import (
	"fmt"
	"strings"
	"time"

	"github.com/go-telegram-bot-api/telegram-bot-api"
)

// GlobalCommand will handle a /global command from a chat.
// NOTE(review): the original doc said it returns "the global scoreboard", but
// Execute currently sends the wall-clock time — confirm which is intended.
type GlobalCommand struct {
	bot *tgbotapi.BotAPI
}

// SetBotAPI is used to make the bot api available for the handler.
func (gc *GlobalCommand) SetBotAPI(bot *tgbotapi.BotAPI) {
	gc.bot = bot
}

// IsCommandMatch will check if the message string starts with the /global command.
func (gc *GlobalCommand) IsCommandMatch(update *tgbotapi.Update) bool {
	return strings.HasPrefix(strings.ToLower(update.Message.Text), "/global")
}

// PreProcessText does nothing to the message as its not used.
func (gc *GlobalCommand) PreProcessText(update *tgbotapi.Update) error {
	return nil
}

// Execute sends the current time to the chat where the command was posted.
func (gc *GlobalCommand) Execute(update *tgbotapi.Update) error {
	hour, min, sec := time.Now().Clock()
	// Fixed: "%d:%d:%d" produced un-padded times like "9:5:3"; zero-pad each
	// component so the output reads "09:05:03".
	timestring := fmt.Sprintf("%02d:%02d:%02d", hour, min, sec)
	msg := tgbotapi.NewMessage(update.Message.Chat.ID, timestring)
	_, err := gc.bot.Send(msg)
	return err
}
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"
)

// Fixed compile errors in this file: http.Client's field is Timeout (was
// TimeOut), json.Decoder's method is Decode (was Decoder), `err :=` after an
// earlier declaration redeclared err, HttpPostRequest referenced an undefined
// `URL`, `errors` was used but not imported, and `io/ioutil`/`os` were
// imported but unused.
const (
	TIMEOUT     = 60
	METHOD_POST = "POST"
	EXIT_CODE   = -1
	BODY_TYPE   = "application/json"
	ZERO        = 0
)

var client *HttpClient

// HttpClient wraps an *http.Client configured with a request timeout.
type HttpClient struct {
	Client *http.Client
}

// NewHttpClient builds a client whose requests time out after `timeout` seconds.
func NewHttpClient(timeout int) *HttpClient {
	return &HttpClient{
		Client: &http.Client{
			Timeout: time.Duration(timeout) * time.Second,
		},
	}
}

// InitHttp initializes the package-level client with the default timeout.
func InitHttp() error {
	client = NewHttpClient(TIMEOUT)
	return nil
}

// HttpReq is the common response envelope returned by the remote API.
type HttpReq struct {
	Errno  int64  `json:"errno"`
	Errmsg string `json:"errmsg"`
	Data   string `json:"data"`
}

// Params is the request payload.
type Params struct {
	Param1 string `json:"param1"`
	Param2 string `json:"param2"`
	Param3 string `json:"param3"`
	Param4 string `json:"param4"`
}

// PostHttpRequest posts params via http.Post (variant 1).
// A nil params is treated as a no-op and returns nil.
func PostHttpRequest(params *Params) error {
	var httpreq HttpReq
	if params == nil {
		return nil
	}
	// endpoint
	url := fmt.Sprintf("http://xxx.xxx.xxx.xxx/api/v1/post")
	postParams, err := json.Marshal(params)
	if err != nil {
		fmt.Printf("parse post request params failed:%v\n", err)
		return err
	}
	resp, err := http.Post(url, BODY_TYPE, strings.NewReader(string(postParams)))
	if err != nil {
		fmt.Printf("post request failed:%v\n", err)
		return err
	}
	defer resp.Body.Close()
	err = json.NewDecoder(resp.Body).Decode(&httpreq)
	if err != nil {
		fmt.Printf("parse resp body failed:%v\n", err)
		return err
	}
	if httpreq.Errno != ZERO {
		return errors.New("Request failed")
	}
	return nil
}

// HttpPostRequest posts params via http.NewRequest + Do (variant 2).
// A nil params is treated as a no-op and returns nil.
// NOTE(review): this uses http.DefaultClient (no timeout) rather than the
// configured `client` — confirm whether that is intentional.
func HttpPostRequest(params *Params) error {
	var httpreq HttpReq
	if params == nil {
		return nil
	}
	url := fmt.Sprintf("http://xxx.xxx.xxx.xxx:xxx/api/v1/post")
	postParams, err := json.Marshal(params)
	if err != nil {
		fmt.Printf("parse post request params failed:%v\n", err)
		return err
	}
	req, err := http.NewRequest(METHOD_POST, url, strings.NewReader(string(postParams)))
	if err != nil {
		fmt.Printf("new request failed:%v\n", err)
		return err
	}
	req.Header.Add("Content-Type", BODY_TYPE)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Printf("do req failed:%v\n", err)
		return err
	}
	defer resp.Body.Close()
	err = json.NewDecoder(resp.Body).Decode(&httpreq)
	if err != nil {
		fmt.Printf("parse resp body failed:%v\n", err)
		return err
	}
	if httpreq.Errno != ZERO {
		return errors.New("Request failed")
	}
	return nil
}
package main

import (
	"fmt"
	"gitgo/src/test/popcount"
)

// main prints the result of four population-count implementations for the
// input 6 (binary 110). Presumably each variant returns the number of set
// bits (so all four lines should print 2) — confirm in package popcount.
func main() {
	fmt.Println(popcount.PopCount(6))
	fmt.Println(popcount.PopCountWithForLoop(6))
	fmt.Println(popcount.PopCountWithBitMove(6))
	fmt.Println(popcount.PopCountWithCleanLowest(6))
}
package controllers

import (
	"btcu-final/clientSDK"
	"btcu-final/server/models"
	"btcu-final/server/utils"
	"fmt"
	"github.com/astaxie/beego"
	"log"
	"time"
)

// RegisterController serves the registration page and handles sign-ups.
type RegisterController struct {
	beego.Controller
}

// Get renders the registration form.
func (this *RegisterController) Get() {
	this.TplName = "register.html"
}

// Post handles the registration form: it rejects duplicate usernames, hashes
// the password, generates a key pair and stores the new user, returning the
// private key to the client.
func (this *RegisterController) Post() {
	// Read the form fields.
	username := this.GetString("username")
	password := this.GetString("password")
	repassword := this.GetString("repassword")
	// NOTE(review): this logs the plaintext password — consider removing.
	fmt.Println(username, password, repassword)

	// Before registering, check whether the username is taken; if it is,
	// return an error response.
	id := models.QueryUserWithUsername(username)
	fmt.Println("id:", id)
	if id > 0 {
		this.Data["json"] = map[string]interface{}{"code": 0, "message": "用户名已经存在"}
		this.ServeJSON()
		return
	}

	// The stored password is the MD5 digest, so login must compare the MD5 of
	// the submitted password against the stored value.
	password = utils.MD5(password)
	fmt.Println("md5后:", password)

	// Generate a key pair for the new user; the private key is returned to
	// the user in the response.
	privateKey, publicKey, err := clientSDK.GenerateKeys()
	if err != nil {
		// Fixed: this was log.Fatal(err), which would terminate the whole
		// server process on a single failed registration. Log the error and
		// answer with the normal failure response instead.
		log.Println(err)
		this.Data["json"] = map[string]interface{}{"code": 0, "message": "注册失败"}
		this.ServeJSON()
		return
	}
	fmt.Println("publicKey:", publicKey)
	user := models.User{0, username, password, string(*publicKey), 0, time.Now().Unix()}
	_, err = models.InsertUser(user)
	if err != nil {
		this.Data["json"] = map[string]interface{}{"code": 0, "message": "注册失败"}
	} else {
		this.Data["json"] = map[string]interface{}{"code": 1, "message": "注册成功", "privateKey": privateKey}
	}
	this.ServeJSON()
}
package pkg

import (
	"context"
	"fmt"
	"github.com/machinebox/progress"
	"github.com/rylio/ytdl"
	"log"
	"net/http"
	"net/url"
	"os"
	"path"
	"strconv"
	"sync"
	"time"
)

// worker is one download job: its source URL, shared progress State, and the
// output file names derived from the video title.
type worker struct {
	*State
	uri          url.URL
	baseFileName string
	mp4File      string
	mp3File      string
}

// dispatcher owns the download pipeline: it accepts new tasks, fans progress
// updates out to connected clients, and hands finished downloads to the
// converter channel.
type dispatcher struct {
	newTask    chan *worker
	connect    chan chan State
	disconnect chan chan State
	update     chan State
	converter  chan *worker
	pool       []*worker
	lock       sync.RWMutex
}

// NewDispatcher builds a dispatcher with all channels ready for Downloading.
func NewDispatcher() *dispatcher {
	return &dispatcher{
		newTask:    make(chan *worker),
		connect:    make(chan chan State),
		disconnect: make(chan chan State),
		update:     make(chan State, 0),
		pool:       make([]*worker, 0),
		converter:  make(chan *worker, 16),
	}
}

// Downloading is the dispatcher's event loop. It must run in its own
// goroutine: it starts one download goroutine per incoming task, tracks the
// set of connected client channels, and broadcasts every State update.
func (d *dispatcher) Downloading() {
	clients := make([]chan State, 0)
	for {
		select {
		case w := <-d.newTask:
			d.lock.Lock()
			d.pool = append(d.pool, w)
			d.lock.Unlock()
			log.Printf("Added: %s (%s)", w.Url, w.Vid.Title)
			go func() {
				file, _ := os.Create(w.mp4File)
				if err := w.Download(d.update, file); err != nil {
					log.Println(err)
					w.Error = err.Error()
					d.update <- *w.State
					return
				}
				w.Status = "Scheduled"
				w.Percent = -1
				d.update <- *w.State
				if err := file.Close(); err != nil {
					log.Println(err)
				}
				d.converter <- w
			}()
		case upd := <-d.connect:
			clients = append(clients, upd)
			log.Printf("Connect: %d", len(clients))
		case upd := <-d.disconnect:
			for k, v := range clients {
				if v == upd {
					clients = append(clients[:k], clients[k+1:]...)
					// Fixed: this was `continue`, which kept ranging over the
					// slice just mutated in place — indices past k are shifted,
					// so further iterations compare against the wrong elements.
					// The channel appears at most once, so stop here.
					break
				}
			}
			log.Printf("Disconnect: %d", len(clients))
		case state := <-d.update:
			for _, q := range clients {
				q <- state
			}
		}
	}
}

// fetchMeta resolves the video info for the worker's URL and derives the
// sanitized output file names from the title.
func (w *worker) fetchMeta() error {
	w.Status = "Receiving info"
	vid, err := ytdl.GetVideoInfo(w.uri.String())
	if err != nil {
		return err
	}
	w.Vid = vid
	w.baseFileName = sanitizeFilename(w.Vid.Title)
	w.mp4File = path.Join("out", w.baseFileName+".mp4")
	w.mp3File = path.Join("out", w.baseFileName+".mp3")
	return nil
}

// Download streams the video's first format into fp, publishing progress
// snapshots (percent + estimated remaining time) on q every 100ms.
func (w *worker) Download(q chan State, fp *os.File) error {
	var (
		downUrl *url.URL
		resp    *http.Response
		err     error
		wg      sync.WaitGroup
	)
	q <- *w.State
	if downUrl, err = w.Vid.GetDownloadURL(w.Vid.Formats[0]); err != nil {
		return err
	}
	// HEAD first to learn the total size for progress reporting.
	if resp, err = http.Head(downUrl.String()); err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("status code: %d", resp.StatusCode)
	}
	size, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
	w.DownloadSize = int64(size)
	r := progress.NewWriter(fp)
	wg.Add(1)
	go func() {
		ctx := context.Background()
		progressChan := progress.NewTicker(ctx, r, w.DownloadSize, 100*time.Millisecond)
		for p := range progressChan {
			w.Remaining = p.Remaining().Round(time.Second)
			w.Percent = p.Percent()
			q <- *w.State
		}
		wg.Done()
	}()
	w.Status = "Downloading"
	q <- *w.State
	if err = w.Vid.Download(w.Vid.Formats[0], r); err != nil {
		return err
	}
	wg.Wait()
	return nil
}
package main

// LeetCode 309: Best Time to Buy and Sell Stock with Cooldown.
//
// Given prices[i], the stock price on day i, compute the maximum profit with
// unlimited transactions, at most one open position at a time, and a one-day
// cooldown after every sell.
//
// Example: [1,2,3,0,2] -> 3 (buy, sell, cooldown, buy, sell).
//
// Dynamic programming over three states per day, kept as rolling scalars:
//   hold   - currently holding stock
//   frozen - just sold today (cooldown tomorrow)
//   free   - holding nothing and free to buy
// Transitions:
//   hold'   = max(hold, free - price)   // a buy cannot follow a cooldown day
//   frozen' = hold + price              // sell today
//   free'   = max(frozen, free)         // idle
func maxProfit(prices []int) int {
	n := len(prices)
	if n < 1 {
		return 0
	}
	hold, frozen, free := -prices[0], 0, 0
	for day := 1; day < n; day++ {
		nextHold := max(hold, free-prices[day])
		nextFrozen := hold + prices[day]
		nextFree := max(frozen, free)
		hold, frozen, free = nextHold, nextFrozen, nextFree
	}
	// The answer never ends holding stock.
	return max(frozen, free)
}

// max returns the larger of x and y.
func max(x, y int) int {
	if x > y {
		return x
	}
	return y
}

func main() {
	println(maxProfit([]int{8, 6, 4, 3, 3, 2, 3, 5, 8, 3, 8, 2, 6}))
}
package main

import "fmt"

// Element is a generic container element (empty interface).
type Element interface{}

// vector is a minimal slice-backed container demonstrating interface values.
type vector struct {
	a []Element
}

// At returns the element at index i. Panics if i is out of range.
func (p *vector) At(i int) Element {
	return p.a[i]
}

// Set stores e at index i. Panics if i is out of range.
func (p *vector) Set(i int, e Element) {
	p.a[i] = e
}

func main() {
	// Fixed: new(vector) left the backing slice nil, so v.Set(1, "abc")
	// panicked with "index out of range". Allocate room for two elements.
	v := &vector{a: make([]Element, 2)}
	fmt.Println(*v)
	v.Set(1, "abc")
	fmt.Println(v)
}
package main

// User is an application user (recipe author).
type User struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}

// RecipeCategory is a node in the recipe category tree; ParentID links to the
// parent category.
type RecipeCategory struct {
	ID       int    `json:"id"`
	ParentID int    `json:"parent_id"`
	Name     string `json:"name"`
}

// RecipeCategoryRecipe is the many-to-many join between recipe categories and
// recipes.
type RecipeCategoryRecipe struct {
	RecipeCategoryID int `json:"recipe_category_id"`
	RecipeID         int `json:"recipe_id"`
}

// Recipe is a stored recipe; AuthorID references User.ID.
type Recipe struct {
	ID          int    `json:"id"`
	Name        string `json:"name"`
	Description string `json:"description"`
	AuthorID    int    `json:"author_id"`
}

// RecipeResponse is the API representation of a recipe with its author name,
// category names and steps denormalized in.
type RecipeResponse struct {
	ID          int            `json:"id"`
	Name        string         `json:"name"`
	Description string         `json:"description"`
	Author      string         `json:"author"`
	Category    []string       `json:"category"`
	Steps       []StepResponse `json:"steps"`
}

// StepResponse is the API representation of one recipe step including its
// ingredients.
type StepResponse struct {
	StepNumber  int                  `json:"step_number"`
	Description string               `json:"description"`
	Timer       int                  `json:"timer"`
	Image       string               `json:"image"`
	Ingredients []IngredientResponse `json:"ingredients"`
}

// IngredientResponse is the API representation of an ingredient amount within
// a step.
type IngredientResponse struct {
	Name   string `json:"name"`
	Amount int    `json:"amount"`
	Unit   string `json:"unit"`
}

// IngredientCategory is a node in the ingredient category tree; ParentID
// links to the parent category.
type IngredientCategory struct {
	ID          int    `json:"id"`
	ParentID    int    `json:"parent_id"`
	Name        string `json:"name"`
	Description string `json:"description"`
}

// IngredientCategoryIngredient is the many-to-many join between ingredient
// categories and ingredients.
type IngredientCategoryIngredient struct {
	IngredientCategoryID int `json:"ingredient_category_id"`
	IngredientID         int `json:"ingredient_id"`
}

// Ingredient is a stored ingredient with display color and image.
type Ingredient struct {
	ID    int    `json:"id"`
	Name  string `json:"name"`
	Color string `json:"color"`
	IMG   string `json:"img"`
}

// Step is a stored recipe step; RecipeID references Recipe.ID.
type Step struct {
	ID          int    `json:"id"`
	RecipeID    int    `json:"recipe_id"`
	StepNumber  int    `json:"step_number"`
	Description string `json:"description"`
	Timer       int    `json:"timer"`
	Image       string `json:"image"`
}

// StepIngredient links an ingredient (with amount and unit) to a recipe step.
type StepIngredient struct {
	RecipeID     int    `json:"recipe_id"`
	IngredientID int    `json:"ingredient_id"`
	StepID       int    `json:"step_id"`
	Amount       int    `json:"amount"`
	Unit         string `json:"unit"`
}
package oneagent_mutation

import (
	"testing"

	dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
	"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/deploymentmetadata"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
)

// TestAddPreloadEnv verifies the preload env var is added and points into the install path.
func TestAddPreloadEnv(t *testing.T) {
	t.Run("Add preload env", func(t *testing.T) {
		container := &corev1.Container{}
		installPath := "path/test"

		addPreloadEnv(container, installPath)

		require.Len(t, container.Env, 1)
		// testify convention: expected value first, actual value second.
		assert.Equal(t, preloadEnv, container.Env[0].Name)
		assert.Contains(t, container.Env[0].Value, installPath)
	})
}

// TestAddNetworkZoneEnv verifies the network-zone env var is set verbatim.
func TestAddNetworkZoneEnv(t *testing.T) {
	t.Run("Add networkZone env", func(t *testing.T) {
		container := &corev1.Container{}
		networkZone := "testZone"

		addNetworkZoneEnv(container, networkZone)

		require.Len(t, container.Env, 1)
		assert.Equal(t, networkZoneEnv, container.Env[0].Name)
		assert.Equal(t, networkZone, container.Env[0].Value)
	})
}

// TestAddProxyEnv verifies the proxy env var is sourced from a reference (EnvVarSource).
func TestAddProxyEnv(t *testing.T) {
	t.Run("Add proxy env", func(t *testing.T) {
		container := &corev1.Container{}

		addProxyEnv(container)

		require.Len(t, container.Env, 1)
		// IsType takes the expected type first, the object under test second.
		assert.IsType(t, &corev1.EnvVarSource{}, container.Env[0].ValueFrom)
	})
}

// TestAddInstallerInitEnvs verifies all installer-related env vars are set in order.
func TestAddInstallerInitEnvs(t *testing.T) {
	t.Run("Add installer init env", func(t *testing.T) {
		container := &corev1.Container{}
		testVolumeMode := "testMode"
		installerInfo := getTestInstallerInfo()

		addInstallerInitEnvs(container, installerInfo, testVolumeMode)

		require.Len(t, container.Env, expectedBaseInitContainerEnvCount)
		assert.Equal(t, installerInfo.flavor, container.Env[0].Value)
		assert.Equal(t, installerInfo.technologies, container.Env[1].Value)
		assert.Equal(t, installerInfo.installPath, container.Env[2].Value)
		assert.Equal(t, installerInfo.installerURL, container.Env[3].Value)
		assert.Equal(t, installerInfo.version, container.Env[4].Value)
		assert.Equal(t, testVolumeMode, container.Env[5].Value)
		assert.Equal(t, "true", container.Env[6].Value)
	})
}

// TestAddContainerInfoInitEnv verifies the container name/image env pair is added.
func TestAddContainerInfoInitEnv(t *testing.T) {
	t.Run("Add container info init env", func(t *testing.T) {
		container := &corev1.Container{}

		addContainerInfoInitEnv(container, 1, "test-pod", "test-namespace")

		require.Len(t, container.Env, 2)
	})
}

// TestAddDeploymentMetadataEnv verifies the deployment metadata env carries the
// cluster ID and the deployment type matching the configured OneAgent mode.
func TestAddDeploymentMetadataEnv(t *testing.T) {
	t.Run("Add cloudNative deployment metadata env", func(t *testing.T) {
		container := &corev1.Container{}
		dynakube := dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					CloudNativeFullStack: &dynatracev1beta1.CloudNativeFullStackSpec{},
				},
			},
		}

		addDeploymentMetadataEnv(container, dynakube, testClusterID)

		require.Len(t, container.Env, 1)
		assert.Contains(t, container.Env[0].Value, testClusterID)
		assert.Contains(t, container.Env[0].Value, deploymentmetadata.CloudNativeDeploymentType)
	})
	t.Run("Add appMonitoring deployment metadata env", func(t *testing.T) {
		container := &corev1.Container{}
		dynakube := dynatracev1beta1.DynaKube{
			Spec: dynatracev1beta1.DynaKubeSpec{
				OneAgent: dynatracev1beta1.OneAgentSpec{
					ApplicationMonitoring: &dynatracev1beta1.ApplicationMonitoringSpec{},
				},
			},
		}

		addDeploymentMetadataEnv(container, dynakube, testClusterID)

		require.Len(t, container.Env, 1)
		assert.Contains(t, container.Env[0].Value, testClusterID)
		assert.Contains(t, container.Env[0].Value, deploymentmetadata.ApplicationMonitoringDeploymentType)
	})
}

// TestAddVersionDetectionEnvs verifies the fieldRef-based version-detection env
// vars are added, and that pre-existing env vars are never overwritten.
func TestAddVersionDetectionEnvs(t *testing.T) {
	t.Run("adds defaults", func(t *testing.T) {
		container := &corev1.Container{}

		addVersionDetectionEnvs(container, defaultVersionLabelMapping)

		require.Len(t, container.Env, len(defaultVersionLabelMapping))
		for _, envvar := range container.Env {
			assert.Equal(t, defaultVersionLabelMapping[envvar.Name], envvar.ValueFrom.FieldRef.FieldPath)
		}
	})
	t.Run("not overwrite present envs", func(t *testing.T) {
		testVersion := "1.2.3"
		testProduct := "testy"
		container := &corev1.Container{
			Env: []corev1.EnvVar{
				{Name: releaseVersionEnv, Value: testVersion},
				{Name: releaseProductEnv, Value: testProduct},
			},
		}

		addVersionDetectionEnvs(container, defaultVersionLabelMapping)

		require.Len(t, container.Env, 2)
		assert.Equal(t, testVersion, container.Env[0].Value)
		assert.Equal(t, testProduct, container.Env[1].Value)
	})
	t.Run("partial addition", func(t *testing.T) {
		testVersion := "1.2.3"
		container := &corev1.Container{
			Env: []corev1.EnvVar{
				{Name: releaseVersionEnv, Value: testVersion},
			},
		}

		addVersionDetectionEnvs(container, defaultVersionLabelMapping)

		require.Len(t, container.Env, 2)
		assert.Equal(t, testVersion, container.Env[0].Value)
		assert.Equal(t, defaultVersionLabelMapping[releaseProductEnv], container.Env[1].ValueFrom.FieldRef.FieldPath)
	})
}
package responder

import (
	"fmt"

	"github.com/family/translator"
)

// _response enumerates the supported response types.
type _response int

// Enum-like const declaration for supported response types.
// iota continues automatically within a const block, so it only needs to be
// written once.
const (
	ChildAdditionSuccessful _response = iota
	ChildAdditionFailed
	PersonNotFound
	None
)

// Response prints the localized (English) message for the given response type.
// Go switch cases do not fall through, so the break statements the original
// carried were redundant and have been removed.
func Response(responseType _response) {
	switch responseType {
	case ChildAdditionSuccessful:
		fmt.Println(translator.Get("child_addition_succeeded", "en"))
	case ChildAdditionFailed:
		fmt.Println(translator.Get("child_addition_failed", "en"))
	case PersonNotFound:
		fmt.Println(translator.Get("person_not_found", "en"))
	case None:
		fmt.Println(translator.Get("none", "en"))
	}
}

// ResponseWithData prints the given data verbatim as the response.
func ResponseWithData(data string) {
	fmt.Println(data)
}
package main

import (
	"context"
	"flag"
	"fmt"
	"net/http"
	"os"
	"time"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"

	"github.com/dustin/go-humanize"
	_ "github.com/vicanso/diving/controller"
	"github.com/vicanso/diving/log"
	"github.com/vicanso/diving/router"
	_ "github.com/vicanso/diving/schedule"
	"github.com/vicanso/diving/util"
	"github.com/vicanso/elton"
	"github.com/vicanso/elton/middleware"
	maxprocs "go.uber.org/automaxprocs/maxprocs"
)

var (
	// runMode is set from the -mode flag; "check" runs a health probe and exits.
	runMode string
)

// getListen returns the address to listen on: the LISTEN environment
// variable when set, otherwise ":7001".
func getListen() string {
	listen := os.Getenv("LISTEN")
	if listen == "" {
		listen = ":7001"
	}
	return listen
}

// check probes the local /ping endpoint and exits the process with status 0
// on success or 1 on failure (used as a container health check).
func check() {
	listen := getListen()
	url := ""
	// A bare ":port" listen address has no host part, so probe loopback.
	if listen[0] == ':' {
		url = "http://127.0.0.1" + listen + "/ping"
	} else {
		url = "http://" + listen + "/ping"
	}
	client := http.Client{
		Timeout: 3 * time.Second,
	}
	resp, err := client.Get(url)
	// NOTE(review): resp.Body is never closed; acceptable here only because
	// the process exits immediately on both paths.
	if err != nil || resp == nil || resp.StatusCode != http.StatusOK {
		os.Exit(1)
		return
	}
	os.Exit(0)
}

// init aligns GOMAXPROCS with the container CPU quota and logs the result.
func init() {
	_, _ = maxprocs.Set(maxprocs.Logger(func(format string, args ...interface{}) {
		value := fmt.Sprintf(format, args...)
		log.Info(context.Background()).Msg(value)
	}))
}

// main wires up the elton HTTP server: error logging, access stats, trace IDs,
// caching/ETag middleware, the /ping health route and all router groups, then
// serves with h2c so both HTTP/1.x and cleartext HTTP/2 are supported.
func main() {
	flag.StringVar(&runMode, "mode", "", "running mode")
	flag.Parse()
	// "-mode check" performs a health probe instead of starting the server.
	if runMode == "check" {
		check()
		return
	}
	listen := getListen()
	e := elton.New()
	e.OnError(func(c *elton.Context, err error) {
		log.Error(c.Context()).
			Str("uri", c.Request.RequestURI).
			Err(err).
			Msg("unexpected error")
	})
	e.Use(middleware.NewRecover())
	e.Use(middleware.NewStats(middleware.StatsConfig{
		OnStats: func(statsInfo *middleware.StatsInfo, c *elton.Context) {
			// Structured access log, one entry per request.
			log.Info(c.Context()).
				Str("ip", statsInfo.IP).
				Str("method", statsInfo.Method).
				Str("uri", statsInfo.URI).
				Int("status", statsInfo.Status).
				Str("latency", statsInfo.Latency.String()).
				Str("size", humanize.Bytes(uint64(statsInfo.Size))).
				Msg("access log")
		},
	}))
	e.Use(middleware.NewDefaultError())
	// Attach a random trace ID to each request context and disable caching.
	e.Use(func(c *elton.Context) error {
		ctx := util.SetTraceID(c.Context(), util.RandomString(8))
		c.WithContext(ctx)
		c.NoCache()
		return c.Next()
	})
	e.Use(middleware.NewDefaultFresh())
	e.Use(middleware.NewDefaultETag())
	e.Use(middleware.NewDefaultResponder())
	// health check
	e.GET("/ping", func(c *elton.Context) (err error) {
		c.Body = "pong"
		return
	})
	groups := router.GetGroups()
	for _, g := range groups {
		e.AddGroup(g)
	}
	// Support both HTTP/1.x and HTTP/2 over cleartext (h2c).
	e.Server = &http.Server{
		Handler: h2c.NewHandler(e, &http2.Server{}),
	}
	log.Info(context.Background()).Msg("server will listen on " + listen)
	err := e.ListenAndServe(listen)
	if err != nil {
		panic(err)
	}
}
// Copyright 2021 The Perses Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dashboard

import (
	"encoding/json"
	"fmt"
	"regexp"

	"gopkg.in/yaml.v2"
)

// CapturingRegexp is just an alias to regexp.Regexp.
// It is used mainly to be able to override the way a regexp is unmarshalled
// and marshalled: it is (de)serialized as its plain source string.
type CapturingRegexp regexp.Regexp

// GetRegexp returns the underlying *regexp.Regexp view of this value.
func (c *CapturingRegexp) GetRegexp() *regexp.Regexp {
	return (*regexp.Regexp)(c)
}

// MarshalText is used during the marshal of a json. It will be considered as a text and not as a json struct.
func (c *CapturingRegexp) MarshalText() ([]byte, error) {
	return []byte(c.GetRegexp().String()), nil
}

// MarshalYAML serializes the regexp as its plain source string.
func (c *CapturingRegexp) MarshalYAML() (interface{}, error) {
	return c.GetRegexp().String(), nil
}

// UnmarshalJSON decodes a JSON string, compiles it and stores the compiled
// regexp in the receiver (via validate).
func (c *CapturingRegexp) UnmarshalJSON(data []byte) error {
	var tmp string
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	if err := c.validate(tmp); err != nil {
		return err
	}
	return nil
}

// UnmarshalYAML decodes a YAML string, compiles it and stores the compiled
// regexp in the receiver (via validate).
func (c *CapturingRegexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var tmp string
	if err := unmarshal(&tmp); err != nil {
		return err
	}
	if err := c.validate(tmp); err != nil {
		return err
	}
	return nil
}

// validate compiles reg and, on success, assigns the compiled regexp to the
// receiver. Note that it both validates and mutates c.
func (c *CapturingRegexp) validate(reg string) error {
	if len(reg) == 0 {
		return fmt.Errorf("regexp cannot be empty")
	}
	if re, err := regexp.Compile(reg); err != nil {
		return err
	} else {
		*c = CapturingRegexp(*re)
	}
	return nil
}

// VariableKind discriminates the type of a dashboard variable; it selects
// which concrete VariableParameter implementation is decoded.
type VariableKind string

const (
	KindPromQLQueryVariable      VariableKind = "PromQLQuery"
	KindLabelNamesQueryVariable  VariableKind = "LabelNamesQuery"
	KindLabelValuesQueryVariable VariableKind = "LabelValuesQuery"
	KindConstantVariable         VariableKind = "Constant"
)

// variableKindMap is the set of valid kinds, used by VariableKind.validate.
var variableKindMap = map[VariableKind]bool{
	KindPromQLQueryVariable:      true,
	KindLabelNamesQueryVariable:  true,
	KindLabelValuesQueryVariable: true,
	KindConstantVariable:         true,
}

// UnmarshalJSON decodes and validates the variable kind.
func (k *VariableKind) UnmarshalJSON(data []byte) error {
	var tmp VariableKind
	// The local 'plain' type drops the custom unmarshaler so the call below
	// does not recurse into this method.
	type plain VariableKind
	if err := json.Unmarshal(data, (*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*k = tmp
	return nil
}

// UnmarshalYAML decodes and validates the variable kind.
func (k *VariableKind) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var tmp VariableKind
	type plain VariableKind
	if err := unmarshal((*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*k = tmp
	return nil
}

// validate checks that the kind is non-empty and one of the known kinds.
func (k *VariableKind) validate() error {
	if len(*k) == 0 {
		return fmt.Errorf("variable.kind cannot be empty")
	}
	if _, ok := variableKindMap[*k]; !ok {
		return fmt.Errorf("unknown variable.kind '%s' used", *k)
	}
	return nil
}

// VariableParameter is the marker interface embedded by every concrete
// parameter type; the concrete type is chosen from Variable.Kind.
type VariableParameter interface {
}

// LabelNamesQueryVariableParameter is representing the parameter to be used when filling the variable by using the HTTP endpoint
// `GET /api/v1/labels`
// More information here: https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
type LabelNamesQueryVariableParameter struct {
	VariableParameter `json:"-" yaml:"-"`
	// Matchers is the repeated series selector argument that selects the series from which to read the label names
	Matchers []string `json:"matchers,omitempty" yaml:"matchers,omitempty"`
	// CapturingRegexp is the regexp used to catch and filter the result of the query.
	CapturingRegexp *CapturingRegexp `json:"capturing_regexp" yaml:"capturing_regexp"`
}

// UnmarshalJSON decodes the parameter and validates its mandatory fields.
func (v *LabelNamesQueryVariableParameter) UnmarshalJSON(data []byte) error {
	var tmp LabelNamesQueryVariableParameter
	type plain LabelNamesQueryVariableParameter
	if err := json.Unmarshal(data, (*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// UnmarshalYAML decodes the parameter and validates its mandatory fields.
func (v *LabelNamesQueryVariableParameter) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var tmp LabelNamesQueryVariableParameter
	type plain LabelNamesQueryVariableParameter
	if err := unmarshal((*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// validate ensures the mandatory capturing regexp is present.
func (v *LabelNamesQueryVariableParameter) validate() error {
	if v.CapturingRegexp == nil {
		return fmt.Errorf("'parameter.capturing_regexp' cannot be empty for a LabelNamesQuery")
	}
	return nil
}

// LabelValuesQueryVariableParameter is representing the parameter to be used when filling the variable by using the HTTP endpoint
// `GET /api/v1/label/<label_name>/values`
// More information here: https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
type LabelValuesQueryVariableParameter struct {
	VariableParameter `json:"-" yaml:"-"`
	LabelName         string `json:"label_name" yaml:"label_name"`
	// Matchers is the repeated series selector argument that selects the series from which to read the label values
	Matchers []string `json:"matchers,omitempty" yaml:"matchers,omitempty"`
	// CapturingRegexp is the regexp used to catch and filter the result of the query.
	CapturingRegexp *CapturingRegexp `json:"capturing_regexp" yaml:"capturing_regexp"`
}

// UnmarshalJSON decodes the parameter and validates its mandatory fields.
func (v *LabelValuesQueryVariableParameter) UnmarshalJSON(data []byte) error {
	var tmp LabelValuesQueryVariableParameter
	type plain LabelValuesQueryVariableParameter
	if err := json.Unmarshal(data, (*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// UnmarshalYAML decodes the parameter and validates its mandatory fields.
func (v *LabelValuesQueryVariableParameter) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var tmp LabelValuesQueryVariableParameter
	type plain LabelValuesQueryVariableParameter
	if err := unmarshal((*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// validate ensures the label name and the capturing regexp are present.
func (v *LabelValuesQueryVariableParameter) validate() error {
	if len(v.LabelName) == 0 {
		return fmt.Errorf("'parameter.label_name' cannot be empty for a LabelValuesQuery")
	}
	if v.CapturingRegexp == nil {
		return fmt.Errorf("'parameter.capturing_regexp' cannot be empty for a LabelValuesQuery")
	}
	return nil
}

// PromQLQueryVariableParameter fills the variable from the result of a PromQL
// range query.
type PromQLQueryVariableParameter struct {
	VariableParameter `json:"-" yaml:"-"`
	// Expr is the PromQL expression to be used when variable should be filled by using the HTTP endpoint
	// `GET /api/v1/query_range`
	// More information available here: https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
	Expr string `json:"expr,omitempty" yaml:"expr,omitempty"`
	// LabelName is the name of the label which is used once the PromQL query is performed to select the labelValue in the metric
	LabelName       string           `json:"label_name" yaml:"label_name"`
	CapturingRegexp *CapturingRegexp `json:"capturing_regexp" yaml:"capturing_regexp"`
}

// UnmarshalJSON decodes the parameter and validates its mandatory fields.
func (v *PromQLQueryVariableParameter) UnmarshalJSON(data []byte) error {
	var tmp PromQLQueryVariableParameter
	type plain PromQLQueryVariableParameter
	if err := json.Unmarshal(data, (*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// UnmarshalYAML decodes the parameter and validates its mandatory fields.
func (v *PromQLQueryVariableParameter) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var tmp PromQLQueryVariableParameter
	type plain PromQLQueryVariableParameter
	if err := unmarshal((*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// validate ensures the expression, label name and capturing regexp are present.
func (v *PromQLQueryVariableParameter) validate() error {
	if len(v.Expr) == 0 {
		return fmt.Errorf("parameter.expr cannot be empty for a PromQLQuery")
	}
	if len(v.LabelName) == 0 {
		return fmt.Errorf("parameter.label_name cannot be empty for a PromQLQuery")
	}
	if v.CapturingRegexp == nil {
		return fmt.Errorf("parameter.capturing_regexp cannot be empty for a PromQLQuery")
	}
	return nil
}

// ConstantVariableParameter fills the variable from a fixed list of values.
type ConstantVariableParameter struct {
	VariableParameter `json:"-" yaml:"-"`
	Values            []string `json:"values" yaml:"values"`
}

// UnmarshalJSON decodes the parameter and validates its mandatory fields.
func (v *ConstantVariableParameter) UnmarshalJSON(data []byte) error {
	var tmp ConstantVariableParameter
	type plain ConstantVariableParameter
	if err := json.Unmarshal(data, (*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// UnmarshalYAML decodes the parameter and validates its mandatory fields.
func (v *ConstantVariableParameter) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var tmp ConstantVariableParameter
	type plain ConstantVariableParameter
	if err := unmarshal((*plain)(&tmp)); err != nil {
		return err
	}
	if err := (&tmp).validate(); err != nil {
		return err
	}
	*v = tmp
	return nil
}

// validate ensures at least one value is provided.
func (v *ConstantVariableParameter) validate() error {
	if len(v.Values) == 0 {
		return fmt.Errorf("parameter.values cannot be empty for a constant variable")
	}
	return nil
}

// tmpDashboardVariable mirrors Variable but keeps Parameter as a raw map so it
// can be re-marshalled and decoded into the kind-specific parameter type.
type tmpDashboardVariable struct {
	Kind          VariableKind           `json:"kind" yaml:"kind"`
	DisplayedName string                 `json:"displayed_name,omitempty" yaml:"displayed_name,omitempty"`
	Hide          bool                   `json:"hide" yaml:"hide"`
	Selected      string                 `json:"selected,omitempty" yaml:"selected,omitempty"`
	Parameter     map[string]interface{} `json:"parameter" yaml:"parameter"`
}

type Variable struct {
	// Kind is the type of the variable. Depending of the value of Kind, it will change the content of Parameter.
	Kind VariableKind `json:"kind" yaml:"kind"`
	// DisplayedName is the name that would be displayed by the UI. It should be filled only if Hide is set to false.
	// It is not the name used to reference the variable in others variables or in the different panels.
	// The name used for the reference is the key of map of variables
	DisplayedName string `json:"displayed_name,omitempty" yaml:"displayed_name,omitempty"`
	// Hide will be used by the UI to decide if the variable has to be displayed
	Hide bool `json:"hide" yaml:"hide"`
	// Selected is the variable selected by default if it exists
	Selected  string            `json:"selected,omitempty" yaml:"selected,omitempty"`
	Parameter VariableParameter `json:"parameter" yaml:"parameter"`
}

// UnmarshalJSON decodes a Variable, dispatching Parameter to the concrete
// type selected by Kind.
func (d *Variable) UnmarshalJSON(data []byte) error {
	jsonUnmarshalFunc := func(panel interface{}) error {
		return json.Unmarshal(data, panel)
	}
	return d.unmarshal(jsonUnmarshalFunc, json.Marshal, json.Unmarshal)
}

// UnmarshalYAML decodes a Variable, dispatching Parameter to the concrete
// type selected by Kind.
func (d *Variable) UnmarshalYAML(unmarshal func(interface{}) error) error {
	return d.unmarshal(unmarshal, yaml.Marshal, yaml.Unmarshal)
}

// unmarshal is the format-agnostic two-pass decoder: first decode into
// tmpDashboardVariable (Parameter as a raw map), then re-marshal the raw
// parameter and decode it into the concrete type chosen by Kind. The
// concrete types' own unmarshalers perform field validation.
func (d *Variable) unmarshal(unmarshal func(interface{}) error, staticMarshal func(interface{}) ([]byte, error), staticUnmarshal func([]byte, interface{}) error) error {
	var tmpVariable tmpDashboardVariable
	if err := unmarshal(&tmpVariable); err != nil {
		return err
	}
	d.Kind = tmpVariable.Kind
	d.Selected = tmpVariable.Selected
	d.Hide = tmpVariable.Hide
	d.DisplayedName = tmpVariable.DisplayedName
	if len(tmpVariable.DisplayedName) == 0 && !d.Hide {
		return fmt.Errorf("variable.displayed_name cannot be empty if the variable is not hidden")
	}
	if len(tmpVariable.Kind) == 0 {
		return fmt.Errorf("variable.kind cannot be empty")
	}
	rawParameter, err := staticMarshal(tmpVariable.Parameter)
	if err != nil {
		return err
	}
	var parameter interface{}
	switch tmpVariable.Kind {
	case KindPromQLQueryVariable:
		parameter = &PromQLQueryVariableParameter{}
	case KindLabelNamesQueryVariable:
		parameter = &LabelNamesQueryVariableParameter{}
	case KindLabelValuesQueryVariable:
		parameter = &LabelValuesQueryVariableParameter{}
	case KindConstantVariable:
		parameter = &ConstantVariableParameter{}
	}
	if err := staticUnmarshal(rawParameter, parameter); err != nil {
		return err
	}
	d.Parameter = parameter
	return nil
}
// This is the setup file for this test suite. package main import ( "testing" "github.com/go-rod/rod" "github.com/ysmood/got" ) // test context. type G struct { got.G browser *rod.Browser } // setup for tests. var setup = func() func(t *testing.T) G { browser := rod.New().MustConnect() return func(t *testing.T) G { t.Parallel() // run each test concurrently return G{got.New(t), browser} } }() // a helper function to create an incognito page. func (g G) page(url string) *rod.Page { page := g.browser.MustIncognito().MustPage(url) g.Cleanup(page.MustClose) return page }
package main

import (
	"log"
	"os"
	"text/template"
)

// answer carries the values rendered into the template.
type answer struct {
	Primary   string
	Secondary string
}

// main parses templates/answer.gotpl and renders it to stdout with fixed data,
// aborting on any parse or execution error.
func main() {
	data := answer{Primary: "42", Secondary: "monkey"}

	tpl, err := template.ParseFiles("templates/answer.gotpl")
	if err != nil {
		log.Fatalln(err)
	}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		log.Fatalln(err)
	}
}
package operations // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "fmt" "github.com/go-openapi/runtime" strfmt "github.com/go-openapi/strfmt" ) // GetExportExecutionStatusObjectReader is a Reader for the GetExportExecutionStatusObject structure. type GetExportExecutionStatusObjectReader struct { formats strfmt.Registry } // ReadResponse reads a server response into the recieved o. func (o *GetExportExecutionStatusObjectReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewGetExportExecutionStatusObjectOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) } } // NewGetExportExecutionStatusObjectOK creates a GetExportExecutionStatusObjectOK with default headers values func NewGetExportExecutionStatusObjectOK() *GetExportExecutionStatusObjectOK { return &GetExportExecutionStatusObjectOK{} } /*GetExportExecutionStatusObjectOK handles this case with default header values. GetExportExecutionStatusObjectOK get export execution status object o k */ type GetExportExecutionStatusObjectOK struct { } func (o *GetExportExecutionStatusObjectOK) Error() string { return fmt.Sprintf("[GET /{executionId}/exports/{exportId}/status][%d] getExportExecutionStatusObjectOK ", 200) } func (o *GetExportExecutionStatusObjectOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { return nil }
package chance

import (
	"fmt"
	"math"
	"strings"
)

// Float returns any valid floating point number in range (-MaxInt64..+MaxInt64).
func (chance *Chance) Float() float64 {
	sign := 1
	if chance.Bool() {
		sign = -1
	}
	return float64(math.MaxInt64) * chance.r.Float64() * float64(sign)
}

// FloatN returns any floating point number in range [0, max-1).
func (chance *Chance) FloatN(max float64) float64 {
	return chance.FloatBtw(0, max-1)
}

// FloatBtw returns any floating point number in range [min, max).
func (chance *Chance) FloatBtw(min float64, max float64) float64 {
	return chance.r.Float64()*(max-min) + min
}

// Int returns any valid integer, positive or negative.
func (chance *Chance) Int() int {
	sign := 1
	if chance.Bool() {
		sign = -1
	}
	return chance.r.Int() * sign
}

// IntN returns any integer in range [0..max-1].
func (chance *Chance) IntN(max int) int {
	return chance.r.Intn(max)
}

// IntBtw returns any integer in range [min..max-1].
// (The previous doc comment claimed max was inclusive; Intn(max-min)+min
// never returns max itself.)
func (chance *Chance) IntBtw(min int, max int) int {
	return chance.r.Intn(max-min) + min
}

// Natural returns any natural number in range [1..MaxInt64].
// NOTE(review): when r.Int() yields its maximum value, the +1 overflows to a
// negative number — confirm whether that edge case matters for callers.
func (chance *Chance) Natural() int {
	return chance.r.Int() + 1
}

// NaturalN returns any natural number in range [1..max-1].
func (chance *Chance) NaturalN(max int) int {
	return chance.r.Intn(max-1) + 1
}

// Phone returns a random phone number for the specified country.
// Valid countries: BY, DE, US, RU, CN. For any other country a generic
// "NNN-NN-NN" number is produced.
func (chance *Chance) Phone(country string) string {
	templates := map[string]string{
		"BY": "+375 29 ??? ????",
		"DE": "+49 151 ????????",
		"US": "+1 201-???-????",
		"RU": "+7 912 ???-??-??",
		// NOTE(review): leading space preserved from the original output
		// format — confirm it is intentional.
		"CN": " +86 131 ???? ????",
	}
	template := templates[country]
	if template == "" {
		return fmt.Sprintf("%d-%d-%d", chance.IntBtw(100, 999), chance.IntBtw(10, 99), chance.IntBtw(10, 99))
	}
	// Replace each '?' placeholder with a random numeric character; the
	// replacement keeps the string length constant, so the index scan is safe.
	for i := 0; i < len(template); i++ {
		if template[i] == '?' {
			symbol := chance.NumChar()
			template = strings.Replace(template, "?", string(symbol), 1)
		}
	}
	// Previously wrapped in a redundant fmt.Sprintf("%s", template).
	return template
}
// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0

package vlc

//#include <stdlib.h>
//#include <vlc/vlc.h>
import "C"
import (
	"syscall"
	"unsafe"
)

// Media wraps a libvlc media descriptor (libvlc_media_t).
// A nil ptr marks an invalid or released instance; every method guards
// against it and returns syscall.EINVAL (or a zero value) in that case.
type Media struct {
	ptr *C.libvlc_media_t
}

// Retain increments the reference count of this Media instance.
func (this *Media) Retain() (err error) {
	if this.ptr == nil {
		return syscall.EINVAL
	}

	C.libvlc_media_retain(this.ptr)
	return
}

// Release decreases the reference count of this Media instance and destroys it
// when it reaches zero. It will send out a MediaFreed event to all listeners.
// If the media descriptor object has been released it should not be used again.
func (this *Media) Release() (err error) {
	if this.ptr == nil {
		return syscall.EINVAL
	}

	C.libvlc_media_release(this.ptr)
	return
}

// Duplicate duplicates the media object.
func (this *Media) Duplicate() (*Media, error) {
	if this.ptr == nil {
		return nil, syscall.EINVAL
	}

	if c := C.libvlc_media_duplicate(this.ptr); c != nil {
		return &Media{c}, nil
	}

	return nil, checkError()
}

// Add an option to the media.
//
// This option will be used to determine how the media player will read the
// media. This allows us to use VLC's advanced reading/streaming options on a
// per-media basis.
//
// The options are detailed in vlc --full-help, for instance "--sout-all"
func (this *Media) AddOption(options string) error {
	if this.ptr == nil {
		return syscall.EINVAL
	}

	c := C.CString(options)
	C.libvlc_media_add_option(this.ptr, c)
	// The C string is copied by libvlc, so it is safe to free it here.
	C.free(unsafe.Pointer(c))
	return checkError()
}

// Add an option to the media with configurable flags.
//
// This option will be used to determine how the media player will read the
// media. This allows us to use VLC's advanced reading/streaming options on a
// per-media basis.
//
// The options are detailed in vlc --full-help, for instance "--sout-all"
func (this *Media) AddOptionFlag(options string, flags uint32) error {
	if this.ptr == nil {
		return syscall.EINVAL
	}

	c := C.CString(options)
	C.libvlc_media_add_option_flag(this.ptr, c, C.uint(flags))
	C.free(unsafe.Pointer(c))
	return checkError()
}

// Mrl returns the media resource locator (mrl) from a media descriptor object.
func (this *Media) Mrl() (s string) {
	if this.ptr == nil {
		return
	}

	if c := C.libvlc_media_get_mrl(this.ptr); c != nil {
		s = C.GoString(c)
		// libvlc hands ownership of the returned string to the caller.
		C.free(unsafe.Pointer(c))
	}
	return
}

// Meta reads the specified metadata property of the media.
//
// If the media has not yet been parsed this will return an empty string.
//
// This method automatically calls Media.ParseAsync(), so after calling
// it you may receive a MediaMetaChanged event. If you prefer a synchronous
// version, ensure that you call Media.Parse() before Media.Meta().
func (this *Media) Meta(mp MetaProperty) (s string) {
	if this.ptr == nil {
		return
	}

	if c := C.libvlc_media_get_meta(this.ptr, C.libvlc_meta_t(mp)); c != nil {
		s = C.GoString(c)
		C.free(unsafe.Pointer(c))
	}
	return
}

// SetMeta sets the metadata for this media instance.
// Note: This method does not save the metadata. Call Media.SaveMeta() for this purpose.
func (this *Media) SetMeta(mp MetaProperty, v string) {
	if this.ptr == nil {
		return
	}

	c := C.CString(v)
	C.libvlc_media_set_meta(this.ptr, C.libvlc_meta_t(mp), c)
	C.free(unsafe.Pointer(c))
}

// SaveMeta saves the previously changed metadata.
func (this *Media) SaveMeta() (err error) {
	if this.ptr == nil {
		return syscall.EINVAL
	}

	if C.libvlc_media_save_meta(this.ptr) != 0 {
		err = checkError()
	}
	return
}

// State returns the current media state.
func (this *Media) State() MediaState {
	if this.ptr == nil {
		return MSError
	}

	return MediaState(C.libvlc_media_get_state(this.ptr))
}

// Stats returns media statistics.
func (this *Media) Stats() (*Stats, error) {
	if this.ptr == nil {
		return nil, syscall.EINVAL
	}

	var c C.libvlc_media_stats_t
	if C.libvlc_media_get_stats(this.ptr, &c) != 0 {
		return nil, checkError()
	}

	return &Stats{&c}, nil
}

// SubItems returns subitems of this media instance. This will increment
// the reference count of this media instance. Use MediaList.Release() to
// decrement the reference count.
func (this *Media) SubItems() (*MediaList, error) {
	if this.ptr == nil {
		return nil, syscall.EINVAL
	}

	if c := C.libvlc_media_subitems(this.ptr); c != nil {
		return &MediaList{c}, nil
	}

	return nil, checkError()
}

// Events returns an event manager for this media instance.
// Note: This method does not increment the media reference count.
func (this *Media) Events() (*EventManager, error) {
	if this.ptr == nil {
		return nil, syscall.EINVAL
	}

	if c := C.libvlc_media_event_manager(this.ptr); c != nil {
		return NewEventManager(c), nil
	}

	return nil, checkError()
}

// Duration returns the duration in milliseconds for the current media instance.
func (this *Media) Duration() int64 {
	if this.ptr == nil {
		return 0
	}

	return int64(C.libvlc_media_get_duration(this.ptr))
}

// Parse the current media source.
//
// This fetches (local) meta data and track information.
// The method is synchronous version of Media.ParseAsync().
func (this *Media) Parse() {
	if this.ptr != nil {
		C.libvlc_media_parse(this.ptr)
	}
}

// Parse the current media source.
//
// This fetches (local) meta data and track information.
// The method is the asynchronous version of Media.Parse()
//
// To determine when this routine finishes, you can listen for a MediaParsedChanged
// event. However if the media was already parsed you will not receive this event.
func (this *Media) ParseAsync() {
	if this.ptr != nil {
		C.libvlc_media_parse_async(this.ptr)
	}
}

// IsParsed returns true if the media's metadata has already been parsed.
func (this *Media) IsParsed() bool {
	if this.ptr != nil {
		return C.libvlc_media_is_parsed(this.ptr) != 0
	}
	return false
}

// UserData returns the media descriptor's user_data. user_data is specialized
// data accessed by the host application, VLC.framework uses it as a pointer to
// a native object that references a libvlc_media_t pointer.
//
// NOTE(review): this returns the raw C pointer wrapped in an interface{},
// not the Go value a caller may have passed to SetUserData — confirm intended.
//
//TODO(jimt): I have no idea what this comment means. Presumably its a roundabout
// way of saying that the data specified in here will survive roundtrips through
// event callback handlers. So you can pass it anything you need.
func (this *Media) UserData() interface{} {
	if this.ptr != nil {
		return C.libvlc_media_get_user_data(this.ptr)
	}
	return nil
}

// SetUserData sets the media descriptor's user_data. user_data is specialized
// data accessed by the host application, VLC.framework uses it as a pointer to
// a native object that references a libvlc_media_t pointer.
//
// NOTE(review): &v is the address of a stack-local interface value; libvlc
// keeps that pointer after this function returns, which looks unsafe under
// the cgo pointer rules — confirm and consider pinning or redesigning.
//
//TODO(jimt): I have no idea what this comment means. Presumably its a roundabout
// way of saying that the data specified in here will survive roundtrips through
// event callback handlers. So you can pass it anything you need.
func (this *Media) SetUserData(v interface{}) {
	if this.ptr != nil {
		C.libvlc_media_set_user_data(this.ptr, unsafe.Pointer(&v))
	}
}

// TrackInfo yields the media descriptor's elementary stream descriptions.
//
// Note: You need to play the media _one_ time with --sout="#description"
// Not doing this will result in an empty array, and doing it more than once
// will duplicate the entries in the array each time. Something like this:
//
//	player, _ := media.NewPlayer()
//	media.AddOption("sout=#description")
//	player.Play()
//	// ... wait until playing
//	player.Release()
//
// This is very likely to change in next release, and will be done at the
// parsing phase instead.
func (this *Media) TrackInfo() ([]*TrackInfo, error) {
	if this.ptr == nil {
		return nil, syscall.EINVAL
	}

	var c **C.libvlc_media_track_t

	if size := C.libvlc_media_tracks_get(this.ptr, &c); size > 0 {
		list := make([]*TrackInfo, size)

		// NOTE(review): libvlc_media_tracks_get fills an array of
		// *libvlc_media_track_t, but each element below is built from the
		// address of the array slot (not the pointer stored in it) and cast
		// to *libvlc_media_track_info_t — this looks incorrect; confirm
		// against the targeted libvlc API version before relying on it.
		addr := uintptr(unsafe.Pointer(c))
		sz := int(unsafe.Sizeof(c))

		for i := range list {
			list[i] = &TrackInfo{(*C.libvlc_media_track_info_t)(unsafe.Pointer(addr + uintptr(i*sz)))}
		}

		return list, nil
	}

	return nil, checkError()
}

// NewPlayer a media player from this media instance.
// After creating the player, you can destroy this Media instance, unless you
// really need it for something. It is not necessary to perform actual playback.
func (this *Media) NewPlayer() (*Player, error) {
	if this.ptr == nil {
		return nil, syscall.EINVAL
	}

	if c := C.libvlc_media_player_new_from_media(this.ptr); c != nil {
		return &Player{c}, nil
	}

	return nil, checkError()
}
/* Упражнение: rot13Reader https://tour.golang.org/methods/23 */ package main import ( "io" "os" "strings" ) type rot13Reader struct { r io.Reader } func (rot13 rot13Reader) Read(b []byte) (int, error) { n, err := rot13.r.Read(b) var ( from = "NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm" to = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" ) for i := 0; i < n; i++ { if index := strings.Index(from, string(b[i])); index != -1 { b[i] = to[index] } } return n, err } func main() { s := strings.NewReader("Lbh penpxrq gur pbqr!") r := rot13Reader{s} io.Copy(os.Stdout, &r) }
package event

import (
	"net/url"
)

// GetEventSource gets the source to be used for CloudEvents originating
// from the dynatrace-service.
func GetEventSource() string {
	// The input is a fixed, well-formed relative URI, so the parse error
	// can never occur and is deliberately discarded.
	u, _ := url.Parse("dynatrace-service")
	return u.String()
}
package utils

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestSecureToken checks that generated tokens are non-empty and unique.
func TestSecureToken(t *testing.T) {
	t.Parallel()

	first := SecureToken()
	assert.NotEmpty(t, first)

	second := SecureToken()
	assert.NotEmpty(t, second)
	assert.NotEqual(t, first, second)
}

// TestHashPassword checks that hashing a password yields a non-empty hash.
func TestHashPassword(t *testing.T) {
	t.Parallel()

	const secret = "SXJAm7qJ4?3dH!aN8T3f5p!oNnpXbaRy#Gtx#8jG"
	assert.NotEmpty(t, HashPassword(secret))
}

// TestCheckPassword checks that a hash verifies against the original
// password and rejects a wrong (empty) one.
func TestCheckPassword(t *testing.T) {
	t.Parallel()

	const secret = "SXJAm7qJ4?3dH!aN8T3f5p!oNnpXbaRy#Gtx#8jG"
	hash := HashPassword(secret)
	require.NotEmpty(t, hash)

	assert.NoError(t, CheckPassword(hash, secret))
	assert.Error(t, CheckPassword(hash, ""))
}
package solutions type TrieNode struct { nodes [26]*TrieNode index int match map[int]bool } func palindromePairs(words []string) [][]int { var result [][]int root := &TrieNode{index: -1, match: make(map[int]bool)} for i, word := range words { addWord(root, word, i) } for i, word := range words { matchSet := searchWord(root, word) for matchIndex := range matchSet { if matchIndex != i { result = append(result, []int{matchIndex, i}) } } } return result } func searchWord(root *TrieNode, word string) map[int]bool { result := make(map[int]bool) for i := len(word) - 1; i >= 0; i-- { if root.index != -1 && isPalindrome(word[: i + 1]) { result[root.index] = true } offset := word[i] - 'a' if root.nodes[offset] == nil { return result } root = root.nodes[offset] } for k := range root.match { result[k] = true } return result } func addWord(root *TrieNode, word string, idx int) { for i := 0; i < len(word); i++ { if isPalindrome(word[i:]) { root.match[idx] = true } offset := word[i] - 'a' if root.nodes[offset] == nil { root.nodes[offset] = &TrieNode{index: - 1, match: make(map[int]bool)} } root = root.nodes[offset] } root.index = idx root.match[idx] = true } func isPalindrome(s string) bool { for left, right := 0, len(s) - 1; left < right; left, right = left + 1, right - 1 { if s[left] != s[right] { return false } } return true }
package main

// twoSum returns the indices of two distinct elements of nums that sum to
// target, or an empty slice when no such pair exists. When the complement
// occurs more than once, the second occurrence is paired.
func twoSum(nums []int, target int) []int {
	// Map each value to every index at which it occurs (ascending order).
	positions := make(map[int][]int, len(nums))
	for idx, value := range nums {
		positions[value] = append(positions[value], idx)
	}

	for idx, value := range nums {
		candidates := positions[target-value]
		switch {
		case len(candidates) > 1:
			return []int{idx, candidates[1]}
		case len(candidates) == 1 && candidates[0] != idx:
			return []int{idx, candidates[0]}
		}
	}
	return []int{}
}
package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"reflect"
	"strings"
)

// Sslmeta is the demo struct populated from a map below.
type Sslmeta struct {
	Ssl  string
	User string
}

func main() {
	mList2 := map[string]interface{}{
		"Ssl":  "klp1",
		"User": "klpklp1",
	}
	var ssls *Sslmeta
	// mapToStruct receives a **Sslmeta here.
	mapToStruct(mList2, &ssls)
}

// mapToStruct builds a struct value derived from model's type, fills its
// exported fields from mList via setField, then gob-deep-copies the result
// back into model.
// NOTE(review): the reflection chain assumes a very specific shape for
// model; with the **Sslmeta passed from main, typ.Elem().Elem() and
// reflect.Append (which requires a slice) look like they would panic at
// run time — confirm the intended model shape before reuse.
func mapToStruct(mList map[string]interface{}, model interface{}) (err error) {
	val := reflect.Indirect(reflect.ValueOf(model))
	valof := reflect.ValueOf(model)
	fmt.Println(valof.Type(), reflect.ValueOf(model), val.Type(), reflect.ValueOf(val))
	typ := val.Type()
	// Allocate a new element value and keep its address so fields are settable.
	mVal := reflect.Indirect(reflect.New(typ.Elem().Elem())).Addr()
	for key, val := range mList {
		err = setField(mVal.Interface(), key, val)
		if err != nil {
			return err
		}
	}
	val = reflect.Append(val, mVal)
	DeepCopy(model, val.Interface())
	return err
}

// setField replaces one struct field's value with the matching map value.
// obj must be a pointer to a struct; the field's type must match the
// value's dynamic type exactly (no conversion is attempted).
func setField(obj interface{}, name string, value interface{}) error {
	// Upper-case the first letter so the name addresses an exported field.
	sl := strings.Split(name, "")
	sl[0] = strings.ToUpper(sl[0])
	name = strings.Join(sl, "")
	structValue := reflect.ValueOf(obj).Elem() // the struct value behind obj
	//fmt.Printf("structValue: %+v\n", structValue)
	structFieldValue := structValue.FieldByName(name) // the single target field
	//fmt.Printf("structFieldValue: %+v\n", structFieldValue)
	if !structFieldValue.IsValid() {
		return fmt.Errorf("No such field: %s in obj", name)
	}
	if !structFieldValue.CanSet() {
		return fmt.Errorf("Cannot set %s field value", name)
	}
	structFieldType := structFieldValue.Type() // declared type of the field
	val := reflect.ValueOf(value)              // reflected map value
	fmt.Println("fieldtype", structFieldType, val)
	if structFieldType != val.Type() {
		return errors.New("type is err")
	}
	structFieldValue.Set(val)
	return nil
}

// DeepCopy clones src into dst by round-tripping through gob encoding.
// Both arguments must be gob-encodable; dst must be a pointer.
func DeepCopy(dst, src interface{}) error {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		return err
	}
	return gob.NewDecoder(bytes.NewBuffer(buf.Bytes())).Decode(dst)
}
package LRU

import (
	"fmt"
	"testing"
)

// TestInit verifies that a fresh cache carries the requested capacity.
func TestInit(t *testing.T) {
	lru := Init(3)
	if lru.dList.Capacity != 3 {
		t.Error("LRU Cache init failed")
	} else {
		t.Log("LRU Cache init success")
	}
}

// TestPut feeds a fixed access sequence through the cache and dumps it.
func TestPut(t *testing.T) {
	lruCache := Init(3)
	for _, key := range []int{2, 3, 2, 1, 5, 2, 4, 5, 3, 2, 5, 2} {
		lruCache.Put(key)
	}
	lruCache.PrintLRUCache()
}

// TestGet looks up a key that was never inserted and dumps the cache state.
func TestGet(t *testing.T) {
	lruCache := Init(3)
	for _, key := range []int{2, 3, 2} {
		lruCache.Put(key)
	}
	fmt.Println(lruCache.Get(5))
	lruCache.PrintLRUCache()
}
package main

import (
	"database/sql"
	"encoding/hex"
	//"encoding/json"
	"flag"
	//"fmt"
	_ "github.com/go-sql-driver/mysql"
	"github.com/olebedev/config"
	"github.com/shiyanhui/dht"
	"io"
	"log"
	// "net/http"
	_ "net/http/pprof"
	"os"
	"sort"
	"strings"
	//"code.google.com/p/go.text/encoding/charmap"
	//"code.google.com/p/go.text/transform"
	//"runtime"
)

// Package-level handles shared by init(), main() and the response worker.
var (
	db       *sql.DB           // MySQL connection pool
	l        *log.Logger       // logs to both logger.log and stdout
	cfg      *config.Config    // NOTE(review): shadowed in init(); stays nil (see below)
	port     string            // DHT listen port, from config or -port flag
	replacer *strings.Replacer // strips path/bracket punctuation for the search index
)

const (
	logFileName    = "logger.log"
	configFileName = "config.json"
)

// file mirrors one entry of a torrent's "files" dictionary.
type file struct {
	Path   []interface{} `json:"path"`
	Length int           `json:"length"`
}

// bitTorrent is the metadata extracted from a fetched torrent.
type bitTorrent struct {
	InfoHash string `json:"infohash"`
	Name     string `json:"name"`
	Files    []file `json:"files,omitempty"`
	Length   int    `json:"length,omitempty"`
}

// func convertCP1251toUTF8(Text1251) {
// 	sr := strings.NewReader(Text1251)
// 	tr := transform.NewReader(sr, charmap.Windows1251.NewDecoder())
// 	buf, err := ioutil.ReadAll(tr)
// 	if err != err {
// 		l.Println("Error string converting")
// 	}
// 	return string(buf)
// }

// init wires up logging, configuration, the MySQL pool, flags and the
// punctuation replacer before main() runs.
func init() {
	f, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalln("Failed to open log file", logFileName, ":", err)
	}
	multi := io.MultiWriter(f, os.Stdout)
	l = log.New(multi, "main: ", log.Ldate|log.Ltime|log.Lshortfile)
	// NOTE(review): ":=" declares a *local* cfg here, so the package-level cfg
	// above is never assigned and remains nil.
	cfg, err := config.ParseJsonFile(configFileName)
	if err != nil {
		l.Fatalln("Failed to open config file", configFileName, ":", err)
	}
	host, _ := cfg.String("database.host")
	name, _ := cfg.String("database.name")
	user, _ := cfg.String("database.user")
	pass, _ := cfg.String("database.password")
	port, _ = cfg.String("spider.port")
	if port == "" {
		port = "6882" // default DHT port when the config omits spider.port
	}
	db, err = sql.Open("mysql", ""+user+":"+pass+"@"+host+"/"+name)
	if err != nil {
		// NOTE(review): the panic after Fatalln never runs (Fatalln exits).
		l.Fatalln("Error database connect", err.Error())
		panic(err.Error())
	}
	// The config value only provides the default; -port overrides it.
	portPtr := flag.String("port", port, "DHT port")
	flag.Parse()
	port = *portPtr
	replacer = strings.NewReplacer(
		"/", " ",
		"[", " ",
		"(", " ",
		"]", " ",
		")", " ",
		".", " ",
		"_", " ",
	)
}

// GenerateSearchIndex normalizes text into a space-separated list of unique
// words ordered by descending frequency, for full-text matching.
func GenerateSearchIndex(text string) string {
	// text = strings.Replace(text, "/", " ", -1)
	// text = strings.Replace(text, "[", " ", -1)
	// text = strings.Replace(text, "(", " ", -1)
	// text = strings.Replace(text, "]", " ", -1)
	// text = strings.Replace(text, ")", " ", -1)
	// text = strings.Replace(text, ".", " ", -1)
	// text = strings.Replace(text, "_", " ", -1)
	text = replacer.Replace(text)
	uniq := map[string]int{}
	for _, s := range strings.Split(text, " ") {
		if s != "" {
			if cv, presen := uniq[s]; presen {
				uniq[s] = cv + 1
			} else {
				uniq[s] = 1
			}
		}
	}
	type kvt struct {
		Key   string
		Value int
	}
	// NOTE(review): make with a length (not capacity) pre-fills len(uniq)
	// zero-value entries; the appends below then double the slice. The empty
	// keys sort last (Value 0) and are trimmed away, so the output happens to
	// stay correct, but `make([]kvt, 0, len(uniq))` was probably intended.
	kv := make([]kvt, len(uniq))
	for k, v := range uniq {
		kv = append(kv, kvt{k, v})
	}
	sort.Slice(kv, func(i, j int) bool {
		return kv[i].Value > kv[j].Value
	})
	text = ""
	for _, i := range kv {
		text = text + i.Key + " "
	}
	return strings.Trim(text, " ")
}

// main starts the metadata wire worker, then runs the DHT crawler. The worker
// upserts fetched torrents into the `infohash` table (plus per-file rows in
// `files`) and exits the process after 100 processed torrents so an outer
// run.sh loop can restart it.
func main() {
	w := dht.NewWire(65536, 1024, 256)
	textIndex := ""
	countDown := 100
	go func() {
		for resp := range w.Response() {
			metadata, err := dht.Decode(resp.MetadataInfo)
			if err != nil {
				continue
			}
			info := metadata.(map[string]interface{})
			if _, ok := info["name"]; !ok {
				continue
			}
			bt := bitTorrent{
				InfoHash: hex.EncodeToString(resp.InfoHash),
				Name:     info["name"].(string),
			}
			var vFiles []interface{}
			haveFiles := false
			isNew := false
			if v, ok := info["files"]; ok {
				haveFiles = true
				vFiles = v.([]interface{})
				bt.Files = make([]file, len(vFiles))
				for i, item := range vFiles {
					f := item.(map[string]interface{})
					bt.Files[i] = file{
						Path:   f["path"].([]interface{}),
						Length: f["length"].(int),
					}
				}
			} else if _, ok := info["length"]; ok {
				bt.Length = info["length"].(int)
			}
			// data, err := json.Marshal(bt)
			// if err == nil {
			//l.Printf("[%s] %s \t%d\n", bt.InfoHash, bt.Name, bt.Length)
			infoHashSelect, err := db.Query("SELECT id FROM infohash WHERE infohash = ?", bt.InfoHash)
			if err != nil {
				l.Panicln(err.Error())
			}
			var id int64
			if infoHashSelect.Next() {
				// Known infohash: bump the hit counter and timestamp.
				err := infoHashSelect.Scan(&id)
				if err != nil {
					l.Fatal(err)
				}
				l.Println(bt.InfoHash, " update: \t"+bt.Name)
				//upd, err := db.Prepare("UPDATE infohash SET name=?,files=?,length=?,updated=NOW(), cnt=cnt+1 WHERE infohash=?")
				upd, err := db.Prepare("UPDATE infohash SET updated=NOW(), cnt=cnt+1 WHERE id=?")
				if err != nil {
					l.Panicln(err.Error())
				}
				_, err = upd.Exec(id)
				if err != nil {
					// NOTE(review): this continue skips infoHashSelect.Close()
					// below, leaking the rows handle.
					l.Println(err.Error())
					continue
				}
				upd.Close()
			} else {
				// New infohash: build the text index and insert the row.
				isNew = true
				totalLength := 0
				textIndex = bt.Name
				if _, ok := info["length"]; ok {
					totalLength = info["length"].(int)
				}
				if haveFiles {
					for _, item := range vFiles {
						f := item.(map[string]interface{})
						totalLength += f["length"].(int)
						for _, p := range f["path"].([]interface{}) {
							textIndex += " " + p.(string)
						}
					}
				}
				textIndex = GenerateSearchIndex(textIndex)
				l.Println(bt.InfoHash, " add new:\t"+bt.Name)
				ins, err := db.Prepare("INSERT INTO infohash SET infohash=?,name=?,files=?,length=?,addeded=NOW(),updated=NOW(), textindex=?")
				if err != nil {
					l.Panicln(err.Error())
				}
				res, err := ins.Exec(bt.InfoHash, bt.Name, haveFiles, totalLength, textIndex)
				if err != nil {
					l.Println(err.Error())
					continue
				}
				id, err = res.LastInsertId()
				if err != nil {
					println("Error:", err.Error())
				}
				ins.Close()
			}
			infoHashSelect.Close()
			textIndex = ""
			if haveFiles && isNew {
				// Insert one row per file path for the new torrent.
				ins, err := db.Prepare("INSERT INTO files SET infohash_id=?,path=?,length=?")
				if err != nil {
					l.Panicln(err.Error())
				}
				for _, item := range vFiles {
					f := item.(map[string]interface{})
					path := ""
					for _, p := range f["path"].([]interface{}) {
						if path != "" {
							path = path + "/" + p.(string)
						} else {
							path = p.(string)
						}
					}
					_, err = ins.Exec(id, path, f["length"].(int))
					if err != nil {
						l.Println(err.Error())
					}
				}
				ins.Close()
				l.Printf("%s files:\t%d", bt.InfoHash, len(vFiles))
			}
			countDown--
			if countDown <= 0 {
				// exit to restart in run.sh
				//l.Panic("Exit to restart")
				os.Exit(0)
			}
		}
	}()
	go w.Run()
	config := dht.NewCrawlConfig()
	l.Println("Use port ", port)
	config.Address = ":" + port
	config.PrimeNodes = append(config.PrimeNodes, "router.bitcomet.com:6881")
	config.OnAnnouncePeer = func(infoHash, ip string, port int) {
		//l.Println("Annonce peer", ip, ":", port)
		w.Request([]byte(infoHash), ip, port)
	}
	d := dht.New(config)
	d.Run()
	// NOTE(review): this defer is only registered after d.Run() returns,
	// so it offers no protection while the crawler is running.
	defer db.Close()
}
package decoder

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"os/exec"
	"path/filepath"
	"testing"

	"github.com/benbjohnson/megajson/generator/test"
	"github.com/stretchr/testify/assert"
)

// Ensures a basic sanity check when generating the decoder.
func TestWriteTypeGenerator(t *testing.T) {
	src := `
package foo

type Foo struct {
	Name string
	Age int
}
`
	f, _ := parser.ParseFile(token.NewFileSet(), "foo.go", src, 0)
	err := NewGenerator().Generate(bytes.NewBufferString(src), f)
	assert.NoError(t, err)
}

// Ensures that a simple struct can be decoded from JSON.
func TestGenerateSimple(t *testing.T) {
	out, err := execute("simple")
	assert.NoError(t, err)
	// assert.Equal takes (t, expected, actual); the original had them swapped,
	// which produced misleading failure diffs.
	assert.Equal(t, `|foo|200|189273|2392|172389984|182.23|19380.1312|true|`, out)
}

// Ensures that a complex nested struct can be decoded from JSON.
func TestGenerateDecodeNested(t *testing.T) {
	out, err := execute("nested")
	assert.NoError(t, err)
	assert.Equal(t, `|foo|John|20|<nil>|2|Jane|60|Jack|-13|`, out)
}

// execute generates a decoder against a fixture, executes the main program,
// and returns its combined output. Parse and generation errors are surfaced
// through err; the fixture command's own exit status is intentionally
// ignored because its output is the observable result.
func execute(name string) (ret string, err error) {
	test.Test(name, func(path string) {
		var file *ast.File
		file, err = parser.ParseFile(token.NewFileSet(), filepath.Join(path, "types.go"), nil, 0)
		if err != nil {
			return
		}

		// Generate decoder.
		f, createErr := os.Create(filepath.Join(path, "decoder.go"))
		if createErr != nil {
			err = createErr
			return
		}
		if err = NewGenerator().Generate(f, file); err != nil {
			f.Close()
			fmt.Println("generate error:", err.Error())
			return
		}
		f.Close()

		// Execute fixture.
		out, _ := exec.Command("go", "run", filepath.Join(path, "decode.go"), filepath.Join(path, "decoder.go"), filepath.Join(path, "types.go")).CombinedOutput()
		ret = string(out)
	})
	return
}
// 183.Hands-on exercise#2 // fmt.Errorf() & errors.New() // 圖? package main import ( "encoding/json" "fmt" "log" ) type person struct { First string Last string Sayings []string } func main() { p1 := person{ First: "James", Last: "Bond", Sayings: []string{"Shaken, not stirred", "Any last wishes?", "Never say never"}, } bs, err := toJSON(p1) // Fatalln() 內建 exit 所以不用 return if err != nil { log.Fatalln("err") } // 需要 return 返回 main 結束程式 // if err != nil { // log.Println("err") // return // } fmt.Println(string(bs)) } // toJSON needs to return an error also func toJSON(a interface{}) ([]byte, error) { bs, err := json.Marshal(a) if err != nil { return []byte{}, fmt.Errorf("err:%v", err) // 下面 等於 上面的 背後在跑的內容 || 上面為簡化版 // return bs, errors.New(fmt.Sprintf("ERR:%v", err)) } return bs, nil }
package vkubelet

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/gorilla/mux"
	"github.com/pkg/errors"
	"github.com/virtual-kubelet/virtual-kubelet/log"
	"k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
)

// loggingContext derives a request-scoped context whose logger carries the
// request URI and the mux route variables.
func loggingContext(r *http.Request) context.Context {
	ctx := r.Context()
	logger := log.G(ctx).WithFields(logrus.Fields{
		"uri":  r.RequestURI,
		"vars": mux.Vars(r),
	})
	return log.WithLogger(ctx, logger)
}

// NotFound provides a handler for cases where the requested endpoint doesn't exist
func NotFound(w http.ResponseWriter, r *http.Request) {
	logger := log.G(loggingContext(r))
	log.Trace(logger, "404 request not found")
	http.Error(w, "404 request not found", http.StatusNotFound)
}

// KubeletServer implements HTTP endpoints for serving kubelet API's
type KubeletServer struct {
	p Provider
}

// KubeletServerStart starts the virtual kubelet HTTP server.
// TLS material and the listen port come from the APISERVER_CERT_LOCATION,
// APISERVER_KEY_LOCATION and KUBELET_PORT environment variables.
// NOTE(review): a ListenAndServeTLS failure is only logged; the caller is
// never informed, and the function blocks until the server stops.
func KubeletServerStart(p Provider) {
	certFilePath := os.Getenv("APISERVER_CERT_LOCATION")
	keyFilePath := os.Getenv("APISERVER_KEY_LOCATION")
	port := os.Getenv("KUBELET_PORT")
	addr := fmt.Sprintf(":%s", port)

	r := mux.NewRouter()
	s := &KubeletServer{p: p}
	r.HandleFunc("/containerLogs/{namespace}/{pod}/{container}", s.ApiServerHandler).Methods("GET")
	r.HandleFunc("/exec/{namespace}/{pod}/{container}", s.ApiServerHandlerExec).Methods("POST")
	r.NotFoundHandler = http.HandlerFunc(NotFound)
	if err := http.ListenAndServeTLS(addr, certFilePath, keyFilePath, r); err != nil {
		log.G(context.TODO()).WithError(err).Error("error setting up http server")
	}
}

// MetricsServerStart starts an HTTP server on the provided addr for serving the kubelet summary stats API.
// TLS is never enabled on this endpoint.
func MetricsServerStart(p Provider, addr string) {
	r := mux.NewRouter()
	s := &MetricsServer{p: p}

	r.HandleFunc("/stats/summary", s.MetricsSummaryHandler).Methods("GET")
	r.HandleFunc("/stats/summary/", s.MetricsSummaryHandler).Methods("GET")
	r.NotFoundHandler = http.HandlerFunc(NotFound)

	if err := http.ListenAndServe(addr, r); err != nil {
		log.G(context.TODO()).WithError(err).Error("Error starting http server")
	}
}

// MetricsServer provides an HTTP endpoint for accessing pod metrics
type MetricsServer struct {
	p Provider
}

// MetricsSummaryHandler is an HTTP handler for implementing the kubelet
// summary stats endpoint. It answers 501 when the configured provider does
// not implement MetricsProvider, and 500 on provider or marshaling errors.
func (s *MetricsServer) MetricsSummaryHandler(w http.ResponseWriter, req *http.Request) {
	ctx := loggingContext(req)
	mp, ok := s.p.(MetricsProvider)
	if !ok {
		log.G(ctx).Debug("stats not implemented for provider")
		// NOTE(review): "not implememnted" is a typo in the response body;
		// left untouched here because it is runtime output.
		http.Error(w, "not implememnted", http.StatusNotImplemented)
		return
	}

	stats, err := mp.GetStatsSummary(req.Context())
	if err != nil {
		// A canceled request is not worth reporting as a server error.
		if errors.Cause(err) == context.Canceled {
			return
		}
		log.G(ctx).Error("Error getting stats from provider:", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	b, err := json.Marshal(stats)
	if err != nil {
		log.G(ctx).WithError(err).Error("Could not marshal stats")
		http.Error(w, "could not marshal stats: "+err.Error(), http.StatusInternalServerError)
		return
	}

	if _, err := w.Write(b); err != nil {
		log.G(ctx).WithError(err).Debug("Could not write to client")
	}
}

// ApiServerHandler serves GET /containerLogs/{namespace}/{pod}/{container},
// streaming the last tailLines (default 10) lines of the container's log.
func (s *KubeletServer) ApiServerHandler(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	if len(vars) != 3 {
		NotFound(w, req)
		return
	}

	ctx := loggingContext(req)

	namespace := vars["namespace"]
	pod := vars["pod"]
	container := vars["container"]
	tail := 10
	q := req.URL.Query()

	if queryTail := q.Get("tailLines"); queryTail != "" {
		t, err := strconv.Atoi(queryTail)
		if err != nil {
			logger := log.G(context.TODO()).WithError(err)
			log.Trace(logger, "could not parse tailLines")
			http.Error(w, fmt.Sprintf("could not parse \"tailLines\": %v", err), http.StatusBadRequest)
			return
		}
		tail = t
	}

	podsLogs, err := s.p.GetContainerLogs(ctx, namespace, pod, container, tail)
	if err != nil {
		log.G(ctx).WithError(err).Error("error getting container logs")
		http.Error(w, fmt.Sprintf("error while getting container logs: %v", err), http.StatusInternalServerError)
		return
	}

	if _, err := io.WriteString(w, podsLogs); err != nil {
		log.G(ctx).WithError(err).Warn("error writing response to client")
	}
}

// ApiServerHandlerExec serves POST /exec/{namespace}/{pod}/{container},
// delegating the streaming exec protocol to remotecommand.ServeExec with
// fixed stream options and 30s idle/creation timeouts.
func (s *KubeletServer) ApiServerHandlerExec(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)

	namespace := vars["namespace"]
	pod := vars["pod"]
	container := vars["container"]

	supportedStreamProtocols := strings.Split(req.Header.Get("X-Stream-Protocol-Version"), ",")

	q := req.URL.Query()
	command := q["command"]

	// streamOpts := &remotecommand.Options{
	// 	Stdin:  (q.Get("input") == "1"),
	// 	Stdout: (q.Get("output") == "1"),
	// 	Stderr: (q.Get("error") == "1"),
	// 	TTY:    (q.Get("tty") == "1"),
	// }

	// TODO: tty flag causes remotecommand.createStreams to wait for the wrong number of streams
	streamOpts := &remotecommand.Options{
		Stdin:  true,
		Stdout: true,
		Stderr: true,
		TTY:    false,
	}

	idleTimeout := time.Second * 30
	streamCreationTimeout := time.Second * 30

	remotecommand.ServeExec(w, req, s.p, fmt.Sprintf("%s-%s", namespace, pod), "", container, command, streamOpts, idleTimeout, streamCreationTimeout, supportedStreamProtocols)
}
package indexing_test

import (
	"testing"

	"github.com/onsi/gomega"

	"github.com/sp0x/torrentd/bots"
	"github.com/sp0x/torrentd/indexer/search"
	. "github.com/sp0x/torrentd/storage/indexing"
)

// TestKeyHasValue covers key-value presence checks across the supported key
// shapes: dotted ModelData paths, struct fields (LocalID), bare ModelData
// names, empty values, and a non-result type (bots.Chat).
func TestKeyHasValue(t *testing.T) {
	g := gomega.NewWithT(t)
	item := &search.ScrapeResultItem{}
	chat := &bots.Chat{}
	item.ModelData = make(map[string]interface{})
	item.ModelData["time"] = "33"
	// Explicit dotted path into ModelData.
	k := NewKey("ModelData.time")
	g.Expect(KeyHasValue(k, item)).To(gomega.BeTrue())

	item = &search.ScrapeResultItem{}
	item.ModelData = make(map[string]interface{})
	item.LocalID = "33"
	// Plain struct field lookup.
	k = NewKey("LocalID")
	g.Expect(KeyHasValue(k, item)).To(gomega.BeTrue())

	item = &search.ScrapeResultItem{}
	item.ModelData = make(map[string]interface{})
	item.ModelData["time"] = "33"
	// Bare name resolving into ModelData.
	k = NewKey("time")
	g.Expect(KeyHasValue(k, item)).To(gomega.BeTrue())

	item = &search.ScrapeResultItem{}
	item.ModelData = make(map[string]interface{})
	item.ModelData["time"] = ""
	// Empty string counts as "no value".
	k = NewKey("time")
	g.Expect(KeyHasValue(k, item)).ToNot(gomega.BeTrue())
	// Should work with other types also
	kChat := NewKey("id")
	g.Expect(KeyHasValue(kChat, chat)).ToNot(gomega.BeTrue())
}

// TestGetKeyQueryFromItem verifies that the query built from an item exposes
// the value only under the exact key path it was created with.
func TestGetKeyQueryFromItem(t *testing.T) {
	g := gomega.NewWithT(t)
	item := &search.ScrapeResultItem{}
	item.ModelData = make(map[string]interface{})
	item.ModelData["time"] = "33"
	k := NewKey("ModelData.time")
	q := GetKeyQueryFromItem(k, item)
	g.Expect(q).ToNot(gomega.BeNil())
	// The short name must not alias the dotted path.
	g.Expect(q.Get("time")).To(gomega.BeNil())
	val, found := q.Get("ModelData.time")
	g.Expect(found).To(gomega.BeTrue())
	g.Expect(val).To(gomega.Equal("33"))

	item = &search.ScrapeResultItem{}
	item.ModelData = make(map[string]interface{})
	item.LocalID = "34"
	k = NewKey("LocalID")
	q = GetKeyQueryFromItem(k, item)
	g.Expect(KeyHasValue(k, item)).To(gomega.BeTrue())
	val, found = q.Get("LocalID")
	g.Expect(found).To(gomega.BeTrue())
	g.Expect(val).To(gomega.Equal("34"))
}

// TestKey_AddKeys checks that AddKeys/Add de-duplicate field names.
func TestKey_AddKeys(t *testing.T) {
	g := gomega.NewWithT(t)
	k := NewKey("a")
	k.AddKeys(NewKey("b"))
	// "b" is already present, so only "c" and "d" are new here.
	k.AddKeys(NewKey("b", "c", "d"))
	g.Expect(k.IsEmpty()).To(gomega.BeFalse())
	g.Expect(len(k.Fields)).To(gomega.Equal(4))

	k = &Key{Fields: []string{"a"}}
	k.AddKeys(NewKey("b"))
	k.Add("b")
	k.Add("agg")
	g.Expect(k.IsEmpty()).To(gomega.BeFalse())
	g.Expect(len(k.Fields)).To(gomega.Equal(3))
}
package ed25519

import (
	"github.com/oasisprotocol/ed25519"
	"github.com/pkg/errors"
)

// Sizes (in bytes) re-exported from the underlying oasis ed25519 package.
const (
	PublicKeySize  = ed25519.PublicKeySize
	SignatureSize  = ed25519.SignatureSize
	PrivateKeySize = ed25519.PrivateKeySize
	SeedSize       = ed25519.SeedSize
)

var (
	// ErrNotEnoughBytes is a sentinel for undersized key material; its use
	// sites are outside this chunk.
	ErrNotEnoughBytes = errors.New("not enough bytes")
)

// GenerateKey creates a public/private key pair using the library's default
// entropy source (nil reader).
// NOTE(review): on error the returned keys are zero-valued; callers must
// check err before using them.
func GenerateKey() (publicKey PublicKey, privateKey PrivateKey, err error) {
	pub, priv, genErr := ed25519.GenerateKey(nil)
	copy(publicKey[:], pub)
	copy(privateKey[:], priv)
	err = genErr
	return
}

// GeneratePrivateKey creates a private key, discarding the matching public
// key.
func GeneratePrivateKey() (privateKey PrivateKey, err error) {
	_, privateKey, err = GenerateKey()
	return
}
package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "net/http" "os" "strconv" "strings" ) type Resp struct { Code int64 Msg string Redirect string } type ResonseData struct { TaskId int64 UniqueSourceId string RenderUri string SensorName string Status int64 UniqueTaskId string Source Source } type Result struct { Resp Data []ResonseData } type SourceTreeData struct { Resp Data TreeData } type Sensor struct { SensorName string Url string Latitude float64 Longitude float64 Comment string SensorSn string SensorType int UniqueRepoId string UniqueSensorId string Sources []Source } type SourceData struct { uri string SourceId string } type Repo struct { UniqueRepoId string Name string SensorCount int64 } type Source struct { Id int64 Type byte Uri string Name string Status byte SourceId string } type SensorInfo struct { AllSize int64 ReturnedSize int64 Sensors []Sensor } type AllSensor struct { Resp Data SensorInfo } type Task struct { UniqueSourceId string TypeId int64 DetectTypeIds []int AdditionalInfos map[string]string RuleSwitcher bool FramingStrategy int64 } type CommonQuery struct { Offset int32 Limit int32 } type SensorSource struct { CommonQuery CommonQuery UniqueRepoId string SourceTypes []int } type Tsource struct { Type int64 Uri string } type VideoQuery struct { Type int64 Sources []Tsource Sensor Sensor } type TreeData struct { Id int64 RepoId int64 UniqueRepoId string SensorCount int64 Name string Repos []Repo Sensors []Sensor } type SearchResult struct { AllSize int64 AllTaskIds []string } type SourceResult struct { Resp Data []Source } type TaskSearch struct { Resp Data SearchResult } var ( ip = flag.String("ip", "127.0.0.1", "deepvideo ip") port = flag.Int("port", 8899, "deepvideo port") file = flag.String("file", "rtsp.txt", "file path about rtsp list") command = flag.String("c", "", `command about api. 
[ getSourceList -ip 127.0.0.1 -repoId xxxx-xxxx-xxxx-xxxx add_sys_sensor_rtsp -ip 127.0.0.1 -repoId xxxx-xxxx-xxxx-xxxx -file rtsp.txt add_sys_sensor_video -ip 127.0.0.1 -repoId xxxx-xxxx-xxxx-xxxx -file rtsp.txt get_task_list -ip 127.0.0.1 -c get_task_list del_all_task -ip 127.0.0.1 -c del_all_task add_vehicle_task -ip 127.0.0.1 -repoId xxxx-xxxx-xxxx-xxxx add_face_task -ip 127.0.0.1 -repoId xxxx-xxxx-xxxx-xxxx add_kse_task -ip 127.0.0.1 -repoId xxxx-xxxx-xxxx-xxxx ] `) repoId = flag.String("repoId", "", "add task need repoId") ) func init() { flag.Set("alsologtostderr", "true") flag.Set("log_dir", "./tmp") flag.Set("logtostderr", "true") flag.Set("v", "1") } func main() { flag.Parse() if *ip == "" { printUsageErrorAndExit("no -ip specified. should not empty") } if *port == 0 { printUsageErrorAndExit("no -port specified. should not 0") } switch *command { case "get_task_list": get_task_list() case "add_vehicle_task": if *repoId == "" { printUsageErrorAndExit("no -repoId specified.add task need repoId,should not empty") } add_vehicle_task(getSourceByUniqueRepoId(*repoId)) case "add_face_task": if *repoId == "" { printUsageErrorAndExit("no -repoId specified.add task need repoId,should not empty") } add_face_task(getSourceByUniqueRepoId(*repoId)) case "add_kse_task": if *repoId == "" { printUsageErrorAndExit("no -repoId specified.add task need repoId,should not empty") } add_kse_task(getSourceByUniqueRepoId(*repoId)) case "del_all_task": del_all_task() case "add_sys_sensor_rtsp": if *repoId == "" { printUsageErrorAndExit("no -repoId specified.add task need repoId,should not empty") } add_sys_sensor_rtsp(*repoId) case "getTaskByType": getTaskByType("vehicle") case "getSourceList": if *repoId == "" { printUsageErrorAndExit("no -repoId specified.getSourceList need repoId,should not empty") } getSourceList(*repoId) case "add_sys_sensor_video": if *repoId == "" { printUsageErrorAndExit("no -repoId specified.add_sys_sensor_video need repoId,should not empty") } 
add_sys_sensor_video(*repoId) default: getRepoInfo() } } func getRepoInfo() { fmt.Println(strings.Repeat("******", 10)) fmt.Println("repoInfo below:") url := fmt.Sprintf("http://%s:%d/api/biz/repos/tree?WithSensor=true&WithSource=true&LimitLlsevel=1&UniqueRepoId=%s&SourceTypes=3&WithTypeReg=&timestamp=1550197932129", *ip, *port, "root") result, err := httpDo(url, "GET", []byte("")) if err != nil { } var sourceTreeData SourceTreeData err = json.Unmarshal([]byte(result), &sourceTreeData) if err != nil { } rootRepo := Repo{ UniqueRepoId: "root", SensorCount: sourceTreeData.Data.SensorCount, Name: sourceTreeData.Data.Name, } repoList := sourceTreeData.Data.Repos repoList = append(repoList, rootRepo) for _, repo := range repoList { fmt.Printf("repoId:%s-----sensorCount:%d-----repoName:%s\n", repo.UniqueRepoId, repo.SensorCount, repo.Name) } fmt.Println(strings.Repeat("******", 10)) } func getSourceList(uniqueRepoId string) { url := fmt.Sprintf("http://%s:%d/api/biz/repos/tree?WithSensor=true&WithSource=true&LimitLevel=1&UniqueRepoId=%s&SourceTypes=3&WithTypeReg=&timestamp=1550197932129", *ip, *port, uniqueRepoId) result, err := httpDo(url, "GET", []byte("")) if err != nil { } var sourceTreeData SourceTreeData json.Unmarshal([]byte(result), &sourceTreeData) sensorList := sourceTreeData.Data.Sensors for _, source := range sensorList { fmt.Println(source.SensorName, "\t", source.Url, "\t", source.Latitude, "\t", source.Longitude) } } func getSourceByUniqueRepoId(uniqueRepoId string) (sourceIdList []string) { url := fmt.Sprintf("http://%s:%d/api/biz/repos/tree?WithSensor=true&WithSource=true&LimitLevel=1&UniqueRepoId=%s&SourceTypes=3&WithTypeReg=&timestamp=1550197932129", *ip, *port, uniqueRepoId) result, err := httpDo(url, "GET", []byte("")) if err != nil { } var sourceTreeData SourceTreeData json.Unmarshal([]byte(result), &sourceTreeData) sensorList := sourceTreeData.Data.Sensors fmt.Println(len(sensorList)) for _, sensor := range sensorList { sourceIdList = 
append(sourceIdList, sensor.Sources[0].SourceId) } return } func getRepoList() { url := fmt.Sprintf("http://%s:%d/api/biz/sys/sensors/list", *ip, *port) param := `{"CommonQuery":{"Offset":0,"Limit":10},"UniqueRepoId":"root","SourceTypes":[3,4]}` //fmt.Printf("%T",param) resultResponse, err := httpDo(url, "POST", []byte(param)) if err != nil { return } var result AllSensor json.Unmarshal([]byte(resultResponse), &result) fmt.Println(result) } func getSensorsByUniqueRepoId(uniqueRepoId string) (uniqueSendorList []string) { commQuery := CommonQuery{ Offset: 0, Limit: 100, } url := fmt.Sprintf("http://%s:%d/api/biz/sys/sensors/list", *ip, *port) sensorSource := SensorSource{ CommonQuery: commQuery, UniqueRepoId: uniqueRepoId, SourceTypes: []int{3, 4}, } bytes, err := json.Marshal(sensorSource) if err != nil { } //fmt.Println(string(bytes)) result, err := httpDo(url, "POST", bytes) if err != nil { } var allSensor AllSensor json.Unmarshal([]byte(result), &allSensor) sensorList := allSensor.Data.Sensors for _, sensor := range sensorList { uniqueSendorList = append(uniqueSendorList, sensor.UniqueSensorId) } fmt.Println(uniqueSendorList) return } func getTaskByType(task_type string) { var tmp int64 if task_type == "face" { tmp = 1011 } else if task_type == "vehicle" { tmp = 2011 } else { fmt.Println("type err") os.Exit(400) } url := fmt.Sprintf("http://%s:%d/api/tasks?detectTypeId=%d", *ip, *port, tmp) result, err := httpDo(url, "GET", []byte("")) if err != nil { } var taskSearch TaskSearch json.Unmarshal([]byte(result), &taskSearch) //fmt.Println(taskSearch) } //todo 待完善source信息 func getSourceIdList() (sourceList []string) { url := fmt.Sprintf("http://%s:%d/api/source", *ip, *port) resp, err := http.Get(url) if err != nil { } defer resp.Body.Close() res, err := ioutil.ReadAll(resp.Body) var sourceResult SourceResult json.Unmarshal(res, &sourceResult) for _, v := range sourceResult.Data { sourceList = append(sourceList, v.SourceId) } fmt.Println(len(sourceList)) return } 
// get_task_list fetches all tasks, prints a formatted table of them, and
// returns the list of unique task ids.
func get_task_list() (uniqueTaskIdList []string, err error) {
	url := fmt.Sprintf("http://%s:%d/api/task", *ip, *port)
	resp, err := http.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	res, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	var result Result
	json.Unmarshal(res, &result) // BUG(review): unmarshal error ignored
	fmt.Println(strings.Repeat("******", 10))
	fmt.Println("taskID", "\t", "SensorName", "\t", "Status", "\t", "rtsp")
	for i := 0; i < len(result.Data); i++ {
		tmp := result.Data[i]
		fmt.Printf("%-d\t\t %-7s\t\t %-d\t\t\t%-s\n", tmp.TaskId, tmp.SensorName, tmp.Status, tmp.SourceData.Uri)
		uniqueTaskIdList = append(uniqueTaskIdList, tmp.UniqueTaskId)
	}
	fmt.Println(strings.Repeat("******", 10))
	return
}

// add_kse_task POSTs one KSE (vehicle detect types + rules) task per source.
// NOTE(review): `s` never grows — append(s, taskParam) builds a fresh
// one-element slice each iteration, so each POST carries a single task.
func add_kse_task(sourceList []string) {
	url := fmt.Sprintf("http://%s:%d/api/tasks", *ip, *port)
	var s []Task
	for _, v := range sourceList {
		taskParam := Task{
			UniqueSourceId:  v,
			TypeId:          3,
			DetectTypeIds:   []int{2011, 2012, 2013, 2015},
			RuleSwitcher:    true,
			FramingStrategy: 2,
			//AdditionalInfos: map[string]string{"kse": "true"},
		}
		sensors := append(s, taskParam)
		bytes, err := json.Marshal(sensors)
		if err != nil {
			fmt.Println(err)
		}
		result, err := httpDo(url, "POST", bytes)
		if err != nil {
			// BUG(review): request error silently ignored.
		}
		fmt.Println(result)
	}
}

// add_face_task POSTs one face-detection task (detect type 1011) per source.
func add_face_task(sourceList []string) {
	url := fmt.Sprintf("http://%s:%d/api/tasks", *ip, *port)
	var s []Task
	for _, v := range sourceList {
		taskParam := Task{
			UniqueSourceId: v,
			TypeId:         3,
			DetectTypeIds:  []int{1011},
		}
		sensors := append(s, taskParam)
		bytes, err := json.Marshal(sensors)
		if err != nil {
			fmt.Println(err)
		}
		result, err := httpDo(url, "POST", bytes)
		if err != nil {
			// BUG(review): request error silently ignored.
		}
		fmt.Println(result)
	}
}

// add_vehicle_task POSTs one vehicle-detection task per source
// (detect types 2011/2012/2013/2015, no rules).
func add_vehicle_task(sourceList []string) {
	url := fmt.Sprintf("http://%s:%d/api/tasks", *ip, *port)
	var s []Task
	for _, v := range sourceList {
		taskParam := Task{
			UniqueSourceId: v,
			TypeId:         3,
			DetectTypeIds:  []int{2011, 2012, 2013, 2015},
		}
		sensors := append(s, taskParam)
		bytes, err := json.Marshal(sensors)
		if err != nil {
			fmt.Println(err)
		}
		result, err := httpDo(url, "POST", bytes)
		if err != nil {
			// BUG(review): request error silently ignored.
		}
		fmt.Println(result)
	}
}

// del_all_task deletes every task returned by get_task_list, one DELETE
// request per task id.
func del_all_task() {
	uniqueTaskIdList, err := get_task_list()
	if err != nil {
		// BUG(review): listing error ignored; loop runs over an empty slice.
	}
	for _, uniqueTaskId := range uniqueTaskIdList {
		url := fmt.Sprintf("http://%s:%d/api/tasks?ids=%s", *ip, *port, uniqueTaskId)
		httpDo(url, "DELETE", []byte(""))
	}
}

// import_file reads the given file and returns its lines.
// BUG(review): the Replace call swaps "\n" for "\n" (a no-op); and on a read
// failure the shadowed err is dropped — the bare return yields (nil, nil),
// silently hiding the failure from callers.
func import_file(stream_file string) (res []string, err error) {
	if contentBytes, err := ioutil.ReadFile(stream_file); err == nil {
		result := strings.Replace(string(contentBytes), "\n", "\n", 1)
		splitResult := strings.Split(result, "\n")
		return splitResult, err
	}
	return
}

// add_sys_sensor_rtsp registers one RTSP sensor per non-empty line of *file.
// Each line is expected as: name url <field2> <field3>.
func add_sys_sensor_rtsp(repoId string) {
	url := fmt.Sprintf("http://%s:%d/api/biz/sensors", *ip, *port)
	fmt.Println(url)
	url_list, _ := import_file(*file)
	for _, sensor := range url_list {
		//tmp := strings.Split(sensor, " ")
		tmp := strings.Fields(sensor)
		//fmt.Println(tmp)
		//linux上有问题,mac上没有,多个[] (problematic on linux, not on mac: extra empty fields)
		if len(tmp) == 0 {
			continue
		}
		//fmt.Println("tmp----->>>>>",len(tmp))
		// NOTE(review): latitude comes from tmp[3] and longitude from tmp[2]
		// here, while the commented-out code in add_sys_sensor_video maps
		// tmp[2]->latitude — one of the two orderings is likely swapped;
		// confirm the input-file column order.
		latitude, _ := strconv.ParseFloat(tmp[3], 32)
		longitude, _ := strconv.ParseFloat(tmp[2], 32)
		sensor_param := Sensor{
			SensorName:   tmp[0],
			Url:          tmp[1],
			Latitude:     latitude,
			Longitude:    longitude,
			SensorType:   1,
			UniqueRepoId: repoId,
		}
		var s []Sensor
		data := append(s, sensor_param)
		fmt.Println("data--->", sensor_param)
		bytea, err := json.Marshal(data)
		if err != nil {
			fmt.Println("Marshal,err===>>", err)
		}
		result, err := httpDo(url, "POST", bytea)
		if err != nil {
			fmt.Println("err*******-->", err)
		}
		fmt.Println(result)
	}
}

// add_sys_sensor_video registers one video sensor per non-empty line of
// *file, naming them video-0, video-1, ... Only the second whitespace field
// (the URI) of each line is used.
func add_sys_sensor_video(repoId string) {
	url := fmt.Sprintf("http://%s:%d/api/biz/sensors/videos", *ip, *port)
	fmt.Println(url)
	url_list, _ := import_file(*file)
	count := 0
	for _, sensor := range url_list {
		videoName := "video-" + strconv.Itoa(count)
		tmp := strings.Fields(sensor)
		//linux上有问题,mac上没有,多个[] (problematic on linux, not on mac: extra empty fields)
		if len(tmp) == 0 {
			continue
		}
		//fmt.Println("tmp----->>>>>",len(tmp))
		//latitude, _ := strconv.ParseFloat(tmp[2], 32)
		//longitude, _ := strconv.ParseFloat(tmp[3], 32)
		sensor_param := Sensor{
			SensorName: videoName,
			//Url: tmp[1],
			Latitude:     0,
			Longitude:    0,
			SensorType:   3,
			UniqueRepoId: repoId,
		}
		video_param := VideoQuery{
			Type: 2,
			Sources: []Tsource{
				{Type: 2, Uri: tmp[1]},
			},
			Sensor: sensor_param,
		}
		//fmt.Println(video_param)
		var s []VideoQuery
		data := append(s, video_param)
		fmt.Println("data--->", video_param)
		// NOTE(review): only data[0] is marshaled — the slice wrapper is
		// pointless here; the endpoint apparently takes a single object.
		bytea, err := json.Marshal(data[0])
		fmt.Println(string(bytea))
		if err != nil {
			fmt.Println("Marshal,err===>>", err)
		}
		result, err := httpDo(url, "POST", bytea)
		if err != nil {
			fmt.Println("err*******-->", err)
		}
		count++
		fmt.Println(result)
	}
}

// httpDo performs an HTTP request with the given method and body and returns
// the response body as a string.
// SECURITY(review): a long-lived JWT is hard-coded into every request; it
// should come from configuration or the environment, not source code.
func httpDo(url string, methodType string, param []byte) (result string, err error) {
	client := &http.Client{} // NOTE(review): no timeout set — requests can hang forever.
	request, err := http.NewRequest(methodType, url, strings.NewReader(string(param)))
	if err != nil {
		return
	}
	request.Header.Set("Authorization", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1ODQzMDIwNTAsImlkIjoiYWRtaW4iLCJvcmlnX2lhdCI6MTUzMzkwMjA1MH0.86owJoyHXTF5tikrFoQpDuDA-UJve_GWcq7qAvKBcn8")
	response, err := client.Do(request)
	if err != nil {
		fmt.Println("http do err", err)
		return
	}
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		fmt.Println("--------->", err)
		return
	}
	result = string(body)
	return
}

// printUsageErrorAndExit prints the message plus flag usage and exits with
// the conventional usage-error code 64 (EX_USAGE).
func printUsageErrorAndExit(message string) {
	fmt.Fprintln(os.Stderr, "ERROR:", message)
	fmt.Fprintln(os.Stderr)
	fmt.Fprintln(os.Stderr, "Available command line options:")
	flag.PrintDefaults()
	os.Exit(64)
}

// printErrorAndExit prints a formatted error to stderr and exits with the
// given code.
func printErrorAndExit(code int, format string, values ...interface{}) {
	fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
	fmt.Fprintln(os.Stderr)
	os.Exit(code)
}
package middleware

import (
	"log"
	"time"

	pb "github.com/jfeng45/grpcservice"
	"github.com/sony/gobreaker"
	"golang.org/x/net/context"
)

// cb is the package-wide circuit breaker shared by every CircuitBreakerCallGet.
var cb *gobreaker.CircuitBreaker

// CircuitBreakerCallGet decorates a callGetter with circuit-breaker behavior:
// once enough recent calls fail, further calls short-circuit until the breaker
// times out and half-opens again.
type CircuitBreakerCallGet struct {
	Next callGetter
}

func init() {
	var st gobreaker.Settings
	st.Name = "CircuitBreakerCallGet"
	// Allow at most 2 probe requests while the breaker is half-open.
	st.MaxRequests = 2
	// Timeout is a time.Duration: how long the breaker stays open before
	// switching to half-open. The original assigned the bare literal 10 —
	// i.e. 10 *nanoseconds* — which made the breaker half-open effectively
	// immediately, defeating its purpose. 10 seconds is the intended value.
	st.Timeout = 10 * time.Second
	// Trip once we have at least 2 requests and >= 60% of them failed.
	st.ReadyToTrip = func(counts gobreaker.Counts) bool {
		failureRatio := float64(counts.TotalFailures) / float64(counts.Requests)
		return counts.Requests >= 2 && failureRatio >= 0.6
	}
	cb = gobreaker.NewCircuitBreaker(st)
}

// CallGet forwards the request through the circuit breaker. When the wrapped
// call fails (or the breaker is open), it logs the error and returns the
// default value []byte{0} with a nil error instead of propagating the failure.
func (tcg *CircuitBreakerCallGet) CallGet(ctx context.Context, key string, c pb.CacheServiceClient) (
	[]byte, error) {
	var err error
	var value []byte
	var serviceUp bool
	log.Printf("state:%v", cb.State().String())
	_, err = cb.Execute(func() (interface{}, error) {
		value, err = tcg.Next.CallGet(ctx, key, c)
		if err != nil {
			return nil, err
		}
		serviceUp = true
		return value, nil
	})
	if !serviceUp {
		//return a default value here. You can also run a downgrade function here
		log.Printf("circuit breaker return error:%v\n", err)
		return []byte{0}, nil
	}
	return value, nil
}
package main

import (
	netHttp "net/http"
	"sync"
	"time"

	"github.com/kelseyhightower/envconfig"
	"github.com/m-zajac/goprojectdemo/internal/adapter/github"
	"github.com/m-zajac/goprojectdemo/internal/api/grpc"
	"github.com/m-zajac/goprojectdemo/internal/api/http"
	"github.com/m-zajac/goprojectdemo/internal/api/http/limiter"
	"github.com/m-zajac/goprojectdemo/internal/app"
	"github.com/m-zajac/goprojectdemo/internal/database"
	"github.com/sirupsen/logrus"
)

// main wires the application together: config, rate-limited GitHub client
// with bolt-backed staleness and an in-memory cache, the app service, and
// both HTTP and gRPC servers, then blocks until the servers exit.
// (Fixed the "coludn't" typos in the fatal log messages — the last two
// messages already spelled "couldn't" correctly.)
func main() {
	l := logrus.New()
	l.Level = logrus.InfoLevel

	// Configuration comes entirely from environment variables.
	var conf Config
	if err := envconfig.Process("", &conf); err != nil {
		l.Fatalf("couldn't parse config: %v", err)
	}

	// One shared HTTP client, wrapped with a rate limiter for the GitHub API.
	httpClient := &netHttp.Client{
		Timeout: 30 * time.Second,
	}
	limitedHTTPClient := limiter.NewHTTPDoer(
		httpClient,
		conf.GithubAPIRateLimit,
	)

	// Bolt-backed key-value store used to serve stale GitHub data.
	kvStore, err := database.NewBoltKVStore(
		conf.GithubDBPath,
		conf.GithubDBBucketName,
	)
	if err != nil {
		l.Fatalf("couldn't create bolt kv store: %v", err)
	}
	defer kvStore.Close()

	// GitHub client chain: raw -> stale-data (bolt) -> LRU cache.
	githubClient := github.NewClient(
		limitedHTTPClient,
		conf.GithubAPIAddress,
		conf.GithubAPIToken,
	)
	githubStaleDataClient, err := github.NewClientWithStaleData(
		githubClient,
		kvStore,
		conf.GithubDBDataTTL,
		conf.GithubDBDataRefreshTTL,
		l.WithField("component", "githubStaleDataClient"),
	)
	if err != nil {
		l.Fatalf("couldn't create github db client: %v", err)
	}
	githubStaleDataClient.RunScheduler()
	defer githubStaleDataClient.Close()

	githubCachedClient, err := github.NewCachedClient(
		githubStaleDataClient,
		conf.GithubClientCacheSize,
		conf.GithubClientCacheTTL,
	)
	if err != nil {
		l.Fatalf("couldn't create github client cache: %v", err)
	}

	service := app.NewService(
		githubCachedClient,
		conf.ServiceResponseTimeout,
	)

	// HTTP API server.
	mux := http.NewMux(service, 60*time.Second, l.WithField("component", "mux"))
	server := http.NewServer(
		conf.HTTPServerAddress,
		conf.HTTPProfileServerAddress,
		mux,
		l.WithField("component", "httpServer"),
	)

	// gRPC API server.
	grpcService := grpc.NewService(service)
	grpcServer := grpc.NewServer(
		grpcService,
		conf.GRPCServerAddress,
		l.WithField("component", "grpcServer"),
	)

	// Run both servers concurrently and wait for both to finish.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		server.Run()
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		if err := grpcServer.Run(); err != nil {
			l.Fatalf("couldn't run grpc server: %v", err)
		}
		wg.Done()
	}()
	wg.Wait()
}
package main

import (
	"fmt"
	"strings"
	"testing"
)

func TestCompress(t *testing.T) {
	tests := map[string]struct {
		str  string
		want string
	}{
		"1": {
			str:  "aabcccccaaa",
			want: "a2b1c5a3",
		},
		"2": {
			str:  "abca",
			want: "abca",
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			if got := compress(tt.str); got != tt.want {
				t.Errorf("got %v, want %v", got, tt.want)
			}
			if got := compress2(tt.str); got != tt.want {
				t.Errorf("got %v, want %v", got, tt.want)
			}
		})
	}
}

// compress run-length encodes s ("aabcccccaaa" -> "a2b1c5a3"); if the encoded
// form is longer than the input, the input is returned unchanged.
//
// Algorithm: walk the characters with a sentinel "" appended (see
// targetStrSlice), tracking the previous character (prev) and its run length
// (cnt); each time the character changes, flush "<prev><cnt>" to the output.
// (Removed the leftover debug fmt.Println calls that polluted stdout.)
func compress(s string) string {
	ns := ""
	prev := ""
	cnt := 0
	for _, v := range targetStrSlice(s) {
		if v == prev {
			cnt++
			continue
		}
		if prev != "" { // prev is "" only before the first character; nothing to flush yet
			ns = fmt.Sprintf("%s%s%d", ns, prev, cnt)
		}
		cnt = 1
		prev = v
	}
	if len(ns) > len(s) {
		return s
	}
	return ns
}

// compress2 is behaviorally identical to compress but collects the runs in a
// slice and joins once at the end, which avoids re-building the accumulator
// string on every flush and is therefore faster.
func compress2(s string) string {
	var nss []string // encoded runs, e.g. ["a2", "b1", ...]
	prev := ""
	cnt := 0
	for _, v := range targetStrSlice(s) {
		if v == prev {
			cnt++
			continue
		}
		if prev != "" { // skip the initial sentinel prev
			nss = append(nss, fmt.Sprintf("%s%d", prev, cnt))
		}
		cnt = 1
		prev = v
	}
	ns := strings.Join(nss, "")
	if len(ns) > len(s) {
		return s
	}
	return ns
}

// targetStrSlice splits s into single characters and appends one empty string
// as a sentinel so the caller's loop flushes the final run: without it,
// "aabbbccdd" would stop at "a2b3c2"; the trailing "" forces the last run of
// d's to be emitted too.
func targetStrSlice(s string) []string {
	return append(strings.Split(s, ""), "")
}
// Copyright [2015] [Ignazio Ferreira]
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package compatibility compares two compiled protobuf FileDescriptorSets
// and reports wire-compatibility differences (errors) and benign changes
// (warnings).
package compatibility

import (
	"fmt"
	"github.com/gogo/protobuf/parser"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
	"os"
	"strconv"
	"strings"
)

// check panics on any non-nil error; used for fatal CLI failures in main.
func check(err error) {
	if err != nil {
		panic(err)
	}
}

// GetDescriptor resolves a dot-separated message path (e.g. ".pkg.Msg.Inner")
// to its DescriptorProto within the FileDescriptorSet, or nil if not found.
func GetDescriptor(path string, f *descriptor.FileDescriptorSet) *descriptor.DescriptorProto {
	pathA := strings.Split(path, ".")
	for _, v1 := range f.File {
		out := getDescriptor(pathA, v1.MessageType)
		if out != nil {
			return out
		}
	}
	return nil
}

// getDescriptor recursively walks nested message types following the
// remaining path components.
func getDescriptor(path []string, d []*descriptor.DescriptorProto) *descriptor.DescriptorProto {
	for _, val := range d {
		c := 0
		// Skip empty components caused by a leading "." in the type name.
		// NOTE(review): if every component were empty this would index past
		// the end of path and panic — assumed not to happen for valid input.
		for ; path[c] == ""; c++ {
		}
		if val.GetName() == path[c] {
			if len(path)-c == 1 {
				return val
			} else {
				return getDescriptor(path[c+1:], val.NestedType)
			}
		}
	}
	return nil
}

// Condition classifies the kind of difference detected between two protos.
type Condition int

const (
	ChangedLabel            Condition = 1
	AddedField              Condition = 2
	RemovedField            Condition = 3
	ChangedName             Condition = 4
	ChangedType             Condition = 5
	ChangedNumber           Condition = 6
	ChangedDefault          Condition = 7
	ChangedTypeName         Condition = 8
	NonFieldIncompatibility Condition = 9
)

// Difference is one detected change: what changed (condition), the old/new
// values, where it happened (path), the field tag or name (qualifier), and
// an optional free-form message.
type Difference struct {
	condition Condition
	newValue  string
	oldValue  string
	path      string
	qualifier string
	message   string
}

// String renders a human-readable description for each Condition.
func (d *Difference) String() string {
	path := ""
	if d.path == "" {
		path = "."
	} else {
		path = d.path
	}
	if d.condition == ChangedLabel {
		return "Changed label of field nr " + d.qualifier + " in " + path + " from " + d.oldValue + " to " + d.newValue
	} else if d.condition == AddedField {
		return "Added Field nr " + d.qualifier + " in " + path + " of label " + d.newValue + d.message
	} else if d.condition == RemovedField {
		return "Removed Field nr " + d.qualifier + " in " + path + " of label " + d.newValue + d.message
	} else if d.condition == ChangedName {
		return "Changed name of field nr " + d.qualifier + " in " + path + " from " + d.oldValue + " to " + d.newValue
	} else if d.condition == ChangedType {
		return "Changed type of field nr " + d.qualifier + " in " + path + " from " + d.oldValue + " to " + d.newValue
	} else if d.condition == ChangedNumber {
		return "Changed numeric tag of field named \"" + d.qualifier + "\" in " + path + " from " + d.oldValue + " to " + d.newValue
	} else if d.condition == ChangedDefault {
		return "Changed default value of field nr " + d.qualifier + " in " + path + " from " + d.oldValue + " to " + d.newValue + " this is generally OK"
	} else if d.condition == NonFieldIncompatibility {
		return d.message
	} else if d.condition == ChangedTypeName {
		return "Changed TypeName of field " + d.qualifier + " from " + d.oldValue + " to " + d.newValue + " in " + path + " manually compare message types using compare message method"
	}
	return ""
}

// DifferenceList accumulates detected differences, split into hard
// incompatibilities (Error), benign changes (Warning), and extension-related
// differences (Extension).
type DifferenceList struct {
	Error     []Difference
	Warning   []Difference
	Extension []Difference
}

// addWarning records a benign difference.
func (d *DifferenceList) addWarning(c Condition, newValue, oldValue, path, qualifier, message string) {
	d1 := Difference{c, newValue, oldValue, path, qualifier, message}
	d.Warning = append(d.Warning, d1)
}

// addError records a breaking difference.
func (d *DifferenceList) addError(c Condition, newValue, oldValue, path, qualifier, message string) {
	d1 := Difference{c, newValue, oldValue, path, qualifier, message}
	d.Error = append(d.Error, d1)
}

// merge appends d2's errors and warnings into d1.
func (d1 *DifferenceList) merge(d2 DifferenceList) {
	d1.Error = append(d1.Error, d2.Error...)
	d1.Warning = append(d1.Warning, d2.Warning...)
}

// mergeExt appends d2's errors into d1's Extension list.
// NOTE(review): only d2.Error is merged; d2.Warning is dropped here.
func (d1 *DifferenceList) mergeExt(d2 DifferenceList) {
	d1.Extension = append(d1.Extension, d2.Error...)
}

// String renders the list; warnings can be suppressed.
func (d *DifferenceList) String(suppressWarning bool) string {
	var output string = ""
	if !suppressWarning && d.Warning != nil {
		output = output + "WARNING\n"
		for _, val := range d.Warning {
			output = output + val.String() + "\n"
		}
	}
	if d.Error != nil {
		output = output + "INCOMPATIBILITIES\n"
		for _, val := range d.Error {
			output = output + val.String() + "\n"
		}
	}
	return output
}

// IsCompatible reports whether no breaking differences were found.
func (d *DifferenceList) IsCompatible() bool {
	if d.Error == nil {
		return true
	}
	return false
}

// Comparer holds the two descriptor sets being compared.
type Comparer struct {
	Newer *descriptor.FileDescriptorSet
	Older *descriptor.FileDescriptorSet
}

// appendExtensions folds every extension field (file-level and message-level)
// into the Field list of the message it extends, so the normal field
// comparison also covers extensions. Note: this mutates the descriptor sets.
// NOTE(review): the Older loop nil-checks each ext but the Newer loop does
// not — asymmetric; confirm whether nil extensions can occur in Newer too.
func (c *Comparer) appendExtensions() {
	for _, val := range c.Newer.File {
		for _, ext := range val.Extension {
			d := GetDescriptor(*ext.Extendee, c.Newer)
			d.Field = append(d.Field, ext)
		}
		for _, message := range val.MessageType {
			apndext(message, c.Newer)
		}
	}
	for _, val := range c.Older.File {
		for _, ext := range val.Extension {
			if ext != nil {
				d := GetDescriptor(*ext.Extendee, c.Older)
				d.Field = append(d.Field, ext)
			}
		}
		for _, message := range val.MessageType {
			apndext(message, c.Older)
		}
	}
}

// apndext recursively appends a message's extension declarations into the
// extended message's Field list, descending into nested types.
func apndext(d *descriptor.DescriptorProto, c *descriptor.FileDescriptorSet) {
	for _, ext := range d.Extension {
		d := GetDescriptor(*ext.Extendee, c)
		d.Field = append(d.Field, ext)
	}
	for _, msg := range d.NestedType {
		apndext(msg, c)
	}
}

// Compare matches files by package name between Newer and Older and returns
// every difference found; added/removed files are reported as warnings.
func (c *Comparer) Compare() DifferenceList {
	c.appendExtensions()
	var output DifferenceList
	for _, val1 := range c.Newer.File { //loop through both arrays to see which fields existed in the older version too and which were newly added
		exist := false
		for _, val2 := range c.Older.File {
			if val1.GetPackage() == val2.GetPackage() {
				exist = true
				output.merge(getChangesDP(val1.MessageType, val2.MessageType, "", *c)) //if proto exists in both files, compare it
			}
		}
		if !exist {
			output.addWarning(NonFieldIncompatibility, "", "", "", "",
				"Added proto file "+strings.Split(val1.GetName(), ".")[0])
		}
	}
	for _, val1 := range c.Older.File {
		exist := false
		for _, val2 := range c.Newer.File {
			if val1.GetPackage() == val2.GetPackage() {
				exist = true
			}
		}
		if !exist {
			output.addWarning(NonFieldIncompatibility, "", "", "", "", "Removed proto file "+strings.Split(val1.GetName(), ".")[0]) //if it exists only in the old proto, it has been removed
		}
	}
	return output
}

// getChangesDP compares two sets of message descriptors by name, recursing
// into fields, nested types, and enums; added/removed messages are warnings.
func getChangesDP(newer, older []*descriptor.DescriptorProto, path string, c Comparer) DifferenceList {
	var output DifferenceList
	for _, val1 := range newer {
		exist := false
		for _, val2 := range older {
			if val1.GetName() == val2.GetName() {
				exist = true
				output.merge(getChangesFieldDP(val1.Field, val2.Field, val1.ExtensionRange, val2.ExtensionRange, path+"."+val1.GetName(), c))
				output.merge(getChangesDP(val1.NestedType, val2.NestedType, path+"."+val1.GetName(), c))
				output.merge(getChangesEDP(val1.EnumType, val2.EnumType, path+"."+val1.GetName()))
			}
		}
		if !exist {
			output.addWarning(NonFieldIncompatibility, "", "", "", "", "Added message "+val1.GetName()+" in "+path)
		}
	}
	for _, val1 := range older {
		exist := false
		for _, val2 := range newer {
			if val1.GetName() == val2.GetName() {
				exist = true
			}
		}
		if !exist {
			output.addWarning(NonFieldIncompatibility, "", "", "", "", "Removed message "+val1.GetName()+" in "+path)
		}
	}
	return output
}

// getChangesEDP compares enum descriptors by name; added/removed enums are
// warnings, and matching enums have their values compared.
func getChangesEDP(newer, older []*descriptor.EnumDescriptorProto, path string) DifferenceList {
	var output DifferenceList
	for _, val1 := range newer {
		exist := false
		for _, val2 := range older {
			if val1.GetName() == val2.GetName() {
				exist = true
				output.merge(getChangesEVDP(val1.Value, val2.Value, path+"."+val1.GetName()))
			}
		}
		if !exist {
			output.addWarning(NonFieldIncompatibility, "", "", "", "", "Added enum "+val1.GetName()+" in "+path)
		}
	}
	for _, val1 := range older {
		exist := false
		for _, val2 := range newer {
			if val1.GetName() == val2.GetName() {
				exist = true
			}
		}
		if !exist {
			output.addWarning(NonFieldIncompatibility, "", "",
				"", "", "Removed enum "+val1.GetName()+" in "+path)
		}
	}
	return output
}

// getChangesFieldDP matches fields primarily by numeric tag (comparing
// matched pairs with compareFields), reports added/removed required fields
// as errors, and finally warns when a field kept its name but changed tag.
// NOTE(review): newEx/oldEx (extension ranges) are accepted but never used
// in this function body — isExtension below is likewise unreferenced here.
func getChangesFieldDP(newer, older []*descriptor.FieldDescriptorProto, newEx, oldEx []*descriptor.DescriptorProto_ExtensionRange, path string, c Comparer) DifferenceList {
	var output DifferenceList
	for _, val1 := range newer { //loop through both arrays to see which fields existed in the older version too and which were newly added
		exist := false
		for _, val2 := range older {
			if val1.GetNumber() == val2.GetNumber() { //if message exists in both, check label, numeric tag and type for dissimilarities
				exist = true
				output.merge(compareFields(*val1, *val2, path, c))
			}
		}
		if !exist {
			if val1.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REQUIRED {
				output.addError(AddedField, val1.Label.String(), "", path, strconv.Itoa(int(*val1.Number)), "")
			}
		}
	}
	for _, val1 := range older {
		exist := false
		for _, val2 := range newer {
			if val1.GetNumber() == val2.GetNumber() {
				exist = true
			}
		}
		if !exist {
			if val1.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REQUIRED {
				output.addError(RemovedField, val1.Label.String(), "", path, strconv.Itoa(int(*val1.Number)), "")
			} else {
				output.addWarning(RemovedField, val1.Label.String(), "", path, strconv.Itoa(int(*val1.Number)), " consider pathing \"OBSOLETE_\" instead")
			}
		}
	}
	for _, val1 := range newer {
		for _, val2 := range older {
			if val1.GetName() == val2.GetName() {
				if val1.GetNumber() != val2.GetNumber() {
					output.addWarning(ChangedNumber, strconv.Itoa(int(*val1.Number)), strconv.Itoa(int(*val2.Number)), path, val1.GetName(), "")
				}
			}
		}
	}
	return output
}

// compareFields diffs two same-tag fields: label changes involving REQUIRED
// are errors; name changes warn; type changes are errors unless both types
// fall in the same wire-compatible family (varint ints/bool, sint32/64,
// string/bytes, fixed/sfixed 32/64); default-value and type-name changes
// warn, with type-name changes triggering a recursive message comparison.
func compareFields(val1, val2 descriptor.FieldDescriptorProto, path string, c Comparer) DifferenceList {
	var output DifferenceList
	if val1.Label.String() != val2.Label.String() { //If field label changed add it to differences
		if val1.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REQUIRED {
			output.addError(ChangedLabel, val1.Label.String(), val2.Label.String(), path, strconv.Itoa(int(*val1.Number)), "")
		} else if val2.GetLabel() == descriptor.FieldDescriptorProto_LABEL_REQUIRED {
			output.addError(ChangedLabel, val1.Label.String(), val2.Label.String(), path, strconv.Itoa(int(*val1.Number)), "")
		} else {
			output.addWarning(ChangedLabel, val1.Label.String(), val2.Label.String(), path, strconv.Itoa(int(*val1.Number)), "")
		}
	}
	if val1.GetName() != val2.GetName() {
		output.addWarning(ChangedName, val1.GetName(), val2.GetName(), path, strconv.Itoa(int(*val1.Number)), "")
	}
	if *val1.Type != *val2.Type {
		compatible := false
		// Varint family: int32/int64/uint32/uint64/bool interconvert on the wire.
		if *val1.Type == descriptor.FieldDescriptorProto_TYPE_INT32 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_INT64 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_UINT32 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_UINT64 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_BOOL {
			if *val2.Type == descriptor.FieldDescriptorProto_TYPE_INT32 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_INT64 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_UINT32 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_UINT64 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_BOOL {
				compatible = true
			}
		}
		// ZigZag family: sint32/sint64.
		if *val1.Type == descriptor.FieldDescriptorProto_TYPE_SINT32 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_SINT64 {
			if *val2.Type == descriptor.FieldDescriptorProto_TYPE_SINT32 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_SINT64 {
				compatible = true
			}
		}
		// Length-delimited family: string/bytes.
		if *val1.Type == descriptor.FieldDescriptorProto_TYPE_STRING ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_BYTES {
			if *val2.Type == descriptor.FieldDescriptorProto_TYPE_STRING ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_BYTES {
				compatible = true
			}
		}
		// Fixed-width family: fixed/sfixed 32/64.
		// NOTE(review): 32-bit and 64-bit fixed types use different wire
		// types; treating all four as mutually compatible may be too lax.
		if *val1.Type == descriptor.FieldDescriptorProto_TYPE_FIXED32 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_FIXED64 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_SFIXED32 ||
			*val1.Type == descriptor.FieldDescriptorProto_TYPE_SFIXED64 {
			if *val2.Type == descriptor.FieldDescriptorProto_TYPE_FIXED32 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_FIXED64 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_SFIXED32 ||
				*val2.Type == descriptor.FieldDescriptorProto_TYPE_SFIXED64 {
				compatible = true
			}
		}
		if compatible {
			output.addWarning(ChangedType, val1.Type.String(), val2.Type.String(), path, strconv.Itoa(int(*val1.Number)), "")
		} else {
			output.addError(ChangedType, val1.Type.String(), val2.Type.String(), path, strconv.Itoa(int(*val1.Number)), "")
		}
	}
	// BUG(review): DefaultValue is a *string, so this compares POINTERS, not
	// values — two equal defaults in distinct descriptors still trigger the
	// warning; GetDefaultValue() comparison was probably intended.
	if val1.DefaultValue != val2.DefaultValue {
		output.addWarning(ChangedDefault, val1.GetDefaultValue(), val2.GetDefaultValue(), path, strconv.Itoa(int(*val1.Number)), "")
	}
	if val1.GetTypeName() != val2.GetTypeName() {
		output.addWarning(ChangedTypeName, *val1.TypeName, *val2.TypeName, path, strconv.Itoa(int(*val1.Number)), "")
		// Recursively compare the two (differently-named) message types.
		d1 := GetDescriptor(val1.GetTypeName(), c.Newer)
		d2 := GetDescriptor(val2.GetTypeName(), c.Older)
		output.merge(getChangesFieldDP(d1.Field, d2.Field, d1.ExtensionRange, d2.ExtensionRange, path+"."+d1.GetName(), c))
		output.merge(getChangesDP(d1.NestedType, d2.NestedType, path+"."+d1.GetName(), c))
		output.merge(getChangesEDP(d1.EnumType, d2.EnumType, path+"."+d1.GetName()))
	}
	return output
}

// getChangesEVDP compares enum values by name; any added or removed enum
// value is reported as an error.
func getChangesEVDP(newer, older []*descriptor.EnumValueDescriptorProto, path string) DifferenceList {
	var output DifferenceList
	for _, val1 := range newer {
		exist := false
		for _, val2 := range older {
			if val1.GetName() == val2.GetName() {
				exist = true
			}
		}
		if !exist {
			output.addError(AddedField, val1.GetName(), "", path, strconv.Itoa(int(*val1.Number)), "")
		}
	}
	for _, val1 := range older {
		exist := false
		for _, val2 := range newer {
			if val1.GetName() == val2.GetName() {
				exist = true
			}
		}
		if !exist {
			output.addError(RemovedField, val1.GetName(), "", path, strconv.Itoa(int(*val1.Number)), "")
		}
	}
	return output
}

// isExtension reports whether tag falls inside any declared extension range.
// NOTE(review): not referenced anywhere in this chunk.
func isExtension(tag int, ext []*descriptor.DescriptorProto_ExtensionRange) bool {
	for _, val1 := range ext {
		if tag >= int(*val1.Start) && tag <= int(*val1.End) {
			return true
		}
	}
	return false
}

// main: with 4 or 5 CLI args, parses {proto1} {deps1} {proto2} {deps2}
// (deps ":"-separated) and compares newer vs older; with no args, compares
// two hard-coded test protos. Exits 1 if incompatibilities are found.
// NOTE(review): the len==5 and len==6 branches are identical except for
// Print vs Println of the report.
func main() {
	if len(os.Args) == 5 {
		newer, err1 := parser.ParseFile(os.Args[1], strings.Split(os.Args[2], ":")...)
		check(err1)
		older, err2 := parser.ParseFile(os.Args[3], strings.Split(os.Args[4], ":")...)
		check(err2)
		c := Comparer{newer, older}
		d := c.Compare()
		fmt.Print(d.String(false))
		if d.Error != nil {
			os.Exit(1)
		}
	} else if len(os.Args) == 6 {
		newer, err1 := parser.ParseFile(os.Args[1], strings.Split(os.Args[2], ":")...)
		check(err1)
		older, err2 := parser.ParseFile(os.Args[3], strings.Split(os.Args[4], ":")...)
		check(err2)
		c := Comparer{newer, older}
		d := c.Compare()
		fmt.Println(d.String(false))
		if d.Error != nil {
			os.Exit(1)
		}
	} else if len(os.Args) == 1 {
		newer, err1 := parser.ParseFile("./ExtensionProtos/Changes/p.proto", "./ExtensionProtos/Changes")
		check(err1)
		older, err2 := parser.ParseFile("./ExtensionProtos/p.proto", "./ExtensionProtos/")
		check(err2)
		c := Comparer{newer, older}
		d := c.Compare()
		fmt.Print(d.String(false))
		if d.Error != nil {
			os.Exit(1)
		}
	} else {
		fmt.Println(len(os.Args))
		fmt.Println("Use either 0 parameters for hard coded imports or 4,5 paramters to pass relative filepath")
		fmt.Println("Use parameters {proto path 1} {proto 1 dependancies} {proto path 2} {proto 2 dependancies} if there is more than 1 dependency for a proto seperate them by \":\"")
		os.Exit(1)
	}
}
// Package model defines the core flow-engine data types: messages, reactor
// events, flow metadata/settings, and runtime context/statistics.
package model

import (
	"github.com/futurehomeno/fimpgo"
	"time"
)

// MsgPipeline delivers inbound Messages to a flow.
type MsgPipeline chan Message

// FlowRunner executes a flow in response to a ReactorEvent.
type FlowRunner func(ReactorEvent)

// Message is one FIMP message flowing through the system, carrying both the
// parsed payload and the raw bytes it was decoded from.
type Message struct {
	AddressStr string
	Address    fimpgo.Address
	Payload    fimpgo.FimpMessage
	RawPayload []byte
	Header     map[string]string
	CancelOp   bool // if true , listening end should close all operations
}

// ReactorEvent is what a trigger/reactor emits into a running flow: the
// message (or an error) plus which node transition produced it.
type ReactorEvent struct {
	Msg              Message
	Err              error
	TransitionNodeId NodeID
	SrcNodeId        NodeID
}

// Setting is a single user-configurable flow setting.
// BUG(review): the struct tags below are `json:"omitempty"` — that RENAMES
// each field to "omitempty" in JSON instead of omitting empty values; the
// intended tag is `json:",omitempty"`. Left as-is here (tags are code).
type Setting struct {
	Value       interface{}
	ValueType   string // only simple types supported - int,string,float,bool
	Description string `json:"omitempty"` // Human readable description
	InitVar     bool   `json:"omitempty"` // If set , flow will init variable during startup
	TVarSType   string `json:"omitempty"` // Target variable storage type - mem_local, disk_local,disk_global
	TVarPType   string `json:"omitempty"` // Target variable payload type - int , string , float , bool
}

// String returns Value as a string, or "" when Value is not a string
// (the type-assertion failure is deliberately swallowed).
func (s *Setting) String() string {
	r, _ := s.Value.(string)
	return r
}

// FlowMeta is the persisted definition of a flow: identity, authorship,
// node graph, settings, and execution policy.
type FlowMeta struct {
	Id                string // Instance id . Is different for every instance
	ClassId           string // Class id , all instances share the same ClassId
	Author            string
	Version           int
	CreatedAt         time.Time
	UpdatedAt         time.Time
	Name              string
	Group             string
	Description       string
	Nodes             []MetaNode
	Settings          map[string]Setting
	IsDisabled        bool
	IsDefault         bool   // default flows are read only and can't be deleted
	ParallelExecution string // keep_first , keep_last , parallel
}

// Control signals and the allowed ParallelExecution policy values.
const (
	SIGNAL_STOP              = 1
	SIGNAL_TERMINATE_WAITING = 2 // Signal to terminate all waiting nodes but not trigger nodes

	ParallelExecutionKeepFirst = "keep_first"
	ParallelExecutionKeepLast  = "keep_last"
	ParallelExecutionParallel  = "parallel"
)

// FlowOperationalContext is the shared runtime state of one flow instance:
// control channels, lifecycle flags, and filesystem locations.
type FlowOperationalContext struct {
	FlowMeta                    *FlowMeta
	FlowId                      string
	IsFlowRunning               bool
	State                       string
	TriggerControlSignalChannel chan int // the channel should be used to stop all waiting nodes .
	NodeControlSignalChannel    chan int
	NodeIsReady                 chan bool // Flow should notify message router when next node is ready to process new message .
	StoragePath                 string
	ExtLibsDir                  string
}

// FlowStatsReport is a point-in-time snapshot of a flow's execution state,
// used for monitoring/inspection.
type FlowStatsReport struct {
	CurrentNodeId           NodeID
	CurrentNodeLabel        string
	NumberOfNodes           int
	NumberOfTriggers        int
	NumberOfActiveTriggers  int
	NumberOfActiveSubflows  int
	State                   string
	StartedAt               time.Time
	WaitingSince            time.Time
	LastExecutionTime       int64
}
// Package services contains the product business-logic layer, delegating
// persistence to the repositories package.
package services

import (
	"errors"
	"github.com/astaxie/beego/validation"
	"homework/models/datamodels"
	"homework/models/repositories"
	"log"
)

// IProductService is the product service contract consumed by handlers.
type IProductService interface {
	GetProductByID(int64) (*datamodels.Product, error)
	GetAllProduct(int, int) ([]datamodels.Product, int, error)
	GetAllProductInfo(int, int) (map[int]map[string]string, int, error)
	DeleteProductByID(int64) bool
	InsertProduct(product *datamodels.Product) (int64, error)
	UpdateProduct(product *datamodels.Product) error
	SubNumberOne(productID int64) error
	GetProductByshop(shopID int64, pagenum int, limit int) ([]datamodels.Product, int, error)
}

// ProductService implements IProductService over an IProduct repository.
type ProductService struct {
	productRepository repositories.IProduct
}

//初始化函数 (constructor)
// NewProductService wraps the given repository in a ProductService.
func NewProductService(repository repositories.IProduct) IProductService {
	return &ProductService{repository}
}

// GetAllProductInfo returns a page of product info maps; pagenum is 1-based.
func (p *ProductService) GetAllProductInfo(pagenum int, limit int) (map[int]map[string]string, int, error) {
	return p.productRepository.SelectAllInfo((pagenum-1)*limit, limit)
}

// GetProductByID looks up a single product by primary key.
func (p *ProductService) GetProductByID(productID int64) (*datamodels.Product, error) {
	return p.productRepository.SelectByKey(productID)
}

// GetAllProduct returns a page of products; pagenum is 1-based.
func (p *ProductService) GetAllProduct(pagenum int, limit int) ([]datamodels.Product, int, error) {
	return p.productRepository.SelectAll((pagenum-1)*limit, limit)
}

// DeleteProductByID deletes a product, reporting success as a bool.
func (p *ProductService) DeleteProductByID(productID int64) bool {
	return p.productRepository.Delete(productID)
}

// InsertProduct validates the product and inserts it, returning the new id;
// on validation failure it returns -1 and the validation error.
func (p *ProductService) InsertProduct(product *datamodels.Product) (int64, error) {
	if err := ValidateProduct(product); err != nil {
		return -1, err
	}
	return p.productRepository.Insert(product)
}

// UpdateProduct persists changes to an existing product.
func (p *ProductService) UpdateProduct(product *datamodels.Product) error {
	return p.productRepository.Update(product)
}

// GetProductByshop returns a page of products belonging to a shop.
// BUG(review): the offset is computed with the hard-coded constant 10
// ((pagenum-1)*10) while every other method uses (pagenum-1)*limit — paging
// breaks for any limit != 10.
func (p *ProductService) GetProductByshop(shopID int64, pagenum int, limit int) ([]datamodels.Product, int, error) {
	return p.productRepository.SelectByshopId(shopID, (pagenum-1)*10, limit)
}

// SubNumberOne decrements the stock count of a product by one.
func (p *ProductService) SubNumberOne(productID int64) error {
	return p.productRepository.SubProductNum(productID)
}

// ValidateProduct runs beego validation over the product, logging each
// violation and returning a generic error when any field is invalid.
// NOTE(review): the loop variable shadows err, but the assignment after the
// loop writes the outer err, so the generic error is what gets returned;
// when b is true, whatever err Valid returned (possibly non-nil) is returned.
func ValidateProduct(product *datamodels.Product) error {
	valid := validation.Validation{}
	b, err := valid.Valid(product)
	if !b {
		for _, err := range valid.Errors {
			log.Println(err.Key, err.Message)
		}
		err = errors.New("数据验证错误")
	}
	return err
}
// Package structs: converters from wire/JSON message types to domain types.
package structs

import (
	"fmt"
	"strconv"
	"time"
)

// ConvertUserMessageToUser — parse UserMessage (from JSON) to User.
// The string ID is converted to int64; everything else is copied verbatim.
func ConvertUserMessageToUser(u UserMessage) (User, error) {
	ID, err := strconv.Atoi(u.ID)
	if err != nil {
		return User{}, fmt.Errorf("convert string to int error: %w", err)
	}
	user := User{
		ID:        int64(ID),
		Email:     u.Email,
		FirstName: u.FirstName,
		LastName:  u.LastName,
	}
	return user, nil
}

// ConvertEventMessageToEvent — parse EventMessage (from JSON) to Event.
// Converts the string IDs to int64 and the RFC 3339 date strings to
// time.Time, returning a wrapped error on the first failure.
func ConvertEventMessageToEvent(e EventMessage) (Event, error) {
	ID, err := strconv.Atoi(e.ID)
	if err != nil {
		return Event{}, fmt.Errorf("convert ID error: %w", err)
	}
	// BUG FIX: the original parsed e.ID again here, so Event.UserID was
	// silently set to the event's own ID instead of the owning user's ID.
	userID, err := strconv.Atoi(e.UserID)
	if err != nil {
		return Event{}, fmt.Errorf("convert UserID error: %w", err)
	}
	dateFrom, err := time.Parse(time.RFC3339, e.DateFrom)
	if err != nil {
		return Event{}, fmt.Errorf("parse DateFrom error: %w", err)
	}
	dateTo, err := time.Parse(time.RFC3339, e.DateTo)
	if err != nil {
		return Event{}, fmt.Errorf("parse DateTo error: %w", err)
	}
	event := Event{
		ID:       int64(ID),
		UserID:   int64(userID),
		Title:    e.Title,
		Content:  e.Content,
		DateFrom: dateFrom,
		DateTo:   dateTo,
		Notified: e.Notified,
	}
	return event, nil
}
package models import ( "encoding/json" "log" "strconv" "os" "testing" "github.com/go-redis/redis" ) var raw = json.RawMessage(`{ "global_id": 1704691, "system_object_id": "161", "ID": 161, "Name": "Парковка такси по адресу Карачаровское шоссе, дом 15", "AdmArea": "Юго-Восточный административный округ", "District": "Нижегородский район", "Address": "Карачаровское шоссе, дом 15", "Longitude_WGS84": "37.7630192041397", "Latitude_WGS84": "55.7356914963956", "CarCapacity": 4, "Mode": "круглосуточно", "ID_en": 161, "Name_en": "Taxi parking at Karacharovskoe shosse, house 15", "AdmArea_en": "Yugo-Vostochny'j administrativny'j okrug", "District_en": "Nizhegorodskij rajon", "Address_en": "Karacharovskoe shosse, dom 15", "Longitude_WGS84_en": "37.7630192041397", "Latitude_WGS84_en": "55.7356914963956", "CarCapacity_en": 4, "Mode_en": "24-hours" }`) var mockdata = []TaxiParking{ TaxiParking{GlobalID: 1704691, ID: 161, ModeEN: "24-hours", Raw: raw, }} // Create a test connection with Redis. Be careful, all data in the database will be reset. // Number of db for test, pass in env testdb=... func connectToTest() *DBClient { addr := os.Getenv("redis") password := os.Getenv("password") var ( dbNum int err error ) if os.Getenv("testdb") == "" { dbNum = 0 } else { dbNum, err = strconv.Atoi(os.Getenv("testdb")) if err != nil { log.Fatal(err) } } dbredis := redis.NewClient(&redis.Options{ Addr: addr, Password: password, DB: dbNum, }) dbredis.FlushDB() return &DBClient{db: dbredis} } func TestBulkInsert(t *testing.T) { client := connectToTest() client.BulkInsert(&mockdata) prefixKey, err := client.getActualPrefix() if err != nil { t.Errorf("BulkInser. getActualPrefix error %s", err) } if prefixKey == "" { t.Errorf("BulkInsert not working. There is nothing in \"parking:keys\"") } res, err := client.GetTaxiParking(1704691) if err != nil { t.Errorf("GetTaxiParking not working. Error %s", err) } if res == "" { t.Errorf("BulkInsert not working. 
GetTaxiParking return nothing") } client.db.FlushDB() } func TestGetParkingTaxiById(t *testing.T) { client := connectToTest() client.BulkInsert(&mockdata) res, _ := client.GetTaxiParking(1704691) if res != string(raw) { t.Errorf("Error compare data GetTaxiParking with the source") } // first 1, in range with leght 1 resArr, _ := client.GetTaxiParkingByID(161, 1, 0) if len(resArr) != 1 { t.Errorf("Len of result GetTaxiParkingByID is not equal 1") } // first 10, in range with leght 1 resArr, _ = client.GetTaxiParkingByID(161, 10, 0) if len(resArr) != 1 { t.Errorf("Len of result GetTaxiParkingByID is not equal 1") } // first 1 with offset 10, in range with leght 1 resArr, _ = client.GetTaxiParkingByID(161, 1, 10) if len(resArr) != 0 { t.Errorf("Offset wrong working in GetTaxiParkingByID") } client.db.FlushDB() res, _ = client.GetTaxiParking(1704691) if res != "" { t.Errorf("Incorrect data in emty DB") } } func TestGetParkingTaxiByMode(t *testing.T) { client := connectToTest() client.BulkInsert(&mockdata) // first 1, in range with leght 1 resArr, _ := client.GetTaxiParkingByMode("24-hours", 1, 0) if len(resArr) != 1 { t.Errorf("Len of result GetTaxiParkingByID is wrong") } // first 1 with offset 10, in range with leght 1 resArr, _ = client.GetTaxiParkingByMode("24-hours", 1, 10) if len(resArr) != 0 { t.Errorf("Len of result GetTaxiParkingByID is wrong") } }
package outer

import (
	"context"
	"sync"

	"github.com/qyqx233/go-tunel/lib"
	"github.com/qyqx233/go-tunel/lib/proto"
	"github.com/rs/zerolog/log"
)

const (
	RegState int = iota
)

// reqConnChanStru pairs a request id with the channel on which the
// requester waits for a newly established transport connection.
type reqConnChanStru struct {
	reqID int64
	ch    *chan lib.WrapConnStru
}

// transportImpl describes one tunnel to a target host/port, including its
// command connection, its pool of cached transport connections, and the
// channel used to request additional temporary connections.
type transportImpl struct {
	proxyStarted bool // whether the forwarding port is being listened on
	Dump         bool
	Export       bool
	AddIp        bool
	atomic       int32
	connNum      int32
	ID           uint64
	IP           string
	TargetHost   string
	TargetPort   int
	LocalPort    int
	TcpOrUdp     string
	Name         []byte
	SymKey       []byte
	MinConnNum   int
	MaxConnNum   int
	AllowIps     []string
	State        proto.ShakeStateEnum // tunnel state
	cmdConn      lib.WrapConnStru
	connCh       chan lib.WrapConnStru // cached transport connections
	newCh        chan reqConnChanStru  // requests to create temporary connections
	asyncMap     sync.Map
	// cmdCh chan struct{}
}

// shutdown marks the tunnel as shut down and drains both channels, closing
// any cached transport connections.
// Always called by at most one goroutine at a time.
func (t *transportImpl) shutdown() {
	t.State = proto.ShutdownState
	for {
		select {
		case <-t.newCh:
		case ch := <-t.connCh:
			ch.ShutDown()
		default:
			return
		}
	}
}

// restart (re)initializes the tunnel around a fresh command connection and
// starts the monitor goroutine. Channels are only (re)created when the tunnel
// is still in the registration state.
// Always called by at most one goroutine at a time.
func (t *transportImpl) restart(ctx context.Context, conn lib.WrapConnStru) {
	if t.State == proto.RegState {
		t.connCh = make(chan lib.WrapConnStru, t.MinConnNum)
		t.newCh = make(chan reqConnChanStru, 10)
	}
	t.cmdConn = conn
	go t.monitor(ctx)
	t.State = proto.ShakeState
}

// monitor serves newCh until ctx is cancelled: for each request it asks the
// remote side (over the command connection) to open a temporary transport
// connection and records the requester in asyncMap under its request id.
func (t *transportImpl) monitor(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case ch := <-t.newCh:
			go func() {
				defer func() {
					if err := recover(); err != nil {
						log.Error().Msgf("%v", err)
					}
				}()
				rid := ch.reqID
				log.Info().Msgf("请求与服务器%s:%d的临时通道, reqID=%d", t.IP, t.TargetPort, rid)
				cmd := proto.CmdProto{Usage: proto.TransportReqUsage, ReqID: rid}
				err := cmd.Send(t.cmdConn.Conn)
				if err != nil {
					log.Error().Err(err).Msg("error")
					return
				}
				// NOTE(review): the requester is registered in asyncMap only
				// after the command has been sent; if the peer can answer
				// before the Store happens, the response handler must
				// tolerate a missing entry — confirm against the reader side.
				t.asyncMap.Store(rid, ch)
			}()
		}
	}
}

// transportList is kept sorted by (TargetHost, TargetPort) so lookups can
// use binary search.
type transportList []*transportImpl

var initCap = 64

// search binary-searches for h by (TargetHost, TargetPort). It returns
// (true, index) on a match, otherwise (false, insertion-index) — the position
// at which h would keep the list sorted.
func (l transportList) search(h *transportImpl) (bool, int) {
	d := 0
	begin := 0
	end := len(l) - 1
	var mid int
	for begin <= end {
		mid = (begin + end) / 2
		// println(begin, end, mid)
		v := l[mid]
		if v.TargetHost == h.TargetHost && v.TargetPort == h.TargetPort {
			return true, mid
		} else if h.TargetHost < v.TargetHost || (h.TargetHost == v.TargetHost && h.TargetPort < v.TargetPort) {
			end = mid - 1
			d = 0
		} else {
			begin = mid + 1
			d = 1
		}
	}
	return false, mid + d
}

// add inserts h into the sorted list (shifting the tail right) unless an
// entry with the same host/port already exists. Returns the updated list.
func (m *TransportMng) add(h *transportImpl) transportList {
	l := m.tl
	leng := len(l)
	if leng == 0 {
		l = append(l, h)
		m.tl = l
		return l
	}
	has, pos := l.search(h)
	if !has {
		l = append(l, &transportImpl{})
		for i := leng; i > pos; i-- {
			l[i] = l[i-1]
		}
		l[pos] = h
	}
	m.tl = l
	return l
}

// remove deletes the entry matching h's host/port by copying the list into a
// fresh backing array without that position.
// NOTE(review): the "found" result of search is discarded, so if h is not in
// the list the element at the would-be insertion index is removed instead —
// confirm callers only remove known entries.
func (m *TransportMng) remove(h *transportImpl) transportList {
	l := m.tl
	leng := len(l)
	if leng == 1 {
		l = make([]*transportImpl, 0, initCap)
		m.tl = l
		return l
	}
	_, pos := l.search(h)
	ll := make([]*transportImpl, leng-1, initCap)
	if pos > 0 {
		copy(ll[:pos], l[:pos])
	}
	copy(ll[pos:], l[pos+1:])
	l = ll
	m.tl = l
	return l
}

// TransportMng owns the sorted tunnel list and its guarding lock.
type TransportMng struct {
	rwl *sync.RWMutex
	tl  transportList
}

var tl transportList

var transportMng TransportMng

func init() {
	transportMng.tl = make([]*transportImpl, 0, initCap)
	transportMng.rwl = &sync.RWMutex{}
}
package main

import (
	"log"
	"os"
	"text/template"
)

// main renders the hard-coded team list to stdout using the onur.gohtml
// template located in the current working directory.
func main() {
	repoRoot, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	//dir := repoRoot + "/Chapter 2/Video 14"

	// FIX: errors from ParseGlob/Execute were previously discarded; a parse
	// failure left tpl nil and Execute panicked with a nil dereference.
	tpl, err := template.ParseGlob(repoRoot + "/onur.gohtml")
	if err != nil {
		log.Fatal(err)
	}

	team := []string{"Muslera", "Falcao", "Luyindama", "Onyekuru"}

	if err := tpl.Execute(os.Stdout, team); err != nil {
		log.Fatal(err)
	}
}
package arrays import ( "strings" ) func removeSpaces(s string) string { var new_s []rune for _, c := range s { if c == ' ' { continue } new_s = append(new_s, c) } return string(new_s) } func reverse(s string) string { n := len([]rune(s)) new_s := make([]rune, n) for i, v := range s { new_s[n-1-i] = v } return string(new_s) } func palindrome(s string) bool { new_s := removeSpaces(s) return strings.EqualFold(new_s, reverse(new_s)) } func factorial(n int) int { acc := 1 for i := n; i >= 1; i-- { acc *= i } return acc } func permutations(s string) []string { var perms []string n := len([]rune(s)) if s == "" || n == 1 { return []string{s} } if n == 2 { return []string{ s, reverse(s), } } for i, c := range s { for _, p := range permutations(s[:i] + s[i+1:]) { perms = append(perms, string(c)+p) } } return perms } func PalindromePermutation(s string) (bool, string) { for _, permutation := range permutations(s) { if palindrome(permutation) { return true, permutation } } return false, "" }
package main

import "fmt"

// Numbers sends the natural numbers 1, 2, 3, ... on c forever.
// It never returns; run it in its own goroutine.
// FIX: removed the unreachable `return` after the infinite loop
// (flagged by go vet).
func Numbers(c chan int) {
	for i := 1; ; i++ {
		c <- i
	}
}

// main prints the first ten values produced by Numbers, then exits
// (the producer goroutine is abandoned at process exit).
func main() {
	ch := make(chan int)
	go Numbers(ch)
	for i := range ch {
		fmt.Println("Number: ", i)
		if i == 10 {
			break
		}
	}
}
package main import "testing" func TestSatisfyTime(t *testing.T) { var time mtime time = toTime("9:00") action := eq c := &constraint{} c.vars = []constraintVariable{time} checkvalue := mtime(time) got := satisfyTime(action, c, checkvalue) if !got { t.Errorf("satisfyTime failed") } }
/* Application modules are test permutations testing various rlog output modules. */ package main import ( "github.com/rightscale/rlog" "github.com/rightscale/rlog/console" "github.com/rightscale/rlog/file" "github.com/rightscale/rlog/syslog" "os" "strings" ) func main() { //Setup syslog module facility, err := syslog.FacilityNameToValue("local6") if err != nil { panic("Getting syslog facility value failed: " + err.Error()) } syslogModule, err := syslog.NewLocalFacilitySyslogLogger("", "", facility, "tmp/sysloggerHeartbeat.txt") if err != nil { panic("Getting syslog logger instance failed: " + err.Error()) } //Setup file logger log_file_name := "tmp/test.txt" rotated_log_name := log_file_name + ".1" if _, err = os.Stat(rotated_log_name); err == nil { os.Remove(rotated_log_name) } fileModule, err := file.NewFileLogger(log_file_name, true, true) if err != nil { panic("Getting file logger instance failed: " + err.Error()) } rlog.EnableModule(syslogModule) rlog.EnableModule(fileModule) rlog.EnableModule(console.NewStdoutLogger(true)) rlog.EnableModule(console.NewStderrLogger(true)) conf := rlog.GetDefaultConfig() conf.Severity = rlog.SeverityDebug rlog.Start(conf) defer rlog.Flush() //Test all the different log levels rlog.Debug("debug log entry") rlog.Info("info log entry") rlog.Warning("warning log entry") // simulate log rotation followed by SIGHUP and then flush. 
err = os.Rename(log_file_name, rotated_log_name) if err != nil { panic(err) } rlog.Flush() // will reopen logs rlog.Error("error log entry") rlog.Fatal("fatal log entry") //Generate a couple of IDs and log it ids := "" for i := 0; i < 10; i++ { ids += rlog.GenerateID() + ", " } rlog.Info("IDs: %s", ids) //A deeply nested stack trace rlog.Debug("---------------------------") level1() rlog.Debug("---------------------------") //A very long log message rlog.Debug(strings.Repeat("hello rlog, ", 1000)) //All done rlog.Debug("Test permutations completed") } func level1() { level2() } func level2() { level3() } func level3() { rlog.Error("nested function call") }
/* Copyright: PeerFintech. All Rights Reserved. */ package gohfc import ( "strconv" "github.com/zhj0811/gohfc/pkg/parseBlock" "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-protos-go/common" "github.com/pkg/errors" ) type LedgerClientImpl struct { *sdkHandler } // queryByQscc 根据系统智能合约Qscc查询信息 // input: args: 请求信息,例如:args := []string{"GetChainInfo", channelName}(查询某一个通道信息) // channelName: 通道名字,通道为空时默认用配置文件中的通道 // output: []*QueryResponse: 返回信息,需要解析 func (lc *LedgerClientImpl) queryByQscc(args []string, channelName string) ([]*QueryResponse, error) { peerNames := getSendPeerName() if len(peerNames) == 0 { return nil, errors.New("cannot found peer address") } if channelName == "" { channelName = lc.client.Channel.ChannelId } chaincode := ChainCode{ ChannelId: channelName, Type: ChaincodeSpec_GOLANG, Name: QSCC, Args: args, } return lc.client.Query(*lc.identity, chaincode, []string{peerNames[0]}) } // GetBlockByNumber 根据区块编号查询区块 // input: blockNum: 区块编号 // channelName: 通道名称,通道为空时默认用配置文件中的通道 // output: *parseBlock.FilterBlock: 经过解析的区块 func (lc *LedgerClientImpl) GetBlockByNumber(blockNum uint64, channelName string) (*parseBlock.FilterBlock, error) { if channelName == "" { channelName = lc.client.Channel.ChannelId } strBlockNum := strconv.FormatUint(blockNum, 10) args := []string{"GetBlockByNumber", channelName, strBlockNum} resps, err := lc.queryByQscc(args, channelName) if err != nil { return nil, errors.WithMessage(err, "cannot got installed chaincode") } else if len(resps) == 0 { return nil, errors.New("no response") } if resps[0].Error != nil { return nil, resps[0].Error } data := resps[0].Response.Response.Payload block := new(common.Block) err = proto.Unmarshal(data, block) if err != nil { return nil, errors.WithMessage(err, "unmarshal block failed") } filterBlock := parseBlock.FilterParseBlock(block) return filterBlock, nil } // GetBlockHeight: 查询区块高度 // input: channelName: 通道名称,通道为空时默认用配置文件中的通道 func (lc *LedgerClientImpl) 
GetBlockHeight(channelName string) (uint64, error) { if channelName == "" { channelName = lc.client.Channel.ChannelId } args := []string{"GetChainInfo", channelName} resps, err := lc.queryByQscc(args, channelName) if err != nil { return 0, err } else if len(resps) == 0 { return 0, errors.New("no response") } if resps[0].Error != nil { return 0, resps[0].Error } data := resps[0].Response.Response.Payload chainInfo := new(common.BlockchainInfo) err = proto.Unmarshal(data, chainInfo) if err != nil { return 0, errors.WithMessage(err, "unmarshal block failed") } return chainInfo.Height, nil } // GetBlockHeightByEventPeer 向event peer发送请求,获得区块高度 // input: channelName: 通道名称,通道为空时默认用配置文件中的通道 func (lc *LedgerClientImpl) GetBlockHeightByEventPeer(channelName string) (uint64, error) { if channelName == "" { channelName = lc.client.Channel.ChannelId } if eventPeer == "" { return 0, errors.New("event peername is empty") } args := []string{"GetChainInfo", channelName} chaincode := ChainCode{ ChannelId: channelName, Type: ChaincodeSpec_GOLANG, Name: QSCC, Args: args, } resps, err := lc.client.queryByEvent(*lc.identity, chaincode, []string{eventPeer}) if err != nil { return 0, err } else if len(resps) == 0 { return 0, errors.New("no response") } if resps[0].Error != nil { return 0, resps[0].Error } data := resps[0].Response.Response.Payload chainInfo := new(common.BlockchainInfo) err = proto.Unmarshal(data, chainInfo) if err != nil { return 0, errors.WithMessage(err, "unmarshal block failed") } return chainInfo.Height, nil } // GetBlockByTxID 根据transaction ID查询区块 // input: txid: 交易ID // channelName: 通道名称,通道为空时默认用配置文件中的通道 // output: *parseBlock.FilterBlock: 经过解析的区块 func (lc *LedgerClientImpl) GetBlockByTxID(txid string, channelName string) (*parseBlock.FilterBlock, error) { if channelName == "" { channelName = lc.client.Channel.ChannelId } args := []string{"GetBlockByTxID", channelName, txid} resps, err := lc.queryByQscc(args, channelName) if err != nil { return nil, 
errors.WithMessage(err, "can not get installed chaincodes") } else if len(resps) == 0 { return nil, errors.New("no response") } if resps[0].Error != nil { return nil, resps[0].Error } data := resps[0].Response.Response.Payload block := new(common.Block) err = proto.Unmarshal(data, block) if err != nil { return nil, errors.WithMessage(err, "unmarshal block failed") } filterBlock := parseBlock.FilterParseBlock(block) return filterBlock, nil } func (lc *LedgerClientImpl) GetFilterTxByTxID(txId string, channelName string) (*parseBlock.FilterTx, error) { if channelName == "" { channelName = lc.client.Channel.ChannelId } args := []string{"GetBlockByTxID", channelName, txId} resps, err := lc.queryByQscc(args, channelName) if err != nil { return nil, errors.WithMessage(err, "can not get installed chaincodes") } else if len(resps) == 0 { return nil, errors.New("no response") } if resps[0].Error != nil { return nil, resps[0].Error } data := resps[0].Response.Response.Payload block := new(common.Block) err = proto.Unmarshal(data, block) if err != nil { return nil, errors.WithMessage(err, "unmarshal block failed") } res := parseBlock.FilterParseTransaction(block, txId) return res, nil }
package psa import ( "fmt" "net/url" "github.com/evcc-io/evcc/util" "github.com/evcc-io/evcc/util/request" "github.com/evcc-io/evcc/util/transport" "golang.org/x/oauth2" ) // https://developer.groupe-psa.io/webapi/b2c/api-reference/specification // BaseURL is the API base url const BaseURL = "https://api.groupe-psa.com/connectedcar/v4" // API is an api.Vehicle implementation for PSA cars type API struct { *request.Helper realm string id string } // NewAPI creates a new vehicle func NewAPI(log *util.Logger, identity oauth2.TokenSource, realm, id string) *API { v := &API{ Helper: request.NewHelper(log), realm: realm, id: id, } // replace client transport with authenticated transport plus headers v.Client.Transport = &transport.Decorator{ Base: &oauth2.Transport{ Source: identity, Base: v.Client.Transport, }, Decorator: transport.DecorateHeaders(map[string]string{ "Accept": "application/hal+json", "X-Introspect-Realm": v.realm, }), } return v } func (v *API) clientID() string { return url.Values{ "client_id": []string{v.id}, }.Encode() } // Vehicles implements the /vehicles response func (v *API) Vehicles() ([]Vehicle, error) { var res struct { Embedded struct { Vehicles []Vehicle } `json:"_embedded"` } uri := fmt.Sprintf("%s/user/vehicles?%s", BaseURL, v.clientID()) err := v.GetJSON(uri, &res) return res.Embedded.Vehicles, err } // Status implements the /vehicles/<vid>/status response func (v *API) Status(vid string) (Status, error) { var res Status uri := fmt.Sprintf("%s/user/vehicles/%s/status?%s", BaseURL, vid, v.clientID()) err := v.GetJSON(uri, &res) return res, err }
package models

// Abonemen links a user to a subscriber and records whether notifications
// are enabled for that relationship. (Field semantics inferred from names —
// confirm against the code that populates this struct.)
type Abonemen struct {
	// UserID identifies the user side of the subscription.
	UserID string `json:"userID"`
	// SubscriberID identifies the subscribing party.
	SubscriberID string `json:"subscriberID"`
	// Notification reports whether notifications are enabled.
	Notification bool `json:"notification"`
}
// Copyright (c) 2016-2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package blobclient import ( "bytes" "crypto/tls" "encoding/json" "errors" "fmt" "io" "net/url" "time" "github.com/uber/kraken/core" "github.com/uber/kraken/utils/httputil" ) // uploader provides methods for executing a chunked upload. type uploader interface { start(d core.Digest) (uid string, err error) patch(d core.Digest, uid string, start, stop int64, chunk io.Reader) error commit(d core.Digest, uid string) error } func runChunkedUpload(u uploader, d core.Digest, blob io.Reader, chunkSize int64) error { if err := runChunkedUploadHelper(u, d, blob, chunkSize); err != nil && !httputil.IsConflict(err) { return err } return nil } func runChunkedUploadHelper(u uploader, d core.Digest, blob io.Reader, chunkSize int64) error { uid, err := u.start(d) if err != nil { return err } var pos int64 buf := make([]byte, chunkSize) for { n, err := blob.Read(buf) if err != nil { if err == io.EOF { break } return fmt.Errorf("read blob: %s", err) } chunk := bytes.NewReader(buf[:n]) stop := pos + int64(n) if err := u.patch(d, uid, pos, stop, chunk); err != nil { return err } pos = stop } return u.commit(d, uid) } // transferClient executes chunked uploads for internal blob transfers. 
type transferClient struct { addr string tls *tls.Config } func newTransferClient(addr string, tls *tls.Config) *transferClient { return &transferClient{addr, tls} } func (c *transferClient) start(d core.Digest) (uid string, err error) { r, err := httputil.Post( fmt.Sprintf("http://%s/internal/blobs/%s/uploads", c.addr, d), httputil.SendTLS(c.tls)) if err != nil { return "", err } uid = r.Header.Get("Location") if uid == "" { return "", errors.New("request succeeded, but Location header not set") } return uid, nil } func (c *transferClient) patch( d core.Digest, uid string, start, stop int64, chunk io.Reader) error { _, err := httputil.Patch( fmt.Sprintf("http://%s/internal/blobs/%s/uploads/%s", c.addr, d, uid), httputil.SendBody(chunk), httputil.SendHeaders(map[string]string{ "Content-Range": fmt.Sprintf("%d-%d", start, stop), }), httputil.SendTLS(c.tls)) return err } func (c *transferClient) commit(d core.Digest, uid string) error { _, err := httputil.Put( fmt.Sprintf("http://%s/internal/blobs/%s/uploads/%s", c.addr, d, uid), httputil.SendTimeout(15*time.Minute), httputil.SendTLS(c.tls)) return err } type uploadType int const ( _publicUpload = iota + 1 _duplicateUpload ) // uploadClient executes chunked uploads for external cluster upload operations. 
// uploadClient executes chunked uploads against the namespaced (external)
// upload endpoints. uploadType selects between the public and the
// internal-duplicate commit routes.
type uploadClient struct {
	addr       string
	namespace  string
	uploadType uploadType
	delay      time.Duration
	tls        *tls.Config
}

func newUploadClient(
	addr string, namespace string, t uploadType, delay time.Duration, tls *tls.Config) *uploadClient {

	return &uploadClient{addr, namespace, t, delay, tls}
}

// start begins a chunked upload of d in the client's namespace and returns
// the server-assigned upload id from the Location header.
func (c *uploadClient) start(d core.Digest) (uid string, err error) {
	r, err := httputil.Post(
		fmt.Sprintf("http://%s/namespace/%s/blobs/%s/uploads",
			c.addr, url.PathEscape(c.namespace), d),
		httputil.SendTLS(c.tls))
	if err != nil {
		return "", err
	}
	uid = r.Header.Get("Location")
	if uid == "" {
		return "", errors.New("request succeeded, but Location header not set")
	}
	return uid, nil
}

// patch uploads one chunk of d covering bytes [start, stop) under upload uid.
func (c *uploadClient) patch(
	d core.Digest, uid string, start, stop int64, chunk io.Reader) error {

	_, err := httputil.Patch(
		fmt.Sprintf("http://%s/namespace/%s/blobs/%s/uploads/%s",
			c.addr, url.PathEscape(c.namespace), d, uid),
		httputil.SendBody(chunk),
		httputil.SendHeaders(map[string]string{
			"Content-Range": fmt.Sprintf("%d-%d", start, stop),
		}),
		httputil.SendTLS(c.tls))
	return err
}

// DuplicateCommitUploadRequest defines HTTP request body.
// NOTE(review): the field carries a `yaml` tag but commit encodes it with
// json.Marshal, so the wire field name is "Delay", not "delay". If both ends
// share this exact type that is self-consistent; confirm before changing
// the tag, since doing so changes the wire format.
type DuplicateCommitUploadRequest struct {
	Delay time.Duration `yaml:"delay"`
}

// commit finalizes upload uid of d, choosing the endpoint (and, for
// duplicate uploads, a JSON body carrying the configured delay) based on
// the client's uploadType.
func (c *uploadClient) commit(d core.Digest, uid string) error {
	var template string
	var body io.Reader
	switch c.uploadType {
	case _publicUpload:
		template = "http://%s/namespace/%s/blobs/%s/uploads/%s"
	case _duplicateUpload:
		template = "http://%s/internal/duplicate/namespace/%s/blobs/%s/uploads/%s"
		b, err := json.Marshal(DuplicateCommitUploadRequest{c.delay})
		if err != nil {
			return fmt.Errorf("json: %s", err)
		}
		body = bytes.NewBuffer(b)
	default:
		return fmt.Errorf("unknown upload type: %d", c.uploadType)
	}
	_, err := httputil.Put(
		fmt.Sprintf(template, c.addr, url.PathEscape(c.namespace), d, uid),
		httputil.SendTimeout(15*time.Minute),
		httputil.SendBody(body),
		httputil.SendTLS(c.tls))
	return err
}
package main

import (
	"fmt"
	"os"
	"os/signal"

	"github.com/growse/pcap"
	"github.com/jinzhu/gorm"
)

var (
	// snaplen is the pcap capture length in bytes per packet.
	snaplen = 65536
)

// OpenFile opens or creates a file for json logging
func OpenFile(path string) *os.File {
	var fo *os.File
	var ferr error
	if _, err := os.Stat(path); err == nil {
		fo, ferr = os.OpenFile(path, os.O_RDWR|os.O_APPEND, 0660)
	} else {
		fo, ferr = os.Create(path)
	}
	if ferr != nil {
		panic(ferr)
	}
	return fo
}

// WriteToFile write json output to file
func WriteToFile(fo *os.File, json []byte) {
	if _, err := fo.WriteString(string(json) + "\n"); err != nil {
		panic(err)
	}
}

// Monitor bind and monitor for DNS packets. This will also
// handle the various output methods.
// It captures on options.Interface filtered to options.Port, then fans each
// parsed question out to file / database / stdout depending on options, and
// installs an interrupt handler for cleanup.
func Monitor(options *Options) {
	if !options.Quiet {
		fmt.Printf("\n %s (%s) - %s\n\n",
			Name,
			Version,
			Description,
		)
		fmt.Printf(" Hostname: %s\n", options.Hostname)
		fmt.Printf(" Interface: %s (%s)\n", options.InterfaceData.Name, options.InterfaceData.HardwareAddr.String())
		fmt.Printf(" IP Address: %s\n", options.Ip)
		// addrs, err := options.InterfaceData.Addrs()
		// if err == nil {
		// 	var list []string
		// 	for _, addr := range addrs {
		// 		list = append(list, addr.String())
		// 	}
		// 	fmt.Printf(" Addresses: %s\n\n", strings.Join(list, ", "))
		// } else {
		// 	fmt.Printf("\n")
		// }
	}

	// BPF filter restricting capture to the configured port.
	expr := fmt.Sprintf("port %d", options.Port)

	h, err := pcap.OpenLive(options.Interface, int32(snaplen), true, 500)
	if h == nil {
		fmt.Fprintf(os.Stderr, "%s Error: %s\n", Name, err)
		os.Exit(-1)
	}

	ferr := h.SetFilter(expr)
	if ferr != nil {
		fmt.Fprintf(os.Stderr, "%s Error: %s", Name, ferr)
		os.Exit(-1)
	}

	// Optional JSON output file; nil when -write is not given.
	var file *os.File
	if options.Write != "" {
		file = OpenFile(options.Write)
		defer func() {
			if err := file.Close(); err != nil {
				panic(err)
			}
		}()
	}

	var db gorm.DB
	var clientId int64 = 0

	// queue carries parsed questions to the single DB-writer goroutine.
	queue := make(chan *Question)

	if options.Mysql || options.Postgres || options.Sqlite3 {
		db, err = DatabaseConnect(options)
		if err != nil {
			fmt.Println("\n Error: ", err.Error(), "\n")
			os.Exit(1)
		}
		clientId = CreateClient(db, options)
		go func() {
			for elem := range queue {
				elem.ToDatabase(db, options)
			}
		}()
	}

	// Interrupt handler: close outputs and exit.
	// NOTE(review): when options.Write is empty, file is nil here; (*os.File).Close
	// on a nil receiver returns an error rather than panicking, so this is
	// tolerated — confirm that is intentional.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				fmt.Println("DB Queue - Recovered:", r)
				fmt.Println("Please report this issue. https://github.com/mephux/dnas")
			}
		}()
		for sig := range c {
			if sig.String() == "interrupt" {
				file.Close()
				close(queue)
				if options.Mysql || options.Postgres || options.Sqlite3 {
					db.Close()
				}
				os.Exit(1)
			}
		}
	}()

	// Drop privileges after the capture handle is open.
	if options.User != "" {
		chuser(options.User)
	}

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("Monitor - Recovered:", r)
			fmt.Println("Please report this issue. https://github.com/mephux/dnas")
		}
	}()

	// Main capture loop: r < 0 ends the loop, r == 0 is a read timeout.
	for pkt, r := h.NextEx(); r >= 0; pkt, r = h.NextEx() {
		if r == 0 {
			continue
		}
		message, err := DNS(pkt, options)
		if err == nil {
			if options.Write != "" {
				// NOTE(review): this goroutine captures the loop variable
				// `message`; under Go versions before 1.22 a subsequent
				// iteration can rebind it before the goroutine runs —
				// confirm the module's Go version.
				go func() {
					json, err := message.ToJSON()
					if err != nil {
						fmt.Println(err.Error())
						os.Exit(-1)
					}
					WriteToFile(file, json)
				}()
			}
			if options.Mysql || options.Postgres || options.Sqlite3 {
				message.ClientId = clientId
				queue <- message
			}
			if !options.Quiet {
				message.ToStdout(options)
			}
		}
	}

	fmt.Fprintf(os.Stderr, "%s Error: %s\n", Name, h.Geterror())
}
package adapter

import (
	"fmt"
	"strings"

	"github.com/CenturyLinkLabs/pmxadapter"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

// CreateServices converts the given Panamax services into Kubernetes
// Services plus one ReplicationController per service, creates them via
// DefaultExecutor, and returns one deployment record (RC name + status)
// per input service.
func (a KubernetesAdapter) CreateServices(services []*pmxadapter.Service) ([]pmxadapter.ServiceDeployment, error) {
	deployments := make([]pmxadapter.ServiceDeployment, len(services))

	// TODO destroy all services (and RCs I guess!) if there is an error
	// anywhere. Otherwise they'll be orphaned and screw up subsequent deploys.

	kServices, err := kServicesFromServices(services)
	if err != nil {
		return nil, err
	}
	if err := DefaultExecutor.CreateKServices(kServices); err != nil {
		return nil, err
	}

	for i, s := range services {
		rcSpec := replicationControllerSpecFromService(*s)
		rc, err := DefaultExecutor.CreateReplicationController(rcSpec)
		if err != nil {
			// Map a Kubernetes AlreadyExists status onto the adapter's own
			// already-exists error type; everything else passes through.
			if sErr, ok := err.(*errors.StatusError); ok && sErr.ErrStatus.Reason == api.StatusReasonAlreadyExists {
				return nil, pmxadapter.NewAlreadyExistsError(err.Error())
			}
			return nil, err
		}

		status, err := statusFromReplicationController(rc)
		if err != nil {
			return nil, err
		}
		deployments[i].ID = rc.ObjectMeta.Name
		deployments[i].ActualState = status
	}

	return deployments, nil
}

// replicationControllerSpecFromService maps one Panamax service onto a
// ReplicationController spec: ports, environment, optional command, and a
// pod template labeled by sanitized service name.
func replicationControllerSpecFromService(s pmxadapter.Service) api.ReplicationController {
	ports := make([]api.Port, len(s.Ports))
	for i, p := range s.Ports {
		ports[i].HostPort = int(p.HostPort)
		ports[i].ContainerPort = int(p.ContainerPort)
		ports[i].Protocol = api.Protocol(p.Protocol)
	}

	env := make([]api.EnvVar, len(s.Environment))
	for i, e := range s.Environment {
		env[i].Name = e.Variable
		env[i].Value = e.Value
	}

	safeName := sanitizeServiceName(s.Name)

	commands := make([]string, 0)
	if s.Command != "" {
		commands = append(commands, s.Command)
	}

	replicas := s.Deployment.Count
	// The adapter seems to be in charge of adjusting missing replica count from
	// the JSON. The UI doesn't allow selection of 0 replicas, so this shouldn't
	// screw things up in the current state.
	if replicas == 0 {
		replicas = 1
	}

	return api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: safeName,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{"service-name": safeName},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"service-name": safeName,
						"panamax":      "panamax",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:    safeName,
							Image:   s.Source,
							Command: commands,
							Ports:   ports,
							Env:     env,
						},
					},
				},
			},
		},
	}
}

// kServicesFromServices builds the Kubernetes Services for a deployment:
// one per Panamax service that exposes a port (named after the service) and
// one per aliased link (named after the alias, pointing at the linked-to
// service's first port).
func kServicesFromServices(services []*pmxadapter.Service) ([]api.Service, error) {
	if err := validateServicesPorts(services); err != nil {
		return nil, err
	}
	if err := validateServicesAliases(services); err != nil {
		return nil, err
	}

	servicesByName := map[string]pmxadapter.Service{}
	for _, s := range services {
		servicesByName[s.Name] = *s
	}

	kServices := make([]api.Service, 0)

	// Create KServices by name for any configured ports.
	for _, s := range services {
		if len(s.Ports) == 0 {
			continue
		}
		sanitizedName := sanitizeServiceName(s.Name)
		ks := kServiceByNameAndPort(
			sanitizedName,
			sanitizedName,
			*s.Ports[0],
		)
		kServices = append(kServices, ks)
	}

	// Create KServices by alias for any links with aliases.
	for _, s := range services {
		for _, l := range s.Links {
			if l.Alias == "" {
				continue
			}
			toService, exists := servicesByName[l.Name]
			if !exists {
				return nil, fmt.Errorf("linking to non-existant service '%v'", l.Name)
			}
			if len(toService.Ports) == 0 {
				return nil, fmt.Errorf("linked-to service '%v' exposes no ports", l.Name)
			}
			ks := kServiceByNameAndPort(
				sanitizeServiceName(l.Alias),
				sanitizeServiceName(toService.Name),
				*toService.Ports[0],
			)
			kServices = append(kServices, ks)
		}
	}

	return kServices, nil
}

// Once K8s allows multiple ports per service, we can lift the restriction on
// a single port. We can't do anything about it now because we need to mimic
// current Docker environment variables while satisfying K8s's requirement
// for unique service names.
// NOTE(review): a ports-count violation is reported via NewAlreadyExistsError,
// which reads oddly for a validation failure — confirm the adapter API does
// not offer a more fitting error type.
func validateServicesPorts(services []*pmxadapter.Service) error {
	for _, s := range services {
		if len(s.Ports) > 1 {
			return pmxadapter.NewAlreadyExistsError(multiplePortsError)
		}
	}
	return nil
}

// The same alias name to different services can't be supported.
func validateServicesAliases(services []*pmxadapter.Service) error {
	aliases := map[string]string{}
	for _, s := range services {
		for _, l := range s.Links {
			if l.Alias == "" {
				continue
			}
			if name, exists := aliases[l.Alias]; exists && name != l.Name {
				return fmt.Errorf("multiple services with the same alias name '%v'", l.Alias)
			}
			aliases[l.Alias] = l.Name
		}
	}
	return nil
}

// kServiceByNameAndPort builds a single-port Kubernetes Service named `name`
// that carries a service-name label pointing at toServiceName.
func kServiceByNameAndPort(name string, toServiceName string, p pmxadapter.Port) api.Service {
	return api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: map[string]string{"service-name": toServiceName},
		},
		Spec: api.ServiceSpec{
			// I'm unaware of any wildcard selector, we don't have a name for the
			// overarching application being started, and I can't specifically
			// target only certain RCs because we don't know if a Service exists
			// solely to allow external access. Shrug.
			Selector:      map[string]string{"panamax": "panamax"},
			Port:          int(p.HostPort),
			ContainerPort: util.NewIntOrStringFromInt(int(p.ContainerPort)),
			Protocol:      api.Protocol(p.Protocol),
			PublicIPs:     PublicIPs,
		},
	}
}

// sanitizeServiceName lowercases n and replaces characters Kubernetes does
// not allow in names (per the package's illegalNameCharacters pattern).
func sanitizeServiceName(n string) string {
	s := illegalNameCharacters.ReplaceAllString(n, "-")
	return strings.ToLower(s)
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gcbench

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"
)

// GCTrace is a parsed GODEBUG=gctrace=1 log: one GCCycle per trace line, in
// the order the lines appeared.
type GCTrace []GCCycle

// GCCycle records the statistics the runtime printed for one GC cycle.
type GCCycle struct {
	// N is the 1-based index of this GC cycle.
	N int

	// Format indicates the variant of the GC trace format for this line.
	Format TraceFormat

	// Start and End are the times this GC cycle started and
	// ended, relative to when the program began executing.
	Start, End time.Duration

	// Util is the overall CPU utilized by GC since the program
	// began executing. Util will be in [0, 1].
	Util float64

	// Forced is true is this GC cycle is a forced STW cycle.
	Forced bool

	// Clock* are the wall-clock durations of each phase.
	// ClockSweepTerm, ClockSync, and ClockMarkTerm are STW.
	// On Go 1.6, ClockRootScan and ClockSync will be 0.
	ClockSweepTerm, ClockRootScan, ClockSync, ClockMark, ClockMarkTerm time.Duration

	// CPU* are the CPU times of each phase.
	CPUSweepTerm, CPURootScan, CPUSync, CPUMark, CPUMarkTerm time.Duration

	// CPUAssist, CPUBackground, and CPUIdle break down CPUMark in
	// to its components.
	CPUAssist, CPUBackground, CPUIdle time.Duration

	// HeapTrigger is the size of the heap at which this GC cycle
	// was triggered.
	HeapTrigger Bytes

	// HeapActual is the size of the heap when this GC cycle
	// finished (before sweeping).
	HeapActual Bytes

	// HeapMarked is the bytes of heap that this GC cycle marked
	// (which will be retained by sweeping).
	HeapMarked Bytes

	// HeapGoal is the size of the heap this GC cycle was aiming
	// to finish at.
	HeapGoal Bytes

	// Procs is the value of GOMAXPROCS during this GC cycle.
	Procs int
}

// TraceFormat identifies which Go release's gctrace layout a line used.
type TraceFormat int

const (
	// Trace1_5 is the trace format from Go 1.5.x.
	Trace1_5 TraceFormat = 1 + iota

	// Trace1_6 is the trace format from Go 1.6.x.
	Trace1_6
)

// Regexps for the gctrace line header and for each comma-separated part.
// Compiled once at package init; a line that matches gcTraceLine but whose
// parts match none of the others is a parse error.
var (
	gcTraceLine  = regexp.MustCompile(`(?m)^gc #?([0-9]+) @([0-9.]+)s ([0-9]+)%: (.*)`)
	gcTraceClock = regexp.MustCompile(`^([+0-9.]+) ms clock$`)
	gcTraceCPU   = regexp.MustCompile(`^([+/0-9.]+) ms cpu$`)
	gcTraceHeap  = regexp.MustCompile(`^([0-9.]+)->([0-9.]+)->([0-9.]+) MB$`)
	gcTraceGoal  = regexp.MustCompile(`^([0-9]+) MB goal$`)
	gcTraceProcs = regexp.MustCompile(`^([0-9]+) P$`)
)

// ParseGCTrace parses the output of a program run with GODEBUG=gctrace=1.
// Non-gctrace lines in s are ignored; a recognized gc line containing a part
// that cannot be parsed yields an error. The number of "+"-separated clock
// phases determines whether a cycle is recorded as the Go 1.5 or Go 1.6
// format.
func ParseGCTrace(s string) (GCTrace, error) {
	lines := gcTraceLine.FindAllStringSubmatch(s, -1)
	out := make([]GCCycle, 0, len(lines))

	for _, line := range lines {
		// Header captures: cycle number, start time (s), overall GC CPU %.
		c := GCCycle{
			N:     atoi(line[1]),
			Start: time.Duration(atof(line[2]) * 1e9),
			Util:  atof(line[3]) / 100,
		}

		// A "(forced)" suffix marks a runtime.GC()-style STW cycle; strip it
		// so the part parsing below sees only the comma-separated fields.
		if strings.HasSuffix(line[4], " (forced)") {
			c.Forced = true
			line[4] = strings.TrimSuffix(line[4], " (forced)")
		}

		// Process parts.
		for _, part := range strings.Split(line[4], ",") {
			part = strings.TrimSpace(part)

			// Wall-clock phase durations, e.g. "0.12+1.3+0.45 ms clock".
			m := gcTraceClock.FindStringSubmatch(part)
			if m != nil {
				var phases []time.Duration
				var sum time.Duration
				for _, p := range strings.Split(m[1], "+") {
					dur := msToDur(p)
					phases = append(phases, dur)
					sum += dur
				}
				// End is derived: start plus the sum of all wall-clock phases.
				c.End = c.Start + sum
				switch len(phases) {
				case 5: // Go 1.5
					c.Format = Trace1_5
					c.ClockSweepTerm = phases[0]
					c.ClockRootScan = phases[1]
					c.ClockSync = phases[2]
					c.ClockMark = phases[3]
					c.ClockMarkTerm = phases[4]
				case 3: // Go 1.6
					c.Format = Trace1_6
					c.ClockSweepTerm = phases[0]
					c.ClockMark = phases[1]
					c.ClockMarkTerm = phases[2]
				default:
					return nil, fmt.Errorf("unexpected number of phases: %d", len(phases))
				}
				continue
			}

			// CPU phase durations, e.g. "0.1+2.3/4.5/6.7+0.8 ms cpu".
			m = gcTraceCPU.FindStringSubmatch(part)
			if m != nil {
				var phases []time.Duration
				for _, p := range strings.Split(m[1], "+") {
					// A slashed phase breaks mark CPU into assist/background/idle.
					// NOTE(review): this assumes a slashed phase always has exactly
					// three components; a two-component phase would panic on
					// sub[2] — confirm against the gctrace format.
					sub := strings.Split(p, "/")
					if len(sub) > 1 {
						c.CPUAssist = msToDur(sub[0])
						c.CPUBackground = msToDur(sub[1])
						c.CPUIdle = msToDur(sub[2])
					}
					// The recorded phase time is the sum of its components.
					var sum time.Duration
					for _, t := range sub {
						sum += msToDur(t)
					}
					phases = append(phases, sum)
				}
				switch len(phases) {
				case 5: // Go 1.5
					c.CPUSweepTerm = phases[0]
					c.CPURootScan = phases[1]
					c.CPUSync = phases[2]
					c.CPUMark = phases[3]
					c.CPUMarkTerm = phases[4]
				case 3: // Go 1.6
					c.CPUSweepTerm = phases[0]
					c.CPUMark = phases[1]
					c.CPUMarkTerm = phases[2]
				default:
					return nil, fmt.Errorf("unexpected number of phases: %d", len(phases))
				}
				continue
			}

			// Heap sizes, e.g. "4->5->2 MB" (trigger -> actual -> marked).
			m = gcTraceHeap.FindStringSubmatch(part)
			if m != nil {
				c.HeapTrigger = mbToBytes(m[1])
				c.HeapActual = mbToBytes(m[2])
				c.HeapMarked = mbToBytes(m[3])
				continue
			}

			m = gcTraceGoal.FindStringSubmatch(part)
			if m != nil {
				c.HeapGoal = mbToBytes(m[1])
				continue
			}

			m = gcTraceProcs.FindStringSubmatch(part)
			if m != nil {
				c.Procs = atoi(m[1])
				continue
			}

			return nil, fmt.Errorf("failed to parse part of gctrace line: %q", part)
		}

		out = append(out, c)
	}

	return GCTrace(out), nil
}

// atoi converts s to an int, panicking on failure. Callers only pass
// substrings already matched by digits-only regexp groups, so a failure
// here indicates a bug rather than bad input.
func atoi(s string) int {
	i, err := strconv.Atoi(s)
	if err != nil {
		panic(err)
	}
	return i
}

// atof converts s to a float64, panicking on failure (see atoi).
func atof(s string) float64 {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		panic(err)
	}
	return f
}

// msToDur converts a decimal milliseconds string to a time.Duration.
func msToDur(s string) time.Duration {
	return time.Duration(atof(s) * 1e6)
}

// mbToBytes converts a decimal mebibytes string to Bytes.
func mbToBytes(s string) Bytes {
	return Bytes(atof(s) * (1024 * 1024))
}

// WithoutForced returns a copy of t with all forced (STW) cycles removed.
func (t GCTrace) WithoutForced() GCTrace {
	out := make(GCTrace, 0)
	for _, c := range t {
		if !c.Forced {
			out = append(out, c)
		}
	}
	return out
}
// findMedianSortedArrays returns the median of the combined contents of two
// individually sorted slices. It advances a two-pointer merge just past the
// midpoint while tracking the last two merged values, so it runs in
// O((m+n)/2) time and O(1) extra space. At least one slice must be
// non-empty.
func findMedianSortedArrays(nums1 []int, nums2 []int) float64 {
	total := len(nums1) + len(nums2)
	i, j := 0, 0
	prev, cur := -1, -1

	// takeSmaller pops the smaller of the two remaining slice heads,
	// preferring nums2 on ties (and when nums1 is exhausted).
	takeSmaller := func() int {
		if i < len(nums1) && (j >= len(nums2) || nums1[i] < nums2[j]) {
			v := nums1[i]
			i++
			return v
		}
		v := nums2[j]
		j++
		return v
	}

	// After this loop, cur is element total/2 (0-based) of the merged order
	// and prev is the element before it.
	for step := 0; step <= total/2; step++ {
		prev, cur = cur, takeSmaller()
	}

	if total%2 == 1 {
		return float64(cur)
	}
	return float64(prev+cur) / 2.0
}
package cli import ( "bytes" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "github.com/tilt-dev/tilt/pkg/apis" "github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1" ) func TestWait(t *testing.T) { f := newServerFixture(t) err := f.client.Create(f.ctx, &v1alpha1.UIResource{ ObjectMeta: metav1.ObjectMeta{Name: "my-sleep"}, Status: v1alpha1.UIResourceStatus{ Conditions: []v1alpha1.UIResourceCondition{ { Type: v1alpha1.UIResourceReady, Status: metav1.ConditionTrue, LastTransitionTime: apis.NowMicro(), }, }, }, }) require.NoError(t, err) out := bytes.NewBuffer(nil) streams := genericclioptions.IOStreams{Out: out} wait := newWaitCmd(streams) cmd := wait.register() err = cmd.Flags().Parse([]string{"--for=condition=Ready"}) require.NoError(t, err) err = wait.run(f.ctx, []string{"uiresource/my-sleep"}) require.NoError(t, err) assert.Contains(t, out.String(), `uiresource.tilt.dev/my-sleep condition met`) }
package main

import (
	"bufio"
	"container/heap"
	"fmt"
	"io"
	"math"
	"os"
	"strconv"
	"strings"
)

func main() {
	solve(os.Stdin, os.Stdout)
}

// edge is a weighted, directed adjacency-list entry.
type edge struct {
	to   int
	cost int
}

// vertex is a priority-queue entry: a vertex id and the path distance it was
// discovered with.
type vertex struct {
	id   int
	dist int
}

// priorityQueue is a min-heap of vertices ordered by distance, for use with
// container/heap.
type priorityQueue []vertex

func (p priorityQueue) Len() int {
	return len(p)
}

func (p priorityQueue) Less(i int, j int) bool {
	return p[i].dist < p[j].dist
}

func (p priorityQueue) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}

func (p *priorityQueue) Push(x interface{}) {
	*p = append(*p, x.(vertex))
}

func (p *priorityQueue) Pop() interface{} {
	old := *p
	*p = old[:len(old)-1]
	return old[len(old)-1]
}

// solve reads "n" (vertex count), "r" (edge count), and r lines of
// "from to cost" (1-based endpoints), builds an undirected graph, and prints
// the second-shortest distance from vertex 1 to vertex n.
func solve(stdin io.Reader, stdout io.Writer) {
	sc := bufio.NewScanner(stdin)
	sc.Scan()
	n, _ := strconv.Atoi(sc.Text())
	sc.Scan()
	r, _ := strconv.Atoi(sc.Text())

	graph := make([][]edge, n)
	for i := range graph {
		graph[i] = []edge{}
	}
	for i := 0; i < r; i++ {
		sc.Scan()
		line := strings.Split(sc.Text(), " ")
		from, _ := strconv.Atoi(line[0])
		to, _ := strconv.Atoi(line[1])
		cost, _ := strconv.Atoi(line[2])
		// Undirected: store the edge in both directions, 0-based.
		graph[from-1] = append(graph[from-1], edge{to: to - 1, cost: cost})
		graph[to-1] = append(graph[to-1], edge{to: from - 1, cost: cost})
	}

	ans := dijkstra(graph)
	fmt.Fprintln(stdout, ans)
}

// dijkstra returns the length of the second-shortest path from vertex 0 to
// the last vertex (math.MaxInt64 if none exists). Paths may revisit edges,
// so the second-shortest distance is the smallest distance strictly greater
// than the shortest.
//
// dist[v] is the best known shortest distance to v and dist2[v] the best
// known second-shortest. Each heap entry carries the distance it was pushed
// with; an entry worse than dist2 of its vertex is stale and skipped.
func dijkstra(graph [][]edge) int {
	dist := make([]int, len(graph))
	dist2 := make([]int, len(graph))
	for i := range graph {
		dist[i], dist2[i] = math.MaxInt64, math.MaxInt64
	}
	dist[0] = 0

	queue := priorityQueue{}
	heap.Push(&queue, vertex{id: 0, dist: 0})
	for queue.Len() > 0 {
		v := heap.Pop(&queue).(vertex)
		// Stale entry: we already know a second-shortest distance at least as
		// good, so nothing can improve through this pop. Note the comparison
		// must allow v.dist == dist2[v.id] to proceed — that pop is exactly
		// the second-shortest arrival that must be propagated.
		if v.dist > dist2[v.id] {
			continue
		}
		for _, e := range graph[v.id] {
			// BUG FIX: relax with the distance this entry was popped with
			// (v.dist), not dist[v.id]. The old code always extended the
			// shortest distance, so a path extending a *second*-shortest
			// prefix was never propagated and dist2 of the target could stay
			// MaxInt64 (e.g. parallel edges 0-1 of cost 1 and 2 plus 1-2 of
			// cost 1: the answer is 3, the old code returned MaxInt64).
			d := v.dist + e.cost
			if d < dist[e.to] {
				// New shortest path found; the displaced shortest distance
				// becomes a candidate second-shortest below.
				dist[e.to], d = d, dist[e.to]
				heap.Push(&queue, vertex{id: e.to, dist: dist[e.to]})
			}
			// Strictly worse than the shortest but better than the current
			// second-shortest (also skips the MaxInt64 sentinel the old code
			// used to push).
			if d > dist[e.to] && d < dist2[e.to] {
				dist2[e.to] = d
				heap.Push(&queue, vertex{id: e.to, dist: d})
			}
		}
	}
	return dist2[len(dist2)-1]
}
package main

// pizza models the Hash Code pizza-slicing problem: a grid of cells, each
// holding an ingredient, to be covered by non-overlapping slices.
// H is the maximum slice area, L the minimum count of each ingredient per
// slice, and R/C the grid's rows/columns.
type pizza struct {
	Cells map[point]*Cell
	H, L, R, C int
	Slices map[slice]*sliceInfo
}

// slice is a candidate rectangle, inclusive corner coordinates.
type slice struct {
	x0, y0, x1, y1 int
	//score int
}

// sliceInfo holds per-candidate ingredient counts, score, and whether the
// slice was actually placed on the pizza.
type sliceInfo struct {
	nbChamp, nbTomate int
	score             int
	used              bool
}

type point struct {
	x, y int
}

// Cell is one grid cell: its ingredient, the slice that currently covers it
// (nil if uncovered), and candidate slices starting here, bucketed by size.
// NOTE(review): AvailableSlice is never allocated in this chunk (the make in
// cutInBigSlice is commented out) — indexing it will panic unless it is
// initialized elsewhere; confirm against the rest of the file.
type Cell struct {
	Ingredient     byte
	IsInSlice      *slice
	AvailableSlice [][]*slice
}

// score sums the scores of all slices that were actually placed.
func (p *pizza) score() int {
	nbCell := 0
	for _, c := range p.Slices {
		if c.used {
			nbCell += c.score
		}
	}
	return nbCell
}

// cutInBigSlice enumerates, for every cell, all valid rectangles anchored at
// that cell whose area is H-cptSize (largest first) and whose width is a
// prime factor of that area. Rectangles lacking L of either ingredient are
// discarded. PrimeFactors is defined elsewhere in the package.
func (p *pizza) cutInBigSlice() {
	//fmt.Println(p.H)
	//primaryN := PrimeFactors(p.H)
	p.Slices = make(map[slice]*sliceInfo)
	for coordCell := range p.Cells {
		//p.Cells[coordCell].AvailableSlice = make([][]*slice, p.H)
		for cptSize := 0; cptSize < p.H; cptSize++ {
			primaryN := PrimeFactors(p.H - cptSize)
			for _, n := range primaryN {
				// Skip rectangles that would fall off the grid.
				if coordCell.x+n > p.C || coordCell.y+p.H/n > p.R {
					continue
				}
				// NOTE(review): the rectangle's height uses p.H/n, not
				// (p.H-cptSize)/n, so for cptSize > 0 the area is not
				// H-cptSize — confirm whether this is intended.
				coordSlice := slice{coordCell.x, coordCell.y, coordCell.x + n - 1, coordCell.y + p.H/n - 1}
				p.Slices[coordSlice] = &sliceInfo{}
				// Count ingredients inside the candidate rectangle.
				for j := 0; j < n; j++ {
					for k := 0; k < p.H/n; k++ {
						_, exist := p.Cells[point{coordCell.x + j, coordCell.y + k}]
						if !exist {
							break
						}
						if p.Cells[point{coordCell.x + j, coordCell.y + k}].Ingredient == 'T' {
							p.Slices[coordSlice].nbTomate++
						} else {
							p.Slices[coordSlice].nbChamp++
						}
					}
				}
				// Reject rectangles missing the minimum of either ingredient.
				if p.Slices[coordSlice].nbTomate < p.L || p.Slices[coordSlice].nbChamp < p.L {
					delete(p.Slices, coordSlice)
					continue
				}
				// Score is simply the covered-cell count.
				p.Slices[coordSlice].score = p.Slices[coordSlice].nbChamp + p.Slices[coordSlice].nbTomate
				// coordSlice is freshly declared each iteration, so taking
				// its address here stores a distinct pointer per candidate.
				p.Cells[coordCell].AvailableSlice[p.H - cptSize - 1] = append(p.Cells[coordCell].AvailableSlice[p.H - cptSize - 1], &coordSlice)
			}
		}
	}
}

// PlacerSlice greedily places candidate slices, largest size bucket first,
// scanning the grid row-major and taking the first candidate whose cells are
// all still uncovered.
func (p pizza) PlacerSlice() {
	//fmt.Println("R ",p.R," C ",p.C)
	for cptSize := 0; cptSize < p.H; cptSize++ {
		for y := 0; y < p.R; y++ {
			for x := 0; x < p.C; x++ {
				//fmt.Println("X ", x, " Y ", y)
				if p.Cells[point{x, y}].IsInSlice != nil {
					//fmt.Println(p.Cells[point{x, y}].IsInSlice)
					continue
				}
			dance:
				for _, trySlice := range p.Cells[point{x, y}].AvailableSlice[p.H - cptSize - 1] {
					// First pass: bail out to the next candidate if any cell
					// is already covered.
					for i := trySlice.x0; i <= trySlice.x1; i++ {
						for j := trySlice.y0; j <= trySlice.y1; j++ {
							if p.Cells[point{i, j}].IsInSlice != nil {
								continue dance
							}
						}
					}
					// Second pass: claim every cell for this slice.
					for i := trySlice.x0; i <= trySlice.x1; i++ {
						for j := trySlice.y0; j <= trySlice.y1; j++ {
							//fmt.Println("locking ",trySlice,i,j)
							p.Cells[point{i, j}].IsInSlice = trySlice
						}
					}
					p.Slices[*trySlice].used = true
					// Skip x forward past the placed slice. NOTE(review): the
					// -1 plus the loop's x++ lands on x1 (not x1+1); that cell
					// is re-checked and skipped via IsInSlice, so this is
					// redundant work rather than a correctness issue.
					x += trySlice.x1 - trySlice.x0 - 1
					break
				}
			}
		}
	}
}
/* * Lean tool - hypothesis testing application * * https://github.com/MikaelLazarev/lean-tool/ * Copyright (c) 2020. Mikhail Lazarev * */ package marketing import ( "context" "github.com/MikaelLazarev/willie/server/core" "github.com/MikaelLazarev/willie/server/helpers" "github.com/stretchr/testify/assert" "testing" ) func Test_store_GetFeaturesList(t *testing.T) { fakeApiClient := helpers.NewFakeApiClient("HOST", "SECRET_KEY") mockMarketingStore := NewFromClient(fakeApiClient) featuresToSend := []core.Feature{ { ID: "123", Name: "Feature1", Weight: 100, IsHeader: false, BasicPlan: "+", PremiumPlan: "+", TeamsPlan: "+", }, { ID: "2223", Name: "Feature2", Weight: 1000, IsHeader: true, BasicPlan: "", PremiumPlan: "", TeamsPlan: "+", }, } fakeApiClient.AddResponse(&featuresToSend, nil) features, err := mockMarketingStore.GetFeaturesList(context.TODO()) assert.NoError(t, err) assert.EqualValues(t, featuresToSend, features) fakeApiClient.AssertRequest(t, 0, "GET", "HOST/api/features/", struct{}{}) assert.Equal(t, fakeApiClient.Count(), 1) }
package handlers import ( mdl "diaria/models" route "diaria/routes" sec "diaria/security" "html/template" "log" "net/http" "strconv" ) func CreateFoodHandler(w http.ResponseWriter, r *http.Request) { log.Println("Create Food") if r.Method == "POST" && sec.IsAuthenticated(w, r) { name := r.FormValue("Name") measure := r.FormValue("Measure") qtd := r.FormValue("Qtd") cho := r.FormValue("Cho") kcal := r.FormValue("Kcal") sqlStatement := "INSERT INTO foods(name, measure, qtd, cho, kcal) VALUES ($1, $2, $3, $4, $5) RETURNING id" id := 0 err := Db.QueryRow(sqlStatement, name, measure, qtd, cho, kcal).Scan(&id) sec.CheckInternalServerError(err, w) if err != nil { panic(err.Error()) } sec.CheckInternalServerError(err, w) log.Println("INSERT: Id: " + strconv.Itoa(id) + " | Name: " + name + " | Measure: " + measure + " | Qtd: " + qtd + " | Cho: " + cho + " | Kcal: " + kcal) http.Redirect(w, r, route.FoodsRoute, 301) } else { http.Redirect(w, r, "/logout", 301) } } func UpdateFoodHandler(w http.ResponseWriter, r *http.Request) { log.Println("Update Food") if r.Method == "POST" && sec.IsAuthenticated(w, r) { id := r.FormValue("Id") name := r.FormValue("Name") measure := r.FormValue("Measure") measureId := r.FormValue("SelectMeasureForUpdate") qtd := r.FormValue("Qtd") cho := r.FormValue("Cho") kcal := r.FormValue("Kcal") sqlStatement := "UPDATE foods SET name=$1, measure=$2, qtd=$3, cho=$4, kcal=$5, measure_id=$6 WHERE id=$7" updtForm, err := Db.Prepare(sqlStatement) sec.CheckInternalServerError(err, w) if err != nil { panic(err.Error()) } sec.CheckInternalServerError(err, w) updtForm.Exec(name, measure, qtd, cho, kcal, measureId, id) log.Println("UPDATE: Id: " + id + " | Name: " + name + " | Measure: " + measure + " | Qtd: " + qtd + " | Cho: " + cho + " | Kcal: " + kcal) http.Redirect(w, r, route.FoodsRoute, 301) } else { http.Redirect(w, r, "/logout", 301) } } func DeleteFoodHandler(w http.ResponseWriter, r *http.Request) { log.Println("Delete Food") if r.Method == "POST" && 
sec.IsAuthenticated(w, r) { id := r.FormValue("Id") sqlStatement := "DELETE FROM foods WHERE id=$1" deleteForm, err := Db.Prepare(sqlStatement) if err != nil { panic(err.Error()) } deleteForm.Exec(id) sec.CheckInternalServerError(err, w) log.Println("DELETE: Id: " + id) http.Redirect(w, r, route.FoodsRoute, 301) } else { http.Redirect(w, r, "/logout", 301) } } func ListFoodsHandler(w http.ResponseWriter, r *http.Request) { log.Println("List Foods") // if !sec.IsAuthenticated(w, r) { query := "SELECT " + " A.id, A.name, coalesce(A.measure,'') as measure, coalesce(B.name,'') as measure_name, " + " A.measure_id, A.qtd, A.cho, A.kcal " + " FROM foods A " + " LEFT OUTER JOIN measures B " + " ON A.measure_id = B.id ORDER BY name ASC" log.Println("Query: " + query) rows, err := Db.Query(query) sec.CheckInternalServerError(err, w) var foods []mdl.Food var food mdl.Food var i = 1 for rows.Next() { err = rows.Scan(&food.Id, &food.Name, &food.Measure, &food.MeasureName, &food.MeasureId, &food.Qtd, &food.Cho, &food.Kcal) sec.CheckInternalServerError(err, w) food.Order = i i++ foods = append(foods, food) } var page mdl.PageFoods page.Foods = foods var measures []mdl.Measure var measure mdl.Measure rows, err = Db.Query("SELECT id, name FROM measures order by name asc") for rows.Next() { err = rows.Scan(&measure.Id, &measure.Name) sec.CheckInternalServerError(err, w) measures = append(measures, measure) } page.Title = "Tabela de Alimentos" page.Measures = measures page.LoggedUser = BuildLoggedUser(GetUserInCookie(w, r)) var tmpl = template.Must(template.ParseGlob("tiles/foods/*")) tmpl.ParseGlob("tiles/*") tmpl.ExecuteTemplate(w, "Main-Food", page) // } else { // http.Redirect(w, r, "/logout", 301) // } }
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package policy

import (
	"context"
	"encoding/json"
	"strings"
	"time"

	"chromiumos/tast/common/policy"
	"chromiumos/tast/common/servo"
	"chromiumos/tast/ctxutil"
	"chromiumos/tast/errors"
	"chromiumos/tast/remote/bundles/cros/policy/dututils"
	"chromiumos/tast/remote/policyutil"
	"chromiumos/tast/rpc"
	pspb "chromiumos/tast/services/cros/policy"
	"chromiumos/tast/testing"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         DeviceBootOnAC,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Tests the DeviceBootOnAcEnabled policy that boots up the DUT from shutdown by plugging in a power supply",
		Contacts: []string{
			"chromeos-oem-services@google.com", // Use team email for tickets.
			"bkersting@google.com",
			"lamzin@google.com",
		},
		SoftwareDeps: []string{"wilco", "chrome"},
		Timeout:      30 * time.Minute,
		// Disabled due to <1% pass rate over 30 days. See b/246818601
		//Attr: []string{"group:wilco_bve"},
		ServiceDeps: []string{
			"tast.cros.hwsec.OwnershipService",
			"tast.cros.policy.PolicyService",
		},
		// Var "servo" is a ServoV4 Type-C device paired with a Servo Micro via the micro USB port.
		// Servo Micro as usual gets connected to the DUT motherboard debug header and the other cable with
		// a USB-C head is attached to the DUT type C port. Note: both cables must be connected to the DUT.
		Vars: []string{"servo"},
	})
}

// DeviceBootOnAC verifies DeviceBootOnAcEnabled policy that boots the device from the off state by plugging
// in a power supply. If the policy is disabled or not set, boot on AC is off.
func DeviceBootOnAC(ctx context.Context, s *testing.State) {
	d := s.DUT()

	// isDischarging checks if the DUT is in discharging state.
	isDischarging := func(ctx context.Context) (bool, error) {
		// Read the kernel's battery status via sysfs on the DUT.
		out, err := d.Conn().CommandContext(ctx, "cat", "/sys/class/power_supply/BAT0/status").Output()
		if err != nil {
			return false, err
		}
		return strings.TrimSpace(string(out)) == "Discharging", nil
	}

	// Shorten deadline to leave time for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 3*time.Minute)
	defer cancel()

	pxy, err := servo.NewProxy(ctx, s.RequiredVar("servo"), d.KeyFile(), d.KeyDir())
	if err != nil {
		s.Fatal("Failed to connect to servo: ", err)
	}
	defer pxy.Close(cleanupCtx)

	// Whatever happens in the subtests, leave the DUT powered, charging, and
	// with a reset TPM so subsequent tests start from a clean state.
	defer func(ctx context.Context) {
		if err := pxy.Servo().SetPDRole(ctx, servo.PDRoleSrc); err != nil {
			s.Error("Failed to reset servo power delivery (PD) role to src: ", err)
		}
		if err := dututils.EnsureDUTIsOn(ctx, d, pxy.Servo()); err != nil {
			s.Error("Failed to ensure DUT is powered on: ", err)
		}
		if err := policyutil.EnsureTPMAndSystemStateAreReset(ctx, d, s.RPCHint()); err != nil {
			s.Error("Failed to reset TPM after test: ", err)
		}
	}(cleanupCtx)

	// One subtest per policy state; wantBootUp is whether the DUT is expected
	// to boot when AC power is reattached.
	for _, tc := range []struct {
		name       string
		policy     policy.Policy
		wantBootUp bool
	}{
		{
			name:       "unset",
			policy:     &policy.DeviceBootOnAcEnabled{Stat: policy.StatusUnset},
			wantBootUp: false,
		},
		{
			name:       "enabled",
			policy:     &policy.DeviceBootOnAcEnabled{Val: true},
			wantBootUp: true,
		},
		{
			name:       "disabled",
			policy:     &policy.DeviceBootOnAcEnabled{Val: false},
			wantBootUp: false,
		},
	} {
		s.Run(ctx, tc.name, func(ctx context.Context, s *testing.State) {
			// For safety purpose, introducing a new cleanup context for device boot up.
			cleanupCtx := ctx
			ctx, cancel := ctxutil.Shorten(ctx, 2*time.Minute)
			defer cancel()

			if err := policyutil.EnsureTPMAndSystemStateAreReset(ctx, d, s.RPCHint()); err != nil {
				s.Fatal("Failed to reset TPM: ", err)
			}

			cl, err := rpc.Dial(ctx, d, s.RPCHint())
			if err != nil {
				s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
			}
			defer cl.Close(ctx)

			policyClient := pspb.NewPolicyServiceClient(cl.Conn)

			// Serialize the single policy under test and enroll the device
			// with it through the DUT-side policy service.
			pb := policy.NewBlob()
			pb.AddPolicy(tc.policy)

			pJSON, err := json.Marshal(pb)
			if err != nil {
				s.Fatal("Failed to serialize policies: ", err)
			}

			if _, err := policyClient.EnrollUsingChrome(ctx, &pspb.EnrollUsingChromeRequest{
				PolicyJson: pJSON,
			}); err != nil {
				s.Fatal("Failed to enroll using chrome: ", err)
			}

			// Cutting off the power supply.
			if err := pxy.Servo().SetPDRole(ctx, servo.PDRoleSnk); err != nil {
				s.Fatal("Failed to cut-off power supply: ", err)
			}

			// Ensuring DUT actually has started discharging. Using polling to tackle
			// the delay of the three power states in DUT i.e. Charging to Unknown to Discharging.
			if err := testing.Poll(ctx, func(ctx context.Context) error {
				discharging, err := isDischarging(ctx)
				if err != nil {
					return testing.PollBreak(err)
				}
				if !discharging {
					return errors.New("device is not in discharging state")
				}
				return nil
			}, &testing.PollOptions{
				Timeout:  10 * time.Second,
				Interval: time.Second,
			}); err != nil {
				s.Fatal("Failed to wait for device discharging state: ", err)
			}

			// Even if policy fails, device must be on a power on state in between subtests.
			defer func(ctx context.Context) {
				if err := dututils.EnsureDUTIsOn(ctx, d, pxy.Servo()); err != nil {
					s.Error("Failed to ensure DUT is powered on: ", err)
				}
			}(cleanupCtx)

			// Powering off DUT and ensuring DUT is unreachable.
			if err := pxy.Servo().KeypressWithDuration(ctx, servo.PowerKey, servo.DurLongPress); err != nil {
				s.Fatal("Failed to power off DUT: ", err)
			}

			s.Log("Waiting for DUT to become unreachable")
			if err := d.WaitUnreachable(ctx); err != nil {
				s.Fatal("DUT is still reachable while it should not be: ", err)
			}
			s.Log("DUT became unreachable as expected")

			// Even after DUT becomes unreachable, it is not completely powered off.
			if err := testing.Sleep(ctx, 15*time.Second); err != nil {
				s.Error("Failed to sleep: ", err)
			}

			// Connecting DUT to the power supply to test the policy behaviour.
			if err := pxy.Servo().SetPDRole(ctx, servo.PDRoleSrc); err != nil {
				s.Fatal("Unable to turn on power supply: ", err)
			}

			if tc.wantBootUp {
				// Policy should boot the DUT: wait for SSH to come back.
				waitCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
				defer cancel()
				if err := d.WaitConnect(waitCtx); err != nil {
					s.Error("Failed to wait DUT to be connected: ", err)
				}
			} else {
				// Policy disabled/unset: the DUT must stay off for the whole window.
				if err := dututils.EnsureDisconnected(ctx, d, 2*time.Minute); err != nil {
					s.Error("Failed to ensure DUT is disconnected: ", err)
				}
			}
		})
	}
}
// Copyright (c) 2021 Target Brands, Inc. All rights reserved.
//
// Use of this source code is governed by the LICENSE file in this repository.

package docker

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/docker/pkg/stringid"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ContainerService implements all the container
// related functions for the Docker mock.
//
// Methods key their behavior off the container name: names containing
// "notfound"/"not-found" simulate a missing container unless they also
// contain "ignorenotfound"/"ignore-not-found".
type ContainerService struct{}

// ContainerAttach is a helper function to simulate
// a mocked call to attach a connection to a
// Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerAttach
func (c *ContainerService) ContainerAttach(ctx context.Context, ctn string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
	return types.HijackedResponse{}, nil
}

// ContainerCommit is a helper function to simulate
// a mocked call to apply changes to a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerCommit
func (c *ContainerService) ContainerCommit(ctx context.Context, ctn string, options types.ContainerCommitOptions) (types.IDResponse, error) {
	return types.IDResponse{}, nil
}

// ContainerCreate is a helper function to simulate
// a mocked call to create a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerCreate
func (c *ContainerService) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, p *v1.Platform, ctn string) (container.ContainerCreateCreatedBody, error) {
	// verify a container was provided
	if len(ctn) == 0 {
		return container.ContainerCreateCreatedBody{}, errors.New("no container provided")
	}

	// check if the container is notfound and
	// check if the notfound should be ignored
	if strings.Contains(ctn, "notfound") &&
		!strings.Contains(ctn, "ignorenotfound") {
		return container.ContainerCreateCreatedBody{},
			// nolint:golint,stylecheck // message is capitalized to match Docker messages
			errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	// check if the container is not-found and
	// check if the not-found should be ignored
	if strings.Contains(ctn, "not-found") &&
		!strings.Contains(ctn, "ignore-not-found") {
		return container.ContainerCreateCreatedBody{},
			// nolint:golint,stylecheck // message is capitalized to match Docker messages
			errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	// check if the image is not found
	if strings.Contains(config.Image, "notfound") ||
		strings.Contains(config.Image, "not-found") {
		return container.ContainerCreateCreatedBody{},
			errdefs.NotFound(
				// nolint:golint,stylecheck // message is capitalized to match Docker messages
				fmt.Errorf("Error response from daemon: manifest for %s not found: manifest unknown", config.Image),
			)
	}

	// create response object to return
	response := container.ContainerCreateCreatedBody{
		ID: stringid.GenerateRandomID(),
	}

	return response, nil
}

// ContainerDiff is a helper function to simulate
// a mocked call to show the differences in the
// filesystem between two Docker containers.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerDiff
func (c *ContainerService) ContainerDiff(ctx context.Context, ctn string) ([]container.ContainerChangeResponseItem, error) {
	return nil, nil
}

// ContainerExecAttach is a helper function to simulate
// a mocked call to attach a connection to a process
// running inside a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerExecAttach
func (c *ContainerService) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
	return types.HijackedResponse{}, nil
}

// ContainerExecCreate is a helper function to simulate
// a mocked call to create a process to run inside a
// Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerExecCreate
func (c *ContainerService) ContainerExecCreate(ctx context.Context, ctn string, config types.ExecConfig) (types.IDResponse, error) {
	return types.IDResponse{}, nil
}

// ContainerExecInspect is a helper function to simulate
// a mocked call to inspect a process running inside a
// Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerExecInspect
func (c *ContainerService) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
	return types.ContainerExecInspect{}, nil
}

// ContainerExecResize is a helper function to simulate
// a mocked call to resize the tty for a process running
// inside a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerExecResize
func (c *ContainerService) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
	return nil
}

// ContainerExecStart is a helper function to simulate
// a mocked call to start a process inside a Docker
// container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerExecStart
func (c *ContainerService) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
	return nil
}

// ContainerExport is a helper function to simulate
// a mocked call to export the contents of a Docker
// container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerExport
func (c *ContainerService) ContainerExport(ctx context.Context, ctn string) (io.ReadCloser, error) {
	return nil, nil
}

// ContainerInspect is a helper function to simulate
// a mocked call to inspect a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerInspect
func (c *ContainerService) ContainerInspect(ctx context.Context, ctn string) (types.ContainerJSON, error) {
	// verify a container was provided
	if len(ctn) == 0 {
		return types.ContainerJSON{}, errors.New("no container provided")
	}

	// check if the container is notfound and
	// check if the notfound should be ignored
	if strings.Contains(ctn, "notfound") &&
		!strings.Contains(ctn, "ignorenotfound") {
		return types.ContainerJSON{},
			// nolint:golint,stylecheck // message is capitalized to match Docker messages
			errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	// check if the container is not-found and
	// check if the not-found should be ignored
	if strings.Contains(ctn, "not-found") &&
		!strings.Contains(ctn, "ignore-not-found") {
		return types.ContainerJSON{},
			// nolint:golint,stylecheck // message is capitalized to match Docker messages
			errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	// create response object to return: a running alpine container with a
	// random ID, named after the requested container
	response := types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			ID:    stringid.GenerateRandomID(),
			Image: "alpine:latest",
			Name:  ctn,
			State: &types.ContainerState{Running: true},
		},
		Config: &container.Config{
			Image: "alpine:latest",
		},
	}

	return response, nil
}

// ContainerInspectWithRaw is a helper function to simulate
// a mocked call to inspect a Docker container and return
// the raw body received from the API.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerInspectWithRaw
func (c *ContainerService) ContainerInspectWithRaw(ctx context.Context, ctn string, getSize bool) (types.ContainerJSON, []byte, error) {
	// verify a container was provided
	if len(ctn) == 0 {
		return types.ContainerJSON{}, nil, errors.New("no container provided")
	}

	// check if the container is not found
	// (unlike ContainerInspect, there is no "ignore" escape hatch here)
	if strings.Contains(ctn, "notfound") ||
		strings.Contains(ctn, "not-found") {
		return types.ContainerJSON{}, nil,
			// nolint:golint,stylecheck // message is capitalized to match Docker messages
			errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	// create response object to return
	response := types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			ID:    stringid.GenerateRandomID(),
			Image: "alpine:latest",
			Name:  ctn,
			State: &types.ContainerState{Running: true},
		},
		Config: &container.Config{
			Image: "alpine:latest",
		},
	}

	// marshal response into raw bytes
	b, err := json.Marshal(response)
	if err != nil {
		return types.ContainerJSON{}, nil, err
	}

	return response, b, nil
}

// ContainerKill is a helper function to simulate
// a mocked call to kill a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerKill
func (c *ContainerService) ContainerKill(ctx context.Context, ctn, signal string) error {
	// verify a container was provided
	if len(ctn) == 0 {
		return errors.New("no container provided")
	}

	// check if the container is not found
	if strings.Contains(ctn, "notfound") ||
		strings.Contains(ctn, "not-found") {
		// nolint:golint,stylecheck // message is capitalized to match Docker messages
		return errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	return nil
}

// ContainerList is a helper function to simulate
// a mocked call to list Docker containers.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerList
func (c *ContainerService) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	return nil, nil
}

// ContainerLogs is a helper function to simulate
// a mocked call to capture the logs from a
// Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerLogs
func (c *ContainerService) ContainerLogs(ctx context.Context, ctn string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
	// verify a container was provided
	if len(ctn) == 0 {
		return nil, errors.New("no container provided")
	}

	// check if the container is not found
	if strings.Contains(ctn, "notfound") ||
		strings.Contains(ctn, "not-found") {
		// nolint:golint,stylecheck // message is capitalized to match Docker messages
		return nil, errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	// create response object to return; the logs are stdcopy-multiplexed,
	// matching the framed format a real daemon produces for non-TTY containers
	response := new(bytes.Buffer)

	// write stdout logs to response buffer
	_, err := stdcopy.
		NewStdWriter(response, stdcopy.Stdout).
		Write([]byte("hello to stdout from github.com/go-vela/mock/docker"))
	if err != nil {
		return nil, err
	}

	// write stderr logs to response buffer
	_, err = stdcopy.
		NewStdWriter(response, stdcopy.Stderr).
		Write([]byte("hello to stderr from github.com/go-vela/mock/docker"))
	if err != nil {
		return nil, err
	}

	return ioutil.NopCloser(response), nil
}

// ContainerPause is a helper function to simulate
// a mocked call to pause a running Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerPause
func (c *ContainerService) ContainerPause(ctx context.Context, ctn string) error {
	return nil
}

// ContainerRemove is a helper function to simulate
// a mocked call to remove a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerRemove
func (c *ContainerService) ContainerRemove(ctx context.Context, ctn string, options types.ContainerRemoveOptions) error {
	// verify a container was provided
	if len(ctn) == 0 {
		return errors.New("no container provided")
	}

	// check if the container is not found
	if strings.Contains(ctn, "notfound") ||
		strings.Contains(ctn, "not-found") {
		// nolint:golint,stylecheck // message is capitalized to match Docker messages
		return errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	return nil
}

// ContainerRename is a helper function to simulate
// a mocked call to rename a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerRename
func (c *ContainerService) ContainerRename(ctx context.Context, container, newContainerName string) error {
	return nil
}

// ContainerResize is a helper function to simulate
// a mocked call to resize a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerResize
func (c *ContainerService) ContainerResize(ctx context.Context, ctn string, options types.ResizeOptions) error {
	return nil
}

// ContainerRestart is a helper function to simulate
// a mocked call to restart a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerRestart
func (c *ContainerService) ContainerRestart(ctx context.Context, ctn string, timeout *time.Duration) error {
	return nil
}

// ContainerStart is a helper function to simulate
// a mocked call to start a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerStart
func (c *ContainerService) ContainerStart(ctx context.Context, ctn string, options types.ContainerStartOptions) error {
	// verify a container was provided
	if len(ctn) == 0 {
		return errors.New("no container provided")
	}

	// check if the container is not found
	if strings.Contains(ctn, "notfound") ||
		strings.Contains(ctn, "not-found") {
		// nolint:golint,stylecheck // message is capitalized to match Docker messages
		return errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn))
	}

	return nil
}

// ContainerStatPath is a helper function to simulate
// a mocked call to capture information about a path
// inside a Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerStatPath
func (c *ContainerService) ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) {
	return types.ContainerPathStat{}, nil
}

// ContainerStats is a helper function to simulate
// a mocked call to capture information about a
// Docker container.
//
// https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerStats
func (c *ContainerService) ContainerStats(ctx context.Context, ctn string, stream bool) (types.ContainerStats, error) {
	return types.ContainerStats{}, nil
}

// ContainerStop is a helper function to simulate
// a mocked call to stop a Docker container.
// // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerStop func (c *ContainerService) ContainerStop(ctx context.Context, ctn string, timeout *time.Duration) error { // verify a container was provided if len(ctn) == 0 { return errors.New("no container provided") } // check if the container is not found if strings.Contains(ctn, "notfound") || strings.Contains(ctn, "not-found") { // nolint:golint,stylecheck // messsage is capitalized to match Docker messages return errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn)) } return nil } // ContainerTop is a helper function to simulate // a mocked call to show running processes inside // a Docker container. // // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerTop func (c *ContainerService) ContainerTop(ctx context.Context, ctn string, arguments []string) (container.ContainerTopOKBody, error) { return container.ContainerTopOKBody{}, nil } // ContainerUnpause is a helper function to simulate // a mocked call to unpause a Docker container. // // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerUnpause func (c *ContainerService) ContainerUnpause(ctx context.Context, ctn string) error { return nil } // ContainerUpdate is a helper function to simulate // a mocked call to update a Docker container. // // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerUpdate func (c *ContainerService) ContainerUpdate(ctx context.Context, ctn string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { return container.ContainerUpdateOKBody{}, nil } // ContainerWait is a helper function to simulate // a mocked call to wait for a running Docker // container to finish. 
// // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainerWait func (c *ContainerService) ContainerWait(ctx context.Context, ctn string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { ctnCh := make(chan container.ContainerWaitOKBody, 1) errCh := make(chan error, 1) // verify a container was provided if len(ctn) == 0 { // propagate the error to the error channel errCh <- errors.New("no container provided") return ctnCh, errCh } // check if the container is not found if strings.Contains(ctn, "notfound") || strings.Contains(ctn, "not-found") { // propagate the error to the error channel // nolint:golint,stylecheck // messsage is capitalized to match Docker messages errCh <- errdefs.NotFound(fmt.Errorf("Error: No such container: %s", ctn)) return ctnCh, errCh } // create goroutine for responding to call go func() { // create response object to return response := container.ContainerWaitOKBody{ StatusCode: 15, } // sleep for 1 second to simulate waiting for the container time.Sleep(1 * time.Second) // propagate the response to the container channel ctnCh <- response // propagate nil to the error channel errCh <- nil }() return ctnCh, errCh } // ContainersPrune is a helper function to simulate // a mocked call to prune Docker containers. // // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.ContainersPrune func (c *ContainerService) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { return types.ContainersPruneReport{}, nil } // ContainerStatsOneShot is a helper function to simulate // a mocked call to return near realtime stats for a given container. 
// // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.CopyFromContainer func (c *ContainerService) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { return types.ContainerStats{}, nil } // CopyFromContainer is a helper function to simulate // a mocked call to copy content from a Docker container. // // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.CopyFromContainer func (c *ContainerService) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { return nil, types.ContainerPathStat{}, nil } // CopyToContainer is a helper function to simulate // a mocked call to copy content to a Docker container. // // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#Client.CopyToContainer func (c *ContainerService) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { return nil } // WARNING: DO NOT REMOVE THIS UNDER ANY CIRCUMSTANCES // // This line serves as a quick and efficient way to ensure that our // ContainerService satisfies the ContainerAPIClient interface that // the Docker client expects. // // https://pkg.go.dev/github.com/docker/docker/client?tab=doc#ContainerAPIClient var _ client.ContainerAPIClient = (*ContainerService)(nil)
package main

import (
	"fmt"

	"main/utils"
)

/*
Two Sum (https://leetcode-cn.com/problems/two-sum):

Given an array of integers nums and a target value, return the indices of
the two numbers that add up to target. Each input has exactly one answer,
and the same element may not be used twice.

Example: nums = [2, 7, 11, 15], target = 9 -> [0, 1], since nums[0]+nums[1] == 9.
*/

// twoSum returns the indices of two entries of nums that sum to target,
// or an empty slice when no such pair exists.
//
// Fix: the previous version precomputed keySubList (the complement of every
// element) and then tested `subNum == keySubList[idx]`, where both sides are
// `target - nums[idx]` — a comparison that is always true. The dead list and
// dead branch are removed; the search behavior is unchanged.
func twoSum(nums []int, target int) []int {
	for idx, num := range nums {
		want := target - num

		// Walk successive occurrences of the complement until one is
		// found at a different index, so an element is never paired
		// with itself. utils.IndexOf(nums, want, nth) is assumed to
		// return the nth (1-based) occurrence of want, or -1 when
		// there is none — TODO confirm against utils package.
		for nth := 1; ; nth++ {
			otherIdx := utils.IndexOf(nums, want, nth)
			if otherIdx == -1 {
				break
			}

			if otherIdx != idx {
				return []int{idx, otherIdx}
			}
		}
	}

	return []int{}
}

// twoSum2 solves the same problem in O(n) with a single pass: record each
// value's index while iterating and look the complement up *before*
// inserting the current value, so an element can never be paired with
// itself (and duplicates such as [3, 3] are handled correctly).
//
// NOTE(review): unlike twoSum, this returns the later index first
// ([idx, subIdx]); kept as-is to preserve behavior for callers.
func twoSum2(nums []int, target int) []int {
	subMap := make(map[int]int, len(nums))

	for idx, val := range nums {
		if subIdx, exist := subMap[target-val]; exist {
			return []int{idx, subIdx}
		}

		subMap[val] = idx
	}

	return []int{}
}

// main runs a few sanity checks against twoSum and reports the first
// failing case, or "success" when all pass.
func main() {
	data := []int{2, 7, 11, 15}
	result := twoSum(data, 9)
	if len(result) != 2 || !utils.CompareEqual(result, []int{0, 1}) {
		fmt.Printf("fail 1: %v", result)
		return
	}

	data = []int{3, 3, 11, 15}
	result = twoSum(data, 6)
	if len(result) != 2 || !utils.CompareEqual(result, []int{0, 1}) {
		fmt.Printf("fail 2: %v", result)
		return
	}

	data = []int{3, 2, 4}
	result = twoSum(data, 6)
	if len(result) != 2 || !utils.CompareEqual(result, []int{1, 2}) {
		fmt.Printf("fail 3: %v", result)
		return
	}

	fmt.Printf("success")
}