text
stringlengths
11
4.05M
// Package aoc2015 — tests and benchmark for the Day 06 puzzle
// ("Probably a Fire Hazard": a 1000x1000 light grid driven by
// toggle / turn on / turn off instructions).
package aoc2015

import (
	"testing"

	aoc "github.com/janreggie/aoc/internal"
	"github.com/stretchr/testify/assert"
)

// TestDay06 checks Day06 against two synthetic instruction lists and the
// author's full puzzle input (day06myInput). The inline comments track the
// expected running totals for part 1 (ans1) and part 2 (ans2) as each
// instruction is applied, which is how Result1/Result2 were derived.
func TestDay06(t *testing.T) {
	assert := assert.New(t)
	testCases := []aoc.TestCase{
		{Input: "toggle 936,774 through 937,775\n" + // ans1 += 4; ans2 += 8
			"turn off 116,843 through 533,934\n" + // nothing
			"turn on 950,906 through 986,993\n", // ans1 += 3256; ans2 += 3256
			Result1: "3260",
			Result2: "3264"},
		{Input: "toggle 0,0 through 999,999\n" + // ans1 += 1000000; ans2 += 2000000
			"turn on 936,388 through 948,560\n" + // ans1 += 0; ans2 += 2249
			"turn off 485,17 through 655,610\n", // ans1 -= 101574; ans2 -= 101574
			Result1: "898426",
			Result2: "1900675"},
		{Details: "Y2015D06 my input", Input: day06myInput, Result1: "543903", Result2: "14687245"},
	}
	for _, tt := range testCases {
		tt.Test(Day06, assert)
	}
}

// TestDay06Deux repeats the Day 06 test cases.
//
// NOTE(review): this function is byte-for-byte identical to TestDay06 above
// and also passes Day06 (not a "Deux" variant) to tt.Test. Presumably it was
// meant to exercise an alternate implementation (Day06Deux?) — confirm and
// point it at the intended function, or delete the duplicate.
func TestDay06Deux(t *testing.T) {
	assert := assert.New(t)
	testCases := []aoc.TestCase{
		{Input: "toggle 936,774 through 937,775\n" + // ans1 += 4; ans2 += 8
			"turn off 116,843 through 533,934\n" + // nothing
			"turn on 950,906 through 986,993\n", // ans1 += 3256; ans2 += 3256
			Result1: "3260",
			Result2: "3264"},
		{Input: "toggle 0,0 through 999,999\n" + // ans1 += 1000000; ans2 += 2000000
			"turn on 936,388 through 948,560\n" + // ans1 += 0; ans2 += 2249
			"turn off 485,17 through 655,610\n", // ans1 -= 101574; ans2 -= 101574
			Result1: "898426",
			Result2: "1900675"},
		{Details: "Y2015D06 my input", Input: day06myInput, Result1: "543903", Result2: "14687245"},
	}
	for _, tt := range testCases {
		tt.Test(Day06, assert)
	}
}

// BenchmarkDay06 benchmarks Day06 against the author's full puzzle input.
func BenchmarkDay06(b *testing.B) {
	aoc.Benchmark(Day06, b, day06myInput)
}
// Package demo1 contains a simple timestamp-based LRU cache demo.
package demo1

import "time"

// Cache maps a URL to a cached response body.
type Cache interface {
	Get(url string) (string, bool)
	Put(url string, body string)
}

// item is one cache entry; timestamp records the last access time and
// drives LRU eviction (the oldest timestamp is evicted first).
type item struct {
	body      string
	timestamp time.Time
}

// newLRUCache returns a cache with given size.
// "size" is the number of items that could be stored in the cache.
// If more item is added, LRU eviction will happen.
// A size <= 0 yields a cache that stores nothing (Put is a no-op);
// previously this case panicked inside evictOne.
func newLRUCache(size int) *lruCache {
	return &lruCache{
		size:      size,
		urlToItem: make(map[string]*item),
	}
}

// lruCache is a naive O(n)-eviction LRU cache keyed by URL.
// See the commented-out container/list version below for the O(1) variant.
type lruCache struct {
	size      int
	urlToItem map[string]*item
}

// Get returns the cached body for url and refreshes its LRU timestamp.
// The second return value reports whether the entry was present.
func (lc *lruCache) Get(url string) (string, bool) {
	// Single map lookup (the original looked the key up three times).
	it, ok := lc.urlToItem[url]
	if !ok {
		return "", false
	}
	it.timestamp = time.Now()
	return it.body, true
}

// Put stores body under url, evicting the least-recently-used entry
// first when the cache is at capacity.
func (lc *lruCache) Put(url string, body string) {
	// A non-positive capacity can hold nothing; storing would immediately
	// require evicting from an empty map (which used to panic).
	if lc.size <= 0 {
		return
	}
	// Overwriting an existing key does not grow the cache, so it must not
	// evict an unrelated entry (the original evicted even in this case).
	if _, exists := lc.urlToItem[url]; exists {
		lc.set(url, body)
		return
	}
	if lc.Full() {
		lc.evictOne()
	}
	lc.set(url, body)
}

// Full reports whether the cache is at (or, defensively, beyond) capacity.
func (lc *lruCache) Full() bool {
	// ">=" rather than "==" so an out-of-sync state still triggers eviction.
	return len(lc.urlToItem) >= lc.size
}

// evictOne removes the entry with the earliest timestamp, O(n).
// Note: entries written within the same clock tick have equal timestamps,
// so the victim among ties depends on map iteration order.
func (lc *lruCache) evictOne() {
	var toEvict string
	var evictItem *item
	// find the item with earliest timestamp, O(n)
	for url, it := range lc.urlToItem {
		if evictItem == nil || it.timestamp.Before(evictItem.timestamp) {
			toEvict = url
			evictItem = it
		}
	}
	if evictItem == nil {
		// Nothing to evict (empty cache). Unreachable via Put, which only
		// calls evictOne when len >= size > 0; kept as a defensive no-op
		// instead of the original panic.
		return
	}
	delete(lc.urlToItem, toEvict)
}

// set unconditionally (over)writes the entry for url with a fresh timestamp.
func (lc *lruCache) set(url string, body string) {
	lc.urlToItem[url] = &item{
		body:      body,
		timestamp: time.Now(),
	}
}

// --------------------
// Efficient algorithm
// --------------------
// type item struct {
//   url string
//   body string
// }
// func newLRUCache(size int) *lruCache {
//   return &lruCache{
//     size: size,
//     urlToListElem: make(map[string]*list.Element),
//     l: list.New(),
//   }
// }
// type lruCache struct {
//   size int
//   urlToListElem map[string]*list.Element
//   l *list.List
// }
// func (lc *lruCache) Get(url string) (string, bool) {
//   if lc.urlToListElem[url] != nil {
//     lc.l.MoveToFront(lc.urlToListElem[url])
//     return lc.urlToListElem[url].Value.(*item).body, true
//   }
//   return "", false
// }
// func (lc *lruCache) Put(url string, body string) {
//   if len(lc.urlToListElem) == lc.size {
//     lc.evictOne()
//   }
//   lc.set(url, body)
// }
// func (lc *lruCache) evictOne() {
//   delete(lc.urlToListElem, lc.l.Back().Value.(*item).url)
//   lc.l.Remove(lc.l.Back())
// }
// func (lc *lruCache) set(url string, body string) {
//   lc.urlToListElem[url] = lc.l.PushFront(&item{
//     url: url,
//     body: body,
//   })
// }
package sqly

import "errors"

// Sentinel errors returned by sqly. Compare with errors.Is / ==.
var (
	// ErrQueryFmt sql statement format error
	ErrQueryFmt = errors.New("query can't be formatted")
	// ErrArgType sql statement format type error
	ErrArgType = errors.New("invalid variable type for argument")
	// ErrStatement sql syntax error
	ErrStatement = errors.New("sql statement syntax error")
	// ErrContainer container for results
	ErrContainer = errors.New("invalid container for scanning (struct pointer, not nil)")
	// ErrFieldsMatch fields not match
	ErrFieldsMatch = errors.New("queried fields not match with struct fields")
	// ErrMultiRes multi result for get
	ErrMultiRes = errors.New("get more than one results for get query")
	// ErrEmpty no result for a get query.
	// (message had a stray trailing space — removed)
	ErrEmpty = errors.New("no result for get query")
	// ErrCapsule Invalid Capsule
	ErrCapsule = errors.New("query capsule is not available")
	// ErrEmptyArrayInStatement empty array passed as a query argument
	// (doc comment was missing; every other sentinel here has one)
	ErrEmptyArrayInStatement = errors.New("has empty array in query arguments")
	// ErrNotSupportForThisDriver driver not support
	ErrNotSupportForThisDriver = errors.New("not support for this driver")
)
// ActorF is the actor factory/registry for a game run: it owns the live
// Object list, assigns network IDs, and tracks player seat positions.
// (Types Run, MonsterSpawner, Object, Unit, Fish, Player, Session, Gun,
// Curve, FishConfig, FList, Component, ServerEffect, plus the globals
// config, server, cf and confActions are declared elsewhere in this package.)
package main

import (
	"github.com/dearcj/golangproj/bitmask"
	pb "github.com/dearcj/golangproj/network"
	"go.uber.org/zap"
	"math/rand"
	"time"
	"unsafe"
)

// DelayedCall describes a function to invoke after a delay.
// NOTE(review): no scheduler for it is visible in this file — confirm where
// (or whether) DelayedCall is actually executed.
type DelayedCall struct {
	param     unsafe.Pointer
	function  func(unsafe.Pointer)
	duration  time.Duration
	startTime time.Time
}

// ActorF holds all live actors of a run plus ID/seat bookkeeping.
type ActorF struct {
	run                *Run              // owning run; also consulted for seat positions
	spawners           []*MonsterSpawner // monster spawners (not used in this file)
	lastUpdateSpawners time.Time
	actors             []*Object       // live objects; compacted via swap-delete in Update/Remove
	maxID              int             // highest object ID seen (recomputed in getNewId)
	positions          []*Object       // 10 fixed seat slots
	ids                map[int]*Object // object ID -> object index
}

// SpawnPlace returns the first free seat index in the owner's half
// (slots 0-3 or 4-7 depending on ownerPlace), or -1 if that half is full.
// Note: slots 8-9 of positions are never handed out here.
func (f *ActorF) SpawnPlace(ownerPlace int) int {
	min := 0
	place := -1
	if ownerPlace >= 4 {
		min = 4
	}
	for x := min; x <= min+3; x++ {
		if f.positions[x] == nil {
			place = x
			break
		}
	}
	return place
}

// CountType counts live (non-killed) actors that carry both the given
// component group and a Unit component.
func (f *ActorF) CountType(typeColGroup bitmask.Bitmask) int {
	count := 0
	for _, el := range f.actors {
		if el.FindByComponent(typeColGroup) != nil {
			unit := el.FindByComponent(config.Components.Unit)
			if unit != nil {
				if unit.(*Unit).Killed == false {
					// println("MONSER HAS ", int32(unit.(*Unit).HP))
					count++
				}
			}
		}
	}
	return count
}

// Update removes all actors flagged doRemove (swap-delete, order not
// preserved), then runs Process(0) on the survivors.
// NOTE(review): the dt parameter is ignored and Process is always called
// with 0 — confirm whether the frame delta was meant to be forwarded.
func (f *ActorF) Update(dt time.Duration) {
	var i = 0
	//var prevAddedRemoved bool
	for i = 0; i < len(f.actors); i++ {
		a := f.actors[i]
		if a.doRemove {
			//prevAddedRemoved = true
			// swap-delete: move last element into slot i, shrink, revisit i
			f.actors[i] = f.actors[len(f.actors)-1]
			f.actors = f.actors[:len(f.actors)-1]
			i--
		}
	}
	for i = 0; i < len(f.actors); i++ {
		a := f.actors[i]
		a.Process(0)
	}
}

// Remove swap-deletes the actor at index inx (element order not preserved).
// It does not touch f.ids — callers presumably handle that separately.
func (f *ActorF) Remove(inx int) {
	f.actors[inx] = f.actors[len(f.actors)-1]
	f.actors = f.actors[:len(f.actors)-1]
}

// AddFish creates a fish actor on the given curve, binds its Unit and Fish
// components, and returns the Fish AI plus the "appear" effect list.
// When silent is true it returns (nil, nil) — the fish is still created,
// but the appear effect is discarded (no broadcast to clients).
func (f *ActorF) AddFish(ID uint32, room *Run, curve *Curve, skipTime time.Duration, fish *FishConfig, curveTime time.Duration, silent bool) (*Fish, FList) {
	var list FList
	o, fx := f.Add(&f.actors)
	list = list.AddSingle(fx)
	BaseCharacter := BaseCharacter{
		HP:    float64(fish.Hp),
		MaxHP: float64(fish.Hp)}
	AI := &Fish{
		FishConfig:  fish,
		SkipTime:    skipTime,
		BornTime:    room.timeline.TimeNow(),
		CurrentTime: skipTime,
		FishId:      ID,
		CurveTime:   curveTime,
		CurveInx:    curve.Inx,
		Curve:       curve}
	AI.CachedPos = curve.GetCachedPoint(AI.GetCurveProp())
	unit := &Unit{
		BaseCharacter: BaseCharacter,
	}
	unit.BaseCharacter.Object = o
	o.BindComponent(unit)
	o.BindComponent(AI)
	o.Type = uint32(config.Components.Fish)
	o.InitComponents()
	if silent {
		return nil, nil
	} else {
		return AI, list
	}
}

// AddPlayer creates a player actor for session s, equips the default gun,
// seats the player (see sessionPos) and returns the Player component plus
// the "appear" server effect.
func (f *ActorF) AddPlayer(s *Session, defaultGun *Gun) (*Player, *ServerEffect) {
	var newActor, fx = f.Add(&f.actors)
	newActor.session = s
	unit, pl := CreateDefaultPlayer(s.progress)
	pl.currentGun = defaultGun
	unit.BaseCharacter.Object = newActor
	newActor.BindComponent(unit)
	newActor.BindComponent(pl)
	s.SetPlayer(newActor)
	//TODO: for deathmatch is current
	pl.NetworkData = &pb.Player{
		StartPosition: 0,
		NetworkObject: &pl.parent.NetworkObject,
	}
	pl.NetworkData.StartPosition = uint32(sessionPos(f, s))
	return pl, fx
}

// sessionPos picks a random free slot in f.run.positions, CLAIMS it for s
// (side effect: writes into f.run.positions), and returns its index.
// Returns 0 when no slot is free — indistinguishable from legitimately
// winning slot 0; NOTE(review): confirm that collision is acceptable.
func sessionPos(f *ActorF, s *Session) int {
	positions := []int{}
	for inx, sess := range f.run.positions {
		if sess == nil {
			positions = append(positions, inx)
		}
	}
	if len(positions) > 0 {
		inx := rand.Intn(len(positions))
		pos := positions[inx]
		f.run.positions[pos] = s
		return pos
	} else {
		return 0
	}
}

// CreateObjectWithComponent creates a bare actor carrying a single
// component and returns it with its "appear" effect.
func (f *ActorF) CreateObjectWithComponent(c Component) (*Object, *ServerEffect) {
	var newActor, ff = f.Add(&f.actors)
	newActor.BindComponent(c)
	return newActor, ff
}

// RemoveObject unregisters obj from the ID map and returns the client-side
// "Remove" effect list (V carries 1 for a death, 0 otherwise).
// Returns nil if obj is already pending removal; returns an empty FList
// when silent. The actor slice itself is compacted later by Update, which
// acts on the doRemove flag (presumably set inside Object.Remove()).
func (f *ActorF) RemoveObject(obj *Object, death bool, silent bool) FList {
	if obj.doRemove {
		return nil
	}
	if f.ids[int(obj.ID)] != nil {
		f.ids[int(obj.ID)].Remove()
		f.ids[int(obj.ID)] = nil
	} else {
		server.logger.Debug("Object removed but not in map", zap.Uint32("ID", obj.ID))
	}
	//server.logger.Debug("Removing object", zap.Uint32("ID", obj.ID))
	dv := 0.
	if death {
		dv = 1
	}
	if silent {
		return FList{}
	} else {
		return FList{}.AddSingle(cf(confActions.Remove).ID(obj.ID).V(float32(dv)))
	}
}

// Add allocates a new Object with a fresh network ID, registers it in the
// ID map, appends it to list, updates maxID, and returns the object with
// its "appear" effect.
func (f *ActorF) Add(list *[]*Object) (*Object, *ServerEffect) {
	var newActor = &Object{
		Components:    make(map[bitmask.Bitmask]Component),
		NetworkObject: pb.NetworkObject{ID: uint32(f.getNewId())},
	}
	newActor.factory = f
	f.ids[int(newActor.NetworkObject.ID)] = newActor
	*list = append(*list, newActor)
	if int(newActor.ID) > f.maxID {
		f.maxID = int(newActor.ID)
	}
	return newActor, newActor.Effect(confActions.Appear)
}

// FindByClientID returns the actor bound to the given session, or nil.
// (The id argument is forwarded to InxByCID, which ignores it — see below.)
func (f *ActorF) FindByClientID(id int, session *Session) *Object {
	inx := f.InxByCID(id, session)
	if inx >= 0 {
		return f.actors[inx]
	} else {
		return nil
	}
}

// Find returns the live actor with the given ID, or nil when it is absent
// or already flagged for removal.
func (f *ActorF) Find(id int) *Object {
	inx := f.InxById(id)
	if inx >= 0 {
		if !f.actors[inx].doRemove {
			return f.actors[inx]
		} else {
			return nil
		}
	} else {
		return nil
	}
}

// InxByCID returns the index of the first actor owned by session s, or -1.
// NOTE(review): the id parameter is unused — matching is by session only.
// Confirm whether id was meant to participate in the lookup.
func (f *ActorF) InxByCID(id int, s *Session) int {
	for i, el := range f.actors {
		if el.session == s {
			return i
		}
	}
	return -1
}

// InxById returns the index of the actor with the given ID, or -1.
// Linear scan; f.ids would give O(1) lookup — presumably kept for
// consistency with doRemove filtering elsewhere.
func (f *ActorF) InxById(id int) int {
	for i, el := range f.actors {
		if int(el.ID) == id {
			return i
		}
	}
	return -1
}

// FilterObjects returns all live (not doRemove) actors carrying the
// given component.
func (f *ActorF) FilterObjects(component bitmask.Bitmask) []*Object {
	var objs []*Object
	for _, x := range f.actors {
		if x.FindByComponent(component) != nil && !x.doRemove {
			objs = append(objs, x)
		}
	}
	return objs
}

// FilterComponents returns the matching component of every live actor
// that carries the given component.
func (f *ActorF) FilterComponents(component bitmask.Bitmask) []Component {
	var objs []Component
	for _, x := range f.actors {
		c := x.FindByComponent(component)
		if c != nil && !x.doRemove {
			objs = append(objs, c)
		}
	}
	return objs
}

// getNewId returns the lowest non-negative ID not used by any live actor
// (or maxID+1 when the range is dense). Side effect: recomputes f.maxID
// from the live actors. O(n) per allocation.
// NOTE(review): actors present only in f.ids (e.g. removed-but-uncompacted)
// are not considered — confirm IDs cannot be reissued while still mapped.
func (f *ActorF) getNewId() int {
	if len(f.actors) == 0 {
		return 0
	}
	f.maxID = 0
	for _, x := range f.actors {
		if int(x.ID) > f.maxID {
			f.maxID = int(x.ID)
		}
	}
	// mark used IDs, then return the first gap
	ids := make([]bool, f.maxID+1)
	for _, x := range f.actors {
		ids[x.ID] = true
	}
	for inx, haveId := range ids {
		if !haveId {
			return inx
		}
	}
	return f.maxID + 1
}

// CreateActorsFabric builds an empty actor factory for the given run,
// with 10 seat slots and an empty ID map.
func CreateActorsFabric(room *Run) *ActorF {
	var fabric = &ActorF{}
	fabric.positions = make([]*Object, 10, 10)
	fabric.ids = make(map[int]*Object)
	fabric.run = room
	fabric.maxID = 0
	return fabric
}
package main

import (
	"fmt"
	"net/http"

	"github.com/acidlemon/rocket"
)

// WebApi serves both the HTML UI and the JSON API for launching and
// terminating docker containers. It embeds rocket.WebApp for routing.
// NOTE(review): handlers read the package-level `app` global for Docker
// access — confirm it is initialized before the router serves traffic.
type WebApi struct {
	rocket.WebApp
	cfg *Config
}

// NewWebApi builds a WebApi with the given config, registers every
// route against the shared layout view and compiles the router.
func NewWebApi(cfg *Config) *WebApi {
	app := &WebApi{}
	app.Init()
	app.cfg = cfg

	view := &rocket.View{
		BasicTemplates: []string{"html/layout.html"},
	}

	app.AddRoute("/", app.List, view)
	app.AddRoute("/launcher", app.Launcher, view)
	app.AddRoute("/launch", app.Launch, view)
	app.AddRoute("/terminate", app.Terminate, view)
	app.AddRoute("/api/list", app.ApiList, view)
	app.AddRoute("/api/launch", app.ApiLaunch, view)
	app.AddRoute("/api/terminate", app.ApiTerminate, view)

	app.BuildRouter()

	return app
}

// ServeHTTP implements http.Handler by delegating to the rocket router.
func (api *WebApi) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	api.Handler(w, req)
}

// List renders the container list page. A Docker listing error is passed
// to the template as "error" rather than aborting the render.
func (api *WebApi) List(c rocket.CtxData) {
	info, err := app.Docker.List()
	errStr := ""
	if err != nil {
		errStr = err.Error()
	}
	value := rocket.RenderVars{
		"info":  info,
		"error": errStr,
	}

	c.Render("html/list.html", value)
}

// Launcher renders the launch form, pre-filled with the configured
// default docker image.
func (api *WebApi) Launcher(c rocket.CtxData) {
	c.Render("html/launcher.html", rocket.RenderVars{
		"DefaultImage": api.cfg.Docker.DefaultImage,
	})
}

// Launch is the HTML form handler: on success it redirects to the list
// page, otherwise it renders the failure as JSON.
func (api *WebApi) Launch(c rocket.CtxData) {
	result := api.launch(c)
	if result["result"] == "ok" {
		c.Redirect("/")
	} else {
		c.RenderJSON(result)
	}
}

// Terminate is the HTML form handler: on success it redirects to the list
// page, otherwise it renders the failure as JSON.
func (api *WebApi) Terminate(c rocket.CtxData) {
	result := api.terminate(c)
	if result["result"] == "ok" {
		c.Redirect("/")
	} else {
		c.RenderJSON(result)
	}
}

// ApiList renders the container list (or the error string) as JSON
// under the "result" key.
func (api *WebApi) ApiList(c rocket.CtxData) {
	info, err := app.Docker.List()
	var status interface{}
	if err != nil {
		status = err.Error()
	} else {
		status = info
	}
	result := rocket.RenderVars{
		"result": status,
	}

	c.RenderJSON(result)
}

// ApiLaunch is the JSON API variant of Launch.
func (api *WebApi) ApiLaunch(c rocket.CtxData) {
	result := api.launch(c)
	c.RenderJSON(result)
}

// ApiTerminate is the JSON API variant of Terminate.
func (api *WebApi) ApiTerminate(c rocket.CtxData) {
	result := api.terminate(c)
	c.RenderJSON(result)
}

// launch validates the POST parameters (subdomain, branch, image) and asks
// Docker to launch a container. It returns RenderVars whose "result" is
// "ok" or an error description. Non-POST requests get a 405.
func (api *WebApi) launch(c rocket.CtxData) rocket.RenderVars {
	if c.Req().Method != "POST" {
		c.Res().StatusCode = http.StatusMethodNotAllowed
		c.RenderText("you must use POST")
		return rocket.RenderVars{}
	}

	subdomain, _ := c.ParamSingle("subdomain")
	branch, _ := c.ParamSingle("branch")
	image, _ := c.ParamSingle("image")

	status := "ok"
	if subdomain == "" || branch == "" || image == "" {
		status = fmt.Sprintf("parameter required: subdomain=%s, branch=%s, image=%s",
			subdomain, branch, image)
	} else {
		err := app.Docker.Launch(subdomain, branch, image)
		if err != nil {
			status = err.Error()
		}
	}

	result := rocket.RenderVars{
		"result": status,
	}

	return result
}

// terminate validates the POST parameter (subdomain) and asks Docker to
// terminate the matching container. It returns RenderVars whose "result"
// is "ok" or an error description. Non-POST requests get a 405.
func (api *WebApi) terminate(c rocket.CtxData) rocket.RenderVars {
	if c.Req().Method != "POST" {
		c.Res().StatusCode = http.StatusMethodNotAllowed
		c.RenderText("you must use POST")
		return rocket.RenderVars{}
	}

	status := "ok"
	subdomain, _ := c.ParamSingle("subdomain")
	if subdomain == "" {
		// plain string: fmt.Sprintf with no verbs was flagged by staticcheck (S1039)
		status = "parameter required: subdomain"
	} else {
		err := app.Docker.Terminate(subdomain)
		if err != nil {
			status = err.Error()
		}
	}

	result := rocket.RenderVars{
		"result": status,
	}

	return result
}
package pathfileops import ( "fmt" "strings" "testing" "time" ) func TestFileMgrCollection_AddFileMgrCollection_01(t *testing.T) { var fileNameExt string fMgrs1 := FileMgrCollection{} for i := 0; i < 10; i++ { fileNameExt = fmt.Sprintf("testAddFile_%03d.txt", i+1) fmgr, err := fileMgrCollectionTestSetupFmgr01(fileNameExt) if err != nil { t.Errorf("Error returned from testFileMgrCollection_SetupFmgr_01(fileNameExt). fileNameExt='%v' Error='%v'", fileNameExt, err.Error()) } fMgrs1.AddFileMgr(fmgr) } if fMgrs1.GetNumOfFileMgrs() != 10 { t.Errorf("Expected fMgrs1 Array Length == 10. Instead fMgrs1.GetNumOfDirs()=='%v'", fMgrs1.GetNumOfFileMgrs()) } fMgrs2 := FileMgrCollection{} for i := 0; i < 15; i++ { fileNameExt = fmt.Sprintf("testCol2AddFile_%03d.txt", i+1) fmgr, err := fileMgrCollectionTestSetupFmgr01(fileNameExt) if err != nil { t.Errorf("Error returned from 2nd run of testFileMgrCollection_SetupFmgr_01(fileNameExt). fileNameExt='%v' Error='%v'", fileNameExt, err.Error()) } fMgrs2.AddFileMgr(fmgr) } if fMgrs2.GetNumOfFileMgrs() != 15 { t.Errorf("Expected fMgrs2 Array Length == 15. Instead fMgrs2.GetNumOfDirs()=='%v'", fMgrs2.GetNumOfFileMgrs()) } fMgrs1.AddFileMgrCollection(&fMgrs2) if fMgrs1.GetNumOfFileMgrs() != 25 { t.Errorf("Expected augmented fMgrs1 Array Length == 25. Instead fMgrs1.GetNumOfDirs()=='%v'", fMgrs1.GetNumOfFileMgrs()) } fMgr, err := fMgrs1.PeekLastFileMgr() if err != nil { t.Errorf("2nd Run: Error returned from fMgrs1.PeekLastDirMgr(). 
Error='%v'", err.Error()) return } if fMgr.fileNameExt != "testCol2AddFile_015.txt" { t.Errorf("Expected consolidated fMgrs1 to have last fMgr.fileNameExt='testCol2AddFile_015.txt'.\n"+ "Instead, fMgr.fileNameExt='%v'", fMgr.fileNameExt) } } func TestFileMgrCollection_AddFileMgrCollection_02(t *testing.T) { fMgrs1 := FileMgrCollection{} fMgrs1.fileMgrs = nil fMgrs2 := FileMgrCollection{} fMgrs2.fileMgrs = nil fMgrs1.AddFileMgrCollection(&fMgrs2) if fMgrs1.fileMgrs == nil { t.Error("ERROR: Expected fMgrs1.fileMgrs!=nil.\n" + "Instead, fMgrs1.fileMgrs==nil!!!") return } if len(fMgrs1.fileMgrs) > 0 { t.Errorf("Expected len(fMgrs1.fileMgrs)==0.\n" + "Instead, len(fMgrs1.fileMgrs)=='%v'\n", len(fMgrs1.fileMgrs)) } } func TestFileMgrCollection_AddFileMgr_01(t *testing.T) { var fileNameExt string fMgrs := FileMgrCollection{} for i := 0; i < 10; i++ { fileNameExt = fmt.Sprintf("testAddFile_%03d.txt", i+1) fmgr, err := fileMgrCollectionTestSetupFmgr01(fileNameExt) if err != nil { t.Errorf("Error returned from testFileMgrCollection_SetupFmgr_01(fileNameExt). fileNameExt='%v' Error='%v'", fileNameExt, err.Error()) } fMgrs.AddFileMgr(fmgr) } if fMgrs.GetNumOfFileMgrs() != 10 { t.Errorf("Expected fMgrs Array Length == 10. Instead fMgrs.GetNumOfDirs()=='%v'", fMgrs.GetNumOfFileMgrs()) } lastFmgr, err := fMgrs.PeekLastFileMgr() if err != nil { t.Errorf("Error returned by fMgrs.PeekLastDirMgr(). Error='%v'", err) } if lastFmgr.fileNameExt != "testAddFile_010.txt" { t.Errorf("Expected last File Manager to have fileNameExt='testAddFile_010.txt'. Instead fileNameExt='%v'", lastFmgr.fileNameExt) } } func TestFileMgrCollection_AddFileMgrByPathFileNameExt_01(t *testing.T) { var fileNameExt string fh := FileHelper{} fMgrs := FileMgrCollection{} for i := 0; i < 10; i++ { fileNameExt = fmt.Sprintf("testAddFile_%03d.txt", i+1) fmgr, err := fileMgrCollectionTestSetupFmgr01(fileNameExt) if err != nil { t.Errorf("Error returned from testFileMgrCollection_SetupFmgr_01(fileNameExt). 
fileNameExt='%v' Error='%v'", fileNameExt, err.Error()) } fMgrs.AddFileMgr(fmgr) } if fMgrs.GetNumOfFileMgrs() != 10 { t.Errorf("Expected fMgrs Array Length == 10. Instead fMgrs.GetNumOfDirs()=='%v'", fMgrs.GetNumOfFileMgrs()) } adjustedPath := "../../filesfortest/newfilesfortest/newerFileForTest_01.txt" fPath, err := fh.MakeAbsolutePath(adjustedPath) if err != nil { t.Errorf("Error returned from fh.MakeAbsolutePath(adjustedPath). adjustedPath='%v' Error='%v'", adjustedPath, err.Error()) } err = fMgrs.AddFileMgrByPathFileNameExt(fPath) if err != nil { t.Errorf("Error returned from fMgrs.AddFileMgrByPathFileNameExt(fPath). fPath='%v' Error='%v'", fPath, err.Error()) } fmgr2, err := fMgrs.PeekLastFileMgr() if err != nil { t.Errorf("Error returned by fMgrs.PeekLastDirMgr(). Error='%v'", err.Error()) return } if fmgr2.fileNameExt != "newerFileForTest_01.txt" { t.Errorf("Expected Newly Added Fmgr fileNameExt='newerFileForTest_01.txt'.\n"+ "Instead, fileNameExt='%v'", fmgr2.fileNameExt) } } func TestFileMgrCollection_AddFileMgrByPathFileNameExt_02(t *testing.T) { fMgrs := FileMgrCollection{} fMgrs.fileMgrs = nil filePath := "filesfortest/levelfilesfortest/level_0_0_test.txt" err := fMgrs.AddFileMgrByPathFileNameExt(filePath) if err != nil { t.Errorf("Error returned from fMgrs.AddFileMgrByPathFileNameExt(fPath).\n" + "filePath='%v'\n" + "Error='%v'\n", filePath, err.Error()) } if fMgrs.GetNumOfFiles() != 1 { t.Errorf("ERROR: Expected number of File Managers in collection = '1'\n" + "Instead, number of file managers='%v'", fMgrs.GetNumOfFiles()) } } func TestFileMgrCollection_AddFileMgrByDirFileNameExt_01(t *testing.T) { testDir := "../../createFilesTest" testDMgr, err := DirMgr{}.New(testDir) if err != nil { t.Errorf("Test Setup Error returned by DirMgr{}.New(testDir)\n"+ "testDir='%v'\nError='%v'\n", testDir, err.Error()) return } fMgrs := FileMgrCollection{}.New() var fileNameExt string fileNamesArray := make([]string, 0, 30) for i := 0; i < 10; i++ { fileNameExt = 
fmt.Sprintf("testAddFile_%03d.txt", i+1) fileNamesArray = append(fileNamesArray, strings.ToLower(fileNameExt)) err := fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt) if err != nil { t.Errorf("Error returned from fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt).\n"+ "testDMgr='%v'"+ "fileNameExt='%v'\nError='%v'", testDMgr.GetAbsolutePath(), fileNameExt, err.Error()) } } numOfFileManagers := fMgrs.GetNumOfFileMgrs() if 10 != numOfFileManagers { t.Errorf("ERROR: Expected 10-File Managers.\n"+ "Instead, the collection actually contains %v-File Managers.\n", numOfFileManagers) } if len(fileNamesArray) != numOfFileManagers { t.Errorf("Expected number of elements in 'fileNamesArray'\n"+ "to equal number of file managers in collection.\n"+ "They ARE NOT EQUAL!\n"+ "Length of fileNamesArray='%v'\nNumber of File Managers='%v'\n", len(fileNamesArray), numOfFileManagers) } for k := 0; k < numOfFileManagers; k++ { fMgr, err := fMgrs.GetFileMgrAtIndex(k) if err != nil { t.Errorf("Error retrned by fMgrs.GetFileMgrAtIndex(%v)\n"+ "Error='%v'\n", k, err.Error()) return } fNameExt := strings.ToLower(fMgr.GetFileNameExt()) if fileNamesArray[k] != fNameExt { t.Errorf("Expected File Name Extension='%v'.\n"+ "Instead, File Name Extension='%v'\n", fileNamesArray[k], fNameExt) } } } func TestFileMgrCollection_AddFileMgrByDirFileNameExt_02(t *testing.T) { fMgrs := FileMgrCollection{} testDir := "../../createFilesTest" testDMgr, err := DirMgr{}.New(testDir) if err != nil { t.Errorf("Test Setup Error returned by DirMgr{}.New(testDir)\n"+ "testDir='%v'\nError='%v'\n", testDir, err.Error()) return } fileNameExt := "" err = fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt) if err == nil { t.Error("Expected an error return fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt)\n" + "because 'fileNameExt' is an empty string.\n" + "However, NO ERROR WAS RETURNED!!!\n") } } func TestFileMgrCollection_AddFileMgrByDirFileNameExt_03(t *testing.T) { fMgrs := FileMgrCollection{} 
testDir := "../../createFilesTest" testDMgr, err := DirMgr{}.New(testDir) if err != nil { t.Errorf("Test Setup Error returned by DirMgr{}.New(testDir)\n"+ "testDir='%v'\nError='%v'\n", testDir, err.Error()) return } fileNameExt := "testFile.txt" testDMgr.isInitialized = false err = fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt) if err == nil { t.Error("Expected an error return fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt)\n" + "because 'testDMgr' is INVALID!.\nHowever, NO ERROR WAS RETURNED!!!\n") } } func TestFileMgrCollection_AddFileMgrByDirStrFileNameStr_01(t *testing.T) { fMgrs := FileMgrCollection{} testDir := "../../createFilesTest" absTestDir, err := FileHelper{}.MakeAbsolutePath(testDir) if err != nil { t.Errorf("Test Setup Error returned by FileHelper{}.MakeAbsolutePath(testDir)\n" + "testDir='%v'\nError='%v'\n", testDir, err.Error()) return } for i:=0; i < 20; i++ { fileNameExt := fmt.Sprintf("FileNameNo%02d.txt", i+1) err = fMgrs.AddFileMgrByDirStrFileNameStr(absTestDir, fileNameExt) if err != nil { t.Errorf("Error returned by fMgrs.AddFileMgrByDirStrFileNameStr(" + "absTestDir, fileNameExt)\n" + "absTestDir='%v'\nfileNameExt='%v'\nError='%v'\n", absTestDir, fileNameExt, err.Error()) return } } if 20 != fMgrs.GetNumOfFiles() { t.Errorf("ERROR: Expected number of file managers='20'.\n" + "Instead, number of file managers='%v'\n", fMgrs.GetNumOfFiles()) return } for k:=0; k < 20; k++ { fMgr, err := fMgrs.PeekFileMgrAtIndex(k) if err != nil { t.Errorf("Error returned by fMgrs.PeekFileMgrAtIndex(%v).\n" + "Error='%v'\n", k, err.Error()) return } err = fMgr.IsFileMgrValid("") if err != nil { t.Errorf("File Manager #%v is INVALID!\n" + "Error='%v'\n", k, err.Error()) return } fileNameExt := fMgr.GetFileNameExt() expectedFileNameExt := fmt.Sprintf("FileNameNo%02d.txt", k+1) if expectedFileNameExt != fileNameExt { t.Errorf("ERROR: Expected fileNameExt='%v'.\n" + "Instead, fileNameExt='%v'\n", expectedFileNameExt, fileNameExt) return } } } func 
TestFileMgrCollection_AddFileInfo_01(t *testing.T) { var fileNameExt string fh := FileHelper{} fMgrs := FileMgrCollection{} for i := 0; i < 10; i++ { fileNameExt = fmt.Sprintf("testAddFile_%03d.txt", i+1) fmgr, err := fileMgrCollectionTestSetupFmgr01(fileNameExt) if err != nil { t.Errorf("Error returned from testFileMgrCollection_SetupFmgr_01(fileNameExt). "+ "fileNameExt='%v' Error='%v'", fileNameExt, err.Error()) } fMgrs.AddFileMgr(fmgr) } if fMgrs.GetNumOfFileMgrs() != 10 { t.Errorf("Expected fMgrs Array Length == 10. Instead fMgrs.GetNumOfDirs()=='%v'", fMgrs.GetNumOfFileMgrs()) } expectedFileNameExt := "newerFileForTest_01.txt" fic := FileInfoPlus{} fic.SetName(expectedFileNameExt) fic.SetIsDir(false) fic.SetSize(123456) fic.SetModTime(time.Now().Local()) fic.SetMode(0666) fic.SetSysDataSrc("xyzxyzxyzyzx") fic.SetIsFInfoInitialized(true) adjustedPath := "../../filesfortest/newfilesfortest" fPath, err := fh.MakeAbsolutePath(adjustedPath) if err != nil { t.Errorf("Error returned from fh.MakeAbsolutePath(adjustedPath). adjustedPath='%v' Error='%v'", adjustedPath, err.Error()) } err = fMgrs.AddFileMgrByFileInfo(fPath, fic) if err != nil { t.Errorf("Error returned from fMgrs.AddFileMgrByFileInfo(fPath, fic). 
fPath='%v' Error='%v'", fPath, err.Error()) } if fMgrs.GetNumOfFileMgrs() != 11 { t.Errorf("Expected fMgrs Array Length == 11.\n"+ "Instead fMgrs.GetNumOfDirs()=='%v'", fMgrs.GetNumOfFileMgrs()) return } fmgrLast, err := fMgrs.PopLastFileMgr() if err != nil { t.Errorf("Error returned by fMgrs.PopLastFileMgr()\n"+ "Error='%v'\n", err.Error()) return } if fmgrLast.fileNameExt != expectedFileNameExt { t.Errorf("Expected fmgrLast.fileNameExt='%v'.\n"+ "Instead, fmgrLast.fileNameExt='%v'\n", expectedFileNameExt, fmgrLast.fileNameExt) } } func TestFileMgrCollection_CopyFilesToDir_01(t *testing.T) { srcPath := "../../filesfortest/checkfiles" srcDMgr, err := DirMgr{}.New(srcPath) if err != nil { t.Errorf("Test Setup Error returned by DirMgr{}.New(srcPath)\n"+ "srcPath='%v'\nError='%v'\n", srcPath, err.Error()) return } testPath := "../../dirmgrtests/TestFileMgrCollection_CopyFilesToDir_01" testDMgr, err := DirMgr{}.New(testPath) if err != nil { t.Errorf("Test Setup Error returned by DirMgr{}.New(testPath)\n"+ "testPath='%v'\nError='%v'\n", testPath, err.Error()) return } err = testDMgr.DeleteAll() if err != nil { t.Errorf("Test Setup Error returned by (1) testDMgr.DeleteAll()\n"+ "Error='%v'\n", err.Error()) return } err = testDMgr.MakeDir() if err != nil { t.Errorf("Test Setup Error returned by (1) testDMgr.MakeDir()\n"+ "Error='%v'\n", err.Error()) return } fsc := FileSelectionCriteria{} fMgrs, err := srcDMgr.FindFilesBySelectCriteria(fsc) if err != nil { t.Errorf("Test Setup Error returned by (1) srcDMgr.FindFilesBySelectCriteria(fsc)\n"+ "Error='%v'\n", err.Error()) _ = testDMgr.DeleteAll() return } numOfSrcFMgrs := fMgrs.GetNumOfFileMgrs() if numOfSrcFMgrs == 0 { t.Error("Expected files would be returned from srcDMgr search.\n" + "However, Zero Files were returned!\n") _ = testDMgr.DeleteAll() return } err = fMgrs.CopyFilesToDir(testDMgr) if err != nil { t.Errorf("Error returned by err = fMgrs.CopyFilesToDir(testDMgr)\n"+ "testDMgr='%v'"+ "Error='%v'\n", 
testDMgr.GetAbsolutePath(), err.Error()) _ = testDMgr.DeleteAll() return } fsc = FileSelectionCriteria{} testDirInfo, err := testDMgr.FindFilesBySelectCriteria(fsc) if err != nil { t.Errorf("Error returned by testDMgr.FindFilesBySelectCriteria(fsc)\n"+ "Error='%v'\n", err) _ = testDMgr.DeleteAll() return } if numOfSrcFMgrs != testDirInfo.GetNumOfFileMgrs() { t.Errorf("After File Manager Collection Copy Operation,\n"+ "the number of files copied does NOT match the number of source files.\n"+ "Expected Number of Files Copied='%v'\n"+ "Actual Number of Files Copied='%v'\n", numOfSrcFMgrs, testDirInfo.GetNumOfFileMgrs()) } err = testDMgr.DeleteAll() if err != nil { t.Errorf("Test Cleanup Error returned by (2) testDMgr.DeleteAll()\n"+ "Error='%v'\n", err.Error()) } } func TestFileMgrCollection_CopyFilesToDir_02(t *testing.T) { testPath := "../../dirmgrtests/TestFileMgrCollection_CopyFilesToDir_02" testDMgr, err := DirMgr{}.New(testPath) if err != nil { t.Errorf("Test Setup Error returned by DirMgr{}.New(testPath)\n"+ "testPath='%v'\nError='%v'\n", testPath, err.Error()) return } err = testDMgr.DeleteAll() if err != nil { t.Errorf("Test Setup Error returned by (1) testDMgr.DeleteAll()\n"+ "Error='%v'\n", err.Error()) return } err = testDMgr.MakeDir() if err != nil { t.Errorf("Test Setup Error returned by (1) testDMgr.MakeDir()\n"+ "Error='%v'\n", err.Error()) return } fMgrs := FileMgrCollection{} err = fMgrs.CopyFilesToDir(testDMgr) if err == nil { t.Error("Expected an error return by fMgrs.CopyFilesToDir(testDMgr)\n" + "because fMgrs is an empty collection.\nHowever, NO ERROR WAS RETURNED!!!\n") } err = testDMgr.DeleteAll() if err != nil { t.Errorf("Test Cleanup Error returned by (2) testDMgr.DeleteAll()\n"+ "Error='%v'\n", err.Error()) } } func TestFileMgrCollection_CopyOut_01(t *testing.T) { fMgrs := FileMgrCollection{} _, err := fMgrs.CopyOut() if err == nil { t.Error("Expected an error return by fMgrs.CopyOut() because\n" + "fMgrs is an empty File Manager 
Collection.\nHowever, NO ERROR WAS RETURNED!\n") } } func TestFileMgrCollection_CopyOut_02(t *testing.T) { testDir := "../../createFilesTest" testDMgr, err := DirMgr{}.New(testDir) if err != nil { t.Errorf("Test Setup Error returned by DirMgr{}.New(testDir)\n"+ "testDir='%v'\nError='%v'\n", testDir, err.Error()) return } fMgrs := FileMgrCollection{}.New() var fileNameExt string fileNamesArray := make([]string, 0, 30) for i := 0; i < 10; i++ { fileNameExt = fmt.Sprintf("testAddFile_%03d.txt", i+1) filePath := testDMgr.GetAbsolutePathWithSeparator() + fileNameExt fileNamesArray = append(fileNamesArray, strings.ToLower(filePath)) err := fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt) if err != nil { t.Errorf("Error returned from fMgrs.AddFileMgrByDirFileNameExt(testDMgr, fileNameExt).\n"+ "testDMgr='%v'"+ "fileNameExt='%v'\nError='%v'", testDMgr.GetAbsolutePath(), fileNameExt, err.Error()) return } } fMgrs2, err := fMgrs.CopyOut() if err != nil { t.Errorf("Error returned by fMgrs.CopyOut()\n"+ "Error='%v'\n", err.Error()) return } origNumOfFMgrs := fMgrs.GetNumOfFileMgrs() copiedNumOfFMgrs := fMgrs2.GetNumOfFileMgrs() if origNumOfFMgrs != copiedNumOfFMgrs { t.Errorf("ERROR: Expected copied number of file managers='%v'\n"+ "Instead, copied number of file managers='%v'\n", origNumOfFMgrs, copiedNumOfFMgrs) return } for k := 0; k < origNumOfFMgrs; k++ { origFMgr, err := fMgrs.GetFileMgrAtIndex(k) if err != nil { t.Errorf("Error returned by fMgrs.GetFileMgrAtIndex(%v)\n"+ "Error='%v'\n", k, err.Error()) return } origPathFileName := strings.ToLower(origFMgr.GetAbsolutePathFileName()) copiedFMgr, err := fMgrs2.GetFileMgrAtIndex(k) if err != nil { t.Errorf("Error returned by fMgrs2.GetFileMgrAtIndex(%v)\n"+ "Error='%v'\n", k, err.Error()) return } copiedPathFileName := strings.ToLower(copiedFMgr.GetAbsolutePathFileName()) if origPathFileName != copiedPathFileName { t.Errorf("ERROR: Original File Manager Path File Name NOT EQUAL\n"+ "to Copied File Manager Path File 
Name!\n"+ "Original File Manager Path File Name='%v'\n"+ "Copied File Manager Path File Name='%v'\n", origPathFileName, copiedPathFileName) } } } func TestFileMgrCollection_CopyOut_03(t *testing.T) { fMgrs := FileMgrCollection{} fMgrs.fileMgrs = nil _, err := fMgrs.CopyOut() if err == nil { t.Error("Expected an error return by fMgrs.CopyOut() because\n" + "fMgrs is an empty File Manager Collection.\nHowever, NO ERROR WAS RETURNED!\n") } } func TestFileMgrCollection_DeleteAtIndex_01(t *testing.T) { f0 := "..\\dirmgrtests\\dir01\\level_1_1_test.txt" f1 := "..\\dirmgrtests\\dir01\\level_1_2_test.txt" f2 := "..\\dirmgrtests\\dir01\\level_1_3_test.txt" f3 := "..\\dirmgrtests\\dir01\\level_1_4_test.txt" fMgrCol := FileMgrCollection{}.New() err := fMgrCol.AddFileMgrByPathFileNameExt(f0) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f0). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f1) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f1). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f2) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f2). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f3) if err != nil { t.Errorf("Error returned by AddFileMgrByPathFileNameExt(f3). "+ "Error='%v' ", err.Error()) return } foundDir := false fh := FileHelper{} searchStr, err := fh.GetAbsPathFromFilePath(f2) if err != nil { t.Errorf("Error returned by fh.GetAbsPathFromFilePath(f2). "+ "f2='%v' Error='%v' ", f2, err.Error()) } arrayLen := fMgrCol.GetNumOfFileMgrs() for i := 0; i < arrayLen; i++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(i) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(i). 
"+ "i='%v' Error='%v' ", i, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != true { t.Error("Expected to find file index # 2 on first pass. DID NOT FIND IT!") return } err = fMgrCol.DeleteAtIndex(2) if err != nil { t.Errorf("Error returned by fMgrCol.DeleteAtIndex(2) "+ "Error='%v' ", err.Error()) } arrayLen = fMgrCol.GetNumOfFileMgrs() foundDir = false for j := 0; j < arrayLen; j++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(j) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(j). "+ "j='%v' Error='%v' ", j, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != false { t.Error("Error: Found file at index # 2. IT WAS NOT DELETED!") } } func TestFileMgrCollection_DeleteAtIndex_02(t *testing.T) { f0 := "..\\dirmgrtests\\dir01\\level_1_1_test.txt" f1 := "..\\dirmgrtests\\dir01\\level_1_2_test.txt" f2 := "..\\dirmgrtests\\dir01\\level_1_3_test.txt" f3 := "..\\dirmgrtests\\dir01\\level_1_4_test.txt" fMgrCol := FileMgrCollection{}.New() err := fMgrCol.AddFileMgrByPathFileNameExt(f0) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f0). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f1) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f1). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f2) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f2). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f3) if err != nil { t.Errorf("Error returned by AddFileMgrByPathFileNameExt(f3). "+ "Error='%v' ", err.Error()) return } foundDir := false fh := FileHelper{} searchStr, err := fh.GetAbsPathFromFilePath(f1) if err != nil { t.Errorf("Error returned by fh.GetAbsPathFromFilePath(f1). 
"+ "f1='%v' Error='%v' ", f1, err.Error()) } arrayLen := fMgrCol.GetNumOfFileMgrs() for i := 0; i < arrayLen; i++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(i) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(i). "+ "i='%v' Error='%v' ", i, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != true { t.Error("Expected to find file index # 1 on first pass. DID NOT FIND IT!") return } err = fMgrCol.DeleteAtIndex(1) if err != nil { t.Errorf("Error returned by fMgrCol.DeleteAtIndex(1) "+ "Error='%v' ", err.Error()) } arrayLen = fMgrCol.GetNumOfFileMgrs() foundDir = false for j := 0; j < arrayLen; j++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(j) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(j). "+ "j='%v' Error='%v' ", j, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != false { t.Error("Error: Found file at index # 1. IT WAS NOT DELETED!") } } func TestFileMgrCollection_DeleteAtIndex_03(t *testing.T) { f0 := "..\\dirmgrtests\\dir01\\level_1_1_test.txt" f1 := "..\\dirmgrtests\\dir01\\level_1_2_test.txt" f2 := "..\\dirmgrtests\\dir01\\level_1_3_test.txt" f3 := "..\\dirmgrtests\\dir01\\level_1_4_test.txt" fMgrCol := FileMgrCollection{}.New() err := fMgrCol.AddFileMgrByPathFileNameExt(f0) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f0). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f1) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f1). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f2) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f2). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f3) if err != nil { t.Errorf("Error returned by AddFileMgrByPathFileNameExt(f3). 
"+ "Error='%v' ", err.Error()) return } foundDir := false fh := FileHelper{} searchStr, err := fh.GetAbsPathFromFilePath(f0) if err != nil { t.Errorf("Error returned by fh.GetAbsPathFromFilePath(f0). "+ "f0='%v' Error='%v' ", f0, err.Error()) } arrayLen := fMgrCol.GetNumOfFileMgrs() for i := 0; i < arrayLen; i++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(i) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(i). "+ "i='%v' Error='%v' ", i, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != true { t.Error("Expected to find file index # 0 on first pass. DID NOT FIND IT!") return } err = fMgrCol.DeleteAtIndex(0) if err != nil { t.Errorf("Error returned by fMgrCol.DeleteAtIndex(0) "+ "Error='%v' ", err.Error()) } arrayLen = fMgrCol.GetNumOfFileMgrs() foundDir = false for j := 0; j < arrayLen; j++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(j) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(j). "+ "j='%v' Error='%v' ", j, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != false { t.Error("Error: Found file at index # 0. IT WAS NOT DELETED!") } } func TestFileMgrCollection_DeleteAtIndex_04(t *testing.T) { f0 := "..\\dirmgrtests\\dir01\\level_1_1_test.txt" f1 := "..\\dirmgrtests\\dir01\\level_1_2_test.txt" f2 := "..\\dirmgrtests\\dir01\\level_1_3_test.txt" f3 := "..\\dirmgrtests\\dir01\\level_1_4_test.txt" fMgrCol := FileMgrCollection{}.New() err := fMgrCol.AddFileMgrByPathFileNameExt(f0) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f0). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f1) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f1). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f2) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f2). 
"+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f3) if err != nil { t.Errorf("Error returned by AddFileMgrByPathFileNameExt(f3). "+ "Error='%v' ", err.Error()) return } foundDir := false fh := FileHelper{} searchStr, err := fh.GetAbsPathFromFilePath(f3) if err != nil { t.Errorf("Error returned by fh.GetAbsPathFromFilePath(f3). "+ "f3='%v' Error='%v' ", f3, err.Error()) } arrayLen := fMgrCol.GetNumOfFileMgrs() for i := 0; i < arrayLen; i++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(i) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(i). "+ "i='%v' Error='%v' ", i, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != true { t.Error("Expected to find file index # 3 on first pass. DID NOT FIND IT!") return } err = fMgrCol.DeleteAtIndex(3) if err != nil { t.Errorf("Error returned by fMgrCol.DeleteAtIndex(0) "+ "Error='%v' ", err.Error()) } arrayLen = fMgrCol.GetNumOfFileMgrs() foundDir = false for j := 0; j < arrayLen; j++ { fileMgr, err := fMgrCol.PeekFileMgrAtIndex(j) if err != nil { t.Errorf("Error returned by fMgrCol.PeekFileMgrAtIndex(j). "+ "j='%v' Error='%v' ", j, err.Error()) return } if searchStr == fileMgr.GetAbsolutePathFileName() { foundDir = true } } if foundDir != false { t.Error("Error: Found file at index # 3. IT WAS NOT DELETED!") } } func TestFileMgrCollection_DeleteAtIndex_05(t *testing.T) { f0 := "..\\dirmgrtests\\dir01\\level_1_1_test.txt" f1 := "..\\dirmgrtests\\dir01\\level_1_2_test.txt" f2 := "..\\dirmgrtests\\dir01\\level_1_3_test.txt" f3 := "..\\dirmgrtests\\dir01\\level_1_4_test.txt" fMgrCol := FileMgrCollection{}.New() err := fMgrCol.AddFileMgrByPathFileNameExt(f0) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f0). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f1) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f1). 
"+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f2) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f2). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f3) if err != nil { t.Errorf("Error returned by AddFileMgrByPathFileNameExt(f3). "+ "Error='%v' ", err.Error()) return } arrayLen := fMgrCol.GetNumOfFileMgrs() if arrayLen != 4 { t.Errorf("Error: Expected intial array length='4'. Instead, array length='%v'", arrayLen) } err = fMgrCol.DeleteAtIndex(2) if err != nil { t.Errorf("Error returned by fMgrCol.DeleteAtIndex(2). "+ "Error='%v' ", err.Error()) } err = fMgrCol.DeleteAtIndex(1) if err != nil { t.Errorf("Error returned by fMgrCol.DeleteAtIndex(1). "+ "Error='%v' ", err.Error()) } err = fMgrCol.DeleteAtIndex(1) if err != nil { t.Errorf("Error returned by 2nd Pass fMgrCol.DeleteAtIndex(1). "+ "Error='%v' ", err.Error()) } err = fMgrCol.DeleteAtIndex(0) if err != nil { t.Errorf("Error returned by fMgrCol.DeleteAtIndex(0). "+ "Error='%v' ", err.Error()) } arrayLen = fMgrCol.GetNumOfFileMgrs() if arrayLen != 0 { t.Errorf("Error: Expected final array length='0'. "+ "Instead, final array length='%v' ", arrayLen) } } func TestFileMgrCollection_DeleteAtIndex_06(t *testing.T) { fMgrCol := FileMgrCollection{} err := fMgrCol.DeleteAtIndex(-3) if err == nil { t.Error("Expected fMgrCol.DeleteAtIndex(-3) to return an error\n" + "because the index is less than zero and invalid!\n" + "However, NO ERROR WAS RETURNED!!!\n") } } func TestFileMgrCollection_DeleteAtIndex_07(t *testing.T) { f0 := "..\\dirmgrtests\\dir01\\level_1_1_test.txt" f1 := "..\\dirmgrtests\\dir01\\level_1_2_test.txt" f2 := "..\\dirmgrtests\\dir01\\level_1_3_test.txt" f3 := "..\\dirmgrtests\\dir01\\level_1_4_test.txt" fMgrCol := FileMgrCollection{}.New() err := fMgrCol.AddFileMgrByPathFileNameExt(f0) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f0). 
"+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f1) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f1). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f2) if err != nil { t.Errorf("Error returned by fMgrCol.AddFileMgrByPathFileNameExt(f2). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.AddFileMgrByPathFileNameExt(f3) if err != nil { t.Errorf("Error returned by AddFileMgrByPathFileNameExt(f3). "+ "Error='%v' ", err.Error()) return } err = fMgrCol.DeleteAtIndex(19) if err == nil { t.Error("Expected error return from fMgrCol.DeleteAtIndex(19)\n" + "because the index, '19', exceeds the actual number of array elements.\n" + "However, NO ERROR WAS RETURNED!!!\n") } } func TestFileMgrCollection_DeleteAtIndex_08(t *testing.T) { fMgrCol := FileMgrCollection{}.New() err := fMgrCol.DeleteAtIndex(5) if err == nil { t.Error("Expected error return from fMgrCol.DeleteAtIndex(5)\n" + "because the actual number of array elements is zero.\n" + "However, NO ERROR WAS RETURNED!!!\n") } }
package main

import "fmt"

// SuperDescriber prints the dynamic type and the value of its argument
// on a single line, e.g. "Type is string and value Hello world!".
func SuperDescriber(i interface{}) {
	fmt.Printf("Type is %T and value %v\n", i, i)
}

// main demonstrates SuperDescriber on a string, an int, and an
// anonymous struct.
func main() {
	samples := []interface{}{
		"Hello world!",
		22,
		struct {
			name string
		}{
			name: "Bob",
		},
	}
	for _, sample := range samples {
		SuperDescriber(sample)
	}
}
package migrations

import (
	"fmt"

	"github.com/jinzhu/gorm"

	"github.com/tespo/satya/v2/db"
	"github.com/tespo/satya/v2/types"
)

// logErr prints a non-nil error. Migrations deliberately continue past
// individual failures so that one bad table does not block the rest;
// errors are surfaced but not fatal.
func logErr(err error) {
	if err != nil {
		fmt.Print(err)
	}
}

//
// Migrate runs all migrations
//
func Migrate() {
	conn, err := db.Open()
	if err != nil {
		fmt.Print(err)
		// BUG FIX: previously execution continued (and deferred Close)
		// on a failed connection; abort instead.
		return
	}
	defer conn.Close()

	// Tables
	// create primary tables such as Account, Dispensers, et cetera
	migratePrimaryTables(conn)
	// create the tables that use foreign keys
	migrateForeignTables(conn)
	// add the foreign keys to foreign tables
	addForeignKeys(conn)
}

//
// Nuke will drop all tables
//
func Nuke() {
	conn, err := db.Open()
	if err != nil {
		fmt.Print(err)
		// BUG FIX: see Migrate — do not proceed on a failed connection.
		return
	}
	defer conn.Close()

	// remove all the foreign key constraints before dropping
	removeForeignKeys(conn)
	// drop it like it's hot
	dropAllTables(conn)
}

// dropAllTables drops every application table. Order matters: tables
// holding foreign keys are dropped before the tables they reference.
// Entries may be model pointers or raw table names (join tables).
func dropAllTables(db *gorm.DB) {
	tables := []interface{}{
		&types.Usage{},
		&types.Reminder{},
		&types.Regimen{},
		&types.Connection{},
		&types.Insertion{},
		"oauth_client_role",
		"role_user",
		"permission_role",
		"oauth_client_grant",
		&types.Role{},
		&types.Barcode{},
		&types.Permission{},
		&types.User{},
		&types.Pod{},
		&types.Dispenser{},
		&types.Account{},
		&types.OauthAuthorizationCode{},
		&types.OauthClientUserRefreshToken{},
		&types.OauthScopeRequest{},
		&types.OauthAccessToken{},
		&types.OauthGrant{},
		&types.OauthClient{},
	}
	for _, t := range tables {
		logErr(db.DropTable(t).Error)
	}
}

// foreignKey pairs a scoped query (Model or Table) with the column and
// referenced destination of one foreign-key constraint.
type foreignKey struct {
	scope *gorm.DB
	field string
	dest  string
}

// removeForeignKeys drops all foreign-key constraints so the tables can
// subsequently be dropped without dependency errors.
func removeForeignKeys(db *gorm.DB) {
	constraints := []foreignKey{
		{db.Model(&types.Barcode{}), "pod_id", "pods(id)"},
		{db.Model(&types.Reminders{}), "user_id", "users(id)"},
		{db.Model(&types.Reminders{}), "regimen_id", "regimens(id)"},
		{db.Table("permission_role"), "permission_id", "permissions(id)"},
		{db.Table("permission_role"), "role_id", "roles(id)"},
		{db.Model(&types.Connections{}), "account_id", "accounts(id)"},
		{db.Model(&types.Connections{}), "dispenser_id", "dispensers(id)"},
		{db.Model(&types.Insertions{}), "dispenser_id", "dispensers(id)"},
		{db.Model(&types.Insertions{}), "regimen_id", "regimens(id)"},
		// NOTE(review): barcode_id is removed here but never added in
		// addForeignKeys — confirm which side is out of date.
		{db.Model(&types.Insertions{}), "barcode_id", "barcodes(id)"},
		{db.Model(&types.Regimens{}), "account_id", "accounts(id)"},
		{db.Model(&types.Regimens{}), "user_id", "users(id)"},
		{db.Model(&types.Regimens{}), "pod_id", "pods(id)"},
		// NOTE(review): usages user_id/barcode_id are added in
		// addForeignKeys but not removed here — confirm.
		{db.Model(&types.Usages{}), "dispenser_id", "dispensers(id)"},
		{db.Model(&types.Usages{}), "regimen_id", "regimens(id)"},
		{db.Table("role_user"), "user_id", "users(id)"},
		{db.Table("role_user"), "role_id", "roles(id)"},
		{db.Model(&types.User{}), "account_id", "accounts(id)"},
		{db.Table("oauth_client_grant"), "oauth_client_id", "oauth_clients(id)"},
		{db.Table("oauth_client_grant"), "oauth_grant_id", "oauth_grants(id)"},
	}
	for _, c := range constraints {
		logErr(c.scope.RemoveForeignKey(c.field, c.dest).Error)
	}
	// NOTE(review): this is a table drop, not a foreign-key removal, and
	// probably belongs in dropAllTables. Behavior preserved pending
	// confirmation.
	logErr(db.DropTable(&types.Invitation{}).Error)
}

// addForeignKeys installs all foreign-key constraints with
// RESTRICT/RESTRICT on delete/update.
func addForeignKeys(db *gorm.DB) {
	constraints := []foreignKey{
		{db.Model(&types.Users{}), "account_id", "accounts(id)"},
		{db.Model(&types.Barcodes{}), "pod_id", "pods(id)"},
		{db.Model(&types.Connections{}), "account_id", "accounts(id)"},
		{db.Model(&types.Connections{}), "dispenser_id", "dispensers(id)"},
		{db.Model(&types.Usages{}), "dispenser_id", "dispensers(id)"},
		{db.Model(&types.Usages{}), "regimen_id", "regimens(id)"},
		{db.Model(&types.Usages{}), "user_id", "users(id)"},
		{db.Model(&types.Usages{}), "barcode_id", "barcodes(id)"},
		{db.Model(&types.Regimens{}), "account_id", "accounts(id)"},
		{db.Model(&types.Regimens{}), "user_id", "users(id)"},
		{db.Model(&types.Regimens{}), "pod_id", "pods(id)"},
		{db.Model(&types.Reminders{}), "user_id", "users(id)"},
		{db.Model(&types.Reminders{}), "regimen_id", "regimens(id)"},
		{db.Model(&types.Insertions{}), "dispenser_id", "dispensers(id)"},
		{db.Model(&types.Insertions{}), "regimen_id", "regimens(id)"},
		{db.Table("permission_role"), "role_id", "roles(id)"},
		{db.Table("permission_role"), "permission_id", "permissions(id)"},
		{db.Table("role_user"), "role_id", "roles(id)"},
		{db.Table("role_user"), "user_id", "users(id)"},
		{db.Table("oauth_client_grant"), "oauth_client_id", "oauth_clients(id)"},
		{db.Table("oauth_client_grant"), "oauth_grant_id", "oauth_grants(id)"},
	}
	for _, c := range constraints {
		logErr(c.scope.AddForeignKey(c.field, c.dest, "RESTRICT", "RESTRICT").Error)
	}
}

// migrateForeignTables auto-migrates the tables that carry foreign keys;
// run after migratePrimaryTables so referenced tables exist first.
func migrateForeignTables(db *gorm.DB) {
	for _, model := range []interface{}{
		&types.Barcode{},
		&types.Regimen{},
		&types.Connection{},
		&types.Insertion{},
		&types.Usage{},
		&types.Reminder{},
	} {
		logErr(db.AutoMigrate(model).Error)
	}
}

// migratePrimaryTables auto-migrates the standalone tables that other
// tables reference.
func migratePrimaryTables(db *gorm.DB) {
	for _, model := range []interface{}{
		&types.Invitation{},
		&types.Account{},
		&types.Role{},
		&types.Permission{},
		&types.User{},
		&types.Dispenser{},
		&types.Pod{},
		&types.OauthClient{},
		&types.OauthAccessToken{},
		&types.OauthAuthorizationCode{},
		&types.OauthClientUserRefreshToken{},
		&types.OauthGrant{},
		&types.OauthScopeRequest{},
	} {
		logErr(db.AutoMigrate(model).Error)
	}
}
package database

import (
	"log"

	"gorm.io/driver/mysql"
	"gorm.io/gorm"

	"go-admin/config"
	"go-admin/global"
)

// DB is retained for backward compatibility with callers that reference
// database.DB; the active handle is stored in global.GDB by InitMySQL.
// NOTE(review): nothing in this file assigns it — confirm it is still
// needed before removing.
var DB *gorm.DB

// InitMySQL opens the MySQL connection described by admin, stores the
// handle in global.GDB, and configures the underlying connection pool
// (10 idle / 100 open connections). Failures are logged and the
// function returns, leaving global.GDB unset.
func InitMySQL(admin config.MySQL) {
	// DSN shape: user:pass@(host:port)/dbname?params
	dsn := admin.Username + ":" + admin.Password + "@(" + admin.Path + ")/" + admin.DBName + "?" + admin.Config
	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		log.Printf("DEFAULTDB数据库启动异常%v", err)
		return
	}
	global.GDB = db
	sqlDb, err := global.GDB.DB()
	if err != nil {
		// BUG FIX: this error was previously discarded with `_`, so a
		// failure to obtain the pool would panic on the calls below.
		log.Printf("failed to get underlying *sql.DB: %v", err)
		return
	}
	sqlDb.SetMaxIdleConns(10)
	sqlDb.SetMaxOpenConns(100)
}
package mock

import (
	"context"
	"time"

	"github.com/bobrovka/calendar/internal/models"
	"github.com/stretchr/testify/mock"
)

// StorageMock is a testify-based mock of the event storage.
type StorageMock struct {
	mock.Mock
}

// ListEvents mocks listing a user's events within [from, to].
func (m *StorageMock) ListEvents(ctx context.Context, user string, from, to time.Time) ([]*models.Event, error) {
	call := m.Called(ctx, user, from, to)
	if callErr := call.Error(1); callErr != nil {
		return nil, callErr
	}
	return call.Get(0).([]*models.Event), nil
}

// CreateEvent mocks creating an event; returns the new event's id.
func (m *StorageMock) CreateEvent(ctx context.Context, event *models.Event) (string, error) {
	call := m.Called(ctx, event)
	if callErr := call.Error(1); callErr != nil {
		return "", callErr
	}
	return call.String(0), nil
}

// UpdateEvent mocks updating the event with the given id.
func (m *StorageMock) UpdateEvent(ctx context.Context, id string, event *models.Event) error {
	return m.Called(ctx, id, event).Error(0)
}

// DeleteEvent mocks deleting the event with the given id.
func (m *StorageMock) DeleteEvent(ctx context.Context, id string) error {
	return m.Called(ctx, id).Error(0)
}

// PopNotifications mocks draining pending notification events.
func (m *StorageMock) PopNotifications(ctx context.Context) ([]*models.Event, error) {
	call := m.Called(ctx)
	if callErr := call.Error(1); callErr != nil {
		return nil, callErr
	}
	return call.Get(0).([]*models.Event), nil
}
// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package certcache import ( "bytes" "context" "crypto/x509" "encoding/base64" "io" "io/ioutil" "log" "net/http" "net/url" "strconv" "sync" "time" "github.com/WICG/webpackage/go/signedexchange/certurl" "github.com/ampproject/amppackager/packager/util" "github.com/julienschmidt/httprouter" "github.com/pkg/errors" "github.com/pquerna/cachecontrol" "golang.org/x/crypto/ocsp" ) // The maximum representable time, per https://stackoverflow.com/questions/25065055/what-is-the-maximum-time-time-in-go. var infiniteFuture = time.Unix(1<<63-62135596801, 999999999) // The OCSP code below aims to satisfy items 1-8 of the "sleevi" doc: // https://gist.github.com/sleevi/5efe9ef98961ecfb4da8 // Item #9 should be added. Item #10 is N/A for SXGs. // 1MB is the maximum used by github.com/xenolf/lego/acmev2 in GetOCSPForCert. // Alternatively, here's a random documented example of a 20K limit: // https://www.ibm.com/support/knowledgecenter/en/SSPREK_9.0.0/com.ibm.isam.doc/wrp_stza_ref/reference/ref_ocsp_max_size.html const maxOCSPResponseBytes = 1024 * 1024 // How often to check if OCSP stapling needs updating. const ocspCheckInterval = 1 * time.Hour type CertCache struct { // TODO(twifkak): Support multiple cert chains (for different domains, for different roots). 
certName string certs []*x509.Certificate ocspUpdateAfterMu sync.RWMutex ocspUpdateAfter time.Time // TODO(twifkak): Implement a registry of Updateable instances which can be configured in the toml. ocspFile Updateable client http.Client // "Virtual methods", exposed for testing. // Given a certificate, returns the OCSP responder URL for that cert. extractOCSPServer func(*x509.Certificate) (string, error) // Given an HTTP request/response, returns its cache expiry. httpExpiry func(*http.Request, *http.Response) time.Time } // Must call Init() on the returned CertCache before you can use it. func New(certs []*x509.Certificate, ocspCache string) *CertCache { return &CertCache{ certName: util.CertName(certs[0]), certs: certs, ocspUpdateAfter: infiniteFuture, // Default, in case initial readOCSP successfully loads from disk. // Distributed OCSP cache to support the following sleevi requirements: // 1. Support for keeping a long-lived (disk) cache of OCSP responses. // This should be fairly simple. Any restarting of the service // shouldn't blow away previous responses that were obtained. // 6. Distributed or proxiable fetching // ... there may be thousands of FE servers, all with the same // certificate, all needing to staple an OCSP response. You don't // want to have all of them hammering the OCSP server - ideally, // you'd have one request, in the backend, and updating them all. ocspFile: &Chained{first: &InMemory{}, second: &LocalFile{path: ocspCache}}, client: http.Client{Timeout: 60 * time.Second}, extractOCSPServer: func(cert *x509.Certificate) (string, error) { if len(cert.OCSPServer) < 1 { return "", errors.New("Cert missing OCSPServer.") } // This is a URI, per https://tools.ietf.org/html/rfc5280#section-4.2.2.1. 
return cert.OCSPServer[0], nil }, httpExpiry: func(req *http.Request, resp *http.Response) time.Time { reasons, expiry, err := cachecontrol.CachableResponse(req, resp, cachecontrol.Options{PrivateCache: true}) if len(reasons) > 0 || err != nil { return infiniteFuture } else { return expiry } }, } } func (this *CertCache) Init(stop chan struct{}) error { // Prime the OCSP disk and memory cache, so we can start serving immediately. _, _, err := this.readOCSP() if err != nil { return errors.Wrap(err, "initializing CertCache") } // Update OCSP in the background, per sleevi requirements: // 3. Refreshes the response, in the background, with sufficient time before expiration. // A rule of thumb would be to fetch at notBefore + (notAfter - // notBefore) / 2, which is saying "start fetching halfway through // the validity period". You want to be able to handle situations // like the OCSP responder giving you junk, but also sufficient time // to raise an alert if something has gone really wrong. // 7. The ability to serve old responses while fetching new responses. 
go this.maintainOCSP(stop) return nil } func (this *CertCache) createCertChainCBOR(ocsp []byte) ([]byte, error) { certChain := make(certurl.CertChain, len(this.certs)) for i, cert := range this.certs { certChain[i] = &certurl.CertChainItem{Cert: cert} } certChain[0].OCSPResponse = ocsp var buf bytes.Buffer err := certChain.Write(&buf) if err != nil { return nil, errors.Wrap(err, "Error writing cert chain") } return buf.Bytes(), nil } func (this *CertCache) ocspMidpoint(bytes []byte, issuer *x509.Certificate) (time.Time, error) { resp, err := ocsp.ParseResponseForCert(bytes, this.certs[0], issuer) if err != nil { return time.Time{}, errors.Wrap(err, "Parsing OCSP") } return resp.ThisUpdate.Add(resp.NextUpdate.Sub(resp.ThisUpdate) / 2), nil } func (this *CertCache) ServeHTTP(resp http.ResponseWriter, req *http.Request, params httprouter.Params) { if params.ByName("certName") == this.certName { // https://tools.ietf.org/html/draft-yasskin-httpbis-origin-signed-exchanges-impl-00#section-3.3 // This content-type is not standard, but included to reduce // the chance that faulty user agents employ content sniffing. resp.Header().Set("Content-Type", "application/cert-chain+cbor") // Instruct the intermediary to reload this cert-chain at the // OCSP midpoint, in case it cannot parse it. ocsp, _, err := this.readOCSP() if err != nil { util.NewHTTPError(http.StatusInternalServerError, "Error reading OCSP: ", err).LogAndRespond(resp) return } midpoint, err := this.ocspMidpoint(ocsp, this.findIssuer()) if err != nil { util.NewHTTPError(http.StatusInternalServerError, "Error computing OCSP midpoint: ", err).LogAndRespond(resp) return } // int is large enough to represent 24855 days in seconds. 
expiry := int(midpoint.Sub(time.Now()).Seconds()) if expiry < 0 { expiry = 0 } resp.Header().Set("Cache-Control", "public, max-age="+strconv.Itoa(expiry)) resp.Header().Set("ETag", "\""+this.certName+"\"") resp.Header().Set("X-Content-Type-Options", "nosniff") cbor, err := this.createCertChainCBOR(ocsp) if err != nil { util.NewHTTPError(http.StatusInternalServerError, "Error building cert chain: ", err).LogAndRespond(resp) return } http.ServeContent(resp, req, "", time.Time{}, bytes.NewReader(cbor)) } else { http.NotFound(resp, req) } } // If we've been unable to fetch a fresh OCSP response before expiry of the old // one, or, at server start-up, if we're unable to fetch a valid OCSP request at // all (either from disk or network), then return false. This signals to the // packager that it should not try to package anything; just proxy the content // unsigned. This is per sleevi requirement: // 8. Some idea of what to do when "things go bad". // What happens when it's been 7 days, no new OCSP response can be obtained, // and the current response is about to expire? func (this *CertCache) IsHealthy() bool { ocsp, _, err := this.readOCSP() return err != nil || this.isHealthy(ocsp) } func (this *CertCache) isHealthy(ocspResp []byte) bool { if ocspResp == nil { log.Println("OCSP response not yet fetched.") return false } issuer := this.findIssuer() if issuer == nil { log.Println("Cannot find issuer certificate in CertFile.") return false } resp, err := ocsp.ParseResponseForCert(ocspResp, this.certs[0], issuer) if err != nil { log.Println("Error parsing OCSP response:", err) return false } if resp.NextUpdate.Before(time.Now()) { log.Println("Cached OCSP is stale, NextUpdate:", resp.NextUpdate) return false } return true } // Returns the OCSP response and expiry, refreshing if necessary. 
// readOCSP returns the cached OCSP response and its HTTP-cache expiry,
// refreshing the on-disk cache via fetchOCSP when shouldUpdateOCSP says it is
// stale. It fails if the resulting response is missing or unhealthy.
func (this *CertCache) readOCSP() ([]byte, time.Time, error) {
	var ocspUpdateAfter time.Time

	ocsp, err := this.ocspFile.Read(context.Background(), this.shouldUpdateOCSP, func(orig []byte) []byte {
		return this.fetchOCSP(orig, &ocspUpdateAfter)
	})
	if err != nil {
		return nil, time.Time{}, errors.Wrap(err, "Updating OCSP cache")
	}
	if len(ocsp) == 0 {
		return nil, time.Time{}, errors.New("Missing OCSP response.")
	}
	if !this.isHealthy(ocsp) {
		return nil, time.Time{}, errors.New("OCSP failed health check.")
	}
	this.ocspUpdateAfterMu.Lock()
	defer this.ocspUpdateAfterMu.Unlock()
	if !ocspUpdateAfter.Equal(time.Time{}) {
		// fetchOCSP was called, and therefore a new HTTP cache expiry was set.
		// TODO(twifkak): Write this to disk, so any replica can pick it up.
		this.ocspUpdateAfter = ocspUpdateAfter
	}
	return ocsp, ocspUpdateAfter, nil
}

// maintainOCSP checks for OCSP updates once per ocspCheckInterval until stop
// is closed.
func (this *CertCache) maintainOCSP(stop chan struct{}) {
	// Only make one request per ocspCheckInterval, to minimize the impact
	// on OCSP servers that are buckling under load, per sleevi requirement:
	//
	// 5. As with any system doing background requests on a remote server,
	//    don't be a jerk and hammer the server when things are bad...
	//    sometimes servers and networks have issues. When a[n OCSP client]
	//    has trouble getting a request, hopefully it does something
	//    smarter than just retry in a busy loop, hammering the OCSP server
	//    into further oblivion.
	ticker := time.NewTicker(ocspCheckInterval)

	for {
		select {
		case <-ticker.C:
			_, _, err := this.readOCSP()
			if err != nil {
				log.Println("Warning: OCSP update failed. Cached response may expire:", err)
			}
		case <-stop:
			ticker.Stop()
			return
		}
	}
}

// shouldUpdateOCSP returns true if the cached OCSP response is expired (or
// near enough that a refresh should be attempted).
func (this *CertCache) shouldUpdateOCSP(bytes []byte) bool {
	if len(bytes) == 0 {
		// TODO(twifkak): Use a logging framework with support for debug-only statements.
		log.Println("Updating OCSP; none cached yet.")
		return true
	}
	issuer := this.findIssuer()
	if issuer == nil {
		log.Println("Cannot find issuer certificate in CertFile.")
		// This is a permanent error; do not attempt OCSP update.
		return false
	}
	// Compute the midpoint per sleevi #3 (see above).
	midpoint, err := this.ocspMidpoint(bytes, issuer)
	if err != nil {
		log.Println("Error computing OCSP midpoint:", err)
		return true
	}
	if time.Now().After(midpoint) {
		// TODO(twifkak): Use a logging framework with support for debug-only statements.
		log.Println("Updating OCSP; after midpoint: ", midpoint)
		return true
	}
	// Allow cache-control headers to indicate an earlier update time, per
	// https://tools.ietf.org/html/rfc5019#section-6.1, per sleevi requirement:
	//
	// 4. ... such a system should observe the Lightweight OCSP Profile of
	//    RFC 5019. This more or less boils down to "Use GET requests whenever
	//    possible, and observe HTTP cache semantics."
	this.ocspUpdateAfterMu.RLock()
	defer this.ocspUpdateAfterMu.RUnlock()
	if time.Now().After(this.ocspUpdateAfter) {
		// TODO(twifkak): Use a logging framework with support for debug-only statements.
		log.Println("Updating OCSP; expired by HTTP cache headers: ", this.ocspUpdateAfter)
		return true
	}
	// TODO(twifkak): Use a logging framework with support for debug-only statements.
	log.Println("No update necessary.")
	return false
}

// findIssuer finds the issuer of this cert (i.e. the second from the bottom of
// the chain). Returns nil when the configured chain does not contain it.
func (this *CertCache) findIssuer() *x509.Certificate {
	issuerName := this.certs[0].Issuer
	for _, cert := range this.certs {
		// The subject name is guaranteed to match the issuer name per
		// https://tools.ietf.org/html/rfc3280#section-4.1.2.4 and
		// #section-4.1.2.6. (The latter guarantees that the subject
		// name will be in the subject field and not the subjectAltName
		// field for CAs.)
		//
		// However, the definition of "match" is more complicated.
		// The general "Name matching" algorithm is defined in
		// https://www.itu.int/rec/T-REC-X.501-201610-I/en. However,
		// RFC3280 defines a subset, and pkix.Name.String() defines an
		// ad hoc canonical serialization (as opposed to
		// https://tools.ietf.org/html/rfc1779 which has many forms),
		// such that comparing the two strings should be sufficient.
		if cert.Subject.String() == issuerName.String() {
			return cert
		}
	}
	return nil
}

// fetchOCSP queries the OCSP responder for this cert and returns the OCSP
// response. On any failure it returns orig (the previous response) unchanged,
// so a transient error never clobbers a still-usable cached response.
func (this *CertCache) fetchOCSP(orig []byte, ocspUpdateAfter *time.Time) []byte {
	issuer := this.findIssuer()
	if issuer == nil {
		log.Println("Cannot find issuer certificate in CertFile.")
		return orig
	}
	// The default SHA1 hash function is mandated by the Lightweight OCSP
	// Profile, https://tools.ietf.org/html/rfc5019 2.1.1 (sleevi #4, see above).
	req, err := ocsp.CreateRequest(this.certs[0], issuer, nil)
	if err != nil {
		log.Println("Error creating OCSP request:", err)
		return orig
	}
	ocspServer, err := this.extractOCSPServer(this.certs[0])
	if err != nil {
		log.Println("Error extracting OCSP server:", err)
		return orig
	}
	// Conform to the Lightweight OCSP Profile, by preferring GET over POST
	// if the request is small enough (sleevi #4, see above).
	// https://tools.ietf.org/html/rfc2560#appendix-A.1.1 describes how the
	// URL should be formed.
	// https://tools.ietf.org/html/rfc5019#section-5 shows an example where
	// the base64 encoding includes '/' and '=' (and therefore should be
	// StdEncoding).
getURL := ocspServer + "/" + url.PathEscape(base64.StdEncoding.EncodeToString(req)) var httpReq *http.Request if len(getURL) <= 255 { httpReq, err = http.NewRequest("GET", getURL, nil) if err != nil { log.Println("Error creating OCSP response:", err) return orig } } else { httpReq, err = http.NewRequest("POST", ocspServer, bytes.NewReader(req)) if err != nil { log.Println("Error creating OCSP response:", err) return orig } httpReq.Header.Set("Content-Type", "application/ocsp-request") } httpResp, err := this.client.Do(httpReq) if err != nil { log.Println("Error issuing OCSP request:", err) return orig } if httpResp.Body != nil { defer httpResp.Body.Close() } // If cache-control headers indicate a response that is not ever // cacheable, then ignore them. Otherwise, allow them to indicate an // expiry earlier than we'd usually follow. *ocspUpdateAfter = this.httpExpiry(httpReq, httpResp) respBytes, err := ioutil.ReadAll(io.LimitReader(httpResp.Body, 1024*1024)) if err != nil { log.Println("Error reading OCSP response:", err) return orig } // Validate the response, per sleevi requirement: // 2. Validate the server responses to make sure it is something the client will accept. // and also per sleevi #4 (see above), as required by // https://tools.ietf.org/html/rfc5019#section-2.2.2. resp, err := ocsp.ParseResponseForCert(respBytes, this.certs[0], issuer) if err != nil { log.Println("Error parsing OCSP response:", err) return orig } if resp.Status != ocsp.Good { log.Println("Invalid OCSP status:", resp.Status) return orig } if resp.ThisUpdate.After(time.Now()) { log.Println("OCSP thisUpdate in the future:", resp.ThisUpdate) return orig } if resp.NextUpdate.Before(time.Now()) { log.Println("OCSP nextUpdate in the past:", resp.NextUpdate) return orig } // OCSP duration must be <=7 days, per // https://wicg.github.io/webpackage/draft-yasskin-httpbis-origin-signed-exchanges-impl.html#cross-origin-trust. // Serving these responses may cause UAs to reject the SXG. 
	if resp.NextUpdate.Sub(resp.ThisUpdate) > time.Hour*24*7 {
		log.Printf("OCSP nextUpdate %+v too far ahead of thisUpdate %+v\n", resp.NextUpdate, resp.ThisUpdate)
		return orig
	}
	// The response passed every validity check; replace the cached one.
	return respBytes
}
package main

import "fmt"

// main demonstrates slice/array aliasing: the first slice is a view into the
// array, and reassigning the slice variable to a fresh literal detaches it —
// the backing array is printed unchanged afterwards.
func main() {
	arr := [...]int{1, 2, 3, 4, 5}
	view := arr[1:3]

	fmt.Println("slicedI = ", view)
	fmt.Println("i = ", arr)

	// A brand-new slice literal: view no longer aliases arr.
	view = []int{7, 8}

	fmt.Println("slicedI = ", view)
	fmt.Println("i = ", arr)
}
// Template Declare Start
//
// ${function_name}:${todo}
// @Description:${todo}
// @receiver ${receiver}
// @param ${params}
// @return ${return_types}
//
// Template Declare End

// Methods Declare

// Method1:
// @Description: placeholder stub generated from the comment template above;
// fill in the description and body when the method is implemented.
func Method1() {

}
package linkaja

// PublicTokenRequest is the payload used to request a public checkout token.
// NOTE(review): field semantics below are inferred from the field names;
// confirm against the LinkAja API documentation.
type PublicTokenRequest struct {
	TrxId           string                   // merchant-side transaction identifier
	Total           string                   // total amount, transported as a string
	SuccessUrl      string                   // redirect target after a successful payment
	FailedUrl       string                   // redirect target after a failed payment
	Items           []PublicTokenItemRequest // line items making up the transaction
	MSISDN          string                   // customer phone number
	DefaultLanguage string
	DefaultTemplate string
}

// PublicTokenItemRequest describes one line item of a transaction; numeric
// values are transported as strings.
type PublicTokenItemRequest struct {
	Name     string
	Price    string
	Quantity string
}

// TransactionRequest identifies a transaction by its reference number.
type TransactionRequest struct {
	RefNum string
}
package kafka

import (
	"encoding/json"
	ckafka "github.com/confluentinc/confluent-kafka-go/kafka"
	route2 "github/cassiolpaixao/simulator-go/application/route"
	"github/cassiolpaixao/simulator-go/infra/kafka"
	"log"
	"os"
	"time"
)

// Produce decodes a route from the incoming Kafka message, loads its
// positions, and publishes each position to the topic named by the
// KafkaProduceTopic environment variable, throttled to one message per 500ms.
func Produce(msg *ckafka.Message) {
	producer := kafka.NewKafkaProducer()
	route := route2.NewRoute()
	// Bug fix: the Unmarshal error was previously ignored, so a malformed
	// message would be processed as an empty/stale route.
	if err := json.Unmarshal(msg.Value, &route); err != nil {
		log.Println(err.Error())
		return
	}
	route.LoadPosition()
	positions, err := route.ExportJsonPositions()
	if err != nil {
		// Nothing usable to publish; bail out instead of falling through.
		log.Println(err.Error())
		return
	}
	for _, p := range positions {
		kafka.Publish(p, os.Getenv("KafkaProduceTopic"), producer)
		time.Sleep(time.Millisecond * 500)
	}
}
package main

import (
	"flag"
	"fmt"
	"log"
	"net"

	"github.com/flix-tech/k8s-mdns/mdns"
	"k8s.io/api/core/v1"
	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// mustPublish registers an mDNS record, exiting the process on failure.
func mustPublish(rr string) {
	if err := mdns.Publish(rr); err != nil {
		log.Fatalf(`Unable to publish record "%s": %v`, rr, err)
	}
}

// mustUnPublish removes an mDNS record, exiting the process on failure.
func mustUnPublish(rr string) {
	if err := mdns.UnPublish(rr); err != nil {
		// Bug fix: this message previously said "publish" — it is the
		// removal that failed.
		log.Fatalf(`Unable to unpublish record "%s": %v`, rr, err)
	}
}

var (
	master            = flag.String("master", "", "url to master")
	default_namespace = flag.String("default-namespace", "default", "namespace in which services should also be published with a shorter entry")
	test              = flag.Bool("test", false, "testing mode, no connection to k8s")
)

// main watches Kubernetes services and (un)publishes matching mDNS A/PTR
// records as services come and go.
func main() {
	flag.Parse()
	if *test {
		mustPublish("router.local. 60 IN A 192.168.1.254")
		mustPublish("254.1.168.192.in-addr.arpa. 60 IN PTR router.local.")
		select {}
	}

	// uses the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags(*master, "")
	if err != nil {
		panic(err.Error())
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	for {
		services, err := clientset.CoreV1().Services("").Watch(metaV1.ListOptions{})
		if err != nil {
			panic(err.Error())
		}
		for {
			ev, ok := <-services.ResultChan()
			if !ok || ev.Object == nil {
				// Bug fix: a closed watch channel used to log.Fatalln
				// here, killing the process even though the outer loop
				// exists precisely to re-establish the watch. Break out
				// and re-watch instead.
				log.Println("Watch channel closed; re-establishing watch")
				break
			}
			service, isService := ev.Object.(*v1.Service)
			if !isService {
				// Bug fix: the watch can also deliver non-Service objects
				// (e.g. error statuses); the previous unchecked type
				// assertion would panic on them.
				continue
			}
			ip := net.ParseIP(service.Spec.ClusterIP)
			if ip == nil {
				continue
			}
			// net.ParseIP returns the 16-byte form, so the IPv4 octets
			// live at indices 12..15; reverse them for the PTR name.
			reverseIp := net.IPv4(ip[15], ip[14], ip[13], ip[12])
			records := []string{
				fmt.Sprintf("%s.%s.local. 120 IN A %s", service.Name, service.Namespace, ip),
				fmt.Sprintf("%s.in-addr.arpa. 120 IN PTR %s.%s.local.", reverseIp, service.Name, service.Namespace),
			}
			if service.Namespace == *default_namespace {
				records = append(records, fmt.Sprintf("%s.local. 120 IN A %s", service.Name, ip))
			}
			switch ev.Type {
			case watch.Added:
				for _, record := range records {
					log.Printf("Added %s\n", record)
					mustPublish(record)
				}
			case watch.Deleted:
				for _, record := range records {
					log.Printf("Remove %s\n", record)
					mustUnPublish(record)
				}
			case watch.Modified:
				// ignore
			}
		}
	}
}
package main

import (
	"fmt"
	"math"
)

// main shows a small text menu and dispatches once to the chosen demo
// program; an unknown choice re-displays the menu.
func main() {
	fmt.Println("Hello, Andrei :-)")
	for {
		var choice int
		fmt.Print("1. Calculate area of a rectangle. " +
			"\n2. Calculate length and diameter of a circle by area." +
			"\n3. Expand three-digit number into hundreds, tens, units." +
			"\n9. For exit." +
			"\nChoose program:")
		fmt.Scanln(&choice)
		switch choice {
		case 1:
			sRectangle()
			return
		case 2:
			circleParametersByArea()
			return
		case 3:
			numericParameters()
			return
		case 9:
			return
		default:
			fmt.Println("Program not found. Repeat.")
		}
	}
}

// sRectangle prompts for height and width (re-prompting on empty input) and
// prints the rectangle's area.
func sRectangle() {
	var h, w int
	for {
		fmt.Print("Enter height:")
		fmt.Scanln(&h)
		if h != 0 {
			break
		}
		fmt.Println("You entered empty value ")
	}
	for {
		fmt.Print("Enter width:")
		fmt.Scanln(&w)
		if w != 0 {
			break
		}
		fmt.Println("You entered empty value ")
	}
	fmt.Println("Area of rectangle:", h*w)
}

// circleParametersByArea prompts for a circle's area (re-prompting on empty
// input) and prints its diameter and circumference.
func circleParametersByArea() {
	var s float64
	for {
		fmt.Print("Enter area:")
		fmt.Scanln(&s)
		if s != 0 {
			break
		}
		fmt.Println("You entered empty value")
	}
	fmt.Println("Circle diameter:", 2*math.Sqrt(s/math.Pi),
		"\nCircle length:", math.Sqrt(s*4*math.Pi))
}

// numericParameters prompts for a value up to three digits (re-prompting on
// empty or too-large input) and prints its hundreds, tens and units digits.
func numericParameters() {
	var a int
	for {
		fmt.Print("Enter value:")
		fmt.Scanln(&a)
		if a != 0 && a <= 999 {
			break
		}
		fmt.Println("You entered non-three-digit value or empty value")
	}
	fmt.Println("Count of hundreds in value:", a/100,
		"\nCount of tens in value:", a/10%10,
		"\nCount of units in value:", a%10)
}
package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"os"
	"strconv"
	"strings"
)

const (
	// Up is the north direction in the grid.
	Up = iota
	// Right is the east direction in the grid.
	Right
	// Down is the south direction in the grid.
	Down
	// Left is the west direction in the grid.
	Left
)

// Step represent a path component.
type Step struct {
	Direction uint8 // Direction is either Up, Right, Down or Left.
	Count     int64 // Count is the step's number of port.
}

// Path represent a circuit wire connection description.
type Path []Step

// Point represent a position in the grid.
// y grows Up, x grows Right, the zero value is the central port.
type Point struct {
	x, y int64
}

// Segment are straight connections between two points.
type Segment struct {
	From, To   Point
	xmin, xmax int64
	ymin, ymax int64
	steps      int64
}

// Wire represent a wire connected into the grid from the central port.
type Wire []*Segment

// CentralPort returns the grid point from which every wire connection begins.
// By convention it is the Point zero value.
func CentralPort() Point {
	var origin Point
	return origin
}

// Add returns the point-wise sum p + other.
func (p Point) Add(other Point) Point {
	return Point{
		x: other.x + p.x,
		y: other.y + p.y,
	}
}

// Distance compute and returns the Manhattan distance between two points.
func (p Point) Distance(other Point) int64 {
	dx := abs(p.x - other.x)
	dy := abs(p.y - other.y)
	return dx + dy
}

// NewSegment create a new Segment given the starting and ending points,
// precomputing its bounding box and its length in steps.
func NewSegment(from, to Point) *Segment {
	xlo, xhi := from.x, to.x
	if xhi < xlo {
		xlo, xhi = xhi, xlo
	}
	ylo, yhi := from.y, to.y
	if yhi < ylo {
		ylo, yhi = yhi, ylo
	}
	return &Segment{
		From: from, To: to,
		xmin: xlo, xmax: xhi,
		ymin: ylo, ymax: yhi,
		steps: from.Distance(to),
	}
}

// IntersectWith check if the other Segment and seg share a point. If they do,
// the intersection point and true is returned. Otherwise the Point zero value
// and false is returned.
func (seg *Segment) IntersectWith(other *Segment) (Point, bool) {
	// NOTE(review): both cases assume axis-aligned, perpendicular segments
	// (which is what NewWire produces); overlapping collinear segments are
	// not reported — confirm that is acceptable for the puzzle input.
	switch {
	// other crosses seg at (other.xmin, seg.ymin).
	case seg.xmin <= other.xmin && seg.xmax >= other.xmin && seg.ymin <= other.ymax && seg.ymin >= other.ymin:
		return Point{x: other.xmin, y: seg.ymin}, true
	// the symmetric case, with the roles of seg and other swapped.
	case other.xmin <= seg.xmin && other.xmax >= seg.xmin && other.ymin <= seg.ymax && other.ymin >= seg.ymin:
		return Point{x: seg.xmin, y: other.ymin}, true
	}
	return Point{}, false
}

// NewWire place a given wire path into the grid and return the resulting Wire.
func NewWire(path Path) Wire {
	var wire Wire
	start := CentralPort() // the current position, starting at the central port.
	for _, step := range path {
		stop := start
		switch step.Direction {
		case Up:
			stop.y += step.Count
		case Right:
			stop.x += step.Count
		case Down:
			stop.y -= step.Count
		case Left:
			stop.x -= step.Count
		}
		wire = append(wire, NewSegment(start, stop))
		start = stop
	}
	return wire
}

// Connect link a couple of wire on the grid. It returns the the Manhattan
// distance from the central port to the closest intersection (md) and the
// fewest combined steps the wires must take to reach an intersection (ms).
// Both md and ms are -1 when the wires never intersect.
func Connect(a, b Wire) (md, ms int64) {
	md = -1
	ms = -1
	cp := CentralPort()
	var astep int64 = 0 // steps walked along a before the current segment
	for _, aseg := range a {
		var bstep int64 = 0 // steps walked along b before the current segment
		for _, bseg := range b {
			// we omit the intersection at the central port, hence p != cp.
			if p, ok := aseg.IntersectWith(bseg); ok && p != cp {
				// min distance
				d := cp.Distance(p)
				if md == -1 || d < md {
					md = d
				}
				// min combined step
				s := astep + aseg.From.Distance(p) + bstep + bseg.From.Distance(p)
				if ms == -1 || s < ms {
					ms = s
				}
			}
			bstep += bseg.steps
		}
		astep += aseg.steps
	}
	return
}

// main compute and display the Manhattan distance from the central port to the
// closest intersection of the wires description given on stdin.
func main() { paths, err := Parse(os.Stdin) if err != nil { log.Fatalf("input error: %s\n", err) } fst := NewWire(paths[0]) snd := NewWire(paths[1]) md, ms := Connect(fst, snd) fmt.Printf("The Manhattan distance fron the central port to the closest intersection is %v,\n", md) fmt.Printf("and the fewest combined steps the wires must take to reach an intersection is %v.\n", ms) } // Parse a couple of wire paths. // It returns the parsed paths and any read or parsing error encountered. func Parse(r io.Reader) ([]Path, error) { var paths []Path scanner := bufio.NewScanner(r) for scanner.Scan() { var path Path line := scanner.Text() for _, part := range strings.Split(line, ",") { step, err := parseStep(part) if err != nil { return nil, err } path = append(path, step) } paths = append(paths, path) } if err := scanner.Err(); err != nil { return nil, err } return paths, nil } // parseStep is a parsing helper for Parse. // It parse and returns one step any parsing error encountered. func parseStep(s string) (Step, error) { var step Step // the smallest step would be something like U1 if len(s) < 2 { return step, fmt.Errorf("step too short: %s", s) } // parse the direction switch s[0] { case 'U': step.Direction = Up case 'R': step.Direction = Right case 'D': step.Direction = Down case 'L': step.Direction = Left default: return step, fmt.Errorf("unrecognized direction: %c", s[0]) } // parse the step count i, err := strconv.ParseUint(s[1:], 10, 63) // 63 bit size fit in int64 if err != nil { return step, err } step.Count = int64(i) return step, nil } // abs compute and returns the absolute value of n. func abs(n int64) int64 { y := n >> 63 return (n ^ y) - y }
package main

import (
	"log"
	"net/http"

	"github.com/labstack/echo"
	mw "github.com/labstack/echo/middleware"
)

// Hosts maps a Host header value to the handler serving that virtual host.
type Hosts map[string]http.Handler

// ServeHTTP implements http.Handler by dispatching on the request's Host,
// replying 404 when no handler is registered for it.
func (h Hosts) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if handler := h[r.Host]; handler != nil {
		handler.ServeHTTP(w, r)
	} else {
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
	}
}

// main runs three echo apps (API, blog, website) behind one listener,
// demonstrating Host-header based virtual hosting.
func main() {
	// Host map
	hosts := make(Hosts)

	//-----
	// API
	//-----

	api := echo.New()
	api.Use(mw.Logger())
	api.Use(mw.Recover())

	hosts["api.localhost:1323"] = api

	api.Get("/", func(c *echo.Context) error {
		return c.String(http.StatusOK, "API")
	})

	//------
	// Blog
	//------

	blog := echo.New()
	blog.Use(mw.Logger())
	blog.Use(mw.Recover())

	hosts["blog.localhost:1323"] = blog

	blog.Get("/", func(c *echo.Context) error {
		return c.String(http.StatusOK, "Blog")
	})

	//---------
	// Website
	//---------

	site := echo.New()
	site.Use(mw.Logger())
	site.Use(mw.Recover())

	hosts["localhost:1323"] = site

	site.Get("/", func(c *echo.Context) error {
		return c.String(http.StatusOK, "Welcome!")
	})

	// Bug fix: the error from ListenAndServe was silently discarded, so
	// startup failures (e.g. port already in use) went unreported.
	if err := http.ListenAndServe(":1323", hosts); err != nil {
		log.Fatal(err)
	}
}
package sync

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"k8s.io/client-go/util/workqueue"
)

// TestNoParallelismSamePriority verifies that with no parallelism limit (0)
// and equal priorities, items are released in order of their creation time,
// earliest first.
func TestNoParallelismSamePriority(t *testing.T) {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	throttler := NewThrottler(0, queue)

	throttler.Add("c", 0, time.Now().Add(2*time.Hour))
	throttler.Add("b", 0, time.Now().Add(1*time.Hour))
	throttler.Add("a", 0, time.Now())

	next, ok := throttler.Next("b")
	assert.True(t, ok)
	assert.Equal(t, "a", next)

	next, ok = throttler.Next("c")
	assert.True(t, ok)
	assert.Equal(t, "b", next)
}

// TestWithParallelismLimitAndPriority verifies that with a limit of 2, the two
// highest-priority items ("d" then "c") are released, the limit blocks a third
// item, and removing an item frees a slot, re-queueing the next candidate.
func TestWithParallelismLimitAndPriority(t *testing.T) {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	throttler := NewThrottler(2, queue)

	throttler.Add("a", 1, time.Now())
	throttler.Add("b", 2, time.Now())
	throttler.Add("c", 3, time.Now())
	throttler.Add("d", 4, time.Now())

	next, ok := throttler.Next("a")
	assert.True(t, ok)
	assert.Equal(t, "d", next)

	next, ok = throttler.Next("a")
	assert.True(t, ok)
	assert.Equal(t, "c", next)

	// Both slots are taken: no further item may start.
	_, ok = throttler.Next("a")
	assert.False(t, ok)

	// An already-running item keeps its slot.
	next, ok = throttler.Next("c")
	assert.True(t, ok)
	assert.Equal(t, "c", next)

	throttler.Remove("c")
	assert.Equal(t, 1, queue.Len())
	queued, _ := queue.Get()
	assert.Equal(t, "b", queued)
}

// TestChangeParallelism verifies that raising the parallelism limit at runtime
// re-queues the now-admissible items in priority order.
func TestChangeParallelism(t *testing.T) {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	throttler := NewThrottler(1, queue)

	throttler.Add("a", 1, time.Now())
	throttler.Add("b", 2, time.Now())
	throttler.Add("c", 3, time.Now())
	throttler.Add("d", 4, time.Now())

	next, ok := throttler.Next("a")
	assert.True(t, ok)
	assert.Equal(t, "d", next)

	_, ok = throttler.Next("b")
	assert.False(t, ok)

	_, ok = throttler.Next("c")
	assert.False(t, ok)

	throttler.SetParallelism(3)
	assert.Equal(t, 2, queue.Len())

	queued, _ := queue.Get()
	assert.Equal(t, "c", queued)
	queued, _ = queue.Get()
	assert.Equal(t, "b", queued)
}
package components

import (
	"context"

	"github.com/mitchellh/go-glint"
)

// WatchEvent builds a component for a watch-style event: message on one row
// (prefixed with a running icon while isRunning is true) and the yield
// components indented beneath it.
func WatchEvent(isRunning bool, message glint.Component, yield []glint.Component) *WatchEventComponent {
	return &WatchEventComponent{isRunning: isRunning, message: message, yield: yield}
}

// LargeEvent builds a component showing message with the yield components
// indented beneath it.
func LargeEvent(message glint.Component, yield []glint.Component) *LargeEventComponent {
	return &LargeEventComponent{message: message, yield: yield}
}

// SmallEvent renders event followed by the line components on a single row.
func SmallEvent(event glint.Component, line ...glint.Component) glint.Component {
	components := append([]glint.Component{event}, line...)
	return glint.Layout(
		components...,
	).Row()
}

// WatchEventComponent is the component type returned by WatchEvent.
type WatchEventComponent struct {
	isRunning bool
	message   glint.Component
	yield     []glint.Component
}

// Body implements glint.Component.
func (c *WatchEventComponent) Body(context.Context) glint.Component {
	// Generator lets Maybe defer building the icon until it is needed.
	genRunning := func() glint.Component {
		return IconRunning()
	}

	return glint.Layout(
		LineSpacing(),
		glint.Layout(
			Maybe(genRunning, c.isRunning),
			c.message,
		).Row(),
		LineSpacing(),
		glint.Layout(c.yield...).MarginLeft(4),
		LineSpacing(),
	)
}

// LargeEventComponent is the component type returned by LargeEvent.
type LargeEventComponent struct {
	message glint.Component
	yield   []glint.Component
}

// Body implements glint.Component.
func (c *LargeEventComponent) Body(context.Context) glint.Component {
	return glint.Layout(
		c.message,
		glint.Layout(c.yield...).MarginLeft(4),
		LineSpacing(),
	)
}

// Maybe returns gen() when predicate is true, otherwise an empty text
// component that renders nothing.
func Maybe(gen func() glint.Component, predicate bool) glint.Component {
	if predicate {
		return gen()
	}

	// Return a "noop" for convenience
	return glint.Text("")
}
package easy771 func numJewelsInStones(jewels string, stones string) int { set := make(map[rune]struct{}) for _, v := range jewels { set[v] = struct{}{} } var cnt int for _, v := range stones { if _, ok := set[v]; ok { cnt++ } } return cnt }
// Copyright 2018 PingCAP, Inc. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package session import ( "bytes" "context" "fmt" "runtime/trace" "strings" "sync/atomic" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/binloginfo" "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sli" "github.com/pingcap/tidb/util/syncutil" "github.com/pingcap/tipb/go-binlog" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" "go.uber.org/zap" ) // LazyTxn wraps kv.Transaction to provide a new kv.Transaction. // 1. It holds all statement related modification in the buffer before flush to the txn, // so if execute statement meets error, the txn won't be made dirty. // 2. It's a lazy transaction, that means it's a txnFuture before StartTS() is really need. 
type LazyTxn struct {
	// States of a LazyTxn should be one of the followings:
	// Invalid: kv.Transaction == nil && txnFuture == nil
	// Pending: kv.Transaction == nil && txnFuture != nil
	// Valid: kv.Transaction != nil && txnFuture == nil
	kv.Transaction
	txnFuture *txnFuture

	// initCnt is the memory-buffer length at the time the current
	// statement's staging buffer was opened; countHint derives the
	// per-statement mutation count from it.
	initCnt       int
	stagingHandle kv.StagingHandle
	mutations     map[int64]*binlog.TableMutation
	writeSLI      sli.TxnWriteThroughputSLI

	// enterFairLockingOnValid requests that StartFairLocking be called as
	// soon as the pending txn becomes valid (see changePendingToValid).
	enterFairLockingOnValid bool

	// TxnInfo is added for the lock view feature, the data is frequent modified but
	// rarely read (just in query select * from information_schema.tidb_trx).
	// The data in this session would be query by other sessions, so Mutex is necessary.
	// Since read is rare, the reader can copy-on-read to get a data snapshot.
	mu struct {
		syncutil.RWMutex
		txninfo.TxnInfo
	}

	// mark the txn enables lazy uniqueness check in pessimistic transactions.
	lazyUniquenessCheckEnabled bool
}

// GetTableInfo returns the cached index name.
func (txn *LazyTxn) GetTableInfo(id int64) *model.TableInfo {
	return txn.Transaction.GetTableInfo(id)
}

// CacheTableInfo caches the index name.
func (txn *LazyTxn) CacheTableInfo(id int64, info *model.TableInfo) {
	txn.Transaction.CacheTableInfo(id, info)
}

// init resets the binlog mutation buffer and the lock-protected TxnInfo.
func (txn *LazyTxn) init() {
	txn.mutations = make(map[int64]*binlog.TableMutation)
	txn.mu.Lock()
	defer txn.mu.Unlock()
	txn.mu.TxnInfo = txninfo.TxnInfo{}
}

// call this under lock!
// updateState transitions the TxnInfo state machine, observing the duration
// spent in the previous state and counting entry into the new one.
// Must be called with txn.mu held (see the "call this under lock!" note).
func (txn *LazyTxn) updateState(state txninfo.TxnRunningState) {
	if txn.mu.TxnInfo.State != state {
		lastState := txn.mu.TxnInfo.State
		lastStateChangeTime := txn.mu.TxnInfo.LastStateChangeTime
		txn.mu.TxnInfo.State = state
		txn.mu.TxnInfo.LastStateChangeTime = time.Now()
		if !lastStateChangeTime.IsZero() {
			hasLockLbl := !txn.mu.TxnInfo.BlockStartTime.IsZero()
			txninfo.TxnDurationHistogram(lastState, hasLockLbl).Observe(time.Since(lastStateChangeTime).Seconds())
		}
		txninfo.TxnStatusEnteringCounter(state).Inc()
	}
}

// initStmtBuf opens a staging buffer on the underlying memory buffer so the
// current statement's writes can be discarded if the statement errors.
func (txn *LazyTxn) initStmtBuf() {
	if txn.Transaction == nil {
		return
	}
	buf := txn.Transaction.GetMemBuffer()
	txn.initCnt = buf.Len()
	txn.stagingHandle = buf.Staging()
}

// countHint is estimated count of mutations.
func (txn *LazyTxn) countHint() int {
	if txn.stagingHandle == kv.InvalidStagingHandle {
		return 0
	}
	return txn.Transaction.GetMemBuffer().Len() - txn.initCnt
}

// flushStmtBuf releases the current statement's staged writes into the
// transaction buffer.
func (txn *LazyTxn) flushStmtBuf() {
	if txn.stagingHandle == kv.InvalidStagingHandle {
		return
	}
	buf := txn.Transaction.GetMemBuffer()

	if txn.lazyUniquenessCheckEnabled {
		// Persist the presume-key-not-exists flag so it survives beyond
		// the current statement's staging buffer.
		keysNeedSetPersistentPNE := kv.FindKeysInStage(buf, txn.stagingHandle, func(k kv.Key, flags kv.KeyFlags, v []byte) bool {
			return flags.HasPresumeKeyNotExists()
		})
		for _, key := range keysNeedSetPersistentPNE {
			buf.UpdateFlags(key, kv.SetPreviousPresumeKeyNotExists)
		}
	}

	buf.Release(txn.stagingHandle)
	txn.initCnt = buf.Len()
}

// cleanupStmtBuf discards the current statement's staged writes and refreshes
// the published entry count.
func (txn *LazyTxn) cleanupStmtBuf() {
	if txn.stagingHandle == kv.InvalidStagingHandle {
		return
	}
	buf := txn.Transaction.GetMemBuffer()
	buf.Cleanup(txn.stagingHandle)
	txn.initCnt = buf.Len()

	txn.mu.Lock()
	defer txn.mu.Unlock()
	txn.mu.TxnInfo.EntriesCount = uint64(txn.Transaction.Len())
}

// resetTxnInfo resets the transaction info.
// Note: call it under lock!
func (txn *LazyTxn) resetTxnInfo(
	startTS uint64,
	state txninfo.TxnRunningState,
	entriesCount uint64,
	currentSQLDigest string,
	allSQLDigests []string,
) {
	// Flush the metrics of the outgoing TxnInfo before replacing it.
	if !txn.mu.LastStateChangeTime.IsZero() {
		lastState := txn.mu.State
		hasLockLbl := !txn.mu.BlockStartTime.IsZero()
		txninfo.TxnDurationHistogram(lastState, hasLockLbl).Observe(time.Since(txn.mu.TxnInfo.LastStateChangeTime).Seconds())
	}
	if txn.mu.TxnInfo.StartTS != 0 {
		txninfo.Recorder.OnTrxEnd(&txn.mu.TxnInfo)
	}
	txn.mu.TxnInfo = txninfo.TxnInfo{}
	txn.mu.TxnInfo.StartTS = startTS
	txn.mu.TxnInfo.State = state
	txninfo.TxnStatusEnteringCounter(state).Inc()
	txn.mu.TxnInfo.LastStateChangeTime = time.Now()
	txn.mu.TxnInfo.EntriesCount = entriesCount
	txn.mu.TxnInfo.CurrentSQLDigest = currentSQLDigest
	txn.mu.TxnInfo.AllSQLDigests = allSQLDigests
}

// Size implements the MemBuffer interface.
func (txn *LazyTxn) Size() int {
	if txn.Transaction == nil {
		return 0
	}
	return txn.Transaction.Size()
}

// Mem implements the MemBuffer interface.
func (txn *LazyTxn) Mem() uint64 {
	if txn.Transaction == nil {
		return 0
	}
	return txn.Transaction.Mem()
}

// SetMemoryFootprintChangeHook sets the hook to be called when the memory footprint of this transaction changes.
func (txn *LazyTxn) SetMemoryFootprintChangeHook(hook func(uint64)) {
	if txn.Transaction == nil {
		return
	}
	txn.Transaction.SetMemoryFootprintChangeHook(hook)
}

// Valid implements the kv.Transaction interface.
func (txn *LazyTxn) Valid() bool {
	return txn.Transaction != nil && txn.Transaction.Valid()
}

// pending reports whether the txn is still an unresolved future.
func (txn *LazyTxn) pending() bool {
	return txn.Transaction == nil && txn.txnFuture != nil
}

// validOrPending reports whether the txn is usable now, or will be once its
// future resolves.
func (txn *LazyTxn) validOrPending() bool {
	return txn.txnFuture != nil || txn.Valid()
}

// String implements fmt.Stringer, describing the txn's current state.
func (txn *LazyTxn) String() string {
	if txn.Transaction != nil {
		return txn.Transaction.String()
	}
	if txn.txnFuture != nil {
		res := "txnFuture"
		if txn.enterFairLockingOnValid {
			res += " (pending fair locking)"
		}
		return res
	}
	return "invalid transaction"
}

// GoString implements the "%#v" format for fmt.Printf.
func (txn *LazyTxn) GoString() string {
	var s strings.Builder
	s.WriteString("Txn{")
	if txn.pending() {
		s.WriteString("state=pending")
	} else if txn.Valid() {
		s.WriteString("state=valid")
		fmt.Fprintf(&s, ", txnStartTS=%d", txn.Transaction.StartTS())
		if len(txn.mutations) > 0 {
			fmt.Fprintf(&s, ", len(mutations)=%d, %#v", len(txn.mutations), txn.mutations)
		}
	} else {
		s.WriteString("state=invalid")
	}

	s.WriteString("}")
	return s.String()
}

// GetOption implements the GetOption
func (txn *LazyTxn) GetOption(opt int) interface{} {
	if txn.Transaction == nil {
		// Special-case TxnScope so callers always receive a string.
		if opt == kv.TxnScope {
			return ""
		}
		return nil
	}
	return txn.Transaction.GetOption(opt)
}

// changeToPending moves the txn into the Pending state, backed by future.
func (txn *LazyTxn) changeToPending(future *txnFuture) {
	txn.Transaction = nil
	txn.txnFuture = future
}

// changePendingToValid waits for the txn future and promotes the txn to the
// Valid state, opening the first statement's staging buffer and entering
// fair locking if it was requested while pending.
func (txn *LazyTxn) changePendingToValid(ctx context.Context, sctx sessionctx.Context) error {
	if txn.txnFuture == nil {
		return errors.New("transaction future is not set")
	}

	future := txn.txnFuture
	txn.txnFuture = nil

	defer trace.StartRegion(ctx, "WaitTsoFuture").End()
	t, err := future.wait()
	if err != nil {
		txn.Transaction = nil
		return err
	}
	txn.Transaction = t
	txn.initStmtBuf()

	if txn.enterFairLockingOnValid {
		txn.enterFairLockingOnValid = false
		err = txn.Transaction.StartFairLocking()
		if err != nil {
			return err
		}
	}

	// The txnInfo may already recorded the first statement (usually "begin") when it's pending, so keep them.
	txn.mu.Lock()
	defer txn.mu.Unlock()
	txn.resetTxnInfo(
		t.StartTS(),
		txninfo.TxnIdle,
		uint64(txn.Transaction.Len()),
		txn.mu.TxnInfo.CurrentSQLDigest,
		txn.mu.TxnInfo.AllSQLDigests)

	// set resource group name for kv request such as lock pessimistic keys.
	kv.SetTxnResourceGroup(txn, sctx.GetSessionVars().ResourceGroupName)

	return nil
}

// changeToInvalid drops the staging buffer, the inner transaction, and any
// pending future, and closes out the TxnInfo metrics.
func (txn *LazyTxn) changeToInvalid() {
	if txn.stagingHandle != kv.InvalidStagingHandle {
		txn.Transaction.GetMemBuffer().Cleanup(txn.stagingHandle)
	}
	txn.stagingHandle = kv.InvalidStagingHandle
	txn.Transaction = nil
	txn.txnFuture = nil
	txn.enterFairLockingOnValid = false

	txn.mu.Lock()
	// Snapshot what the metrics need, then release the lock before the
	// (comparatively slow) histogram observation below.
	lastState := txn.mu.TxnInfo.State
	lastStateChangeTime := txn.mu.TxnInfo.LastStateChangeTime
	hasLock := !txn.mu.TxnInfo.BlockStartTime.IsZero()
	if txn.mu.TxnInfo.StartTS != 0 {
		txninfo.Recorder.OnTrxEnd(&txn.mu.TxnInfo)
	}
	txn.mu.TxnInfo = txninfo.TxnInfo{}
	txn.mu.Unlock()
	if !lastStateChangeTime.IsZero() {
		txninfo.TxnDurationHistogram(lastState, hasLock).Observe(time.Since(lastStateChangeTime).Seconds())
	}
}

// onStmtStart records the new statement's SQL digest into the TxnInfo and
// marks the txn as running.
func (txn *LazyTxn) onStmtStart(currentSQLDigest string) {
	if len(currentSQLDigest) == 0 {
		return
	}

	txn.mu.Lock()
	defer txn.mu.Unlock()

	txn.updateState(txninfo.TxnRunning)
	txn.mu.TxnInfo.CurrentSQLDigest = currentSQLDigest
	// Keeps at most 50 history sqls to avoid consuming too much memory.
const maxTransactionStmtHistory int = 50 if len(txn.mu.TxnInfo.AllSQLDigests) < maxTransactionStmtHistory { txn.mu.TxnInfo.AllSQLDigests = append(txn.mu.TxnInfo.AllSQLDigests, currentSQLDigest) } } func (txn *LazyTxn) onStmtEnd() { txn.mu.Lock() defer txn.mu.Unlock() txn.mu.TxnInfo.CurrentSQLDigest = "" txn.updateState(txninfo.TxnIdle) } var hasMockAutoIncIDRetry = int64(0) func enableMockAutoIncIDRetry() { atomic.StoreInt64(&hasMockAutoIncIDRetry, 1) } func mockAutoIncIDRetry() bool { return atomic.LoadInt64(&hasMockAutoIncIDRetry) == 1 } var mockAutoRandIDRetryCount = int64(0) func needMockAutoRandIDRetry() bool { return atomic.LoadInt64(&mockAutoRandIDRetryCount) > 0 } func decreaseMockAutoRandIDRetryCount() { atomic.AddInt64(&mockAutoRandIDRetryCount, -1) } // ResetMockAutoRandIDRetryCount set the number of occurrences of // `kv.ErrTxnRetryable` when calling TxnState.Commit(). func ResetMockAutoRandIDRetryCount(failTimes int64) { atomic.StoreInt64(&mockAutoRandIDRetryCount, failTimes) } // Commit overrides the Transaction interface. func (txn *LazyTxn) Commit(ctx context.Context) error { defer txn.reset() if len(txn.mutations) != 0 || txn.countHint() != 0 { logutil.BgLogger().Error("the code should never run here", zap.String("TxnState", txn.GoString()), zap.Int("staging handler", int(txn.stagingHandle)), zap.Int("mutations", txn.countHint()), zap.Stack("something must be wrong")) return errors.Trace(kv.ErrInvalidTxn) } txn.mu.Lock() txn.updateState(txninfo.TxnCommitting) txn.mu.Unlock() failpoint.Inject("mockSlowCommit", func(_ failpoint.Value) {}) // mockCommitError8942 is used for PR #8942. failpoint.Inject("mockCommitError8942", func(val failpoint.Value) { if val.(bool) { failpoint.Return(kv.ErrTxnRetryable) } }) // mockCommitRetryForAutoIncID is used to mock an commit retry for adjustAutoIncrementDatum. 
failpoint.Inject("mockCommitRetryForAutoIncID", func(val failpoint.Value) { if val.(bool) && !mockAutoIncIDRetry() { enableMockAutoIncIDRetry() failpoint.Return(kv.ErrTxnRetryable) } }) failpoint.Inject("mockCommitRetryForAutoRandID", func(val failpoint.Value) { if val.(bool) && needMockAutoRandIDRetry() { decreaseMockAutoRandIDRetryCount() failpoint.Return(kv.ErrTxnRetryable) } }) return txn.Transaction.Commit(ctx) } // Rollback overrides the Transaction interface. func (txn *LazyTxn) Rollback() error { defer txn.reset() txn.mu.Lock() txn.updateState(txninfo.TxnRollingBack) txn.mu.Unlock() // mockSlowRollback is used to mock a rollback which takes a long time failpoint.Inject("mockSlowRollback", func(_ failpoint.Value) {}) return txn.Transaction.Rollback() } // RollbackMemDBToCheckpoint overrides the Transaction interface. func (txn *LazyTxn) RollbackMemDBToCheckpoint(savepoint *tikv.MemDBCheckpoint) { txn.flushStmtBuf() txn.Transaction.RollbackMemDBToCheckpoint(savepoint) txn.cleanup() } // LockKeys wraps the inner transaction's `LockKeys` to record the status func (txn *LazyTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keys ...kv.Key) error { return txn.LockKeysFunc(ctx, lockCtx, nil, keys...) } // LockKeysFunc Wrap the inner transaction's `LockKeys` to record the status func (txn *LazyTxn) LockKeysFunc(ctx context.Context, lockCtx *kv.LockCtx, fn func(), keys ...kv.Key) error { failpoint.Inject("beforeLockKeys", func() {}) t := time.Now() var originState txninfo.TxnRunningState txn.mu.Lock() originState = txn.mu.TxnInfo.State txn.updateState(txninfo.TxnLockAcquiring) txn.mu.TxnInfo.BlockStartTime.Valid = true txn.mu.TxnInfo.BlockStartTime.Time = t txn.mu.Unlock() lockFunc := func() { if fn != nil { fn() } txn.mu.Lock() defer txn.mu.Unlock() txn.updateState(originState) txn.mu.TxnInfo.BlockStartTime.Valid = false txn.mu.TxnInfo.EntriesCount = uint64(txn.Transaction.Len()) } return txn.Transaction.LockKeysFunc(ctx, lockCtx, lockFunc, keys...) 
} // StartFairLocking wraps the inner transaction to support using fair locking with lazy initialization. func (txn *LazyTxn) StartFairLocking() error { if txn.Valid() { return txn.Transaction.StartFairLocking() } else if !txn.pending() { err := errors.New("trying to start fair locking on a transaction in invalid state") logutil.BgLogger().Error("unexpected error when starting fair locking", zap.Error(err), zap.Stringer("txn", txn)) return err } txn.enterFairLockingOnValid = true return nil } // RetryFairLocking wraps the inner transaction to support using fair locking with lazy initialization. func (txn *LazyTxn) RetryFairLocking(ctx context.Context) error { if txn.Valid() { return txn.Transaction.RetryFairLocking(ctx) } else if !txn.pending() { err := errors.New("trying to retry fair locking on a transaction in invalid state") logutil.BgLogger().Error("unexpected error when retrying fair locking", zap.Error(err), zap.Stringer("txnStartTS", txn)) return err } return nil } // CancelFairLocking wraps the inner transaction to support using fair locking with lazy initialization. func (txn *LazyTxn) CancelFairLocking(ctx context.Context) error { if txn.Valid() { return txn.Transaction.CancelFairLocking(ctx) } else if !txn.pending() { err := errors.New("trying to cancel fair locking on a transaction in invalid state") logutil.BgLogger().Error("unexpected error when cancelling fair locking", zap.Error(err), zap.Stringer("txnStartTS", txn)) return err } if !txn.enterFairLockingOnValid { err := errors.New("trying to cancel fair locking when it's not started") logutil.BgLogger().Error("unexpected error when cancelling fair locking", zap.Error(err), zap.Stringer("txnStartTS", txn)) return err } txn.enterFairLockingOnValid = false return nil } // DoneFairLocking wraps the inner transaction to support using fair locking with lazy initialization. 
func (txn *LazyTxn) DoneFairLocking(ctx context.Context) error { if txn.Valid() { return txn.Transaction.DoneFairLocking(ctx) } if !txn.pending() { err := errors.New("trying to cancel fair locking on a transaction in invalid state") logutil.BgLogger().Error("unexpected error when finishing fair locking") return err } if !txn.enterFairLockingOnValid { err := errors.New("trying to finish fair locking when it's not started") logutil.BgLogger().Error("unexpected error when finishing fair locking") return err } txn.enterFairLockingOnValid = false return nil } // IsInFairLockingMode wraps the inner transaction to support using fair locking with lazy initialization. func (txn *LazyTxn) IsInFairLockingMode() bool { if txn.Valid() { return txn.Transaction.IsInFairLockingMode() } else if txn.pending() { return txn.enterFairLockingOnValid } else { return false } } func (txn *LazyTxn) reset() { txn.cleanup() txn.changeToInvalid() } func (txn *LazyTxn) cleanup() { txn.cleanupStmtBuf() txn.initStmtBuf() for key := range txn.mutations { delete(txn.mutations, key) } } // KeysNeedToLock returns the keys need to be locked. func (txn *LazyTxn) KeysNeedToLock() ([]kv.Key, error) { if txn.stagingHandle == kv.InvalidStagingHandle { return nil, nil } keys := make([]kv.Key, 0, txn.countHint()) buf := txn.Transaction.GetMemBuffer() buf.InspectStage(txn.stagingHandle, func(k kv.Key, flags kv.KeyFlags, v []byte) { if !keyNeedToLock(k, v, flags) { return } keys = append(keys, k) }) return keys, nil } // Wait converts pending txn to valid func (txn *LazyTxn) Wait(ctx context.Context, sctx sessionctx.Context) (kv.Transaction, error) { if !txn.validOrPending() { return txn, errors.AddStack(kv.ErrInvalidTxn) } if txn.pending() { defer func(begin time.Time) { sctx.GetSessionVars().DurationWaitTS = time.Since(begin) }(time.Now()) // Transaction is lazy initialized. 
// PrepareTxnCtx is called to get a tso future, makes s.txn a pending txn, // If Txn() is called later, wait for the future to get a valid txn. if err := txn.changePendingToValid(ctx, sctx); err != nil { logutil.BgLogger().Error("active transaction fail", zap.Error(err)) txn.cleanup() sctx.GetSessionVars().TxnCtx.StartTS = 0 return txn, err } txn.lazyUniquenessCheckEnabled = !sctx.GetSessionVars().ConstraintCheckInPlacePessimistic } return txn, nil } func keyNeedToLock(k, v []byte, flags kv.KeyFlags) bool { isTableKey := bytes.HasPrefix(k, tablecodec.TablePrefix()) if !isTableKey { // meta key always need to lock. return true } // a pessimistic locking is skipped, perform the conflict check and // constraint check (more accurately, PresumeKeyNotExist) in prewrite (or later pessimistic locking) if flags.HasNeedConstraintCheckInPrewrite() { return false } if flags.HasPresumeKeyNotExists() { return true } // lock row key, primary key and unique index for delete operation, if len(v) == 0 { return flags.HasNeedLocked() || tablecodec.IsRecordKey(k) } if tablecodec.IsUntouchedIndexKValue(k, v) { return false } if !tablecodec.IsIndexKey(k) { return true } if tablecodec.IsTempIndexKey(k) { tmpVal, err := tablecodec.DecodeTempIndexValue(v) if err != nil { logutil.BgLogger().Warn("decode temp index value failed", zap.Error(err)) return false } current := tmpVal.Current() return current.Handle != nil || tablecodec.IndexKVIsUnique(current.Value) } return tablecodec.IndexKVIsUnique(v) } func getBinlogMutation(ctx sessionctx.Context, tableID int64) *binlog.TableMutation { bin := binloginfo.GetPrewriteValue(ctx, true) for i := range bin.Mutations { if bin.Mutations[i].TableId == tableID { return &bin.Mutations[i] } } idx := len(bin.Mutations) bin.Mutations = append(bin.Mutations, binlog.TableMutation{TableId: tableID}) return &bin.Mutations[idx] } func mergeToMutation(m1, m2 *binlog.TableMutation) { m1.InsertedRows = append(m1.InsertedRows, m2.InsertedRows...) 
m1.UpdatedRows = append(m1.UpdatedRows, m2.UpdatedRows...) m1.DeletedIds = append(m1.DeletedIds, m2.DeletedIds...) m1.DeletedPks = append(m1.DeletedPks, m2.DeletedPks...) m1.DeletedRows = append(m1.DeletedRows, m2.DeletedRows...) m1.Sequence = append(m1.Sequence, m2.Sequence...) } type txnFailFuture struct{} func (txnFailFuture) Wait() (uint64, error) { return 0, errors.New("mock get timestamp fail") } // txnFuture is a promise, which promises to return a txn in future. type txnFuture struct { future oracle.Future store kv.Storage txnScope string } func (tf *txnFuture) wait() (kv.Transaction, error) { startTS, err := tf.future.Wait() failpoint.Inject("txnFutureWait", func() {}) if err == nil { return tf.store.Begin(tikv.WithTxnScope(tf.txnScope), tikv.WithStartTS(startTS)) } else if config.GetGlobalConfig().Store == "unistore" { return nil, err } logutil.BgLogger().Warn("wait tso failed", zap.Error(err)) // It would retry get timestamp. return tf.store.Begin(tikv.WithTxnScope(tf.txnScope)) } // HasDirtyContent checks whether there's dirty update on the given table. // Put this function here is to avoid cycle import. func (s *session) HasDirtyContent(tid int64) bool { if s.txn.Transaction == nil { return false } seekKey := tablecodec.EncodeTablePrefix(tid) it, err := s.txn.GetMemBuffer().Iter(seekKey, nil) terror.Log(err) return it.Valid() && bytes.HasPrefix(it.Key(), seekKey) } // StmtCommit implements the sessionctx.Context interface. func (s *session) StmtCommit(ctx context.Context) { defer func() { s.txn.cleanup() }() txnManager := sessiontxn.GetTxnManager(s) err := txnManager.OnStmtCommit(ctx) if err != nil { logutil.Logger(ctx).Error("txnManager failed to handle OnStmtCommit", zap.Error(err)) } st := &s.txn st.flushStmtBuf() // Need to flush binlog. for tableID, delta := range st.mutations { mutation := getBinlogMutation(s, tableID) mergeToMutation(mutation, delta) } } // StmtRollback implements the sessionctx.Context interface. 
func (s *session) StmtRollback(ctx context.Context, isForPessimisticRetry bool) { txnManager := sessiontxn.GetTxnManager(s) err := txnManager.OnStmtRollback(ctx, isForPessimisticRetry) if err != nil { logutil.Logger(ctx).Error("txnManager failed to handle OnStmtRollback", zap.Error(err)) } s.txn.cleanup() } // StmtGetMutation implements the sessionctx.Context interface. func (s *session) StmtGetMutation(tableID int64) *binlog.TableMutation { st := &s.txn if _, ok := st.mutations[tableID]; !ok { st.mutations[tableID] = &binlog.TableMutation{TableId: tableID} } return st.mutations[tableID] }
/* * @lc app=leetcode.cn id=942 lang=golang * * [942] 增减字符串匹配 */ package main // @lc code=start func diStringMatch(s string) []int { ret := make([]int, len(s)+1) start := 0 end := len(s) for i := 0; i < len(s); i++ { if s[i] == 'I' { ret[i] = start start++ } else { ret[i] = end end-- } } if s[len(s)-1] == 'I' { ret[len(s)] = ret[len(s)-1] + 1 } else { ret[len(s)] = ret[len(s)-1] - 1 } return ret } // func main() { // fmt.Println(diStringMatch("IDID"), diStringMatch("III"), diStringMatch("DDI")) // } // @lc code=end
package resources

import (
	"errors"
	"net/http"

	"github.com/manyminds/api2go"
	"gopkg.in/mgo.v2/bson"

	"themis/database"
	"themis/models"
	"themis/utils"
)

// AreaResource for api2go routes.
type AreaResource struct {
	AreaStorage     *database.AreaStorage
	WorkItemStorage *database.WorkItemStorage
}

// rootAreaCode is the NestedEntityError code used internally to signal
// that the referenced area is the root area and therefore has no parent.
// (Replaces the magic number 42 that was repeated at three call sites.)
const rootAreaCode = 42

// getFilterFromRequest derives a mongo filter from the request's
// reference context. A returned NestedEntityError with Code ==
// rootAreaCode marks the "root area has no parent" case; Code 0
// carries the underlying storage error in InnerError.
func (c AreaResource) getFilterFromRequest(r api2go.Request) (bson.M, *utils.NestedEntityError) {
	var filter bson.M
	// Getting reference context
	sourceContext, sourceContextID, thisContext := utils.ParseContext(r)
	switch sourceContext {
	case models.AreaName:
		entity, err := c.AreaStorage.GetOne(bson.ObjectIdHex(sourceContextID))
		if err != nil {
			return nil, &utils.NestedEntityError{InnerError: err, Code: 0}
		}
		if thisContext == "parent" {
			if entity.ParentAreaID.Hex() == "" {
				// this is the root area
				return nil, &utils.NestedEntityError{InnerError: nil, Code: rootAreaCode}
			}
			filter = bson.M{"_id": entity.ParentAreaID}
		}
	case models.WorkItemName:
		entity, err := c.WorkItemStorage.GetOne(bson.ObjectIdHex(sourceContextID))
		if err != nil {
			return nil, &utils.NestedEntityError{InnerError: err, Code: 0}
		}
		if thisContext == "area" {
			filter = bson.M{"_id": entity.AreaID}
		}
	default:
		// build standard filter expression
		filter = utils.BuildDbFilterFromRequest(r)
	}
	return filter, nil
}

// FindAll Areas.
func (c AreaResource) FindAll(r api2go.Request) (api2go.Responder, error) {
	// build filter expression
	filter, nestedErr := c.getFilterFromRequest(r)
	if nestedErr != nil {
		if nestedErr.Code == rootAreaCode {
			// this is the root area: no parent exists, return an empty result
			var empty []models.Area
			return &api2go.Response{Res: empty}, nil
		}
		return &api2go.Response{}, nestedErr.InnerError
	}
	areas, err := c.AreaStorage.GetAll(filter)
	if err != nil {
		// bug fix: the storage error was previously discarded with `_`
		return &api2go.Response{}, err
	}
	return &api2go.Response{Res: areas}, nil
}

// PaginatedFindAll can be used to load users in chunks.
// Possible success status code 200.
func (c AreaResource) PaginatedFindAll(r api2go.Request) (uint, api2go.Responder, error) {
	// build filter expression
	filter, nestedErr := c.getFilterFromRequest(r)
	if nestedErr != nil {
		if nestedErr.Code == rootAreaCode {
			// this is the root area: no parent exists, return an empty page
			var empty []models.Area
			return 0, &api2go.Response{Res: empty}, nil
		}
		return 0, &api2go.Response{}, nestedErr.InnerError
	}
	// parse out offset and limit
	queryOffset, queryLimit, err := utils.ParsePaging(r)
	if err != nil {
		return 0, &api2go.Response{}, err
	}
	// get the paged data from storage
	result, err := c.AreaStorage.GetAllPaged(filter, queryOffset, queryLimit)
	if err != nil {
		return 0, &api2go.Response{}, err
	}
	// get total count for paging
	allCount, err := c.AreaStorage.GetAllCount(filter)
	if err != nil {
		return 0, &api2go.Response{}, err
	}
	// return everything
	return uint(allCount), &api2go.Response{Res: result}, nil
}

// FindOne Area.
// Possible success status code 200
func (c AreaResource) FindOne(id string, r api2go.Request) (api2go.Responder, error) {
	utils.DebugLog.Printf("Received FindOne with ID %s.", id)
	// guard: bson.ObjectIdHex panics on malformed IDs; reject with 400 instead
	if !bson.IsObjectIdHex(id) {
		return &api2go.Response{}, api2go.NewHTTPError(errors.New("Invalid ID given"),
			"Invalid ID given", http.StatusBadRequest)
	}
	res, err := c.AreaStorage.GetOne(bson.ObjectIdHex(id))
	return &api2go.Response{Res: res}, err
}

// Create a new Area.
// Possible status codes are:
// - 201 Created: Resource was created and needs to be returned
// - 202 Accepted: Processing is delayed, return nothing
// - 204 No Content: Resource created with a client generated ID, and no fields were modified by
//   the server
func (c AreaResource) Create(obj interface{}, r api2go.Request) (api2go.Responder, error) {
	area, ok := obj.(models.Area)
	if !ok {
		return &api2go.Response{}, api2go.NewHTTPError(errors.New("Invalid instance given"),
			"Invalid instance given", http.StatusBadRequest)
	}
	id, err := c.AreaStorage.Insert(area)
	if err != nil {
		// bug fix: the storage error was previously discarded with `_`
		return &api2go.Response{}, err
	}
	area.ID = id
	return &api2go.Response{Res: area, Code: http.StatusCreated}, nil
}

// Delete a Area.
// Possible status codes are:
// - 200 OK: Deletion was a success, returns meta information, currently not implemented! Do not use this
// - 202 Accepted: Processing is delayed, return nothing
// - 204 No Content: Deletion was successful, return nothing
func (c AreaResource) Delete(id string, r api2go.Request) (api2go.Responder, error) {
	// guard: bson.ObjectIdHex panics on malformed IDs; reject with 400 instead
	if !bson.IsObjectIdHex(id) {
		return &api2go.Response{}, api2go.NewHTTPError(errors.New("Invalid ID given"),
			"Invalid ID given", http.StatusBadRequest)
	}
	err := c.AreaStorage.Delete(bson.ObjectIdHex(id))
	return &api2go.Response{Code: http.StatusOK}, err
}

// Update a Area.
// Possible status codes are:
// - 200 OK: Update successful, however some field(s) were changed, returns updates source
// - 202 Accepted: Processing is delayed, return nothing
// - 204 No Content: Update was successful, no fields were changed by the server, return nothing
func (c AreaResource) Update(obj interface{}, r api2go.Request) (api2go.Responder, error) {
	area, ok := obj.(models.Area)
	if !ok {
		return &api2go.Response{}, api2go.NewHTTPError(errors.New("Invalid instance given"),
			"Invalid instance given", http.StatusBadRequest)
	}
	err := c.AreaStorage.Update(area)
	return &api2go.Response{Res: area, Code: http.StatusNoContent}, err
}
// test-quickSort project doc.go
//
// This file exists only to carry the package documentation below; it
// contains no code.

/*
Package main is the entry point of the test-quickSort project.

test-quickSort document
*/
package main
package main

import (
	"fmt"
)

// main demonstrates a switch whose case clause lists several values
// at once: 2 matches the 1, 2, 3 case, so the first branch prints.
func main() {
	switch n := 2; n {
	case 1, 2, 3:
		fmt.Println("one, two, three")
	default:
		fmt.Println("something else")
	}
}
// Copyright 2020, Homin Lee <homin.lee@suapapa.net>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Demo program for the max7219 LED-driver package: it runs the chip's
// built-in display test for one second, then writes a single value to
// one digit register.
package main

import (
	"time"

	"github.com/suapapa/go_devices/max7219"
	"periph.io/x/conn/v3/spi/spireg"
	"periph.io/x/host/v3"
)

func main() {
	// Initialize periph.io host drivers; required before opening buses.
	_, err := host.Init()
	chk(err)

	// Open an SPI port ("" selects the first available one).
	bus, err := spireg.Open("")
	chk(err)

	dev, err := max7219.New(bus)
	chk(err)

	// Enable the MAX7219 display-test mode (all segments on) for one
	// second, then switch back to normal operation.
	dev.DisplayTest(true)
	time.Sleep(1 * time.Second)
	dev.DisplayTest(false)

	// dev.Shutdown(true)
	// time.Sleep(1 * time.Second)
	// dev.Shutdown(false)

	// Write value 1<<5 to register 3 — presumably lights a single LED
	// at that row/column in a matrix wiring; confirm against the
	// max7219 package documentation.
	dev.Write(3, 1<<5)
}

// chk panics on any non-nil error; acceptable for a short demo program.
func chk(err error) {
	if err != nil {
		panic(err)
	}
}
package example

import (
	"time"

	"github.com/asaskevich/govalidator"
)

// ExampleModel is a sample database-backed record with audit timestamps.
// Title is marked required for struct validation.
type ExampleModel struct {
	ID        int       `db:"id"`
	Title     string    `db:"title" valid:"required"`
	CreatedAt time.Time `db:"created_at"`
	UpdatedAt time.Time `db:"updated_at"`
}

// Validate runs govalidator over the struct tags and reports the first
// validation failure, or nil when the model is valid.
func (t *ExampleModel) Validate() error {
	_, err := govalidator.ValidateStruct(t)
	return err
}

// Add some other methods to transform data ...
package util

import (
	"time"
	//"fmt"
)

// DateFormat is the layout used by FormatDate (ISO 8601 calendar date).
const DateFormat = "2006-01-02"

// DefaultDate is the zero time, returned when a label cannot be parsed.
var DefaultDate time.Time = time.Time{}

// Days maps a month number (1-12) to its day count in a non-leap year;
// index 0 is an unused sentinel.
var Days = [13]int{-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}

// ParseDate parses a label of the form yyyyMM and returns the last day
// of that month at midnight UTC. DefaultDate is returned for an empty
// or malformed label.
//
// The @label is like yyyyMM.
func ParseDate(label string) time.Time {
	t := DefaultDate
	if len(label) == 0 {
		return t
	}
	// A well-formed label is exactly six characters; anything shorter
	// made the slicing below panic in the original code, anything
	// longer produced a garbage month.
	if len(label) != 6 {
		NewLog().Error("Cannot parse the date: ", label)
		return t
	}
	ystr := label[:4]
	mstr := label[4:]

	year := ToInt(ystr)
	month := ToInt(mstr)
	day := LastDay(year, month)
	if day < 1 {
		NewLog().Error("Cannot parse the date: ", label)
		// Return the zero value instead of constructing a date from
		// invalid components (the original fell through to time.Date).
		return t
	}
	t = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
	return t
}

// IsLeapYear reports whether year is a Gregorian leap year.
func IsLeapYear(year int) bool {
	return (year%400) == 0 || ((year%100) != 0 && (year%4) == 0)
}

// LastDay returns the number of days in the given month of the given
// year, or 0 when month is outside 1-12. (The original indexed Days
// directly and panicked with "index out of range" on bad months,
// before its day < 1 guard could ever fire.)
func LastDay(year, month int) int {
	if month < 1 || month > 12 {
		return 0
	}
	day := Days[month]
	if IsLeapYear(year) && month == 2 {
		day++
	}
	return day
}

// FormatDate renders date using DateFormat.
func FormatDate(date time.Time) string {
	return date.Format(DateFormat)
}
package logging

import (
	"fmt"
	"log"
	"os"
)

// LoggerWrapper decorates a *log.Logger so that the file/line produced
// by log.Lshortfile points at the caller of the wrapper method rather
// than at the wrapper itself (hence the explicit calldepth arguments
// passed to write).
type LoggerWrapper struct {
	logger *log.Logger
}

// Predefined severity loggers. Error writes to stderr; Info, Trace and
// Warning write to stdout. All stamp date, time, and short file/line.
var (
	Error = &LoggerWrapper{
		logger: log.New(os.Stderr, "ERROR   ", log.Ldate|log.Ltime|log.Lshortfile),
	}
	Info = &LoggerWrapper{
		logger: log.New(os.Stdout, "INFO    ", log.Ldate|log.Ltime|log.Lshortfile),
	}
	Trace = &LoggerWrapper{
		logger: log.New(os.Stdout, "TRACE   ", log.Ldate|log.Ltime|log.Lshortfile),
	}
	Warning = &LoggerWrapper{
		logger: log.New(os.Stdout, "WARNING ", log.Ldate|log.Ltime|log.Lshortfile),
	}
)

// Println logs its operands in the manner of fmt.Sprintln.
// Bug fix: the original used fmt.Sprintf("%v", v...), which formats
// only the first operand and renders the rest as "%!(EXTRA ...)".
func (lw *LoggerWrapper) Println(v ...interface{}) {
	lw.write(3, fmt.Sprintln(v...))
}

// Printf logs a message formatted in the manner of fmt.Sprintf.
func (lw *LoggerWrapper) Printf(format string, v ...interface{}) {
	lw.write(3, fmt.Sprintf(format, v...))
}

// Fatal logs its operands (fmt.Sprint style) and exits with status 1.
// Bug fix: same fmt.Sprintf("%v", v...) multi-operand defect as Println.
func (lw *LoggerWrapper) Fatal(v ...interface{}) {
	lw.write(3, fmt.Sprint(v...))
	os.Exit(1)
}

// Fatalf logs a formatted message and exits with status 1.
func (lw *LoggerWrapper) Fatalf(format string, v ...interface{}) {
	lw.write(3, fmt.Sprintf(format, v...))
	os.Exit(1)
}

// Panic logs its operands (fmt.Sprint style) and then panics with the
// same message. Bug fix: same multi-operand defect as Println.
func (lw *LoggerWrapper) Panic(v ...interface{}) {
	s := fmt.Sprint(v...)
	lw.write(3, s)
	panic(s)
}

// Panicf logs a formatted message and then panics with it.
func (lw *LoggerWrapper) Panicf(format string, v ...interface{}) {
	s := fmt.Sprintf(format, v...)
	lw.write(3, s)
	panic(s)
}

// Output writes s at the given call depth, mirroring log.Logger.Output;
// the +1 accounts for this wrapper's own frame.
func (lw *LoggerWrapper) Output(calldepth int, s string) error {
	return lw.write(calldepth+1, s)
}

// write forwards to the underlying logger with an explicit call depth
// so Lshortfile resolves to the external call site.
func (lw *LoggerWrapper) write(calldepth int, line string) error {
	return lw.logger.Output(calldepth, line)
}
package main import ( "database/sql" "fmt" "github.com/glaslos/ssdeep" _ "github.com/mattn/go-sqlite3" "log" "strconv" ) var db *sql.DB const ( DB_PATH = "./data.db" // hashtypes HASH_HTML_SSDEEP = 0 HASH_IMAGE_SSDEEP = 1 HASH_EDGES_SSDEEP = 2 HASH_HEADER_SSDEEP = 3 HASH_IMAGE_PHASH = 4 HASH_EDGES_PHASH = 5 HASH_HEADER_PHASH = 6 ) func ConnectDB() { db, _ = sql.Open("sqlite3", DB_PATH) statement, _ := db.Prepare("CREATE TABLE IF NOT EXISTS hashes (subdomain VARCHAR(128), domain VARCHAR(128), path CARCHAR(128), hashtype INT, hash VARCHAR(128), safe INT)") statement.Exec() } func CloseDB() { db.Close() } // Stores hash for the domain func InsertHashes(subdomain, domain, path, hash_html_ssdeep, hash_image_ssdeep, hash_edges_ssdeep, hash_header_ssdeep, hash_image_phash, hash_edges_phash, hash_header_phash string, safe int) { rows, _ := db.Query("SELECT hashtype, hash FROM hashes WHERE subdomain=? AND domain=? AND path=?", subdomain, domain, path) defer rows.Close() var t int var h string for rows.Next() { rows.Scan(&t, &h) if ((t == HASH_HTML_SSDEEP) && (h == hash_html_ssdeep)) || ((t == HASH_IMAGE_SSDEEP) && (h == hash_image_ssdeep)) || ((t == HASH_EDGES_SSDEEP) && (h == hash_edges_ssdeep)) || ((t == HASH_HEADER_SSDEEP) && (h == hash_header_ssdeep)) || ((t == HASH_IMAGE_PHASH) && (h == hash_image_phash)) || ((t == HASH_EDGES_PHASH) && (h == hash_edges_phash)) || ((t == HASH_HEADER_PHASH) && (h == hash_header_phash)) { return } } statement, _ := db.Prepare("INSERT INTO hashes (subdomain, domain, path, hashtype, hash, safe) VALUES (?, ?, ?, ?, ?, ?)") _, err := statement.Exec(subdomain, domain, path, HASH_HTML_SSDEEP, hash_html_ssdeep, safe) if err != nil { log.Printf("Error: %v\n", err) } _, err = statement.Exec(subdomain, domain, path, HASH_IMAGE_SSDEEP, hash_image_ssdeep, safe) if err != nil { log.Printf("Error: %v\n", err) } _, err = statement.Exec(subdomain, domain, path, HASH_EDGES_SSDEEP, hash_edges_ssdeep, safe) if err != nil { log.Printf("Error: %v\n", err) } 
_, err = statement.Exec(subdomain, domain, path, HASH_HEADER_SSDEEP, hash_header_ssdeep, safe) if err != nil { log.Printf("Error: %v\n", err) } _, err = statement.Exec(subdomain, domain, path, HASH_IMAGE_PHASH, hash_image_phash, safe) if err != nil { log.Printf("Error: %v\n", err) } _, err = statement.Exec(subdomain, domain, path, HASH_EDGES_PHASH, hash_edges_phash, safe) if err != nil { log.Printf("Error: %v\n", err) } _, err = statement.Exec(subdomain, domain, path, HASH_HEADER_PHASH, hash_header_phash, safe) if err != nil { log.Printf("Error: %v\n", err) } } func UpdateDomainStatus(domain string, safe int) { statement, _ := db.Prepare("UPDATE hashes SET safe=? WHERE domain=?") _, err := statement.Exec(safe, domain) if err != nil { log.Printf("Error: %v\n", err) } } func HashMatch(domain, hash_html_ssdeep, hash_image_ssdeep, hash_edges_ssdeep, hash_header_ssdeep, hash_image_phash, hash_edges_phash, hash_header_phash string) (string, string, int) { rows, _ := db.Query("SELECT subdomain, domain, path, hashtype, hash FROM hashes WHERE domain<>?", domain) defer rows.Close() var sd, d, p, h string var t int for rows.Next() { rows.Scan(&sd, &d, &p, &t, &h) switch t { case HASH_HTML_SSDEEP: if hash_html_ssdeep != "" { score, _ := ssdeep.Distance(h, hash_html_ssdeep) log.Printf("%sHTML Score %s/%s vs %s = %v%s", COLOR_SCAN, d, p, domain, score, COLOR_RESET) if score >= THRESHOLD_HTML_SSDEEP { return fmt.Sprintf("%s.%s/%s", sd, d, p), "HTML_SSDEEP", score } } case HASH_IMAGE_SSDEEP: if hash_image_ssdeep != "" { score, _ := ssdeep.Distance(h, hash_image_ssdeep) log.Printf("%sImage Score %s/%s vs %s = %v%s", COLOR_SCAN, d, p, domain, score, COLOR_RESET) if score >= THRESHOLD_IMAGE_SSDEEP { return fmt.Sprintf("%s.%s/%s", sd, d, p), "IMAGE_SSDEEP", score } } case HASH_EDGES_SSDEEP: if hash_edges_ssdeep != "" { score, _ := ssdeep.Distance(h, hash_edges_ssdeep) log.Printf("%sEdge Score %s/%s vs %s = %v%s", COLOR_SCAN, d, p, domain, score, COLOR_RESET) if score >= 
THRESHOLD_EDGES_SSDEEP { return fmt.Sprintf("%s.%s/%s", sd, d, p), "EDGE_SSDEEP", score } } case HASH_HEADER_SSDEEP: if hash_header_ssdeep != "" { score, _ := ssdeep.Distance(h, hash_header_ssdeep) log.Printf("%sHead Score %s/%s vs %s = %v%s", COLOR_SCAN, d, p, domain, score, COLOR_RESET) if score >= THRESHOLD_HEADER_SSDEEP { return fmt.Sprintf("%s.%s/%s", sd, d, p), "HEAD_SSDEEP", score } } case HASH_IMAGE_PHASH: h2, _ := strconv.ParseUint(h, 10, 64) h3, _ := strconv.ParseUint(hash_image_phash, 10, 64) if hash_image_phash != "" { score := phashScore(h2, h3) log.Printf("%sPHash Image Score %s/%s vs %s = %v%s", COLOR_SCAN, d, p, domain, score, COLOR_RESET) if score >= THRESHOLD_IMAGE_PHASH { return fmt.Sprintf("%s.%s/%s", sd, d, p), "IMAGE_PHASH", score } } case HASH_EDGES_PHASH: h2, _ := strconv.ParseUint(h, 10, 64) h3, _ := strconv.ParseUint(hash_edges_phash, 10, 64) if hash_edges_phash != "" { score := phashScore(h2, h3) log.Printf("%sPHash Edge Score %s/%s vs %s = %v%s", COLOR_SCAN, d, p, domain, score, COLOR_RESET) if score >= THRESHOLD_EDGES_PHASH { return fmt.Sprintf("%s.%s/%s", sd, d, p), "EDGE_PHASH", score } } case HASH_HEADER_PHASH: h2, _ := strconv.ParseUint(h, 10, 64) h3, _ := strconv.ParseUint(hash_header_phash, 10, 64) if hash_header_phash != "" { score := phashScore(h2, h3) log.Printf("%sPHash Head Score %s/%s vs %s = %v%s", COLOR_SCAN, d, p, domain, score, COLOR_RESET) if score >= THRESHOLD_HEADER_PHASH { return fmt.Sprintf("%s.%s/%s", sd, d, p), "HEAD_PHASH", score } } } } return "", "", 0 } // Returns 0 if domain not in db, 1 if marked as unsafe, and 2 if marked as safe func DomainStatus(domain string) int { rows, _ := db.Query("SELECT safe FROM hashes WHERE domain=?", domain) defer rows.Close() var safe int for rows.Next() { rows.Scan(&safe) if safe == 0 { return 1 } return 2 } return 0 } func SiteStatus(subdomain string, domain string, path string) int { rows, _ := db.Query("SELECT safe from hashes WHERE subdomain=? AND domain=? 
AND path=?", subdomain, domain, path) defer rows.Close() var safe int for rows.Next() { rows.Scan(&safe) if safe == 0 { return 1 } return 2 } return 0 } func SiteExistsDB(subdomain string, domain string, path string) bool { rows, err := db.Query("SELECT * FROM hashes WHERE subdomain=? AND domain=? AND path=?", subdomain, domain, path) if err != nil { log.Printf("Error: %v\n", err) return false } defer rows.Close() for rows.Next() { return true } return false }
package game_map

import (
	"github.com/steelx/go-rpg-cgm/state_machine"
)

// CharacterStateBase bundles the references a character state
// implementation works with: the character, the map it is on, its
// entity, and the state machine driving it.
type CharacterStateBase struct {
	Character  *Character
	Map        *GameMap
	Entity     *Entity
	Controller *state_machine.StateMachine
}

// Character is a map actor: an Entity plus animation data, a facing
// direction, and a state machine that drives its behavior.
type Character struct {
	Id         string
	Anims      map[string][]int
	Facing     string
	Entity     *Entity
	Controller *state_machine.StateMachine //[Name] -> [function that returns state]

	DefaultState, PrevDefaultState string //"wait"
	PathIndex                      int
	Path                           []string //e.g. ["up", "up", "up", "left", "right", "right",]
	TalkIndex                      int      //used during speech tracking
}

// CharacterCreate builds a Character from its definition: it creates
// the entity, wires up a state machine from controllerStates, and
// copies the animation table into a fresh map (the []int frame slices
// themselves remain shared with the definition).
func CharacterCreate(def CharacterDefinition, controllerStates map[string]func() state_machine.State) *Character {
	ch := &Character{
		Id:           def.Id,
		Facing:       def.FacingDirection,
		Entity:       CreateEntity(def.EntityDef),
		Controller:   state_machine.Create(controllerStates),
		DefaultState: def.DefaultState,
	}
	ch.Anims = make(map[string][]int)
	for k, v := range def.Animations {
		ch.Anims[k] = v
	}
	return ch
}

// GetFacedTileCoords returns the tile coordinates of the tile directly
// in front of the character. Per the inline comments, the indices of
// CharacterFacingDirection are 0=up, 1=right, 2=down, 3=left; an
// unrecognized Facing yields the character's own tile (zero offsets).
func (ch Character) GetFacedTileCoords() (x, y float64) {
	var xOff, yOff float64 = 0, 0
	if ch.Facing == CharacterFacingDirection[3] {
		xOff = -1 //"left"
	} else if ch.Facing == CharacterFacingDirection[1] {
		xOff = 1 //"right"
	} else if ch.Facing == CharacterFacingDirection[0] {
		yOff = -1 //"up"
	} else if ch.Facing == CharacterFacingDirection[2] {
		yOff = 1 //"down"
	}

	x = ch.Entity.TileX + xOff
	y = ch.Entity.TileY + yOff
	return
}

// SetFacing sets the facing direction by its index into
// CharacterFacingDirection.
func (ch *Character) SetFacing(dir int) {
	ch.Facing = CharacterFacingDirection[dir]
}

// FollowPath resets path progress and switches the character into the
// "follow_path" state to walk the given direction steps.
func (ch *Character) FollowPath(path []string) {
	ch.PathIndex = 0
	ch.Path = path

	//ch.PrevDefaultState = ch.DefaultState //this is causing problem
	// NOTE(review): the previous default state is hard-coded to "wait"
	// rather than the actual DefaultState — see the commented line above.
	ch.PrevDefaultState = "wait"
	ch.DefaultState = "follow_path"
	ch.Controller.Change("follow_path", nil)
}

// GetCombatAnim returns the animation frames registered under id, or a
// single-frame fallback (the entity's start frame) when id is unknown.
func (ch *Character) GetCombatAnim(id string) []int {
	if anims, ok := ch.Anims[id]; ok {
		return anims
	}
	return []int{ch.Entity.StartFrame}
}
// Copyright (c) 2018 John Dewey

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

package repositories

// configSchema is the JSON Schema (draft-07) validating the repositories
// configuration: a non-empty array of repository entries. Each entry
// requires "git" plus "version" (5-40 lowercase hex characters, i.e. a
// commit SHA) together with either a top-level "dstDir" or a "sources"
// list; each source pairs "src" with exactly one of "dstFile" or
// "dstDir".
const configSchema = `
{
  "type": "array",
  "$schema": "http://json-schema.org/draft-07/schema#",
  "minItems": 1,
  "items": {
    "type": "object",
    "additionalProperties": false,
    "properties": {
      "git": {
        "type": "string"
      },
      "version": {
        "type": "string",
        "pattern": "^[0-9a-f]{5,40}$"
      },
      "dstDir": {
        "type": "string"
      },
      "sources": {
        "type": "array",
        "items": {
          "type": "object",
          "additionalProperties": false,
          "properties": {
            "src": {
              "type": "string"
            },
            "dstFile": {
              "type": "string"
            },
            "dstDir": {
              "type": "string"
            }
          },
          "oneOf": [
            {
              "required": [
                "src",
                "dstFile"
              ]
            },
            {
              "required": [
                "src",
                "dstDir"
              ]
            }
          ]
        }
      }
    },
    "oneOf": [
      {
        "required": [
          "git",
          "version",
          "dstDir"
        ]
      },
      {
        "required": [
          "git",
          "version",
          "sources"
        ]
      }
    ]
  }
}
`
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// main registers the signup/login/protected routes and starts the
// HTTP server on port 8000.
func main() {
	router := mux.NewRouter()

	router.HandleFunc("/signup", signup).Methods("POST")
	router.HandleFunc("/login", login).Methods("POST")
	router.HandleFunc("/protected", TokenVerifyMiddleWare(protectedEndpoint)).Methods("GET")

	log.Println("Listen on port 8000...")
	log.Fatal(http.ListenAndServe(":8000", router))
}

// signup handles POST /signup (stub: acknowledges the call).
func signup(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Signup invoked")
	w.Write([]byte("succesfully called signup"))
}

// login handles POST /login (stub: acknowledges the call).
func login(w http.ResponseWriter, r *http.Request) {
	fmt.Println("login invoked")
	w.Write([]byte("succesfully called login"))
}

// protectedEndpoint handles GET /protected once the middleware has
// let the request through.
func protectedEndpoint(w http.ResponseWriter, r *http.Request) {
	fmt.Println("protected invoked")
	w.Write([]byte("succesfully called protectedEndpoint"))
}

// TokenVerifyMiddleWare wraps next with token verification.
// Bug fix: the original logged once at registration time and returned
// nil, so the /protected route had no working handler and next was
// never invoked. The wrapper now runs per request and delegates to
// next.
func TokenVerifyMiddleWare(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("TokenVerifyMiddleWare invoked")
		// TODO: validate the request's token here and reply with
		// http.StatusUnauthorized on failure before calling next.
		next(w, r)
	}
}
package main import "github.com/gin-gonic/gin" func main() { // Creates a router without any middleware by default r := gin.New() // By default gin.DefaultWriter = os.Stdout r.Use(gin.Logger()) // Recovery middleware recovers from any panics and writes a 500 if there was one. r.Use(gin.Recovery()) r.GET("/ping", func(c *gin.Context) { c.JSON(200, gin.H{ "message": "pong", }) }) // Listen and serve on 0.0.0.0:8080 r.Run(":8080") }
package redis

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// Test exercises the redis client end to end against a local server at
// localhost:6379: plain and pipelined requests, the Set/Get/MSet/MGet
// helpers, and server-side error propagation.
func Test(t *testing.T) {
	client := Start("localhost:6379")
	assert := assert.New(t)
	err := client.Connect()
	assert.NoError(err)

	t.Run("test request,response", func(t *testing.T) {
		err := client.Request("SET", "test", "ICE baby")
		if !assert.NoError(err) {
			return
		}
		err = client.PipeliningRequest([]string{"SET", "pipeling", "10"},
			[]string{"INCR", "pipeling"},
			[]string{"INCR", "pipeling"})
		if !assert.NoError(err) {
			return
		}
		err = client.Request("GET", "pipeling")
		if !assert.NoError(err) {
			return
		}
		err = client.PipeliningRequest([]string{"SET", "testErr", "NEW"},
			[]string{"GET", "testErr"},
			[]string{"GET", "test"})
		if !assert.NoError(err) {
			return
		}
		data, errs := client.Response()
		// FIX: this used to test the outer `err` (the Connect error, always
		// nil here) instead of the `errs` just returned by Response.
		if data == nil && errs != nil {
			assert.Error(errs[0])
			return
		}
		if errs != nil {
			for _, k := range errs {
				fmt.Println(k)
			}
		}
		for _, k := range data {
			fmt.Println("data:", k)
		}
		// resp, errs, n := arrayParser([]byte("*3\r\n*3\r\n:1\r\n:2\r\n:3\r\n*2\r\n+Foo\r\n-Bar\r\n*2\r\n$4\r\nWoop\r\n$5\r\nWoop2\r\n"))
		// if errs != nil {
		// 	for _, k := range errs {
		// 		fmt.Println("errs:", k)
		// 	}
		// }
		// fmt.Println("Resp:", resp, "n:", n)
		// resp, errs, n = arrayParser([]byte("*3\r\n*0\r\n*-1\r\n*2\r\n$0\r\n$-1\r\n"))
		// if errs != nil {
		// 	for _, k := range errs {
		// 		fmt.Println("errs:", k)
		// 	}
		// }
		// fmt.Println("Resp:", resp, "n:", n)
	})

	t.Run("test Set,Get,MSet,MGet", func(t *testing.T) {
		err := client.Set("dyno", "Tyrannosaurus")
		if !assert.NoError(err) {
			return
		}
		resp, errs := client.Get("dyno")
		// FIX: this used to test the outer `err` instead of the `errs`
		// returned by Get, so Get failures were silently ignored.
		if errs != nil {
			for _, k := range errs {
				fmt.Println(k)
			}
		}
		fmt.Println("dyno:", resp)

		// An odd-length key/value list must be rejected with a parity error.
		err = client.MSet([]string{"dyno1", "Dilophosaurus", "dyno2"})
		assert.Equal(err.Error(), `Parity Error: "lacks key or value"`)

		err = client.MSet([]string{"dyno1", "Dilophosaurus", "dyno2", "Ceratopsia", "total dyno", "3"})
		if !assert.NoError(err) {
			return
		}
		data, errs := client.MGet([]string{"dyno1", "dyno2", "total dyno"})
		if errs != nil {
			for _, k := range errs {
				fmt.Println("ERROR:", k)
			}
		}
		for _, k := range data {
			fmt.Println("DATA:", k)
		}
	})

	t.Run("test error", func(t *testing.T) {
		// INCR on a non-numeric value must surface the server error.
		err := client.Request("INCR", "test")
		if !assert.NoError(err) {
			return
		}
		_, errs := client.Response()
		assert.Equal(errs[0].Error(), "ERR value is not an integer or out of range")
	})

	err = client.CloseConnection()
	assert.NoError(err)
}
// // Brian Bulkowski copywrite 2015 // // I found out that when we built the cheesecave project, we tried to use YAML, // but the YAML we built was broken. And YAML is far out of favor now. // We wanted to use YAML because it seemed to have better streaming support. // json seems to have decent streaming support by taking your objects and putting them all on one line. // This is OK for an embedded system because sometimes a power fault gives you corrupt data. // a system that will simply skip data points when it hits a bad line then move on // to the next seems much better. // And yet, even though I thought all the world might be ruled by JSON, I find that the world is still ruled // by CSV. Let's output a CSV file. package main import "flag" import "fmt" import "log" import "gopkg.in/yaml.v2" import "os" import "bufio" import "strings" var testData = ` { sensor: "sensor1" , time: "2015-02-03T06:13:08UTC" , epoch: 1422943988, temperature: 42.7, celsius: 5.9, humidity: 78.6 } ` type T struct { Sensor string Time string Epoch int Temperature float32 Celsius float32 Humidity float32 } // read all the lines in the input file func process(iFilename string, oFilename string, highLimitT float32, lowLimitT float32) { // Input file in_file, err := os.Open(iFilename) if err != nil { log.Fatal(err) } defer in_file.Close() // output file out_file, err2 := os.Create(oFilename) if err2 != nil { log.Fatal(err2) } defer out_file.Close() // output a leading line of information out_file.WriteString("sensor,time,epoch,temperature,celsius,humidity\n" ) // var lineNum int = 0 var outlines int = 0 // scan through the input file, look at the lines scanner := bufio.NewScanner(in_file) for scanner.Scan() { lineNum++ var line string = scanner.Text() // in_file was written in wrong form, wanted streaming // objects, not a huge array line = strings.TrimPrefix(line, "- ") //fmt.Println(Line) t := T{Sensor: "null", Time: "null"} err := yaml.Unmarshal([]byte(line), &t) if err != nil { // found 
cases of bad characters, want to just skip to next line log.Printf("error: %v: continuing\n",err) } // else { // fmt.Printf("success: %v\n",t) // } // clean data. If abover or below limits, skip if t.Temperature > highLimitT { continue } if t.Temperature < lowLimitT { continue } // for each line, drop a CSV line outlines++ fmt.Fprintf(out_file, "%s, %s, %d,%.2f,%.2f,%.2f\n", t.Sensor, t.Time, t.Epoch, t.Temperature, t.Celsius, t.Humidity ) } if err := scanner.Err(); err != nil { log.Fatal(err) } fmt.Printf(" found and parsed %v lines; output %v lines\n", lineNum,outlines) } func test() { // try some testdata t := T{Sensor: "null", Time: "null"} log.Printf(" try this testdata %v\n",testData) err := yaml.Unmarshal([]byte(testData), &t) if err != nil { log.Fatalf("testdata unmarshal error: %v\n", err) } fmt.Printf("--- testdata t:\n%v\nsensorname %s\n", t, t.Sensor) } func main() { iFilenamePtr := flag.String("i", "/Users/brian/CheeseCave/pi1/sensor1-history.yaml", "inputFile") oFilenamePtr := flag.String("o","/Users/brian/CheeseCave/pi1/sensor1-history.csv", "outputFile") tempHighLimitPtr := flag.Int("hl", 100, "high temp limit in F to filter point out") tempLowLimitPtr := flag.Int("ll", 32, "low temp limit in F to filter point out") flag.Parse() fmt.Println("input: ", *iFilenamePtr) fmt.Println("output: ", *oFilenamePtr) fmt.Println("tempHighLimit", *tempHighLimitPtr) fmt.Println("tempLowLimit ", *tempLowLimitPtr) process(*iFilenamePtr, *oFilenamePtr, float32(*tempHighLimitPtr), float32(*tempLowLimitPtr)) }
package dialog

import (
	"reflect"
	"testing"
)

// TestMakingKeywordsMap verifies that makeKeywordsMap turns a slice of
// keywords into a set-style map where every keyword maps to true.
func TestMakingKeywordsMap(t *testing.T) {
	keywords := []string{"a", "b", "c"}
	expectedMap := map[string]bool{
		"a": true,
		"b": true,
		"c": true,
	}

	calculatedMap := makeKeywordsMap(keywords)

	if !reflect.DeepEqual(calculatedMap, expectedMap) {
		// FIX: corrected the "keywods" typo in the failure message.
		t.Errorf("error making map from keywords %v: expected %v, got %v",
			keywords, expectedMap, calculatedMap)
	}
}
/*
File describe main handle structure which includes broker and db connection.

Author: Igor Kuznetsov
Email: me@swe-notes.ru

(c) Copyright by Igor Kuznetsov.
*/
package handlers

import (
	"github.com/gorilla/websocket"
	"github.com/streadway/amqp"

	"simple-tracking/backend/models"
	"simple-tracking/backend/utils"
)

// Handler bundles the dependencies shared by the HTTP handlers: the AMQP
// broker connection and its configuration, the websocket upgrader, a channel
// for asynchronous AMQP errors, and the database store.
type Handler struct {
	BrokerConn *amqp.Connection // live connection to the message broker
	BrokerCfg  utils.BrokerCfg  // broker settings used to (re)establish the connection
	Upgrader   websocket.Upgrader // upgrades incoming HTTP requests to websockets
	ErrChan    chan *amqp.Error // receives connection-level AMQP errors
	DB         models.DbStore   // database access layer
}
package services

import (
	"time"

	"models"
)

// postsDb is the in-memory backing store for posts, in insertion order.
var postsDb = []models.Post{}

// Add stores a new post with the given content, stamped with the current
// time, and returns the stored post.
func Add(content string) models.Post {
	post := models.Post{
		Date:    time.Now(),
		Content: content,
	}
	postsDb = append(postsDb, post)
	return post
}

// Get returns every post added so far.
func Get() []models.Post {
	return postsDb
}
package main

import (
	"fmt"
	"log"
	"strconv"

	"jblee.net/adventofcode2018/utils"
)

// main solves Advent of Code 2018 day 1 part 1: sum the signed frequency
// changes listed one per line in input.txt and print the final frequency.
func main() {
	lines := utils.ReadLinesOrDie("input.txt")

	freq := 0
	for _, line := range lines {
		// FIX: the Atoi error used to be discarded, silently treating any
		// malformed line as 0; now it aborts with a clear message.
		delta, err := strconv.Atoi(line)
		if err != nil {
			log.Fatalf("bad frequency change %q: %v", line, err)
		}
		freq += delta
	}
	fmt.Printf("freq: %d\n", freq)
}
package collection // StateReference is a reference to a collection state. type StateReference interface { // StateNum returns the sequence number of the reference. StateNum() StateNum // Create creates a new collection state with the given state number and data. // If a state already exists with the state number an error will be returned. Create(state State) error // Data returns the collection state. Data() (State, error) }
package main

import (
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	//ptypes "github.com/golang/protobuf/ptypes"
	timestamp "github.com/golang/protobuf/ptypes/timestamp"
)

// CreateProtobufTimestamp parses timeString as RFC3339 and converts it to a
// protobuf Timestamp. On a parse or conversion failure it logs fatally and
// exits the process (this helper is intended for CLI use, not libraries).
func CreateProtobufTimestamp(timeString string) *timestamp.Timestamp {
	t, err := time.Parse(time.RFC3339, timeString)
	if err != nil {
		log.Fatalf("Error parsing provided time. Error: %v", err)
	}

	// FIX: the local result used to be named "timestamp", shadowing the
	// imported package of the same name; renamed to ts for clarity.
	ts, err := ptypes.TimestampProto(t)
	if err != nil {
		log.Fatalf("Error converting provided time to a Protobuf Timestamp. Error: %v", err)
	}
	return ts
}
package tests

import (
	"encoding/json"
	"testing"

	"github.com/kataras/iris/httptest"

	"github.com/iris-contrib/httpexpect"

	"../app"
	"../config"
)

// InitTestServer points the global config at an sqlite database and returns
// an httpexpect handle bound to a freshly built application under test.
func InitTestServer(t *testing.T) *httpexpect.Expect {
	config.Config.DatabaseDriver = "sqlite3"
	config.Config.DatabaseDSN = "./test.db"
	application := app.NewApp()
	return httptest.New(t, application)
}

// JsonObjectFromString decodes str as arbitrary JSON. Decode failures are
// reported through the test log and result in a nil value.
func JsonObjectFromString(str string, t *testing.T) interface{} {
	var decoded interface{}
	err := json.Unmarshal([]byte(str), &decoded)
	if err != nil {
		t.Logf("Wrong json data: %v\n", err.Error())
	}
	return decoded
}
package chat

import (
	"encoding/json"
	"errors"
	"log"
	"time"
)

// LoginData is the JSON payload carried inside a LOGIN message.
type LoginData struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// User is a logged-in chat participant as sent back to the client.
type User struct {
	Name     string `json:"name"`
	Id       int64  `json:"id"`
	Username string `json:"username"`
	Token    string `json:"token"`
}

// login validates msg as a LOGIN message, decodes its credentials and
// returns the resulting User. The bool reports whether login succeeded.
func login(msg *Message) (*User, bool, error) {
	log.Println("handle login request")
	if msg == nil || msg.MessageType != LOGIN {
		// FIX: corrected the misspelled "invalied" in the error text.
		return nil, false, errors.New("invalid login message")
	}

	logindata := LoginData{}
	if err := json.Unmarshal([]byte(msg.Content), &logindata); err != nil {
		return nil, false, err
	}

	return &User{
		Name:     logindata.Username,
		Username: logindata.Username,
		// NOTE(review): a Unix-second id can collide for concurrent logins — confirm.
		Id: time.Now().Unix(),
		// NOTE(review): the password is echoed back as the token; this looks
		// unsafe — confirm it is intentional.
		Token: logindata.Password,
	}, true, nil
}
package context_test

import (
	"github.com/APTrust/exchange/context"
	"github.com/APTrust/exchange/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"os"
	"path"
	"path/filepath"
	"testing"
)

// TestNewContext verifies that NewContext wires up every dependency the
// workers rely on: config, NSQ and Pharos clients, both loggers with the
// expected on-disk paths, zeroed counters, and loggers that can actually
// write without panicking.
func TestNewContext(t *testing.T) {
	configFile := filepath.Join("config", "test.json")
	appConfig, err := models.LoadConfigFile(configFile)
	require.Nil(t, err)

	// In some tests we want to log to STDERR, but in this case, if it
	// happens to be turned on, it just creates useless, annoying output.
	appConfig.LogToStderr = false

	_context := context.NewContext(appConfig)
	require.NotNil(t, _context)

	// Both log files are named after the running binary (os.Args[0]),
	// inside the configured log directory.
	expectedPathToLogFile := filepath.Join(_context.Config.AbsLogDirectory(), path.Base(os.Args[0])+".log")
	expectedPathToJsonLog := filepath.Join(_context.Config.AbsLogDirectory(), path.Base(os.Args[0])+".json")

	assert.NotNil(t, _context.Config)
	assert.NotNil(t, _context.NSQClient)
	assert.NotNil(t, _context.PharosClient)
	assert.NotNil(t, _context.MessageLog)
	assert.NotNil(t, _context.JsonLog)
	assert.Equal(t, expectedPathToLogFile, _context.PathToLogFile())
	assert.Equal(t, expectedPathToJsonLog, _context.PathToJsonLog())
	// A fresh context starts with zeroed success/failure counters.
	assert.Equal(t, int64(0), _context.Succeeded())
	assert.Equal(t, int64(0), _context.Failed())

	// The loggers must be usable, not just non-nil.
	assert.NotPanics(t, func() {
		_context.MessageLog.Info("Test INFO log message")
	})
	assert.NotPanics(t, func() {
		_context.MessageLog.Debug("Test DEBUG log message")
	})
	assert.NotPanics(t, func() {
		_context.JsonLog.Println(`{"message": "Test JSON log message"}`)
	})

	// Cleanup, but only if context was successfully created
	if _context != nil && _context.PathToLogFile() != "" {
		os.Remove(_context.PathToLogFile())
	}
	if _context != nil && _context.PathToJsonLog() != "" {
		os.Remove(_context.PathToJsonLog())
	}
}

// TestGetS3Client verifies that a context can hand out an S3 client for
// explicitly supplied (dummy) credentials without error.
func TestGetS3Client(t *testing.T) {
	configFile := filepath.Join("config", "test.json")
	appConfig, err := models.LoadConfigFile(configFile)
	require.Nil(t, err)
	appConfig.LogToStderr = false
	_context := context.NewContext(appConfig)
	client, err := _context.GetS3Client(
		"example.com",
		"SAMPLE_ACCESS_KEY_ID",
		"SAMPLE_SECRET_ACCESS_KEY")
	assert.Nil(t, err)
	assert.NotNil(t, client)
}
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"mime"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	"github.com/stripe/stripe-go"
	"golang.org/x/net/context"
	"google.golang.org/appengine/log"
	"google.golang.org/appengine/urlfetch"
)

// PostmarkMessageHeader is one custom header on an outgoing message.
type PostmarkMessageHeader struct {
	Name  string
	Value string
}

// PostmarkMessage is the request body for Postmark's
// "send email with template" endpoint.
type PostmarkMessage struct {
	TemplateId    int
	TemplateModel map[string]interface{}
	InlineCss     bool
	From          string
	To            string
	Cc            string
	Bcc           string
	Tag           string
	ReplyTo       string
	Headers       []PostmarkMessageHeader
	TrackOpens    bool
	Attachments   []PostmarkAttachment
}

// PostmarkAttachment is one (base64-encoded) attachment on a message.
type PostmarkAttachment struct {
	Name        string
	Content     string
	ContentType string
	ContentID   string
}

// send posts message to Postmark's templated-email endpoint using the
// App Engine urlfetch client, and returns an error if the request could
// not be sent or Postmark rejected it.
func send(c context.Context, message *PostmarkMessage) error {
	client := urlfetch.Client(c)

	requestBody, err := json.Marshal(message)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", "https://api.postmarkapp.com/email/withTemplate", strings.NewReader(string(requestBody)))
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("X-Postmark-Server-Token", Get(c, "POSTMARK_KEY"))

	res, err := client.Do(req)
	log.Infof(c, "Postmark: %v %v %v", req, res, err)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	// FIX: the ReadAll error used to be silently discarded.
	resBody, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}
	log.Infof(c, "Postmark response: %s", string(resBody))

	// FIX: a 4xx/5xx from Postmark used to be reported as success.
	if res.StatusCode >= 400 {
		return fmt.Errorf("postmark returned %s: %s", res.Status, string(resBody))
	}
	return nil
}

// getAttachments loads the captured.png logo from disk and returns it as a
// single inline attachment; it returns nil if the file cannot be read.
func getAttachments(c context.Context) []PostmarkAttachment {
	attachments := []PostmarkAttachment{}
	path := "captured.png"
	file, err := ioutil.ReadFile(path)
	if err != nil {
		log.Infof(c, "[error] Reading attachment file %s (%v)", path, err)
		return nil
	}
	attachment := PostmarkAttachment{
		Name:        path,
		Content:     base64.StdEncoding.EncodeToString(file),
		ContentType: mime.TypeByExtension(filepath.Ext(path)),
		ContentID:   "cid:logo@thecapturedproject.com",
	}
	attachments = append(attachments, attachment)
	return attachments
}

// SendReceipt emails a purchase receipt (template 5705) to the buyer.
func SendReceipt(c context.Context, name string, email string, shipping stripe.Shipping) error {
	message := PostmarkMessage{
		From:       "The Captured Project <info@thecapturedproject.com>",
		To:         fmt.Sprintf("%s <%s>", name, email),
		TemplateId: 5705,
		TemplateModel: map[string]interface{}{
			"date":          time.Now().Format("1/2/2006"),
			"total":         "$30",
			"billing_name":  name,
			"shipping_name": shipping.Name,
			"address":       shipping.Address,
		},
		Attachments: getAttachments(c),
	}
	return send(c, &message)
}

// SendShippingNotification emails a shipment notice with tracking number
// (template 216881) to the buyer.
func SendShippingNotification(c context.Context, name string, email string, shipping stripe.Shipping, trackingNumber string) error {
	message := PostmarkMessage{
		From:       "The Captured Project <info@thecapturedproject.com>",
		To:         fmt.Sprintf("%s <%s>", name, email),
		TemplateId: 216881,
		TemplateModel: map[string]interface{}{
			"date":            time.Now().Format("1/2/2006"),
			"total":           "$30",
			"billing_name":    name,
			"shipping_name":   shipping.Name,
			"address":         shipping.Address,
			"tracking_number": trackingNumber,
		},
		Attachments: getAttachments(c),
	}
	return send(c, &message)
}

// SendPaymentDeclinedNotification emails a payment-declined notice
// (template 440161) to the buyer.
func SendPaymentDeclinedNotification(c context.Context, name string, email string) error {
	message := PostmarkMessage{
		From:       "The Captured Project <info@thecapturedproject.com>",
		To:         fmt.Sprintf("%s <%s>", name, email),
		TemplateId: 440161,
		TemplateModel: map[string]interface{}{
			"date":         time.Now().Format("1/2/2006"),
			"total":        "$30",
			"billing_name": name,
		},
		Attachments: getAttachments(c),
	}
	return send(c, &message)
}
// Package voicetext defines string constants used with the VoiceText
// speech-synthesis API.
package voicetext

// Speaker identifiers.
// NOTE(review): presumably the speaker set of the VoiceText Web API —
// confirm against the vendor documentation before extending.
const (
	Show   = "show"
	Haruka = "haruka"
	Hikari = "hikari"
	Takeru = "takeru"
)

// Emotion identifiers.
const (
	Happiness = "happiness"
	Anger     = "anger"
	Sadness   = "sadness"
)
package graphql

import "text/template"

// schemaTemplate renders a GraphQL SDL schema from the generator's model.
// It expects a data value exposing .Enums, .Objects, .Connections,
// .FilterObjects, .InputObjects, .Queries, .Mutations and .FilterInputs,
// plus a funcMap providing at least "now". Sub-template "field" renders one
// field declaration (with list/non-null markers); "resolver" renders one
// query/mutation entry, wrapping "get-items" results in a list and "list"
// results in a generated Connection type.
var schemaTemplate = template.Must(template.New("schema").Funcs(funcMap).Parse(`
## !NOTE: This file is auto-generated DO NOT EDIT
## Generated at {{now}}

{{define "field" -}}
{{.Name}}: {{if .Type.IsList}}[{{end -}}
{{.Type.Name}}{{if .Type.NonNullable}}!{{end}}
{{- if .Type.IsList}}]{{end}}
{{- end}}

{{define "resolver" -}}
{{.Name}}{{ .Resolver.KeyFieldArgsString }}: {{ if eq .Resolver.Action "get-items" }}[{{end}}{{.Resolver.Type.Name -}}{{ if eq .Resolver.Action "get-items" }}]{{end}}{{ if eq .Resolver.Action "list" }}Connection!{{ end }}
{{- end}}

{{- range .Enums}}enum {{.Name}} {
	{{range .Values}}{{.}}
	{{end}}
}
{{end}}

{{- range .Objects}}type {{.Name}} {
	{{range .Fields}}{{template "field" .}}
	{{end}}
}
{{end}}

{{- range .Connections}}
type {{.}}Connection {
	items: [{{.}}]
	nextToken: String
}
{{ end -}}

{{- range .FilterObjects }}
input {{ .Name }} {
	{{ range .Fields -}}{{template "field" .}}
	{{ end -}}
}
{{ end -}}

{{- range .InputObjects }}
input {{ .Name }} {
	{{ range .Fields -}}{{template "field" .}}
	{{ end -}}
}
{{ end -}}

{{- if .Queries}}
type Query {
{{- range .Queries}}
	{{ template "resolver" . }}
{{- end}}
}
{{end -}}

{{- if .Mutations}}
type Mutation {
{{- range .Mutations}}
	{{ template "resolver" . }}
{{- end}}
}
{{end -}}

input TableBooleanFilterInput {
	ne: Boolean
	eq: Boolean
}

{{range .FilterInputs}}input Table{{.}}FilterInput {
	ne: {{.}}
	eq: {{.}}
	le: {{.}}
	lt: {{.}}
	ge: {{.}}
	gt: {{.}}
	contains: {{.}}
	notContains: {{.}}
	between: [{{.}}]
}
{{end}}
`))
package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/teploff/otus/hw_10/client"
	"github.com/teploff/otus/hw_10/server"
)

// FIX: the usage text was a copy-paste leftover ("reactive power frequency");
// it now describes what the flag actually controls.
var timeOut = flag.Duration("timeout", 10*time.Second, "telnet client connection timeout")

// main starts a TCP echo server on the address given by the first two CLI
// arguments (ip, port), schedules its graceful shutdown after ten seconds,
// and runs a telnet client against it on stdin/stdout.
func main() {
	flag.Parse()
	if len(flag.Args()) < 2 {
		log.Fatal("not enough cli arguments: ip & port")
	}
	addr := fmt.Sprintf("%s:%s", flag.Args()[0], flag.Args()[1])

	srv, err := server.NewTCPServer(addr)
	if err != nil {
		log.Fatalln(err)
	}
	go srv.Listen()

	// FIX: replaced the busy-wait loop (select with default + 100ms sleep
	// under a mislabeled "EXIST" label) with a single blocking timer: stop
	// the server gracefully once, ten seconds from now.
	shutdown := time.NewTimer(10 * time.Second)
	go func() {
		<-shutdown.C
		srv.GracefulStop()
	}()

	tn, err := client.NewTelnetClient(addr, *timeOut, os.Stdout)
	if err != nil {
		log.Fatalln(err)
	}

	log.Println(tn.Run())
}
// Package config loads the application's TOML configuration file and
// exposes its sections as package-level variables.
package config

import (
	"github.com/BurntSushi/toml"
)

// Package-level views of the parsed configuration, populated by InitConfig.
var App appConfig
var System systemConfig
var Mongo mongoConfig
var Logger logConfig

// TomlConfig mirrors the top-level layout of config/config.toml.
type TomlConfig struct {
	AppConfig    appConfig    `toml:"app"`    // application information settings
	SystemConfig systemConfig `toml:"system"` // system settings
	MongoConfig  mongoConfig  `toml:"mongo"`  // MongoDB connection settings
	LogConfig    logConfig    `toml:"logger"` // logging settings
}

type appConfig struct {
	Name      string `toml:"name"`      // application name
	Logo      string `toml:"logo"`      // system logo
	Summary   string `toml:"summary"`   // system description
	Version   string `toml:"version"`   // system version
	Copyright string `toml:"copyright"` // copyright notice
	QQ        string `toml:"qq"`        // QQ contact
	Wechat    string `toml:"wechat"`    // WeChat official account
	Website   string `toml:"website"`   // website URL
}

type systemConfig struct {
	ServerPort string `toml:"server_port"` // API server port
	ValidSecs  int64  `toml:"valid_times"` // token validity duration
	CheckToken bool   `toml:"check_token"` // whether tokens must be verified
	GridColumn int    `toml:"grid_column"` // number of items shown per row of the grid
}

type mongoConfig struct {
	Host     string `toml:"host"`
	Database string `toml:"database"`
	UserName string `toml:"username"`
	Password string `toml:"password"`
}

type logConfig struct {
	EnableOperateLog bool   `toml:"enable_operate_log"` // whether the operation log is enabled
	EnableMessageLog bool   `toml:"enable_message_log"` // whether the message log is enabled
	LogPath          string `toml:"log_path"`           // local log file storage path
}

// InitConfig reads config/config.toml and fills the package-level
// App/System/Mongo/Logger variables. It panics on a missing or malformed
// file, since the application cannot run without configuration.
func InitConfig() {
	var tomlConfig TomlConfig
	if _, err := toml.DecodeFile("config/config.toml", &tomlConfig); err != nil {
		panic(err)
	}
	App = tomlConfig.AppConfig
	System = tomlConfig.SystemConfig
	Mongo = tomlConfig.MongoConfig
	Logger = tomlConfig.LogConfig
}
package main

import (
	"../SftpPb"
	"bufio"
	"context"
	"fmt"
	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
	"google.golang.org/grpc"
	"io"
	"log"
	"net"
	"os"
	"path/filepath"
	"strings"
)

type server struct{}

// main starts the gRPC SFTP service on port 50051.
func main() {
	fmt.Println("Server was initialized")

	lis, err := net.Listen("tcp", "0.0.0.0:50051")
	if err != nil {
		log.Fatalf("Failed to listen: %v", err)
	}

	s := grpc.NewServer()
	sftppb.RegisterSFTPServer(s, &server{})

	err = s.Serve(lis)
	if err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}

// dialSFTP opens a password-authenticated SSH connection to addr and an
// SFTP client over it. On success both returned handles are non-nil and
// the caller must Close them (client first, then conn).
// NOTE(review): host-key checking is disabled (InsecureIgnoreHostKey);
// getHostKey + ssh.FixedHostKey should replace it in production.
func dialSFTP(username, passWord, addr string) (*ssh.Client, *sftp.Client, error) {
	config := ssh.ClientConfig{
		User: username,
		Auth: []ssh.AuthMethod{
			ssh.Password(passWord),
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		//HostKeyCallback: ssh.FixedHostKey(HostKey),
	}

	conn, err := ssh.Dial("tcp", addr, &config)
	if err != nil {
		return nil, nil, err
	}
	client, err := sftp.NewClient(conn)
	if err != nil {
		conn.Close()
		return nil, nil, err
	}
	return conn, client, nil
}

// CopyLocalToRemoteService uploads the named local file to the same path on
// the remote host. Errors are logged and reported in the response's Result
// field; the gRPC error is always nil.
//
// BUG FIX: the previous version kept executing after a failed ssh.Dial /
// sftp.NewClient, so `defer conn.Close()` dereferenced nil and the server
// panicked; it also never closed the local source file.
func (*server) CopyLocalToRemoteService(ctx context.Context, req *sftppb.CopyLocalToRemoteRequest) (*sftppb.CopyLocalToRemoteResponse, error) {
	fmt.Printf("Greet function was invoked with %v\n", req)
	fileName := req.Sftp.FileName
	passWord := req.Sftp.PassWord
	systemId := req.Sftp.SystemId
	username := req.Sftp.Username
	hostPort := req.Sftp.HostPort

	conn, client, err := dialSFTP(username, passWord, systemId+hostPort)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyLocalToRemoteResponse{Result: err.Error()}, nil
	}
	defer conn.Close()
	defer client.Close()

	// Create the destination file on the remote host.
	dstFile, err := client.Create(fileName)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyLocalToRemoteResponse{Result: err.Error()}, nil
	}
	defer dstFile.Close()

	// Open the local source file.
	srcFile, err := os.Open(fileName)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyLocalToRemoteResponse{Result: err.Error()}, nil
	}
	defer srcFile.Close()

	// Stream the local file to the remote one.
	bytes, err := io.Copy(dstFile, srcFile)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyLocalToRemoteResponse{Result: err.Error()}, nil
	}
	fmt.Printf("%d bytes copied\n", bytes)

	res := &sftppb.CopyLocalToRemoteResponse{Result: ""}
	fmt.Println(res.String())
	return res, nil
}

// CopyFromRemoteService downloads the named remote file to the same path on
// the local host. Errors are logged and reported in the response's Result
// field; the gRPC error is always nil.
//
// BUG FIX: same nil-connection panics as CopyLocalToRemoteService, plus the
// remote source file was never closed and a failed WriteTo was not reported
// in the result.
func (*server) CopyFromRemoteService(ctx context.Context, req *sftppb.CopyFromRemoteRequest) (*sftppb.CopyFromRemoteResponse, error) {
	fmt.Printf("Greet function was invoked with %v\n", req)
	fileName := req.Sftp.FileName
	passWord := req.Sftp.PassWord
	systemId := req.Sftp.SystemId
	username := req.Sftp.Username
	hostPort := req.Sftp.HostPort

	conn, client, err := dialSFTP(username, passWord, systemId+hostPort)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyFromRemoteResponse{Result: err.Error()}, nil
	}
	defer conn.Close()
	defer client.Close()

	// Create the destination file locally.
	dstFile, err := os.Create(fileName)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyFromRemoteResponse{Result: err.Error()}, nil
	}
	defer dstFile.Close()

	// Open the source file on the remote host.
	srcFile, err := client.Open(fileName)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyFromRemoteResponse{Result: err.Error()}, nil
	}
	defer srcFile.Close()

	// Copy with the WriteTo fast path.
	bytesWritten, err := srcFile.WriteTo(dstFile)
	if err != nil {
		log.Println(err)
		return &sftppb.CopyFromRemoteResponse{Result: err.Error()}, nil
	}
	fmt.Printf("%d bytes copied\n", bytesWritten)

	res := &sftppb.CopyFromRemoteResponse{Result: ""}
	fmt.Println(res.String())
	return res, nil
}

// getHostKey scans ~/.ssh/known_hosts for an entry matching host and
// returns its public key, or nil if none is found. Use ssh-keyscan to
// obtain an initial key.
func getHostKey(host string) ssh.PublicKey {
	file, err := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts"))
	if err != nil {
		log.Println(err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	var hostKey ssh.PublicKey
	for scanner.Scan() {
		// known_hosts format: "hostnames keytype base64-key".
		fields := strings.Split(scanner.Text(), " ")
		if len(fields) != 3 {
			continue
		}
		if strings.Contains(fields[0], host) {
			var err error
			hostKey, _, _, _, err = ssh.ParseAuthorizedKey(scanner.Bytes())
			if err != nil {
				log.Printf("error parsing %q: %v", fields[2], err)
			}
			break
		}
	}
	if hostKey == nil {
		log.Println("no hostkey found for " + host)
	}
	return hostKey
}
// anagramMappings returns, for each element of A, an index at which that
// element appears in B (LeetCode 760: B is an anagram of A). When a value
// occurs more than once, the index of its last occurrence in B is used for
// every copy — any valid mapping is accepted by the problem.
func anagramMappings(A []int, B []int) []int {
	// Index every value of B once: O(len(B)) time, pre-sized map.
	bIdx := make(map[int]int, len(B))
	for i, v := range B {
		bIdx[v] = i
	}

	// Map each element of A through the table; the result slice is
	// allocated at its final length instead of grown by repeated appends.
	idxMap := make([]int, len(A))
	for i, v := range A {
		idxMap[i] = bIdx[v]
	}
	return idxMap
}
// Load-generation script: seeds a social-platform instance with test users,
// spaces, space memberships and space activities through its private REST
// API (see the v1/social endpoints below).
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"net/http/cookiejar"
	"os"
	"sync"
	"time"
)

// User is the JSON body for the user-creation endpoint.
type User struct {
	Username  string `json:"username"`
	Password  string `json:"password"`
	Email     string `json:"email"`
	Firstname string `json:"firstname"`
	Lastname  string `json:"lastname"`
}

// Space is the JSON body for the space-creation endpoint.
type Space struct {
	DisplayName  string `json:"displayName"`
	Description  string `json:"description"`
	Visibility   string `json:"visibility"`
	Subscription string `json:"subscription"`
}

// SpaceActivity pairs a space id with the activity to post into it.
type SpaceActivity struct {
	SpaceId  int      `json:"id"`
	Activity Activity `json:"model"`
}

// Activity is the JSON body for the activity-creation endpoint.
type Activity struct {
	Title string `json:"title"`
}

// SpaceMembership is the JSON body for the space-membership endpoint.
type SpaceMembership struct {
	User  string `json:"user"`
	Space string `json:"space"`
}

// Endpoint templates and sizing knobs for the generated data set.
const (
	SESSION_URI          = "%s/rest/private/"                              // GET once to establish a session cookie
	USERS_URI            = "/rest/private/v1/social/users"                 // POST to create users
	SPACES_URI           = "/rest/private/v1/social/spaces"                // POST to create spaces
	SPACE_ACTIVITIES_URI = "%s/rest/private/v1/social/spaces/%d/activities" // POST to create activities in a space
	SPACE_MEMBERSHIP_URL = "%s/rest/private/v1/social/spacesMemberships"   // POST to add a user to a space

	USER_PREFIX   = "abcde"       // generated usernames are USER_PREFIX + index
	USER_PASSWORD = "testtest123" // shared password for all generated users
	USER_EMAIL    = "@test.com"   // email domain suffix for generated users

	SPACE_PREFIX = "spacetestestc" // generated space names are SPACE_PREFIX + index

	NB_USERS              = 200  // number of users to create
	NB_SPACES             = 100  // number of spaces to create
	NB_SPACES_ACTIVITIES  = 1000 // number of activities to create (per targeted space)
	SPACE_ACTIVITY_LENGTH = 200  // length of each random activity title
)

// letterBytes is the alphabet used for random activity titles.
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890 !@#$%^&*()_+-=[]\\{}|;':\",./?"
// RandStringBytes returns a random string of n characters drawn from
// letterBytes (uses the default, unseeded math/rand source).
func RandStringBytes(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Intn(len(letterBytes))]
	}
	return string(b)
}

// usage prints the expected command line and exits with status 1.
func usage(arguments []string) {
	fmt.Println(fmt.Sprintf("%s <base url> <user> <password>", arguments[0]))
	os.Exit(1)
}

// createUser is a worker loop: it receives usernames from c and POSTs one
// user-creation request per name to host h, marking wg done for each.
// NOTE(review): HTTP errors are discarded; a failed request leaves res nil
// and the subsequent res.Status access would panic — worth hardening.
func createUser(wg *sync.WaitGroup, c <-chan string, h string) {
	for {
		name := <-c
		t0 := time.Now()
		newUser := User{Username: name, Password: USER_PASSWORD, Email: fmt.Sprintf("%s%s", name, USER_EMAIL), Firstname: name, Lastname: name}
		json, _ := json.Marshal(newUser)
		req, _ := http.NewRequest("POST", fmt.Sprintf("%s%s", h, USERS_URI), bytes.NewBuffer(json))
		req.Header.Set("Content-Type", "application/json")
		res, _ := client.Do(req)
		req.Body.Close()
		res.Body.Close()
		t1 := time.Now()
		fmt.Println("User ", name, " : "+res.Status, " in ", t1.Sub(t0))
		wg.Done()
	}
}

// createUsers creates NB_USERS users against host h with ten concurrent
// createUser workers fed through a single channel.
func createUsers(h string) {
	var wg sync.WaitGroup
	c := make(chan string)
	////-------------
	// Create any go routines here
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	go createUser(&wg, c, h)
	t0 := time.Now()
	for i := 0; i < NB_USERS; i++ {
		wg.Add(1)
		name := fmt.Sprintf("%s%d", USER_PREFIX, i)
		c <- name
	}
	fmt.Println("Waiting for the threads to finish")
	wg.Wait()
	t1 := time.Now()
	fmt.Println("All thread done")
	fmt.Println("Users created in ", t1.Sub(t0))
}

// addUserToSpace POSTs a single membership (user joins space) to host h.
// NOTE(review): same unchecked client.Do pattern as createUser.
func addUserToSpace(h string, space string, user string) {
	t0 := time.Now()
	membership := SpaceMembership{User: user, Space: space}
	json, _ := json.Marshal(membership)
	req, _ := http.NewRequest("POST", fmt.Sprintf(SPACE_MEMBERSHIP_URL, h), bytes.NewBuffer(json))
	req.Header.Set("Content-Type", "application/json")
	res, _ := client.Do(req)
	req.Body.Close()
	res.Body.Close()
	t1 := time.Now()
	fmt.Println("Add user", user, "to space", space, "in", t1.Sub(t0), res.Status)
}

// addUsersToSpaces joins every generated user to every generated space.
// Memberships for a given space are always routed to the same channel
// (keyed by space index modulo the channel count) so that a single worker
// serializes all updates to that space.
func addUsersToSpaces(h string) {
	function := func(wg *sync.WaitGroup, c <-chan *SpaceMembership) {
		for {
			m := <-c
			addUserToSpace(h, m.Space, m.User)
			wg.Done()
		}
	}
	t0 := time.Now()
	var wg sync.WaitGroup
	// Updating a space is not thread safe
	// directing same space update to the same channel to avoid
	// concurrency
	var channels []chan *SpaceMembership
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	channels = append(channels, make(chan *SpaceMembership))
	for _, c := range channels {
		go function(&wg, c)
	}
	for u := 0; u < NB_USERS; u++ {
		for sid := 0; sid < NB_SPACES; sid++ {
			u := fmt.Sprintf("%s%d", USER_PREFIX, u)
			s := fmt.Sprintf("%s%d", SPACE_PREFIX, sid)
			m := SpaceMembership{User: u, Space: s}
			wg.Add(1)
			pos := sid % len(channels)
			channels[pos] <- &m
		}
	}
	fmt.Println("Waiting for the threads to finish")
	wg.Wait()
	t1 := time.Now()
	fmt.Println("All thread done")
	fmt.Println("User attacted to spaces in ", t1.Sub(t0))
}

// createSpace is a worker loop: it receives space indices from c and POSTs
// one public/open space per index to host h, marking wg done for each.
func createSpace(wg *sync.WaitGroup, c <-chan int, h string) {
	// TODO cleary exit
	for {
		t0 := time.Now()
		id := <-c
		name := fmt.Sprintf("%s%d", SPACE_PREFIX, id)
		newSpace := Space{DisplayName: name, Description: name, Visibility: "public", Subscription: "open"}
		json, _ := json.Marshal(newSpace)
		req, _ := http.NewRequest("POST", fmt.Sprintf("%s%s", h, SPACES_URI), bytes.NewBuffer(json))
		req.Header.Set("Content-Type", "application/json")
		res, _ := client.Do(req)
		req.Body.Close()
		res.Body.Close()
		t1 := time.Now()
		fmt.Println("Space ", name, " : "+res.Status, " in ", t1.Sub(t0))
		wg.Done()
	}
}

/*
curl -uroot:gtn -H'Content-Type: application/json' -X POST http://localhost:8080/rest/private/v1/social/spaces -d '{"displayName":"space1", "description":"space1", "visibility":"public", "subscription":"open"}'
*/

// createSpaces creates NB_SPACES spaces against host h. u and p are
// currently unused (the shared session cookie authenticates the requests).
func createSpaces(h string, u string, p string) {
	var wg sync.WaitGroup
	sc := make(chan int)
	////-------------
	// Create any go routines here
	// For the moment it's not possible
	// to create more than one space at a time
	// due to https://jira.exoplatform.org/browse/SOC-5697
	go createSpace(&wg, sc, h)
	t0 := time.Now()
	for i := 0; i < NB_SPACES; i++ {
		wg.Add(1)
		sc <- i
	}
	fmt.Println("Waiting for the threads to finish")
	wg.Wait()
	t1 := time.Now()
	fmt.Println("All thread done")
	fmt.Println("Spaces created in ", t1.Sub(t0))
}

/*
 * Create NB_ACTIVITIES on each space
 */
// createSpacesActivities posts NB_SPACES_ACTIVITIES random activities.
// NOTE(review): the per-space loop is commented out and the space id is
// hard-wired to 323, so all activities currently land in that one space.
func createSpacesActivities(h string, u string, p string) {
	t0 := time.Now()
	for i := 1; i <= NB_SPACES_ACTIVITIES; i++ {
		//		for s := 1; s <= NB_SPACES; s++ {
		s := 323
		ta0 := time.Now()
		title := RandStringBytes(SPACE_ACTIVITY_LENGTH)
		//			title = "test"
		a := Activity{Title: title}
		fmt.Print(fmt.Sprintf("Creating activity spaceId=%d actitivyCount=%d ...", s, i))
		json, _ := json.Marshal(a)
		req, _ := http.NewRequest("POST", fmt.Sprintf(SPACE_ACTIVITIES_URI, h, s), bytes.NewBuffer(json))
		req.Close = true
		req.Header.Set("Content-Type", "application/json")
		res, _ := client.Do(req)
		fmt.Print(res.Status)
		req.Body.Close()
		res.Body.Close()
		ta1 := time.Now()
		fmt.Println(" in ", ta1.Sub(ta0))
		//		}
	}
	t1 := time.Now()
	fmt.Println("Activities created in ", t1.Sub(t0))
}

// createSpacesActivitiy posts a single activity with the given content
// into the space identified by id.
func createSpacesActivitiy(h string, id int, content string) {
	ta0 := time.Now()
	a := Activity{Title: content}
	fmt.Print(fmt.Sprintf("Creating activity spaceId=%d ...", id))
	json, _ := json.Marshal(a)
	req, _ := http.NewRequest("POST", fmt.Sprintf(SPACE_ACTIVITIES_URI, h, id), bytes.NewBuffer(json))
	req.Close = true
	req.Header.Set("Content-Type", "application/json")
	res, _ := client.Do(req)
	fmt.Print(res.Status)
	req.Body.Close()
	res.Body.Close()
	ta1 := time.Now()
	fmt.Println(" in ", ta1.Sub(ta0))
}

// getSession performs a basic-auth GET against the session endpoint so the
// shared client's cookie jar captures the session cookie; all later calls
// ride on that cookie.
func getSession(h string, u string, p string) {
	req, _ := http.NewRequest("GET", fmt.Sprintf(SESSION_URI, h), nil)
	req.Close = true
	req.SetBasicAuth(u, p)
	res, _ := client.Do(req)
	defer res.Body.Close()
	//fmt.Println(client.Jar)
	fmt.Println("Session created " + res.Status)
}

// client is the shared HTTP client; its cookie jar carries the session.
var client http.Client

// sessionCookie is currently unused.
var sessionCookie string

// init equips the shared client with a cookie jar before main runs.
func init() {
	jar, _ := cookiejar.New(&cookiejar.Options{})
	client = http.Client{Jar: jar}
}

// main parses <base url> <user> <password>, opens a session, then seeds
// users, spaces, memberships and activities in that order.
func main() {
	arguments := os.Args
	if len(arguments) != 4 {
		usage(arguments)
	}
	host := arguments[1]
	user := arguments[2]
	password := arguments[3]
	fmt.Println(fmt.Sprintf("Using host %s and user %s", host, user))
	getSession(host, user, password)
	createUsers(host)
	createSpaces(host, user, password)
	addUsersToSpaces(host)
	createSpacesActivities(host, user, password)
	createSpacesActivitiy(host, 66, "fake identity to test")
}
package gowebdav

import (
	"encoding/base64"
	"net/http"
)

// BasicAuth carries the username/password pair used for HTTP basic auth.
type BasicAuth struct {
	user string
	pw   string
}

// Type identifies the BasicAuthenticator
func (b *BasicAuth) Type() string {
	return "BasicAuth"
}

// User holds the BasicAuth username
func (b *BasicAuth) User() string {
	return b.user
}

// Pass holds the BasicAuth password
func (b *BasicAuth) Pass() string {
	return b.pw
}

// Authorize the current request
func (b *BasicAuth) Authorize(req *http.Request, method string, path string) {
	credentials := base64.StdEncoding.EncodeToString([]byte(b.user + ":" + b.pw))
	req.Header.Set("Authorization", "Basic "+credentials)
}
// NOTE(review): this file is a scratch pad holding two separate example
// programs pasted together; the '#####' separator below is not valid Go,
// so the file does not compile as-is.
package main

import "fmt"

// main passes several concrete types through an empty-interface parameter.
func main() {
	test("thisisstring")
	test("10")
	test(true)
}

// test prints the value and its dynamic type.
func test(a interface{}) {
	fmt.Printf("(%v, %T)\n", a, a)
}

###############################################
// new example
package main

import (
	"fmt"
)

// Animal is satisfied by anything that can Speak.
type Animal interface {
	Speak() string
}

type Dog struct {
}

func (d Dog) Speak() string {
	return "Woof!"
}

type Cat struct {
}

func (c Cat) Speak() string {
	return "Meow!"
}

type Llama struct {
}

func (l Llama) Speak() string {
	return "?????"
}

type JavaProgrammer struct {
}

func (j JavaProgrammer) Speak() string {
	return "Design patterns!"
}

// main demonstrates interface-based polymorphism over a slice of Animals.
func main() {
	animals := []Animal{Dog{}, Cat{}, Llama{}, JavaProgrammer{}}
	for _, animal := range animals {
		fmt.Println(animal.Speak())
	}
}
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package session

import (
	"context"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/parser/terror"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/sessiontxn"
	"github.com/pingcap/tidb/ttl/metrics"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/sqlexec"
)

// TxnMode represents using optimistic or pessimistic mode in the transaction
type TxnMode int

const (
	// TxnModeOptimistic means using the optimistic transaction with "BEGIN OPTIMISTIC"
	TxnModeOptimistic TxnMode = iota
	// TxnModePessimistic means using the pessimistic transaction with "BEGIN PESSIMISTIC"
	TxnModePessimistic
)

// Session is used to execute queries for TTL case
type Session interface {
	sessionctx.Context
	// SessionInfoSchema returns information schema of current session
	SessionInfoSchema() infoschema.InfoSchema
	// ExecuteSQL executes the sql
	ExecuteSQL(ctx context.Context, sql string, args ...interface{}) ([]chunk.Row, error)
	// RunInTxn executes the specified function in a txn
	RunInTxn(ctx context.Context, fn func() error, mode TxnMode) (err error)
	// ResetWithGlobalTimeZone resets the session time zone to global time zone
	ResetWithGlobalTimeZone(ctx context.Context) error
	// Close closes the session
	Close()
	// Now returns the current time in location specified by session var
	Now() time.Time
}

// session implements Session by delegating to an embedded sessionctx.Context
// plus a SQL executor; closeFn releases the session back to its owner.
type session struct {
	sessionctx.Context
	sqlExec sqlexec.SQLExecutor
	closeFn func(Session)
}

// NewSession creates a new Session
func NewSession(sctx sessionctx.Context, sqlExec sqlexec.SQLExecutor, closeFn func(Session)) Session {
	return &session{
		Context: sctx,
		sqlExec: sqlExec,
		closeFn: closeFn,
	}
}

// SessionInfoSchema returns information schema of current session
func (s *session) SessionInfoSchema() infoschema.InfoSchema {
	// A closed session (see Close) has a nil Context.
	if s.Context == nil {
		return nil
	}
	return sessiontxn.GetTxnManager(s.Context).GetTxnInfoSchema()
}

// ExecuteSQL executes the sql
func (s *session) ExecuteSQL(ctx context.Context, sql string, args ...interface{}) ([]chunk.Row, error) {
	// sqlExec is nilled out by Close, so reject use-after-close explicitly.
	if s.sqlExec == nil {
		return nil, errors.New("session is closed")
	}
	// Tag the request as internal TTL traffic for resource accounting.
	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnTTL)
	rs, err := s.sqlExec.ExecuteInternal(ctx, sql, args...)
	if err != nil {
		return nil, err
	}
	if rs == nil {
		return nil, nil
	}
	defer func() {
		terror.Log(rs.Close())
	}()
	// Drain the record set fully with a batch size of 8 rows per chunk.
	return sqlexec.DrainRecordSet(ctx, rs, 8)
}

// RunInTxn executes the specified function in a txn
func (s *session) RunInTxn(ctx context.Context, fn func() error, txnMode TxnMode) (err error) {
	tracer := metrics.PhaseTracerFromCtx(ctx)
	// Restore whatever phase was active before this call on exit.
	defer tracer.EnterPhase(tracer.Phase())
	tracer.EnterPhase(metrics.PhaseBeginTxn)
	sql := "BEGIN "
	switch txnMode {
	case TxnModeOptimistic:
		sql += "OPTIMISTIC"
	case TxnModePessimistic:
		sql += "PESSIMISTIC"
	default:
		return errors.New("unknown transaction mode")
	}
	if _, err = s.ExecuteSQL(ctx, sql); err != nil {
		return err
	}
	tracer.EnterPhase(metrics.PhaseOther)

	// Roll back unless the commit below succeeded; rollback errors are
	// logged rather than returned so the original error is preserved.
	success := false
	defer func() {
		if !success {
			_, rollbackErr := s.ExecuteSQL(ctx, "ROLLBACK")
			terror.Log(rollbackErr)
		}
	}()

	if err = fn(); err != nil {
		return err
	}

	tracer.EnterPhase(metrics.PhaseCommitTxn)
	if _, err = s.ExecuteSQL(ctx, "COMMIT"); err != nil {
		return err
	}
	tracer.EnterPhase(metrics.PhaseOther)
	success = true
	return err
}

// ResetWithGlobalTimeZone resets the session time zone to global time zone
func (s *session) ResetWithGlobalTimeZone(ctx context.Context) error { sessVar := s.GetSessionVars() if sessVar.TimeZone != nil { globalTZ, err := sessVar.GetGlobalSystemVar(ctx, variable.TimeZone) if err != nil { return err } tz, err := sessVar.GetSessionOrGlobalSystemVar(ctx, variable.TimeZone) if err != nil { return err } if globalTZ == tz { return nil } } _, err := s.ExecuteSQL(ctx, "SET @@time_zone=@@global.time_zone") return err } // Close closes the session func (s *session) Close() { if s.closeFn != nil { s.closeFn(s) s.Context = nil s.sqlExec = nil s.closeFn = nil } } // Now returns the current time in the location of time_zone session var func (s *session) Now() time.Time { return time.Now().In(s.Context.GetSessionVars().Location()) }
package hsm

import (
	"fmt"
	"reflect"
)

// AssertEqual asserts the equality of actual and expected.
func AssertEqual(expected, actual interface{}) {
	if !ObjectAreEqual(expected, actual) {
		panic(fmt.Sprintf("Equal(%#v, %#v) fail", expected, actual))
	}
}

// AssertNotEqual asserts the inequality of actual and expected.
func AssertNotEqual(expected, actual interface{}) {
	if ObjectAreEqual(expected, actual) {
		panic(fmt.Sprintf("NotEqual(%#v, %#v) fail", expected, actual))
	}
}

// ObjectAreEqual tests whether actual is equal to expected.
// It returns true when equal, otherwise returns false.
//
// Equality is checked, in order, by: nil identity, reflect.DeepEqual,
// DeepEqual after converting actual to expected's type (so e.g. int32(7)
// equals int64(7)), and finally by comparing %#v representations.
func ObjectAreEqual(expected, actual interface{}) bool {
	if expected == nil || actual == nil {
		return expected == actual
	}
	if reflect.DeepEqual(expected, actual) {
		return true
	}
	expectedValue := reflect.ValueOf(expected)
	actualValue := reflect.ValueOf(actual)
	// BUGFIX: the previous code compared reflect.Value structs with ==,
	// which compares the internal pointers rather than the wrapped values.
	// Compare the converted value's contents instead.
	if actualValue.Type().ConvertibleTo(expectedValue.Type()) &&
		reflect.DeepEqual(expected, actualValue.Convert(expectedValue.Type()).Interface()) {
		return true
	}
	// Last resort: textual Go-syntax representation.
	return fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual)
}

// AssertTrue asserts on truth of value.
func AssertTrue(value bool) {
	if !value {
		panic(fmt.Sprintf("True(value=%#v) fail", value))
	}
}

// AssertFalse asserts on falsehood of value.
func AssertFalse(value bool) {
	if value {
		panic(fmt.Sprintf("False(value=%#v) fail", value))
	}
}

// AssertNil asserts on nullability of value.
func AssertNil(value interface{}) {
	AssertEqual(nil, value)
}

// AssertNotNil is opposite to AssertNil.
func AssertNotNil(value interface{}) {
	AssertNotEqual(nil, value)
}
package boom

import (
	"context"

	"go.mercari.io/datastore"
)

// FromContext builds a Boom around the datastore client carried by ctx.
//
// Deprecated: use FromClient instead.
func FromContext(ctx context.Context) (*Boom, error) {
	c, err := datastore.FromContext(ctx)
	if err != nil {
		return nil, err
	}
	return &Boom{Context: ctx, Client: c}, nil
}

// FromClient builds a Boom around an explicitly supplied datastore.Client.
func FromClient(ctx context.Context, client datastore.Client) *Boom {
	return &Boom{Context: ctx, Client: client}
}
package relay

import (
	"fmt"
	"github.com/lishimeng/go-libs/log"
	"github.com/lishimeng/go-libs/stream/serial"
	"io"
	"net"
)

// Worker bridges one TCP connection at a time to a serial port stream.
type Worker struct {
	socks   io.ReadWriteCloser // current TCP connection; replaced on each accept
	ser     io.ReadWriteCloser // serial port stream
	server  net.Listener
	listen  uint16 // TCP port to listen on
	Ser     serial.Config
	bufSize int
}

// New opens the serial port described by serialConf and prepares a Worker
// that will listen on the given TCP port once Start is called. On a failed
// serial connect the Worker is returned alongside the error with ser unset.
func New(serialConf serial.Config, listen uint16) (w *Worker, err error) {
	w = &Worker{
		Ser:     serialConf,
		listen:  listen,
		bufSize: 1024,
	}
	conn := serial.New(&serialConf)
	err = conn.Connect()
	if err != nil {
		return
	}
	w.ser = conn.Ser
	return
}

// Start accepts TCP connections and relays each over the serial stream.
// Connections are served one at a time: each accepted conn replaces w.socks
// and is handled by w.run (defined elsewhere in this package) before the
// next Accept.
// NOTE(review): listen/accept errors end the loop silently without closing
// the listener — callers are expected to invoke Close.
func (w *Worker) Start() {
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", w.listen))
	if err != nil {
		return
	}
	log.Info("start listen: %s", lis.Addr().String())
	w.server = lis
	for {
		conn, err := w.server.Accept()
		if err != nil {
			return
		}
		w.socks = conn
		w.run()
	}
}

// Close shuts down the listener, the active TCP connection (if any) and the
// serial stream; close errors are deliberately ignored.
func (w *Worker) Close() {
	if w.server != nil {
		_ = w.server.Close() // stop accept new connection
	}
	if w.socks != nil {
		_ = w.socks.Close()
	}
	if w.ser != nil {
		_ = w.ser.Close()
	}
}
package bca import "fmt" type ErrorMessage struct { Indonesian string `json:"Indonesian"` English string `json:"English"` } type ErrorResponse struct { ErrorCode string `json:"ErrorCode"` ErrorMessage ErrorMessage `json:"ErrorMessage"` } func (e *ErrorResponse) getMessage() string { return fmt.Sprintf("%s %s", e.ErrorCode, e.ErrorMessage.Indonesian) }
/* Package error provides controllers for various http error codes. */ package error
package token

import (
	"encoding/base64"
	"encoding/json"
	"testing"
	"time"

	"gotest.tools/assert"
)

// encodeClaim marshals a ClaimSet and base64url-encodes it without padding,
// matching the JWT claim-segment format the token helpers expect.
// RawURLEncoding is exactly URLEncoding with the trailing '=' padding
// omitted, replacing the hand-rolled TrimSuffix loops this file repeated.
func encodeClaim(t *testing.T, claim ClaimSet) string {
	t.Helper()
	raw, err := json.Marshal(claim)
	if err != nil {
		t.Fatalf("marshal claim: %v", err)
	}
	return base64.RawURLEncoding.EncodeToString(raw)
}

// TestIsTokenValid covers malformed tokens, expiry, and the happy path.
func TestIsTokenValid(t *testing.T) {
	assert.Equal(t, false, IsTokenValid(""), "Empty token is declared as valid")
	assert.Equal(t, false, IsTokenValid(".."), "Token with three empty parts is declared as valid")
	assert.Equal(t, false, IsTokenValid(".a."), "Token with undecodable rawClaims is declared as valid")
	assert.Equal(t, false, IsTokenValid("..a"), "Token with undecodable signature is declared as valid")

	expired := encodeClaim(t, ClaimSet{
		Expiration: time.Now().Add(-time.Minute).Unix(),
	})
	assert.Equal(t, false, IsTokenValid("."+expired+"."), "Expired token is declared as valid")

	valid := encodeClaim(t, ClaimSet{
		Expiration: time.Now().Add(time.Hour).Unix(),
	})
	assert.Equal(t, true, IsTokenValid("."+valid+"."), "Valid token is declared as invalid")
}

// TestGetAccountID checks that the Hasura account ID claim is parsed to int.
func TestGetAccountID(t *testing.T) {
	encoded := encodeClaim(t, ClaimSet{
		Hasura: Hasura{
			AccountID: "1",
		},
	})
	accountID, err := GetAccountID("." + encoded + ".")
	assert.NilError(t, err, "Error getting Account ID from Valid Token")
	assert.Equal(t, 1, accountID, "Wrong accountID returned")
}

// TestGetAccountName checks that the subject claim is returned verbatim.
func TestGetAccountName(t *testing.T) {
	encoded := encodeClaim(t, ClaimSet{
		Subject: "testSubject",
	})
	accountName, err := GetAccountName("." + encoded + ".")
	assert.NilError(t, err, "Error getting AccountName from Valid Token")
	assert.Equal(t, "testSubject", accountName, "Wrong accountName returned")
}
package router

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"github.com/canmor/go_ms_clean_arch/pkg/adapter/outbound"
	"github.com/canmor/go_ms_clean_arch/pkg/adapter/outbound/db"
	"github.com/canmor/go_ms_clean_arch/pkg/domain/blog"
	"github.com/jarcoal/httpmock"
	"github.com/stretchr/testify/assert"
	"log"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"
)

// prepareDB builds an in-memory database with the schema migrated.
// When withRecords is true it seeds one blog row so that id 1 exists.
// Any setup failure aborts via log.Panicf since tests can't proceed.
func prepareDB(withRecords bool) *sql.DB {
	res, err := db.NewInMemory()
	if err != nil {
		log.Panicf("db error: %s", err)
	}
	err = db.Migrate(res)
	if err != nil {
		log.Panicf("db error: %s", err)
	}
	if !withRecords {
		return res
	}
	repo := outbound.NewBlogRepository(res)
	_, err = repo.Save(blog.Blog{Title: "A blog to share", Body: "body goes here...", CreatedAt: time.Now()})
	if err != nil {
		log.Panicf("db error: %s", err)
	}
	return res
}

// TestBlogCreate posts a new blog and verifies the echoed JSON payload
// (id assigned as 1, title and body round-tripped).
func TestBlogCreate(t *testing.T) {
	router := NewRouter(prepareDB(false))
	w := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodPost, "/blogs", strings.NewReader(`{"title":"test", "body":"body"}`))
	router.ServeHTTP(w, req)

	assertions := assert.New(t)
	assertions.Equal(http.StatusCreated, w.Result().StatusCode)
	resp := make(map[string]interface{})
	_ = json.Unmarshal(w.Body.Bytes(), &resp)
	// JSON numbers decode to float64, hence the 1.0 expectation.
	assertions.Equal(1.0, resp["id"])
	assertions.Equal("test", resp["title"])
	assertions.Equal("body", resp["body"])
}

// TestBlogShare stubs the URL-shortener API with httpmock and verifies the
// share endpoint surfaces the shortcut produced by the stub.
func TestBlogShare(t *testing.T) {
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	const shortURL = "https://s.url/j18kNmic2M6"
	responder := httpmock.NewStringResponder(201, fmt.Sprintf(`{"shortcut":%q}`, shortURL))
	httpmock.RegisterResponder(http.MethodPost, "https://api.s.url/short", responder)

	router := NewRouter(prepareDB(true))
	w := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/blogs/1/share", nil)
	router.ServeHTTP(w, req)

	assertions := assert.New(t)
	assertions.Equal(http.StatusOK, w.Result().StatusCode)
	resp := make(map[string]interface{})
	_ = json.Unmarshal(w.Body.Bytes(), &resp)
	assertions.Equal(shortURL, resp["shortcut"])
}
package database

import "github.com/eliquious/core"

// SignInRequestStore stores all the user requests.
type SignInRequestStore interface {
	SignIn(id, secret, csrf string, pubkey []byte) error
}

// NewSignInRequestStore creates a new sign-in request store backed by the
// given keyspace.
func NewSignInRequestStore(ks core.Keyspace) SignInRequestStore {
	return &signInRequestStore{ks}
}

// signInRequestStore is the keyspace-backed implementation of SignInRequestStore.
type signInRequestStore struct {
	ks core.Keyspace
}

// SignIn records a sign-in request.
// NOTE(review): this is currently a stub — it persists nothing and always
// returns nil (the "Not implemented" error is commented out).
func (s *signInRequestStore) SignIn(id, secret, csrf string, pubkey []byte) error {
	// return fmt.Errorf("Not implemented")
	return nil
}
package main

import (
	"fmt"
	"sync"
)

// WebConfig holds the demo web server settings.
type WebConfig struct {
	Port int
}

var (
	demo *WebConfig
	once sync.Once
)

// GetConfig returns the process-wide WebConfig singleton. sync.Once
// guarantees the initializer runs exactly once even under concurrency,
// giving lock-like safety without an explicit mutex.
func GetConfig() *WebConfig {
	once.Do(func() {
		demo = &WebConfig{Port: 8080}
	})
	return demo
}

// main demonstrates that both calls yield the same instance: mutating the
// Port through one pointer is visible through the other.
func main() {
	first := GetConfig()
	second := GetConfig()
	first.Port = 9090
	fmt.Println(first == second, second)
}
package armstrong func power(base, exponent int) int { if exponent == 0 { return 1 } return base * power(base, exponent - 1) } func lengthOf(n, base int) int { count := 0 for n > 0 { count ++ n /= base } return count } func IsNumber(n int) bool { armstrong, length, copy := 0, lengthOf(n, 10), n for n > 0 { digit := n % 10 armstrong += power(digit, length) n /= 10 } return armstrong == copy }
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
	"gopkg.in/yaml.v2"
)

// Team is one team entry from the YAML configuration.
type Team struct {
	Name         string   `yaml:"name"`
	Members      []string `yaml:"members"`
	Repositories []string `yaml:"repositories"`
}

// Config is the top-level YAML document: an organization plus its teams.
type Config struct {
	Organization string `yaml:"organization"`
	Teams        Teams  `yaml:"teams"`
}

type Teams []Team

// Lookup returns the team with the given name and whether it was found.
func (t Teams) Lookup(name string) (Team, bool) {
	for _, team := range t {
		if team.Name == name {
			return team, true
		}
	}
	return Team{}, false
}

var githubToken = flag.String(
	"github-token",
	"",
	"github access token",
)

var organizationFile = flag.String(
	"organization-file",
	"",
	"file containing organization configuration",
)

// main reconciles the GitHub organization's teams, team memberships and team
// repositories with the YAML configuration: teams missing on GitHub are
// created, teams absent from the config are deleted (except "Owners"), and
// existing teams have members/repos added or removed to match.
func main() {
	flag.Parse()

	if *organizationFile == "" {
		println("must specify --organization-file")
		os.Exit(1)
		return
	}

	if *githubToken == "" {
		println("must specify --github-token")
		os.Exit(1)
		return
	}

	ts := oauth2.StaticTokenSource(
		&oauth2.Token{
			AccessToken: *githubToken,
		},
	)
	tc := oauth2.NewClient(oauth2.NoContext, ts)

	client := github.NewClient(tc)

	contents, err := ioutil.ReadFile(*organizationFile)
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}

	var config Config
	err = yaml.Unmarshal(contents, &config)
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}

	orgName := config.Organization
	if orgName == "" {
		log.Println("you must specify an organization in your yaml")
		os.Exit(1)
	}

	var toAdd []Team
	var toDelete []github.Team
	var toUpdate []github.Team

	// NOTE(review): err from ListTeams is never checked here — on an API
	// failure teams is nil and every configured team is treated as new.
	teams, _, err := client.Organizations.ListTeams(orgName, nil)
	for _, team := range teams {
		// The implicit Owners team is never managed by this tool.
		if *team.Name == "Owners" {
			continue
		}

		fmt.Println("> Syncing team", *team.Name)

		if _, found := config.Teams.Lookup(*team.Name); found {
			toUpdate = append(toUpdate, team)
			continue
		} else {
			toDelete = append(toDelete, team)
			continue
		}
	}

	for _, team := range config.Teams {
		if !hasTeam(teams, team.Name) {
			toAdd = append(toAdd, team)
		}
	}

	for _, team := range toAdd {
		fmt.Printf(" > Creating team '%s'...\n", team.Name)

		createdTeam, _, err := client.Organizations.CreateTeam(orgName, &github.Team{
			Name:       github.String(team.Name),
			Permission: github.String("push"),
		})
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}

		// Newly created teams still need members/repos, so update them too.
		toUpdate = append(toUpdate, *createdTeam)
	}

	for _, team := range toUpdate {
		configuredTeam, _ := config.Teams.Lookup(*team.Name)

		// TODO pagination
		existingMembers, _, err := client.Organizations.ListTeamMembers(*team.ID, &github.OrganizationListTeamMembersOptions{})
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}

		// Start with every configured member; remove the ones already
		// present, leaving the set that must be added.
		membersToAdd := map[string]bool{}
		for _, memberName := range configuredTeam.Members {
			membersToAdd[memberName] = true
		}

		membersToRemove := []github.User{}
		for _, member := range existingMembers {
			if membersToAdd[*member.Login] {
				delete(membersToAdd, *member.Login)
			} else {
				membersToRemove = append(membersToRemove, member)
			}
		}

		for memberName, _ := range membersToAdd {
			fmt.Printf(" > Adding member '%s' to '%s'...\n", memberName, *team.Name)
			_, _, err := client.Organizations.AddTeamMembership(*team.ID, memberName, nil)
			if err != nil {
				log.Println(err)
				os.Exit(1)
			}
		}

		for _, member := range membersToRemove {
			fmt.Printf(" > Removing member '%s' from '%s'...\n", *member.Login, *team.Name)
			_, err := client.Organizations.RemoveTeamMembership(*team.ID, *member.Login)
			if err != nil {
				log.Println(err)
				os.Exit(1)
			}
		}

		// lol
		// TODO pagination
		existingRepos, _, err := client.Organizations.ListTeamRepos(*team.ID, &github.ListOptions{})
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}

		// Same add/remove diff as for members, applied to repositories.
		reposToAdd := map[string]bool{}
		for _, repoName := range configuredTeam.Repositories {
			reposToAdd[repoName] = true
		}

		reposToRemove := []github.Repository{}
		for _, repo := range existingRepos {
			if reposToAdd[*repo.Name] {
				delete(reposToAdd, *repo.Name)
			} else {
				reposToRemove = append(reposToRemove, repo)
			}
		}

		for repoName, _ := range reposToAdd {
			fmt.Printf(" > Adding repository '%s' to '%s'...\n", repoName, *team.Name)
			_, err := client.Organizations.AddTeamRepo(*team.ID, orgName, repoName, &github.OrganizationAddTeamRepoOptions{
				Permission: "push",
			})
			if err != nil {
				log.Println(err)
				os.Exit(1)
			}
		}

		for _, repo := range reposToRemove {
			fmt.Printf(" > Removing repository '%s' from '%s'...\n", *repo.Name, *team.Name)
			_, err := client.Organizations.RemoveTeamRepo(*team.ID, orgName, *repo.Name)
			if err != nil {
				log.Println(err)
				os.Exit(1)
			}
		}
	}

	for _, team := range toDelete {
		fmt.Printf(" > Removing team '%s'...\n", *team.Name)
		_, err := client.Organizations.DeleteTeam(*team.ID)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
	}
}

// hasTeam reports whether a team with the given name exists in teams.
func hasTeam(teams []github.Team, name string) bool {
	for _, team := range teams {
		if *team.Name == name {
			return true
		}
	}
	return false
}
package classfile // ConstantClassInfo // class_info /** Class_info{ tag u1 index u2 指向全限定名常量的索引 } */ type ConstantClassInfo struct { cp ConstantPool // 常量池句柄 nameIndex uint16 //指向全限定名常量的索引 } func (self *ConstantClassInfo) readInfo(reader *ClassReader) { self.nameIndex = reader.readUint16() } // Name // 获取类名字符串 func (self *ConstantClassInfo) Name() string { return self.cp.getUtf8(self.nameIndex) }
// Command http-beat delegates all work to the cmd package's command tree.
package main

import (
	"github.com/stefanoguerrini/http-beat/cmd"
)

// main hands control to cmd.Execute, which parses flags and runs the tool.
func main() {
	cmd.Execute()
}
package http

import (
	"net/http"

	"github.com/gorilla/websocket"
	"github.com/julienschmidt/httprouter"
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	// NOTE(review): CheckOrigin accepts every origin, which disables
	// cross-origin protection for this endpoint — confirm this is intended.
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}

// Upgrade switches the HTTP connection to a WebSocket and then pumps
// messages through h.Messanger until a read fails, at which point the
// connection is closed and the handler returns.
func (h *Handler) Upgrade(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		h.responseErr(w, err)
		return
	}

	for {
		err = h.Messanger.ReadMessage(conn)
		if err != nil {
			// Any read error (including a normal client close) ends the loop.
			conn.Close()
			break
		}
	}
}
/* Package greeting implements a single function that returns a greeting */ package greeting // Return a hello world greeting func HelloWorld() string { return "Hello, World!"; }
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package policygen

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"path/filepath"
	"sort"
	"strings"

	"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/runner"
	"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/template"
	"github.com/GoogleCloudPlatform/healthcare-data-protection-suite/internal/terraform"
	"github.com/hashicorp/terraform/states"
)

// root identifies an IAM root resource: Type is "organization", "folder" or
// "project"; ID is the numeric resource identifier.
type root struct {
	Type string
	ID   string
}

// All IAM members associated with a single role.
type roleBindings map[string][]string

// generateIAMPolicies renders one "allowed roles" policy plus one
// "allowed bindings" policy per role for every root resource found in the
// Terraform state, writing YAML files under outputPath.
func generateIAMPolicies(rn runner.Runner, resources []*states.Resource, outputPath, templateDir string) error {
	bindings, err := allBindings(rn, resources)
	if err != nil {
		return err
	}

	for root, rbs := range bindings {
		outputFolder := fmt.Sprintf("%s_%s", root.Type, root.ID)

		// Generate policies for allowed roles.
		data := map[string]interface{}{
			// organizations/1234, folders/1234, projects/1234
			"target": fmt.Sprintf("%ss/%s", root.Type, root.ID),
			"roles":  rbs,
			// Also prepend type and id in the policy name to make it unique across multiple policies for the same role.
			"suffix": fmt.Sprintf("%s_%s", root.Type, root.ID),
		}
		in := filepath.Join(templateDir, "forseti", "tf_based", "iam_allow_roles.yaml")
		out := filepath.Join(outputPath, outputFolder, "iam_allow_roles.yaml")
		if err := template.WriteFile(in, out, data); err != nil {
			return err
		}

		// Generate policies for allowed bindings for each role.
		for role, members := range rbs {
			// Removes any prefix before the role name ('roles/' or 'projects/<my-project>/roles/' for a custom role).
			// Prepend 'custom_' if custom role.
			// Replaces '.' with '_' and turns each character to lower case.
			// E.g. roles/orgpolicy.policyViewer --> orgpolicy_policyviewer
			// projects/<my-project>/roles/osLoginProjectGet_6afd --> custom_osloginprojectget_6afd
			suffix := role
			// Predefined roles, e.g. roles/orgpolicy.policyViewer.
			if strings.HasPrefix(suffix, "roles/") {
				suffix = strings.TrimPrefix(suffix, "roles/")
			} else {
				// Custom roles, e.g. projects/<my-project>/roles/osLoginProjectGet_6afd.
				segs := strings.Split(suffix, "/")
				suffix = "custom_" + segs[len(segs)-1]
			}
			suffix = strings.ToLower(strings.Replace(suffix, ".", "_", -1))

			data := map[string]interface{}{
				// organizations/1234, folders/1234, projects/1234
				"target": fmt.Sprintf("%ss/%s", root.Type, root.ID),
				// Also prepend type and id in the policy name to make it unique across multiple policies for the same role.
				"suffix":  fmt.Sprintf("%s_%s_%s", root.Type, root.ID, suffix),
				"role":    role,
				"members": members,
			}
			in := filepath.Join(templateDir, "forseti", "tf_based", "iam_allow_bindings.yaml")
			out := filepath.Join(outputPath, outputFolder, fmt.Sprintf("iam_allow_bindings_%s.yaml", suffix))
			if err := template.WriteFile(in, out, data); err != nil {
				return err
			}
		}
	}
	return nil
}

// allBindings merges non-authoritative iam_member and authoritative
// iam_binding resources across projects, folders and organizations;
// authoritative bindings replace member lists for the same root+role.
func allBindings(rn runner.Runner, resources []*states.Resource) (map[root]roleBindings, error) {
	// All roles associated with a root resource (organization, folder or project).
	var allBindings = make(map[root]roleBindings)
	typeToIDField := map[string]string{
		"project":      "project",
		"folder":       "folder",
		"organization": "org_id",
	}
	for rootType, idField := range typeToIDField {
		iamMembers, err := members(rn, resources, rootType, idField)
		if err != nil {
			return nil, err
		}
		iamBindings, err := bindings(rn, resources, rootType, idField)
		if err != nil {
			return nil, err
		}

		// Add iamBindings to iamMembers.
		// If iamMembers have members for the same root and role, replace it with the value from iamBindings.
		for root, bindings := range iamBindings {
			for role, members := range bindings {
				// Init the roleBindings map if it didn't exist.
				if _, ok := iamMembers[root]; !ok {
					iamMembers[root] = make(roleBindings)
				}
				iamMembers[root][role] = members
			}
		}
		for root, bindings := range iamMembers {
			for role, members := range bindings {
				// Remove duplicated members for the same role.
				bindings[role] = unique(members)
			}
			allBindings[root] = bindings
		}
	}
	return allBindings, nil
}

// unique returns the sorted, de-duplicated copy of in.
func unique(in []string) []string {
	keys := make(map[string]bool)
	var out []string
	for _, s := range in {
		if _, exists := keys[s]; !exists {
			keys[s] = true
			out = append(out, s)
		}
	}
	sort.Strings(out)
	return out
}

// members returns role bindings map for google_%s_iam_member (non-authoritative).
func members(rn runner.Runner, resources []*states.Resource, rootType, idField string) (map[root]roleBindings, error) {
	var bindings = make(map[root]roleBindings)
	resourceType := fmt.Sprintf("google_%s_iam_member", rootType) // non-authoritative
	instances, err := terraform.GetInstancesForType(resources, resourceType)
	if err != nil {
		return nil, fmt.Errorf("get resource instances for type %q: %v", resourceType, err)
	}
	for _, ins := range instances {
		if err := validateMandatoryStringFields(ins, []string{idField, "role", "member"}); err != nil {
			return nil, err
		}
		id, err := normalizeID(rn, rootType, ins[idField].(string)) // Type checked in validate function.
		if err != nil {
			return nil, fmt.Errorf("normalize root resource ID: %v", err)
		}
		key := root{Type: rootType, ID: id}
		// Init the roleBindings map if it didn't exist.
		if _, ok := bindings[key]; !ok {
			bindings[key] = make(roleBindings)
		}
		role := ins["role"].(string)
		bindings[key][role] = append(bindings[key][role], ins["member"].(string))
	}
	return bindings, nil
}

// bindings returns role bindings map for google_%s_iam_binding (authoritative).
func bindings(rn runner.Runner, resources []*states.Resource, rootType, idField string) (map[root]roleBindings, error) {
	var bindings = make(map[root]roleBindings)
	resourceType := fmt.Sprintf("google_%s_iam_binding", rootType) // authoritative for a given role
	instances, err := terraform.GetInstancesForType(resources, resourceType)
	if err != nil {
		return nil, fmt.Errorf("get resource instances for type %q: %v", resourceType, err)
	}
	for _, ins := range instances {
		if err := validateMandatoryStringFields(ins, []string{idField, "role"}); err != nil {
			return nil, err
		}
		if err := validateMandatoryStringLists(ins, []string{"members"}); err != nil {
			return nil, err
		}
		id, err := normalizeID(rn, rootType, ins[idField].(string)) // Type checked in validate function.
		if err != nil {
			return nil, fmt.Errorf("normalize root resource ID: %v", err)
		}
		key := root{Type: rootType, ID: id}
		// Init the roleBindings map if it didn't exist.
		if _, ok := bindings[key]; !ok {
			bindings[key] = make(roleBindings)
		}
		role := ins["role"].(string)
		var members []string
		for _, s := range ins["members"].([]interface{}) {
			members = append(members, s.(string)) // Type checked in validate function.
		}
		// There should not be more than one instance of google_%s_iam_binding for the same resource
		// across all states. But we append all members just in case.
		bindings[key][role] = append(bindings[key][role], members...)
	}
	return bindings, nil
}

// validateMandatoryStringFields checks the presence of mandatory fields and assert string type.
func validateMandatoryStringFields(instance map[string]interface{}, mandatoryFields []string) error {
	for _, k := range mandatoryFields {
		field, ok := instance[k]
		if !ok {
			return fmt.Errorf("mandatory field %q missing from instance: %v", k, instance)
		}
		if _, ok := field.(string); !ok {
			return fmt.Errorf("value for %q should be a string, got %T", k, field)
		}
	}
	return nil
}

// validateMandatoryStringLists checks the presence of mandatory fields and asserts
// []interface{} type whose elements are all strings.
func validateMandatoryStringLists(instance map[string]interface{}, mandatoryFields []string) error {
	for _, k := range mandatoryFields {
		field, ok := instance[k]
		if !ok {
			return fmt.Errorf("mandatory field %q missing from instance: %v", k, instance)
		}
		lst, ok := field.([]interface{})
		if !ok {
			return fmt.Errorf("value for %q should be a []interface{}, got %T", k, field)
		}
		for _, s := range lst {
			if _, ok := s.(string); !ok {
				return fmt.Errorf("%q should be a string, got %T", s, s)
			}
		}
	}
	return nil
}

// normalizeID converts a state-file resource ID into the canonical form used
// in policies: project IDs become project numbers, folder IDs lose any
// "folders/" prefix, organization IDs pass through unchanged.
func normalizeID(rn runner.Runner, t, id string) (string, error) {
	var err error
	nid := id
	// For projects, the ID in the state is the project ID, but we need project number in policies.
	if t == "project" {
		if nid, err = projectNumber(rn, id); err != nil {
			return "", err
		}
	} else if t == "folder" {
		// For folders, the ID can be either {folder_id} or folders/{folder_id}. Remove the 'folders/' prefix if exists.
		nid = strings.TrimPrefix(id, "folders/")
	}
	return nid, nil
}

// projectNumber resolves a project ID to its numeric project number by
// shelling out to `gcloud projects describe` and parsing the JSON output.
func projectNumber(rn runner.Runner, id string) (string, error) {
	cmd := exec.Command("gcloud", "projects", "describe", id, "--format", "json")
	out, err := rn.CmdOutput(cmd)
	if err != nil {
		return "", fmt.Errorf("failed to get project number for project %q: %v", id, err)
	}
	var p struct {
		ProjectNumber string `json:"projectNumber"`
	}
	if err := json.Unmarshal(out, &p); err != nil {
		return "", fmt.Errorf("failed to parse project number from gcloud output: %v", err)
	}
	if p.ProjectNumber == "" {
		return "", fmt.Errorf("project number is empty")
	}
	return p.ProjectNumber, nil
}
package main

import (
	"bufio"
	"encoding/csv"
	"fmt"
	"io"
	"log"
	"os"
	"strconv"

	"github.com/cheggaaa/pb"
	"github.com/janritter/go-geo-ip/geoip"
)

// blockedIP is one enriched output row: a blocked address plus the geo data
// resolved for it.
type blockedIP struct {
	IP        string
	Country   string
	Latitude  float64
	Longitude float64
}

// runBlockLog reads a CSV of blocked IPs (first column = IP address),
// resolves geo information for each address, and writes the enriched rows
// to block_log_enhanced.csv. IPs that fail geo lookup are skipped.
func runBlockLog() {
	fmt.Println("Input blockfile filename: ")
	filename := ""
	fmt.Scanf("%s", &filename)

	csvFile, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	// BUGFIX: defer only after a successful Open; the previous code deferred
	// Close before the error check and would dereference a nil *os.File.
	defer csvFile.Close()

	reader := csv.NewReader(bufio.NewReader(csvFile))

	// Start new Progressbar sized by the number of input lines.
	count, _ := lineCounter(csvFile)
	bar := pb.StartNew(count)

	// Reset line pointer after line counting (the bufio reader has not read
	// yet, so rewinding the underlying file is safe here).
	csvFile.Seek(0, 0) // reset/rewind back to offset and whence 0 0

	var blockedIps []blockedIP
	for {
		line, readErr := reader.Read()
		if readErr == io.EOF {
			break
		} else if readErr != nil {
			log.Fatal(readErr)
		}

		geoIpData, err := geoip.ForIP(line[0])
		// Only save successfully resolved IPs.
		if err == nil {
			blockedIps = append(blockedIps, blockedIP{
				IP:        line[0],
				Country:   geoIpData.CountryName,
				Latitude:  geoIpData.Latitude,
				Longitude: geoIpData.Longitude,
			})
		}
		bar.Increment()
	}

	// Write the enriched rows to the result CSV.
	resultFile, err := os.Create("block_log_enhanced.csv")
	if err != nil {
		log.Fatal(err)
	}
	// BUGFIX: same defer-before-error-check issue as above.
	defer resultFile.Close()

	csvWriter := csv.NewWriter(resultFile)
	defer csvWriter.Flush()

	// Header row.
	if err := csvWriter.Write([]string{"IP", "Country", "Latitude", "Longitude"}); err != nil {
		log.Fatal(err)
	}

	for _, entry := range blockedIps {
		record := []string{
			entry.IP,
			entry.Country,
			strconv.FormatFloat(entry.Latitude, 'f', 5, 64),
			strconv.FormatFloat(entry.Longitude, 'f', 5, 64),
		}
		if err := csvWriter.Write(record); err != nil {
			log.Fatal(err)
		}
	}

	bar.FinishPrint("Finished enhancing!")
}
// This file was generated for SObject Contract, API Version v43.0 at 2018-07-30 03:47:32.202844694 -0400 EDT m=+18.546147034 package sobjects import ( "fmt" "strings" ) type Contract struct { BaseSObject AccountId string `force:",omitempty"` ActivatedById string `force:",omitempty"` ActivatedDate string `force:",omitempty"` BillingAddress *Address `force:",omitempty"` BillingCity string `force:",omitempty"` BillingCountry string `force:",omitempty"` BillingGeocodeAccuracy string `force:",omitempty"` BillingLatitude float64 `force:",omitempty"` BillingLongitude float64 `force:",omitempty"` BillingPostalCode string `force:",omitempty"` BillingState string `force:",omitempty"` BillingStreet string `force:",omitempty"` CompanySignedDate string `force:",omitempty"` CompanySignedId string `force:",omitempty"` ContractNumber string `force:",omitempty"` ContractTerm int `force:",omitempty"` CreatedById string `force:",omitempty"` CreatedDate string `force:",omitempty"` CustomerSignedDate string `force:",omitempty"` CustomerSignedId string `force:",omitempty"` CustomerSignedTitle string `force:",omitempty"` Description string `force:",omitempty"` EndDate string `force:",omitempty"` Id string `force:",omitempty"` IsDeleted bool `force:",omitempty"` LastActivityDate string `force:",omitempty"` LastApprovedDate string `force:",omitempty"` LastModifiedById string `force:",omitempty"` LastModifiedDate string `force:",omitempty"` LastReferencedDate string `force:",omitempty"` LastViewedDate string `force:",omitempty"` OwnerExpirationNotice string `force:",omitempty"` OwnerId string `force:",omitempty"` Pricebook2Id string `force:",omitempty"` SpecialTerms string `force:",omitempty"` StartDate string `force:",omitempty"` Status string `force:",omitempty"` StatusCode string `force:",omitempty"` SystemModstamp string `force:",omitempty"` } func (t *Contract) ApiName() string { return "Contract" } func (t *Contract) String() string { builder := strings.Builder{} 
builder.WriteString(fmt.Sprintf("Contract #%s - %s\n", t.Id, t.Name)) builder.WriteString(fmt.Sprintf("\tAccountId: %v\n", t.AccountId)) builder.WriteString(fmt.Sprintf("\tActivatedById: %v\n", t.ActivatedById)) builder.WriteString(fmt.Sprintf("\tActivatedDate: %v\n", t.ActivatedDate)) builder.WriteString(fmt.Sprintf("\tBillingAddress: %v\n", t.BillingAddress)) builder.WriteString(fmt.Sprintf("\tBillingCity: %v\n", t.BillingCity)) builder.WriteString(fmt.Sprintf("\tBillingCountry: %v\n", t.BillingCountry)) builder.WriteString(fmt.Sprintf("\tBillingGeocodeAccuracy: %v\n", t.BillingGeocodeAccuracy)) builder.WriteString(fmt.Sprintf("\tBillingLatitude: %v\n", t.BillingLatitude)) builder.WriteString(fmt.Sprintf("\tBillingLongitude: %v\n", t.BillingLongitude)) builder.WriteString(fmt.Sprintf("\tBillingPostalCode: %v\n", t.BillingPostalCode)) builder.WriteString(fmt.Sprintf("\tBillingState: %v\n", t.BillingState)) builder.WriteString(fmt.Sprintf("\tBillingStreet: %v\n", t.BillingStreet)) builder.WriteString(fmt.Sprintf("\tCompanySignedDate: %v\n", t.CompanySignedDate)) builder.WriteString(fmt.Sprintf("\tCompanySignedId: %v\n", t.CompanySignedId)) builder.WriteString(fmt.Sprintf("\tContractNumber: %v\n", t.ContractNumber)) builder.WriteString(fmt.Sprintf("\tContractTerm: %v\n", t.ContractTerm)) builder.WriteString(fmt.Sprintf("\tCreatedById: %v\n", t.CreatedById)) builder.WriteString(fmt.Sprintf("\tCreatedDate: %v\n", t.CreatedDate)) builder.WriteString(fmt.Sprintf("\tCustomerSignedDate: %v\n", t.CustomerSignedDate)) builder.WriteString(fmt.Sprintf("\tCustomerSignedId: %v\n", t.CustomerSignedId)) builder.WriteString(fmt.Sprintf("\tCustomerSignedTitle: %v\n", t.CustomerSignedTitle)) builder.WriteString(fmt.Sprintf("\tDescription: %v\n", t.Description)) builder.WriteString(fmt.Sprintf("\tEndDate: %v\n", t.EndDate)) builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id)) builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted)) 
builder.WriteString(fmt.Sprintf("\tLastActivityDate: %v\n", t.LastActivityDate)) builder.WriteString(fmt.Sprintf("\tLastApprovedDate: %v\n", t.LastApprovedDate)) builder.WriteString(fmt.Sprintf("\tLastModifiedById: %v\n", t.LastModifiedById)) builder.WriteString(fmt.Sprintf("\tLastModifiedDate: %v\n", t.LastModifiedDate)) builder.WriteString(fmt.Sprintf("\tLastReferencedDate: %v\n", t.LastReferencedDate)) builder.WriteString(fmt.Sprintf("\tLastViewedDate: %v\n", t.LastViewedDate)) builder.WriteString(fmt.Sprintf("\tOwnerExpirationNotice: %v\n", t.OwnerExpirationNotice)) builder.WriteString(fmt.Sprintf("\tOwnerId: %v\n", t.OwnerId)) builder.WriteString(fmt.Sprintf("\tPricebook2Id: %v\n", t.Pricebook2Id)) builder.WriteString(fmt.Sprintf("\tSpecialTerms: %v\n", t.SpecialTerms)) builder.WriteString(fmt.Sprintf("\tStartDate: %v\n", t.StartDate)) builder.WriteString(fmt.Sprintf("\tStatus: %v\n", t.Status)) builder.WriteString(fmt.Sprintf("\tStatusCode: %v\n", t.StatusCode)) builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp)) return builder.String() } type ContractQueryResponse struct { BaseQuery Records []Contract `json:"Records" force:"records"` }
package net

import (
	"Zinx/Project/Zinx/v3-Request/zinx/iface"
	"fmt"
	"net"
	"strings"
)

// Server is the TCP server implementation behind the iface.Iserver interface.
type Server struct {
	IP         string
	Port       uint32
	Name       string
	TCPVersion string
}

// NewServer creates a Server listening on all interfaces at port 8848.
// Returning the iface.Iserver interface gives callers polymorphism over
// server implementations.
func NewServer(name string) iface.Iserver {
	return &Server{
		IP:         "0.0.0.0",
		Port:       8848,
		Name:       name,
		TCPVersion: "tcp4", // one of tcp, tcp4, tcp6
	}
}

// Start resolves the listen address, opens the TCP listener, and accepts
// connections in a background goroutine. Each accepted connection is wrapped
// in a Connection (with Userbussiness as its handler) that runs in its own
// goroutine, so Start itself returns immediately after spawning the acceptor.
func (s *Server) Start() {
	fmt.Println("Server start...")
	addr := fmt.Sprintf("%s:%d", s.IP, s.Port)
	// Create the socket and start listening.
	tcpaddr, err := net.ResolveTCPAddr(s.TCPVersion, addr) // build the canonical address for the listener
	if err != nil {
		fmt.Println("ResolveTCPAddr err", err)
		return
	}
	TCPListener, err := net.ListenTCP(s.TCPVersion, tcpaddr) // begin listening
	if err != nil {
		fmt.Println("ListenTCP err", err)
		return
	}
	var connId uint32
	// Accept loop runs in its own goroutine.
	// NOTE(review): connId is only mutated inside this single goroutine, so
	// there is no race here, but the listener is never closed by Stop.
	go func() {
		for {
			Tcpconn, err := TCPListener.AcceptTCP()
			if err != nil {
				fmt.Println("AcceptTCP err", err)
				return
			}
			fmt.Println("连接建立成功")
			// Wrap the raw conn in our Connection type.
			connId++
			conn := Newconn(Tcpconn, connId, Userbussiness) // create the underlying connection
			// Connection.Start encapsulates the read/write handling: it
			// receives client data and returns it upper-cased.
			go conn.Start()
		}
	}()
}

// Userbussiness is the prototype callback handed to every Connection: it
// upper-cases the received data and sends it back to the client.
func Userbussiness(request iface.IRequest) {
	data := request.GetData() // accessor provided by the Request interface
	Conn := request.GetConn()
	fmt.Println("Userbussiness called,data:", string(data))
	DATA := strings.ToUpper(string(data)) // convert to upper case
	// Send the response back over the same connection.
	Conn.Send(DATA)
}

// Stop shuts the server down. NOTE(review): currently only logs; the
// listener and open connections are not released.
func (s *Server) Stop() {
	fmt.Println("server stop...")
}

// Server runs the server: start the listener, then block forever.
func (s *Server) Server() {
	fmt.Println("server server...")
	s.Start()
	select {}
}
package main

import (
	"fmt"
	"os"
	"strings"
	"time"
)

// main compares the wall-clock cost of naive string doubling via `+=`
// against strings.Join over the program's arguments, printing both
// timings and their difference.
func main() {
	text := "a"
	startConcat := time.Now()
	for round := 1; round < 10; round++ {
		text += text
	}
	concatSecs := time.Since(startConcat).Seconds()
	fmt.Printf("No effective : %.8f sec", concatSecs)
	fmt.Println()

	startJoin := time.Now()
	for round := 1; round < 10; round++ {
		strings.Join(os.Args[1:], "a")
	}
	joinSecs := time.Since(startJoin).Seconds()
	fmt.Printf("Join effective: %.8f sec", joinSecs)
	fmt.Println()

	fmt.Printf("Diff = %.8f sec", concatSecs-joinSecs)
}
package main

import "fmt"

// main reads a principal, an annual interest rate (in percent) and a number
// of years from stdin, then prints the resulting investment value.
//
// NOTE(review): P*(1+(r/100*Y)) is the *simple* interest formula, not
// compound interest — confirm this is the intended calculation.
func main() {
	var P, r, Y float64 // all float64 so they combine without conversions

	// Each Scanf error is now reported instead of silently leaving the
	// variable at zero and producing a nonsense result.
	fmt.Printf("Enter the principal: £")
	if _, err := fmt.Scanf("%f", &P); err != nil {
		fmt.Println("invalid principal:", err)
		return
	}
	fmt.Printf("Enter the rate of interest: ")
	if _, err := fmt.Scanf("%f", &r); err != nil {
		fmt.Println("invalid rate:", err)
		return
	}
	fmt.Printf("Enter the number of years: ")
	if _, err := fmt.Scanf("%f", &Y); err != nil {
		fmt.Println("invalid number of years:", err)
		return
	}

	fmt.Printf("After %.0f years at %.2f%%, the investment will be worth £%.2f\n",
		Y, r, P*(1+(r/100*Y)))
}
package bob

import (
	"regexp"
	"strings"
)

const testVersion = 2

// The patterns are compiled once at package scope instead of on every call
// (the original recompiled them inside matches on each invocation).
var (
	questionRE = regexp.MustCompile(`\?\s*\z`) // ends with a question mark, trailing whitespace allowed
	silenceRE  = regexp.MustCompile(`\A\W*\z`) // nothing but non-word characters
	upperRE    = regexp.MustCompile(`[A-Z]`)   // contains at least one uppercase letter
)

// Hey returns Bob's response to the given phrase: shouting gets "Whoa,
// chill out!", questions get "Sure.", silence gets "Fine. Be that way!",
// and anything else gets "Whatever.".
func Hey(phrase string) string {
	greeting := Greeting{phrase: phrase}
	if greeting.IsShout() {
		return "Whoa, chill out!"
	}
	if greeting.IsQuestion() {
		return "Sure."
	}
	if greeting.IsSilence() {
		return "Fine. Be that way!"
	}
	return "Whatever."
}

// Greeting wraps a phrase said to Bob.
type Greeting struct {
	phrase string
}

// IsShout reports whether the phrase is entirely upper case and contains
// at least one letter to shout with.
func (g Greeting) IsShout() bool {
	return g.containsUppercase() && g.allUppercase()
}

// IsQuestion reports whether the phrase ends with a question mark,
// ignoring trailing whitespace.
func (g Greeting) IsQuestion() bool {
	return questionRE.MatchString(g.phrase)
}

// IsSilence reports whether the phrase contains no word characters at all.
func (g Greeting) IsSilence() bool {
	return silenceRE.MatchString(g.phrase)
}

func (g Greeting) containsUppercase() bool {
	return upperRE.MatchString(g.phrase)
}

func (g Greeting) allUppercase() bool {
	return g.phrase == strings.ToUpper(g.phrase)
}
package main func main() { linkList := &LinkedList{} linkList.push(10) linkList.push(20) linkList.push(30) linkList.push(40) linkList.push(50) linkList.print() linkList.reverse() linkList.print() }
package testdata

import (
	"github.com/frk/gosql/internal/testdata/common"
)

// UpdateWhereblockBasicSingle1Query is a test fixture describing an UPDATE
// query against the "test_user" relation filtered by a single WHERE
// condition on the id column (see the `rel` and `sql` struct tags).
type UpdateWhereblockBasicSingle1Query struct {
	User  *common.User4 `rel:"test_user"`
	Where struct {
		Id int `sql:"id"`
	}
}
package main

import "fmt"

/**
 * author: will fan
 * created: 2019/6/30 17:12
 * description: shows that a nil slice reports zero length and capacity and
 * can be appended to without prior allocation.
 */

func main() {
	var nums []int
	fmt.Println(nums, len(nums), cap(nums))
	nums = append(nums, 10, 20, 30)
	fmt.Println("Slice x:", nums)
}
// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gci import ( "fmt" "sync" "github.com/daixiang0/gci/pkg/config" "github.com/daixiang0/gci/pkg/gci" "golang.org/x/tools/go/analysis" ) // Analyzer is the analyzer struct of gci. var Analyzer = &analysis.Analyzer{ Name: "gci", Doc: "Gci controls golang package import order and makes it always deterministic.", Run: run, } func run(pass *analysis.Pass) (any, error) { fileNames := make([]string, 0, len(pass.Files)) for _, f := range pass.Files { pos := pass.Fset.PositionFor(f.Pos(), false) fileNames = append(fileNames, pos.Filename) } rawCfg := config.YamlConfig{ Cfg: config.BoolConfig{ NoInlineComments: false, NoPrefixComments: false, Debug: false, SkipGenerated: true, }, } cfg, _ := rawCfg.Parse() var diffs []string var lock sync.Mutex err := gci.DiffFormattedFilesToArray(fileNames, *cfg, &diffs, &lock) if err != nil { return nil, err } for _, diff := range diffs { if diff == "" { continue } pass.Report(analysis.Diagnostic{ Pos: 1, Message: fmt.Sprintf("\n%s", diff), }) } return nil, nil }
package main

import (
	_ "embed"
	"os"
	"text/template"
)

// START DATA OMIT

var data = struct {
	Company   string
	Employees []string
}{
	"Weave",
	[]string{"Carson", "Kari", "Tami"},
}

// END DATA OMIT

const templateText = `
{{- "" -}}
-Company Report-
{{- $num := len .Employees }}
{{- $msg := "" }}
{{- if eq $num 1 }}
{{- $msg = "is impossible" }}
{{- else if eq $num 2 }}
{{- $msg = "is dreary" }}
{{- else if ge $num 3 }}
{{- $msg = "is company; safe and cheery" }}
{{- end }}
{{ printf "%s has %d employees" .Company $num }}
{{ printf "%d %s" $num $msg | printf "> %q" }}
{{- "" -}}
`

const templateTextView = `
{{- "" -}}
// START TEMPLATE OMIT
-Company Report-
{{- $num := len .Employees }}
{{- $msg := getMessage $num }} // HL
{{ printf "%s has %d employees" .Company $num }}
{{ printf "%d %s" $num $msg | printf "> %q" }}
// END TEMPLATE OMIT
{{- "" -}}
`

// START CODE OMIT

// getMessage mirrors the message selection embedded in templateText:
// one employee is impossible, two is dreary, three or more is company.
func getMessage(num int) string {
	switch {
	case num == 1:
		return "is impossible" // fixed typo ("impossbile") that disagreed with templateText
	case num == 2:
		return "is dreary"
	case num >= 3: // templateText uses `ge $num 3`, not an exact match on 3
		return "is company; safe and cheery"
	}
	return ""
}

func main() {
	funcMap := map[string]interface{}{
		"getMessage": getMessage,
	}

	tmpl := template.Must(
		template.New("variables").
			Funcs(funcMap).
			Parse(templateText),
	)

	// Template and data are static, so any execution failure is a bug —
	// surface it instead of discarding the error.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

// END CODE OMIT
package handlers

import (
	"encoding/json"
	"net/http"
	"regexp"
	"strconv"
	"time"

	"github.com/nothingmuch/repricer/errors"
)

// Query constructs a new query price endpoint with the given storage model
func Query(m PriceLogRetriever) http.Handler { return query{m} }

// PriceLogRetriever defines an interface for fetching historical price data
type PriceLogRetriever interface {
	PriceLog(
		productId string,
		startTime, endTime time.Time,
		offset int64,
		limit int,
	) (
		[]struct { // TODO make Entry/Record types public?
			ProductId string
			Price     json.Number
			Timestamp time.Time
		},
		error)
}

// query adapts a PriceLogRetriever into an http.Handler via embedding.
type query struct{ PriceLogRetriever }

// queryPath matches the query endpoint under the package's basePath
// (basePath is declared elsewhere in this package).
var queryPath = regexp.MustCompile(basePath.String() + `query`)

// ServeHTTP handles GET requests to the query endpoint: it validates the
// query parameters (productId, pagesize, pageNumber, from, to), converts
// the page info into an offset/limit pair, fetches matching price-log
// entries from the retriever, and writes them out as indented JSON.
func (s query) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// TODO rate limiting
	if !queryPath.MatchString(req.URL.Path) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}

	if req.Method != "GET" {
		http.Error(w, "method must be GET", http.StatusBadRequest)
		return
	}

	// Validate inputs
	// TODO error on multiple values, except for possibly productId, which
	// can be handled as a union (disjoint union, which makes things easier)
	params := req.URL.Query()
	var productId string
	var pageSize, pageNumber int // TODO int64? disallow negative values?
	var startTime, endTime time.Time
	var inputErrors error
	var err error

	// Each parameter is taken only when it appears exactly once; parse
	// failures are accumulated into inputErrors rather than failing fast.
	if v, exists := params["productId"]; exists && len(v) == 1 {
		productId = v[0]
	}

	if v, exists := params["pagesize"]; exists && len(v) == 1 { // note inconsistent capitalization
		pageSize, err = strconv.Atoi(v[0])
		errors.Collect(&inputErrors, err)
	}

	if v, exists := params["pageNumber"]; exists && len(v) == 1 {
		pageNumber, err = strconv.Atoi(v[0])
		errors.Collect(&inputErrors, err)
	}

	if v, exists := params["from"]; exists && len(v) == 1 {
		startTime, err = time.Parse(time.RFC3339, v[0])
		errors.Collect(&inputErrors, err)
	}

	if v, exists := params["to"]; exists && len(v) == 1 {
		endTime, err = time.Parse(time.RFC3339, v[0])
		errors.Collect(&inputErrors, err)
	}

	if inputErrors != nil {
		http.Error(w, inputErrors.Error(), http.StatusBadRequest) // FIXME sanitize errors
		return
	}

	// An unspecified page size falls back to a default.
	if pageSize == 0 {
		pageSize = 25 // FIXME arbitrary
	}

	// convert pagination information to more convenient representation for data
	// (pageNumber is 1-based; a missing or non-positive page clamps to the start)
	offset := int64(pageNumber-1) * int64(pageSize)
	if offset < 0 {
		offset = 0
	}
	limit := pageSize

	entries, err := s.PriceLog(productId, startTime, endTime, offset, limit)
	if err != nil {
		http.Error(w, "internal error", http.StatusInternalServerError) // TODO log err
		return
	}

	// Re-shape entries into the wire format. epochTime is declared elsewhere
	// in this package — presumably it marshals as seconds since the epoch;
	// confirm against its MarshalJSON.
	body := make([]struct {
		ProductId string      `json:"productId"`
		Price     json.Number `json:"price"`
		Timestamp epochTime   `json:"timestamp"`
	}, len(entries))
	for i, ent := range entries {
		body[i].ProductId = ent.ProductId
		body[i].Price = ent.Price
		body[i].Timestamp = epochTime(ent.Timestamp)
	}

	// json content type
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)

	e := json.NewEncoder(w)
	e.SetIndent("", "\t") // specification example has literal tabs in it, but this is also silly
	_ = e.Encode(body)    // TODO log error if any, only likely to be IO errors
}
package bitcask

import (
	"bytes"
	"testing"
)

// TestSerialize round-trips a record through serialize/deserializeFrom and
// verifies that every field survives the trip.
func TestSerialize(t *testing.T) {
	r := &record{
		tstamp: 20,
		key:    "name",
		value:  []byte("李浚"),
	}

	// The serialize/deserialize errors were previously discarded; surface
	// them so a failure points at the actual cause instead of a bare Fail.
	d, err := serialize(r)
	if err != nil {
		t.Fatalf("serialize: %v", err)
	}
	t.Logf("serialized length: %d", len(d))

	out, err := deserializeFrom(bytes.NewReader(d))
	if err != nil {
		t.Fatalf("deserializeFrom: %v", err)
	}
	if out == nil {
		t.Fatal("deserializeFrom returned a nil record")
	}
	if out.tstamp != r.tstamp {
		t.Errorf("tstamp: got %v, want %v", out.tstamp, r.tstamp)
	}
	if out.key != r.key {
		t.Errorf("key: got %q, want %q", out.key, r.key)
	}
	if string(out.value) != string(r.value) {
		t.Errorf("value: got %q, want %q", out.value, r.value)
	}
}
package models

import (
	"github.com/google/cayley"
	"strconv"
	"time"
)

// Durations in seconds: the three supported iteration lengths and the
// mandatory pause before a new iteration may start.
const (
	Iterate15Minutes = 15 * 60
	Iterate30Minutes = 30 * 60
	Iterate45Minutes = 45 * 60
	Wait5Minutes = 5 * 60
)

// User models a person running timed work iterations. The iteration end
// time is lazily loaded from graph storage on first access.
type User struct {
	Name          string
	iterationTime int64    // unix seconds when the current iteration ends; 0 means "not loaded yet"
	storage       *Storage // lazily acquired via getStorage
}

// NewUser returns a User with no cached iteration time or storage handle.
func NewUser(name string) *User {
	user := &User{name, 0, nil}
	return user
}

// Id returns the user's identifier (the name itself).
func (u *User) Id() string {
	return u.Name
}

// Iteration reports whether an iteration is currently running.
func (u *User) Iteration() bool {
	return u.IterationTime() > time.Now().Unix()
}

// IterationTime returns the unix time at which the user's iteration ends,
// loading it from storage via the "free at" predicate on first call. When
// storage has no value it defaults to Wait5Minutes in the past, so the
// user can start immediately (see CanStart).
func (u *User) IterationTime() int64 {
	if u.iterationTime == 0 {
		p := cayley.StartPath(u.getStorage(), u.Name).Out("free at")
		it := p.BuildIterator()
		if cayley.RawNext(it) {
			// NOTE(review): a parse error is discarded, silently leaving the
			// cached value at 0 (i.e. "not loaded") — confirm this is intended.
			u.iterationTime, _ = strconv.ParseInt(u.getStorage().NameOf(it.Result()), 10, 64)
		} else {
			u.iterationTime = time.Now().Unix() - Wait5Minutes
		}
	}
	return u.iterationTime
}

// Start begins a new iteration of the given duration (seconds) if the
// cool-down has passed, and persists the new end time.
func (u *User) Start(duration int64) {
	if u.CanStart() {
		u.iterationTime = time.Now().Unix() + duration
		u.getStorage().SaveUser(u)
	}
}

// Stop ends the current iteration immediately and persists that.
func (u *User) Stop() {
	u.iterationTime = time.Now().Unix()
	u.getStorage().SaveUser(u)
}

// CanStart reports whether the post-iteration cool-down (Wait5Minutes)
// has elapsed.
func (u *User) CanStart() bool {
	return time.Now().Unix() > u.IterationTime() + Wait5Minutes
}

// getStorage lazily opens the shared storage.
// NOTE(review): the GetStorage error is ignored; a failure would leave a
// nil storage that panics at the call sites above — confirm acceptable.
func (u *User) getStorage() *Storage {
	if u.storage == nil {
		u.storage, _ = GetStorage()
	}
	return u.storage
}
package main

import (
	"go-mod/game"
	"go-mod/util"
	"log"

	"github.com/veandco/go-sdl2/sdl"
)

// --

// gameState enumerates the top-level modes of the main loop.
type gameState int

const (
	start gameState = iota // waiting for the player to press space
	play                   // ball and paddles are live
)

// state holds the current mode; mutated by the loop and by SetStateStart.
var state = start

// SetStateStart sets the game state to start
func SetStateStart() {
	state = start
}

// --

// main initializes SDL, creates the window, renderer, streaming texture and
// a raw pixel buffer, constructs the paddles and ball, then runs the
// fixed-delay game loop until a quit event arrives.
func main() {
	err := sdl.Init(sdl.INIT_EVERYTHING)
	if err != nil {
		log.Fatal(err)
	}
	defer sdl.Quit()

	window, err := sdl.CreateWindow("Go Pong", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,
		int32(util.WinWidth), int32(util.WinHeight), sdl.WINDOW_SHOWN)
	if err != nil {
		log.Fatal(err)
	}
	defer window.Destroy()

	renderer, err := sdl.CreateRenderer(window, -1, sdl.RENDERER_ACCELERATED)
	if err != nil {
		log.Fatal(err)
	}
	defer renderer.Destroy()

	// Streaming texture: the game draws into `pixels` on the CPU and the
	// whole buffer is uploaded once per frame.
	tex, err := renderer.CreateTexture(sdl.PIXELFORMAT_ABGR8888, sdl.TEXTUREACCESS_STREAMING,
		int32(util.WinWidth), int32(util.WinHeight))
	if err != nil {
		log.Fatal(err)
	}
	defer tex.Destroy()

	// 4 bytes per pixel (ABGR8888).
	pixels := make([]byte, util.WinWidth*util.WinHeight*4)

	// Paint the whole buffer black once before the loop starts.
	for y := 0; y < util.WinHeight; y++ {
		for x := 0; x < util.WinWidth; x++ {
			game.SetPixel(float32(x), float32(y), game.Color{
				R: 0,
				G: 0,
				B: 0,
			}, pixels)
		}
	}

	// Human paddle on the left, AI paddle on the right.
	player1 := game.ConstructPaddle(50, 100)
	player2 := game.ConstructPaddle(float32(util.WinWidth-50), 100)
	ball := game.ConstructBall()

	keyState := sdl.GetKeyboardState()

	for {
		// Drain pending SDL events; quit ends the program.
		for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
			switch event.(type) {
			case *sdl.QuitEvent:
				return
			}
		}

		if state == play {
			// Update functions
			player1.Update(keyState)
			player2.AIUpdate(ball)
			// SetStateStart is passed so the ball can pause the game after
			// a point is scored.
			ball.Update(player1, player2, SetStateStart)
		} else if state == start {
			// Space resumes play; after a finished match (first to 5) the
			// scores are reset before the new round.
			if keyState[sdl.SCANCODE_SPACE] != 0 {
				if player1.Score == 5 || player2.Score == 5 {
					player1.Score = 0
					player2.Score = 0
				}
				state = play
			}
		}

		util.ClearPixels(pixels)

		// Draw functions
		player1.Draw(pixels)
		ball.Draw(pixels)
		player2.Draw(pixels)

		// Upload the CPU buffer and present the frame; the pitch is
		// bytes-per-row (width * 4 bytes per pixel).
		tex.Update(nil, pixels, util.WinWidth*4)
		renderer.Copy(tex, nil, nil)
		renderer.Present()

		sdl.Delay(15)
	}
}
/***************************************************************** * Copyright©,2020-2022, email: 279197148@qq.com * Version: 1.0.0 * @Author: yangtxiang * @Date: 2020-08-03 15:50 * Description: *****************************************************************/ package netstream import "sync" type TWaitGroup struct { delta int wg sync.WaitGroup } func (p *TWaitGroup) Add(delta int) { p.delta += delta p.wg.Add(delta) } func (p *TWaitGroup) Done() { p.delta -= 1 p.wg.Done() } func (p *TWaitGroup) DoneAll() { for { if p.delta <= 0 { return } p.Done() } } func (p *TWaitGroup) Wait() { p.wg.Wait() }
package task

import (
	"github.com/imsilence/gocmdb/agent/gconf"
)

// CatPlugin is a trivial task plugin that identifies itself as "cat" and
// returns a static payload.
type CatPlugin struct {
}

// Name returns the plugin identifier.
func (p *CatPlugin) Name() string {
	return "cat"
}

// Init prepares the plugin; there is nothing to configure, so it always
// reports success.
func (p *CatPlugin) Init(c gconf.Config) bool {
	return true
}

// Call executes the plugin, returning its static "cat" payload.
func (p *CatPlugin) Call() (interface{}, error) {
	return "cat", nil
}
package structHelper

// HelperFunction carries the basic identifying fields of a helper entity as
// exchanged over JSON. (Field names are Spanish: name, code, description.)
type HelperFunction struct {
	ID          string `json:"ID"`
	Nombre      string `json:"nombre"`
	Codigo      string `json:"codigo"`
	Descripcion string `json:"descripcion"`
}
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "fmt" "math" "strings" "testing" "github.com/stretchr/testify/require" ) const jsonBenchStr = `{"a":[1,"2",{"aa":"bb"},4,null],"b":true,"c":null}` func TestBinaryJSONMarshalUnmarshal(t *testing.T) { expectedList := []string{ `{"a": [1, "2", {"aa": "bb"}, 4, null], "b": true, "c": null}`, `{"aaaaaaaaaaa": [1, "2", {"aa": "bb"}, 4.1], "bbbbbbbbbb": true, "ccccccccc": "d"}`, `[{"a": 1, "b": true}, 3, 3.5, "hello, world", null, true]`, `{"a": "&<>"}`, } for _, expected := range expectedList { result := mustParseBinaryFromString(t, expected) require.Equal(t, expected, result.String()) } } func TestBinaryJSONExtract(t *testing.T) { bj1 := mustParseBinaryFromString(t, `{"\"hello\"": "world", "a": [1, "2", {"aa": "bb"}, 4.0, {"aa": "cc"}], "b": true, "c": ["d"]}`) bj2 := mustParseBinaryFromString(t, `[{"a": 1, "b": true}, 3, 3.5, "hello, world", null, true]`) bj3 := mustParseBinaryFromString(t, `{"properties": {"$type": "TiDB"}}`) bj4 := mustParseBinaryFromString(t, `{"properties": {"$type$type": {"$a$a" : "TiDB"}}}`) bj5 := mustParseBinaryFromString(t, `{"properties": {"$type": {"$a" : {"$b" : "TiDB"}}}}`) bj6 := mustParseBinaryFromString(t, `{"properties": {"$type": {"$a$a" : "TiDB"}},"hello": {"$b$b": "world","$c": "amazing"}}`) bj7 := mustParseBinaryFromString(t, `{ "a": { "x" : { "b": { "y": { "b": { "z": { "c": 100 } } } } } } }`) bj8 := mustParseBinaryFromString(t, `{ 
"a": { "b" : [ 1, 2, 3 ] } }`) bj9 := mustParseBinaryFromString(t, `[[0,1],[2,3],[4,[5,6]]]`) bj10 := mustParseBinaryFromString(t, `[1]`) bj11 := mustParseBinaryFromString(t, `{"metadata": {"comment": "1234"}}`) bj12 := mustParseBinaryFromString(t, `{"metadata": {"age": 19, "name": "Tom"}}`) var tests = []struct { bj BinaryJSON pathExprStrings []string expected BinaryJSON found bool err error }{ // test extract with only one path expression. {bj1, []string{"$.a"}, mustParseBinaryFromString(t, `[1, "2", {"aa": "bb"}, 4.0, {"aa": "cc"}]`), true, nil}, {bj2, []string{"$.a"}, mustParseBinaryFromString(t, "null"), false, nil}, {bj1, []string{"$[0]"}, bj1, true, nil}, // in Extract, autowraped bj1 as an array. {bj2, []string{"$[0]"}, mustParseBinaryFromString(t, `{"a": 1, "b": true}`), true, nil}, {bj1, []string{"$.a[2].aa"}, mustParseBinaryFromString(t, `"bb"`), true, nil}, {bj1, []string{"$.a[*].aa"}, mustParseBinaryFromString(t, `["bb", "cc"]`), true, nil}, {bj1, []string{"$.*[0]"}, mustParseBinaryFromString(t, `["world", 1, true, "d"]`), true, nil}, {bj1, []string{`$.a[*]."aa"`}, mustParseBinaryFromString(t, `["bb", "cc"]`), true, nil}, {bj1, []string{`$."\"hello\""`}, mustParseBinaryFromString(t, `"world"`), true, nil}, {bj1, []string{`$**[1]`}, mustParseBinaryFromString(t, `["2"]`), true, nil}, {bj3, []string{`$.properties.$type`}, mustParseBinaryFromString(t, `"TiDB"`), true, nil}, {bj4, []string{`$.properties.$type$type`}, mustParseBinaryFromString(t, `{"$a$a" : "TiDB"}`), true, nil}, {bj4, []string{`$.properties.$type$type.$a$a`}, mustParseBinaryFromString(t, `"TiDB"`), true, nil}, {bj5, []string{`$.properties.$type.$a.$b`}, mustParseBinaryFromString(t, `"TiDB"`), true, nil}, {bj5, []string{`$.properties.$type.$a.*[0]`}, mustParseBinaryFromString(t, `["TiDB"]`), true, nil}, {bj11, []string{"$.metadata.comment"}, mustParseBinaryFromString(t, `"1234"`), true, nil}, {bj9, []string{"$[0]"}, mustParseBinaryFromString(t, `[0, 1] `), true, nil}, {bj9, 
[]string{"$[last][last]"}, mustParseBinaryFromString(t, `[5,6]`), true, nil}, {bj9, []string{"$[last-1][last]"}, mustParseBinaryFromString(t, `3`), true, nil}, {bj9, []string{"$[last-1][last-1]"}, mustParseBinaryFromString(t, `2`), true, nil}, {bj9, []string{"$[1 to 2]"}, mustParseBinaryFromString(t, `[[2,3],[4,[5,6]]]`), true, nil}, {bj9, []string{"$[1 to 2][1 to 2]"}, mustParseBinaryFromString(t, `[3,[5,6]]`), true, nil}, {bj9, []string{"$[1 to last][1 to last]"}, mustParseBinaryFromString(t, `[3,[5,6]]`), true, nil}, {bj9, []string{"$[1 to last][1 to last - 1]"}, bj9, false, nil}, {bj9, []string{"$[1 to last][0 to last - 1]"}, mustParseBinaryFromString(t, `[2,4]`), true, nil}, // test extract with multi path expressions. {bj1, []string{"$.a", "$[5]"}, mustParseBinaryFromString(t, `[[1, "2", {"aa": "bb"}, 4.0, {"aa": "cc"}]]`), true, nil}, {bj2, []string{"$.a", "$[0]"}, mustParseBinaryFromString(t, `[{"a": 1, "b": true}]`), true, nil}, {bj6, []string{"$.properties", "$[1]"}, mustParseBinaryFromString(t, `[{"$type": {"$a$a" : "TiDB"}}]`), true, nil}, {bj6, []string{"$.hello", "$[2]"}, mustParseBinaryFromString(t, `[{"$b$b": "world","$c": "amazing"}]`), true, nil}, {bj7, []string{"$.a**.b**.c"}, mustParseBinaryFromString(t, `[100]`), true, nil}, {bj8, []string{"$**[0]"}, mustParseBinaryFromString(t, `[{"a": {"b": [1, 2, 3]}}, {"b": [1, 2, 3]}, 1, 2, 3]`), true, nil}, {bj9, []string{"$**[0]"}, mustParseBinaryFromString(t, `[[0, 1], 0, 1, 2, 3, 4, 5, 6] `), true, nil}, {bj10, []string{"$**[0]"}, mustParseBinaryFromString(t, `[1]`), true, nil}, {bj12, []string{"$.metadata.age", "$.metadata.name"}, mustParseBinaryFromString(t, `[19, "Tom"]`), true, nil}, } for _, test := range tests { var pathExprList = make([]JSONPathExpression, 0) for _, peStr := range test.pathExprStrings { pe, err := ParseJSONPathExpr(peStr) require.NoError(t, err) pathExprList = append(pathExprList, pe) } result, found := test.bj.Extract(pathExprList) require.Equal(t, test.found, found, 
test.bj.String()) if found { require.Equal(t, test.expected.String(), result.String()) } } } func TestBinaryJSONType(t *testing.T) { var tests = []struct { in string out string }{ {`{"a": "b"}`, "OBJECT"}, {`["a", "b"]`, "ARRAY"}, {`3`, "INTEGER"}, {`3.0`, "DOUBLE"}, {`null`, "NULL"}, {`true`, "BOOLEAN"}, } for _, test := range tests { result := mustParseBinaryFromString(t, test.in) require.Equal(t, test.out, result.Type()) } // we can't parse '9223372036854775808' to JSON::Uint64 now, // because go builtin JSON parser treats that as DOUBLE. require.Equal(t, "UNSIGNED INTEGER", CreateBinaryJSON(uint64(1<<63)).Type()) } func TestBinaryJSONUnquote(t *testing.T) { var tests = []struct { json string unquoted string }{ {json: `3`, unquoted: "3"}, {json: `"3"`, unquoted: "3"}, {json: `"[{\"x\":\"{\\\"y\\\":12}\"}]"`, unquoted: `[{"x":"{\"y\":12}"}]`}, {json: `"hello, \"escaped quotes\" world"`, unquoted: "hello, \"escaped quotes\" world"}, {json: "\"\\u4f60\"", unquoted: "你"}, {json: `true`, unquoted: "true"}, {json: `null`, unquoted: "null"}, {json: `{"a": [1, 2]}`, unquoted: `{"a": [1, 2]}`}, {json: `"'"`, unquoted: `'`}, {json: `"''"`, unquoted: `''`}, {json: `""`, unquoted: ``}, } for _, test := range tests { result := mustParseBinaryFromString(t, test.json) unquoted, err := result.Unquote() require.NoError(t, err) require.Equal(t, test.unquoted, unquoted) } } func TestQuoteString(t *testing.T) { var tests = []struct { raw string quoted string }{ {raw: "3", quoted: `"3"`}, {raw: "hello, \"escaped quotes\" world", quoted: `"hello, \"escaped quotes\" world"`}, {raw: "你", quoted: `你`}, {raw: "true", quoted: `true`}, {raw: "null", quoted: `null`}, {raw: `"`, quoted: `"\""`}, {raw: `'`, quoted: `"'"`}, {raw: `''`, quoted: `"''"`}, {raw: ``, quoted: `""`}, {raw: "\\ \" \b \f \n \r \t", quoted: `"\\ \" \b \f \n \r \t"`}, } for _, test := range tests { require.Equal(t, test.quoted, quoteJSONString(test.raw)) } } func TestBinaryJSONModify(t *testing.T) { var tests = []struct 
{ base string setField string setValue string expected string success bool mt JSONModifyType }{ {`null`, "$", `{}`, `{}`, true, JSONModifySet}, {`{}`, "$.a", `3`, `{"a": 3}`, true, JSONModifySet}, {`{"a": 3}`, "$.a", `[]`, `{"a": []}`, true, JSONModifyReplace}, {`{"a": 3}`, "$.b", `"3"`, `{"a": 3, "b": "3"}`, true, JSONModifySet}, {`{"a": []}`, "$.a[0]", `3`, `{"a": [3]}`, true, JSONModifySet}, {`{"a": [3]}`, "$.a[1]", `4`, `{"a": [3, 4]}`, true, JSONModifyInsert}, {`{"a": [3]}`, "$[0]", `4`, `4`, true, JSONModifySet}, {`{"a": [3]}`, "$[1]", `4`, `[{"a": [3]}, 4]`, true, JSONModifySet}, {`{"b": true}`, "$.b", `false`, `{"b": false}`, true, JSONModifySet}, // These tests illustrate the differences among the three JSONModifyType {`{"foo": "bar"}`, "$.foo", `"moo"`, `{"foo": "bar"}`, true, JSONModifyInsert}, {`{"foo": "bar"}`, "$.foo", `"moo"`, `{"foo": "moo"}`, true, JSONModifyReplace}, {`{"foo": "bar"}`, "$.foo", `"moo"`, `{"foo": "moo"}`, true, JSONModifySet}, {`{"foo": "bar"}`, "$.foo", `null`, `{"foo": null}`, true, JSONModifySet}, {`{"foo": "bar"}`, "$.baz", `"moo"`, `{"foo": "bar", "baz": "moo"}`, true, JSONModifyInsert}, {`{"foo": "bar"}`, "$.baz", `"moo"`, `{"foo": "bar"}`, true, JSONModifyReplace}, {`{"foo": "bar"}`, "$.baz", `"moo"`, `{"foo": "bar", "baz": "moo"}`, true, JSONModifySet}, {`{"foo": "bar"}`, "$.baz", `null`, `{"foo": "bar", "baz": null}`, true, JSONModifySet}, // nothing changed because the path is empty and we want to insert. {`{}`, "$", `1`, `{}`, true, JSONModifyInsert}, // nothing changed because the path without last leg doesn't exist. {`{"a": [3, 4]}`, "$.b[1]", `3`, `{"a": [3, 4]}`, true, JSONModifySet}, // nothing changed because the path without last leg doesn't exist. {`{"a": [3, 4]}`, "$.a[2].b", `3`, `{"a": [3, 4]}`, true, JSONModifySet}, // nothing changed because we want to insert but the full path exists. 
{`{"a": [3, 4]}`, "$.a[0]", `30`, `{"a": [3, 4]}`, true, JSONModifyInsert},
	// nothing changed because we want to replace but the full path doesn't exist.
	{`{"a": [3, 4]}`, "$.a[2]", `30`, `{"a": [3, 4]}`, true, JSONModifyReplace},
	// bad path expression.
	{"null", "$.*", "{}", "null", false, JSONModifySet},
	{"null", "$[*]", "{}", "null", false, JSONModifySet},
	{"null", "$**.a", "{}", "null", false, JSONModifySet},
	{"null", "$**[3]", "{}", "null", false, JSONModifySet},
}
// Each case parses the path, applies Modify with the given mode, and checks
// either the resulting document or that an error is reported.
for _, test := range tests {
	pathExpr, err := ParseJSONPathExpr(test.setField)
	require.NoError(t, err)
	base := mustParseBinaryFromString(t, test.base)
	value := mustParseBinaryFromString(t, test.setValue)
	expected := mustParseBinaryFromString(t, test.expected)
	obtain, err := base.Modify([]JSONPathExpression{pathExpr}, []BinaryJSON{value}, test.mt)
	if test.success {
		require.NoError(t, err)
		require.Equal(t, expected.String(), obtain.String())
	} else {
		require.Error(t, err)
	}
}
}

// TestBinaryJSONRemove exercises BinaryJSON.Remove over object keys and array
// indices, including out-of-range/no-op removals and invalid targets ($, wildcards).
func TestBinaryJSONRemove(t *testing.T) {
	var tests = []struct {
		base     string
		path     string
		expected string
		success  bool
	}{
		{`null`, "$", `{}`, false},
		{`{"a":[3]}`, "$.a[*]", `{"a":[3]}`, false},
		{`{}`, "$.a", `{}`, true},
		{`{"a":3}`, "$.a", `{}`, true},
		{`{"a":1,"b":2,"c":3}`, "$.b", `{"a":1,"c":3}`, true},
		{`{"a":1,"b":2,"c":3}`, "$.d", `{"a":1,"b":2,"c":3}`, true},
		{`{"a":3}`, "$[0]", `{"a":3}`, true},
		{`{"a":[3,4,5]}`, "$.a[0]", `{"a":[4,5]}`, true},
		{`{"a":[3,4,5]}`, "$.a[1]", `{"a":[3,5]}`, true},
		{`{"a":[3,4,5]}`, "$.a[4]", `{"a":[3,4,5]}`, true},
		{`{"a": [1, 2, {"aa": "xx"}]}`, "$.a[2].aa", `{"a": [1, 2, {}]}`, true},
	}
	for _, test := range tests {
		pathExpr, err := ParseJSONPathExpr(test.path)
		require.NoError(t, err)
		base := mustParseBinaryFromString(t, test.base)
		expected := mustParseBinaryFromString(t, test.expected)
		obtain, err := base.Remove([]JSONPathExpression{pathExpr})
		if test.success {
			require.NoError(t, err)
			require.Equal(t, expected.String(), obtain.String())
		} else {
			require.Error(t, err)
		}
	}
}

// TestCompareBinary checks the total order CompareBinaryJSON defines across
// type classes (null < integers < strings < objects < arrays < booleans) and
// cross-type numeric comparisons (int64/uint64/float64), including the
// signed/unsigned boundary values.
func TestCompareBinary(t *testing.T) {
	jNull := mustParseBinaryFromString(t, `null`)
	jBoolTrue := mustParseBinaryFromString(t, `true`)
	jBoolFalse := mustParseBinaryFromString(t, `false`)
	jIntegerLarge := CreateBinaryJSON(uint64(1 << 63))
	jIntegerSmall := mustParseBinaryFromString(t, `3`)
	jStringLarge := mustParseBinaryFromString(t, `"hello, world"`)
	jStringSmall := mustParseBinaryFromString(t, `"hello"`)
	jArrayLarge := mustParseBinaryFromString(t, `["a", "c"]`)
	jArraySmall := mustParseBinaryFromString(t, `["a", "b"]`)
	jObject := mustParseBinaryFromString(t, `{"a": "b"}`)
	var tests = []struct {
		left   BinaryJSON
		right  BinaryJSON
		result int
	}{
		{jNull, jIntegerSmall, -1},
		{jIntegerSmall, jIntegerLarge, -1},
		{jIntegerLarge, jStringSmall, -1},
		{jStringSmall, jStringLarge, -1},
		{jStringLarge, jObject, -1},
		{jObject, jArraySmall, -1},
		{jArraySmall, jArrayLarge, -1},
		{jArrayLarge, jBoolFalse, -1},
		{jBoolFalse, jBoolTrue, -1},
		{CreateBinaryJSON(int64(922337203685477580)), CreateBinaryJSON(int64(922337203685477580)), 0},
		{CreateBinaryJSON(int64(922337203685477580)), CreateBinaryJSON(int64(922337203685477581)), -1},
		{CreateBinaryJSON(int64(922337203685477581)), CreateBinaryJSON(int64(922337203685477580)), 1},
		{CreateBinaryJSON(int64(-1)), CreateBinaryJSON(uint64(18446744073709551615)), -1},
		{CreateBinaryJSON(int64(922337203685477580)), CreateBinaryJSON(uint64(922337203685477581)), -1},
		{CreateBinaryJSON(int64(2)), CreateBinaryJSON(uint64(1)), 1},
		{CreateBinaryJSON(int64(math.MaxInt64)), CreateBinaryJSON(uint64(math.MaxInt64)), 0},
		{CreateBinaryJSON(uint64(18446744073709551615)), CreateBinaryJSON(int64(-1)), 1},
		{CreateBinaryJSON(uint64(922337203685477581)), CreateBinaryJSON(int64(922337203685477580)), 1},
		{CreateBinaryJSON(uint64(1)), CreateBinaryJSON(int64(2)), -1},
		{CreateBinaryJSON(uint64(math.MaxInt64)), CreateBinaryJSON(int64(math.MaxInt64)), 0},
		{CreateBinaryJSON(float64(9.0)), CreateBinaryJSON(int64(9)), 0},
		{CreateBinaryJSON(float64(8.9)), CreateBinaryJSON(int64(9)), -1},
		{CreateBinaryJSON(float64(9.1)), CreateBinaryJSON(int64(9)), 1},
		{CreateBinaryJSON(float64(9.0)), CreateBinaryJSON(uint64(9)), 0},
		{CreateBinaryJSON(float64(8.9)), CreateBinaryJSON(uint64(9)), -1},
		{CreateBinaryJSON(float64(9.1)), CreateBinaryJSON(uint64(9)), 1},
		{CreateBinaryJSON(int64(9)), CreateBinaryJSON(float64(9.0)), 0},
		{CreateBinaryJSON(int64(9)), CreateBinaryJSON(float64(8.9)), 1},
		{CreateBinaryJSON(int64(9)), CreateBinaryJSON(float64(9.1)), -1},
		{CreateBinaryJSON(uint64(9)), CreateBinaryJSON(float64(9.0)), 0},
		{CreateBinaryJSON(uint64(9)), CreateBinaryJSON(float64(8.9)), 1},
		{CreateBinaryJSON(uint64(9)), CreateBinaryJSON(float64(9.1)), -1},
	}
	for _, test := range tests {
		result := CompareBinaryJSON(test.left, test.right)
		comment := fmt.Sprintf("left: %v, right: %v, expect: %v, got: %v", test.left, test.right, test.result, result)
		require.Equal(t, test.result, result, comment)
	}
}

// TestBinaryJSONMerge verifies MySQL-style JSON_MERGE semantics: objects merge
// key-by-key (duplicate keys collect into arrays), arrays concatenate, and
// scalars/objects are wrapped into arrays when merged with arrays or scalars.
func TestBinaryJSONMerge(t *testing.T) {
	var tests = []struct {
		suffixes []string
		expected string
	}{
		{[]string{`{"a": 1}`, `{"b": 2}`}, `{"a": 1, "b": 2}`},
		{[]string{`{"a": 1}`, `{"a": 2}`}, `{"a": [1, 2]}`},
		{[]string{`[1]`, `[2]`}, `[1, 2]`},
		{[]string{`{"a": 1}`, `[1]`}, `[{"a": 1}, 1]`},
		{[]string{`[1]`, `{"a": 1}`}, `[1, {"a": 1}]`},
		{[]string{`{"a": 1}`, `4`}, `[{"a": 1}, 4]`},
		{[]string{`[1]`, `4`}, `[1, 4]`},
		{[]string{`4`, `{"a": 1}`}, `[4, {"a": 1}]`},
		{[]string{`4`, `1`}, `[4, 1]`},
		{[]string{`{}`, `[]`}, `[{}]`},
		{[]string{`{"comment": "1234"}`, `{"age": 19, "name": "Tom"}`}, `{"age": 19, "comment": "1234", "name": "Tom"}`},
		{[]string{`{"metadata": {"comment": "1234"}}`, `{"metadata": {"age": 19, "name": "Tom"}}`}, `{"metadata": {"age": 19, "comment": "1234", "name": "Tom"}}`},
		{[]string{`{"comment": "1234"}`, `{"comment": "abc"}`}, `{"comment": ["1234", "abc"]}`},
	}
	for _, test := range tests {
		suffixes := make([]BinaryJSON, 0, len(test.suffixes)+1)
		for _, s := range test.suffixes {
			suffixes = append(suffixes, mustParseBinaryFromString(t, s))
		}
		result := MergeBinaryJSON(suffixes)
		// Compare via CompareBinaryJSON rather than String() so that key
		// ordering/formatting differences do not matter.
		cmp := CompareBinaryJSON(result, mustParseBinaryFromString(t, test.expected))
		require.Equal(t, 0, cmp, result.String())
	}
}

// mustParseBinaryFromString parses s as BinaryJSON and fails the test on error.
func mustParseBinaryFromString(t *testing.T, s string) BinaryJSON {
	result, err := ParseBinaryJSONFromString(s)
	require.NoError(t, err)
	return result
}

// BenchmarkBinaryMarshal measures MarshalJSON throughput over the shared
// jsonBenchStr fixture (defined elsewhere in this package).
func BenchmarkBinaryMarshal(b *testing.B) {
	b.ReportAllocs()
	b.SetBytes(int64(len(jsonBenchStr)))
	bj, _ := ParseBinaryJSONFromString(jsonBenchStr)
	for i := 0; i < b.N; i++ {
		_, _ = bj.MarshalJSON()
	}
}

// TestBinaryJSONContains covers JSON_CONTAINS semantics: object containment is
// key-subset based, array containment searches elements (including nested
// arrays), and scalar targets match array members.
func TestBinaryJSONContains(t *testing.T) {
	var tests = []struct {
		input    string
		target   string
		expected bool
	}{
		{`{}`, `{}`, true},
		{`{"a":1}`, `{}`, true},
		{`{"a":1}`, `1`, false},
		{`{"a":[1]}`, `[1]`, false},
		{`{"b":2, "c":3}`, `{"c":3}`, true},
		{`1`, `1`, true},
		{`[1]`, `1`, true},
		{`[1,2]`, `[1]`, true},
		{`[1,2]`, `[1,3]`, false},
		{`[1,2]`, `["1"]`, false},
		{`[1,2,[1,3]]`, `[1,3]`, true},
		{`[1,2,[1,[5,[3]]]]`, `[1,3]`, true},
		{`[1,2,[1,[5,{"a":[2,3]}]]]`, `[1,{"a":[3]}]`, true},
		{`[{"a":1}]`, `{"a":1}`, true},
		{`[{"a":1,"b":2}]`, `{"a":1}`, true},
		{`[{"a":{"a":1},"b":2}]`, `{"a":1}`, false},
	}
	for _, test := range tests {
		obj := mustParseBinaryFromString(t, test.input)
		target := mustParseBinaryFromString(t, test.target)
		require.Equal(t, test.expected, ContainsBinaryJSON(obj, target))
	}
}

// TestBinaryJSONCopy checks that Copy produces a value that renders
// identically to the original document.
func TestBinaryJSONCopy(t *testing.T) {
	expectedList := []string{
		`{"a": [1, "2", {"aa": "bb"}, 4, null], "b": true, "c": null}`,
		`{"aaaaaaaaaaa": [1, "2", {"aa": "bb"}, 4.1], "bbbbbbbbbb": true, "ccccccccc": "d"}`,
		`[{"a": 1, "b": true}, 3, 3.5, "hello, world", null, true]`,
	}
	for _, expected := range expectedList {
		result := mustParseBinaryFromString(t, expected)
		require.Equal(t, result.String(), result.Copy().String())
	}
}

// TestGetKeys checks GetKeys on arrays (empty key list), empty and non-empty
// objects (keys sorted), and the 64 KiB key-length limit error.
func TestGetKeys(t *testing.T) {
	parsedBJ := mustParseBinaryFromString(t, "[]")
	require.Equal(t, "[]", parsedBJ.GetKeys().String())
	parsedBJ = mustParseBinaryFromString(t, "{}")
	require.Equal(t, "[]", parsedBJ.GetKeys().String())
	parsedBJ = mustParseBinaryFromString(t, "{\"comment\": \"1234\"}")
	require.Equal(t, "[\"comment\"]", parsedBJ.GetKeys().String())
	parsedBJ = mustParseBinaryFromString(t, "{\"name\": \"Tom\", \"age\": 19}")
	require.Equal(t, "[\"age\", \"name\"]", parsedBJ.GetKeys().String())
	require.Equal(t, 2, parsedBJ.GetKeys().GetElemCount())
	// Build an object whose single key is 65536 bytes long; parsing must fail.
	b := strings.Builder{}
	b.WriteString("{\"")
	for i := 0; i < 65536; i++ {
		b.WriteByte('a')
	}
	b.WriteString("\": 1}")
	parsedBJ, err := ParseBinaryJSONFromString(b.String())
	require.Error(t, err)
	require.EqualError(t, err, "[types:8129]TiDB does not yet support JSON objects with the key length >= 65536")
}

// TestBinaryJSONDepth verifies GetElemDepth: scalars and empty containers have
// depth 1; each nesting level adds one.
func TestBinaryJSONDepth(t *testing.T) {
	var tests = []struct {
		input    string
		expected int
	}{
		{`{}`, 1},
		{`[]`, 1},
		{`true`, 1},
		{`[10, 20]`, 2},
		{`[[], {}]`, 2},
		{`[10, {"a": 20}]`, 3},
		{`{"Person": {"Name": "Homer", "Age": 39, "Hobbies": ["Eating", "Sleeping"]} }`, 4},
	}
	for _, test := range tests {
		obj := mustParseBinaryFromString(t, test.input)
		require.Equal(t, test.expected, obj.GetElemDepth())
	}
}

// TestParseBinaryFromString checks the error messages for an empty document
// and for trailing content after a complete JSON value.
func TestParseBinaryFromString(t *testing.T) {
	obj, err := ParseBinaryJSONFromString("")
	require.Error(t, err)
	require.Equal(t, "", obj.String())
	require.Contains(t, err.Error(), "The document is empty")
	obj, err = ParseBinaryJSONFromString(`"a""`)
	require.Error(t, err)
	require.Equal(t, "", obj.String())
	require.Contains(t, err.Error(), "The document root must not be followed by other values.")
}

// TestCreateBinary checks the type codes CreateBinaryJSON assigns to Go
// values, that wrapping an existing BinaryJSON preserves it, and that an
// unsupported input type (int8) panics with "unknown type:".
func TestCreateBinary(t *testing.T) {
	bj := CreateBinaryJSON(int64(1 << 62))
	require.Equal(t, JSONTypeCodeInt64, bj.TypeCode)
	require.NotNil(t, bj.Value)
	bj = CreateBinaryJSON(123456789.1234567)
	require.Equal(t, JSONTypeCodeFloat64, bj.TypeCode)
	bj = CreateBinaryJSON(0.00000001)
	require.Equal(t, JSONTypeCodeFloat64, bj.TypeCode)
	bj = CreateBinaryJSON(1e-20)
	require.Equal(t, JSONTypeCodeFloat64, bj.TypeCode)
	require.NotNil(t, bj.Value)
	bj2 := CreateBinaryJSON(bj)
	require.Equal(t, bj.TypeCode, bj2.TypeCode)
	require.NotNil(t, bj2.Value)
	func() {
		defer func() {
			r := recover()
			require.Regexp(t, "^unknown type:", r)
		}()
		bj = CreateBinaryJSON(int8(123))
		require.Equal(t, bj.TypeCode, bj.TypeCode)
	}()
}

// TestFunctions covers unquoteJSONString escape handling and PeekBytesAsJSON
// error cases (invalid bytes, empty input).
func TestFunctions(t *testing.T) {
	testByte := []byte{'\\', 'b', 'f', 'n', 'r', 't', 'u', 'z', '0'}
	testOutput, err := unquoteJSONString(string(testByte))
	require.Equal(t, "\bfnrtuz0", testOutput)
	require.NoError(t, err)
	n, err := PeekBytesAsJSON(testByte)
	require.Equal(t, 0, n)
	require.EqualError(t, err, "Invalid JSON bytes")
	n, err = PeekBytesAsJSON([]byte(""))
	require.Equal(t, 0, n)
	require.EqualError(t, err, "Cant peek from empty bytes")
}

// TestBinaryJSONExtractCallback drives extractToCallback directly and checks
// the exact sequence of (path, value) pairs produced for each path expression.
func TestBinaryJSONExtractCallback(t *testing.T) {
	bj1 := mustParseBinaryFromString(t, `{"\"hello\"": "world", "a": [1, "2", {"aa": "bb"}, 4.0, {"aa": "cc"}], "b": true, "c": ["d"]}`)
	bj2 := mustParseBinaryFromString(t, `[{"a": 1, "b": true}, 3, 3.5, "hello, world", null, true]`)
	type ExpectedPair struct {
		path string
		bj   BinaryJSON
	}
	var tests = []struct {
		bj       BinaryJSON
		pathExpr string
		expected []ExpectedPair
	}{
		{bj1, "$.a", []ExpectedPair{
			{"$.a", mustParseBinaryFromString(t, `[1, "2", {"aa": "bb"}, 4.0, {"aa": "cc"}]`)},
		}},
		{bj2, "$.a", []ExpectedPair{}},
		{bj1, "$[0]", []ExpectedPair{}}, // in extractToCallback/Walk/Search, DON'T autowraped bj as an array.
		{bj2, "$[0]", []ExpectedPair{
			{"$[0]", mustParseBinaryFromString(t, `{"a": 1, "b": true}`)},
		}},
		{bj1, "$.a[2].aa", []ExpectedPair{
			{"$.a[2].aa", mustParseBinaryFromString(t, `"bb"`)},
		}},
		{bj1, "$.a[*].aa", []ExpectedPair{
			{"$.a[2].aa", mustParseBinaryFromString(t, `"bb"`)},
			{"$.a[4].aa", mustParseBinaryFromString(t, `"cc"`)},
		}},
		{bj1, "$.*[0]", []ExpectedPair{
			// {"$.\"hello\"[0]", mustParseBinaryFromString(c, `"world"`)}, // NO autowraped as an array.
			{"$.a[0]", mustParseBinaryFromString(t, `1`)},
			// {"$.b[0]", mustParseBinaryFromString(c, `true`)}, // NO autowraped as an array.
			{"$.c[0]", mustParseBinaryFromString(t, `"d"`)},
		}},
		{bj1, `$.a[*]."aa"`, []ExpectedPair{
			{"$.a[2].aa", mustParseBinaryFromString(t, `"bb"`)},
			{"$.a[4].aa", mustParseBinaryFromString(t, `"cc"`)},
		}},
		{bj1, `$."\"hello\""`, []ExpectedPair{
			{`$."\"hello\""`, mustParseBinaryFromString(t, `"world"`)},
		}},
		{bj1, `$**[1]`, []ExpectedPair{
			{`$.a[1]`, mustParseBinaryFromString(t, `"2"`)},
		}},
	}
	for _, test := range tests {
		pe, err := ParseJSONPathExpr(test.pathExpr)
		require.NoError(t, err)
		count := 0
		cb := func(fullPath JSONPathExpression, bj BinaryJSON) (stop bool, err error) {
			require.Less(t, count, len(test.expected))
			if count < len(test.expected) {
				require.Equal(t, test.expected[count].path, fullPath.String())
				require.Equal(t, test.expected[count].bj.String(), bj.String())
			}
			count++
			return false, nil
		}
		fullPath := JSONPathExpression{legs: make([]jsonPathLeg, 0), flags: jsonPathExpressionFlag(0)}
		_, err = test.bj.extractToCallback(pe, cb, fullPath)
		require.NoError(t, err)
		require.Equal(t, len(test.expected), count)
	}
}

// TestBinaryJSONWalk checks Walk's pre-order traversal, both over the whole
// document and restricted to (deduplicated) path expressions.
func TestBinaryJSONWalk(t *testing.T) {
	bj1 := mustParseBinaryFromString(t, `["abc", [{"k": "10"}, "def"], {"x":"abc"}, {"y":"bcd"}]`)
	bj2 := mustParseBinaryFromString(t, `{}`)
	type ExpectedPair struct {
		path string
		bj   BinaryJSON
	}
	var tests = []struct {
		bj       BinaryJSON
		paths    []string
		expected []ExpectedPair
	}{
		{bj1, []string{}, []ExpectedPair{
			{`$`, mustParseBinaryFromString(t, `["abc", [{"k": "10"}, "def"], {"x":"abc"}, {"y":"bcd"}]`)},
			{`$[0]`, mustParseBinaryFromString(t, `"abc"`)},
			{`$[1]`, mustParseBinaryFromString(t, `[{"k": "10"}, "def"]`)},
			{`$[1][0]`, mustParseBinaryFromString(t, `{"k": "10"}`)},
			{`$[1][0].k`, mustParseBinaryFromString(t, `"10"`)},
			{`$[1][1]`, mustParseBinaryFromString(t, `"def"`)},
			{`$[2]`, mustParseBinaryFromString(t, `{"x":"abc"}`)},
			{`$[2].x`, mustParseBinaryFromString(t, `"abc"`)},
			{`$[3]`, mustParseBinaryFromString(t, `{"y":"bcd"}`)},
			{`$[3].y`, mustParseBinaryFromString(t, `"bcd"`)},
		}},
		{bj1, []string{`$[1]`},
			[]ExpectedPair{
				{`$[1]`, mustParseBinaryFromString(t, `[{"k": "10"}, "def"]`)},
				{`$[1][0]`, mustParseBinaryFromString(t, `{"k": "10"}`)},
				{`$[1][0].k`, mustParseBinaryFromString(t, `"10"`)},
				{`$[1][1]`, mustParseBinaryFromString(t, `"def"`)},
			}},
		{bj1, []string{`$[1]`, `$[1]`}, []ExpectedPair{ // test for unique
			{`$[1]`, mustParseBinaryFromString(t, `[{"k": "10"}, "def"]`)},
			{`$[1][0]`, mustParseBinaryFromString(t, `{"k": "10"}`)},
			{`$[1][0].k`, mustParseBinaryFromString(t, `"10"`)},
			{`$[1][1]`, mustParseBinaryFromString(t, `"def"`)},
		}},
		{bj1, []string{`$.m`}, []ExpectedPair{}},
		{bj2, []string{}, []ExpectedPair{
			{`$`, mustParseBinaryFromString(t, `{}`)},
		}},
	}
	for _, test := range tests {
		count := 0
		cb := func(fullPath JSONPathExpression, bj BinaryJSON) (stop bool, err error) {
			require.Less(t, count, len(test.expected))
			if count < len(test.expected) {
				require.Equal(t, test.expected[count].path, fullPath.String())
				require.Equal(t, test.expected[count].bj.String(), bj.String())
			}
			count++
			return false, nil
		}
		var err error
		if len(test.paths) > 0 {
			peList := make([]JSONPathExpression, 0, len(test.paths))
			for _, path := range test.paths {
				pe, errPath := ParseJSONPathExpr(path)
				require.NoError(t, errPath)
				peList = append(peList, pe)
			}
			err = test.bj.Walk(cb, peList...)
		} else {
			err = test.bj.Walk(cb)
		}
		require.NoError(t, err)
		require.Equal(t, len(test.expected), count)
	}
}

// TestBinaryJSONOpaque checks opaque-value accessors and the
// "base64:typeNNN:..." textual rendering, including a length encoded in more
// than one byte (0x80 0x01 -> 128).
func TestBinaryJSONOpaque(t *testing.T) {
	var tests = []struct {
		bj             BinaryJSON
		expectedOpaque Opaque
		expectedOutput string
	}{
		{
			BinaryJSON{
				TypeCode: JSONTypeCodeOpaque,
				Value:    []byte{233, 1, '9'},
			},
			Opaque{
				TypeCode: 233,
				Buf:      []byte{'9'},
			},
			`"base64:type233:OQ=="`,
		},
		{
			BinaryJSON{
				TypeCode: JSONTypeCodeOpaque,
				Value:    append([]byte{233, 0x80, 0x01}, make([]byte, 128)...),
			},
			Opaque{
				TypeCode: 233,
				Buf:      make([]byte, 128),
			},
			`"base64:type233:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="`,
		},
	}
	for _, test := range tests {
		buf := []byte{}
		require.Equal(t, test.expectedOpaque.TypeCode, test.bj.GetOpaqueFieldType())
		require.Equal(t, test.expectedOpaque, test.bj.GetOpaque())
		buf, err := test.bj.marshalTo(buf)
		require.NoError(t, err)
		require.Equal(t, string(buf), test.expectedOutput)
	}
}

// TestHashValue asserts that structurally different documents hash to
// distinct values.
func TestHashValue(t *testing.T) {
	// The following values should have different hash value
	jsons := []BinaryJSON{
		CreateBinaryJSON([]interface{}{}),
		CreateBinaryJSON([]interface{}{CreateBinaryJSON([]interface{}{})}),
		CreateBinaryJSON([]interface{}{CreateBinaryJSON([]interface{}{CreateBinaryJSON([]interface{}{})})}),
		CreateBinaryJSON(map[string]interface{}{}),
		CreateBinaryJSON([]interface{}{CreateBinaryJSON(false)}),
		CreateBinaryJSON([]interface{}{CreateBinaryJSON(true)}),
		CreateBinaryJSON([]interface{}{CreateBinaryJSON(nil)}),
	}
	// TODO: use a better way to count distinct json value
	counter := make(map[string]struct{})
	for _, j := range jsons {
		hashKey := j.HashValue([]byte{})
		counter[string(hashKey)] = struct{}{}
	}
	require.Equal(t, len(jsons), len(counter))
}
package backends

import "fmt"

// DummyAuthorizator is a fake authorizator interface implementation used for test.
type DummyAuthorizator struct {
}

// Authorize accepts every username/password pair.
func (d DummyAuthorizator) Authorize(user, pass string) bool {
	return true
}

// DummyBackend is a fake backend interface implementation used for test.
// It pretends every user has exactly five messages of ten octets each.
type DummyBackend struct {
}

// Stat returns the total message count and total mailbox size in bytes
// (octets). Deleted messages are ignored.
func (d DummyBackend) Stat(user string) (messages, octets int, err error) {
	messages, octets = 5, 50
	return messages, octets, nil
}

// List returns the sizes of all messages in bytes (octets).
func (d DummyBackend) List(user string) (octets []int, err error) {
	sizes := make([]int, 5)
	for i := range sizes {
		sizes[i] = 10
	}
	return sizes, nil
}

// ListMessage returns whether the message exists and, if it does, its size in
// bytes (octets).
func (d DummyBackend) ListMessage(user string, msgId int) (exists bool, octets int, err error) {
	if msgId <= 4 {
		return true, 10, nil
	}
	return false, 0, nil
}

// Retr retrieves a whole message by ID. Message IDs are positions as returned
// by List(), so that order must stay stable while a client is connected; see
// Lock() for details.
func (d DummyBackend) Retr(user string, msgId int) (message string, err error) {
	return "this is dummy message", nil
}

// Dele marks a message as deleted until Update() is called. After Dele(),
// functions like List() should skip the message even before Update() runs.
func (d DummyBackend) Dele(user string, msgId int) error {
	return nil
}

// Rset undeletes all messages marked as deleted in a single connection.
func (d DummyBackend) Rset(user string) error {
	return nil
}

// Uidl returns unique IDs for all messages, similar to List(), but instead of
// a size each entry is a unique ID that persists across connections, allowing
// clients to keep messages on the server.
func (d DummyBackend) Uidl(user string) (uids []string, err error) {
	ids := make([]string, 0, 5)
	for i := 1; i <= 5; i++ {
		ids = append(ids, fmt.Sprintf("%d", i))
	}
	return ids, nil
}

// UidlMessage is similar to ListMessage, but returns the message's unique ID
// instead of its size.
func (d DummyBackend) UidlMessage(user string, msgId int) (exists bool, uid string, err error) {
	if msgId <= 4 {
		return true, fmt.Sprintf("%d", msgId+1), nil
	}
	return false, "", nil
}

// Update writes all changes to persistent storage, i.e. deletes all messages
// marked as deleted.
func (d DummyBackend) Update(user string) error {
	return nil
}

// Lock is called immediately after a client connects; a real backend would
// cache the mailbox here and refuse a second concurrent lock to avoid a data
// race.
func (d DummyBackend) Lock(user string) error {
	return nil
}

// Unlock releases the storage lock; it is called after the client disconnects.
func (d DummyBackend) Unlock(user string) error {
	return nil
}
package main

import (
	"crypto/sha512"
	"database/sql"
	"encoding/binary"
	"fmt"
	"log"
	"math"
	"net/http"
	"os"
	"regexp"
	"strings"
	"time"

	_ "github.com/lib/pq"
)

// characters used for short-urls
const (
	// NOTE(review): this alphabet is missing lowercase 't' and uppercase 'W',
	// so BASE is 60, not 62. Deliberately left unchanged: fixing it would
	// re-map every short URL already stored in the database — confirm before
	// changing.
	SYMBOLS = "0123456789abcdefghijklmnopqrsuvwxyzABCDEFGHIJKLMNOPQRSTUVXYZ"
	BASE    = uint32(len(SYMBOLS))
)

var (
	db  *sql.DB
	err error
)

// handler serves the landing form and redirects on GET, and creates new
// short URLs on POST.
func handler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "GET":
		short := r.URL.Path[1:]
		if short != "" {
			//urlStr := decodeURL(short)
			urlStr := getURL(short)
			log.Println(urlStr)
			http.Redirect(w, r, urlStr, http.StatusFound)
		} else {
			fmt.Fprintf(w, "<h1>Input url below</h1><br>"+
				"<form action=\"/save/\" method=\"POST\">"+
				//"&nbsp<textarea name=\"url\"></textarea><br><br>"+
				"<label>url:</label>"+"&nbsp<input type=\"text\" name=\"url\"><br><br>"+
				"<label>shorturl:</label>"+"&nbsp<input type=\"text\" name=\"short\"><br><br>"+
				"&nbsp<input type=\"submit\" value=\"Save\">"+
				"</form>")
		}
	case "POST":
		// BUG FIX: headers were previously set after the response body had
		// been written, which makes them no-ops; set them before any write.
		w.Header().Set("Content-Type", "text/html")
		w.Header().Set("Cache-Control", "no-cache")
		w.Header().Set("Access-Control-Allow-Origin", "*")
		if u := r.FormValue("url"); u != "" {
			log.Println(u)
			//validate url start with http
			rHTTP, _ := regexp.Compile("^(http|https)://")
			if !rHTTP.MatchString(u) {
				//set the url start with http as default
				u = "http://" + u
			}
			//validate the url by fetching it
			resp, validURLErr := http.Get(u)
			if validURLErr != nil {
				// BUG FIX: previously logged the package-level `err` (often
				// nil) instead of the error actually returned by http.Get.
				log.Println(validURLErr.Error())
				fmt.Fprintf(w, "invalid url "+u+"\n")
			} else {
				// BUG FIX: the probe response body was never closed, leaking
				// a connection per POST.
				resp.Body.Close()
				short := r.FormValue("short")
				if short == "" {
					//input url but without customised shorturl
					short = encodeURL(u)
				}
				if checkErr := checkCustomURL(u, short); checkErr != nil {
					//the short url is invalid or already taken
					fmt.Fprintf(w, "%v", checkErr)
				} else {
					//s := postURL(u)
					insertURL(u, short)
					fmt.Fprintf(w, "<a href=\"http://%s\">%s</a>", "localhost:3008/"+short, "localhost:3008/"+short)
				}
			}
		}
		return
	default:
		http.Error(w, fmt.Sprintf("Unsupported method: %s", r.Method), http.StatusMethodNotAllowed)
	}
}

// ErrInvalidShortURL reports a custom short URL that fails validation.
type ErrInvalidShortURL string

// Error implements the error interface.
func (e ErrInvalidShortURL) Error() string {
	return fmt.Sprintf("the short url %v is invalid", string(e))
}

// getURL looks up the long URL for a short code; it returns "" when the code
// is unknown.
func getURL(short string) (url string) {
	log.Println(short)
	// BUG FIX: use a local error instead of the shared package-level `err`,
	// which is a data race across concurrent requests.
	scanErr := db.QueryRow("SELECT url FROM shorturl WHERE surl = $1", short).Scan(&url)
	if scanErr == sql.ErrNoRows {
		log.Println("No Results Found")
	}
	return url
}

// encodeURL derives a deterministic short code from the URL: the first four
// bytes of its SHA-512 digest, rendered in the SYMBOLS alphabet.
func encodeURL(url string) (short string) {
	h := sha512.New()
	h.Write([]byte(url))
	bs := h.Sum(nil)
	temp := binary.BigEndian.Uint32(bs)
	short = Encode(temp)
	return short
}

// insertURL stores the (short, url) pair unless the short code already exists.
func insertURL(url string, short string) {
	_, execErr := db.Exec(`INSERT INTO shorturl(surl, url)
		SELECT $1,$2
		WHERE NOT EXISTS (SELECT surl FROM shorturl WHERE surl = $1);`, short, url)
	checkErr(execErr)
}

// ErrShortURLExist defines the struct which shows the existing shorturl record
type ErrShortURLExist struct {
	urlRecord   string
	shortRecord string
}

// Error implements the error interface.
func (e ErrShortURLExist) Error() string {
	return fmt.Sprintf("the short url %v has already been assigned to %v", e.shortRecord, e.urlRecord)
}

// checkCustomURL validates a requested short code and reports a conflict when
// either the code or the URL is already stored.
func checkCustomURL(url string, short string) error {
	//validate short url non-special character
	rShort, _ := regexp.Compile("^[a-zA-Z0-9-]+$")
	if !rShort.MatchString(short) {
		return ErrInvalidShortURL(short)
	}
	var urlRecord, surlRecord string
	// Scan errors (including sql.ErrNoRows) are deliberately ignored: on any
	// failure the record strings stay empty and the code is treated as free.
	_ = db.QueryRow("SELECT url FROM shorturl WHERE surl = $1", short).Scan(&urlRecord)
	_ = db.QueryRow("SELECT surl FROM shorturl WHERE url = $1", url).Scan(&surlRecord)
	if urlRecord != "" {
		log.Printf("the url record is %s \n", urlRecord)
		return ErrShortURLExist{urlRecord, short}
	} else if surlRecord != "" {
		log.Printf("the short url record is %s \n", surlRecord)
		return ErrShortURLExist{url, surlRecord}
	}
	return nil
}

// Encode ...
func Encode(number uint32) string { rest := number % BASE result := string(SYMBOLS[rest]) if number-rest != 0 { newnumber := (number - rest) / BASE result = Encode(newnumber) + result } return result } // Decode ... func Decode(input string) uint32 { const floatbase = float64(BASE) l := len(input) var sum int for index := l - 1; index > -1; index-- { current := string(input[index]) pos := strings.Index(SYMBOLS, current) sum = sum + (pos * int(math.Pow(floatbase, float64((l-index-1))))) } return uint32(sum) } //handle favourte icon request by some browsers func handlerIcon(w http.ResponseWriter, r *http.Request) {} func checkErr(err error) { if err != nil { log.Println(err) panic(err) } } func main() { port := os.Getenv("PORT") if port == "" { port = "3008" } connInfo := fmt.Sprintf( "user=%s dbname=%s password=%s host=%s port=%s sslmode=disable", "postgres", "postgres", os.Getenv("DB_ENV_POSTGRES_PASSWORD"), os.Getenv("DB_PORT_5432_TCP_ADDR"), os.Getenv("DB_PORT_5432_TCP_PORT"), ) db, err = sql.Open("postgres", connInfo) checkErr(err) for i := 0; i < 5; i++ { time.Sleep(time.Duration(i) * time.Second) if err = db.Ping(); err == nil { log.Println("try to connect db") break } log.Println(err) } //initialise the DB table _, err = db.Exec( `create table if not exists shorturl ( surl character(10) NOT NULL, url text, CONSTRAINT unique_url PRIMARY KEY (surl) )`) checkErr(err) http.HandleFunc("/favicon.ico", handlerIcon) http.HandleFunc("/", handler) log.Println("Server started: http://localhost:" + port) log.Fatal(http.ListenAndServe(":"+port, nil)) }
package plugins import ( "github.com/dataprism/dataprism-commons/api" "github.com/dataprism/dataprism-commons/core" ) type DataprismPlugin interface { Id() string CreateRoutes(platform *core.Platform, API *api.Rest) }
package order import ( "github.com/alfuhigi/micro-order-api/pkg/order/item" "gorm.io/gorm" ) type Order struct { gorm.Model ClientID string UserID uint OrderItems []*item.OrderItem OrderStatus []*OrderStatus PaymentOptions uint PickUp string DropOff string DeliveryFees float32 Notes string Phone string Email string TotalPrice float32 //SUM(...OrderItems.TotalPrice) // Payment_options // Address string // Promo_code } type OrderStatus struct { gorm.Model OrderID uint Name string Description string }
package functions import ( c "Project/config" "encoding/json" "fmt" "net/http" "strings" ) //make json req. with parameters func ResJSON(name c.Names)(name_json []byte){ n := c.Names{name.Firstname,name.Lastname} name_json, err := json.Marshal(n) if err != nil{ ResJSON(EmpytJSON()) } return } //parse GET req. as firstname,lastname func ParseReq(r *http.Request) (n c.Names){ req := r.URL.String() // /whoami?firstname?=buse&lastname=sabah res := strings.Split(req, "&lastname=") //res[0]=whoami?firstname=buse, res[1]=sabah if len(res) == 2{ n.Lastname = res[1] n.Firstname = strings.SplitAfter(res[0],"=")[1] }else{ n = EmpytJSON() } return } //make empty JSON func EmpytJSON() c.Names{ n := c.Names{"",""} return n } func CheckMethod(w http.ResponseWriter, r *http.Request) bool{ if r.Method=="GET"{ fmt.Fprintln(w, "This endpoint can not accept GET request") return false } return true } func CheckHeaderType(w http.ResponseWriter, r *http.Request) bool{ var headerType string if headerType = r.Header.Get("Content-Type"); headerType != "application/json" { fmt.Fprintln(w, "Invalid Header Content Type") return false } return true } func ErrCheck(err error, w http.ResponseWriter, message string) bool{ if err != nil { fmt.Fprintln(w, message) return true } return false } func WebhookReq(w http.ResponseWriter, data string, conf c.Configuration){ fmt.Println(data) reqBody := strings.NewReader(data) req, err := http.NewRequest("POST", conf.Environment.DUMMY_WEBHOOK_URL, reqBody) ErrCheck(err,w,"Problem occurred at request process") req.Header.Add("Content-Type", "application/json") //request-response _, err = http.DefaultClient.Do(req) ErrCheck(err,w,"Problem occurred at response process") }
package entities import ( discord "github.com/bwmarrin/discordgo" ) // Player is a Discord player. type Player struct { *Character user *discord.User } // NewPlayer create a new player with a given role. func NewPlayer(role Role, user *discord.User) *Player { p := Player{ Character: NewCharacter(role), user: user, } return &p } func (p Player) UserID() string { return p.user.ID } func (p Player) Mention() string { return p.user.Mention() } func (p Player) UserName() string { return p.user.Username }
package main import ( "fmt" ) func add(x int, y int) int { return x + y } func swap(x, y string) (string, string) { return y, x } func main() { a, b := swap("hello", "world") _, c := swap("hello", "world") fmt.Println(a, b, c) var number int number = 42 if sum := add(number, 13); sum > 50 { fmt.Printf("Resultado da soma: %d\n", sum) } acc := 0 for i := 0; i < 10; i++ { sum += i } carry := 1 for carry < 1000 { carry += carry } }