text
stringlengths
11
4.05M
package roles // User represents a single user type User struct { ID uint Registry *Registry `json:"-"` rights []Right } // Check returns true if user has all of provided rights func (u *User) Check(rights ...Right) bool { return intersection(u.rights, rights) } // Guard stops process if user has not any of provided rights func (u *User) Guard(right ...Right) { if !u.Check(right...) { panic("Access denied") } } func intersection(base, search []Right) bool { if search == nil { return true } if base == nil { return false } nextrule: for j := range search { for i := range base { if search[j] == base[i] { continue nextrule } } return false } return true }
package main

import "fmt"

// firstOccurrenceIndices returns the index of the first occurrence of each
// distinct string in items, in order of first appearance.
func firstOccurrenceIndices(items []string) []int {
	seen := make(map[string]bool, len(items))
	keys := make([]int, 0, len(items))
	for i, v := range items {
		if !seen[v] {
			seen[v] = true
			keys = append(keys, i)
		}
	}
	return keys
}

// main deduplicates a sample slice while preserving first-seen order and
// prints the length, the first-occurrence index list, and the result.
func main() {
	results := []string{"111", "333", "222", "111", "444", "111", "333"}
	fmt.Println("resultsLen:", len(results))
	mvKeys := firstOccurrenceIndices(results)
	res := make([]string, 0, len(mvKeys))
	for _, k := range mvKeys {
		res = append(res, results[k])
	}
	fmt.Println("mvKeys:", mvKeys)
	fmt.Println("res:", res)
}
package main

import (
	"os"

	"github.com/oceanho/gw/contrib/cmder/gwcli"
)

// main constructs the gwcli command-line application and runs it with the
// process arguments.
func main() {
	cli := gwcli.App()
	cli.Run(os.Args)
}
package model

// ConfigInfo holds tool paths and signing settings used for APK processing.
// NOTE(review): field meanings inferred from names/JSON tags — confirm
// against the code that populates and consumes this struct.
type ConfigInfo struct {
	JavaHome   string `json:"java_home"`      // JAVA_HOME directory
	ApkTool    string `json:"apk_tool"`       // path to the apktool binary
	KeyStore   string `json:"key_store"`      // path to the keystore file
	StoreAlias string `json:"store_alias"`    // keystore alias used for signing
	StorePwd   string `json:"store_password"` // keystore password
}
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"bytes"
	"fmt"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/log"
	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

const (
	// devMinorBits is the number of minor bits in a device number. Linux:
	// include/linux/kdev_t.h:MINORBITS
	devMinorBits = 20

	// vsyscallEnd is the end address of the emulated vsyscall page.
	vsyscallEnd = hostarch.Addr(0xffffffffff601000)

	// vsyscallMapsEntry is the hard-coded /proc/[pid]/maps line for the
	// emulated vsyscall page (there is no real vma backing it).
	vsyscallMapsEntry = "ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"

	// vsyscallSmapsEntry is the corresponding hard-coded /proc/[pid]/smaps
	// entry; all counters are zero since the region is purely emulated.
	vsyscallSmapsEntry = vsyscallMapsEntry +
		"Size: 4 kB\n" +
		"Rss: 0 kB\n" +
		"Pss: 0 kB\n" +
		"Shared_Clean: 0 kB\n" +
		"Shared_Dirty: 0 kB\n" +
		"Private_Clean: 0 kB\n" +
		"Private_Dirty: 0 kB\n" +
		"Referenced: 0 kB\n" +
		"Anonymous: 0 kB\n" +
		"AnonHugePages: 0 kB\n" +
		"Shared_Hugetlb: 0 kB\n" +
		"Private_Hugetlb: 0 kB\n" +
		"Swap: 0 kB\n" +
		"SwapPss: 0 kB\n" +
		"KernelPageSize: 4 kB\n" +
		"MMUPageSize: 4 kB\n" +
		"Locked: 0 kB\n" +
		"VmFlags: rd ex \n"
)

// MapsCallbackFuncForBuffer creates a /proc/[pid]/maps entry including the
// trailing newline, formatting each vma passed to the returned callback into
// buf.
func (mm *MemoryManager) MapsCallbackFuncForBuffer(buf *bytes.Buffer) MapsCallbackFunc {
	return func(start, end hostarch.Addr, permissions hostarch.AccessType, private string, offset uint64, devMajor, devMinor uint32, inode uint64, path string) {
		// Do not include the guard page: fs/proc/task_mmu.c:show_map_vma() =>
		// stack_guard_page_start().
		lineLen, err := fmt.Fprintf(buf, "%08x-%08x %s%s %08x %02x:%02x %d ",
			start, end, permissions, private, offset, devMajor, devMinor, inode)
		if err != nil {
			log.Warningf("Failed to write to buffer with error: %v", err)
			return
		}
		if path != "" {
			// Per linux, we pad until the 74th character.
			for pad := 73 - lineLen; pad > 0; pad-- {
				buf.WriteByte(' ') // never returns a non-nil error
			}
			buf.WriteString(path) // never returns a non-nil error
		}
		buf.WriteByte('\n') // never returns a non-nil error
	}
}

// ReadMapsDataInto is called by fsimpl/proc.mapsData.Generate to
// implement /proc/[pid]/maps.
func (mm *MemoryManager) ReadMapsDataInto(ctx context.Context, fn MapsCallbackFunc) {
	// FIXME(b/235153601): Need to replace RLockBypass with RLock
	// after fixing b/235153601.
	mm.mappingMu.RLockBypass()
	defer mm.mappingMu.RUnlockBypass()
	var start hostarch.Addr
	for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
		mm.appendVMAMapsEntryLocked(ctx, vseg, fn)
	}

	// We always emulate vsyscall, so advertise it here. Everything about a
	// vsyscall region is static, so just hard code the maps entry since we
	// don't have a real vma backing it. The vsyscall region is at the end of
	// the virtual address space so nothing should be mapped after it (if
	// something is really mapped in the tiny ~10 MiB segment afterwards, we'll
	// get the sorting on the maps file wrong at worst; but that's not possible
	// on any current platform).
	//
	// Artificially adjust the seqfile handle so we only output vsyscall entry once.
	// NOTE(review): start is never advanced by the loop above, so this
	// condition is always true here — confirm whether start tracking was
	// intended.
	if start != vsyscallEnd {
		fn(hostarch.Addr(0xffffffffff600000), hostarch.Addr(0xffffffffff601000), hostarch.ReadExecute, "p", 0, 0, 0, 0, "[vsyscall]")
	}
}

// vmaMapsEntryLocked returns a /proc/[pid]/maps entry for the vma iterated by
// vseg, including the trailing newline.
//
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) vmaMapsEntryLocked(ctx context.Context, vseg vmaIterator) []byte {
	var b bytes.Buffer
	mm.appendVMAMapsEntryLocked(ctx, vseg, mm.MapsCallbackFuncForBuffer(&b))
	return b.Bytes()
}

// appendVMAMapsEntryLocked passes the maps-entry fields for the vma iterated
// by vseg to fn: address range, permissions, share/private flag, offset,
// device major:minor, inode, and the file name or hint.
//
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) appendVMAMapsEntryLocked(ctx context.Context, vseg vmaIterator, fn MapsCallbackFunc) {
	vma := vseg.ValuePtr()
	private := "p"
	if !vma.private {
		private = "s"
	}

	var dev, ino uint64
	if vma.id != nil {
		dev = vma.id.DeviceID()
		ino = vma.id.InodeID()
	}
	// Split the device number into Linux's major:minor representation.
	devMajor := uint32(dev >> devMinorBits)
	devMinor := uint32(dev & ((1 << devMinorBits) - 1))

	// Figure out our filename or hint.
	var path string
	if vma.hint != "" {
		path = vma.hint
	} else if vma.id != nil {
		// FIXME(jamieliu): We are holding mm.mappingMu here, which is
		// consistent with Linux's holding mmap_sem in
		// fs/proc/task_mmu.c:show_map_vma() => fs/seq_file.c:seq_file_path().
		// However, it's not clear that fs.File.MappedName() is actually
		// consistent with this lock order.
		path = vma.id.MappedName(ctx)
	}
	fn(vseg.Start(), vseg.End(), vma.realPerms, private, vma.off, devMajor, devMinor, ino, path)
}

// ReadSmapsDataInto is called by fsimpl/proc.smapsData.Generate to
// implement /proc/[pid]/smaps.
func (mm *MemoryManager) ReadSmapsDataInto(ctx context.Context, buf *bytes.Buffer) {
	// FIXME(b/235153601): Need to replace RLockBypass with RLock
	// after fixing b/235153601.
	mm.mappingMu.RLockBypass()
	defer mm.mappingMu.RUnlockBypass()
	var start hostarch.Addr
	for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
		mm.vmaSmapsEntryIntoLocked(ctx, vseg, buf)
	}

	// We always emulate vsyscall, so advertise it here. See
	// ReadMapsSeqFileData for additional commentary.
	// NOTE(review): start is never advanced by the loop above, so this
	// condition is always true here.
	if start != vsyscallEnd {
		buf.WriteString(vsyscallSmapsEntry)
	}
}

// vmaSmapsEntryLocked returns a /proc/[pid]/smaps entry for the vma iterated
// by vseg, including the trailing newline.
//
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) vmaSmapsEntryLocked(ctx context.Context, vseg vmaIterator) []byte {
	var b bytes.Buffer
	mm.vmaSmapsEntryIntoLocked(ctx, vseg, &b)
	return b.Bytes()
}

// vmaSmapsEntryIntoLocked writes the full /proc/[pid]/smaps entry for the vma
// iterated by vseg into b: the maps header line, the size/RSS/PSS counters,
// and the VmFlags line.
//
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) vmaSmapsEntryIntoLocked(ctx context.Context, vseg vmaIterator, b *bytes.Buffer) {
	mm.appendVMAMapsEntryLocked(ctx, vseg, mm.MapsCallbackFuncForBuffer(b))
	vma := vseg.ValuePtr()

	// We take mm.activeMu here in each call to vmaSmapsEntryLocked, instead of
	// requiring it to be locked as a precondition, to reduce the latency
	// impact of reading /proc/[pid]/smaps on concurrent performance-sensitive
	// operations requiring activeMu for writing like faults.
	mm.activeMu.RLock()
	var rss uint64
	var anon uint64
	vsegAR := vseg.Range()
	// Sum the resident (pma-backed) bytes overlapping this vma; private pmas
	// count as anonymous.
	for pseg := mm.pmas.LowerBoundSegment(vsegAR.Start); pseg.Ok() && pseg.Start() < vsegAR.End; pseg = pseg.NextSegment() {
		psegAR := pseg.Range().Intersect(vsegAR)
		size := uint64(psegAR.Length())
		rss += size
		if pseg.ValuePtr().private {
			anon += size
		}
	}
	mm.activeMu.RUnlock()

	fmt.Fprintf(b, "Size: %8d kB\n", vseg.Range().Length()/1024)
	fmt.Fprintf(b, "Rss: %8d kB\n", rss/1024)
	// Currently we report PSS = RSS, i.e. we pretend each page mapped by a pma
	// is only mapped by that pma. This avoids having to query memmap.Mappables
	// for reference count information on each page. As a corollary, all pages
	// are accounted as "private" whether or not the vma is private; compare
	// Linux's fs/proc/task_mmu.c:smaps_account().
	fmt.Fprintf(b, "Pss: %8d kB\n", rss/1024)
	fmt.Fprintf(b, "Shared_Clean: %8d kB\n", 0)
	fmt.Fprintf(b, "Shared_Dirty: %8d kB\n", 0)
	// Pretend that all pages are dirty if the vma is writable, and clean otherwise.
	clean := rss
	if vma.effectivePerms.Write {
		clean = 0
	}
	fmt.Fprintf(b, "Private_Clean: %8d kB\n", clean/1024)
	fmt.Fprintf(b, "Private_Dirty: %8d kB\n", (rss-clean)/1024)
	// Pretend that all pages are "referenced" (recently touched).
	fmt.Fprintf(b, "Referenced: %8d kB\n", rss/1024)
	fmt.Fprintf(b, "Anonymous: %8d kB\n", anon/1024)
	// Hugepages (hugetlb and THP) are not implemented.
	fmt.Fprintf(b, "AnonHugePages: %8d kB\n", 0)
	fmt.Fprintf(b, "Shared_Hugetlb: %8d kB\n", 0)
	fmt.Fprintf(b, "Private_Hugetlb: %7d kB\n", 0)
	// Swap is not implemented.
	fmt.Fprintf(b, "Swap: %8d kB\n", 0)
	fmt.Fprintf(b, "SwapPss: %8d kB\n", 0)
	fmt.Fprintf(b, "KernelPageSize: %8d kB\n", hostarch.PageSize/1024)
	fmt.Fprintf(b, "MMUPageSize: %8d kB\n", hostarch.PageSize/1024)
	locked := rss
	if vma.mlockMode == memmap.MLockNone {
		locked = 0
	}
	fmt.Fprintf(b, "Locked: %8d kB\n", locked/1024)

	// Emit the VmFlags line mirroring fs/proc/task_mmu.c:show_smap_vma_flags().
	b.WriteString("VmFlags: ")
	if vma.realPerms.Read {
		b.WriteString("rd ")
	}
	if vma.realPerms.Write {
		b.WriteString("wr ")
	}
	if vma.realPerms.Execute {
		b.WriteString("ex ")
	}
	if vma.canWriteMappableLocked() { // VM_SHARED
		b.WriteString("sh ")
	}
	if vma.maxPerms.Read {
		b.WriteString("mr ")
	}
	if vma.maxPerms.Write {
		b.WriteString("mw ")
	}
	if vma.maxPerms.Execute {
		b.WriteString("me ")
	}
	if !vma.private { // VM_MAYSHARE
		b.WriteString("ms ")
	}
	if vma.growsDown {
		b.WriteString("gd ")
	}
	if vma.mlockMode != memmap.MLockNone { // VM_LOCKED
		b.WriteString("lo ")
	}
	if vma.mlockMode == memmap.MLockLazy { // VM_LOCKONFAULT
		b.WriteString("?? ") // no explicit encoding in fs/proc/task_mmu.c:show_smap_vma_flags()
	}
	if vma.private && vma.effectivePerms.Write { // VM_ACCOUNT
		b.WriteString("ac ")
	}
	b.WriteString("\n")
}
package sandbox

import (
	"github.com/regclient/regclient/regclient/types"
	lua "github.com/yuin/gopher-lua"
)

// setupReference registers the reference type with the sandbox's Lua state:
// a constructor, a __tostring metamethod, and digest/tag accessors.
func setupReference(s *Sandbox) {
	s.setupMod(
		luaReferenceName,
		map[string]lua.LGFunction{
			"new":        s.newReference,
			"__tostring": s.referenceString,
		},
		map[string]map[string]lua.LGFunction{
			"__index": {
				"digest": s.referenceGetSetDigest,
				"tag":    s.referenceGetSetTag,
			},
		},
	)
}

// reference refers to a repository or image name
type reference struct {
	ref types.Ref
}

// newReference creates a reference userdata from the first Lua argument
// (a string or an existing reference-like userdata) and pushes it.
func (s *Sandbox) newReference(ls *lua.LState) int {
	ref := s.checkReference(ls, 1)
	ud := ls.NewUserData()
	// Copy the parsed ref into a fresh wrapper so the new value is
	// independent of the argument.
	ud.Value = &reference{ref: ref.ref}
	ls.SetMetatable(ud, ls.GetTypeMetatable(luaReferenceName))
	ls.Push(ud)
	return 1
}

// checkReference extracts a reference from Lua stack position i. Strings are
// parsed with types.NewRef; userdata may be a reference, manifest, or config.
// NOTE(review): ls.ArgError raises a Lua error and does not return, so the
// ref = nil fall-through after the default cases is unreachable — confirm
// against gopher-lua's ArgError semantics.
func (s *Sandbox) checkReference(ls *lua.LState, i int) *reference {
	var ref *reference
	switch ls.Get(i).Type() {
	case lua.LTString:
		nr, err := types.NewRef(ls.CheckString(i))
		if err != nil {
			ls.ArgError(i, "reference parsing failed: "+err.Error())
		}
		ref = &reference{ref: nr}
	case lua.LTUserData:
		ud := ls.CheckUserData(i)
		switch ud.Value.(type) {
		case *reference:
			ref = ud.Value.(*reference)
		case *sbManifest:
			// Borrow the ref embedded in a manifest userdata.
			m := ud.Value.(*sbManifest)
			ref = &reference{ref: m.ref}
		case *config:
			c := ud.Value.(*config)
			ref = &reference{ref: c.ref}
		default:
			ls.ArgError(i, "reference expected")
		}
	default:
		ls.ArgError(i, "reference expected")
	}
	return ref
}

// isReference reports whether Lua stack position i holds a reference
// userdata (without raising a Lua error).
func isReference(ls *lua.LState, i int) bool {
	if ls.Get(i).Type() != lua.LTUserData {
		return false
	}
	ud := ls.CheckUserData(i)
	if _, ok := ud.Value.(*reference); ok {
		return true
	}
	return false
}

// referenceString converts a reference back to a common name
func (s *Sandbox) referenceString(ls *lua.LState) int {
	r := s.checkReference(ls, 1)
	ls.Push(lua.LString(r.ref.CommonName()))
	return 1
}

// referenceGetSetDigest is the Lua digest accessor: with one argument it
// returns the digest; with a second argument it sets the digest and
// returns nothing.
func (s *Sandbox) referenceGetSetDigest(ls *lua.LState) int {
	r := s.checkReference(ls, 1)
	if ls.GetTop() == 2 {
		r.ref.Digest = ls.CheckString(2)
		return 0
	}
	ls.Push(lua.LString(r.ref.Digest))
	return 1
}

// referenceGetSetTag is the Lua tag accessor: with one argument it returns
// the tag; with a second argument it sets the tag and returns nothing.
func (s *Sandbox) referenceGetSetTag(ls *lua.LState) int {
	r := s.checkReference(ls, 1)
	if ls.GetTop() == 2 {
		r.ref.Tag = ls.CheckString(2)
		return 0
	}
	ls.Push(lua.LString(r.ref.Tag))
	return 1
}
package kmgo

import (
	"net/http"
	"net/url"
	"strconv"
)

// NewKM returns a KM client configured with the given API key.
func NewKM(key string) *KM {
	return &KM{api_key: key}
}

// NewEvent builds a KMEvent for the named event, person, and properties.
// The timestamp defaults to 0, which means "now" on the server side.
func (km KM) NewEvent(event string, person string, properties url.Values) *KMEvent {
	event_struct := KMEvent{km, person, event, properties, 0}
	return &event_struct
}

// SetTimestamp sets a custom Unix timestamp on the event and returns the
// event for chaining.
func (ev *KMEvent) SetTimestamp(time int) *KMEvent {
	ev.timestamp = time
	return ev
}

// Send delivers the event to the KISSmetrics tracking endpoint via HTTP GET.
// It mutates ev.properties by adding the _k/_p/_n (and optionally _t/_d)
// parameters, so calling Send twice would add duplicate keys.
func (ev *KMEvent) Send() (*KMEvent, error) {
	address := "https://trk.kissmetrics.com/e?"
	ev.properties.Add("_k", ev.api_key)
	ev.properties.Add("_p", ev.person)
	ev.properties.Add("_n", ev.name)
	// If we're specifying a custom timestamp.
	if ev.timestamp != 0 {
		// BUG FIX: string(ev.timestamp) converted the int to a single rune
		// (code point), not its decimal digits; use strconv.Itoa instead.
		ev.properties.Add("_t", strconv.Itoa(ev.timestamp))
		ev.properties.Add("_d", "1")
	}
	url_params := ev.properties.Encode()
	address += url_params
	request, err := http.NewRequest("GET", address, nil)
	if err != nil {
		return nil, err
	}
	// NOTE(review): the client has no timeout; consider
	// &http.Client{Timeout: ...} if Send may run on a latency-sensitive path.
	client := &http.Client{}
	resp, err := client.Do(request)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the response body was previously discarded without Close,
	// leaking the connection; close it so the transport can reuse it.
	resp.Body.Close()
	return ev, nil
}
package sanity

import (
	"github.com/go-gorp/gorp"
	"github.com/ovh/cds/sdk"
)

// LoadAllWarnings loads all warnings existing in CDS
// NOTE(review): not implemented yet — always returns (nil, nil); the db and
// al parameters are currently unused.
func LoadAllWarnings(db gorp.SqlExecutor, al string) ([]sdk.Warning, error) {
	// TODO
	return nil, nil
}

// LoadUserWarnings loads all warnings related to Jobs user has access to
// NOTE(review): not implemented yet — always returns (nil, nil); all
// parameters are currently unused.
func LoadUserWarnings(db gorp.SqlExecutor, al string, userID int64) ([]sdk.Warning, error) {
	// TODO
	return nil, nil
}
package translated

import "github.com/stephens2424/php/passes/togo/internal/phpctx"

// Echo writes the literal string "test" through the context's Echo writer.
// NOTE(review): this passes a string, so phpctx.PHPContext.Echo cannot be a
// plain io.Writer (which takes []byte) — confirm Echo's Write signature in
// the phpctx package.
func Echo(ctx phpctx.PHPContext) {
	ctx.Echo.Write("test")
}
package connector import ( "common" "logger" "rankclient" "rpc" ) const ( RANK_PROFIT = 1 RANK_COIN = 2 RANK_EXP = 3 ) const ( RANK_CNT = 50 ) func (cn *CNServer) GetRanking(conn rpc.RpcConn, msg rpc.ReqRankList) error { logger.Info("GetRanking called") p, exist := cn.getPlayerByConnId(conn.GetId()) if !exist { return nil } eType := msg.GetRankType() sendMsg := &rpc.RankList{} sendMsg.SetRankType(eType) if eType == RANK_EXP { cns.rankMgr.explock.RLock() sendMsg.RankList = cns.rankMgr.Exps.RankList cns.rankMgr.explock.RUnlock() } else if eType == RANK_COIN { cns.rankMgr.explock.RLock() sendMsg.RankList = cns.rankMgr.Coins.RankList cns.rankMgr.explock.RUnlock() common.WriteResult(conn, sendMsg) } else { cns.rankMgr.explock.RLock() sendMsg.RankList = cns.rankMgr.Profits.RankList cns.rankMgr.explock.RUnlock() } cns.getMyRanking(sendMsg, p, eType) common.WriteResult(conn, sendMsg) return nil } func (cn *CNServer) getMyRanking(ranking *rpc.RankList, p *player, rankType int32) { logger.Info("getMyRanking rankType:", rankType) if len(ranking.RankList) < RANK_CNT { return } //在排行榜中 uid := p.GetUid() for _, rank := range ranking.RankList { if rank.GetUid() == uid { return } } info := &rpc.RankInfo{} info.SetUid(p.GetUid()) info.SetRoleId(p.GetRoleId()) info.SetSex(p.GetSex()) info.SetName(p.GetName()) info.SetLevel(p.GetLevel()) info.SetExp(p.GetExp()) if p.GetVipLeftDay() > 0 { info.SetBVip(true) } else { info.SetBVip(false) } info.SetCoin(p.GetCoin()) info.SetGem(p.GetGem()) info.SetHeaderUrl(p.GetHeaderUrl()) rank, err := rankclient.GetMyRankingInfo(int(rankType), uid) if err != nil { logger.Error("rankclient.GetRankingInfo error, eType:%d, uid:%s", rankType, uid, err) return } info.SetRankNum(int32(rank + 1)) if rankType == RANK_EXP { info.SetRankValue(int64(p.GetExp())) } else if rankType == RANK_COIN { info.SetRankValue(int64(p.GetCoin())) } else if rankType == RANK_PROFIT { info.SetRankValue(int64(p.GetProfits())) } }
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package windowarrangementcuj

import (
	"context"
	"time"

	"chromiumos/tast/common/action"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/ash"
	"chromiumos/tast/local/chrome/display"
	"chromiumos/tast/local/chrome/uiauto"
	"chromiumos/tast/local/chrome/uiauto/event"
	"chromiumos/tast/local/chrome/uiauto/mouse"
	"chromiumos/tast/local/chrome/uiauto/nodewith"
	"chromiumos/tast/local/chrome/uiauto/pointer"
	"chromiumos/tast/local/chrome/uiauto/role"
	"chromiumos/tast/local/coords"
	"chromiumos/tast/local/input"
	"chromiumos/tast/testing"
)

// multiresize summons a multiresizer and drags it like dragAndRestore, but
// with all drag points adjusted for the location of the multiresizer.
func multiresize(ctx context.Context, tconn *chrome.TestConn, ui *uiauto.Context, pc pointer.Context, duration time.Duration, dragPoints ...coords.Point) error {
	// Move the mouse near the first drag point until the multiresize widget appears.
	multiresizer := nodewith.Role("window").ClassName("MultiWindowResizeController")
	// Sweep a small diagonal (-5..+5 px) around the first drag point; give up
	// if the widget never shows.
	for hoverOffset := -5; ; hoverOffset++ {
		if err := mouse.Move(tconn, dragPoints[0].Add(coords.NewPoint(hoverOffset, hoverOffset)), 100*time.Millisecond)(ctx); err != nil {
			return errors.Wrap(err, "failed to move mouse")
		}
		multiresizerExists, err := ui.IsNodeFound(ctx, multiresizer)
		if err != nil {
			return errors.Wrap(err, "failed to check for multiresizer")
		}
		if multiresizerExists {
			break
		}
		if hoverOffset == 5 {
			return errors.New("never found multiresize widget")
		}
	}
	multiresizerBounds, err := ui.ImmediateLocation(ctx, multiresizer)
	if err != nil {
		return errors.Wrap(err, "failed to get the multiresizer location")
	}
	// Shift every drag point by the offset from the first drag point to the
	// widget's center, then perform the normal drag-and-restore.
	offset := multiresizerBounds.CenterPoint().Sub(dragPoints[0])
	var multiresizeDragPoints []coords.Point
	for _, p := range dragPoints {
		multiresizeDragPoints = append(multiresizeDragPoints, p.Add(offset))
	}
	if err := dragAndRestore(ctx, tconn, pc, duration, multiresizeDragPoints...); err != nil {
		return errors.Wrap(err, "failed to drag multiresizer")
	}
	return nil
}

// RunClamShell runs window arrangement cuj for clamshell. We test performance
// for resizing window, dragging window, maximizing window, minimizing window
// and split view resizing.
func RunClamShell(ctx, closeCtx context.Context, tconn *chrome.TestConn, ui *uiauto.Context, pc pointer.Context) (retErr error) {
	const (
		timeout  = 10 * time.Second
		duration = 2 * time.Second
	)

	// Gets primary display info and interesting drag points.
	info, err := display.GetPrimaryInfo(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to get the primary display info")
	}
	// Points used for split-view divider drags: center, quarter-left, and the
	// far right edge of the work area.
	splitViewDragPoints := []coords.Point{
		info.WorkArea.CenterPoint(),
		coords.NewPoint(info.WorkArea.Left+info.WorkArea.Width/4, info.WorkArea.CenterY()),
		coords.NewPoint(info.WorkArea.Left+info.WorkArea.Width-1, info.WorkArea.CenterY()),
	}
	snapLeftPoint := coords.NewPoint(info.WorkArea.Left+1, info.WorkArea.CenterY())
	snapRightPoint := coords.NewPoint(info.WorkArea.Right()-1, info.WorkArea.CenterY())

	// Get the browser window.
	ws, err := getAllNonPipWindows(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to obtain the window list")
	}
	if len(ws) != 1 {
		return errors.Errorf("unexpected number of windows: got %d, want 1", len(ws))
	}
	browserWinID := ws[0].ID

	// Set the browser window state to "Normal".
	if err := ash.SetWindowStateAndWait(ctx, tconn, browserWinID, ash.WindowStateNormal); err != nil {
		return errors.Wrap(err, "failed to set browser window state to \"Normal\"")
	}

	// Initialize the browser window bounds.
	desiredBounds := info.WorkArea.WithInset(50, 50)
	bounds, displayID, err := ash.SetWindowBounds(ctx, tconn, browserWinID, desiredBounds, info.ID)
	if err != nil {
		return errors.Wrap(err, "failed to set the browser window bounds")
	}
	if displayID != info.ID {
		return errors.Errorf("unexpected display ID for browser window: got %q; want %q", displayID, info.ID)
	}
	if bounds != desiredBounds {
		return errors.Errorf("unexpected browser window bounds: got %v; want %v", bounds, desiredBounds)
	}

	// Wait for the browser window to finish animating to the desired bounds.
	if err := ash.WaitWindowFinishAnimating(ctx, tconn, browserWinID); err != nil {
		return errors.Wrap(err, "failed to wait for the browser window animation")
	}

	// Resize window.
	upperLeftPt := coords.NewPoint(bounds.Left, bounds.Top)
	middlePt := coords.NewPoint(bounds.Left+bounds.Width/2, bounds.Top+bounds.Height/2)
	testing.ContextLog(ctx, "Resizing the browser window")
	if err := dragAndRestore(ctx, tconn, pc, duration, upperLeftPt, middlePt); err != nil {
		return errors.Wrap(err, "failed to resize browser window from the upper left to the middle and back")
	}

	// Drag window.
	newTabButton := nodewith.Name("New Tab")
	newTabButtonRect, err := ui.Location(ctx, newTabButton)
	if err != nil {
		return errors.Wrap(err, "failed to get the location of the new tab button")
	}
	// A point just right of the New Tab button is empty tab-strip space,
	// suitable for dragging the whole window.
	tabStripGapPt := coords.NewPoint(newTabButtonRect.Right()+10, newTabButtonRect.Top)
	testing.ContextLog(ctx, "Dragging the browser window")
	if err := dragAndRestore(ctx, tconn, pc, duration, tabStripGapPt, middlePt); err != nil {
		return errors.Wrap(err, "failed to drag browser window from the tab strip point to the middle and back")
	}

	// Maximize window and then minimize and restore it.
	// TODO(https://crbug.com/1324662): When the bug is fixed,
	// do these window state changes more like a real user.
	for _, windowState := range []ash.WindowStateType{ash.WindowStateMaximized, ash.WindowStateMinimized, ash.WindowStateNormal} {
		if err := ash.SetWindowStateAndWait(ctx, tconn, browserWinID, windowState); err != nil {
			return errors.Wrapf(err, "failed to set browser window state to %v", windowState)
		}
	}

	// Lacros browser sometime restores to a different bounds so calculate
	// a new grab point.
	newBrowserWin, err := ash.GetWindow(ctx, tconn, browserWinID)
	if err != nil {
		return errors.Wrap(err, "failed to get browser window info")
	}
	newBounds := newBrowserWin.BoundsInRoot
	tabStripGapPt = coords.NewPoint(newBounds.Left+newBounds.Width*3/4, newBounds.Top+10)

	// Snap the window to the left and drag the second tab to snap to the right.
	testing.ContextLog(ctx, "Snapping the browser window to the left")
	if err := pc.Drag(tabStripGapPt, pc.DragTo(snapLeftPoint, duration))(ctx); err != nil {
		return errors.Wrap(err, "failed to snap the browser window to the left")
	}
	if err := ash.WaitForCondition(ctx, tconn, func(w *ash.Window) bool {
		return w.ID == browserWinID && w.State == ash.WindowStateLeftSnapped && !w.IsAnimating
	}, &testing.PollOptions{Timeout: timeout}); err != nil {
		return errors.Wrap(err, "failed to wait for browser window to be left snapped")
	}
	testing.ContextLog(ctx, "Snapping the second tab to the right")
	firstTab := nodewith.Role(role.Tab).ClassName("Tab").First()
	firstTabRect, err := ui.Location(ctx, firstTab)
	if err != nil {
		return errors.Wrap(err, "failed to get the location of the first tab")
	}
	// Dragging a tab out of the strip creates a second browser window.
	if err := pc.Drag(firstTabRect.CenterPoint(), pc.DragTo(snapRightPoint, duration))(ctx); err != nil {
		return errors.Wrap(err, "failed to snap the second tab to the right")
	}
	defer cleanUp(ctx, action.Named(
		"recombine the browser tabs",
		func(ctx context.Context) error {
			return combineTabs(ctx, tconn, ui, pc, duration)
		},
	), &retErr)
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		ws, err := getAllNonPipWindows(ctx, tconn)
		if err != nil {
			return errors.Wrap(err, "failed to obtain the window list")
		}
		if len(ws) != 2 {
			return errors.Errorf("should be 2 windows, got %v", len(ws))
		}
		// Either window may be left-snapped; accept both orderings.
		if (ws[1].State == ash.WindowStateLeftSnapped && ws[0].State == ash.WindowStateRightSnapped) ||
			(ws[0].State == ash.WindowStateLeftSnapped && ws[1].State == ash.WindowStateRightSnapped) {
			return nil
		}
		return errors.New("browser windows are not snapped yet")
	}, &testing.PollOptions{Timeout: timeout}); err != nil {
		return errors.Wrap(err, "failed to wait for browser windows to be snapped correctly")
	}

	// Use multiresize on the two snapped windows.
	testing.ContextLog(ctx, "Multiresizing two snapped browser windows")
	const dividerDragError = "failed to drag divider slightly left, all the way right, and back to center"
	if err := multiresize(ctx, tconn, ui, pc, duration, splitViewDragPoints...); err != nil {
		return errors.Wrap(err, dividerDragError)
	}

	kw, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to open the keyboard")
	}
	defer cleanUp(closeCtx, action.Named(
		"close the keyboard",
		func(ctx context.Context) error {
			return kw.Close()
		},
	), &retErr)

	// Enter the overview mode.
	topRow, err := input.KeyboardTopRowLayout(ctx, kw)
	if err != nil {
		return errors.Wrap(err, "failed to obtain the top-row layout")
	}
	enterOverview := kw.AccelAction(topRow.SelectTask)
	if err := enterOverview(ctx); err != nil {
		return errors.Wrap(err, "failed to enter overview mode")
	}
	defer cleanUp(closeCtx, action.Named(
		"ensure not in overview",
		func(ctx context.Context) error {
			return ash.SetOverviewModeAndWait(ctx, tconn, false)
		},
	), &retErr)

	// Create a second virtual desk.
	if err := ash.CreateNewDesk(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to create a new desk")
	}
	defer cleanUp(closeCtx, action.Named(
		"remove extra desk",
		func(ctx context.Context) error {
			return removeExtraDesk(ctx, tconn)
		},
	), &retErr)

	// Wait for location-change events to be completed.
	if err := ui.WithInterval(2*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
		return errors.Wrap(err, "failed to wait for location-change events to be completed")
	}

	// Drag the first window from overview grid to snap.
	w, err := ash.FindFirstWindowInOverview(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to find the browser window in the overview mode")
	}
	if err := pc.Drag(w.OverviewInfo.Bounds.CenterPoint(), pc.DragTo(snapLeftPoint, duration))(ctx); err != nil {
		return errors.Wrap(err, "failed to drag browser window from overview to snap")
	}

	// Wait for location-change events to be completed.
	if err := ui.WithInterval(2*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
		return errors.Wrap(err, "failed to wait for location-change events to be completed")
	}

	// Drag divider.
	testing.ContextLog(ctx, "Dragging the divider between a snapped browser window and an overview window")
	if err := dragAndRestore(ctx, tconn, pc, duration, splitViewDragPoints...); err != nil {
		return errors.Wrap(err, dividerDragError)
	}

	// Drag the second window to another desk to obtain an empty overview grid.
	w, err = ash.FindFirstWindowInOverview(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to find the window in the overview mode to drag to another desk")
	}
	deskMiniViews, err := ui.NodesInfo(ctx, nodewith.ClassName("DeskMiniView"))
	if err != nil {
		return errors.Wrap(err, "failed to get desk mini-views")
	}
	if deskMiniViewCount := len(deskMiniViews); deskMiniViewCount < 2 {
		return errors.Errorf("expected more than 1 desk mini-views; found %v", deskMiniViewCount)
	}
	if err := pc.Drag(w.OverviewInfo.Bounds.CenterPoint(),
		pc.DragTo(deskMiniViews[1].Location.CenterPoint(), duration))(ctx); err != nil {
		return errors.Wrap(err, "failed to drag browser window from overview grid to desk mini-view")
	}

	// Wait for location-change events to be completed.
	if err := ui.WithInterval(2*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
		return errors.Wrap(err, "failed to wait for location-change events to be completed")
	}

	// Drag divider.
	testing.ContextLog(ctx, "Dragging the divider between a snapped browser window and an empty overview grid")
	if err := dragAndRestore(ctx, tconn, pc, duration, splitViewDragPoints...); err != nil {
		return errors.Wrap(err, dividerDragError)
	}

	return nil
}
package perfect

import (
	"errors"
	"math"
)

// Classification labels a number by Nicomachus' scheme: perfect, abundant,
// or deficient.
type Classification string

const (
	ClassificationAbundant  Classification = "ClassificationAbundant"
	ClassificationDeficient Classification = "ClassificationDeficient"
	ClassificationPerfect   Classification = "ClassificationPerfect"
)

var (
	// ErrOnlyPositive is returned for inputs that are not positive.
	ErrOnlyPositive = errors.New("negative number")
)

// Classify checks a given number whether is perfect, abundant, or deficient
// number, by comparing n against its aliquot sum (the sum of its proper
// divisors).
func Classify(n int64) (Classification, error) {
	if n < 1 {
		return "", ErrOnlyPositive
	}
	// 1 divides every number; collect the remaining divisors in pairs by
	// trial division up to sqrt(n).
	aliquot := int64(1)
	root := int64(math.Sqrt(float64(n)))
	for d := int64(2); d <= root; d++ {
		if n%d != 0 {
			continue
		}
		aliquot += d
		if other := n / d; other != d {
			aliquot += other
		}
	}
	switch {
	case aliquot == n && aliquot != 1:
		return ClassificationPerfect, nil
	case aliquot > n:
		return ClassificationAbundant, nil
	default:
		return ClassificationDeficient, nil
	}
}
// Package broker implementation from https://stackoverflow.com/a/49877632 package broker type Broker struct { stopCh chan struct{} publishCh chan interface{} subCh chan chan interface{} unsubCh chan chan interface{} } func New() *Broker { return &Broker{ stopCh: make(chan struct{}), publishCh: make(chan interface{}, 4), subCh: make(chan chan interface{}), unsubCh: make(chan chan interface{}), } } func (b *Broker) Start() { subs := map[chan interface{}]struct{}{} for { select { case <-b.stopCh: return case msgCh := <-b.subCh: subs[msgCh] = struct{}{} case msgCh := <-b.unsubCh: delete(subs, msgCh) case msg := <-b.publishCh: for msgCh := range subs { // msgCh is buffered, use non-blocking send to protect the broker: select { case msgCh <- msg: default: } } } } } func (b *Broker) Stop() { close(b.stopCh) } func (b *Broker) Subscribe() chan interface{} { msgCh := make(chan interface{}, 4) b.subCh <- msgCh return msgCh } func (b *Broker) Unsubscribe(msgCh chan interface{}) { b.unsubCh <- msgCh } func (b *Broker) Publish(msg interface{}) { b.publishCh <- msg }
// Copyright 2013, Bryan Matsuo. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // service.go [created: Sun, 18 Aug 2013] package main import ( "bytes" "crypto/sha1" "encoding/base64" "errors" "fmt" "io" "io/ioutil" "time" ) var ErrDigestMismatch = errors.New("digest did not match content") var ErrTooLarge = fmt.Errorf("too large") var ErrUnexpectedEOF = fmt.Errorf("unexpected eof") type API interface { Get(id string) (Block, error) Create(r io.Reader, checksum string) (string, error) Delete(id string) error } type Service struct { } func NewService() *Service { s := new(Service) return s } func (service *Service) Get(id string) (Block, error) { return nil, fmt.Errorf("unimplemented") } func (service *Service) Create(r io.Reader, size uint64, digest string) (string, error) { // read request data and fork to file system and checksum hash := sha1.New() tee := newTWriter(hash, ioutil.Discard) // FIXME err := copyN(tee, r, size) if err != nil { return "", err } computedDigest := base64.StdEncoding.EncodeToString(hash.Sum(nil)) if digest != computedDigest { return "", ErrDigestMismatch } return digest, nil } func (service *Service) Delete(id string) error { return fmt.Errorf("unimplemented") } type Block interface { Id() string ModTime() time.Time Size() uint64 Open() (io.ReadCloser, error) } type simpleBlock struct { id string data []byte } func (b *simpleBlock) Id() string { return b.id } func (b *simpleBlock) ModeTime() time.Time { return time.Now() } func (b *simpleBlock) Size() uint64 { return uint64(len(b.data)) } func (b *simpleBlock) Open() io.ReadCloser { return ioutil.NopCloser(bytes.NewBuffer(b.data)) } func copyN(w io.Writer, r io.Reader, n uint64) error { bufSize := 4096 if n%uint64(bufSize) == 0 { bufSize++ } var totalread uint64 buf := make([]byte, bufSize) for { nread, err := r.Read(buf) if err == io.EOF { if nread == 0 { break } } else if err != nil { return err } _nwrite := 
nread if totalread+uint64(nread) > n { // pushed over just now. _nwrite = int(n - totalread) } totalread += uint64(nread) _, err = w.Write(buf[:_nwrite]) if err != nil { return err } if totalread > n { return ErrTooLarge } } if totalread < n { return ErrUnexpectedEOF } return nil } type writeResponse struct { id int n int err error } type tWriter struct { resp chan writeResponse out1, out2 io.Writer } func newTWriter(out1, out2 io.Writer) *tWriter { return &tWriter{ resp: make(chan writeResponse, 0), out1: out1, out2: out2, } } func (w tWriter) Write(p []byte) (n int, err error) { go func() { n, err := w.out1.Write(p) w.resp <- writeResponse{1, n, err} }() go func() { n, err := w.out2.Write(p) w.resp <- writeResponse{1, n, err} }() resp1 := <-w.resp resp2 := <-w.resp if resp1.err != nil { return resp1.n, resp1.err } if resp2.err != nil { return resp2.n, resp2.err } return len(p), nil }
package main

import (
	"fmt"
)

// Rect is a rectangle positioned at (x, y).
// (Go has no classes; behaviour is attached through the methods below.)
type Rect struct {
	x, y          float64
	width, height float64
}

// Area returns width * height.
func (r *Rect) Area() float64 {
	return r.width * r.height
}

// Go has no constructors; by convention a package-level NewXXX function
// plays the role of one.
func newRect(x, y, width, height float64) *Rect {
	return &Rect{x, y, width, height}
}

// Go also has no inheritance; it offers composition instead, via anonymous
// (embedded) fields. Example below.

// Base demonstrates a type intended to be embedded.
type Base struct {
	Name string
}

func (base *Base) Foo() {
	base.Name = "Base Foo2"
}

func (base *Base) Bar() {
	base.Name = "Base Bar"
}

//==================================

// Foo embeds Base, so it gains all of Base's methods and fields (promotion).
type Foo struct {
	Base // embedded: Foo "inherits" everything Base provides
	Name1 string
}

// Bar shadows the promoted method but still delegates to Base's version.
func (foo *Foo) Bar() {
	foo.Base.Bar()
}

func main() {
	foo := &Foo{}
	foo.Foo() // promoted from the embedded Base; sets foo.Name
	fmt.Println(foo.Name)

	rect := new(Rect)                      // zero-valued rectangle
	rect1 := &Rect{width: 109, height: 10} // field-name literal
	rect2 := &Rect{1, 2, 3, 4}             // positional literal
	rect.width = 19.9
	rect.height = 22.1
	fmt.Println(rect.Area())
	fmt.Println(rect1.Area())
	fmt.Println(rect2.Area())
}
package bus

import (
	"context"
	"time"

	"github.com/stretchr/testify/mock"
)

// mockDriver is a test double that records every driver call with testify's
// mock.Mock and, when the programmed expectation returns a nil error,
// forwards the call to a real internalDriver so internal state stays
// consistent with the recorded interactions.
type mockDriver struct {
	mock.Mock
	itd *internalDriver
}

// CreateQueue records the call; on nil mock error it delegates to the real driver.
func (m *mockDriver) CreateQueue(name string, delay time.Duration) error {
	err := m.Called(name, delay).Error(0)
	if err == nil {
		err = m.itd.CreateQueue(name, delay)
	}
	return err
}

// CreateTopic records the call; on nil mock error it delegates to the real driver.
func (m *mockDriver) CreateTopic(name string) error {
	err := m.Called(name).Error(0)
	if err == nil {
		err = m.itd.CreateTopic(name)
	}
	return err
}

// Subscribe records the call; on nil mock error it delegates to the real driver.
func (m *mockDriver) Subscribe(topic, queue, routeKey string) error {
	err := m.Called(topic, queue, routeKey).Error(0)
	if err == nil {
		err = m.itd.Subscribe(topic, queue, routeKey)
	}
	return err
}

// UnSubscribe records the call; on nil mock error it delegates to the real driver.
func (m *mockDriver) UnSubscribe(topic, queue, routeKey string) error {
	err := m.Called(topic, queue, routeKey).Error(0)
	if err == nil {
		err = m.itd.UnSubscribe(topic, queue, routeKey)
	}
	return err
}

// SendToQueue records the call; on nil mock error it delegates to the real driver.
func (m *mockDriver) SendToQueue(queue string, content []byte, delay time.Duration) error {
	err := m.Called(queue, content, delay).Error(0)
	if err == nil {
		err = m.itd.SendToQueue(queue, content, delay)
	}
	return err
}

// SendToTopic records the call; on nil mock error it delegates to the real driver.
func (m *mockDriver) SendToTopic(topic string, content []byte, routeKey string) error {
	err := m.Called(topic, content, routeKey).Error(0)
	if err == nil {
		err = m.itd.SendToTopic(topic, content, routeKey)
	}
	return err
}

// ReceiveMessage is not mocked: it always delegates straight to the real driver.
func (m *mockDriver) ReceiveMessage(ctx context.Context, queue string, errChan chan error, handler func([]byte) bool) {
	m.itd.ReceiveMessage(ctx, queue, errChan, handler)
}
package requests import ( "encoding/json" "fmt" "net/url" "strings" "time" "github.com/google/go-querystring/query" "github.com/atomicjolt/canvasapi" "github.com/atomicjolt/string_utils" ) // SubmitAssignmentSections Make a submission for an assignment. You must be enrolled as a student in // the course/section to do this. // // All online turn-in submission types are supported in this API. However, // there are a few things that are not yet supported: // // * Files can be submitted based on a file ID of a user or group file or through the {api:SubmissionsApiController#create_file file upload API}. However, there is no API yet for listing the user and group files. // * Media comments can be submitted, however, there is no API yet for creating a media comment to submit. // * Integration with Google Docs is not yet supported. // https://canvas.instructure.com/doc/api/submissions.html // // Path Parameters: // # Path.SectionID (Required) ID // # Path.AssignmentID (Required) ID // // Form Parameters: // # Form.Comment.TextComment (Optional) Include a textual comment with the submission. // # Form.Submission.SubmissionType (Required) . Must be one of online_text_entry, online_url, online_upload, media_recording, basic_lti_launch, student_annotationThe type of submission being made. The assignment submission_types must // include this submission type as an allowed option, or the submission will be rejected with a 400 error. // // The submission_type given determines which of the following parameters is // used. For instance, to submit a URL, submission [submission_type] must be // set to "online_url", otherwise the submission [url] parameter will be // ignored. // # Form.Submission.Body (Optional) Submit the assignment as an HTML document snippet. Note this HTML snippet // will be sanitized using the same ruleset as a submission made from the // Canvas web UI. The sanitized HTML will be returned in the response as the // submission body. 
Requires a submission_type of "online_text_entry". // # Form.Submission.Url (Optional) Submit the assignment as a URL. The URL scheme must be "http" or "https", // no "ftp" or other URL schemes are allowed. If no scheme is given (e.g. // "www.example.com") then "http" will be assumed. Requires a submission_type // of "online_url" or "basic_lti_launch". // # Form.Submission.FileIDs (Optional) Submit the assignment as a set of one or more previously uploaded files // residing in the submitting user's files section (or the group's files // section, for group assignments). // // To upload a new file to submit, see the submissions {api:SubmissionsApiController#create_file Upload a file API}. // // Requires a submission_type of "online_upload". // # Form.Submission.MediaCommentID (Optional) The media comment id to submit. Media comment ids can be submitted via // this API, however, note that there is not yet an API to generate or list // existing media comments, so this functionality is currently of limited use. // // Requires a submission_type of "media_recording". // # Form.Submission.MediaCommentType (Optional) . Must be one of audio, videoThe type of media comment being submitted. // # Form.Submission.UserID (Optional) Submit on behalf of the given user. Requires grading permission. // # Form.Submission.AnnotatableAttachmentID (Optional) The Attachment ID of the document being annotated. This should match // the annotatable_attachment_id on the assignment. // // Requires a submission_type of "student_annotation". // # Form.Submission.SubmittedAt (Optional) Choose the time the submission is listed as submitted at. Requires grading permission. 
// type SubmitAssignmentSections struct { Path struct { SectionID string `json:"section_id" url:"section_id,omitempty"` // (Required) AssignmentID string `json:"assignment_id" url:"assignment_id,omitempty"` // (Required) } `json:"path"` Form struct { Comment struct { TextComment string `json:"text_comment" url:"text_comment,omitempty"` // (Optional) } `json:"comment" url:"comment,omitempty"` Submission struct { SubmissionType string `json:"submission_type" url:"submission_type,omitempty"` // (Required) . Must be one of online_text_entry, online_url, online_upload, media_recording, basic_lti_launch, student_annotation Body string `json:"body" url:"body,omitempty"` // (Optional) Url string `json:"url" url:"url,omitempty"` // (Optional) FileIDs []string `json:"file_ids" url:"file_ids,omitempty"` // (Optional) MediaCommentID string `json:"media_comment_id" url:"media_comment_id,omitempty"` // (Optional) MediaCommentType string `json:"media_comment_type" url:"media_comment_type,omitempty"` // (Optional) . 
Must be one of audio, video UserID int64 `json:"user_id" url:"user_id,omitempty"` // (Optional) AnnotatableAttachmentID int64 `json:"annotatable_attachment_id" url:"annotatable_attachment_id,omitempty"` // (Optional) SubmittedAt time.Time `json:"submitted_at" url:"submitted_at,omitempty"` // (Optional) } `json:"submission" url:"submission,omitempty"` } `json:"form"` } func (t *SubmitAssignmentSections) GetMethod() string { return "POST" } func (t *SubmitAssignmentSections) GetURLPath() string { path := "sections/{section_id}/assignments/{assignment_id}/submissions" path = strings.ReplaceAll(path, "{section_id}", fmt.Sprintf("%v", t.Path.SectionID)) path = strings.ReplaceAll(path, "{assignment_id}", fmt.Sprintf("%v", t.Path.AssignmentID)) return path } func (t *SubmitAssignmentSections) GetQuery() (string, error) { return "", nil } func (t *SubmitAssignmentSections) GetBody() (url.Values, error) { return query.Values(t.Form) } func (t *SubmitAssignmentSections) GetJSON() ([]byte, error) { j, err := json.Marshal(t.Form) if err != nil { return nil, nil } return j, nil } func (t *SubmitAssignmentSections) HasErrors() error { errs := []string{} if t.Path.SectionID == "" { errs = append(errs, "'Path.SectionID' is required") } if t.Path.AssignmentID == "" { errs = append(errs, "'Path.AssignmentID' is required") } if t.Form.Submission.SubmissionType == "" { errs = append(errs, "'Form.Submission.SubmissionType' is required") } if t.Form.Submission.SubmissionType != "" && !string_utils.Include([]string{"online_text_entry", "online_url", "online_upload", "media_recording", "basic_lti_launch", "student_annotation"}, t.Form.Submission.SubmissionType) { errs = append(errs, "Submission must be one of online_text_entry, online_url, online_upload, media_recording, basic_lti_launch, student_annotation") } if t.Form.Submission.MediaCommentType != "" && !string_utils.Include([]string{"audio", "video"}, t.Form.Submission.MediaCommentType) { errs = append(errs, "Submission must be one 
of audio, video") } if len(errs) > 0 { return fmt.Errorf(strings.Join(errs, ", ")) } return nil } func (t *SubmitAssignmentSections) Do(c *canvasapi.Canvas) error { _, err := c.SendRequest(t) if err != nil { return err } return nil }
//pb-17
// Project Euler 17: how many letters are used when the numbers 1..1000 are
// written out in English words ("and" included, spaces/hyphens excluded)?
package main

import (
	"fmt"
	"time"
	"strconv"
	"strings"
)

// try maps a digit (as a string) to the letter count of its English word,
// indexed by decimal position from the right:
//   try[0]: units ("one"=3, "two"=3, ...)
//   try[1]: tens and teens — keyed "2".."9" for twenty..ninety and
//           "10".."19" for the one-word teens
//   try[2]: "_" -> len("hundred") = 7
//   try[3]: "_" -> len("thousand") = 8
// Missing keys (i.e. zero digits) look up as 0, contributing nothing.
var try = []map[string]int{
	{"1": 3, "2": 3, "3": 5, "4": 4, "5": 4, "6": 3, "7": 5, "8": 5, "9": 4},
	{"2": 6, "3": 6, "4": 5, "5": 5, "6": 5, "7": 7, "8": 6, "9": 6, "10": 3, "11": 6, "12": 6, "13": 8, "14": 8, "15": 7, "16": 7, "17": 9, "18": 8, "19": 8},
	{"_": 7},
	{"_": 8},
}

// numberLetterCounts sums countThis over 1..n.
func numberLetterCounts(n int) int {
	sum := 0
	for i := 1; i <= n; i++ {
		sum += countThis(i)
	}
	return sum
}

// countThis returns the number of letters in n written out in English.
// It walks the decimal string right to left, adding the word length for
// each position.
func countThis(n int) int {
	var str string = strconv.Itoa(n)
	var totalLen int = len(str)
	var sum int = 0
	for i := totalLen - 1; i >= 0; i-- {
		var revIdx = totalLen - i - 1
		if revIdx == 0 && totalLen >= 2 && str[i-1] == '1' {
			// 10..19 are a single word: consume both digits at once, then
			// restart the scan just above the tens position.
			sum += try[revIdx+1][str[i-1 : i+1]]
			i = totalLen - 2
		} else if revIdx >= 2 && str[i:i+1] != "0" {
			// hundreds/thousands digit: "<digit>" + "hundred"/"thousand".
			sum += try[revIdx]["_"] + try[0][str[i:i+1]]
		} else {
			sum += try[revIdx][str[i : i+1]]
		}
	}
	// British usage adds "and" ("one hundred and fifteen", +3 letters)
	// unless everything after the leading digit is zero (e.g. 100, 1000).
	if totalLen > 2 && strings.Count(str, "0") != totalLen-1 {
		sum += 3
	}
	return sum
}

func main() {
	start := time.Now()
	fmt.Println(numberLetterCounts(1000))
	elapsed := time.Since(start)
	fmt.Printf("Time taken: %s\n", elapsed)
}
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Preson demonstrates JSON encoding/decoding of a struct with field tags.
// (Spelling kept from the original example for compatibility.)
type Preson struct {
	Name   string   `json:"name"`
	Age    int      `json:"age"`
	Gender string   `json:"gender"`
	City   []string `json:"city"`
}

func main() {
	p1 := &Preson{Name: "Sumeet", Age: 22, Gender: "Male", City: []string{"Chennai", "TN"}}
	// BUG FIX: the Marshal/Unmarshal errors were previously discarded.
	data, err := json.Marshal(p1)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}
	fmt.Println("Encode (marshal) struct to JSON:")
	fmt.Println(string(data))

	p2 := `{"Name":"Sumeet","Age":22,"Gender":"Male","City":["Chennai","TN"]}`
	data2 := &Preson{}
	if err := json.Unmarshal([]byte(p2), data2); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Println("\nDecode (unmarshal) JSON to struct:")
	fmt.Println(data2)
}

// - Sumeet Ranjan Parida (Batch - 9A)

//Output:

//Encode (marshal) struct to JSON:
//{"name":"Sumeet","age":22,"gender":"Male","city":["Chennai","TN"]}

//Decode (unmarshal) JSON to struct:
//&{Sumeet 22 Male [Chennai TN]}
package eventstore

import (
	"context"
	"fmt"
	"os"

	"github.com/machinebox/graphql"
)

// sendRequest executes a GraphQL request against the event store whose URL
// is read from the EVENT_STORE_URL environment variable, decoding the
// response into data. A fresh client is constructed on every call.
func sendRequest(ctx context.Context, req *graphql.Request, data interface{}) error {
	URL := os.Getenv("EVENT_STORE_URL")
	if URL == "" {
		return fmt.Errorf("Missing required environment variable EVENT_STORE_URL")
	}
	client := graphql.NewClient(URL)
	// Kept for debugging: wire the client's logger to glog when needed.
	// client.Log = func(s string) {
	// 	glog.Info(s)
	// }
	return client.Run(ctx, req, data)
}
package csblob

import (
	"bytes"
	"crypto/x509"
	"encoding/asn1"
	"encoding/binary"
)

// reqBuilder serializes a code-signing requirement expression into the
// binary opcode stream the requirement interpreter expects: big-endian
// uint32 opcodes/operands, with variable-length data length-prefixed and
// zero-padded to 4-byte boundaries.
type reqBuilder struct {
	out bytes.Buffer
	err error // sticky error checked once at the end of building
}

// DefaultRequirement builds a designated-requirement blob for the given
// bundle identifier and certificate chain (leaf first). The expression
// requires the identifier, an Apple generic anchor, and the leaf's
// subject CN; when the leaf's issuing intermediate carries a recognized
// role extension, an "extension exists" clause on slot 1 is added too.
func DefaultRequirement(identifier string, certs []*x509.Certificate) ([]byte, error) {
	leaf := certs[0]
	b := new(reqBuilder)
	// Requirement expression format version.
	b.putUint32(1)
	items := []func(){
		b.identifier(identifier),
		b.anchorAppleGeneric(),
		b.certFieldEqual(0, "subject.CN", leaf.Subject.CommonName),
	}
	// find intermediate cert
	for _, cert := range certs[1:] {
		if !bytes.Equal(cert.RawSubject, leaf.RawIssuer) {
			continue
		}
		// look for endorsement for specific signature role
		for _, ext := range cert.Extensions {
			if hasPrefix(ext.Id, Intermediate) {
				items = append(items, b.certExtensionExists(1, ext.Id))
				break
			}
		}
		break
	}
	// build requirement from criteria
	b.and(items...)()
	if b.err != nil {
		return nil, b.err
	}
	i := newSuperItem(csRequirement, b.out.Bytes())
	i.itype = uint32(DesignatedRequirement)
	return marshalSuperBlob(csRequirements, []superItem{i}), nil
}

// putUint32 appends v in big-endian byte order.
func (b *reqBuilder) putUint32(v uint32) {
	var d [4]byte
	binary.BigEndian.PutUint32(d[:], v)
	b.out.Write(d[:])
}

// putData appends a length-prefixed blob, zero-padded to a 4-byte boundary.
func (b *reqBuilder) putData(v []byte) {
	b.putUint32(uint32(len(v)))
	b.out.Write(v)
	n := len(v)
	for n%4 != 0 {
		b.out.WriteByte(0)
		n++
	}
}

// putOID appends oid using DER base-128 content encoding (no tag/length
// header), wrapped as a putData blob.
func (b *reqBuilder) putOID(oid asn1.ObjectIdentifier) {
	// pack first two digits together
	packed := append(asn1.ObjectIdentifier{oid[0]*40 + oid[1]}, oid[2:]...)
	var out []byte
	for _, v := range packed {
		if v < 0x80 {
			// simple case: single base-128 word
			out = append(out, byte(v))
			continue
		}
		// build starting from least-significant word
		var outv []byte
		for {
			outv = append(outv, byte(v&0x7f))
			if v >= 0x80 {
				v >>= 7
			} else {
				break
			}
		}
		// reverse and set MSB on all but the last word
		for i := len(outv) - 1; i >= 0; i-- {
			vv := outv[i]
			if i != 0 {
				vv |= 0x80
			}
			out = append(out, vv)
		}
	}
	b.putData(out)
}

// and returns an emitter for the conjunction of items. The language is
// prefix-encoded, so opAnd precedes each remaining pair:
// and(a, b, c) emits: opAnd a opAnd b c.
func (b *reqBuilder) and(items ...func()) func() {
	return func() {
		for len(items) > 1 {
			b.putUint32(opAnd)
			items[0]()
			items = items[1:]
		}
		items[0]()
	}
}

// identifier returns an emitter for an opIdent match against v.
func (b *reqBuilder) identifier(v string) func() {
	return func() {
		b.putUint32(opIdent)
		b.putData([]byte(v))
	}
}

// anchorAppleGeneric returns an emitter for the "anchor apple generic" op.
func (b *reqBuilder) anchorAppleGeneric() func() {
	return func() {
		b.putUint32(opAppleGenericAnchor)
	}
}

// certFieldEqual returns an emitter requiring that the named field of the
// certificate in the given chain slot equals value exactly.
func (b *reqBuilder) certFieldEqual(slot int32, field, value string) func() {
	return func() {
		b.putUint32(opCertField)
		b.putUint32(uint32(slot))
		b.putData([]byte(field))
		b.putUint32(uint32(matchEqual))
		b.putData([]byte(value))
	}
}

// certExtensionExists returns an emitter requiring that the certificate in
// the given chain slot carries an extension with the given OID.
func (b *reqBuilder) certExtensionExists(slot int32, oid asn1.ObjectIdentifier) func() {
	return func() {
		b.putUint32(opCertGeneric)
		b.putUint32(uint32(slot))
		b.putOID(oid)
		b.putUint32(uint32(matchExists))
	}
}
// Package userModel wraps the shared user table model with query helpers.
package userModel

import (
	"hd-mall-ed/packages/common/database"
	"hd-mall-ed/packages/common/database/tableModel"
)

// User is a local alias of the shared table model so methods can be attached.
type User tableModel.User

// CreateUser inserts the receiver as a new row.
// NOTE(review): &user is a **User here since user is already a pointer —
// confirm GORM accepts that, or whether Create(user) was intended.
func (user *User) CreateUser() (err error) {
	err = database.DataBase.Create(&user).Error
	return
}

// FindUser loads the first row matching the receiver's populated fields into
// the receiver and returns a copy. Lookup errors are not reported.
func (user *User) FindUser() User {
	database.DataBase.First(user)
	return *user
}

// FindUserById looks a user up by primary key.
func (*User) FindUserById(id uint) (User, error) {
	resultUser := User{}
	err := database.DataBase.First(&resultUser, id).Error
	if err != nil {
		return resultUser, err
	}
	return resultUser, nil
}

// FindUserByName looks a user up by exact name match.
func (*User) FindUserByName(name string) (User, error) {
	resultUser := User{}
	err := database.DataBase.Where("name = ?", name).First(&resultUser).Error
	if err != nil {
		return resultUser, err
	}
	return resultUser, nil
}

// Update applies updateMap to the row with the receiver's ID.
func (user *User) Update(updateMap interface{}) error {
	err := database.DataBase.Model(&User{}).Where("id = ?", user.ID).Updates(updateMap).Error
	return err
}
package reverse_words_in_a_string_iii func reverseWords(s string) string { arr := []byte(s) for i := 0; i < len(arr); { for i < len(arr) && arr[i] == ' ' { i++ } j := i + 1 for j < len(arr) && arr[j] != ' ' { j++ } reverseWord(arr[i:j]) i = j + 1 } return string(arr) } func reverseWord(arr []byte) { i, j := 0, len(arr)-1 for i < j { arr[i], arr[j] = arr[j], arr[i] i++ j-- } } // 17:24 start // 17:32 first submit
// Package controllers routes payment requests to a concrete pay provider.
package controllers

import (
	"encoding/json"
	"github.com/astaxie/beego"
	beeLogger "github.com/beego/bee/logger"
	"hello/business"
)

// PayControllers handles payment requests dispatched by the :action route
// parameter.
type PayControllers struct {
	beego.Controller
}

// ActionFunc dispatches on the :action path parameter ("pay" or "refund"),
// decodes the JSON request body into a generic map, and writes the
// provider's response back as JSON.
func (pay *PayControllers) ActionFunc() {
	logger := beeLogger.Log
	action := pay.Ctx.Input.Param(":action")
	data := pay.Ctx.Input.RequestBody
	params := map[string]interface{}{}
	// Best effort: a malformed body simply yields empty params.
	_ = json.Unmarshal(data, &params)

	// NOTE(review): the provider is hard-coded to "alipay_pay" and
	// CreateUserFactory returns nil for any other type; a nil payInter
	// would panic below — confirm intended.
	payInter := NewPayFactory().CreateUserFactory("alipay_pay")
	var respData map[string]interface{}
	switch action {
	case "pay":
		respData = payInter.InsertPay(params)
	case "refund":
		respData = payInter.RefundPay(params)
	default:
		logger.Error("未定义方法类型。")
	}
	pay.Data["json"] = respData
	pay.ServeJSON()
}

// PayInter is the contract every payment provider implements.
type PayInter interface {
	InsertPay(params map[string]interface{}) map[string]interface{}
	RefundPay(params map[string]interface{}) map[string]interface{}
}

// PayFactory builds concrete PayInter implementations by type name.
type PayFactory struct {
}

// NewPayFactory returns an empty factory.
func NewPayFactory() *PayFactory {
	return &PayFactory{}
}

// CreateUserFactory maps a payment-type key to its provider. Unknown types
// (including the not-yet-implemented "weixin_pay") yield nil.
func (pay *PayFactory) CreateUserFactory(payType string) PayInter {
	if payType == "alipay_pay" {
		return &business.ZhiFuBaoBiz{}
	} else if payType == "weixin_pay" {
		// TODO: WeChat Pay provider not implemented yet.
	}
	return nil
}
// Package debug provides helpers for CPU/heap profiling of the server.
package debug

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"runtime/pprof"
	"strings"

	"github.com/cosmos/cosmos-sdk/server"
	"github.com/tendermint/tendermint/libs/log"
)

// isCPUProfileConfigurationActivated checks if cpuprofile was configured via flag
func isCPUProfileConfigurationActivated(ctx *server.Context) bool {
	// TODO: use same constants as server/start.go
	// constant declared in start.go cannot be imported (cyclical dependency)
	const flagCPUProfile = "cpu-profile"
	if cpuProfile := ctx.Viper.GetString(flagCPUProfile); cpuProfile != "" {
		return true
	}
	return false
}

// ExpandHome expands home directory in file paths.
// ~someuser/tmp will not be expanded.
func ExpandHome(p string) (string, error) {
	if strings.HasPrefix(p, "~/") || strings.HasPrefix(p, "~\\") {
		usr, err := user.Current()
		if err != nil {
			return p, err
		}
		home := usr.HomeDir
		// Keep the original separator: p[1:] still starts with "/" or "\".
		p = home + p[1:]
	}
	return filepath.Clean(p), nil
}

// writeProfile writes the named pprof profile's records to file.
func writeProfile(name, file string, log log.Logger) error {
	p := pprof.Lookup(name)
	// BUG FIX: pprof.Lookup returns nil for an unknown profile name, which
	// previously caused a nil-pointer panic on p.Count().
	if p == nil {
		return fmt.Errorf("unknown profile %q", name)
	}
	log.Info("Writing profile records", "count", p.Count(), "type", name, "dump", file)
	fp, err := ExpandHome(file)
	if err != nil {
		return err
	}
	f, err := os.Create(fp)
	if err != nil {
		return err
	}
	// Close explicitly on both paths so write errors are not masked.
	if err := p.WriteTo(f, 0); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
package sej

import (
	"reflect"
	"testing"
)

// TestWriteFlush checks that flushed (but not yet closed) messages are
// readable back from the journal.
func TestWriteFlush(t *testing.T) {
	tt := Test{t}
	path := newTestPath(t)
	messages := []string{"a", "bc"}
	w := newTestWriter(t, path)
	defer closeTestWriter(t, w)
	writeTestMessages(t, w, messages...)
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	tt.VerifyMessageValues(path, messages...)
}

// TestWriteSegment checks journal segmentation: when a segment file reaches
// maxSize, writes roll over to a new file, so the on-disk file sizes must
// match expectations (each message costs metaSize bytes of framing, and an
// empty trailing segment is always present).
func TestWriteSegment(t *testing.T) {
	tt := Test{t}
	for _, testcase := range []struct {
		messages  []string
		maxSize   int
		fileSizes []int
	}{
		{
			// maxSize 0: every message starts a new segment.
			messages:  []string{"a", "ab"},
			maxSize:   0,
			fileSizes: []int{metaSize + 1, metaSize + 2, 0},
		},
		{
			messages:  []string{"a"},
			maxSize:   (metaSize + 1),
			fileSizes: []int{metaSize + 1, 0},
		},
		{
			// Both messages fit exactly into a single segment.
			messages:  []string{"a", "bc"},
			maxSize:   (metaSize + 1) + (metaSize + 2),
			fileSizes: []int{(metaSize + 1) + (metaSize + 2), 0},
		},
	} {
		// Anonymous func so defers/cleanup scope per test case.
		func() {
			path := newTestPath(t)
			w := newTestWriter(t, path, testcase.maxSize)
			writeTestMessages(t, w, testcase.messages...)
			closeTestWriter(t, w)
			journalFiles, err := OpenJournalDir(JournalDirPath(path))
			if err != nil {
				t.Fatal(err)
			}
			sizes := journalFiles.sizes(t)
			if !reflect.DeepEqual(sizes, testcase.fileSizes) {
				t.Fatalf("expect journal files with size %v but got %d", testcase.fileSizes, sizes)
			}
			tt.VerifyMessageValues(path, testcase.messages...)
		}()
	}
}

// TestWriteReopen checks that messages written across multiple open/close
// cycles all survive, for both single- and multi-segment configurations.
func TestWriteReopen(t *testing.T) {
	tt := Test{t}
	messages := []string{"a", "bc", "def"}
	// test cases for multiple and single segments
	for _, segmentSize := range []int{0, 50} {
		func() {
			path := newTestPath(t)
			for _, msg := range messages {
				w := newTestWriter(t, path, segmentSize)
				writeTestMessages(t, w, msg)
				if err := w.Close(); err != nil {
					t.Fatal(err)
				}
			}
			tt.VerifyMessageValues(path, messages...)
		}()
	}
}

// TestWriteDetectCorruption truncates the tail of the journal and checks that
// the first reopen reports a CorruptionError while the second succeeds (the
// corrupt tail is expected to have been repaired by the first open).
func TestWriteDetectCorruption(t *testing.T) {
	path := newTestPath(t)
	w := newTestWriter(t, path)
	writeTestMessages(t, w, "a", "b", "c")
	closeTestWriter(t, w)
	file := JournalDirPath(path) + "/0000000000000000.jnl"
	// corrupt the last message
	truncateFile(t, file, 1)

	// 1st time: must surface the corruption.
	w, err := NewWriter(path)
	if err == nil {
		w.Close()
	}
	if _, ok := err.(*CorruptionError); !ok {
		t.Fatalf("expect corruption error but got %v", err)
	}

	// 2nd time: must open cleanly.
	w, err = NewWriter(path)
	if err == nil {
		w.Close()
	}
	if err != nil {
		t.Fatalf("expect corruption fixed but got: %v", err)
	}
}
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/mit-dci/utreexo/utreexo"
)

// TxoTTL pairs a txo hash with the block height at which it expires.
type TxoTTL struct {
	utreexo.Hash
	ExpiryBlock int32
}

// plusLine reads in a line of text, generates a utxo leaf, and determines
// if this is a leaf to remember or not.
//
// Expected shape (leading '+' already stripped from position 0):
//   +<txid>;<numoutputs>[z<unspendable idx>...]x<ttl>,<ttl>,...,
// where a ttl of "s" marks an output never spent within the data set.
func plusLine(s string) ([]utreexo.LeafTXO, error) {
	// fmt.Printf("%s\n", s)
	parts := strings.Split(s[1:], ";")
	if len(parts) < 2 {
		return nil, fmt.Errorf("line %s has no ; in it", s)
	}
	txid := parts[0]
	postsemicolon := parts[1]

	// "x" separates the output indicators from the comma-separated TTLs.
	indicatorHalves := strings.Split(postsemicolon, "x")
	ttldata := indicatorHalves[1]
	ttlascii := strings.Split(ttldata, ",")
	// the last one is always empty as there's a trailing ,
	ttlval := make([]int32, len(ttlascii)-1)
	for i, _ := range ttlval {
		if ttlascii[i] == "s" {
			//	ttlval[i] = 0
			// 0 means don't remember it! so 1 million blocks later
			ttlval[i] = 1 << 20
			continue
		}
		val, err := strconv.Atoi(ttlascii[i])
		if err != nil {
			return nil, err
		}
		ttlval[i] = int32(val)
	}

	txoIndicators := strings.Split(indicatorHalves[0], "z")

	numoutputs, err := strconv.Atoi(txoIndicators[0])
	if err != nil {
		return nil, err
	}
	// Sanity check: one TTL per output.
	if numoutputs != len(ttlval) {
		return nil, fmt.Errorf("%d outputs but %d ttl indicators",
			numoutputs, len(ttlval))
	}

	// numoutputs++ // for testnet3.txos

	// "z"-separated values after the output count mark unspendable output
	// indexes that must not become leaves.
	unspend := make(map[int]bool)
	if len(txoIndicators) > 1 {
		unspendables := txoIndicators[1:]
		for _, zstring := range unspendables {
			n, err := strconv.Atoi(zstring)
			if err != nil {
				return nil, err
			}
			unspend[n] = true
		}
	}
	adds := []utreexo.LeafTXO{}
	for i := 0; i < numoutputs; i++ {
		if unspend[i] {
			continue
		}
		// The leaf hash commits to "txid;index" so each output is unique.
		utxostring := fmt.Sprintf("%s;%d", txid, i)
		addData := utreexo.LeafTXO{
			Hash:     utreexo.HashFromString(utxostring),
			Duration: int32(ttlval[i])}
		// Remember: lookahead >= ttlval[i]}
		adds = append(adds, addData)
		// fmt.Printf("expire in\t%d remember %v\n", ttlval[i], addData.Remember)
	}
	return adds, nil
}
package paypal_fee_computer // import "src.techknowlogick.com/paypal-fee-computer" import ( "testing" ) func TestCompute(t *testing.T) { cases := []struct{ amount, percentage, fixed, want float64 }{ {0.01, 2.9, 0.3, 0.32 }, {0.1, 2.9, 0.3, 0.41 }, {1.0, 2.9, 0.3, 1.34 }, {10.0, 2.9, 0.3, 10.61 }, {25.0, 2.9, 0.3, 26.06 }, {100.0, 2.9, 0.3, 103.30 }, {500.0, 2.9, 0.3, 515.24 }, {1000.0, 2.9, 0.3, 1030.18 }, } for _, c := range cases { got, err := Compute(c.amount, c.percentage, c.fixed) if err != nil { got = -1.0 } if got != c.want { t.Errorf("Compute(%v, %v, %v) == %v, expected %v", c.amount, c.percentage, c.fixed, got, c.want) } } }
// Copyright 2016 Albert Nigmatzianov. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

// Package ui provides small console input/output helpers with color support.
package ui

import (
	"bufio"
	"fmt"
	"os"
	"strings"
	"time"

	colorable "github.com/mattn/go-colorable"
)

// Output is the colored stdout writer every helper prints to.
var Output = colorable.NewColorableStdout()

// Error prints "ERROR: <message>: <err>" in red; either part may be empty.
func Error(message string, err error) {
	out := "ERROR: "
	if err == nil {
		out += message
	} else if message == "" {
		out += err.Error()
	} else {
		out += message + ": " + err.Error()
	}
	Println(RedString(out))
}

// Newline prints an empty line.
func Newline() {
	Println("")
}

// Print writes s to Output without a trailing newline.
func Print(s string) {
	fmt.Fprint(Output, s)
}

// Println writes s to Output followed by a newline.
func Println(s string) {
	Print(s + "\n")
}

// Quit exits the program successfully.
func Quit() {
	os.Exit(0)
}

// ReadInput reads one line from stdin and trims surrounding whitespace.
// It terminates the program if stdin cannot be read.
func ReadInput() string {
	reader := bufio.NewReader(os.Stdin)
	input, err := reader.ReadString('\n')
	if err != nil {
		Term("couldn't read the input", err)
	}
	return strings.TrimSpace(input)
}

// Sleep pauses for two seconds (to let the user read a message).
func Sleep() {
	time.Sleep(2 * time.Second)
}

// Success prints s in green.
func Success(s string) {
	Println(GreenString(s))
}

// Term prints the error (if any) in red and exits with status 1.
func Term(message string, err error) {
	if message != "" || err != nil {
		Error(message, err)
	}
	os.Exit(1)
}

// Warning prints "WARNING: <message>" in yellow.
func Warning(message string) {
	Println(YellowString("WARNING: " + message))
}
package server

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/go-chi/chi"

	"github.com/meso-org/meso/repository"
	"github.com/meso-org/meso/workers"
)

// workerHandler exposes the workers service over HTTP.
type workerHandler struct {
	s workers.Service
}

// router wires up all /worker endpoints.
func (h *workerHandler) router() chi.Router {
	r := chi.NewRouter()
	r.Route("/worker", func(chi.Router) {
		// TODO: this is a dummy login handler for later
		r.Post("/login", h.loginWorker)
		r.Post("/", h.registerWorker)
		r.Get("/", h.listWorkers)
		r.Route("/{workerID}", func(r chi.Router) {
			r.Get("/", h.findWorker)
		})
		r.Route("/location", func(r chi.Router) {
			r.Post("/update", h.setWorkerLocation)
		})
		/* if we were to add more sub routing:
		r.Route("/pattern", func(chi.Router) {
			r.Verb("/pattern", handlerFunc)
		})
		*/
	})
	r.Get("/ping", h.testPing)
	return r
}

// testPing is a health-check endpoint returning {"domain":"worker","ping":"pong"}.
func (h *workerHandler) testPing(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	var response = struct {
		Domain string `json:"domain"`
		Ping   string `json:"ping"`
	}{
		Domain: "worker",
		Ping:   "pong",
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		encodeError(ctx, err, w)
		return
	}
}

// loginWorker is a placeholder login: it matches the e-mail and returns a
// hard-coded token.
func (h *workerHandler) loginWorker(w http.ResponseWriter, r *http.Request) {
	var err error
	var request struct {
		Email    string `json:"email"`
		Password string `json:"password"`
	}
	var response struct {
		User  *repository.Worker `json:"worker"`
		Token string             `json:"token"`
	}
	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
		// BUG FIX: a malformed body previously fell through and was looked
		// up with a zero-value request.
		fmt.Printf("unable to decode json: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	parsedEmail := repository.Email(request.Email)
	response.User, err = h.s.FindWorkerByEmail(parsedEmail)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if response.User.Email == parsedEmail {
		response.Token = "FAKEOAUTHTOKEN1234567890"
		if err := json.NewEncoder(w).Encode(response); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
	}
}

// registerWorker creates a new worker record and returns its id.
func (h *workerHandler) registerWorker(w http.ResponseWriter, r *http.Request) {
	var request struct {
		Email      string `json:"email"`
		FirstName  string `json:"firstName"`
		LastName   string `json:"lastName"`
		Occupation string `json:"occupation"`
		License    string `json:"license"`
	}
	var response struct {
		ID repository.WorkerID `json:"workerId"`
	}
	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
		// BUG FIX: previously continued and registered a zero-value worker.
		fmt.Printf("unable to decode json: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	fmt.Println("heres the request string ", request.Email, request.FirstName, request.LastName, request.Occupation, request.License)
	id, err := h.s.RegisterNewWorker(request.Email, request.FirstName, request.LastName, request.Occupation, request.License)
	if err != nil {
		w.WriteHeader(http.StatusBadGateway)
		return
	}
	response.ID = id
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
}

// setWorkerLocation updates a worker's preferred service area.
func (h *workerHandler) setWorkerLocation(w http.ResponseWriter, r *http.Request) {
	fmt.Println("/location/update")
	var request struct {
		WorkerID   string `json:"workerID"`
		Latitude   string `json:"latitude"`
		Longitude  string `json:"longitude"`
		MileRadius string `json:"mileRadius"`
	}
	var response struct {
		ID     repository.WorkerID `json:"workerId"`
		Worker *repository.Worker  `json:"worker"`
	}
	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
		fmt.Printf("unable to decode json: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	worker, err := h.s.UpdateWorkerLocationPreference(request.WorkerID, request.Latitude, request.Longitude, request.MileRadius)
	// BUG FIX: the success path was gated on err != nil (inverted), so a
	// successful update returned an empty body and a failed one dereferenced
	// a nil worker.
	if err != nil {
		fmt.Printf("ERROR: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	response.Worker = worker
	response.ID = worker.WorkerID
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		fmt.Printf("ERROR: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
}

// findWorker fetches a single worker by the {workerID} URL parameter.
func (h *workerHandler) findWorker(w http.ResponseWriter, r *http.Request) {
	var err error
	var response struct {
		Worker *repository.Worker `json:"worker"`
	}
	workerID := repository.WorkerID(chi.URLParam(r, "workerID"))
	fmt.Println("heres the request string ", workerID)
	response.Worker, err = h.s.FindWorkerByID(workerID)
	if err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
}

// listWorkers returns every worker known to the service.
func (h *workerHandler) listWorkers(w http.ResponseWriter, r *http.Request) {
	var err error
	var response struct {
		Workers []*repository.Worker `json:"workers"`
	}
	response.Workers, err = h.s.FindAllWorkers()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
}
package _4_Chain_of_Responsibility_Pattern //步骤 1 //创建抽象的记录器类。 const ( INFO = 1 DEBUG = 2 ERROR = 3 ) type AbstractLogger interface { logMessage(int, string) string } //步骤 2 //创建扩展了该记录器类的实体类。 type ConsoleLogger struct { nextLogger AbstractLogger level int } func (receiver *ConsoleLogger) logMessage(level int, message string) (ret string) { if receiver.level <= level { ret += "Console::Logger: " + message } if receiver.nextLogger != nil { ret += receiver.nextLogger.logMessage(level, message) } return } type FileLogger struct { nextLogger AbstractLogger level int } func (receiver *FileLogger) logMessage(level int, message string) (ret string) { if receiver.level <= level { ret += "File::Logger: " + message } if receiver.nextLogger != nil { ret += receiver.nextLogger.logMessage(level, message) } return } type ErrorLogger struct { nextLogger AbstractLogger level int } func (receiver *ErrorLogger) logMessage(level int, message string) (ret string) { if receiver.level <= level { ret += "Error::Logger: " + message } if receiver.nextLogger != nil { ret += receiver.nextLogger.logMessage(level, message) } return }
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package codegen

import (
	"fmt"
	"strings"
	"llvm/bindings/go/llvm"
)

// IndexOrName can be an index (int) or field name (string).
type IndexOrName interface{}

// ValueIndexOrName can be an index (Value or int) or field name (string).
type ValueIndexOrName interface{}

// Builder is provided to the callback of Build() for building a function's body.
type Builder struct {
	function *Function       // The function being built.
	params   []*Value        // Function parameter values.
	entry    llvm.BasicBlock // Function entry block.
	exit     llvm.BasicBlock // Function exit block.
	result   llvm.Value      // Function return value.
	llvm     llvm.Builder
	m        *Module
}

// buildFailure is a special type thrown as a panic by fail().
// They are caught in Build().
type buildFailure string

func (f buildFailure) String() string { return string(f) }

// fail formats msg with args and panics with a buildFailure; Build()
// recovers the panic and reports the message.
func fail(msg string, args ...interface{}) {
	str := fmt.Sprintf(msg, args...)
	panic(buildFailure(str))
}

// Call invokes the function f with the specified arguments
func (b *Builder) Call(f *Function, args ...*Value) *Value {
	return b.call(f.llvm, f.Type.Signature, f.Name, args)
}

// CallIndirect invokes the function pointer f with the specified arguments
func (b *Builder) CallIndirect(f *Value, args ...*Value) *Value {
	// f must be a pointer-to-function; anything else is a caller bug.
	var fty *FunctionType
	if ptrTy, ok := Underlying(f.Type()).(Pointer); ok {
		fty, _ = Underlying(ptrTy.Element).(*FunctionType)
	}
	if fty == nil {
		fail("CallIndirect() can only be called with function pointers. Got %v", f.Type())
	}
	return b.call(f.llvm, fty.Signature, f.name, args)
}

// call validates args against sig and emits the LLVM call instruction.
func (b *Builder) call(fn llvm.Value, sig Signature, name string, args []*Value) *Value {
	l := b.callArgs(sig, name, args)
	if sig.Result == b.m.Types.Void {
		// A void result cannot carry a value name.
		name = ""
	}
	return b.val(sig.Result, b.llvm.CreateCall(fn, l, name))
}

// Invoke invokes the function f with the specified arguments.
// If an exception is thrown while calling f, then cleanup will be called before
// rethrowing the exception.
func (b *Builder) Invoke(f *Function, cleanup func(), args ...*Value) *Value {
	fn, sig, name := f.llvm, f.Type.Signature, f.Name
	l := b.callArgs(sig, name, args)
	// Two continuation blocks: normal return and exceptional unwind.
	then := b.m.ctx.AddBasicBlock(b.function.llvm, fmt.Sprintf("%v_nothrow", name))
	throw := b.m.ctx.AddBasicBlock(b.function.llvm, fmt.Sprintf("%v_catch", name))
	if sig.Result == b.m.Types.Void {
		name = ""
	}
	res := b.val(sig.Result, b.llvm.CreateInvoke(fn, l, then, throw, name))
	b.function.llvm.SetPersonality(b.m.exceptions.personalityFn.llvm)
	// Landing pad: run cleanup, then resume unwinding to the caller.
	b.block(throw, llvm.BasicBlock{}, func() {
		lp := b.llvm.CreateLandingPad(b.m.exceptions.exceptionTy.llvmTy(), 0, "cleanup")
		lp.SetCleanup(true)
		cleanup()
		b.llvm.CreateResume(lp)
	})
	b.setInsertPointAtEnd(then)
	return res
}

// Throw throws an exception with the given value.
func (b *Builder) Throw(v *Value) {
	b.m.exceptions.throw(b, v)
}

// callArgs validates args against sig (count and types) and returns the
// corresponding llvm.Values. Any mismatch aborts the build via fail().
func (b *Builder) callArgs(sig Signature, name string, args []*Value) []llvm.Value {
	if sig.Variadic {
		// A variadic call may pass extra arguments, but never fewer than
		// the declared parameters.
		if g, e := len(args), len(sig.Parameters); g < e {
			fail("Got %d arguments, but needed %d to call %v", g, e, sig.string(name))
		}
	} else if g, e := len(args), len(sig.Parameters); g != e {
		fail("Got %d arguments, but needed %d to call %v", g, e, sig.string(name))
	}
	l := make([]llvm.Value, len(args))
	for i, a := range args {
		if a == nil {
			fail("Argument %d is nil when attempting to call %v", i, sig.string(name))
		}
		l[i] = a.llvm
		// Type-check only declared parameters; variadic extras are not
		// checked against anything.
		if i < len(sig.Parameters) {
			if g, e := a.ty, sig.Parameters[i]; g != e {
				fail("Incorrect argument type for parameter %d when calling %v: Got %v, expected %v", i, sig.string(name), g.TypeName(), e.TypeName())
			}
		}
	}
	return l
}

// Parameter returns i'th function parameter
func (b *Builder) Parameter(i int) *Value {
	return b.params[i]
}

// Undef returns a new undefined value of the specified type.
func (b *Builder) Undef(ty Type) *Value {
	return b.val(ty, llvm.Undef(ty.llvmTy()))
}

// Local returns a pointer to a new local variable with the specified name and
// type.
func (b *Builder) Local(name string, ty Type) *Value {
	// Allocas are emitted at the top of the entry block, then the insert
	// point is restored to where the caller was building.
	block := b.llvm.GetInsertBlock()
	b.llvm.SetInsertPoint(b.entry, b.entry.FirstInstruction())
	local := b.llvm.CreateAlloca(ty.llvmTy(), "")
	b.setInsertPointAtEnd(block)
	return b.val(b.m.Types.Pointer(ty), local).SetName(name)
}

// LocalInit returns a new local variable with the specified name and initial value.
func (b *Builder) LocalInit(name string, val *Value) *Value {
	local := b.Local(name, val.ty)
	local.Store(val)
	return local
}

// If builds an if statement.
func (b *Builder) If(cond *Value, onTrue func()) {
	b.IfElse(cond, onTrue, nil)
}

// IfElse builds an if-else statement.
func (b *Builder) IfElse(cond *Value, onTrue, onFalse func()) {
	trueBlock := b.m.ctx.AddBasicBlock(b.function.llvm, "if_true")
	var falseBlock llvm.BasicBlock
	if onFalse != nil {
		falseBlock = b.m.ctx.AddBasicBlock(b.function.llvm, "if_false")
	}
	endBlock := b.m.ctx.AddBasicBlock(b.function.llvm, "end_if")
	if onFalse == nil {
		// With no else branch, a false condition jumps straight past the
		// if-body to the join block.
		falseBlock = endBlock
	}

	b.llvm.CreateCondBr(cond.llvm, trueBlock, falseBlock)

	b.block(trueBlock, endBlock, onTrue)
	if onFalse != nil {
		b.block(falseBlock, endBlock, onFalse)
	}
	// Continue building after the if/else join point.
	b.setInsertPointAtEnd(endBlock)
}

// While builds a logic block with the following pseudocode:
//
//   while test() {
//     loop()
//   }
//
func (b *Builder) While(test func() *Value, loop func()) {
	testBlock := b.m.ctx.AddBasicBlock(b.function.llvm, "while_test")
	loopBlock := b.m.ctx.AddBasicBlock(b.function.llvm, "while_loop")
	exitBlock := b.m.ctx.AddBasicBlock(b.function.llvm, "while_exit")
	b.llvm.CreateBr(testBlock)
	b.block(testBlock, llvm.BasicBlock{}, func() {
		cond := test()
		// test() may itself have terminated the block (e.g. by
		// returning); only emit the branch when it did not.
		if !b.IsBlockTerminated() {
			b.llvm.CreateCondBr(cond.llvm, loopBlock, exitBlock)
		}
	})
	b.block(loopBlock, testBlock, loop)
	b.setInsertPointAtEnd(exitBlock)
}

// ForN builds a logic block with the following pseudocode:
//
//   for it := 0; it < n; it++ {
//     cont := cb()
//     if cont == false { break; }
//   }
//
// If cb returns nil then the loop will never exit early.
func (b *Builder) ForN(n *Value, cb func(iterator *Value) (cont *Value)) {
	one := llvm.ConstInt(n.Type().llvmTy(), 1, false)
	zero := b.Zero(n.Type())
	// The iterator lives in a local so the loop body can observe and the
	// loop footer can increment it.
	iterator := b.LocalInit("loop_iterator", zero)
	test := b.m.ctx.AddBasicBlock(b.function.llvm, "for_n_test")
	loop := b.m.ctx.AddBasicBlock(b.function.llvm, "for_n_loop")
	exit := b.m.ctx.AddBasicBlock(b.function.llvm, "for_n_exit")
	b.llvm.CreateBr(test)
	b.block(test, llvm.BasicBlock{}, func() {
		// Signed compare: iterator < n keeps looping.
		done := b.llvm.CreateICmp(llvm.IntSLT, iterator.Load().llvm, n.llvm, "for_n_condition")
		b.llvm.CreateCondBr(done, loop, exit)
	})
	b.block(loop, llvm.BasicBlock{}, func() {
		it := iterator.Load()
		cont := cb(it)
		// cb may have terminated the block (e.g. emitted a return);
		// adding the increment/branch after a terminator is illegal.
		if b.IsBlockTerminated() {
			return
		}
		b.llvm.CreateStore(b.llvm.CreateAdd(it.llvm, one, "for_n_iterator_inc"), iterator.llvm)
		if cont == nil {
			b.llvm.CreateBr(test)
		} else {
			assertTypesEqual(cont.ty, b.m.Types.Bool)
			b.llvm.CreateCondBr(cont.llvm, test, exit)
		}
	})
	b.setInsertPointAtEnd(exit)
}

// SwitchCase is a single condition and block used as a case statement in a
// switch.
type SwitchCase struct {
	Conditions func() []*Value
	Block      func()
}

// Switch builds a switch statement.
func (b *Builder) Switch(cases []SwitchCase, defaultCase func()) {
	// One test block and one body block per case; tests chain together so
	// a non-matching case falls through to the next test.
	tests := make([]llvm.BasicBlock, len(cases))
	blocks := make([]llvm.BasicBlock, len(cases))
	for i := range cases {
		tests[i] = b.m.ctx.AddBasicBlock(b.function.llvm, fmt.Sprintf("switch_case_%d_test", i))
		blocks[i] = b.m.ctx.AddBasicBlock(b.function.llvm, fmt.Sprintf("switch_case_%d_block", i))
	}
	var defaultBlock llvm.BasicBlock
	if defaultCase != nil {
		defaultBlock = b.m.ctx.AddBasicBlock(b.function.llvm, "switch_case_default")
		// Appending the default to tests makes the last case's "no match"
		// edge target it instead of the exit.
		tests = append(tests, defaultBlock)
	}
	exit := b.m.ctx.AddBasicBlock(b.function.llvm, "end_switch")
	b.llvm.CreateBr(tests[0])
	for i, c := range cases {
		i, c := i, c // capture per-iteration copies for the closures below
		b.block(tests[i], llvm.BasicBlock{}, func() {
			// OR together all conditions of this case.
			conds := c.Conditions()
			match := conds[0]
			for _, c := range conds[1:] {
				match = b.Or(match, c)
			}
			next := exit
			if i+1 < len(tests) {
				next = tests[i+1]
			}
			b.llvm.CreateCondBr(match.llvm, blocks[i], next)
		})
		b.block(blocks[i], exit, c.Block)
	}
	if defaultCase != nil {
		b.block(defaultBlock, exit, defaultCase)
	}
	b.setInsertPointAtEnd(exit)
}

// Return returns execution of the function with the given value.
// A nil val stores the zero value of the result type (when there is one)
// before branching to the function exit block.
func (b *Builder) Return(val *Value) {
	if val != nil {
		assertTypesEqual(val.Type(), b.function.Type.Signature.Result)
		b.llvm.CreateStore(val.llvm, b.result)
	} else if !b.result.IsNil() {
		b.llvm.CreateStore(llvm.ConstNull(b.function.Type.Signature.Result.llvmTy()), b.result)
	}
	b.llvm.CreateBr(b.exit)
}

// IsBlockTerminated returns true if the last instruction is a terminator
// (unconditional jump). It is illegal to write another instruction after a
// terminator.
func (b *Builder) IsBlockTerminated() bool {
	return !b.llvm.GetInsertBlock().LastInstruction().IsATerminatorInst().IsNil()
}

// FuncAddr returns the pointer to the given function.
func (b *Builder) FuncAddr(f *Function) *Value {
	return b.val(b.m.Types.Pointer(f.Type), f.llvm)
}

// PrintfSpecifier returns the string and values that can be used to print v
// with printf.
func (b *Builder) PrintfSpecifier(v *Value) (string, []*Value) { t := v.Type() switch t { case b.m.Types.Bool: v = b.Select(v, b.Scalar("true"), b.Scalar("false")) return "%s", []*Value{v} case b.m.Types.Float32: return "%f", []*Value{v} case b.m.Types.Float64: return "%d", []*Value{v} case b.m.Types.Int, b.m.Types.Int8, b.m.Types.Int16, b.m.Types.Int32, b.m.Types.Int64: return "%lld", []*Value{v.Cast(b.m.Types.Int64)} case b.m.Types.Uint, b.m.Types.Uint8, b.m.Types.Uint16, b.m.Types.Uint32, b.m.Types.Uint64: return "%llu", []*Value{v.Cast(b.m.Types.Int64)} case b.m.Types.Uintptr: return "%p", []*Value{v.Cast(b.m.Types.Int64)} case b.m.Types.Size: return "%z", []*Value{v.Cast(b.m.Types.Int64)} case b.m.Types.Float32: return "%f", []*Value{v} case b.m.Types.Float64: return "%d", []*Value{v} } switch t := t.(type) { case Pointer: return "%p", []*Value{v} case *Struct: vals := []*Value{} sb := strings.Builder{} sb.WriteString(t.TypeName()) sb.WriteString("{ ") for i, f := range t.Fields() { if i > 0 { sb.WriteString(", ") } fmt, val := b.PrintfSpecifier(v.Extract(i)) sb.WriteString(f.Name) sb.WriteString(": ") sb.WriteString(fmt) vals = append(vals, val...) } sb.WriteString(" }") return sb.String(), vals } return fmt.Sprintf("<%v>", v.Type()), nil } // StructOf builds a struct value that holds all the values in v. func (b *Builder) StructOf(name string, v []*Value) *Value { fields := make([]Field, len(v)) for i, v := range v { fields[i].Type = v.Type() } s := b.Undef(b.m.Types.Struct(name, fields...)) for i, v := range v { s = s.Insert(i, v) } return s } // block calls f to appends instructions to the specified block. // If next is not nil and the f returns without terminating the block, then a // unconditional jump to next is added to the block. 
func (b *Builder) block(block, next llvm.BasicBlock, f func()) {
	b.setInsertPointAtEnd(block)
	f()
	// Only add the fall-through branch when the callback did not already
	// terminate the block; two terminators would be invalid IR.
	if !next.IsNil() && !b.IsBlockTerminated() {
		b.llvm.CreateBr(next)
	}
}

// setInsertPointAtEnd moves the builder to the end of block while keeping
// the current debug location intact.
func (b *Builder) setInsertPointAtEnd(block llvm.BasicBlock) {
	b.llvm.SetInsertPointAtEnd(block)
	// LLVM will clear the debug location on a insert point change.
	// Restore it to what we previously had.
	b.dbgRestoreLocation()
}
package matchs import ( "sync" "testing" ) func TestNewAcMatcher(t *testing.T) { w := []string{ "a", "b", } m := NewAcMatcher(w) c := []string{ "aaa", "bbb", "aabab", "vc", "ccsad", "aasd", } r := make([]interface{}, len(c)) wg := sync.WaitGroup{} for i := range c { wg.Add(1) go func(i int) { defer wg.Done() r[i] = m.Match(c[i]) }(i) } wg.Wait() println(r) }
package cmd import ( "os" "time" "github.com/0xTanvir/pp/contest" "github.com/0xTanvir/pp/db" "github.com/0xTanvir/pp/home" "github.com/0xTanvir/pp/server" "github.com/0xTanvir/pp/users" "github.com/gin-gonic/contrib/ginrus" "github.com/gin-gonic/gin" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" ) var runCmd = &cobra.Command{ Use: "run", Short: "run server", Long: "run star the apps, it start the server, this is server entry point", Run: func(cmd *cobra.Command, args []string) { logger := logrus.StandardLogger() if lvl, err := logrus.ParseLevel(viper.GetString("log.level")); err == nil { logger.Level = lvl } logger.Out = os.Stderr logrus.Info("main : Started : Initialize Mongo") // Start MongoDB dbc, err := db.Dial(viper.GetString("db.uri")) if err != nil { logger.Error(err) return } defer dbc.Close() // Creates a gin router with default middleware: engine := gin.Default() if viper.GetBool("log.ginrus") { engine.Use(ginrus.Ginrus(logrus.StandardLogger(), time.RFC3339, true)) } // Initialize all available controller with their service controllers := &server.Controllers{ User: &users.Controller{UserService: &users.Service{DB: dbc}}, Home: &home.Controller{HomeService: &home.Service{DB: dbc}}, Contest: &contest.Controller{ContestService: &contest.Service{DB: dbc}}, } server := &server.Server{ Engine: engine, Controllers: controllers, } if err := server.Run(); err != nil { logger.Error(err) return } }, }
package book import ( "github.com/gofiber/fiber" )
package main import ( "fmt" "grpc_api/api" "grpc_api/database" "grpc_api/proto" "log" "net" "google.golang.org/grpc" ) func main() { fmt.Println("running grpc server on port 7777") lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 7777)) if err != nil { log.Fatalf("failed to listen: %v", err) } s := api.Server{ Database: database.NewDatabase("user"), } grpcserver := grpc.NewServer() proto.RegisterUserServiceServer(grpcserver, &s) if err := grpcserver.Serve(lis); err != nil { log.Fatalf("failed to serve: %s", err) } }
// Copyright 2023 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package events_test

import (
	"context"
	"testing"
	"time"

	"github.com/gravitational/trace"
	"github.com/jonboulle/clockwork"
	"github.com/stretchr/testify/require"

	"github.com/gravitational/teleport/api/types"
	apievents "github.com/gravitational/teleport/api/types/events"
	"github.com/gravitational/teleport/lib/events"
)

// TestSearchEventsLimiter verifies that the limiter throttles only the
// search endpoints, not event emission, and that tokens refill over time.
func TestSearchEventsLimiter(t *testing.T) {
	t.Parallel()
	t.Run("emitting events happen without any limiting", func(t *testing.T) {
		s, err := events.NewSearchEventLimiter(events.SearchEventsLimiterConfig{
			RefillAmount: 1,
			Burst:        1,
			AuditLogger: &mockAuditLogger{
				emitAuditEventRespFn: func() error {
					return nil
				},
			},
		})
		require.NoError(t, err)
		// 20 emits against a burst of 1: none should be rejected because
		// EmitAuditEvent is not rate limited.
		for i := 0; i < 20; i++ {
			require.NoError(t, s.EmitAuditEvent(context.Background(), &apievents.AccessRequestCreate{}))
		}
	})
	t.Run("with limiter", func(t *testing.T) {
		burst := 20
		s, err := events.NewSearchEventLimiter(events.SearchEventsLimiterConfig{
			RefillTime:   20 * time.Millisecond,
			RefillAmount: 1,
			Burst:        burst,
			AuditLogger: &mockAuditLogger{
				searchEventsRespFn: func() ([]apievents.AuditEvent, string, error) {
					return nil, "", nil
				},
			},
		})
		require.NoError(t, err)
		someDate := clockwork.NewFakeClock().Now().UTC()

		// searchEvents and searchSessionEvents are helper fn to avoid copying those methods with huge
		// number of attributes multiple times in that test case.
		searchEvents := func() ([]apievents.AuditEvent, string, error) {
			return s.SearchEvents(someDate, someDate, "default", nil /* eventTypes */, 100 /* limit */, types.EventOrderAscending, "" /* startKey */)
		}
		searchSessionEvents := func() ([]apievents.AuditEvent, string, error) {
			return s.SearchSessionEvents(someDate, someDate, 100 /* limit */, types.EventOrderAscending, "" /* startKey */, nil /* cond */, "" /* sessionID */)
		}

		// Drain the full burst, alternating endpoints to show the limit
		// is shared between them.
		for i := 0; i < burst; i++ {
			var err error
			// rate limit is shared between both search endpoints.
			if i%2 == 0 {
				_, _, err = searchEvents()
			} else {
				_, _, err = searchSessionEvents()
			}
			require.NoError(t, err)
		}
		// Now all tokens from rate limit should be used
		_, _, err = searchEvents()
		require.True(t, trace.IsLimitExceeded(err))
		// Also on SearchSessionEvents
		_, _, err = searchSessionEvents()
		require.True(t, trace.IsLimitExceeded(err))

		// After 20ms 1 token should be added according to rate.
		require.Eventually(t, func() bool {
			_, _, err := searchEvents()
			return err == nil
		}, 40*time.Millisecond, 5*time.Millisecond)
	})
}

// TestSearchEventsLimiterConfig is a table test of config validation and
// defaulting in CheckAndSetDefaults.
func TestSearchEventsLimiterConfig(t *testing.T) {
	tests := []struct {
		name   string
		cfg    events.SearchEventsLimiterConfig
		wantFn func(t *testing.T, err error, cfg events.SearchEventsLimiterConfig)
	}{
		{
			name: "valid config",
			cfg: events.SearchEventsLimiterConfig{
				AuditLogger:  &mockAuditLogger{},
				RefillAmount: 1,
				Burst:        1,
			},
			wantFn: func(t *testing.T, err error, cfg events.SearchEventsLimiterConfig) {
				require.NoError(t, err)
				// RefillTime defaults to one second when unset.
				require.Equal(t, time.Second, cfg.RefillTime)
			},
		},
		{
			name: "empty rate in config",
			cfg: events.SearchEventsLimiterConfig{
				AuditLogger: &mockAuditLogger{},
				Burst:       1,
			},
			wantFn: func(t *testing.T, err error, cfg events.SearchEventsLimiterConfig) {
				require.ErrorContains(t, err, "RefillAmount cannot be less or equal to 0")
			},
		},
		{
			name: "empty burst in config",
			cfg: events.SearchEventsLimiterConfig{
				AuditLogger:  &mockAuditLogger{},
				RefillAmount: 1,
			},
			wantFn: func(t *testing.T, err error, cfg events.SearchEventsLimiterConfig) {
				require.ErrorContains(t, err, "Burst cannot be less or equal to 0")
			},
		},
		{
			name: "empty logger",
			cfg: events.SearchEventsLimiterConfig{
				RefillAmount: 1,
				Burst:        1,
			},
			wantFn: func(t *testing.T, err error, cfg events.SearchEventsLimiterConfig) {
				require.ErrorContains(t, err, "empty auditLogger")
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.cfg.CheckAndSetDefaults()
			tt.wantFn(t, err, tt.cfg)
		})
	}
}

// mockAuditLogger stubs the AuditLogger methods used by the tests; both
// search methods intentionally share searchEventsRespFn.
type mockAuditLogger struct {
	searchEventsRespFn   func() ([]apievents.AuditEvent, string, error)
	emitAuditEventRespFn func() error
	events.AuditLogger
}

func (m *mockAuditLogger) SearchEvents(fromUTC, toUTC time.Time, namespace string, eventTypes []string, limit int, order types.EventOrder, startKey string) ([]apievents.AuditEvent, string, error) {
	return m.searchEventsRespFn()
}

func (m *mockAuditLogger) SearchSessionEvents(fromUTC, toUTC time.Time, limit int, order types.EventOrder, startKey string, cond *types.WhereExpr, sessionID string) ([]apievents.AuditEvent, string, error) {
	return m.searchEventsRespFn()
}

func (m *mockAuditLogger) EmitAuditEvent(context.Context, apievents.AuditEvent) error {
	return m.emitAuditEventRespFn()
}
package gowhere

import (
	"fmt"
)

// RuleSet holds a group of Rules to be applied together
type RuleSet struct {
	rules []Rule
}

// firstMatch returns a Match for the first Rule whose pattern matches
// target, or nil when no rule matches. verbose traces each comparison to
// stdout.
func (rs *RuleSet) firstMatch(target string, verbose bool) *Match {
	if verbose {
		fmt.Printf("\nfirstMatch '%s'\n", target)
	}
	for _, r := range rs.rules {
		if verbose {
			fmt.Printf("checking: '%s' against %s '%s'\n", target, r.Directive, r.Pattern)
		}
		// A non-empty result string from Rule.Match signals a hit.
		s := r.Match(target)
		if s != "" {
			m := Match{r, s}
			return &m
		}
	}
	return nil
}

// FindMatches locates all of the Rules that match the Check.
// It follows redirect chains: each match's target is fed back in as the
// next input, stopping on a cycle, on a no-target redirect, when no rule
// matches, or when the hop limit is exceeded.
func (rs *RuleSet) FindMatches(check *Check, settings Settings) []Match {
	var r []Match
	// seen records match targets already visited, for cycle detection.
	seen := make(map[string]bool)

	match := rs.firstMatch(check.Input, settings.Verbose)
	for {
		if match == nil {
			if settings.Verbose {
				fmt.Printf("no more matches\n")
			}
			break
		}
		if settings.Verbose {
			fmt.Printf("matched: %v\n", *match)
		}
		if seen[match.Match] {
			// cycle detected
			if settings.Verbose {
				fmt.Printf("cycle\n")
			}
			break
		}
		r = append(r, *match)
		seen[match.Match] = true
		// NOTE(review): the append above runs before this check, so the
		// result can contain MaxHops+1 entries — confirm this is the
		// intended semantics of MaxHops.
		if settings.MaxHops > 0 && len(r) > settings.MaxHops {
			if settings.Verbose {
				fmt.Printf("max hops\n")
			}
			break
		}
		if match.Match == "" {
			// a redirect that doesn't point to a path,
			// like code 410
			if settings.Verbose {
				fmt.Printf("no-target redirect\n")
			}
			break
		}
		// look for another item in a redirect chain
		match = rs.firstMatch(match.Match, settings.Verbose)
	}
	return r
}
package email_test import ( "os" "strconv" "testing" "github.com/mylxsw/adanos-alert/pkg/messager/email" "github.com/stretchr/testify/assert" ) func TestEmailSend(t *testing.T) { host := os.Getenv("EMAIL_HOST") port := os.Getenv("EMAIL_PORT") user := os.Getenv("EMAIL_USER") password := os.Getenv("EMAIL_PASSWORD") if host == "" { return } portI, err := strconv.Atoi(port) assert.NoError(t, err) client := email.NewClient(host, portI, user, password) assert.NoError(t, client.Send("Hello, world", "This is message body", "mylxsw@aicode.cc", "mylxsw@126.com")) }
package hub import ( "github.com/exitcodezero/picloud/message" "github.com/pborman/uuid" "time" ) // Connection maintains info about the connected device // and subscribed events type Connection struct { ID string ClientName string IPAddress string ConnectedAt time.Time Subscribed []string Out chan message.SocketMessage } // NewConnection constructs a new Connection func NewConnection(ipAddress string, clientName string) Connection { hc := Connection{} hc.ID = uuid.New() hc.ClientName = clientName hc.IPAddress = ipAddress hc.ConnectedAt = time.Now().UTC() hc.Out = make(chan message.SocketMessage) return hc }
package usecases import ( "context" "errors" "github.com/landistas/badulaque/pkg/entities" ) type ProductUseCase interface { CreateProduct(ctx context.Context, product entities.Product) (*entities.Product, error) GetProduct(ctx context.Context, productID string) (*entities.Product, error) } type ProductStorageAdapter interface { Add(ctx context.Context, product entities.Product) (*entities.Product, error) Get(ctx context.Context, productID string) (*entities.Product, error) } type DefaultProductUseCase struct { productStorageAdapter ProductStorageAdapter } func NewDefaultProductUseCase(productStorageAdapter ProductStorageAdapter) DefaultProductUseCase { return DefaultProductUseCase{ productStorageAdapter: productStorageAdapter, } } func (useCase DefaultProductUseCase) CreateProduct(ctx context.Context, product entities.Product) (*entities.Product, error) { // 1. Validate that it has an ID // 2. Check that it doesn't exists // 3. Save if product.ID == "" { return nil, errors.New("id can not be nil") } returnedProduct, err := useCase.productStorageAdapter.Get(ctx, product.ID) if err != nil { return nil, err } if returnedProduct != nil { return nil, errors.New("product already exists") } return useCase.productStorageAdapter.Add(ctx, product) } func (useCase DefaultProductUseCase) GetProduct(ctx context.Context, productID string) (*entities.Product, error) { return nil, nil }
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Package wifipeer builds and controls peer ChromeOS devices for Wi-Fi tests. package wifipeer import ( "context" "chromiumos/tast/dut" "chromiumos/tast/errors" "chromiumos/tast/ssh" ) // MakePeers constructs the peer devices needed for the tests. func MakePeers(ctx context.Context, testdut *dut.DUT, count int) (peers []*ssh.Conn, retErr error) { defer func() { if retErr != nil { for _, peer := range peers { peer.Close(ctx) } } }() for i := 1; i <= count; i++ { peer, err := testdut.WifiPeerHost(ctx, i) if err != nil { return nil, errors.Wrapf(err, "failed to connet to the peer %d", i) } peers = append(peers, peer) } return peers, nil }
package main

import (
	"fmt"
	"strconv"
)

// Unlike in C, in Go assignment between items of
// different type requires an explicit conversion.
func main() {
	var a int = 65

	// The expression T(v) converts the value v to the type T.
	// FIX: use string(rune(a)) instead of string(a) — converting an
	// integer directly to string is flagged by go vet (since Go 1.15)
	// because it yields the UTF-8 encoding of the code point ("A"), not
	// the decimal string "65". Behavior is identical; the intent is now
	// explicit.
	b := string(rune(a))

	// Itoa is equivalent to FormatInt(int64(i), 10).
	c := strconv.Itoa(a)

	// Atoi is equivalent to ParseInt(s, 10, 0), converted to type int.
	d, _ := strconv.Atoi(c)

	fmt.Printf("Type: %T Value: %v\n", b, b) // Type: string Value: A
	fmt.Printf("Type: %T Value: %v\n", c, c) // Type: string Value: 65
	fmt.Printf("Type: %T Value: %v\n", d, d) // Type: int Value: 65
}
// Difficult: Medium
// Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
// Output: 7 -> 0 -> 8
// Explanation: 342 + 465 = 807.
package main

import "fmt"

// ListNode is a struct for linked List
type ListNode struct {
	Val  int
	Next *ListNode
}

func main() {
	item1 := &ListNode{2, nil}
	item2 := &ListNode{4, nil}
	item3 := &ListNode{3, nil}
	item4 := &ListNode{5, nil}
	item5 := &ListNode{6, nil}
	item6 := &ListNode{4, nil}

	items := item1  // list1
	items2 := item4 // list2

	items = addNodeToEnd(item2, items)
	items = addNodeToEnd(item3, items)
	// BUG FIX: these two appends previously passed items (list1) as the
	// target list, so list2 lost its remaining nodes and list1 was
	// corrupted with list2's nodes. Append to items2 instead.
	items2 = addNodeToEnd(item5, items2)
	items2 = addNodeToEnd(item6, items2)

	items = reverseRecurrsive(items)
	items2 = reverseRecurrsive(items2)

	for list := items; list != nil; list = list.Next {
		fmt.Println(list)
	}
	for list := items2; list != nil; list = list.Next {
		fmt.Println(list)
	}
	// addTwoNumbers(items, items2)
}

// addNodeToEnd appends node li to the end of the list headed by item and
// returns the (unchanged) head. A nil list is returned unchanged.
func addNodeToEnd(li, item *ListNode) *ListNode {
	if item == nil {
		return item
	}
	for list := item; list != nil; list = list.Next {
		if list.Next == nil {
			list.Next = li
			return item
		}
	}
	return item
}

// func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
// 	for list := l1; list != nil; list = list.Next {
// 		fmt.Println(list)
// 	}
// }

// reverseRecurrsive reverses the list in place recursively and returns
// the new head (the original tail).
func reverseRecurrsive(list *ListNode) *ListNode {
	if list == nil {
		return list
	}
	l := list
	if l.Next == nil {
		return l
	} else {
		newHead := reverseRecurrsive(l.Next)
		l.Next.Next = l
		l.Next = nil
		return newHead
	}
}
package modifiers

import (
	dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
	"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/consts"
	"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/internal/statefulset/builder"
	"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/connectioninfo"
	"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
	"github.com/Dynatrace/dynatrace-operator/src/kubeobjects/address"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// Compile-time assertions that RawImageModifier satisfies every modifier
// interface it is used through.
var _ envModifier = RawImageModifier{}
var _ volumeModifier = RawImageModifier{}
var _ volumeMountModifier = RawImageModifier{}
var _ builder.Modifier = RawImageModifier{}

// NewRawImageModifier builds a RawImageModifier for the given DynaKube.
func NewRawImageModifier(dynakube dynatracev1beta1.DynaKube) RawImageModifier {
	return RawImageModifier{
		dynakube: dynakube,
	}
}

// RawImageModifier wires tenant-secret volume/mount and connection-info
// environment variables into the ActiveGate StatefulSet.
type RawImageModifier struct {
	dynakube dynatracev1beta1.DynaKube
}

// Enabled reports whether this modifier should be applied; it is active
// unless the raw-image feature is disabled on the DynaKube.
func (mod RawImageModifier) Enabled() bool {
	return !mod.dynakube.FeatureDisableActivegateRawImage()
}

// Modify appends the tenant-secret volume to the pod spec and the
// corresponding mount and env vars to the ActiveGate container.
func (mod RawImageModifier) Modify(sts *appsv1.StatefulSet) error {
	baseContainer := kubeobjects.FindContainerInPodSpec(&sts.Spec.Template.Spec, consts.ActiveGateContainerName)
	sts.Spec.Template.Spec.Volumes = append(sts.Spec.Template.Spec.Volumes, mod.getVolumes()...)
	baseContainer.VolumeMounts = append(baseContainer.VolumeMounts, mod.getVolumeMounts()...)
	baseContainer.Env = append(baseContainer.Env, mod.getEnvs()...)
	return nil
}

// getVolumes returns the secret-backed volume carrying the tenant token.
func (mod RawImageModifier) getVolumes() []corev1.Volume {
	return []corev1.Volume{
		{
			Name: connectioninfo.TenantSecretVolumeName,
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: mod.dynakube.ActivegateTenantSecret(),
				},
			},
		},
	}
}

// getVolumeMounts mounts only the tenant token file (via SubPath),
// read-only, at the expected mount point.
func (mod RawImageModifier) getVolumeMounts() []corev1.VolumeMount {
	return []corev1.VolumeMount{
		{
			Name:      connectioninfo.TenantSecretVolumeName,
			ReadOnly:  true,
			MountPath: connectioninfo.TenantTokenMountPoint,
			SubPath:   connectioninfo.TenantTokenName,
		},
	}
}

// getEnvs returns the tenant UUID and communication-endpoint env vars.
func (mod RawImageModifier) getEnvs() []corev1.EnvVar {
	return []corev1.EnvVar{mod.tenantUUIDEnvVar(), mod.communicationEndpointEnvVar()}
}

// tenantUUIDEnvVar sources the tenant UUID from the connection-info
// ConfigMap; the key is required (Optional=false).
func (mod RawImageModifier) tenantUUIDEnvVar() corev1.EnvVar {
	return corev1.EnvVar{
		Name: connectioninfo.EnvDtTenant,
		ValueFrom: &corev1.EnvVarSource{ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{
				Name: mod.dynakube.ActiveGateConnectionInfoConfigMapName(),
			},
			Key:      connectioninfo.TenantUUIDName,
			Optional: address.Of(false),
		}}}
}

// communicationEndpointEnvVar sources the server endpoints from the same
// connection-info ConfigMap; the key is required (Optional=false).
func (mod RawImageModifier) communicationEndpointEnvVar() corev1.EnvVar {
	return corev1.EnvVar{
		Name: connectioninfo.EnvDtServer,
		ValueFrom: &corev1.EnvVarSource{ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{
				Name: mod.dynakube.ActiveGateConnectionInfoConfigMapName(),
			},
			Key:      connectioninfo.CommunicationEndpointsName,
			Optional: address.Of(false),
		}},
	}
}
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package wpaeap provides functional options for configuring WPA-EAP
// Wi-Fi security in tests.
package wpaeap

import (
	"chromiumos/tast/common/crypto/certificate"
	"chromiumos/tast/common/wifi/security/eap"
	"chromiumos/tast/common/wifi/security/wpa"
)

// Option is the function signature used to specify options of Config.
type Option func(*ConfigFactory)

// Mode returns an Option which sets WPA mode in Config.
func Mode(mode wpa.ModeEnum) Option {
	return func(c *ConfigFactory) {
		c.blueprint.mode = mode
	}
}

// FTMode returns an Option which sets fast transition mode in Config.
func FTMode(ft wpa.FTModeEnum) Option {
	return func(c *ConfigFactory) {
		c.blueprint.ftMode = ft
	}
}

// NotUseSystemCAs returns an Option which sets that we are NOT using system CAs in Config.
func NotUseSystemCAs() Option {
	return func(c *ConfigFactory) {
		c.blueprint.useSystemCAs = false
	}
}

// AltSubjectMatch returns an Option which sets shill EAP.SubjectAlternativeNameMatch property in Config.
func AltSubjectMatch(sans []string) Option {
	return func(c *ConfigFactory) {
		// Copy the slice so later mutation by the caller cannot change
		// the stored configuration.
		c.blueprint.altSubjectMatch = append([]string(nil), sans...)
	}
}

// DomainSuffixMatch returns an Option which sets shill EAP.DomainSuffixMatch property in Config.
func DomainSuffixMatch(domainSuffix []string) Option {
	return func(c *ConfigFactory) {
		// Defensive copy, same as AltSubjectMatch.
		c.blueprint.domainSuffixMatch = append([]string(nil), domainSuffix...)
	}
}

// Options below are re-wrapped from the options of package eap.

// FileSuffix returns an Option which sets the file suffix in Config.
func FileSuffix(suffix string) Option {
	return func(c *ConfigFactory) {
		c.eapOps = append(c.eapOps, eap.FileSuffix(suffix))
	}
}

// Identity returns an Option which sets the user to authenticate as in Config.
func Identity(id string) Option {
	return func(c *ConfigFactory) {
		c.eapOps = append(c.eapOps, eap.Identity(id))
	}
}

// ServerEAPUsers returns an Option which sets the EAP users for server in Config.
func ServerEAPUsers(users string) Option {
	// Delegates to the eap package; the wrapped option is applied when
	// the factory builds its configs.
	return func(cf *ConfigFactory) {
		cf.eapOps = append(cf.eapOps, eap.ServerEAPUsers(users))
	}
}

// ClientCACert returns an Option which sets the PEM encoded CA certificate for client in Config.
func ClientCACert(caCert string) Option {
	return func(cf *ConfigFactory) {
		cf.eapOps = append(cf.eapOps, eap.ClientCACert(caCert))
	}
}

// ClientCred returns an Option which sets the PEM encoded credentials for client in Config.
func ClientCred(cred certificate.Credential) Option {
	return func(cf *ConfigFactory) {
		cf.eapOps = append(cf.eapOps, eap.ClientCred(cred))
	}
}
package main

import (
	"fmt"
)

// fact returns n! computed recursively; any n < 2 yields 1.
func fact(n int) int {
	if n > 1 {
		return n * fact(n-1)
	}
	return 1
}

// play_with_array demonstrates fixed-size arrays: zero values, literals,
// the [...] length-inference form, and indexed literals.
func play_with_array() {
	fmt.Println("### play_with_array ###")
	var a [3]int // all set to 0
	fmt.Println(a)
	fmt.Printf("a[0] == %d\n", a[0])
	fmt.Printf("len(a) == %d\n", len(a))

	// Array literals
	a = [3]int{1, 2, 3}
	fmt.Println(a)
	a = [...]int{4, 5, 6}
	fmt.Println(a)
	a = [3]int{0: 7, 2: 8}
	fmt.Println(a)

	// Type error:
	// cannot use array literal (type [4]int)
	// as type [3]int in assignment
	// a = [4]int{1, 2, 3, 4}
}

// play_with_slice demonstrates slices: the nil zero value, slicing arrays,
// slice literals, make with len/cap, and re-slicing up to capacity.
func play_with_slice() {
	fmt.Println("### play_with_slice ###")
	var array [10]int;
	array[0] = 0
	array[1] = 1
	array[2] = 2

	// slice
	var slice []int
	fmt.Println("initial value of slice <")
	fmt.Print(" slice == ")
	fmt.Println(slice) // an empty slice
	// an empty slice (an initial value of slice) is nil.
	// slice is a kind of pointer.
	fmt.Printf(" (slice == nil) == %t\n", slice == nil) // true
	fmt.Printf(" len(slice) == %d\n", len(slice))       // 0
	fmt.Printf(" cap(slice) == %d\n", cap(slice))       // 0
	fmt.Println(">")

	slice = array[:]
	fmt.Printf("len(slice) = %d\n", len(slice))
	fmt.Printf("cap(slice) = %d\n", cap(slice))
	fmt.Printf("slice[0] = %d\n", slice[0])
	slice = array[1:]
	fmt.Printf("len(slice) = %d\n", len(slice))
	fmt.Printf("cap(slice) = %d\n", cap(slice))
	fmt.Printf("slice[0] = %d\n", slice[0])

	// slice literal
	slice = []int{1, 3, 5}
	fmt.Println(slice)

	// alloc arbitrary size array to slice
	slice = make([]int, 20)
	fmt.Println("** slice = make([]int, 20) **")
	fmt.Printf("len(slice) == %d\n", len(slice))
	fmt.Printf("cap(slice) == %d\n", cap(slice))
	slice = array[3:5]
	fmt.Printf("len(slice) == %d\n", len(slice))
	fmt.Printf("cap(slice) == %d\n", cap(slice))
	slice = make([]int, 10, 20)
	fmt.Printf("len(slice) == %d\n", len(slice))
	fmt.Printf("cap(slice) == %d\n", cap(slice))
	// fmt.Println(slice[15]) // index out of range
	slice = slice[:20] // re-slicing beyond len but within cap is legal
	fmt.Printf("slice[15] = %d\n", slice[15])
	// slice = slice[:100] // index out of range
}

// play_with_append demonstrates the built-in append with int and string slices.
func play_with_append() {
	// There is a special function "append" in golang.
	// "append" is special as types of return values are changed by args of
	// "append". We can not define such kind of functions in golang.
	//
	// C++            : Go
	// --------------------------------
	// vector<T> v    : var v T[]
	// v.push_back(e) : v = append(v, e)
	// v.size()       : len(v)
	// v[i]           : v[i]
	// vector<T>* p   : var p *T[]
	fmt.Println("### play_with_append ###")
	ia := []int{1, 2, 3}
	fmt.Println("ia =", ia)
	ia = append(ia, 4, 5, 6)
	fmt.Println("ia =", ia)
	sa := []string{"foo", "bar"}
	fmt.Println("sa =", sa)
	sa = append(sa, "hoge", "piyo")
	fmt.Println("sa =", sa)
}

// play_with_pointer demonstrates taking addresses, dereferencing, and the
// nil pointer value.
func play_with_pointer() {
	fmt.Println("### play_with_pointer ###")
	var x int = 1
	var p *int = &x
	fmt.Printf("*p == %d\n", *p)
	x = 2
	fmt.Printf("*p == %d\n", *p)
	p = nil
	fmt.Println(p)
	fmt.Println(p == nil)
}

// play_with_map demonstrates maps: reference semantics, nil maps, literals,
// missing-key zero values, the comma-ok idiom, and delete.
func play_with_map() {
	fmt.Println("### play_with_map ###")
	// Equivalent to m := make(map[string]float64)
	var m map[string]float64 = make(map[string]float64)
	m["foo"] = 10.0
	var m2 = m
	m2["foo"] = 20.0
	fmt.Println(m["foo"]) // Shows 20.0. map is a thin object like slice or pointer.
	// nil is an default initial value.
	// map is also a kind of pointer like slice.
	m2 = nil
	fmt.Println(m2)
	fmt.Println(m2 == nil)
	m3 := map[string]float64{"foo": 1, "bar": 3} // map literal
	fmt.Println(m3)
	var value = m3["piyo"] // No exception. value is set to 'zero'.
	fmt.Printf("value == %f\n", value)
	value, ok := m3["piyo"] // ok is set true if a map has a key.
	fmt.Println("value, ok =", value, ok)
	delete(m3, "foo") // Removes an element.
	fmt.Println(m3)
}

// play_with_if demonstrates if/else-if chains, multiline conditions, and
// the if-with-initializer form.
func play_with_if() {
	fmt.Println("### play_with_if ###")
	n := 2
	m := 3
	if n%2 == 1 {
		fmt.Println("cond0")
	} else if m == 3 {
		fmt.Println("cond1")
	} else {
		fmt.Println("cond2")
	}
	// multiline condition
	if n == 2 && m == 3 {
		// n == 2
		// && m == 3 -> syntax error: unexpected &&, expecting {
	}
	// We can use 1 initialization sentence with ;
	// Of course, vars defined here are visible only from inside of if-sentence.
	if tmp := n * m; tmp == 6 {
		// var tmp = ... is not valid.
	}
}

// play_with_for demonstrates the three for-loop forms: C-style, while-style,
// and the infinite loop with break.
func play_with_for() {
	fmt.Println("### play_with_for ###")
	for i := 0; i < 3; i++ {
		fmt.Printf("usual for: i == %d\n", i)
	}
	var count int
	count = 0
	for count < 3 {
		fmt.Printf("while loop: count == %d\n", count)
		count++
	}
	count = 0
	for {
		fmt.Printf("infinite loop: count == %d\n", count)
		count++
		if count > 3 {
			break
		}
	}
}

// play_with_foreach demonstrates for-range over arrays and maps.
func play_with_foreach() {
	fmt.Println("### play_with_foreach ###")
	array := [...]string{"foo", "bar", "baz"}
	for i, v := range array {
		fmt.Printf("for range with array: %d - %s\n", i, v)
	}
	dict := map[string]int{"foo": 3, "bar": 4}
	for key, value := range dict {
		fmt.Printf("for range with map: %s - %d\n", key, value)
	}
}

// play_with_switch demonstrates switch: explicit fallthrough, multi-value
// cases, expressionless switch, and switch with an initializer.
func play_with_switch() {
	fmt.Println("### play_with_switch ###")
	for i := 0; i < 4; i++ {
		fmt.Println("** i ==", i, "**")
		switch i {
		case 0:
			fmt.Println("i is 0.")
			// fall through is not default behavior of switch in golang.
			fallthrough
		case 1, 2:
			fmt.Println("i is 0, 1 or 2.")
		default:
			fmt.Println("i is not either 0, 1 or 2.")
		}
	}
	// In golang, we can switch instead of if-else.
	for i := -1; i <= 1; i++ {
		fmt.Println("** i ==", i, "**")
		switch {
		case i < 0:
			fmt.Println("i is negative.")
		case i == 0:
			// Do nothing
		default:
			fmt.Println("i is positive.")
		}
	}
	// With initialization
	switch value := 10; value % 2 {
	case 0:
		fmt.Println("value is even.")
	case 1:
		fmt.Println("value is odd.")
	}
	// Hmm..., I feel it is slightly confusing...
	switch value := 7; {
	case value%2 == 0:
		fmt.Println("value is even.")
	default:
		fmt.Println("value is odd.")
	}
	// Also, we can use "type switch" in golang. See ooo.go for details.
}

// play_with_label demonstrates breaking out of nested loops via a label.
func play_with_label() {
	fmt.Println("play_with_label")
OuterLoop:
	for i := 0; ; i++ {
		for j := 0; j <= i; j++ {
			fmt.Println("i =", i, ", j =", j)
			if i*j == 6 {
				fmt.Println("break!")
				break OuterLoop
			}
		}
	}
}

// basicMain runs every demo in this file in a fixed order.
func basicMain() {
	n := 5
	fmt.Printf("fact(%d) = %d\n", n, fact(n))
	play_with_array()
	play_with_slice()
	play_with_append()
	play_with_pointer()
	play_with_map()
	play_with_if()
	play_with_for()
	play_with_foreach()
	play_with_switch()
	play_with_label()
}
package simple

import (
	"fmt"
)

// BasicPointer demonstrates taking a variable's address, dereferencing a
// pointer, and mutating the original variable through that pointer.
func BasicPointer() {
	fmt.Println("\nBasicPointer")

	val := 42
	fmt.Println(val)

	// &val yields the variable's address; %T shows int versus *int.
	fmt.Println(&val)
	fmt.Printf("%T\n", val)
	fmt.Printf("%T\n", &val)

	ptr := &val
	fmt.Println(ptr)

	// Dereferencing reads the value stored at the address.
	fmt.Println(*ptr)
	fmt.Println(*&val)

	// Writing through the pointer mutates the original variable.
	*ptr = 43
	fmt.Println(val)
}
package cmd

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httputil"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/gorilla/mux"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

// Flag storage, bound to the "run" command in init.
var scheme, key, cert string
var returnStatusCode, port int

// responseYesSir is the JSON body echoed back to every client.
type responseYesSir struct {
	Message string `json:"message,omitempty"`
	Status  int    `json:"status,omitempty"`
}

// runCmd represents the run command: it starts the mock API server that
// answers every path via getInfo, shutting down gracefully on signals.
var runCmd = &cobra.Command{
	Use:   "run",
	Short: "Run the mock API server",
	Long:  `Start serving!`,
	Run: func(cmd *cobra.Command, args []string) {
		log.SetFormatter(&log.TextFormatter{
			DisableColors: false,
			FullTimestamp: true,
		})
		log.SetOutput(os.Stdout)

		//Capture signals to correctly close channels and connections
		sigc := make(chan os.Signal, 1)
		var srv http.Server
		signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
		go func() {
			s := <-sigc
			switch s {
			case os.Interrupt:
				log.Debug("Interrupt received. Closing connections...")
				// NOTE(review): context.WithTimeout's cancel func is discarded
				// (go vet flags this); harmless right before os.Exit, but
				// worth cleaning up.
				ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
				srv.Shutdown(ctx)
				os.Exit(0)
			case syscall.SIGTERM:
				log.Debug("Termination received. Closing connections...")
				ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
				srv.Shutdown(ctx)
				os.Exit(-1)
			default:
				log.Debug("Closing connections...")
				ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
				srv.Shutdown(ctx)
				os.Exit(-2)
			}
		}()

		//Create Router for REST interface
		muxRouter := mux.NewRouter()
		// Every path is handled by getInfo.
		muxRouter.PathPrefix("/").HandlerFunc(getInfo)

		//Starting up server
		log.Info("Starting up " + ApplicationName + " on port " + fmt.Sprintf("%v", port))
		if scheme == "https" {
			// NOTE(review): the https branch uses a fresh server instead of
			// srv, so the signal handler's srv.Shutdown does not cover it.
			log.Error(http.ListenAndServeTLS(fmt.Sprintf(":%v", port), cert, key, muxRouter))
		} else {
			srv.Addr = fmt.Sprintf(":%v", port)
			srv.Handler = muxRouter
			if err := srv.ListenAndServe(); err != nil {
				log.Error("%s\n" + fmt.Sprintf("%v", err))
			}
		}
	},
}

// init registers the run command and its flags.
func init() {
	rootCmd.AddCommand(runCmd)
	runCmd.Flags().StringVarP(&scheme, "scheme", "s", "http", "Scheme http|https")
	runCmd.Flags().IntVarP(&returnStatusCode, "return", "r", 200, "HTTP return code (200,404,500)")
	runCmd.Flags().IntVarP(&port, "port", "p", 8888, "Port to listen on")
	runCmd.Flags().StringVarP(&cert, "cert", "c", "", "Path to the server TLS certificate file (only for https)")
	runCmd.Flags().StringVarP(&key, "key", "k", "", "Path to the server TLS certificate key file (only for https)")
	// NOTE(review): these validations run at init time, before cobra parses
	// the command line, so they only ever see the flag defaults — they should
	// live in PreRunE/Run instead.
	if scheme != "https" && scheme != "http" {
		log.Error("Scheme needs to be http or https")
		os.Exit(-1)
	}
	if scheme == "https" && (cert == "" || key == "") {
		log.Error("Need a certificate and a key for https!")
		os.Exit(-1)
	}
}

// getInfo handles every request: it logs the full incoming request, sets the
// configured status code, and echoes the request back as JSON (JSONP when a
// "callback" query parameter is present).
func getInfo(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	connData := fmt.Sprintf("%v:%v %v", r.RemoteAddr, r.RequestURI, r.Method)
	callback := r.URL.Query().Get("callback")
	log.Info("Request from " + connData)
	req, _ := httputil.DumpRequest(r, true)
	log.Info(fmt.Sprintf("%s", req))
	//Send them over the connection
	switch returnStatusCode {
	case 200:
		w.WriteHeader(http.StatusOK)
	case 500:
		w.WriteHeader(http.StatusInternalServerError)
	case 404:
		w.WriteHeader(http.StatusNotFound)
	default:
		// Unknown codes fall back to 200 and normalize the global so the
		// response body reports what was actually sent.
		w.WriteHeader(http.StatusOK)
		returnStatusCode = 200
	}
	stat := responseYesSir{
		Message: fmt.Sprintf("%s", req),
		Status:  returnStatusCode,
	}
	resp, _ := json.Marshal(stat)
	if callback != "" {
		fmt.Fprintf(w, "%s(%s)", callback, resp)
	} else {
		w.Write(resp)
	}
}
package storage

import (
	"btcnetwork/common"
	"btcnetwork/p2p"
	"context"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"github.com/syndtr/goleveldb/leveldb"
	"reflect"
	"sync"
)

var (
	// LatestBlockKey is the fixed key under which the tip height is stored
	// in DBlatestblock.
	LatestBlockKey = []byte("latestblock")
	// ErrOphanBlock is returned when a block's parent hash is unknown.
	// ("ophan" is a typo for "orphan" kept in the message for compatibility.)
	ErrOphanBlock = errors.New("ophan block")
)

// blockMgr owns the leveldb databases indexing blocks by hash and height.
type blockMgr struct {
	newBlock      chan p2p.BlockPayload // incoming blocks waiting to be persisted
	DBhash2block  *leveldb.DB           // block hash -> serialized block
	DBhash2height *leveldb.DB           // block hash -> height (little-endian uint32)
	DBheight2hash *leveldb.DB           // height (little-endian uint32) -> block hash
	DBlatestblock *leveldb.DB           // LatestBlockKey -> height of the newest block
}

var defaultBlockMgr *blockMgr

// startBlockMgr creates the singleton block manager and starts its DB loop.
func startBlockMgr(cfg *common.Config, ctx context.Context, wg *sync.WaitGroup) {
	defaultBlockMgr = newBlockMgr(cfg)
	go defaultBlockMgr.manageBlockDB(ctx, wg)
}

// newBlockMgr opens the four leveldb databases under cfg.DataDir; any open
// failure is fatal (panic).
func newBlockMgr(cfg *common.Config) *blockMgr {
	s := blockMgr{}
	s.newBlock = make(chan p2p.BlockPayload, 500) // TODO: consider carefully how this buffer size should be chosen
	var err error
	if s.DBhash2block, err = leveldb.OpenFile(cfg.DataDir+"/blockchain/block/hash2block", nil); err != nil {
		log.Error(err)
		panic(err)
	}
	if s.DBhash2height, err = leveldb.OpenFile(cfg.DataDir+"/blockchain/block/hash2height", nil); err != nil {
		log.Error(err)
		panic(err)
	}
	if s.DBheight2hash, err = leveldb.OpenFile(cfg.DataDir+"/blockchain/block/height2hash", nil); err != nil {
		log.Error(err)
		panic(err)
	}
	if s.DBlatestblock, err = leveldb.OpenFile(cfg.DataDir+"/blockchain/block/latestblock", nil); err != nil {
		log.Error(err)
		panic(err)
	}
	return &s
}

// manageBlockDB consumes newBlock until the context is cancelled (or a
// non-orphan update error occurs), then closes all databases and the channel.
func (bm *blockMgr) manageBlockDB(ctx context.Context, wg *sync.WaitGroup) {
	nb := p2p.BlockPayload{}
deadloop:
	for {
		select {
		case <-ctx.Done():
			break deadloop
		case nb = <-bm.newBlock:
			log.Info("update a block...")
			err := bm.updateDBs(&nb)
			if err != nil {
				if err == ErrOphanBlock {
					// Orphans are skipped, not fatal.
					log.Info(err)
					continue
				}
				log.Error(err)
				break deadloop
			}
			log.Info("update a block done.")
		}
	}
	_ = bm.DBhash2block.Close()
	_ = bm.DBheight2hash.Close()
	_ = bm.DBhash2height.Close()
	_ = bm.DBlatestblock.Close()
	close(bm.newBlock)
	log.Info("exit db mamager...")
	wg.Done()
}

// updateDBs writes a newly received block into leveldb: it derives the
// block's height from its parent, stores hash<->height and hash->block
// mappings, advances the tip, and feeds every transaction to the UTXO manager.
func (bm *blockMgr) updateDBs(newBlock *p2p.BlockPayload) error {
	var preHeight uint32
	preHash := newBlock.PreHash
	genesisBlockHash := bm.genesisBlockHash()
	if reflect.DeepEqual(preHash[:], genesisBlockHash[:]) {
		// The genesis block has no previous block, so treat its parent
		// height as 0.
		preHeight = 0
	} else {
		var err error
		preHeight, err = bm.hash2Height(preHash) // look up the parent's height by hash
		if err != nil {
			// The parent hash is unknown, so this block is (for now) an
			// orphan.
			log.Error(err)
			return ErrOphanBlock
		}
	}
	log.Debug("blockheader: >>", hex.EncodeToString(newBlock.Header.Serialize()))
	log.Debug("block: >>", hex.EncodeToString(newBlock.Serialize()))
	hash := common.Sha256AfterSha256(newBlock.Header.Serialize())
	log.Debug("storage----hash:", hex.EncodeToString(hash[:]))
	// NOTE(review): this Put error is logged but not returned, unlike the
	// ones below — confirm whether that is intentional.
	err := bm.DBhash2block.Put(hash[:], newBlock.Serialize(), nil)
	if err != nil {
		log.Error(err)
	}
	curHeight := preHeight + 1
	var i2b4 [4]byte
	binary.LittleEndian.PutUint32(i2b4[:], curHeight)
	if err = bm.DBheight2hash.Put(i2b4[:], hash[:], nil); err != nil {
		log.Error(err)
		return err
	}
	//
	if err = bm.DBhash2height.Put(hash[:], i2b4[:], nil); err != nil {
		log.Error(err)
		return err
	}
	if err = bm.DBlatestblock.Put(LatestBlockKey, i2b4[:], nil); err != nil {
		log.Error(err)
		return err
	}
	// UTXO analysis: hand every transaction to the UTXO manager.
	for i := uint64(0); i < newBlock.TxnCount.Value; i++ {
		defaultUtxoMgr.tx <- newBlock.Txns[i]
	}
	log.Debug("update block height:", curHeight)
	return nil
}

// hash2Height looks up the block height stored for the given block hash.
func (bm *blockMgr) hash2Height(hash [32]byte) (uint32, error) {
	buf, err := bm.DBhash2height.Get(hash[:], nil)
	if err != nil {
		return 0, err
	}
	height := binary.LittleEndian.Uint32(buf)
	return height, nil
}

// genesisBlockHash decodes the configured genesis block hash constant.
func (bm *blockMgr) genesisBlockHash() [32]byte {
	var buf []byte
	buf, _ = hex.DecodeString(common.GenesisBlockHash)
	var hash [32]byte
	copy(hash[:], buf)
	return hash
}

// IsEmpty reports whether no block has been stored yet (no tip recorded).
func (bm *blockMgr) IsEmpty() bool {
	has, err := defaultBlockMgr.DBlatestblock.Has(LatestBlockKey, nil)
	if err != nil {
		log.Error(err)
		panic(err)
	}
	return !has
}
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/gosuri/uilive"
	"github.com/mattermost/mattermost-cloud/model"
	"github.com/olekukonko/tablewriter"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// newCmdDashboard builds the "dashboard" command, which renders an
// auto-refreshing overview of clusters, installations, and cluster
// installations.
func newCmdDashboard() *cobra.Command {
	var flags dashboardFlags
	cmd := &cobra.Command{
		Use:   "dashboard",
		Short: "View an auto-refreshing dashboard of all cloud server resources.",
		RunE: func(command *cobra.Command, args []string) error {
			command.SilenceUsage = true
			return executeDashboardCmd(flags)
		},
	}
	flags.addFlags(cmd)
	return cmd
}

// executeDashboardCmd loops forever: it queries clusters, installations, and
// cluster installations, renders a summary table plus a list of unstable
// resources, and redraws in place every flags.refreshSeconds seconds.
// It only returns on a query error or invalid flags.
func executeDashboardCmd(flags dashboardFlags) error {
	client := model.NewClient(flags.serverAddress)
	if flags.refreshSeconds < 1 {
		return errors.Errorf("refresh seconds (%d) must be set to 1 or higher", flags.refreshSeconds)
	}
	// uilive rewrites the same terminal region on each update.
	writer := uilive.New()
	writer.Start()
	for {
		tableString := &strings.Builder{}
		table := tablewriter.NewWriter(tableString)
		table.SetAlignment(tablewriter.ALIGN_LEFT)
		table.SetHeader([]string{"TYPE", "TOTAL", "STABLE", "WIP"})
		var unstableList []string

		// Clusters
		start := time.Now()
		clusters, err := client.GetClusters(&model.GetClustersRequest{
			Paging: model.AllPagesNotDeleted(),
		})
		if err != nil {
			return errors.Wrap(err, "failed to query clusters")
		}
		clusterQueryTime := time.Since(start)
		clusterCount := len(clusters)
		var clusterStableCount int
		for _, cluster := range clusters {
			if cluster.State == model.ClusterStateStable {
				clusterStableCount++
			} else {
				unstableList = append(unstableList, fmt.Sprintf("Cluster: %s (%s)", cluster.ID, cluster.State))
			}
		}
		table.Append([]string{
			"Cluster",
			toStr(clusterCount),
			toStr(clusterStableCount),
			toStr(clusterCount - clusterStableCount),
		})

		// Installations
		start = time.Now()
		installations, err := client.GetInstallations(&model.GetInstallationsRequest{
			Paging: model.AllPagesNotDeleted(),
		})
		if err != nil {
			return errors.Wrap(err, "failed to query installations")
		}
		installationQueryTime := time.Since(start)
		installationCount := len(installations)
		// Hibernating (H) and pending-deletion (DP) installations are counted
		// as "stable" in the table but broken out in parentheses.
		var installationStableCount, installationsHibernatingCount, installationsPendingDeletionCount int
		for _, installation := range installations {
			switch installation.State {
			case model.ClusterInstallationStateStable:
				installationStableCount++
			case model.InstallationStateHibernating:
				installationsHibernatingCount++
			case model.InstallationStateDeletionPending:
				installationsPendingDeletionCount++
			default:
				var domainName string
				if len(installation.DNSRecords) > 0 {
					domainName = installation.DNSRecords[0].DomainName
				}
				unstableList = append(unstableList, fmt.Sprintf("Installation: %s | %s (%s)", installation.ID, domainName, installation.State))
			}
		}
		table.Append([]string{
			"Installation",
			toStr(installationCount),
			fmt.Sprintf("%d (H=%d, DP=%d)", installationStableCount+installationsHibernatingCount, installationsHibernatingCount, installationsPendingDeletionCount),
			toStr(installationCount - (installationStableCount + installationsHibernatingCount + installationsPendingDeletionCount)),
		})

		// Cluster Installations
		start = time.Now()
		clusterInstallations, err := client.GetClusterInstallations(&model.GetClusterInstallationsRequest{
			Paging: model.AllPagesNotDeleted(),
		})
		if err != nil {
			// NOTE(review): message says "clusters" but this queries cluster
			// installations — likely a copy-paste slip.
			return errors.Wrap(err, "failed to query clusters")
		}
		ciQueryTime := time.Since(start)
		clusterInstallationCount := len(clusterInstallations)
		var clusterInstallationStableCount int
		for _, clusterInstallation := range clusterInstallations {
			if clusterInstallation.State == model.ClusterInstallationStateStable {
				clusterInstallationStableCount++
			} else {
				unstableList = append(unstableList, fmt.Sprintf("Cluster Installation: %s (%s)", clusterInstallation.ID, clusterInstallation.State))
			}
		}
		table.Append([]string{
			"Cluster Installation",
			toStr(clusterInstallationCount),
			toStr(clusterInstallationStableCount),
			toStr(clusterInstallationCount - clusterInstallationStableCount),
		})
		table.Render()

		// Compose the final frame: header, query timings, table, unstable list.
		renderedDashboard := "\n### CLOUD DASHBOARD\n"
		renderedDashboard += fmt.Sprintf("[ Query Time Stats: CLSR=%s, INST=%s, CLIN=%s ]\n\n", clusterQueryTime.Round(time.Millisecond).String(), installationQueryTime.Round(time.Millisecond).String(), ciQueryTime.Round(time.Millisecond).String())
		renderedDashboard += tableString.String()
		for _, entry := range unstableList {
			renderedDashboard += fmt.Sprintf("%s\n", entry)
		}
		if len(unstableList) != 0 {
			renderedDashboard += "\n"
		}
		// Count down to the next refresh, redrawing the same frame each second.
		for i := flags.refreshSeconds; i > 0; i-- {
			_, _ = fmt.Fprintf(writer, "%s\nUpdating in %d seconds...\n", renderedDashboard, i)
			time.Sleep(time.Second)
		}
	}
}

// toStr converts an int to its decimal string form.
func toStr(i int) string {
	return strconv.Itoa(i)
}
package dashboard

import (
	"net/http"
	"github.com/gorilla/mux"
	"services"
	"middleware/auth"
	"models"
	"controllers/viewmodels"
	"repository"
	"github.com/satori/go.uuid"
	"strconv"
)

// CategoriesController serves the dashboard's category list, create, and
// edit pages.
type CategoriesController struct {
	r *mux.Router // subrouter mounted at /categories
	s *services.CategoryService
}

// NewCategoriesController mounts a /categories subrouter on r and returns
// the controller backed by a fresh CategoryService.
func NewCategoriesController(r *mux.Router) *CategoriesController {
	return &CategoriesController{
		r: r.StrictSlash(true).PathPrefix("/categories").Subrouter(),
		s: services.NewCategoryService(),
	}
}

// RegisterEndpoints wires the controller's handlers into its subrouter.
// Auth middleware is currently disabled (commented out) on every route.
func (cc *CategoriesController) RegisterEndpoints() {
	cc.r.Path("/").
		Methods(http.MethodGet).
		//Handler(alice.New(auth.AuthRedirect).ThenFunc(cc.CategoriesHandler))
		HandlerFunc(cc.CategoriesHandler)

	cc.r.Path("/{id:[1-9]([0-9]?)+}").
		Methods(http.MethodGet).
		Queries("task", "edit").
		//Handler(alice.New(auth.AuthRedirect).ThenFunc(cc.UpdateCategoryHandler))
		HandlerFunc(cc.UpdateCategoryHandler)

	cc.r.Path("/{id:[1-9]([0-9]?)+}").
		Methods(http.MethodGet).
		//Handler(alice.New(auth.AuthRedirect).ThenFunc(cc.NewCategoryHandler))
		HandlerFunc(cc.NewCategoryHandler)
}

// CategoriesHandler renders the category list page.
func (cc *CategoriesController) CategoriesHandler(w http.ResponseWriter, r *http.Request) {
	/*user, err := auth.GetUserPrincipal(r)
	if err != nil {
		http.Redirect(w, r, "/error", http.StatusSeeOther)
		return
	}*/
	var data struct {
		Categories  []*viewmodels.Category
		CurrentUser *models.UserPrincipal
		CsrfToken   string
		Signature   string
		Error       string
	}
	//data.CurrentUser = user
	data.CsrfToken = auth.CsrfToken(r)
	data.Signature = uuid.NewV4().String()
	categories, err := cc.s.GetCategoriesByQuery(repository.NewDefaultQuery())
	if err != nil {
		data.Error = err.Error()
		renderer.HTML(w, http.StatusOK, "error", err.Error())
		return
	}
	data.Categories = categories
	renderer.HTML(w, http.StatusOK, "categories", &data)
}

// NewCategoryHandler renders the empty "create category" form.
// NOTE: receiver renamed from "c" to "cc" for consistency with the other
// methods of this type (Go convention: one receiver name per type).
func (cc *CategoriesController) NewCategoryHandler(w http.ResponseWriter, r *http.Request) {
	/*user, err := auth.GetUserPrincipal(r)
	if err != nil {
		http.Redirect(w, r, "/error", http.StatusSeeOther)
		return
	}*/
	var data struct {
		CurrentUser *models.UserPrincipal
		Parents     []*models.Category
		CsrfToken   string
		Error       string
	}
	//data.CurrentUser = user
	data.CsrfToken = auth.CsrfToken(r)
	/*parents, err := cc.service.GetCategoriesFromQuery(utils.NewDefaultQuery())
	if err != nil {
		data.Error = err.Error()
		DashboardRenderer.HTML(w, http.StatusOK, "error", err.Error())
		return
	}
	data.Parents = parents*/
	renderer.HTML(w, http.StatusOK, "category", &data)
}

// UpdateCategoryHandler renders the edit form for the category whose numeric
// ID appears in the URL.
func (cc *CategoriesController) UpdateCategoryHandler(w http.ResponseWriter, r *http.Request) {
	var data struct {
		Category    *viewmodels.Category
		CurrentUser *models.UserPrincipal
		CsrfToken   string
		Error       string
	}
	/*user, err := auth.GetUserPrincipal(r)
	if err != nil {
		http.Redirect(w, r, "/error", http.StatusSeeOther)
		return
	}
	data.CurrentUser = user*/
	data.CsrfToken = auth.CsrfToken(r)
	vars := mux.Vars(r)
	id, err := strconv.Atoi(vars["id"])
	if err != nil {
		data.Error = err.Error()
		renderer.HTML(w, http.StatusOK, "error", err.Error())
		return
	}
	category, err := cc.s.GetCategoryByID(uint32(id))
	if err != nil {
		data.Error = err.Error()
		renderer.HTML(w, http.StatusOK, "error", err.Error())
		return
	}
	data.Category = category
	renderer.HTML(w, http.StatusOK, "category-edit", &data)
}
package leetcode_go func reverseVowels(s string) string { res := []byte(s) for i, j := 0, len(s)-1; i < j; { for !isVowel(s[i]) { i++ } for !isVowel(s[j]) { j-- } res[i], res[j] = res[j], res[i] i++ j-- } return string(s) } func isVowel(c byte) bool { if c == 'a' || c == 'e' || c == 'i' || c == 'o' || c == 'u' || c == 'A' || c == 'E' || c == 'I' || c == 'O' || c == 'U' { return true } return false }
package main

import (
	"fmt"
	"net"
	"time"
)

// Transaction records the state of one client request/response exchange,
// including timing, the connection it arrived on, and any error.
type Transaction struct {
	Conn      *net.TCPConn
	Ctx       interface{}
	Req       InRequest
	Rsp       OutRespose
	Time      time.Time
	Err       error
	ClientMsg string
	SvrMsg    string
}

// NewTrans builds a Transaction for the given connection and caller context,
// stamped with the current time.
func NewTrans(c *net.TCPConn, ctx interface{}) *Transaction {
	return &Transaction{
		Conn: c,
		Ctx:  ctx,
		Time: time.Now(),
	}
}

// String renders the transaction as a single tab-separated log line.
func (t *Transaction) String() string {
	return fmt.Sprintf("%d\t%d\t%d\t%s\t%s\t%s\t%d\t%s\t%s\t%v\n",
		VERSION, t.Time.Unix(), time.Since(t.Time),
		t.Req.Method, t.Req.Path, t.Conn.RemoteAddr(),
		t.Rsp.Status, t.ClientMsg, t.SvrMsg, t.Err)
}
package dlstream

import (
	"errors"
	"io"
	"net"
	"syscall"
)

// shouldRetryRequest analyzes a given request error and determines whether
// it is a good idea to retry the request.
func shouldRetryRequest(err error) bool {
	if err == nil {
		return false
	}

	// A truncated body (connection dropped mid-transfer) is worth retrying.
	if errors.Is(err, io.ErrUnexpectedEOF) {
		return true
	}

	// Timeouts are transient.
	var netError net.Error
	if errors.As(err, &netError) && netError.Timeout() {
		return true
	}

	// Dial failures are not retried (the host is likely unreachable); a
	// failed read on an established connection is.
	var netOpError *net.OpError
	if errors.As(err, &netOpError) {
		switch netOpError.Op {
		case "dial":
			return false
		case "read":
			return true
		}
	}

	// BUG FIX: syscall errors appear in the chain as syscall.Errno *values*
	// (usually wrapped in *os.SyscallError), so the errors.As target must be
	// a syscall.Errno — the previous *syscall.Errno target never matched.
	var errno syscall.Errno
	if errors.As(err, &errno) {
		switch errno {
		case syscall.ECONNREFUSED, // Connection refused
			syscall.ECONNRESET,   // Connection reset
			syscall.ECONNABORTED: // Connection aborted
			return true
		}
	}

	return false
}
package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/codegangsta/cli"
	"github.com/libgit2/git2go"
)

// main wires up the "gobetween" glide-helper CLI and dispatches os.Args.
func main() {
	commands := []cli.Command{
		{
			Name:        "status",
			ShortName:   "s",
			Usage:       "./gobetween s",
			Description: "Get Status of current directories dependencies, based on glide.lock",
			Action: func(c *cli.Context) {
				// Open the repository at the current working directory and
				// log its state.
				workingDir, err := os.Getwd()
				if err != nil {
					log.Printf("Working Directory failed:%v\n", err)
					os.Exit(100)
				}
				repo, err := git.OpenRepository(workingDir)
				if err != nil {
					log.Printf("git.OpenRepository failed:%v\n", err)
					os.Exit(100)
				}
				state := repo.State()
				log.Printf("State:%v\n", state)
			},
		},
		{
			Name:        "Prepare",
			ShortName:   "p",
			Usage:       "./gobetween p",
			Description: "Get Status of current directories dependencies, based on glide.lock",
			Flags: []cli.Flag{
				cli.StringFlag{Name: "orgId", Value: "", Usage: "A valid UUID"},
			},
			Action: func(c *cli.Context) {
				// Only validates the flag so far; the actual "prepare" work
				// is not implemented yet.
				orgId := strings.TrimSpace(c.String("orgId"))
				if len(orgId) == 0 {
					log.Printf("Cannot pass a blank orgId")
					os.Exit(1)
				}
			},
		},
	}
	app := cli.NewApp()
	app.Commands = commands
	app.Name = "gobetween"
	app.Usage = "Glide Helper."
	app.Version = "0.0.1"
	app.Action = func(ctx *cli.Context) {
		if len(ctx.Args()) == 0 {
			cli.ShowAppHelp(ctx)
			os.Exit(1)
		}
		// NOTE(review): this nested "console" app is built but never Run, so
		// it currently has no effect — confirm whether an interactive REPL
		// was intended here.
		console := cli.NewApp()
		console.Commands = commands
		console.Action = func(c *cli.Context) {
			fmt.Println("Command not found. Type 'help' for a list of commands.")
		}
	}
	app.Run(os.Args)
	os.Exit(0)
}
package metrics import "fmt" const namespaceSeparator = "." type namespaced struct { namespace string adapted Metrics } // NewNamespaced returns the metrics with the metrics names with a namespace or prefix. func NewNamespaced(m Metrics, namespace string) Metrics { return &namespaced{namespace, m} } func (n *namespaced) Counter(name string, tags ...Tag) Counter { return n.adapted.Counter(n.prefix(name), tags...) } func (n *namespaced) Gauge(name string, tags ...Tag) Gauge { return n.adapted.Gauge(n.prefix(name), tags...) } func (n *namespaced) Event(name string, tags ...Tag) Event { return n.adapted.Event(n.prefix(name), tags...) } func (n *namespaced) Timer(name string, tags ...Tag) Timer { return n.adapted.Timer(n.prefix(name), tags...) } func (n *namespaced) Histogram(name string, tags ...Tag) Histogram { return n.adapted.Histogram(n.prefix(name), tags...) } func (n *namespaced) prefix(name string) string { return fmt.Sprintf("%s%s%s", n.namespace, namespaceSeparator, name) }
package usersync

import (
	"fmt"
	"sort"
	"strings"

	"github.com/prebid/prebid-server/config"
)

// namedSyncerConfig pairs a bidder name with its syncer configuration.
type namedSyncerConfig struct {
	name string
	cfg  config.Syncer
}

// SyncerBuildError represents an error with building a syncer.
type SyncerBuildError struct {
	Bidder    string
	SyncerKey string
	Err       error
}

// Error implements the standard error interface.
func (e SyncerBuildError) Error() string {
	return fmt.Sprintf("cannot create syncer for bidder %s with key %s: %v", e.Bidder, e.SyncerKey, e.Err)
}

// BuildSyncers constructs a Syncer per bidder from the host configuration
// and bidder infos. Bidders sharing a syncer key share one primary config
// (chosen by chooseSyncerConfig). Returns either the full map or the
// accumulated errors, never both.
func BuildSyncers(hostConfig *config.Configuration, bidderInfos config.BidderInfos) (map[string]Syncer, []error) {
	// map syncer config by bidder
	cfgByBidder := make(map[string]config.Syncer, len(bidderInfos))
	for bidder, cfg := range bidderInfos {
		if shouldCreateSyncer(cfg) {
			cfgByBidder[bidder] = *cfg.Syncer
		}
	}

	// map syncer config by key
	cfgBySyncerKey := make(map[string][]namedSyncerConfig, len(bidderInfos))
	for bidder, cfg := range cfgByBidder {
		// A missing key defaults to the bidder's own name.
		if cfg.Key == "" {
			cfg.Key = bidder
		}
		cfgBySyncerKey[cfg.Key] = append(cfgBySyncerKey[cfg.Key], namedSyncerConfig{bidder, cfg})
	}

	// resolve host endpoint
	hostUserSyncConfig := hostConfig.UserSync
	if hostUserSyncConfig.ExternalURL == "" {
		hostUserSyncConfig.ExternalURL = hostConfig.ExternalURL
	}

	// create syncers
	errs := []error{}
	syncers := make(map[string]Syncer, len(bidderInfos))
	for key, cfgGroup := range cfgBySyncerKey {
		primaryCfg, err := chooseSyncerConfig(cfgGroup)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		// Every bidder in the group gets a syncer built from the group's
		// primary config.
		for _, bidder := range cfgGroup {
			syncer, err := NewSyncer(hostUserSyncConfig, primaryCfg.cfg, bidder.name)
			if err != nil {
				// NOTE(review): the error reports primaryCfg.name even though
				// the failing build was for bidder.name — confirm intended.
				errs = append(errs, SyncerBuildError{
					Bidder:    primaryCfg.name,
					SyncerKey: key,
					Err:       err,
				})
				continue
			}
			syncers[bidder.name] = syncer
		}
	}

	if len(errs) > 0 {
		return nil, errs
	}
	return syncers, nil
}

// shouldCreateSyncer reports whether the bidder's config warrants a syncer.
func shouldCreateSyncer(cfg config.BidderInfo) bool {
	if cfg.Disabled {
		return false
	}

	if cfg.Syncer == nil {
		return false
	}

	// a syncer may provide just a Supports field to provide hints to the host. we should only try to create a syncer
	// if there is at least one non-Supports value populated.
	return cfg.Syncer.Key != "" || cfg.Syncer.IFrame != nil || cfg.Syncer.Redirect != nil || cfg.Syncer.SupportCORS != nil
}

// chooseSyncerConfig picks the single config allowed to define endpoints for
// a shared syncer key. Exactly one bidder in the group must define an iframe
// or redirect endpoint; zero or more than one is an error.
func chooseSyncerConfig(biddersSyncerConfig []namedSyncerConfig) (namedSyncerConfig, error) {
	if len(biddersSyncerConfig) == 1 {
		return biddersSyncerConfig[0], nil
	}

	var bidderNames []string
	var bidderNamesWithEndpoints []string
	var syncerConfig namedSyncerConfig
	for _, bidder := range biddersSyncerConfig {
		bidderNames = append(bidderNames, bidder.name)
		if bidder.cfg.IFrame != nil || bidder.cfg.Redirect != nil {
			bidderNamesWithEndpoints = append(bidderNamesWithEndpoints, bidder.name)
			syncerConfig = bidder
		}
	}

	if len(bidderNamesWithEndpoints) == 0 {
		// Sorted for a deterministic error message.
		sort.Strings(bidderNames)
		bidders := strings.Join(bidderNames, ", ")
		return namedSyncerConfig{}, fmt.Errorf("bidders %s share the same syncer key, but none define endpoints (iframe and/or redirect)", bidders)
	}

	if len(bidderNamesWithEndpoints) > 1 {
		sort.Strings(bidderNamesWithEndpoints)
		bidders := strings.Join(bidderNamesWithEndpoints, ", ")
		return namedSyncerConfig{}, fmt.Errorf("bidders %s define endpoints (iframe and/or redirect) for the same syncer key, but only one bidder is permitted to define endpoints", bidders)
	}

	return syncerConfig, nil
}
package main

import (
	"strconv"
	"testing"
)

// FilterDigits(59) should sum the digits: 5 + 9 = 14.
func TestDigitsTwo(t *testing.T) {
	sum := FilterDigits(59)
	if sum != 14 {
		t.Error("Error on filtering digits ")
	}
}

// FilterDigits(111) should sum the digits: 1 + 1 + 1 = 3.
func TestDigitsThree(t *testing.T) {
	sum := FilterDigits(111)
	if sum != 3 {
		t.Error("Error on filtering digits")
	}
}

// Checks whether the absolute sum
// of all points equals zero
func TestAbsSumZero(t *testing.T) {
	x1, y1 := 0, 0
	total := GetSum(x1, y1)
	if total != 0 {
		t.Error("Total doesn't match Zero")
	}
}

// Checks the absolute digit sum for positive inputs:
// GetSum(66, 6) is expected to be |6| + |6| + |6| = 18.
func TestAbsSumPos(t *testing.T) {
	x1, y1 := 66, 6
	total := GetSum(x1, y1)
	if total != 18 {
		t.Error("Total doesn't match Positive value required")
		t.Error("Actual: " + strconv.Itoa(total))
	}
}

// Checks the absolute digit sum for negative inputs:
// GetSum(-69, -6) is expected to be |6| + |9| + |6| = 21.
func TestAbsSumNegative(t *testing.T) {
	x1, y1 := -69, -6
	total := GetSum(x1, y1)
	if total != 21 {
		t.Error("Total doesn't match Positive value required from negative values")
	}
}

// A total below the safe maximum should be reported as safe.
func TestIsSafeBelow(t *testing.T) {
	if IsSafe(6) != true {
		t.Error("IsSafe is below so should pass")
	}
}

// A total exactly at the safe maximum (23) should be reported as safe.
func TestIsSafeEqual(t *testing.T) {
	if IsSafe(23) != true {
		t.Error("IsSafe is equal to value so should pass")
	}
}

// A total above the safe maximum should be reported as NOT safe.
func TestIsSafeAbove(t *testing.T) {
	if IsSafe(90) == true {
		t.Error("IsSafe is equal to value so should pass")
	}
}
package main

import (
	"bufio"
	"github.com/thoj/go-ircevent"
	"log"
	"os"
	"regexp"
	"strings"
)

// Icseh maps a single character (as a one-rune string) to the lines of its
// ASCII-art glyph, loaded from ./dati/icse.txt.
var Icseh map[string][]string

// stampaIcse renders icsamelo as 7 rows of ASCII art, concatenating each
// rune's glyph row by row. Runes missing from Icseh contribute nothing
// (ranging over a missing map entry is a no-op).
func stampaIcse(icsamelo string) []string {
	lines := []string{"", "", "", "", "", "", ""}
	for _, letter := range icsamelo {
		for idx, line := range Icseh[string(letter)] {
			lines[idx] = lines[idx] + line
		}
	}
	return lines
}

// caricaIcseh loads the glyph table from ./dati/icse.txt into Icseh.
// Each file line holds '|'-separated fields: the glyph rows, then the
// character itself, then one trailing field.
func caricaIcseh() {
	leggino, err := os.Open("./dati/icse.txt")
	if err != nil {
		log.Fatal(err)
		return
	}
	var icsehLines []string
	r := bufio.NewReader(leggino)
	line, _, erre := r.ReadLine()
	for erre == nil {
		icsehLines = append(icsehLines, string(line))
		line, _, erre = r.ReadLine()
	}
	Icseh = map[string][]string{}
	for _, icsina := range icsehLines {
		var splitti []string
		splitti = strings.Split(icsina, "|")
		if len(splitti) == 10 {
			// presumably this repairs the entry for the '|' character itself,
			// whose glyph contains the field separator — TODO confirm against
			// dati/icse.txt.
			splitti = splitti[:len(splitti)-3]
			splitti = append(splitti, "|", "")
		}
		// Key: second-to-last field (the character); value: its glyph rows.
		Icseh[splitti[len(splitti)-2]] = splitti[:len(splitti)-2]
	}
}

// init loads the glyph table and registers the "icsah <text>" and "bamba"
// bot commands in the shared Dialogo dispatch table.
func init() {
	caricaIcseh()
	Dialogo = append(Dialogo,
		botCommand{
			regexp.MustCompile("^icsah (.+)"),
			func(event *irc.Event, matches []string) ([]string, string) {
				return stampaIcse(matches[1]), whomToReply(event)
			},
		},
		botCommand{
			regexp.MustCompile("^bamba$"),
			func(event *irc.Event, c []string) ([]string, string) {
				return stampaIcse("ROSA"), whomToReply(event)
			},
		},
	)
}
package main

import "fmt"

// person holds a first and last name.
type person struct {
	first string
	last  string
}

// secretAgent embeds person and adds a license-to-kill flag; embedding
// promotes person's fields onto secretAgent.
type secretAgent struct {
	person
	license bool
}

// speak prints the agent's full name, reading the promoted person fields.
// Every secretAgent value has access to this method.
// func (r receiver) identifier(parameter(s)) (return(s)) { ... }
func (s secretAgent) speak() {
	fmt.Println("I am", s.first, s.last)
}

// main builds one agent, prints the struct value, and has it speak.
func main() {
	bond := person{first: "James", last: "Bond"}
	sa1 := secretAgent{
		person:  bond,
		license: true,
	}
	fmt.Println(sa1)
	sa1.speak()
}
package isom

import (
	"bytes"
	"encoding/hex"
	"testing"
)

// TestReadElemStreamDesc round-trips an MPEG-4 elementary stream
// descriptor (read -> write -> re-read) and then exercises ADTS frame
// parsing and header construction. It only logs results; failures are
// limited to parse errors.
func TestReadElemStreamDesc(t *testing.T) {
	debugReader = true
	debugWriter = true
	var err error
	// Hex dump of a captured ES descriptor containing an AAC config.
	data, _ := hex.DecodeString("03808080220002000480808014401500000000030d400000000005808080021210068080800102")
	t.Logf("elemDesc=%x", data)
	t.Logf("length=%d", len(data))
	var aconfig MPEG4AudioConfig
	if aconfig, err = ReadElemStreamDescAAC(bytes.NewReader(data)); err != nil {
		t.Error(err)
	}
	aconfig = aconfig.Complete()
	t.Logf("aconfig=%v", aconfig)
	// Serialize the parsed config back out...
	bw := &bytes.Buffer{}
	WriteMPEG4AudioConfig(bw, aconfig)
	bw = &bytes.Buffer{}
	WriteElemStreamDescAAC(bw, aconfig, 2)
	t.Logf("elemDesc=%x", bw.Bytes())
	// ...and verify the writer's output is readable again.
	data = bw.Bytes()
	t.Logf("length=%d", len(data))
	if aconfig, err = ReadElemStreamDescAAC(bytes.NewReader(data)); err != nil {
		t.Error(err)
	}
	t.Logf("aconfig=%v", aconfig.Complete())
	// Reference capture of an ADTS stream (hex dump kept for context):
	//00000000  ff f1 50 80 04 3f fc de  04 00 00 6c 69 62 66 61  |..P..?.....libfa|
	//00000010  61 63 20 31 2e 32 38 00  00 42 40 93 20 04 32 00  |ac 1.28..B@. .2.|
	//00000020  47 ff f1 50 80 05 1f fc  21 42 fe ed b2 5c a8 00  |G..P....!B...\..|
	data, _ = hex.DecodeString("fff15080043ffcde040000")
	var n, framelen int
	aconfig, _, n, _, _ = ReadADTSFrame(data)
	t.Logf("%v n=%d", aconfig.Complete(), n)
	// Build a fresh ADTS header, append a dummy payload, and parse it back.
	data = MakeADTSHeader(aconfig, 1024*3, 33)
	data = append(data, []byte{1,2,3,4,5}...)
	t.Logf("%x", data)
	aconfig, _, n, framelen, err = ReadADTSFrame(data)
	t.Logf("%v n=%d framelen=%d err=%v", aconfig.Complete(), n, framelen, err)
}
package main

import (
	"fmt"
	"net/http"
)

// cat responds to the exact-match /catOnly route, echoing the
// requested path.
func cat(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "<h1>Handles %v</h1>\n%v\n", "/catOnly", r.URL.Path)
}

// dog responds to the /dog/ subtree route.
func dog(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "<h1>Handles %v</h1>\n%v\n", "/dog/", r.URL.Path)
}

// home is the catch-all handler for every path not matched above.
func home(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "<h1>Handles %v</h1>\n%v\n", "/", r.URL.Path)
}

func main() {
	const addr = ":8080"
	fmt.Println("server listening on port " + addr)
	http.HandleFunc("/catOnly", cat)
	http.HandleFunc("/dog/", dog)
	http.HandleFunc("/", home)
	http.ListenAndServe(addr, nil)
}
/* Copyright 2022 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package platform import ( "fmt" sErrors "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/errors" "github.com/GoogleContainerTools/skaffold/v2/proto/v1" ) // UnknownPlatformCLIFlag specifies that the platform provided via CLI flag couldn't be parsed func UnknownPlatformCLIFlag(platform string, err error) error { return sErrors.NewError(err, &proto.ActionableErr{ Message: fmt.Sprintf("unable to recognise platform %q: %v", platform, err), ErrCode: proto.StatusCode_BUILD_UNKNOWN_PLATFORM_FLAG, Suggestions: []*proto.Suggestion{ { SuggestionCode: proto.SuggestionCode_BUILD_FIX_UNKNOWN_PLATFORM_FLAG, Action: "Check that the value provided to --platform flag is a valid platform and formatted correctly, like linux/amd64, linux/arm64, linux/arm/v7, etc.", }, }, }) }
package main

import (
	"log"
	"os"
	"path/filepath"
	"text/template"
)

// main parses the onur.gohtml template under "Chapter 2/Video 10"
// relative to the current working directory and executes it to stdout
// with the string "Onur" as its data.
//
// Fixes: the original discarded every error (os.Getwd, ParseGlob,
// Execute), so a missing template failed with a nil-pointer panic
// instead of a useful message; paths are now built with filepath.Join
// instead of string concatenation.
func main() {
	repoRoot, err := os.Getwd()
	if err != nil {
		log.Fatalf("getting working directory: %v", err)
	}
	dir := filepath.Join(repoRoot, "Chapter 2", "Video 10")
	tpl, err := template.ParseGlob(filepath.Join(dir, "onur.gohtml"))
	if err != nil {
		log.Fatalf("parsing template: %v", err)
	}
	if err := tpl.Execute(os.Stdout, "Onur"); err != nil {
		log.Fatalf("executing template: %v", err)
	}
}
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

// metadataFormat is a Go text/template producing cloud-init metadata
// (netplan "network: version: 2" format) for a guest. It expects a data
// value with .Hostname and .Devices, where each device exposes MACAddr,
// DHCP4/DHCP6, IPAddrs, Gateway4/Gateway6, MTU, Routes, Nameservers and
// SearchDomains; a "nameservers" template func must be registered.
// NOTE(review): a top-level .Routes section is emitted after the
// ethernets block in addition to the per-device routes — presumably for
// global routes on the enclosing data value; confirm against callers.
const metadataFormat = `
instance-id: "{{ .Hostname }}"
local-hostname: "{{ .Hostname }}"
network:
  version: 2
  ethernets:
    {{- range $i, $net := .Devices }}
    id{{ $i }}:
      match:
        macaddress: "{{ $net.MACAddr }}"
      wakeonlan: true
      dhcp4: {{ $net.DHCP4 }}
      dhcp6: {{ $net.DHCP6 }}
      {{- if $net.IPAddrs }}
      addresses:
      {{- range $net.IPAddrs }}
      - "{{ . }}"
      {{- end }}
      {{- end }}
      {{- if $net.Gateway4 }}
      gateway4: "{{ $net.Gateway4 }}"
      {{- end }}
      {{- if $net.Gateway6 }}
      gateway6: "{{ $net.Gateway6 }}"
      {{- end }}
      {{- if .MTU }}
      mtu: {{ .MTU }}
      {{- end }}
      {{- if .Routes }}
      routes:
      {{- range .Routes }}
      - to: "{{ .To }}"
        via: "{{ .Via }}"
        metric: {{ .Metric }}
      {{- end }}
      {{- end }}
      {{- if nameservers $net }}
      nameservers:
        {{- if $net.Nameservers }}
        addresses:
        {{- range $net.Nameservers }}
        - "{{ . }}"
        {{- end }}
        {{- end }}
        {{- if $net.SearchDomains }}
        search:
        {{- range $net.SearchDomains }}
        - "{{ . }}"
        {{- end }}
        {{- end }}
      {{- end }}
    {{- end }}
    {{- if .Routes }}
    routes:
    {{- range .Routes }}
    - to: "{{ .To }}"
      via: "{{ .Via }}"
      metric: {{ .Metric }}
    {{- end }}
    {{- end }}
`
package v1alpha1 import ( "time" coreV1Api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // AdminConsoleSpec defines the desired state of AdminConsole // +k8s:openapi-gen=true type AdminConsoleSpec struct { Image string `json:"image"` Version string `json:"version"` ImagePullSecrets []coreV1Api.LocalObjectReference `json:"imagePullSecrets,omitempty"` KeycloakSpec KeycloakSpec `json:"keycloakSpec,omitempty"` EdpSpec EdpSpec `json:"edpSpec"` DbSpec AdminConsoleDbSettings `json:"dbSpec, omitempty"` BasePath string `json:"basePath, omitempty"` // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html } type EdpSpec struct { Version string `json:"version"` Name string `json:"name, omitempty"` DnsWildcard string `json:"dnsWildcard"` IntegrationStrategies string `json:"integrationStrategies,omitempty"` TestReportTools string `json:"testReportTools,omitempty"` } type KeycloakSpec struct { Enabled bool `json:"enabled,omitempty"` } type AdminConsoleDbSettings struct { Name string `json:"name,omitempty"` Hostname string `json:"hostname,omitempty"` Port string `json:"port,omitempty"` Enabled bool `json:"enabled,omitempty"` } // AdminConsoleStatus defines the observed state of AdminConsole // +k8s:openapi-gen=true type AdminConsoleStatus struct { Available bool `json:"available,omitempty"` LastTimeUpdated time.Time `json:"lastTimeUpdated,omitempty"` Status string `json:"status,omitempty"` // Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AdminConsole is the Schema for the adminconsoles API // +k8s:openapi-gen=true // +kubebuilder:subresource:status type AdminConsole struct { metav1.TypeMeta `json:",inline"` 
metav1.ObjectMeta `json:"metadata,omitempty"` Spec AdminConsoleSpec `json:"spec,omitempty"` Status AdminConsoleStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AdminConsoleList contains a list of AdminConsole type AdminConsoleList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []AdminConsole `json:"items"` } func init() { SchemeBuilder.Register(&AdminConsole{}, &AdminConsoleList{}) }
package blurhash

import "math"

// signPow returns |val|^exp with the sign of val preserved
// (i.e. a signed power, used by the blurhash DCT encoding).
func signPow(val, exp float64) float64 {
	sign := 1.0
	if val < 0 {
		sign = -1
	}
	return sign * math.Pow(math.Abs(val), exp)
}

// sRGBToLinear converts an 8-bit sRGB channel value (0-255) to a
// linear-light value in [0, 1] using the standard sRGB transfer curve.
func sRGBToLinear(val int) float64 {
	v := float64(val) / 255
	if v <= 0.04045 {
		return v / 12.92
	}
	return math.Pow((v+0.055)/1.055, 2.4)
}

// linearTosRGB converts a linear-light value (clamped to [0, 1]) back
// to an 8-bit sRGB channel value, rounding to nearest via the +0.5 term.
//
// Fix: the low-range branch previously computed v*12.92*255*0.5 —
// the rounding addend had been turned into a *0.5 factor, halving every
// dark pixel value. It now matches the high-range branch (and the
// reference blurhash implementations): scale, then add 0.5 and truncate.
func linearTosRGB(val float64) int {
	v := math.Max(0, math.Min(1, val))
	if v <= 0.0031308 {
		return int(v*12.92*255 + 0.5)
	}
	return int((1.055*math.Pow(v, 1/2.4)-0.055)*255 + 0.5)
}
package postgres import ( "github.com/google/uuid" "github.com/orbis-challenge/src/models" ) func (q DBQuery) SaveHolding(holding *models.Holding) (models.Holding, error) { _, err := q.Model(holding). Returning("*"). Insert() return *holding, err } func (q DBQuery) DeleteHoldingByID(id uuid.UUID) (err error) { h := models.Holding{ID: id} _, err = q.Model(&h). Delete() return err }
package reserve

import (
	"github.com/yosisa/arec/command"
	"io"
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// RecorderItem is a recording destination: it describes what to record
// via Info and receives the recorded stream via io.WriteCloser.
type RecorderItem interface {
	Info() *RecordInfo
	io.WriteCloser
}

// Engine schedules and executes recordings, keyed by RecordInfo.Id.
type Engine struct {
	Scheduler *Scheduler
	reserved  map[string]*RecordInfo // currently reserved programs, by Id
}

// NewEngine builds an Engine with a fresh Scheduler (gr/bs are passed
// straight to NewScheduler — see that constructor for their meaning).
func NewEngine(gr, bs int) *Engine {
	e := new(Engine)
	e.Scheduler = NewScheduler(gr, bs)
	e.reserved = make(map[string]*RecordInfo)
	return e
}

// Reserve registers the item for recording. Depending on the current
// time it either arms a timer for a future start, starts recording
// immediately if the program is already in progress, or does nothing
// if the program has already finished. Duplicate reservations are a
// no-op; scheduler conflicts are returned as errors.
func (e *Engine) Reserve(item RecorderItem) error {
	info := item.Info()
	if _, ok := e.reserved[info.Id]; ok {
		log.Printf("%+v already reserved", *info)
		return nil
	}
	if err := e.Scheduler.Reserve(info); err != nil {
		return err
	}
	e.reserved[info.Id] = info
	log.Printf("%+v scheduled to record", *info)

	// Seconds until the scheduled start (negative if already started).
	now := time.Now().Unix()
	wait := info.Start - int(now)
	if wait > 0 {
		info.timer = time.NewTimer(time.Duration(wait) * time.Second)
		log.Printf("Recording for %s scheduled after %d seconds", info.Id, wait)
		go func() {
			select {
			case <-info.timer.C:
				e.Record(item)
			case <-info.cancelCh: // reservation cancelled before start
			}
		}()
	} else if rest := wait + (info.End - info.Start); rest > 0 {
		// Start is in the past but the program hasn't ended yet.
		log.Printf("Recording for %s is starting immediately", info.Id)
		go e.Record(item)
	} else {
		log.Printf("Program %s is already finished", info.Id)
	}
	return nil
}

// Record streams the program with recpt1 until its scheduled end time,
// then closes the destination and releases the reservation.
// NOTE(review): io.Copy/Close errors are silently discarded here.
func (e *Engine) Record(item RecorderItem) {
	info := item.Info()
	recpt1 := command.NewRecpt1(info.Ch, info.Sid)
	duration := time.Unix(int64(info.End), 0).Sub(time.Now())
	recpt1.CloseAfter(duration)
	io.Copy(item, recpt1)
	item.Close()
	e.Scheduler.Cancel(info)
	delete(e.reserved, info.Id)
}

// ReserveFromDB loads all reserved programs from the database and
// re-registers each one; per-program errors are logged and skipped.
func (e *Engine) ReserveFromDB() {
	programs, err := GetReservedPrograms()
	if err != nil {
		log.Print(err)
	}
	for _, program := range programs {
		if channel, err := GetChannel(&program.Channel); err != nil {
			log.Print(err)
		} else {
			record := NewFileRecord(channel, program)
			e.Reserve(record)
		}
	}
}

// RunForever blocks handling signals: SIGHUP invokes handler (used for
// rescheduling); SIGINT/SIGQUIT terminate the process with exit code 0.
func (e *Engine) RunForever(handler func()) {
	signalCh := make(chan os.Signal, 4)
	signal.Notify(signalCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT)
	for {
		switch <-signalCh {
		case syscall.SIGHUP:
			log.Printf("Rescheduling")
			handler()
		default:
			os.Exit(0)
		}
	}
}
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package colexec

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/col/coldata"
	"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
	"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
	"github.com/cockroachdb/cockroach/pkg/sql/colmem"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/errors"
)

// newPartiallyOrderedDistinct creates a distinct operator on the given
// distinct columns when we have partial ordering on some of the distinct
// columns. It chains chunker -> chunkerOperator -> unordered distinct:
// the chunker groups tuples equal on the ordered columns, and the
// unordered distinct deduplicates within each chunk.
func newPartiallyOrderedDistinct(
	allocator *colmem.Allocator,
	input colexecop.Operator,
	distinctCols []uint32,
	orderedCols []uint32,
	typs []*types.T,
) (colexecop.Operator, error) {
	// This operator is only correct for a strict subset: no ordered columns
	// means plain unordered distinct, all ordered means ordered distinct.
	if len(orderedCols) == 0 || len(orderedCols) == len(distinctCols) {
		return nil, errors.AssertionFailedf(
			"partially ordered distinct wrongfully planned: numDistinctCols=%d "+
				"numOrderedCols=%d", len(distinctCols), len(orderedCols))
	}
	chunker, err := newChunker(allocator, input, typs, orderedCols)
	if err != nil {
		return nil, err
	}
	chunkerOperator := newChunkerOperator(allocator, chunker, typs)
	// distinctUnorderedCols will contain distinct columns that are not present
	// among orderedCols. The unordered distinct operator will use these columns
	// to find distinct tuples within "chunks" of tuples that are the same on the
	// ordered columns.
	distinctUnorderedCols := make([]uint32, 0, len(distinctCols)-len(orderedCols))
	for _, distinctCol := range distinctCols {
		isOrdered := false
		for _, orderedCol := range orderedCols {
			if orderedCol == distinctCol {
				isOrdered = true
				break
			}
		}
		if !isOrdered {
			distinctUnorderedCols = append(distinctUnorderedCols, distinctCol)
		}
	}
	distinct := NewUnorderedDistinct(allocator, chunkerOperator, distinctUnorderedCols, typs)
	return &partiallyOrderedDistinct{
		input:    chunkerOperator,
		distinct: distinct.(colexecop.ResettableOperator),
	}, nil
}

// partiallyOrderedDistinct implements DISTINCT operation using a combination
// of chunkerOperator and unorderedDistinct. It's only job is to check whether
// the input has been fully processed and, if not, to move to the next chunk
// (where "chunk" is all tuples that are equal on the ordered columns).
type partiallyOrderedDistinct struct {
	input    *chunkerOperator
	distinct colexecop.ResettableOperator
}

var _ colexecop.Operator = &partiallyOrderedDistinct{}

// ChildCount is part of the execinfra.OpNode interface.
func (p *partiallyOrderedDistinct) ChildCount(bool) int {
	return 1
}

// Child is part of the execinfra.OpNode interface.
func (p *partiallyOrderedDistinct) Child(nth int, _ bool) execinfra.OpNode {
	if nth == 0 {
		return p.input
	}
	colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth))
	// This code is unreachable, but the compiler cannot infer that.
	return nil
}

// Init initializes the whole operator chain (the unordered distinct
// wraps the chunkerOperator, so initializing it cascades downward).
func (p *partiallyOrderedDistinct) Init() {
	p.distinct.Init()
}

// Next returns the next batch of distinct tuples. A zero-length batch
// from the inner distinct marks the end of a chunk; we then either
// finish (input exhausted) or reset for the next chunk and loop.
func (p *partiallyOrderedDistinct) Next(ctx context.Context) coldata.Batch {
	for {
		batch := p.distinct.Next(ctx)
		if batch.Length() == 0 {
			if p.input.done() {
				// We're done, so return a zero-length batch.
				return coldata.ZeroBatch
			}
			// p.distinct will reset p.Input.
			p.distinct.Reset(ctx)
		} else {
			return batch
		}
	}
}

// newChunkerOperator adapts a chunker to the Operator interface.
func newChunkerOperator(
	allocator *colmem.Allocator, input *chunker, inputTypes []*types.T,
) *chunkerOperator {
	return &chunkerOperator{
		input:         input,
		inputTypes:    inputTypes,
		windowedBatch: allocator.NewMemBatchNoCols(inputTypes, coldata.BatchSize()),
	}
}

// chunkerOperator is an adapter from chunker to Operator interface. It outputs
// all tuples from a single chunk followed by zero-length batches until it is
// reset.
// It will have returned all tuples from all of the chunks only when it returns
// a zero-length *and* done() method returns true (i.e. a zero-length batch
// indicates the end of a chunk, but when done() returns true, it indicates
// that the input has been fully processed).
type chunkerOperator struct {
	input      *chunker
	inputTypes []*types.T
	// haveChunksToEmit indicates whether we have spooled input and still there
	// are more chunks to emit.
	haveChunksToEmit bool
	// numTuplesInChunks stores the number of tuples that are currently spooled
	// by input.
	numTuplesInChunks int
	// currentChunkFinished indicates whether we have emitted all tuples from the
	// current chunk and should be returning a zero-length batch.
	currentChunkFinished bool
	// newChunksCol, when non-nil, stores the boundaries of chunks. Every true
	// value indicates that a new chunk begins at the corresponding index. If
	// newChunksCol is nil, all spooled tuples belong to the same chunk.
	newChunksCol []bool
	// outputTupleStartIdx indicates the index of the first tuple to be included
	// in the output batch.
	outputTupleStartIdx int
	// windowedBatch is the output batch of chunkerOperator. For performance
	// reasons, the spooled tuples are not copied into it, instead we use a
	// "window" approach.
	windowedBatch coldata.Batch
}

var _ colexecop.ResettableOperator = &chunkerOperator{}

// ChildCount is part of the execinfra.OpNode interface.
func (c *chunkerOperator) ChildCount(bool) int {
	return 1
}

// Child is part of the execinfra.OpNode interface.
func (c *chunkerOperator) Child(nth int, _ bool) execinfra.OpNode {
	if nth == 0 {
		return c.input
	}
	colexecerror.InternalError(errors.AssertionFailedf("invalid index %d", nth))
	// This code is unreachable, but the compiler cannot infer that.
	return nil
}

// Init initializes the underlying chunker.
func (c *chunkerOperator) Init() {
	c.input.init()
}

// Next emits the next window of the current chunk, spooling more input
// from the chunker when needed. A zero-length batch signals the end of
// the current chunk (see the type comment).
func (c *chunkerOperator) Next(ctx context.Context) coldata.Batch {
	if c.currentChunkFinished {
		return coldata.ZeroBatch
	}
	if !c.haveChunksToEmit {
		// We don't have any chunks to emit, so we need to spool the input.
		c.input.spool(ctx)
		c.haveChunksToEmit = true
		c.numTuplesInChunks = c.input.getNumTuples()
		c.newChunksCol = c.input.getPartitionsCol()
	}
	outputTupleEndIdx := c.numTuplesInChunks
	if c.outputTupleStartIdx == outputTupleEndIdx {
		// Current chunk has been fully output.
		c.currentChunkFinished = true
		return coldata.ZeroBatch
	}
	if c.newChunksCol == nil {
		// When newChunksCol is nil, then all tuples that are returned via
		// getValues are equal on the ordered columns, so we simply emit the next
		// "window" of those tuples.
		if outputTupleEndIdx-c.outputTupleStartIdx > coldata.BatchSize() {
			outputTupleEndIdx = c.outputTupleStartIdx + coldata.BatchSize()
		}
	} else {
		// newChunksCol is non-nil, so there are multiple chunks within the
		// current tuples. We will emit a single chunk as a separate batch and
		// then will proceed to emitting zero-length batches until we're reset.
		outputTupleEndIdx = c.outputTupleStartIdx + 1
		for outputTupleEndIdx < c.numTuplesInChunks && !c.newChunksCol[outputTupleEndIdx] {
			outputTupleEndIdx++
		}
		c.currentChunkFinished = true
	}
	// Window each spooled column into the output batch without copying.
	for i := range c.inputTypes {
		window := c.input.getValues(i).Window(c.outputTupleStartIdx, outputTupleEndIdx)
		c.windowedBatch.ReplaceCol(window, i)
	}
	c.windowedBatch.SetSelection(false)
	c.windowedBatch.SetLength(outputTupleEndIdx - c.outputTupleStartIdx)
	c.outputTupleStartIdx = outputTupleEndIdx
	return c.windowedBatch
}

// done reports whether the underlying chunker has consumed all input.
func (c *chunkerOperator) done() bool {
	return c.input.done()
}

// Reset prepares the operator to emit the next chunk; when the current
// spooled tuples are exhausted it also clears the buffer so the next
// Next call spools fresh input.
func (c *chunkerOperator) Reset(_ context.Context) {
	c.currentChunkFinished = false
	if c.newChunksCol != nil {
		if c.outputTupleStartIdx == c.numTuplesInChunks {
			// We have processed all chunks among the current tuples, so we will need
			// to get new chunks.
			c.haveChunksToEmit = false
		}
	} else {
		// We have processed all current tuples (that comprised a single chunk), so
		// we will need to get new chunks.
		c.haveChunksToEmit = false
	}
	if !c.haveChunksToEmit {
		c.input.emptyBuffer()
		c.outputTupleStartIdx = 0
	}
}
package usecase import ( "github.com/jerolan/slack-poll/domain/port" "github.com/jerolan/slack-poll/domain/service" ) type UseCase struct { pollService service.PollService uuid port.UUIDPort } func NewUseCase(pS service.PollService, uuid port.UUIDPort) *UseCase { return &UseCase{ pollService: pS, uuid: uuid, } }
package view import ( proj_model "github.com/caos/zitadel/internal/project/model" "github.com/caos/zitadel/internal/project/repository/view" "github.com/caos/zitadel/internal/project/repository/view/model" "github.com/caos/zitadel/internal/view/repository" ) const ( applicationTable = "management.applications" ) func (v *View) ApplicationByID(appID string) (*model.ApplicationView, error) { return view.ApplicationByID(v.Db, applicationTable, appID) } func (v *View) SearchApplications(request *proj_model.ApplicationSearchRequest) ([]*model.ApplicationView, int, error) { return view.SearchApplications(v.Db, applicationTable, request) } func (v *View) PutApplication(project *model.ApplicationView) error { err := view.PutApplication(v.Db, applicationTable, project) if err != nil { return err } return v.ProcessedApplicationSequence(project.Sequence) } func (v *View) DeleteApplication(appID string, eventSequence uint64) error { err := view.DeleteApplication(v.Db, applicationTable, appID) if err != nil { return nil } return v.ProcessedApplicationSequence(eventSequence) } func (v *View) GetLatestApplicationSequence() (*repository.CurrentSequence, error) { return v.latestSequence(applicationTable) } func (v *View) ProcessedApplicationSequence(eventSequence uint64) error { return v.saveCurrentSequence(applicationTable, eventSequence) } func (v *View) GetLatestApplicationFailedEvent(sequence uint64) (*repository.FailedEvent, error) { return v.latestFailedEvent(applicationTable, sequence) } func (v *View) ProcessedApplicationFailedEvent(failedEvent *repository.FailedEvent) error { return v.saveFailedEvent(failedEvent) }
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.

package drweb

import (
	"context"
	"fmt"
	"regexp"
	"strings"
	"time"

	multiav "github.com/saferwall/saferwall/internal/multiav"
	"github.com/saferwall/saferwall/internal/utils"
)

const (
	// cmd is the drweb control binary used for all queries and scans.
	cmd      = "/opt/drweb.com/bin/drweb-ctl"
	// regexStr extracts the threat name from drweb-ctl scan output.
	regexStr = "infected with (.*)"
	// configD is the drweb daemon binary started by StartDaemon.
	configD  = "/opt/drweb.com/bin/drweb-configd"
)

// Scanner represents an empty struct that can be used to a method received.
type Scanner struct{}

// Version returns ScanCL, Core and VDF versions
func Version() (string, error) {
	var ver string
	out, err := utils.ExecCmd(cmd, "baseinfo")

	// Example `drweb-ctl baseinfo` output:
	// Core engine version: 7.00.47.04280
	// Virus database timestamp: 2020-Aug-11 18:40:16
	// Virus database fingerprint: D2EFA560783BC31243E97B3B73766C18
	// Virus databases loaded: 202
	// Virus records: 9118543
	// Anti-spam core is not loaded
	// Last successful update: 2020-Aug-11 20:42:37
	// Next scheduled update: 2020-Aug-11 21:12:37
	if err != nil {
		return ver, err
	}

	// Pick only the core engine version line.
	lines := strings.Split(out, "\n")
	for _, line := range lines {
		if strings.Contains(line, "Core engine version:") {
			ver = strings.TrimSpace(strings.TrimPrefix(line, "Core engine version:"))
			break
		}
	}
	return ver, nil
}

// ScanFile scans a given file
func (Scanner) ScanFile(filepath string, opts multiav.Options) (multiav.Result, error) {

	var err error
	res := multiav.Result{}

	if opts.ScanTimeout == 0 {
		opts.ScanTimeout = multiav.DefaultScanTimeout
	}

	// Create a new context and add a timeout to it.
	ctx, cancel := context.WithTimeout(
		context.Background(), opts.ScanTimeout)
	defer cancel()

	// # /opt/drweb.com/bin/drweb-ctl scan --help
	// Scan file or directory
	// Usage: drweb-ctl scan <path_to_scan> [options]
	// Available options:
	// -a [ --Autonomous ]            start autonomous component set
	// --Report arg (=BRIEF)          report type BRIEF, DEBUG or JSON
	// --ScanTimeout arg (=0)         scan timeout (in ms), 0 means no timeout
	// --PackerMaxLevel arg (=8)      limit packer nesting level
	// --ArchiveMaxLevel arg (=8)     limit archive (like zip) nesting level
	// --MailMaxLevel arg (=8)        limit mail (like pst, tbb) nesting level
	// --ContainerMaxLevel arg (=8)   limit container (like html) nesting level
	// --MaxCompressionRatio arg (=3000) limit compression ratio (must be >= 2)
	// --HeuristicAnalysis arg (=ON)  use heuristic analysis ON, OFF
	// --Exclude arg                  exclude specified paths from scan
	//                                (wildcards are allowed)
	// --OnKnownVirus arg (=REPORT)   action REPORT, CURE, QUARANTINE, DELETE
	// --OnIncurable arg (=REPORT)    action REPORT, QUARANTINE, DELETE
	// --OnSuspicious arg (=REPORT)   action REPORT, QUARANTINE, DELETE
	// --OnAdware arg (=REPORT)       action REPORT, QUARANTINE, DELETE
	// --OnDialers arg (=REPORT)      action REPORT, QUARANTINE, DELETE
	// --OnJokes arg (=REPORT)        action REPORT, QUARANTINE, DELETE
	// --OnRiskware arg (=REPORT)     action REPORT, QUARANTINE, DELETE
	// --OnHacktools arg (=REPORT)    action REPORT, QUARANTINE, DELETE
	// --Stdin                        read '\n'-separated paths from stdin
	// --Stdin0                       read '\0'-separated paths from stdin
	// -d [ --Debug ]                 extended diagnostic output
	res.Out, err = utils.ExecCmdWithContext(ctx, cmd, "scan", filepath)

	// Example output:
	// # /opt/drweb.com/bin/drweb-ctl scan /eicar
	// /eicar - infected with EICAR Test File (NOT a Virus!)
	// Scanned objects: 1, scan errors: 0, threats found: 1, threats neutralized: 0.
	// Scanned 0.07 KB in 0.08 s with speed 0.80 KB/s.
	//
	// List of return codes :
	// 1: 	Error on monitor channel
	// 2: 	Operation is already in progress
	// 3: 	Operation is in pending state
	// 4: 	Interrupted by user
	// 5: 	Operation canceled
	// 6: 	IPC connection terminated
	// 7: 	Invalid IPC message size
	// 8: 	Invalid IPC message format
	// 9: 	Not ready
	// 10: 	The component is not installed
	// 11: 	Unexpected IPC message
	// 12: 	IPC protocol violation
	// 13: 	Subsystem state is unknown
	// 20: 	Path must be absolute
	// 21: 	Not enough memory
	// 22: 	IO error
	// 23: 	No such file or directory
	// 24: 	Permission denied
	// 25: 	Not a directory
	// 26: 	Data file corrupted
	// 27: 	File already exists
	// 28: 	Read-only file system
	// 29: 	Network error
	// 30: 	Not a drive
	// 31: 	Unexpected EOF
	if err != nil {
		return res, err
	}

	// Grab the detection result
	re := regexp.MustCompile(regexStr)
	l := re.FindStringSubmatch(res.Out)
	if len(l) > 0 {
		res.Output = l[1]
		res.Infected = true
	}

	return res, nil
}

// StartDaemon starts the drweb daemon.
func StartDaemon() error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := utils.ExecCmdWithContext(ctx, "sudo", configD, "-d")
	if err != nil {
		return fmt.Errorf("failed to start daemon, err: %v, out:%s", err, out)
	}
	return nil
}
package helpers

import (
	"encoding/binary"
	"image"
	"image/color"
	"io"
)

// ReadByte reads and returns a single byte from r. On a read error the
// zero byte is returned (the signature carries no error, preserved for
// compatibility); io.ReadFull guarantees we never return an
// uninitialized byte after a short read.
func ReadByte(r io.Reader) byte {
	b := make([]byte, 1)
	io.ReadFull(r, b)
	return b[0]
}

// ReadInt reads 8 bytes from r and decodes them as a zig-zag varint,
// truncated to int. Fix: the previous single r.Read call could return
// fewer than 8 bytes (e.g. from a network reader), silently decoding
// garbage; io.ReadFull retries until the buffer is full or errors.
func ReadInt(r io.Reader) int {
	return int(ReadInt64(r))
}

// ReadInt64 reads 8 bytes from r and decodes them as a zig-zag varint.
func ReadInt64(r io.Reader) int64 {
	b := make([]byte, 8)
	io.ReadFull(r, b)
	i64, _ := binary.Varint(b)
	return i64
}

// Fill paints every pixel of img with color c. A nil image is a no-op.
// Fix: the inner loop bound was x <= sz.X, issuing one out-of-bounds
// SetRGBA per row (silently discarded by the image bounds check, but
// still an off-by-one and a wasted call).
func Fill(img *image.RGBA, c color.RGBA) {
	if img == nil {
		return
	}
	sz := img.Bounds().Size()
	for y := 0; y < sz.Y; y++ {
		for x := 0; x < sz.X; x++ {
			img.SetRGBA(x, y, c)
		}
	}
}
package modules

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"sync/atomic"
	"time"

	"github.com/buguang01/Logger"
	"github.com/buguang01/bige/messages"
	"github.com/buguang01/util/threads"
	"golang.org/x/net/websocket"
)

// WebSocketSetIpPort sets the listen address (e.g. ":8081").
func WebSocketSetIpPort(ipPort string) options {
	return func(mod IModule) {
		mod.(*WebSocketModule).ipPort = ipPort
	}
}

// WebSocketSetTimeout sets the idle timeout in seconds.
// For example, pass 10 for a 10-second timeout.
func WebSocketSetTimeout(timeout time.Duration) options {
	return func(mod IModule) {
		mod.(*WebSocketModule).timeout = timeout * time.Second
	}
}

// WebSocketSetOnlineFun sets a callback invoked after a connection is
// established; it can be used to capture connection info such as the IP.
func WebSocketSetOnlineFun(fun func(conn *messages.WebSocketModel)) options {
	return func(mod IModule) {
		mod.(*WebSocketModule).webSocketOnlineFun = fun
	}
}

// WebSocketSetRoute sets the message router.
func WebSocketSetRoute(route messages.IMessageHandle) options {
	return func(mod IModule) {
		mod.(*WebSocketModule).RouteHandle = route
	}
}

// WebSocketSetFrame sets the websocket frame/payload type
// (websocket.XXXFrame; default websocket.BinaryFrame).
func WebSocketSetFrame(frame byte) options {
	return func(mod IModule) {
		mod.(*WebSocketModule).frame = frame
	}
}

// WebSocketModule serves websocket clients over HTTP, routing inbound
// frames through RouteHandle and tracking connection/message counters.
type WebSocketModule struct {
	ipPort             string                                //HTTP listen address
	timeout            time.Duration                         //idle timeout
	RouteHandle        messages.IMessageHandle               //message router
	webSocketOnlineFun func(conn *messages.WebSocketModel)   //callback after connect, e.g. to record the peer IP
	getnum             int64                                 //total number of messages received
	runing             int64                                 //number of messages currently being handled
	connlen            int64                                 //number of open connections
	httpServer         *http.Server                          //underlying HTTP server
	thgo               *threads.ThreadGo                     //goroutine manager
	frame              byte                                  //websocket PayloadType
}

// NewWebSocketModule builds a module with defaults (":8081", 60s
// timeout, JSON routing, binary frames) and applies the given options.
func NewWebSocketModule(opts ...options) *WebSocketModule {
	result := &WebSocketModule{
		ipPort:             ":8081",
		timeout:            60 * time.Second,
		getnum:             0,
		runing:             0,
		connlen:            0,
		thgo:               threads.NewThreadGo(),
		RouteHandle:        messages.JsonMessageHandleNew(),
		webSocketOnlineFun: nil,
		frame:              websocket.BinaryFrame, //the router speaks binary, so binary frames by default
	}
	for _, opt := range opts {
		opt(result)
	}
	return result
}

// Init sets up the HTTP server and routes all paths to Handle.
func (mod *WebSocketModule) Init() {
	mod.httpServer = &http.Server{
		Addr:         mod.ipPort,
		WriteTimeout: mod.timeout * 2,
	}
	//more server parameters can be added here later as needed
	mux := http.NewServeMux()
	//the main handler; further routes may be registered externally
	mux.Handle("/", websocket.Handler(mod.Handle))
	mod.httpServer.Handler = mux
}

// Start begins serving in a managed goroutine.
func (mod *WebSocketModule) Start() {
	mod.thgo.Go(func(ctx context.Context) {
		Logger.PStatus("websocket Module Start!")
		err := mod.httpServer.ListenAndServe()
		if err != nil {
			if err == http.ErrServerClosed {
				Logger.PStatus("websocket run Server closed under requeset!!")
				// log.Print("Server closed under requeset!!")
			} else {
				Logger.PFatal("Server closed unexpecteed:" + err.Error())
				// log.Fatal("Server closed unexpecteed!!")
			}
		}
	})
}

// Stop closes the server and waits for all connection goroutines.
func (mod *WebSocketModule) Stop() {
	if err := mod.httpServer.Close(); err != nil {
		Logger.PError(err, "Close websocket Module:")
	}
	mod.thgo.CloseWait()
	Logger.PStatus("websocket Module Stop.")
}

// PrintStatus reports connection and message counters.
func (mod *WebSocketModule) PrintStatus() string {
	return fmt.Sprintf(
		"\r\n\t\twebsocket Module\t:%d/%d/%d\t(connum/getmsg/runing)",
		atomic.LoadInt64(&mod.connlen),
		atomic.LoadInt64(&mod.getnum),
		atomic.LoadInt64(&mod.runing))
}

// Handle receives every websocket connection accepted by the server.
// It tracks the connection, enforces the idle timeout via a side
// goroutine, and runs the read/dispatch loop until close or timeout.
func (mod *WebSocketModule) Handle(conn *websocket.Conn) {
	conn.PayloadType = mod.frame
	//count this connection so Stop can wait for all of them
	mod.thgo.Wg.Add(1)
	defer mod.thgo.Wg.Done()
	defer conn.Close()
	//connection model handed to downstream code; callbacks and custom
	//info can be attached to it
	wsconn := new(messages.WebSocketModel)
	wsconn.Conn = conn
	wsconn.KeyID = -1
	if mod.webSocketOnlineFun != nil {
		mod.webSocketOnlineFun(wsconn)
	}
	atomic.AddInt64(&mod.connlen, 1)
	//on the way out, notify that this user went offline
	defer func() {
		atomic.AddInt64(&mod.connlen, -1)
		Logger.PDebugKey("websocket client closeing:%+v .", wsconn.KeyID, wsconn.ConInfo)
		//hook for any work that must run when the connection closes
		if wsconn.CloseFun != nil {
			wsconn.CloseFun(wsconn)
		}
		Logger.PDebugKey("websocket client close:%+v .", wsconn.KeyID, wsconn.ConInfo)
	}()
	Logger.PDebugKey("websocket client open:%+v .", wsconn.KeyID, wsconn.ConInfo)
	runchan := make(chan bool, 8) //keep-alive signals for the timeout goroutine
	//timeout watchdog: resets on activity, closes the connection when idle
	mod.thgo.Go(
		func(ctx context.Context) {
			timeout := time.NewTimer(mod.timeout)
			defer timeout.Stop()
			defer conn.Close()
			for {
				select {
				case <-ctx.Done():
					return
				case <-timeout.C:
					return
				case ok := <-runchan:
					if ok {
						timeout.Reset(mod.timeout)
					} else {
						return
					}
				}
			}
			//close the connection on timeout
		})
	mod.thgo.Try(
		func(ctx context.Context) {
			buf := &bytes.Buffer{}
		listen:
			for {
				rdbuff := make([]byte, 10240)
				n, err := conn.Read(rdbuff)
				if err != nil {
					if err == io.EOF {
						runchan <- false
					}
					break listen
				}
				buf.Write(rdbuff[:n])
				buff := buf.Bytes()
				//accumulate until the router reports a complete message
				if msglen, ok := mod.RouteHandle.CheckMaxLenVaild(buff); ok {
					buff = buf.Next(int(msglen))
				} else {
					if msglen == 0 {
						//invalid message length
						break listen
					}
					continue
				}
				msg, err := mod.RouteHandle.Unmarshal(buff)
				if err != nil {
					Logger.PInfo("web socket RouteHandle Unmarshal Error:%s", err.Error())
					return
				}
				modmsg, ok := msg.(messages.IWebSocketMessageHandle)
				if !ok {
					Logger.PInfo("Not is Web socket Msg:%+v", msg)
					return
				} else {
					Logger.PInfo("Web socket Get Msg:%+v", msg)
				}
				//signal activity to the timeout watchdog
				runchan <- true
				atomic.AddInt64(&mod.getnum, 1)
				mod.thgo.Try(func(ctx context.Context) {
					atomic.AddInt64(&mod.runing, 1)
					modmsg.WebSocketDirectCall(wsconn)
				}, nil, func() {
					atomic.AddInt64(&mod.runing, -1)
				})
			}
		},
		nil, nil,
	)
}

// GetPlayerNum returns the number of connected users.
func (mod *WebSocketModule) GetPlayerNum() int64 {
	return atomic.LoadInt64(&mod.connlen)
}
/*
Copyright 2015 All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api

import (
	"fmt"
	"strings"

	"github.com/UKHomeOffice/vaultctl/pkg/utils"
)

var (
	// supportedBackendTypes are the vault secret backend types accepted
	// by Backend.IsValid.
	supportedBackendTypes = []string{"aws", "generic", "pki", "transit", "cassandra", "consul", "cubbyhole", "mysql", "postgres", "ssh", "custom"}
	// supportAuthTypes are the vault auth methods accepted by Auth.IsValid.
	supportAuthTypes = []string{"userpass", "ldap", "token", "appid", "github", "mfa", "tls"}
)

// IsValid validates the attributes
func (r Attributes) IsValid() error {
	// An attribute set is only usable if it points somewhere.
	if r.URI() == "" {
		return fmt.Errorf("attributes must have a uri specified")
	}
	return nil
}

// IsValid validates the auth backend
func (r Auth) IsValid() error {
	if r.Type == "" {
		return fmt.Errorf("you must specify a auth type")
	}
	if r.Path == "" {
		return fmt.Errorf("you must specify a path")
	}
	if strings.HasSuffix(r.Path, "/") {
		return fmt.Errorf("path should not end with /")
	}
	if !utils.ContainedIn(r.Type, supportAuthTypes) {
		return fmt.Errorf("auth type: %s is a unsupported auth type", r.Type)
	}
	// Validate each nested attribute set as well.
	for i, x := range r.Attrs {
		if err := x.IsValid(); err != nil {
			return fmt.Errorf("attribute %s invalid, error: %s", i, err)
		}
	}

	return nil
}

// IsValid validates the user is ok
func (r *User) IsValid() error {
	if r.Path != "" && strings.HasSuffix(r.Path, "/") {
		return fmt.Errorf("path should not end with /")
	}
	// Exactly one credential mechanism must be present; UserPass takes
	// precedence when both are set.
	if r.UserPass != nil {
		return r.UserPass.IsValid()
	}
	if r.UserToken != nil {
		return r.UserToken.IsValid()
	}

	return fmt.Errorf("you have not added authentication to the user")
}

// IsValid validates the user credential is ok
func (r UserPass) IsValid() error {
	if r.Username == "" {
		return fmt.Errorf("does not have a username")
	}
	if r.Password == "" {
		return fmt.Errorf("does not have a password")
	}

	return nil
}

// IsValid checks the user token is valid
func (r UserToken) IsValid() error {
	if r.DisplayName == "" {
		return fmt.Errorf("you must specify a display name for the token")
	}

	return nil
}

// IsValid validates the secret is ok
func (r Secret) IsValid() error {
	if r.Path == "" {
		return fmt.Errorf("the secret must have a path")
	}
	if r.Values == nil || len(r.Values) <= 0 {
		return fmt.Errorf("the secret must have some values")
	}

	return nil
}

// IsValid validates the policy is ok
func (r Policy) IsValid() error {
	if r.Name == "" {
		return fmt.Errorf("the policy must have a name")
	}

	return nil
}

// IsValid validates the backend is ok
func (r Backend) IsValid() error {
	if r.Path == "" {
		return fmt.Errorf("backend must have a path")
	}
	if r.Type == "" {
		return fmt.Errorf("backend %s must have a type", r.Path)
	}
	if r.Description == "" {
		return fmt.Errorf("backend %s must have a description", r.Path)
	}
	// Lease sanity: max >= default, and neither may be negative.
	if r.MaxLeaseTTL.Seconds() < r.DefaultLeaseTTL.Seconds() {
		return fmt.Errorf("backend: %s, max lease ttl cannot be less than the default", r.Path)
	}
	if r.DefaultLeaseTTL.Seconds() < 0 {
		return fmt.Errorf("backend: %s, default lease time must be positive", r.Path)
	}
	if r.MaxLeaseTTL.Seconds() < 0 {
		return fmt.Errorf("backend: %s, max lease time must be positive", r.Path)
	}
	if !utils.ContainedIn(r.Type, supportedBackendTypes) {
		return fmt.Errorf("backend: %s, unsupported type: %s, supported types are: %s", r.Path, r.Type, supportedBackends())
	}
	if r.Attrs != nil && len(r.Attrs) > 0 {
		for _, x := range r.Attrs {
			// step: ensure the config has a uri
			if x.URI() == "" {
				return fmt.Errorf("backend: %s, config for must have uri", r.Path)
			}
		}
	}

	return nil
}

// supportedBackends returns a comma-joined list of supported backend types
func supportedBackends() string {
	return strings.Join(supportedBackendTypes, ",")
}
package q3

import (
	"runtime"
	"testing"
)

// BenchmarkGetPrimes measures the cost of a single GetPrimes(1000) call.
func BenchmarkGetPrimes(b *testing.B) {
	for n := 0; n < b.N; n++ {
		GetPrimes(1000)
	}
}

// TestMaxProcs prints the effective GOMAXPROCS value for manual inspection;
// it performs no assertions.
func TestMaxProcs(t *testing.T) {
	println(runtime.GOMAXPROCS(0))
}
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package apply

import (
	"context"

	"github.com/cockroachdb/errors"
	"go.etcd.io/etcd/raft/v3/raftpb"
)

// StateMachine represents an instance of a replicated state machine being
// driven by a replication group. The state machine accepts Commands that
// have been committed to the replication group's log and applies them to
// advance to a new state.
//
// All state transitions performed by the state machine are expected to be
// deterministic, which ensures that if each instance is driven from the
// same consistent shared log, they will all stay in sync.
type StateMachine interface {
	// NewBatch creates a new batch that is suitable for accumulating the
	// effects that a group of Commands will have on the replicated state
	// machine. Commands are staged in the batch one-by-one and then the
	// entire batch is committed at once.
	//
	// Batch comes in two flavors - real batches and ephemeral batches.
	// Real batches are capable of accumulating updates from commands and
	// applying them to the state machine. Ephemeral batches are not able
	// to make changes to the durable state machine, but can still be used
	// for the purpose of checking commands to determine whether they will
	// be rejected or not when staged in a real batch. The principal user
	// of ephemeral batches is AckCommittedEntriesBeforeApplication.
	NewBatch(ephemeral bool) Batch
	// ApplySideEffects applies the in-memory side-effects of a Command to
	// the replicated state machine. The method will be called in the order
	// that the commands are committed to the state machine's log. Once the
	// in-memory side-effects of the Command are applied, an AppliedCommand
	// is returned so that it can be finished and acknowledged.
	//
	// The method will always be called with a Command that has been checked
	// and whose persistent state transition has been applied to the state
	// machine. Because this method is called after applying the persistent
	// state transition for a Command, it may not be called in the case of
	// an untimely crash. This means that applying these side-effects will
	// typically update the in-memory representation of the state machine
	// to the same state that it would be in if the process restarted.
	ApplySideEffects(CheckedCommand) (AppliedCommand, error)
}

// ErrRemoved can be returned from ApplySideEffects which will stop the task
// from processing more commands and return immediately. The error should
// only be thrown by non-trivial commands.
var ErrRemoved = errors.New("replica removed")

// Batch accumulates a series of updates from Commands and performs them
// all at once to its StateMachine when applied. Groups of Commands will be
// staged in the Batch such that one or more trivial Commands are staged or
// exactly one non-trivial Command is staged.
type Batch interface {
	// Stage inserts a Command into the Batch. In doing so, the Command is
	// checked for rejection and a CheckedCommand is returned.
	Stage(Command) (CheckedCommand, error)
	// ApplyToStateMachine applies the persistent state transitions staged
	// in the Batch to the StateMachine, atomically.
	ApplyToStateMachine(context.Context) error
	// Close closes the batch and releases any resources that it holds.
	Close()
}

// Decoder is capable of decoding a list of committed raft entries and
// binding any that were locally proposed to their local proposals.
type Decoder interface {
	// DecodeAndBind decodes each of the provided raft entries into commands
	// and binds any that were proposed locally to their local proposals.
	// The method must only be called once per Decoder. It returns whether
	// any of the commands were bound to local proposals waiting for
	// acknowledgement.
	DecodeAndBind(context.Context, []raftpb.Entry) (anyLocal bool, _ error)
	// NewCommandIter creates an iterator over the replicated commands that
	// were passed to DecodeAndBind. The method must not be called until
	// after DecodeAndBind is called.
	NewCommandIter() CommandIterator
	// Reset resets the Decoder and releases any resources that it holds.
	Reset()
}

// Task is an object capable of coordinating the application of commands to
// a replicated state machine after they have been durably committed to a
// raft log.
//
// Committed raft entries are provided to the task through its Decode
// method. The task will then apply these entries to the provided state
// machine when ApplyCommittedEntries is called.
type Task struct {
	sm  StateMachine
	dec Decoder

	// Have entries been decoded yet?
	decoded bool
	// Were any of the decoded commands locally proposed?
	anyLocal bool
	// The maximum number of commands that can be applied in a batch.
	// 0 means no limit; see SetMaxBatchSize.
	batchSize int32
}

// MakeTask creates a new task with the provided state machine and decoder.
func MakeTask(sm StateMachine, dec Decoder) Task {
	return Task{sm: sm, dec: dec}
}

// Decode decodes the committed raft entries into commands and prepares for the
// commands to be applied to the replicated state machine.
func (t *Task) Decode(ctx context.Context, committedEntries []raftpb.Entry) error {
	var err error
	t.anyLocal, err = t.dec.DecodeAndBind(ctx, committedEntries)
	// Mark decoding as done even on error so assertDecoded reflects the call.
	t.decoded = true
	return err
}

// assertDecoded panics if Decode has not been called yet; it guards the
// methods that require decoded commands.
func (t *Task) assertDecoded() {
	if !t.decoded {
		panic("Task.Decode not called yet")
	}
}

// AckCommittedEntriesBeforeApplication attempts to acknowledge the success of
// raft entries that have been durably committed to the raft log but have not
// yet been applied to the proposer replica's replicated state machine.
//
// This is safe because a proposal through raft can be known to have succeeded
// as soon as it is durably replicated to a quorum of replicas (i.e. has
// committed in the raft log). The proposal does not need to wait for the
// effects of the proposal to be applied in order to know whether its changes
// will succeed or fail. This is because the raft log is the provider of
// atomicity and durability for replicated writes, not (ignoring log
// truncation) the replicated state machine itself.
//
// However, there are a few complications to acknowledging the success of a
// proposal at this stage:
//
// 1. Committing an entry in the raft log and having the command in that entry
// succeed are similar but not equivalent concepts. Even if the entry succeeds
// in achieving durability by replicating to a quorum of replicas, its command
// may still be rejected "beneath raft". This means that a (deterministic)
// check after replication decides that the command will not be applied to the
// replicated state machine. In that case, the client waiting on the result of
// the command should not be informed of its success. Luckily, this check is
// cheap to perform so we can do it here and when applying the command.
//
// Determining whether the command will succeed or be rejected before applying
// it for real is accomplished using an ephemeral batch. Commands are staged in
// the ephemeral batch to acquire CheckedCommands, which can then be acknowledged
// immediately even though the ephemeral batch itself cannot be used to update
// the durable state machine. Once the rejection status of each command is
// determined, any successful commands that permit acknowledgement before
// application (see CanAckBeforeApplication) are acknowledged. The ephemeral
// batch is then thrown away.
//
// 2. Some commands perform non-trivial work such as updating Replica configuration
// state or performing Range splits. In those cases, it's likely that the client
// is interested in not only knowing whether it has succeeded in sequencing the
// change in the raft log, but also in knowing when the change has gone into
// effect. There's currently no exposed hook to ask for an acknowledgement only
// after a command has been applied, so for simplicity the current implementation
// only ever acks transactional writes before they have gone into effect. All
// other commands wait until they have been applied to ack their client.
//
// 3. Even though we can determine whether a command has succeeded without applying
// it, the effect of the command will not be visible to conflicting commands until
// it is applied. Because of this, the client can be informed of the success of
// a write at this point, but we cannot release that write's latches until the
// write has applied. See ProposalData.signalProposalResult/finishApplication.
//
// 4. etcd/raft may provide a series of CommittedEntries in a Ready struct that
// haven't actually been appended to our own log. This is most common in single
// node replication groups, but it is possible when a follower in a multi-node
// replication group is catching up after falling behind. In the first case,
// the entries are not yet committed so acknowledging them would be a lie. In
// the second case, the entries are committed so we could acknowledge them at
// this point, but doing so seems risky. To avoid complications in either case,
// the method takes a maxIndex parameter that limits the indexes that it will
// acknowledge. Typically, callers will supply the highest index that they have
// durably written to their raft log for this upper bound.
//
func (t *Task) AckCommittedEntriesBeforeApplication(ctx context.Context, maxIndex uint64) error {
	t.assertDecoded()
	if !t.anyLocal {
		return nil // fast-path
	}

	// Create a new ephemeral application batch. All we're interested in is
	// whether commands will be rejected or not when staged in a real batch.
	batch := t.sm.NewBatch(true /* ephemeral */)
	defer batch.Close()

	iter := t.dec.NewCommandIter()
	defer iter.Close()

	// Collect a batch of trivial commands from the applier. Stop at the first
	// non-trivial command or at the first command with an index above maxIndex.
	batchIter := takeWhileCmdIter(iter, func(cmd Command) bool {
		if cmd.Index() > maxIndex {
			return false
		}
		return cmd.IsTrivial()
	})

	// Stage the commands in the (ephemeral) batch.
	stagedIter, err := mapCmdIter(batchIter, batch.Stage)
	if err != nil {
		return err
	}

	// Acknowledge any locally-proposed commands that succeeded in being staged
	// in the batch and can be acknowledged before they are actually applied.
	// Don't acknowledge rejected proposals early because the StateMachine may
	// want to retry the command instead of returning the error to the client.
	return forEachCheckedCmdIter(ctx, stagedIter, func(cmd CheckedCommand, ctx context.Context) error {
		if !cmd.Rejected() && cmd.IsLocal() && cmd.CanAckBeforeApplication() {
			return cmd.AckSuccess(ctx)
		}
		return nil
	})
}

// SetMaxBatchSize sets the maximum application batch size. If 0, no limit
// will be placed on the number of commands that can be applied in a batch.
func (t *Task) SetMaxBatchSize(size int) {
	t.batchSize = int32(size)
}

// ApplyCommittedEntries applies raft entries that have been committed to the
// raft log but have not yet been applied to the replicated state machine.
func (t *Task) ApplyCommittedEntries(ctx context.Context) error {
	t.assertDecoded()

	iter := t.dec.NewCommandIter()
	for iter.Valid() {
		if err := t.applyOneBatch(ctx, iter); err != nil {
			// If the batch threw an error, reject all remaining commands in the
			// iterator to avoid leaking resources or leaving a proposer hanging.
			//
			// NOTE: forEachCmdIter closes iter.
			if rejectErr := forEachCmdIter(ctx, iter, func(cmd Command, ctx context.Context) error {
				return cmd.AckErrAndFinish(ctx, err)
			}); rejectErr != nil {
				return rejectErr
			}
			return err
		}
	}
	iter.Close()
	return nil
}

// applyOneBatch consumes a batch-worth of commands from the provided iter and
// applies them atomically to the StateMachine. A batch will contain either:
// a) one or more trivial commands
// b) exactly one non-trivial command
func (t *Task) applyOneBatch(ctx context.Context, iter CommandIterator) error {
	// Create a new application batch.
	batch := t.sm.NewBatch(false /* ephemeral */)
	defer batch.Close()

	// Consume a batch-worth of commands.
	pol := trivialPolicy{maxCount: t.batchSize}
	batchIter := takeWhileCmdIter(iter, func(cmd Command) bool {
		return pol.maybeAdd(cmd.IsTrivial())
	})

	// Stage each command in the batch.
	stagedIter, err := mapCmdIter(batchIter, batch.Stage)
	if err != nil {
		return err
	}

	// Apply the persistent state transitions to the state machine.
	if err := batch.ApplyToStateMachine(ctx); err != nil {
		return err
	}

	// Apply the side-effects of each command to the state machine.
	appliedIter, err := mapCheckedCmdIter(stagedIter, t.sm.ApplySideEffects)
	if err != nil {
		return err
	}

	// Finish and acknowledge the outcome of each command.
	return forEachAppliedCmdIter(ctx, appliedIter, AppliedCommand.AckOutcomeAndFinish)
}

// trivialPolicy encodes a batching policy that allows a batch to consist of
// either one or more trivial commands or exactly one non-trivial command.
type trivialPolicy struct {
	maxCount int32

	trivialCount    int32
	nonTrivialCount int32
}

// maybeAdd returns whether a command with the specified triviality should be
// added to a batch given the batching policy. If the method returns true, the
// command is considered to have been added.
func (p *trivialPolicy) maybeAdd(trivial bool) bool {
	if !trivial {
		// A non-trivial command is only allowed into an empty batch.
		if p.trivialCount+p.nonTrivialCount > 0 {
			return false
		}
		p.nonTrivialCount++
		return true
	}
	// A trivial command cannot join a batch holding a non-trivial command.
	if p.nonTrivialCount > 0 {
		return false
	}
	// Respect the optional size cap (maxCount == 0 means unlimited).
	if p.maxCount > 0 && p.maxCount == p.trivialCount {
		return false
	}
	p.trivialCount++
	return true
}

// Close ends the task, releasing any resources that it holds and resetting the
// Decoder. The Task cannot be used again after being closed.
func (t *Task) Close() {
	t.dec.Reset()
	*t = Task{}
}
package main

import (
	"os"
	"testing"
)

// TestNewDeck verifies the size of a fresh deck and its first and last cards.
func TestNewDeck(t *testing.T) {
	d := newDeck()

	if len(d) != 16 {
		t.Errorf("Expected the deck length to be 16, but got %v", len(d))
	}
	if d[0] != "Ace of Spades" {
		t.Errorf("Expected the first card to be 'Ace of Spades', but got %v", d[0])
	}
	if d[len(d)-1] != "Four of King" {
		// Message fixed: this assertion checks the LAST card, not the first.
		t.Errorf("Expected the last card to be 'Four of King', but got %v", d[len(d)-1])
	}
}

// TestSaveToFileAndNewDeckFromFile round-trips a deck through the filesystem
// and verifies that the loaded deck has the same length as the saved one.
func TestSaveToFileAndNewDeckFromFile(t *testing.T) {
	// Remove any leftover fixture from a previous failed run.
	os.Remove("_testingDeck")

	d := newDeck()
	d.saveToFile("_testingDeck")

	// BUG FIX: the original loaded "_testingDec" (missing trailing 'k'), so it
	// never read back the file it had just written.
	loadedDeck := newDeckFromFile("_testingDeck")
	if len(loadedDeck) != len(d) {
		t.Errorf("Expected the length of the loadedDeck to be 16, but got %v", len(loadedDeck))
	}

	os.Remove("_testingDeck")
}
package main

import (
	"encoding/gob"
	"log"
	"os"

	"github.com/diamondburned/cchat/repository"
)

const output = "repository.gob"

// main serializes repository.Main to a gob file on disk. If encoding fails,
// the partially written file is removed before exiting.
func main() {
	outFile, err := os.Create(output)
	if err != nil {
		log.Fatalln("Failed to create file:", err)
	}
	defer outFile.Close()

	enc := gob.NewEncoder(outFile)
	if err := enc.Encode(repository.Main); err != nil {
		// Don't leave a truncated artifact behind.
		os.Remove(output)
		log.Fatalln("Failed to gob encode:", err)
	}
}
package graphqlorm

import (
	"context"
	"fmt"
	"strings"

	"github.com/iancoleman/strcase"
	"github.com/jinzhu/inflection"
	"github.com/machinebox/graphql"
)

// FetchResponse wraps a single query result under the "result" alias used by
// the generated queries below.
type FetchResponse struct {
	Result interface{} `json:"result"`
}

// GetEntityOptions describes a single-entity lookup: the entity type name,
// an optional ID, the fields to select, and an optional filter object.
type GetEntityOptions struct {
	Entity   string
	EntityID *string
	Fields   []string
	Filter   *map[string]interface{}
}

// GetEntity fetches one entity by ID and/or filter, decoding the response
// into res. The query field name is the lowerCamelCase form of Entity, and
// the filter input type is assumed to be "<Entity>FilterType" on the server.
func (c *ORMClient) GetEntity(ctx context.Context, options GetEntityOptions, res interface{}) error {
	// %[1]s = lowerCamel entity (query field), %[2]s = entity (filter type),
	// %[3]s = space-joined field selection. "id" is always selected.
	qFormat := `
		query ($id: ID) {
			result: %[1]s(id:$id) {
				id
				%[3]s
			}
		}
	`
	if options.Filter != nil {
		qFormat = `
			query ($id: ID,$filter:%[2]sFilterType) {
				result: %[1]s(id:$id, filter: $filter) {
					id
					%[3]s
				}
			}
		`
	}
	query := fmt.Sprintf(qFormat, strcase.ToLowerCamel(options.Entity), options.Entity, strings.Join(options.Fields, " "))
	req := graphql.NewRequest(query)
	if options.EntityID != nil {
		req.Var("id", *options.EntityID)
	}
	if options.Filter != nil {
		req.Var("filter", options.Filter)
	}
	return c.run(ctx, req, res)
}

// GetEntitiesSortDirection is the sort-order value used in GetEntitiesSort.
type GetEntitiesSortDirection string

const (
	GetEntitiesSortDirectionASC  GetEntitiesSortDirection = "ASC"
	GetEntitiesSortDirectionDESC GetEntitiesSortDirection = "DESC"
)

// GetEntitiesSort maps a field name to its sort direction.
type GetEntitiesSort map[string]GetEntitiesSortDirection

// GetEntitiesOptions describes a list query: entity type, selected fields,
// and optional filter, sort, and pagination parameters.
type GetEntitiesOptions struct {
	Entity string
	Fields []string
	Filter *map[string]interface{}
	Sort   []GetEntitiesSort
	Limit  *int
	Offset *int
}

// GetEntities fetches a page of entities (items plus total count) into res.
// The query field name is the pluralized lowerCamelCase form of Entity.
func (c *ORMClient) GetEntities(ctx context.Context, options GetEntitiesOptions, res interface{}) error {
	query := fmt.Sprintf(`
		query ($filter: %sFilterType, $sort: [%sSortType!], $limit: Int, $offset: Int) {
			result: %s(filter:$filter,sort:$sort,limit:$limit,offset:$offset) {
				items {
					id
					%s
				}
				count
			}
		}
	`, options.Entity, options.Entity, inflection.Plural(strcase.ToLowerCamel(options.Entity)), strings.Join(options.Fields, " "))
	req := graphql.NewRequest(query)
	if options.Filter != nil {
		req.Var("filter", options.Filter)
	}
	// NOTE(review): sort is set unconditionally, unlike filter/limit/offset —
	// presumably the server accepts a nil/empty sort list; confirm.
	req.Var("sort", options.Sort)
	if options.Offset != nil {
		req.Var("offset", options.Offset)
	}
	if options.Limit != nil {
		req.Var("limit", options.Limit)
	}
	return c.run(ctx, req, res)
}

// SendQuery executes an arbitrary GraphQL query with the given variables,
// decoding the response into res.
func (c *ORMClient) SendQuery(ctx context.Context, query string, variables map[string]interface{}, res interface{}) error {
	req := graphql.NewRequest(query)
	for key, value := range variables {
		req.Var(key, value)
	}
	return c.run(ctx, req, res)
}
// @APIVersion 0.1.0
// @Title GR Community RESTful API
// @Description GR community API implemented with Beego, using dependency injection to support multiple storage backends
// @Contact tiannianshou@gmail.com
// @TermsOfServiceUrl http://github.com/go-react
// @License Apache 2.0
// @LicenseUrl http://www.apache.org/licenses/LICENSE-2.0.html
package routers

import (
	"github.com/astaxie/beego"
	"github.com/go-react/community/controllers"
	"github.com/go-react/community/filters"
)

// init registers the application's routes with Beego at startup.
func init() {
	// Error handling: register the custom error controller.
	beego.ErrorController(&controllers.ErrorController{})
	// Namespace routing: all endpoints are grouped under /v1.
	ns := beego.NewNamespace("/v1",
		beego.NSNamespace("/object",
			beego.NSInclude(
				&controllers.ObjectController{},
			),
		),
		// Runs after each handler; presumably wraps successful responses in a
		// common envelope — confirm in the filters package.
		beego.NSAfter(filters.ResSuccess),
	)
	beego.AddNamespace(ns)
}
package config

import (
	"encoding/json"
	"fmt"
	"os"
)

// LoadSpec opens and decodes the JSON specification at cPath, then validates
// its process section via validateProcessSpec. A missing file yields a
// dedicated "not found" error; any other open/decode error is returned as-is.
func LoadSpec(cPath string) (*Spec, error) {
	cf, err := os.Open(cPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, fmt.Errorf("JSON specification file %s not found", cPath)
		}
		return nil, err
	}
	defer cf.Close()

	var spec *Spec
	if err := json.NewDecoder(cf).Decode(&spec); err != nil {
		return nil, err
	}
	return spec, validateProcessSpec(spec.Process)
}
/* It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order. Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits. */ package main import ( "reflect" "sort" ) // returns sorted list of digits of an argument func digits(n int) []int { res := make([]int, 0, 10) for n != 0 { res = append(res, n%10) n /= 10 } sort.Ints(res) return res } func main() { OuterLoop: for i := 1; ; i++ { iDigits := digits(i) for m := 2; m <= 6; m++ { if !reflect.DeepEqual(iDigits, digits(i*m)) { continue OuterLoop } } println(i, " ", i*2, " ", i*3, " ", i*4, " ", i*5, " ", i*6) return } }
package main

import (
	"fmt"
	"math/big"
	"os"
	"strings"
	"syscall"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/usbwallet"
	"github.com/ethereum/go-ethereum/common"
	ethcmn "github.com/ethereum/go-ethereum/common"
	ethtypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	cli "github.com/jawher/mow.cli"
	"github.com/pkg/errors"
	"golang.org/x/crypto/ssh/terminal"

	"github.com/InjectiveLabs/etherman/keystore"
)

// CLI/env options describing where the Ethereum signing key comes from.
// Populated by readEthereumKeyOptions.
var (
	keystoreDir    *string
	from           *string
	fromPassphrase *string
	fromPrivKey    *string
	useLedger      *bool
)

// readEthereumKeyOptions registers the key-source options (keystore dir,
// from address, passphrase, raw private key, Ledger flag) on the global app.
func readEthereumKeyOptions(
	keystoreDir **string,
	from **string,
	fromPassphrase **string,
	fromPrivKey **string,
	useLedger **bool,
) {
	*keystoreDir = app.String(cli.StringOpt{
		Name:   "keystore-dir",
		Desc:   "Specify Ethereum keystore dir (Geth or Clef) prefix.",
		EnvVar: "DEPLOYER_KEYSTORE_DIR",
	})
	*from = app.String(cli.StringOpt{
		Name:   "F from",
		Desc:   "Specify the from address. If specified, must exist in keystore, ledger or match the privkey.",
		EnvVar: "DEPLOYER_FROM",
	})
	*fromPassphrase = app.String(cli.StringOpt{
		Name:   "from-passphrase",
		Desc:   "Passphrase to unlock the private key from armor, if empty then stdin is used.",
		EnvVar: "DEPLOYER_FROM_PASSPHRASE",
	})
	*fromPrivKey = app.String(cli.StringOpt{
		Name:   "P from-pk",
		Desc:   "Provide a raw Ethereum private key of the validator in hex.",
		EnvVar: "DEPLOYER_FROM_PK",
	})
	*useLedger = app.Bool(cli.BoolOpt{
		Name:   "ledger",
		Desc:   "Use the Ethereum app on hardware ledger to sign transactions.",
		EnvVar: "DEPLOYER_USE_LEDGER",
		Value:  false,
	})
}

var emptyEthAddress = ethcmn.Address{}

// initEthereumAccountsManager resolves the signing account from one of three
// sources, tried in order: Ledger, raw private key, keystore directory. It
// returns the resolved from-address and a bind.SignerFn for transactions.
//
// BUG FIX: the original code used errors.Wrap(err, ...) at several points
// where err was still nil. errors.Wrap returns nil when err is nil, so those
// paths returned (zero address, nil signer, nil error) — callers saw success
// on what was actually a parse/validation failure. Those sites now construct
// fresh errors with errors.New.
func initEthereumAccountsManager(
	chainID uint64,
	keystoreDir *string,
	from *string,
	fromPassphrase *string,
	fromPrivKey *string,
	useLedger *bool,
) (
	fromAddress ethcmn.Address,
	signerFn bind.SignerFn,
	err error,
) {
	switch {
	case *useLedger:
		if from == nil {
			err := errors.New("cannot use Ledger without from address specified")
			return emptyEthAddress, nil, err
		}

		fromAddress = ethcmn.HexToAddress(*from)
		if fromAddress == (ethcmn.Address{}) {
			// was: errors.Wrap(nil, ...) which silently returned nil
			err = errors.New("failed to parse Ethereum from address")
			return emptyEthAddress, nil, err
		}

		ledgerBackend, err := usbwallet.NewLedgerHub()
		if err != nil {
			err = errors.Wrap(err, "failed to connect with Ethereum app on Ledger device")
			return emptyEthAddress, nil, err
		}

		// The signer scans the hub's wallets for the requested account and
		// signs with the first wallet that contains it.
		signerFn = func(from common.Address, tx *ethtypes.Transaction) (*ethtypes.Transaction, error) {
			acc := accounts.Account{
				Address: from,
			}
			wallets := ledgerBackend.Wallets()
			for _, w := range wallets {
				if err := w.Open(""); err != nil {
					err = errors.Wrap(err, "failed to connect to wallet on Ledger device")
					return nil, err
				}
				if !w.Contains(acc) {
					if err := w.Close(); err != nil {
						err = errors.Wrap(err, "failed to disconnect the wallet on Ledger device")
						return nil, err
					}
					continue
				}

				tx, err = w.SignTx(acc, tx, new(big.Int).SetUint64(chainID))
				_ = w.Close()
				return tx, err
			}

			return nil, errors.Errorf("account %s not found on Ledger", from.String())
		}

		return fromAddress, signerFn, nil

	case len(*fromPrivKey) > 0:
		pkHex := strings.TrimPrefix(*fromPrivKey, "0x")
		ethPk, err := crypto.HexToECDSA(pkHex)
		if err != nil {
			err = errors.Wrap(err, "failed to hex-decode Ethereum ECDSA Private Key")
			return emptyEthAddress, nil, err
		}

		ethAddressFromPk := crypto.PubkeyToAddress(ethPk.PublicKey)

		// If an explicit from address was also given, it must match the key.
		if len(*from) > 0 {
			addr := ethcmn.HexToAddress(*from)
			if addr == (ethcmn.Address{}) {
				// was: errors.Wrap(nil, ...) which silently returned nil
				err = errors.New("failed to parse Ethereum from address")
				return emptyEthAddress, nil, err
			} else if addr != ethAddressFromPk {
				// was: errors.Wrap(nil, ...) which silently returned nil
				err = errors.New("Ethereum from address does not match address from ECDSA Private Key")
				return emptyEthAddress, nil, err
			}
		}

		txOpts, err := bind.NewKeyedTransactorWithChainID(ethPk, new(big.Int).SetUint64(chainID))
		if err != nil {
			// Preserve the underlying cause instead of discarding it.
			err = errors.Wrap(err, "failed to init NewKeyedTransactorWithChainID")
			return emptyEthAddress, nil, err
		}

		return txOpts.From, txOpts.Signer, nil

	case len(*keystoreDir) > 0:
		if from == nil {
			err := errors.New("cannot use Ethereum keystore without from address specified")
			return emptyEthAddress, nil, err
		}

		fromAddress = ethcmn.HexToAddress(*from)
		if fromAddress == (ethcmn.Address{}) {
			// was: errors.Wrap(nil, ...) which silently returned nil
			err = errors.New("failed to parse Ethereum from address")
			return emptyEthAddress, nil, err
		}

		if info, err := os.Stat(*keystoreDir); err != nil || !info.IsDir() {
			err = errors.New("failed to locate keystore dir")
			return emptyEthAddress, nil, err
		}

		ks, err := keystore.New(*keystoreDir)
		if err != nil {
			err = errors.Wrap(err, "failed to load keystore")
			return emptyEthAddress, nil, err
		}

		// Prefer the passphrase option; fall back to an interactive prompt.
		var pass string
		if len(*fromPassphrase) > 0 {
			pass = *fromPassphrase
		} else {
			pass, err = ethPassFromStdin()
			if err != nil {
				return emptyEthAddress, nil, err
			}
		}

		signerFn, err := ks.SignerFn(chainID, fromAddress, pass)
		if err != nil {
			err = errors.Wrapf(err, "failed to load key for %s", fromAddress)
			return emptyEthAddress, nil, err
		}

		return fromAddress, signerFn, nil

	default:
		err := errors.New("insufficient ethereum key details provided")
		return emptyEthAddress, nil, err
	}
}

// ethPassFromStdin prompts for and reads a passphrase from the terminal
// without echoing it, returning the trimmed result.
func ethPassFromStdin() (string, error) {
	fmt.Print("Passphrase for Ethereum account: ")
	bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		err := errors.Wrap(err, "failed to read password from stdin")
		return "", err
	}

	password := string(bytePassword)
	return strings.TrimSpace(password), nil
}
package odoo

import (
	"fmt"
)

// IrFieldsConverter represents ir.fields.converter model.
type IrFieldsConverter struct {
	// BUG FIX: the struct tags read "omptempty" (typo), so the option was
	// silently ignored by the tag parser. Corrected to "omitempty".
	LastUpdate  *Time   `xmlrpc:"__last_update,omitempty"`
	DisplayName *String `xmlrpc:"display_name,omitempty"`
	Id          *Int    `xmlrpc:"id,omitempty"`
}

// IrFieldsConverters represents array of ir.fields.converter model.
type IrFieldsConverters []IrFieldsConverter

// IrFieldsConverterModel is the odoo model name.
const IrFieldsConverterModel = "ir.fields.converter"

// Many2One convert IrFieldsConverter to *Many2One.
func (ifc *IrFieldsConverter) Many2One() *Many2One {
	return NewMany2One(ifc.Id.Get(), "")
}

// CreateIrFieldsConverter creates a new ir.fields.converter model and returns its id.
func (c *Client) CreateIrFieldsConverter(ifc *IrFieldsConverter) (int64, error) {
	ids, err := c.CreateIrFieldsConverters([]*IrFieldsConverter{ifc})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateIrFieldsConverters creates new ir.fields.converter models and returns their ids.
func (c *Client) CreateIrFieldsConverters(ifcs []*IrFieldsConverter) ([]int64, error) {
	var vv []interface{}
	for _, v := range ifcs {
		vv = append(vv, v)
	}
	return c.Create(IrFieldsConverterModel, vv)
}

// UpdateIrFieldsConverter updates an existing ir.fields.converter record.
func (c *Client) UpdateIrFieldsConverter(ifc *IrFieldsConverter) error {
	return c.UpdateIrFieldsConverters([]int64{ifc.Id.Get()}, ifc)
}

// UpdateIrFieldsConverters updates existing ir.fields.converter records.
// All records (represented by ids) will be updated by ifc values.
func (c *Client) UpdateIrFieldsConverters(ids []int64, ifc *IrFieldsConverter) error {
	return c.Update(IrFieldsConverterModel, ids, ifc)
}

// DeleteIrFieldsConverter deletes an existing ir.fields.converter record.
func (c *Client) DeleteIrFieldsConverter(id int64) error {
	return c.DeleteIrFieldsConverters([]int64{id})
}

// DeleteIrFieldsConverters deletes existing ir.fields.converter records.
func (c *Client) DeleteIrFieldsConverters(ids []int64) error {
	return c.Delete(IrFieldsConverterModel, ids)
}

// GetIrFieldsConverter gets ir.fields.converter existing record.
func (c *Client) GetIrFieldsConverter(id int64) (*IrFieldsConverter, error) {
	ifcs, err := c.GetIrFieldsConverters([]int64{id})
	if err != nil {
		return nil, err
	}
	// ifcs is always non-nil here; only the length check matters.
	if len(*ifcs) > 0 {
		return &((*ifcs)[0]), nil
	}
	return nil, fmt.Errorf("id %v of ir.fields.converter not found", id)
}

// GetIrFieldsConverters gets ir.fields.converter existing records.
func (c *Client) GetIrFieldsConverters(ids []int64) (*IrFieldsConverters, error) {
	ifcs := &IrFieldsConverters{}
	if err := c.Read(IrFieldsConverterModel, ids, nil, ifcs); err != nil {
		return nil, err
	}
	return ifcs, nil
}

// FindIrFieldsConverter finds ir.fields.converter record by querying it with criteria.
func (c *Client) FindIrFieldsConverter(criteria *Criteria) (*IrFieldsConverter, error) {
	ifcs := &IrFieldsConverters{}
	if err := c.SearchRead(IrFieldsConverterModel, criteria, NewOptions().Limit(1), ifcs); err != nil {
		return nil, err
	}
	if len(*ifcs) > 0 {
		return &((*ifcs)[0]), nil
	}
	return nil, fmt.Errorf("ir.fields.converter was not found with criteria %v", criteria)
}

// FindIrFieldsConverters finds ir.fields.converter records by querying it
// and filtering it with criteria and options.
func (c *Client) FindIrFieldsConverters(criteria *Criteria, options *Options) (*IrFieldsConverters, error) {
	ifcs := &IrFieldsConverters{}
	if err := c.SearchRead(IrFieldsConverterModel, criteria, options, ifcs); err != nil {
		return nil, err
	}
	return ifcs, nil
}

// FindIrFieldsConverterIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindIrFieldsConverterIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(IrFieldsConverterModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}

// FindIrFieldsConverterId finds record id by querying it with criteria.
func (c *Client) FindIrFieldsConverterId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(IrFieldsConverterModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("ir.fields.converter was not found with criteria %v and options %v", criteria, options)
}
package gae

import (
	"github.com/ahmadmuzakki/gae/internal"
	"golang.org/x/net/context"
	"google.golang.org/appengine"
)

// Namespace records ns on the context via the internal package and then
// delegates to appengine.Namespace, returning its namespaced context and
// error. The internal annotation presumably exists so other wrappers in this
// package can recover the namespace later — confirm in the internal package.
func Namespace(ctx context.Context, ns string) (context.Context, error) {
	ctx = internal.WithNamespace(ctx, ns)
	return appengine.Namespace(ctx, ns)
}
package user

import (
	"errors"

	uuid "github.com/satori/go.uuid"
)

// ErrDidNotComplete is returned when a database operation did not run to completion.
var ErrDidNotComplete = errors.New("database operation did not complete")

// ErrUserNotFound is returned when the user can't be found.
var ErrUserNotFound = errors.New("user not found")

// ErrUserAlreadyExists is returned when attempting to create a user that already exists.
var ErrUserAlreadyExists = errors.New("user already exists")

// ErrUsernameUnavailable is returned when the requested username is already taken.
var ErrUsernameUnavailable = errors.New("username not available")

// ErrPreparingQuery is returned when a database query could not be prepared.
var ErrPreparingQuery = errors.New("issue preparing query")

// ErrUnexpected is returned for otherwise-unclassified failures.
var ErrUnexpected = errors.New("unexpected error occurred")

//var ErrSessionUuidDoesNotExist = errors.New("session does not exist")
//var ErrSessionUuidAlreadyExists = errors.New("session does not exist")

// Store represents a store for Users.
//
// Store abstracts the common actions involving the database for users,
// abstracting the underlying interaction with the database.
type Store interface {
	// CREATE /////////////////////////////////////////////////////////////////////////////////////////////////////////

	// CreateSession creates a new session entry with the provided session information.
	//TODO: CreateSession(sessionUuid uuid.UUID, sessionId session.SessionID) error

	// CreateUserSession creates a new session entry and associates it with the given user.
	//TODO: CreateUserSession(userUuid uuid.UUID, sessionUuid uuid.UUID, sessionId session.SessionID) error

	// CreateUserSessionAssociation associates an existing session entry with the given user.
	//TODO: CreateUserSessionAssociation(userUuid uuid.UUID, sessionUuid uuid.UUID) error

	// CreateUser will add the new user to the database.
	CreateUser(newUser *NewUser) (*User, error)

	// CreateUserEmail adds the email to the given user's account.
	//TODO: CreateUserEmail(userUuid uuid.UUID, email string) error

	// READ /////////////////////////////////////////////////////////////////////////////////////////////////////////

	// ReadProcedureVersion gets the procedure version implemented in the database.
	//TODO: ReadProcedureVersion() *utility.SemVer

	// ReadUserActiveSessions gets the active sessions of the user by uuid.
	//TODO: ReadUserActiveSessions(userUuid uuid.UUID) (TODO: Define Type, error)

	// ReadUserSessions gets all the sessions associated with the user.
	//TODO: ReadUserSessions(userUuid uuid.UUID) (TODO: define type, error)

	// ReadUserDisplayName gets the display name for the given user.
	//TODO: ReadUserDisplayName(userUuid uuid.UUID) (string, error)

	// ReadUserFullName gets the full name for the given user.
	//TODO: ReadUserFullName(userUuid uuid.UUID)

	// ReadUserEmails gets the list of emails associated with the user.
	//TODO: ReadUserEmails(userUuid uuid.UUID) (TODO: define type, error)

	// ReadUserEncodedHash gets the encoded hash of the user's password,
	// looked up by username.
	ReadUserEncodedHash(username string) (string, error)

	// ReadUserInfo gets the basic information about the user.
	ReadUserInfo(userUuid uuid.UUID) (*User, error)

	// ReadUserProfile gets the profile information for the user.
	//TODO: ReadUserProfile(userUuid uuid.UUID) (TODO: define type, error)

	// ReadUserUsername gets the username for the given user.
	//TODO: ReadUserUsername(userUuid uuid.UUID) (string, error)

	// ReadUserUsernamesByEmail gets the usernames associated with a given email.
	//TODO: ReadUserUsernamesByEmail(email string) (TODO: define type, error)

	// ReadUserUuid gets the uuid for the user based on the given username.
	ReadUserUuid(username string) (*uuid.UUID, error)

	// UPDATE /////////////////////////////////////////////////////////////////////////////////////////////////////////

	// UpdateSessionExpired sets the given session's status to "Expired".
	//TODO: UpdateSessionExpired(sessionUuid uuid.UUID) error

	// UpdateUserDisplayName updates the display name of the user.
	//TODO: UpdateUserDisplayName(displayName string) error

	// UpdateUserEncodedHash updates the encoded hash associated with the user.
	//TODO: UpdateUserEncodedHash(userUuid uuid.UUID, encodedHash string) error

	// UpdateUserFullName updates the full name of the user.
	//TODO: UpdateUserFullName(userUuid uuid.UUID, fullName string) error

	// UpdateUserProfileBio updates the bio associated with the user's profile.
	//TODO: UpdateUserProfileBio(userUuid uuid.UUID, bio string) error

	// UpdateUserProfileGravatarUrl updates the gravatar url associated with the user's profile.
	//TODO: UpdateUserProfileGravatarUrl(userUuid uuid.UUID, gravatarUrl string) error

	// UpdateUserProfileSharingBio updates the public sharing preference for the bio
	// associated with the user's profile.
	//TODO: UpdateUserProfileSharingBio(userUuid uuid.UUID, share bool) error

	// UpdateUserProfileSharingDisplayName updates the public sharing preference for the display name
	// associated with the user's profile.
	//TODO: UpdateUserProfileSharingDisplayName(userUuid uuid.UUID, share bool) error

	// UpdateUserProfileSharingGravatarUrl updates the public sharing preference for the gravatar url
	// associated with the user's profile.
	//TODO: UpdateUserProfileSharingGravatarUrl(userUuid uuid.UUID, share bool) error

	// UpdateUserUsername updates the username for the given user.
	//TODO: UpdateUserUsername(userUuid uuid.UUID, username string) error

	// DELETE /////////////////////////////////////////////////////////////////////////////////////////////////////////

	// DeleteUser removes the user from the database.
	DeleteUser(userUuid uuid.UUID) error

	// DeleteUserEmail removes the given email from the users account.
	//TODO: DeleteUserEmail(userUuid uuid.UUID, email string)

	// DeleteSession removes the given session from the list.
	//TODO: DeleteSession(sessionUuid uuid.UUID) error
}
package main

import (
	"encoding/base64"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"sort"

	"github.com/steakknife/hamming"
)

// Key pairs a candidate key length with its normalized Hamming distance;
// smaller distances indicate more likely key lengths.
type Key struct {
	length  int
	hamming int
}

// Candidate holds one full decryption attempt: the recovered key, its
// length, the resulting plaintext, and its frequency score (lower is better).
type Candidate struct {
	plaintext []byte
	score     float64
	key       []byte
	length    int
}

type Keys []Key
type Candidates []Candidate

// sort.Interface: candidates ordered by ascending score (best guess first).
func (c Candidates) Len() int           { return len(c) }
func (c Candidates) Less(i, j int) bool { return c[i].score < c[j].score }
func (c Candidates) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }

// sort.Interface: key lengths ordered by ascending normalized Hamming distance.
func (k Keys) Len() int           { return len(k) }
func (k Keys) Less(i, j int) bool { return k[i].hamming < k[j].hamming }
func (k Keys) Swap(i, j int)      { k[i], k[j] = k[j], k[i] }

// readFile reads the whole file at fPath, terminating the program on failure.
func readFile(fPath string) []byte {
	text, err := ioutil.ReadFile(fPath)
	if err != nil {
		log.Fatal(err)
	}
	return text
}

// morph splits the ciphertext into m transposed columns: column j holds every
// m'th byte starting at offset j, so each column was XORed with a single key
// byte and can be attacked independently.
func morph(c []byte, m int) [][]byte {
	cc := make([][]byte, m)
	for s := range c {
		cc[s%m] = append(cc[s%m], c[s])
	}
	return cc
}

// repxor XORs text with the repeating key k.
func repxor(text []byte, k []byte) []byte {
	result := make([]byte, len(text))
	for i := 0; i < len(text); i++ {
		result[i] = text[i] ^ k[i%len(k)]
	}
	return result
}

// cipher XORs every byte of text with the single key byte k.
func cipher(text []byte, k byte) []byte {
	result := make([]byte, len(text))
	for i := 0; i < len(text); i++ {
		result[i] = text[i] ^ k
	}
	return result
}

// keyCandidate returns the single-byte XOR key whose decryption of text
// yields the lowest Score. All 256 possible byte values are tried.
// (BUG FIX: the loop previously stopped at 254, never testing key 0xFF, and
// treated a best score of exactly 0 as "unset".)
func keyCandidate(text []byte) byte {
	var best float64
	var found bool
	var k byte
	for i := 0; i < 256; i++ {
		s := Score(cipher(text, byte(i)))
		if !found || s < best {
			found = true
			best = s
			k = byte(i)
		}
	}
	return k
}

// main reads a base64-encoded, repeating-key-XOR ciphertext from the file
// given by -f, ranks candidate key lengths by normalized Hamming distance,
// recovers the key for each length by per-column frequency analysis, and
// prints the five best-scoring decryptions.
func main() {
	var fPath string
	var text []byte
	var keys Keys
	var candidates Candidates

	flag.StringVar(&fPath, "f", "", "File path to the file to read in")
	flag.Parse()

	valid := true
	if fPath == "" {
		fmt.Println("Filename (-f) must be specified")
		valid = false
	} else if _, err := os.Stat(fPath); os.IsNotExist(err) {
		fmt.Println("Filename (-f): File does not exist:", fPath)
		valid = false
	}
	// BUG FIX: previously execution continued (and decoded nil) even when the
	// arguments were invalid.
	if !valid {
		os.Exit(1)
	}
	text = readFile(fPath)

	// BUG FIX: the base64 decode error was silently discarded.
	decoded, err := base64.StdEncoding.DecodeString(string(text))
	if err != nil {
		log.Fatal(err)
	}
	text = decoded

	// Rank key lengths 2..40 by the Hamming distance between the first two
	// key-sized blocks, normalized by the length. Guard against short inputs.
	maxLen := 40
	if m := len(text) / 2; m < maxLen {
		maxLen = m
	}
	for i := 2; i <= maxLen; i++ {
		k := Key{}
		k.hamming = hamming.Bytes(text[0:i], text[i:i*2]) / i
		k.length = i
		keys = append(keys, k)
	}
	sort.Sort(keys)
	fmt.Printf("Hamming Calculated....\n")
	fmt.Printf("Investigating....\n")

	// For every candidate length, solve each transposed column as a
	// single-byte XOR and score the resulting full decryption.
	for s := range keys {
		c := Candidate{}
		cc := morph(text, keys[s].length)
		var key []byte
		for i := range cc {
			key = append(key, keyCandidate(cc[i]))
		}
		c.key = key
		c.length = keys[s].length
		c.plaintext = repxor(text, key)
		c.score = Score(c.plaintext)
		candidates = append(candidates, c)
	}
	sort.Sort(candidates)

	// Print the top five guesses (or fewer if fewer candidates exist).
	for a := 0; a < 5 && a < len(candidates); a++ {
		fmt.Printf("Best Guess..\n")
		fmt.Printf("Key Length: %d\n", candidates[a].length)
		fmt.Printf("Key: %x (%s)\n", candidates[a].key, candidates[a].key)
		fmt.Printf("Plaintext: \n\n%s\n\n", candidates[a].plaintext)
	}
}
package gooob import ( "net" "strconv" ) type OOBConnection struct { addr *net.UDPAddr password string } func newConnection(raddr *net.UDPAddr, password string) (*OOBConnection, error) { return &OOBConnection{raddr, password}, nil } func (conn *OOBConnection) Send(cmd string) (response string, err error) { buffer, err := conn.sendAndClean(cmd) return string(buffer), err } func (conn *OOBConnection) Info(version int, flags string) (response InfoResponse, err error) { cmd := "info " + strconv.Itoa(version) + " " + flags buffer, err := conn.sendAndClean(cmd) if err != nil { return } return conn.parseInfo(buffer) } func (conn *OOBConnection) GetInfo() (response InfoResponse, error error) { challenge := conn.newChallenge() cmd := "getinfo " + challenge buffer, err := conn.sendAndClean(cmd) if err != nil { return } return conn.parseGetInfo(buffer) } func (conn *OOBConnection) GetStatus() (response StatusResponse, error error) { challenge := conn.newChallenge() cmd := "getstatus " + challenge buffer, err := conn.sendAndClean(cmd) if err != nil { return } return conn.parseGetStatus(buffer) } func (conn *OOBConnection) Rcon(cmd string) (response RconResponse, err error) { cmd = "rcon " + conn.password + cmd buffer, err := conn.sendAndClean(cmd) if err != nil { return } return conn.parseRcon(buffer) }
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"

	// For the debug http handlers.
	_ "net/http/pprof"
	"runtime"
	"strings"
	"time"

	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
	"github.com/cockroachdb/cockroach/pkg/util/syncutil"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/cockroach/pkg/util/version"
	"github.com/petermattis/goid"
)

// testSpec is the declarative description of a roachtest: its identity,
// ownership, scheduling constraints, cluster requirements, and the function
// that actually runs it.
type testSpec struct {
	Skip string // if non-empty, test will be skipped
	// When Skip is set, this can contain more text to be printed in the logs
	// after the "--- SKIP" line.
	SkipDetails string

	Name string
	// Owner is the name of the team responsible for signing off on failures of
	// this test that happen in the release process. This must be one of a limited
	// set of values (the keys in the roachtestTeams map).
	Owner Owner
	// The maximum duration the test is allowed to run before it is considered
	// failed. If not specified, the default timeout is 10m before the test's
	// associated cluster expires. The timeout is always truncated to 10m before
	// the test's cluster expires.
	Timeout time.Duration
	// MinVersion indicates the minimum cockroach version that is required for
	// the test to be run. If MinVersion is less than the version specified
	// --cockroach-version, Skip will be populated causing the test to be
	// skipped.
	MinVersion string
	// minVersion is the parsed form of MinVersion.
	minVersion *version.Version
	// Tags is a set of tags associated with the test that allow grouping
	// tests. If no tags are specified, the set ["default"] is automatically
	// given.
	Tags []string
	// Cluster provides the specification for the cluster to use for the test.
	Cluster clusterSpec

	// UseIOBarrier controls the local-ssd-no-ext4-barrier flag passed to
	// roachprod when creating a cluster. If set, the flag is not passed, and so
	// you get durable writes. If not set (the default!), the filesystem is
	// mounted without the barrier.
	//
	// The default (false) is chosen because the no-barrier option is needed
	// explicitly by some tests (particularly benchmarks, ironically, since they'd
	// rather measure other things than I/O) and the vast majority of other tests
	// don't care - there's no durability across machine crashes that roachtests
	// care about.
	UseIOBarrier bool

	// NonReleaseBlocker indicates that a test failure should not be tagged as a
	// release blocker. Use this for tests that are not yet stable but should
	// still be run regularly.
	NonReleaseBlocker bool

	// RequiresLicense indicates that the test requires an
	// enterprise license to run correctly. Use this to ensure
	// tests will fail-early if COCKROACH_DEV_LICENSE is not set
	// in the environment.
	RequiresLicense bool

	// Run is the test function.
	Run func(ctx context.Context, t *test, c *cluster)
}

// perfArtifactsDir is the directory on cluster nodes in which perf artifacts
// reside. Upon success this directory is copied into test artifactsDir from
// each node in the cluster.
const perfArtifactsDir = "perf"

// matchOrSkip returns true if the filter matches the test. If the filter does
// not match the test because the tag filter does not match, the test is
// matched, but marked as skipped.
func (t *testSpec) matchOrSkip(filter *testFilter) bool {
	if !filter.name.MatchString(t.Name) {
		return false
	}
	if len(t.Tags) == 0 {
		// Untagged tests belong to the implicit "default" tag set.
		if !filter.tag.MatchString("default") {
			t.Skip = fmt.Sprintf("%s does not match [default]", filter.rawTag)
		}
		return true
	}
	// NOTE: the loop variable deliberately shadows the receiver t inside the
	// loop body; only the tag string is used there.
	for _, t := range t.Tags {
		if filter.tag.MatchString(t) {
			return true
		}
	}
	t.Skip = fmt.Sprintf("%s does not match %s", filter.rawTag, t.Tags)
	return true
}

// testStatus is one goroutine's current status line plus its progress
// fraction, as shown on the runner's progress page.
type testStatus struct {
	msg      string
	time     time.Time
	progress float64
}

// test is the runtime state of a single roachtest execution. It implements
// the testing.TB-like surface (Fatal/Skip/Errorf/...) used by test bodies.
type test struct {
	spec *testSpec
	// buildVersion is the version of the Cockroach binary that the test will run
	// against.
	buildVersion version.Version
	// l is the logger that the test will use for its output.
	l      *logger
	runner string
	// runnerID is the test's main goroutine ID.
	runnerID int64
	start    time.Time
	end      time.Time

	// artifactsDir is the path to the directory holding all the artifacts for
	// this test. It will contain a test.log file and cluster logs.
	artifactsDir string
	// artifactsSpec is a TeamCity artifacts spec used to publish this test's
	// artifacts. See:
	// https://www.jetbrains.com/help/teamcity/2019.1/configuring-general-settings.html#Artifact-Paths
	artifactsSpec string

	mu struct {
		syncutil.RWMutex
		done   bool
		failed bool
		// cancel, if set, is called from the t.Fatal() family of functions when the
		// test is being marked as failed (i.e. when the failed field above is also
		// set). This is used to cancel the context passed to t.spec.Run(), so async
		// test goroutines can be notified.
		cancel  func()
		failLoc struct {
			file string
			line int
		}
		failureMsg string
		// status is a map from goroutine id to status set by that goroutine. A
		// special goroutine is indicated by runnerID; that one provides the test's
		// "main status".
		status map[int64]testStatus
		output []byte
	}
}

func (t *test) Helper() {}

// Name returns the test's name from its spec.
func (t *test) Name() string {
	return t.spec.Name
}

func (t *test) logger() *logger {
	return t.l
}

// status sets (or, with no args, clears) the status message for goroutine id.
func (t *test) status(ctx context.Context, id int64, args ...interface{}) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.mu.status == nil {
		t.mu.status = make(map[int64]testStatus)
	}
	if len(args) == 0 {
		delete(t.mu.status, id)
		return
	}
	msg := fmt.Sprint(args...)
	t.mu.status[id] = testStatus{
		msg:  msg,
		time: timeutil.Now(),
	}
	if !t.l.closed() {
		if id == t.runnerID {
			t.l.PrintfCtxDepth(ctx, 3, "test status: %s", msg)
		} else {
			t.l.PrintfCtxDepth(ctx, 3, "test worker status: %s", msg)
		}
	}
}

// Status sets the main status message for the test. When called from the main
// test goroutine (i.e. the goroutine on which testSpec.Run is invoked), this
// is equivalent to calling WorkerStatus. If no arguments are specified, the
// status message is erased.
func (t *test) Status(args ...interface{}) {
	t.status(context.TODO(), t.runnerID, args...)
}

// GetStatus returns the status of the test's main goroutine.
func (t *test) GetStatus() string {
	t.mu.Lock()
	defer t.mu.Unlock()
	status, ok := t.mu.status[t.runnerID]
	if ok {
		return fmt.Sprintf("%s (set %s ago)", status.msg, timeutil.Now().Sub(status.time).Round(time.Second))
	}
	return "N/A"
}

// WorkerStatus sets the status message for a worker goroutine associated with
// the test. The status message should be cleared before the goroutine exits by
// calling WorkerStatus with no arguments.
func (t *test) WorkerStatus(args ...interface{}) {
	t.status(context.TODO(), goid.Get(), args...)
}

// progress records the progress fraction for goroutine id, preserving its
// existing status message.
func (t *test) progress(id int64, frac float64) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.mu.status == nil {
		t.mu.status = make(map[int64]testStatus)
	}
	status := t.mu.status[id]
	status.progress = frac
	t.mu.status[id] = status
}

// Progress sets the progress (a fraction in the range [0,1]) associated with
// the main test status message. When called from the main test goroutine
// (i.e. the goroutine on which testSpec.Run is invoked), this is equivalent to
// calling WorkerProgress.
func (t *test) Progress(frac float64) {
	t.progress(t.runnerID, frac)
}

// WorkerProgress sets the progress (a fraction in the range [0,1]) associated
// with a worker status message.
func (t *test) WorkerProgress(frac float64) {
	t.progress(goid.Get(), frac)
}

var _ skip.SkippableTest = (*test)(nil)

// Skip skips the test. The first argument if any is the main message.
// The remaining argument, if any, form the details.
// This implements the skip.SkippableTest interface.
func (t *test) Skip(args ...interface{}) {
	if len(args) > 0 {
		t.spec.Skip = fmt.Sprint(args[0])
		args = args[1:]
	}
	t.spec.SkipDetails = fmt.Sprint(args...)
	panic(errTestFatal)
}

// Skipf skips the test. The formatted message becomes the skip reason.
// This implements the skip.SkippableTest interface.
func (t *test) Skipf(format string, args ...interface{}) {
	t.spec.Skip = fmt.Sprintf(format, args...)
	panic(errTestFatal)
}

// Fatal marks the test as failed, prints the args to t.l, and calls
// panic(errTestFatal). It can be called multiple times.
//
// If the only argument is an error, it is formatted by "%+v", so it will show
// stack traces and such.
//
// ATTENTION: Since this calls panic(errTestFatal), it should only be called
// from a test's closure. The test runner itself should never call this.
func (t *test) Fatal(args ...interface{}) {
	t.fatalfInner("" /* format */, args...)
}

// Fatalf is like Fatal, but takes a format string.
func (t *test) Fatalf(format string, args ...interface{}) {
	t.fatalfInner(format, args...)
}

// FailNow implements the TestingT interface.
func (t *test) FailNow() {
	t.Fatal()
}

// Errorf implements the TestingT interface.
func (t *test) Errorf(format string, args ...interface{}) {
	t.Fatalf(format, args...)
}

func (t *test) fatalfInner(format string, args ...interface{}) {
	// Skip two frames: our own and the caller.
	if format != "" {
		t.printfAndFail(2 /* skip */, format, args...)
	} else {
		t.printAndFail(2 /* skip */, args...)
	}
	panic(errTestFatal)
}

// FatalIfErr calls t.Fatal() if err != nil.
func FatalIfErr(t *test, err error) {
	if err != nil {
		t.fatalfInner("" /* format */, err)
	}
}

func (t *test) printAndFail(skip int, args ...interface{}) {
	var msg string
	if len(args) == 1 {
		// If we were passed only an error, then format it with "%+v" in order to
		// get any stack traces.
		if err, ok := args[0].(error); ok {
			msg = fmt.Sprintf("%+v", err)
		}
	}
	if msg == "" {
		msg = fmt.Sprint(args...)
	}
	t.failWithMsg(t.decorate(skip+1, msg))
}

func (t *test) printfAndFail(skip int, format string, args ...interface{}) {
	if format == "" {
		panic(fmt.Sprintf("invalid empty format. args: %s", args))
	}
	t.failWithMsg(t.decorate(skip+1, fmt.Sprintf(format, args...)))
}

// failWithMsg records msg as (part of) the test's failure, logs it, and
// cancels the test's context if a cancel function was registered.
func (t *test) failWithMsg(msg string) {
	t.mu.Lock()
	defer t.mu.Unlock()

	prefix := ""
	if t.mu.failed {
		prefix = "[not the first failure] "
		// NB: the first failure is not always the relevant one due to:
		// https://github.com/cockroachdb/cockroach/issues/44436
		//
		// So we chain all failures together in the order in which we see
		// them.
		msg = "\n" + msg
	}
	t.l.Printf("%stest failure: %s", prefix, msg)

	t.mu.failed = true
	t.mu.failureMsg += msg
	t.mu.output = append(t.mu.output, msg...)
	if t.mu.cancel != nil {
		t.mu.cancel()
	}
}

// decorate prefixes s with the caller stack (up to and including the runner
// frame) and records the failure location used for issue assignment.
//
// Args:
// skip: The number of stack frames to exclude from the result. 0 means that
// the caller will be the first frame identified. 1 means the caller's caller
// will be the first, etc.
func (t *test) decorate(skip int, s string) string {
	// Skip two extra frames to account for this function and runtime.Callers
	// itself.
	var pc [50]uintptr
	n := runtime.Callers(2+skip, pc[:])
	if n == 0 {
		panic("zero callers found")
	}

	buf := new(bytes.Buffer)
	frames := runtime.CallersFrames(pc[:n])
	sep := "\t"
	runnerFound := false
	for {
		if runnerFound {
			break
		}
		frame, more := frames.Next()
		if !more {
			break
		}
		if frame.Function == t.runner {
			runnerFound = true

			// Handle the special case of the runner function being the caller of
			// t.Fatal(). In that case, that's the line to be used for issue creation.
			if t.mu.failLoc.file == "" {
				t.mu.failLoc.file = frame.File
				t.mu.failLoc.line = frame.Line
			}
		}
		if !t.mu.failed && !runnerFound {
			// Keep track of the highest stack frame that is lower than the t.runner
			// stack frame. This is used to determine the author of that line of code
			// and issue assignment.
			t.mu.failLoc.file = frame.File
			t.mu.failLoc.line = frame.Line
		}

		file := frame.File
		if index := strings.LastIndexByte(file, '/'); index >= 0 {
			file = file[index+1:]
		}
		fmt.Fprintf(buf, "%s%s:%d", sep, file, frame.Line)
		sep = ","
	}
	buf.WriteString(": ")

	lines := strings.Split(s, "\n")
	if l := len(lines); l > 1 && lines[l-1] == "" {
		lines = lines[:l-1]
	}
	for i, line := range lines {
		if i > 0 {
			buf.WriteString("\n\t\t")
		}
		buf.WriteString(line)
	}
	buf.WriteByte('\n')
	return buf.String()
}

// duration returns how long the test ran (end - start).
func (t *test) duration() time.Duration {
	return t.end.Sub(t.start)
}

// Failed reports whether the test has been marked as failed.
func (t *test) Failed() bool {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.mu.failed
}

// FailureMsg returns the accumulated failure message(s), if any.
func (t *test) FailureMsg() string {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.mu.failureMsg
}

// ArtifactsDir returns the directory holding this test's artifacts.
func (t *test) ArtifactsDir() string {
	return t.artifactsDir
}

// IsBuildVersion returns true if the build version is greater than or equal to
// minVersion. This allows a test to optionally perform additional checks
// depending on the cockroach version it is running against. Note that the
// versions are Cockroach build tag version numbers, not the internal cluster
// version number.
func (t *test) IsBuildVersion(minVersion string) bool {
	vers, err := version.Parse(minVersion)
	if err != nil {
		t.Fatal(err)
	}
	if p := vers.PreRelease(); p != "" {
		panic("cannot specify a prerelease: " + p)
	}
	// We append "-0" to the min-version spec so that we capture all
	// prereleases of the specified version. Otherwise, "v2.1.0" would compare
	// greater than "v2.1.0-alpha.x".
	vers = version.MustParse(minVersion + "-0")
	return t.buildVersion.AtLeast(vers)
}

// teamCityEscape escapes a string for use as <value> in a key='<value>' attribute
// in TeamCity build output marker.
// Documentation here: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-Escapedvalues
func teamCityEscape(s string) string {
	r := strings.NewReplacer(
		"\n", "|n",
		"'", "|'",
		"|", "||",
		"[", "|[",
		"]", "|]",
	)
	return r.Replace(s)
}

// teamCityNameEscape makes a test name safe for use in TeamCity markers by
// replacing commas with underscores.
func teamCityNameEscape(name string) string {
	return strings.Replace(name, ",", "_", -1)
}

// testWithCount pairs a test spec with the number of runs still owed.
type testWithCount struct {
	spec testSpec
	// count maintains the number of runs remaining for a test.
	count int
}

// clusterType distinguishes locally-run clusters from roachprod-managed ones.
type clusterType int

const (
	localCluster clusterType = iota
	roachprodCluster
)

// loggingOpt bundles the logging configuration for a test runner invocation.
type loggingOpt struct {
	// l is the test runner logger.
	// Note that individual test runs will use a different logger.
	l *logger
	// tee controls whether test logs (not test runner logs) also go to stdout or
	// not.
	tee            teeOptType
	stdout, stderr io.Writer
	// artifactsDir is the path to the dir that will contain the artifacts for
	// all the tests.
	artifactsDir string
	// runnerLogPath is the path to the runner's log file.
	runnerLogPath string
}

// workerStatus is the mutex-guarded view of what a runner worker goroutine is
// currently doing, for display on the HTML progress page.
type workerStatus struct {
	// name is the worker's identifier.
	name string
	mu   struct {
		syncutil.Mutex
		// status is presented in the HTML progress page.
		status string

		ttr testToRunRes
		t   *test
		c   *cluster
	}
}

// Status returns the worker's current status line.
func (w *workerStatus) Status() string {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.mu.status
}

// SetStatus updates the worker's current status line.
func (w *workerStatus) SetStatus(status string) {
	w.mu.Lock()
	w.mu.status = status
	w.mu.Unlock()
}

// Cluster returns the cluster the worker is currently using, if any.
func (w *workerStatus) Cluster() *cluster {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.mu.c
}

// SetCluster records the cluster the worker is using.
func (w *workerStatus) SetCluster(c *cluster) {
	w.mu.Lock()
	w.mu.c = c
	w.mu.Unlock()
}

// TestToRun returns the worker's currently assigned test-to-run record.
func (w *workerStatus) TestToRun() testToRunRes {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.mu.ttr
}

// Test returns the test the worker is currently running, if any.
func (w *workerStatus) Test() *test {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.mu.t
}

// SetTest records the test (and its run descriptor) the worker is running.
func (w *workerStatus) SetTest(t *test, ttr testToRunRes) {
	w.mu.Lock()
	w.mu.t = t
	w.mu.ttr = ttr
	w.mu.Unlock()
}

// shout logs a message both to a logger and to an io.Writer.
// If format doesn't end with a new line, one will be automatically added.
func shout(ctx context.Context, l *logger, stdout io.Writer, format string, args ...interface{}) {
	if len(format) == 0 || format[len(format)-1] != '\n' {
		format += "\n"
	}
	msg := fmt.Sprintf(format, args...)
	l.PrintfCtxDepth(ctx, 2 /* depth */, msg)
	fmt.Fprint(stdout, msg)
}
package main import ( "errors" "fmt" "os" "github.com/hlfstr/flagger" ) func main() { f := flagger.New() f.AddHelp("Show this help", "Testing Flags:") f.AddVersion("Show version", flagger.Info()) //Bool flag b := f.Bool("Test Bool", "-b", "--bool") //int flag i := f.Int(8, "Test Int", "-i", "--integer") //string flag s := f.String("h", "Test String", "-s", "--string") fmt.Printf("Before\n") fmt.Printf(" Bool: %t\n", *b) fmt.Printf(" Int: %d\n", *i) fmt.Printf(" String: %s\n", *s) d, err := f.Parse(os.Args) if errors.Is(err, flagger.ErrNoFlags) { f.Usage("[OPTION]...", fmt.Sprintf("Try '%s --help' for more information", f.Name)) os.Exit(0) // Exit clean } else if err != nil { f.Usage("[OPTION]...", err.Error()) fmt.Printf("Try '%s --help' for more information\n", f.Name) os.Exit(1) // Exit error } fmt.Println("After") fmt.Printf(" Bool: %t\n", *b) fmt.Printf(" Int: %d\n", *i) fmt.Printf(" String: %s\n", *s) fmt.Print("Data: ") fmt.Println(d) }
package structures type Tone struct { Tonic PitchName ScoreFlatNum int ScoreSharpNum int } func NewTone(tonic PitchName) *Tone { tone := &Tone{Tonic: tonic} return tone } //func Tonic() String { // //} //func main() { // _ = NewTone(C) //}
package tests import ( "testing" "reflect" . "github.com/go-dash/slice/tests/types" "github.com/go-dash/slice/_string" "github.com/go-dash/slice/_int" "github.com/go-dash/slice/_Person" // github.com/go-dash/slice/tests/types ) var tableReverseString = []struct { input []string output []string }{ {nil, []string{}}, {[]string{}, []string{}}, {[]string{"aaa", "aaa", "bbb"}, []string{"bbb", "aaa", "aaa"}}, {[]string{"aa", "bb", "aa", "cc", "bb"}, []string{"bb", "cc", "aa", "bb", "aa"}}, } func TestReverseString(t *testing.T) { for _, tt := range tableReverseString { res := _string.Reverse(tt.input) if !reflect.DeepEqual(res, tt.output) { t.Fatalf("Expected %v received %v", tt.output, res) } res = _string.Chain(tt.input).Reverse().Value() if !reflect.DeepEqual(res, tt.output) { t.Fatalf("Expected %v received %v", tt.output, res) } } } var tableReverseInt = []struct { input []int output []int }{ {nil, []int{}}, {[]int{}, []int{}}, {[]int{1, 2, 3}, []int{3, 2, 1}}, {[]int{1, 2, 1, 3, 2}, []int{2, 3, 1, 2, 1}}, } func TestReverseInt(t *testing.T) { for _, tt := range tableReverseInt { res := _int.Reverse(tt.input) if !reflect.DeepEqual(res, tt.output) { t.Fatalf("Expected %v received %v", tt.output, res) } res = _int.Chain(tt.input).Reverse().Value() if !reflect.DeepEqual(res, tt.output) { t.Fatalf("Expected %v received %v", tt.output, res) } } } var tableReversePerson = []struct { input []Person output []Person }{ {nil, []Person{}}, {[]Person{}, []Person{}}, {[]Person{Person{"aa", 18}, Person{"bb", 19}}, []Person{Person{"bb", 19}, Person{"aa", 18}}}, } func TestReversePerson(t *testing.T) { for _, tt := range tableReversePerson { res := _Person.Reverse(tt.input) if !reflect.DeepEqual(res, tt.output) { t.Fatalf("Expected %v received %v", tt.output, res) } res = _Person.Chain(tt.input).Reverse().Value() if !reflect.DeepEqual(res, tt.output) { t.Fatalf("Expected %v received %v", tt.output, res) } } }
package roomserver

import (
	conn "centerclient"
	cmn "common"
	"logger"
	"rpc"
	"strconv"
	"sync"
	"time"
)

const (
	JieSanRoomName = "JieSanRoomName"
)

// CustomRoom holds the shared state of a player-created (custom) room:
// ownership, game/currency configuration, round bookkeeping, the
// dissolve-room ("jie san") vote, and per-player coin settlement totals.
type CustomRoom struct {
	//id int32 //ID
	owner          string //the room's owner (the current host)
	creatingPlayer string //the room's creator (whose room card is charged)
	//name string //name
	//pwd string //password
	gameType     int32 //game type (Da'er, Mahjong, Texas Hold'em)
	limitCoin    int32 //coin threshold required to enter the room
	currencyType int32 //settlement currency type
	//maxMultiple int32 //maximum multiplier
	tiYongAmount              int32                    //substitute-tile count - only meaningful for Mahjong
	times                     int32                    //total number of rounds to play
	curTimes                  int32                    //current round number
	isAlreadyFinalJieSuan     bool                     //whether the final settlement has already happened
	waitingReadyTime          int32                    //time to wait for players to ready up (<=0: wait forever)
	startWaitingDissolveTime  int32                    //dissolve-wait time before the game starts
	middleWaitingDissolveTime int32                    //dissolve-wait time after the game has started
	voteList                  []*rpc.JieSanPlayerInfo  //the players' dissolve-room vote list
	voteRl                    sync.RWMutex             //guards voteList
	voteStartTime             int64                    //vote start time (unix seconds)
	voteDuration              int32                    //vote duration (seconds)
	playerTotalCoin           map[string]int32         //accumulated coin settlement per player
}

// InitCustomRoom initializes the room from the creating player's request.
func (self *CustomRoom) InitCustomRoom( /*id int32,*/ owner string, gameType int32, roomInfo *rpc.CreateRoomREQ) {
	//self.id = id
	self.owner = owner
	self.creatingPlayer = owner
	//r.name = roomInfo.GetName()
	self.gameType = gameType
	//r.pwd = roomInfo.GetPwd()
	//settlement currency type
	self.currencyType = roomInfo.GetCurrencyType()
	//current settlement round
	self.curTimes = 1
	//total settlement rounds
	self.times = roomInfo.GetTimes()
	//final settlement has not happened yet
	self.isAlreadyFinalJieSuan = false
	//accumulates each player's final settlement
	self.playerTotalCoin = make(map[string]int32, 0)
	//the players' dissolve-room vote list
	self.voteList = []*rpc.JieSanPlayerInfo{}
	//initialize the coin entry limit
	self.limitCoin = 0
	if self.currencyType == CTCoin {
		self.limitCoin = roomInfo.GetLimitCoin()
	}
	//initialize the remaining fields from configuration
	self.InitCustomRoomByConfig(gameType, self.currencyType)
}

// InitCustomRoomByConfig loads the ready/dissolve timers for the game and
// currency type from the config tables, and the vote duration from the
// global config.
func (self *CustomRoom) InitCustomRoomByConfig(gameType, currencyType int32) {
	switch currencyType {
	case CTCoin:
		cfg := cmn.GetCustomRoomConfig(strconv.Itoa(int(gameType)))
		if cfg != nil {
			self.waitingReadyTime = cfg.CoinWaitingReadyTime
			self.startWaitingDissolveTime = cfg.CoinRoomDissolveTime
			self.middleWaitingDissolveTime = cfg.CoinRoomDissolveTime
		} else {
			// NOTE(review): gameType is an int32 but the verb is %s, so this
			// logs as "%!s(int32=N)" — consider %d.
			logger.Error("读取房间配置表出错ID:%s", gameType)
		}
	case CTCredits:
		cfg := cmn.GetCustomRoomConfig(strconv.Itoa(int(gameType)))
		if cfg != nil {
			self.waitingReadyTime = cfg.CreditsWaitingReadyTime
			self.startWaitingDissolveTime = cfg.CreditsStartDissolveTime
			self.middleWaitingDissolveTime = cfg.CreditsMiddleDissolveTime
		} else {
			// NOTE(review): same %s/int32 format-verb mismatch as above.
			logger.Error("读取房间配置表出错ID:%s", gameType)
		}
	default:
		logger.Error("不能识别的获取结算类型")
	}
	//vote duration, with a 60-second fallback if global config is missing
	self.voteDuration = 60
	gcfg := cmn.GetDaerGlobalConfig("507")
	if gcfg != nil {
		self.voteDuration = gcfg.IntValue
	} else {
		logger.Error("GetDaerGlobalConfig return nil")
	}
}

// InitVoteList starts a dissolve-room vote: the claimer is recorded as the
// initiator, and every other player starts in the waiting-to-decide state.
func (self *CustomRoom) InitVoteList(claimerID string, playerIDs []string) {
	if len(playerIDs) <= 0 {
		logger.Error("players is empty.")
		return
	}
	self.ClearVoteList()
	claimer := &rpc.JieSanPlayerInfo{}
	claimer.SetPlayerID(claimerID)
	claimer.SetStatus(JSClaimer)
	self.voteRl.Lock()
	defer self.voteRl.Unlock()
	self.voteList = append(self.voteList, claimer)
	for _, pid := range playerIDs {
		if pid != claimerID {
			jsp := &rpc.JieSanPlayerInfo{}
			jsp.SetPlayerID(pid)
			jsp.SetStatus(JSWatingDispose)
			self.voteList = append(self.voteList, jsp)
		}
	}
}

// ClearVoteList empties the vote list and resets the vote start time.
func (self *CustomRoom) ClearVoteList() {
	self.voteRl.Lock()
	defer self.voteRl.Unlock()
	self.voteList = []*rpc.JieSanPlayerInfo{}
	self.voteStartTime = time.Now().Unix()
}

// IsVoting reports whether a dissolve-room vote is currently in progress.
func (self *CustomRoom) IsVoting() bool {
	self.voteRl.RLock()
	defer self.voteRl.RUnlock()
	return len(self.voteList) > 0
}

// UpdateVote records player uid's vote result in the current vote.
func (self *CustomRoom) UpdateVote(uid string, result int32) {
	self.voteRl.Lock()
	defer self.voteRl.Unlock()
	if len(self.voteList) <= 0 {
		logger.Error("还没有人发起解散房间投票!")
		return
	}
	for i, vote := range self.voteList {
		if vote.GetPlayerID() == uid {
			self.voteList[i].SetStatus(result)
			return
		}
	}
}

// IsVoteEnd reports whether the vote has finished and whether it passed:
// a single refusal ends the vote as failed; the vote succeeds only once
// every player has agreed.
func (self *CustomRoom) IsVoteEnd() (isEnd, isSuccess bool) {
	self.voteRl.RLock()
	defer self.voteRl.RUnlock()
	if len(self.voteList) <= 0 {
		logger.Error("还没有人发起解散房间投票!")
		return true, false
	}
	//check whether anyone has refused
	for _, vote := range self.voteList {
		if vote.GetStatus() == JSRefuse {
			return true, false
		}
	}
	//check whether everyone has agreed
	for _, vote := range self.voteList {
		if vote.GetStatus() == JSWatingDispose {
			return false, false
		}
	}
	return true, true
}

// StatisticsCoin folds one round's per-player coin results into the room's
// running totals.
func (self *CustomRoom) StatisticsCoin(coins []*rpc.JieSuanCoin) {
	if coins == nil {
		logger.Error("coin is nil.")
		return
	}
	for _, coin := range coins {
		uid := coin.GetPlayerID()
		// NOTE(review): this shadows the loop variable "coin" with its coin
		// amount; works, but a distinct name would be clearer.
		coin := coin.GetCoin()
		self.playerTotalCoin[uid] += coin
	}
}

// GetTotalCoin converts the accumulated settlement totals into rpc form.
func (self *CustomRoom) GetTotalCoin() []*rpc.JieSuanCoin {
	result := make([]*rpc.JieSuanCoin, 0)
	for uid, coin := range self.playerTotalCoin {
		jieSuanCoin := &rpc.JieSuanCoin{}
		jieSuanCoin.SetPlayerID(uid)
		jieSuanCoin.SetCoin(coin)
		result = append(result, jieSuanCoin)
	}
	return result
}

// ConvertToCustomRoom extracts the embedded CustomRoom from a game-specific
// room. Returns nil for an unknown game type (Texas Hold'em is currently
// unhandled and also yields nil).
func ConvertToCustomRoom(gameType int32, room cmn.GameRoom) *CustomRoom {
	if room == nil {
		logger.Error("room is nil.")
		return nil
	}
	switch gameType {
	case cmn.DaerGame:
		cr := room.(*CustomDaerRoom)
		return &cr.CustomRoom
	case cmn.MaJiang:
		cr := room.(*CustomMaJiangRoom)
		return &cr.CustomRoom
	case cmn.DeZhouPuker:
	default:
		logger.Error("不能识别的游戏类型!")
	}
	return nil
}

// GenerateRoomInfo builds the rpc.RoomInfo snapshot for a room, or nil if
// the room cannot be converted to a CustomRoom.
func GenerateRoomInfo(gameType int32, room cmn.GameRoom) *rpc.RoomInfo {
	//convert to a custom room
	cr := ConvertToCustomRoom(gameType, room)
	if cr == nil {
		logger.Error("转换房间类型错误")
		return nil
	}
	roomInfo := &rpc.RoomInfo{}
	roomInfo.SetId(room.UID())
	//roomInfo.SetName(cr.name)
	roomInfo.SetCurrencyType(cr.currencyType)
	roomInfo.SetGameType(cr.gameType)
	roomInfo.SetDifen(room.GetDifen())
	roomInfo.SetTimes(cr.times)
	//roomInfo.SetHavePwd(cr.pwd != "")
	roomInfo.SetLimitCoin(cr.limitCoin)
	roomInfo.SetMaxMultiple(room.GetMaxMultiple())
	roomInfo.SetIsDaiGui(room.GetIsDaiGui())
	roomInfo.SetTiYongAmount(room.GetTiYongAmount())
	roomInfo.SetQiHuKeAmount(room.GetQiHuKeAmount())
	//roomInfo.SetPlayerCount(room.GetPlayerAmount())
	return roomInfo
}

//network-related helpers

// SendJieSanRoomNotify tells the listed players that a dissolve-room vote has
// started, including the remaining vote time and current per-player states.
// NOTE(review): voteList is read here without holding voteRl — verify callers
// serialize access.
func (self *CustomRoom) SendJieSanRoomNotify(palyerIDs []string) {
	if len(self.voteList) <= 0 {
		logger.Error("投票列表等于空!")
		return
	}
	msg := &rpc.JieSanRoomNotify{}
	msg.SetRemainTime(int32(int64(self.voteDuration) - (time.Now().Unix() - self.voteStartTime)))
	msg.JieSanPlayerInfo = append(msg.JieSanPlayerInfo, self.voteList...)
	if err := conn.SendCommonNotify2S(palyerIDs, msg, "JieSanRoomNotify"); err != nil {
		logger.Error("发送结束投票通知出错:", err, msg)
	}
}

// SendJieSanRoomUpdateStatusNotify tells the listed players that player uid's
// vote status changed.
func (self *CustomRoom) SendJieSanRoomUpdateStatusNotify(palyerIDs []string, uid string, status int32) {
	if len(palyerIDs) <= 0 {
		logger.Error("palyerIDs is null.")
		return
	}
	msg := &rpc.JieSanRoomUpdateStatusNotify{}
	vote := &rpc.JieSanPlayerInfo{}
	vote.SetPlayerID(uid)
	vote.SetStatus(status)
	msg.SetJieSanPlayerInfo(vote)
	if err := conn.SendCommonNotify2S(palyerIDs, msg, "JieSanRoomUpdateStatusNotify"); err != nil {
		logger.Error("发送结束投票更新通知出错:", err, msg)
	}
}
package server import ( "encoding/json" "github.com/gorilla/mux" pet "goAPI/pkg" "net/http" ) type api struct { router http.Handler repository pet.PetRepository } type Server interface { Router() http.Handler } func New(repo pet.PetRepository) Server { a := &api{repository: repo} r := mux.NewRouter() r.HandleFunc("/pets", a.fetchPets).Methods(http.MethodGet) r.HandleFunc("/pets/{ID:[a-zA-Z0-9_]+", a.fetchPet).Methods(http.MethodGet) a.router = r return a } func (a *api) Router() http.Handler { return a.router } func (a *api) fetchPets(writer http.ResponseWriter, _ *http.Request) { pets, _ := a.repository.FetchPets() writer.Header().Set("Content-Type", "application/json") _ = json.NewEncoder(writer).Encode(pets) } func (a *api) fetchPet(writer http.ResponseWriter, request *http.Request) { vars := mux.Vars(request) animal, err := a.repository.FetchPetByID(vars["ID"]) writer.Header().Set("Content-Type", "application/json") if err != nil { writer.WriteHeader(http.StatusNotFound) //Not found for sample simplicity _ = json.NewEncoder(writer).Encode("Pet not found") return } _ = json.NewEncoder(writer).Encode(animal) }
package main

import (
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

// main wires the calculator endpoint into an httprouter mux and serves
// it on port 8989.
func main() {
	mux := httprouter.New()

	// Calculate (defined elsewhere in this package) handles POST /calculate.
	mux.POST("/calculate", Calculate)

	// ListenAndServe only returns on failure; log the error and exit.
	log.Fatal(http.ListenAndServe(":8989", mux))
}
package main

import (
	"fmt"
	"testing"
)

// f is a reusable scratch buffer for the five parsed values:
// center x/y, radius, point x/y.
var f []float64

func init() {
	f = make([]float64, 5)
}

// TestInCircle parses each formatted case and checks inCircle's verdict.
func TestInCircle(t *testing.T) {
	for k, v := range map[string]bool{
		"Center: (2.12, -3.48); Radius: 17.22; Point: (16.21, -5)":   true,
		"Center: (5.05, -11); Radius: 21.2; Point: (-31, -45)":       false,
		"Center: (-9.86, 1.95); Radius: 47.28; Point: (6.03, -6.42)": true} {
		// FIX: fail loudly on a malformed case instead of silently
		// reusing stale values from the previous iteration (the
		// original ignored Sscanf's results entirely).
		n, err := fmt.Sscanf(k, "Center: (%f, %f); Radius: %f; Point: (%f, %f)", &f[0], &f[1], &f[2], &f[3], &f[4])
		if err != nil || n != 5 {
			t.Fatalf("parse %q: matched %d fields, err = %v", k, n, err)
		}
		if r := inCircle(f[0]-f[3], f[1]-f[4], f[2]); r != v {
			t.Errorf("failed: inCircle %s is %t, got %t", k, v, r)
		}
	}
}

// inCircle reports whether the point offset (a, b) from the circle's
// center lies within or on a circle of radius c, i.e. a²+b² <= c².
func inCircle(a, b, c float64) bool {
	return a*a+b*b <= c*c
}
package redis_test

import (
	"fmt"
	"math/rand"
	"os/exec"
	"strings"
	"testing"
	"time"

	"github.com/farzadrastegar/simple-cab/driver_location"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/spf13/viper"
)

// port is the Redis port read from configuration; paramsReady guards
// its one-time load in dbPortIsListening.
var port string
var paramsReady bool

// randSeed is a fixed instant used both to seed math/rand and as the
// deterministic starting "now" for the client under test, so stream
// entries get reproducible timestamps.
var randSeed = time.Date(2019, 01, 01, 02, 00, 00, 00, time.UTC)

// Location mirrors the JSON shape of a stored driver location.
type Location struct {
	Latitude   float64 `json:"latitude"`
	Longitude  float64 `json:"longitude"`
	Updated_at string  `json:"updated_at,omitempty"`
}

// genRand returns a pseudo-random int in [0, 1000000).
func genRand() int {
	return rand.Intn(1000000)
}

// OnAvailableRedisIt registers the spec only when a local Redis is
// listening; otherwise the spec is silently dropped (not even pending).
func OnAvailableRedisIt(description string, f interface{}) {
	if dbPortIsListening() {
		It(description, f)
	} else {
		// PIt(description, f)
	}
}

var _ = Describe("basic functionality", func() {
	var client *Client
	entryLen := 12
	pathID := "1000"
	latitude := 1.1
	longitude := 2.2

	BeforeSuite(func() {
		// Set random number seed.
		rand.Seed(randSeed.Unix())

		// Execute the flushdb command in Redis.
		flushDB()
	})

	AfterSuite(func() {
	})

	BeforeEach(func() {
	})

	AfterEach(func() {
		if client != nil {
			Expect(client.Close()).NotTo(HaveOccurred())
		}
	})

	// Writes entryLen locations 5 seconds apart (by advancing the fake
	// clock) and checks they all land in the drivers:<id> stream.
	OnAvailableRedisIt("should store", func() {
		client = MustOpenClient()
		client.Now = randSeed.Unix()
		s := client.Connect()
		for i := 0; i < entryLen; i++ {
			loc := driver_location.Location{Latitude: latitude + float64(i), Longitude: longitude + float64(i)}
			err := s.StoreLocation(pathID, &loc)
			Expect(err).NotTo(HaveOccurred())
			client.Now += 5
		}
		vals, err := client.GetDB().XRange("drivers:"+pathID, "-", "+").Result()
		Expect(err).NotTo(HaveOccurred())
		Expect(len(vals)).To(Equal(entryLen))
	})

	// Queries the last minute of locations and checks the values written
	// by "should store".
	// NOTE(review): this spec depends on "should store" having run first
	// in the same suite against the same (flushed-once) database.
	OnAvailableRedisIt("should query", func() {
		client = MustOpenClient()
		client.Now = randSeed.Unix()
		s := client.Connect()
		minutes := 1.0
		// Advance "now" by one minute so the stored entries fall inside
		// the queried window.
		client.Now += 60
		locations, err := s.GetDriverLocations(pathID, minutes)
		Expect(err).NotTo(HaveOccurred())
		Expect(len(locations.Locations)).To(Equal(entryLen))
		for i := 0; i < len(locations.Locations); i++ {
			Expect(locations.Locations[i].Latitude).To(Equal(latitude + float64(i)))
			Expect(locations.Locations[i].Longitude).To(Equal(longitude + float64(i)))
		}
	})
})

// TestSuite is the go-test entry point that runs the ginkgo specs.
func TestSuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "database")
}

// readConf loads the service configuration and caches the database port.
func readConf() {
	// Load configurations.
	driver_location.LoadConfigurationFromBranch()

	// Set port.
	port = viper.GetString("database.port")
}

// dbPortIsListening reports whether anything is LISTENing on the
// configured Redis port, by shelling out to lsof/grep.
// NOTE(review): relies on /bin/bash and lsof being present — this check
// quietly returns false on hosts without them.
func dbPortIsListening() bool {
	// Read port (if needed)
	if !paramsReady {
		readConf()
		paramsReady = true
	}

	// Port must be available here.
	if port == "" {
		return false
	}

	// Check DB port is listening.
	checkPort := fmt.Sprintf("lsof -i -n -P | grep %s | grep LISTEN | tail -n1", port)
	cmdOut, _ := exec.Command("/bin/bash", "-c", checkPort).Output()
	portIsListening := true
	var strBuilder strings.Builder
	strBuilder.Write(cmdOut)
	if strings.Index(strBuilder.String(), port) == -1 {
		portIsListening = false
	}
	return portIsListening
}

// flushDB wipes the Redis database once, if one is reachable, so the
// suite starts from a clean state.
func flushDB() {
	if dbPortIsListening() {
		client := MustOpenClient()
		Expect(client.GetDB().FlushDB().Err()).NotTo(HaveOccurred())
		Expect(client.Close()).NotTo(HaveOccurred())
	}
}