text
stringlengths
11
4.05M
package leetcode

import "testing"

// TestDominantIndex verifies dominantIndex on one slice that has a
// dominant element and one that does not.
func TestDominantIndex(t *testing.T) {
	cases := []struct {
		nums []int
		want int
	}{
		{[]int{3, 6, 1, 0}, 1},
		{[]int{1, 2, 3, 4}, -1},
	}
	for _, tc := range cases {
		if got := dominantIndex(tc.nums); got != tc.want {
			t.Fatal()
		}
	}
}
package job

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/signal"
	"strings"
	"syscall"
	"time"

	"github.com/dnephin/dobi/config"
	"github.com/dnephin/dobi/logging"
	"github.com/dnephin/dobi/tasks/client"
	"github.com/dnephin/dobi/tasks/context"
	"github.com/dnephin/dobi/tasks/image"
	"github.com/dnephin/dobi/tasks/mount"
	"github.com/dnephin/dobi/tasks/task"
	"github.com/dnephin/dobi/tasks/types"
	"github.com/dnephin/dobi/utils/fs"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/go-connections/nat"
	docker "github.com/fsouza/go-dockerclient"
	log "github.com/sirupsen/logrus"
)

// DefaultUnixSocket to connect to the docker API
const DefaultUnixSocket = "/var/run/docker.sock"

// newRunTask builds the run task for a job resource.
func newRunTask(name task.Name, conf config.Resource) types.Task {
	return &Task{name: name, config: conf.(*config.JobConfig)}
}

// Task is a task which runs a command in a container to produce a
// file or set of files.
type Task struct {
	types.NoStop
	name   task.Name
	config *config.JobConfig
	// outStream is an extra destination for container stdout; when nil
	// only os.Stdout is used (see output()).
	outStream io.Writer
}

// Name returns the name of the task
func (t *Task) Name() task.Name {
	return t.name
}

// logger returns a log entry tagged with this task.
func (t *Task) logger() *log.Entry {
	return logging.ForTask(t)
}

// Repr formats the task for logging
func (t *Task) Repr() string {
	buff := &bytes.Buffer{}

	if !t.config.Command.Empty() {
		buff.WriteString(" " + t.config.Command.String())
	}
	if !t.config.Command.Empty() && !t.config.Artifact.Empty() {
		buff.WriteString(" ->")
	}
	if !t.config.Artifact.Empty() {
		buff.WriteString(" " + t.config.Artifact.String())
	}
	return fmt.Sprintf("%s%v", t.name.Format("job"), buff.String())
}

// Run the job command in a container
func (t *Task) Run(ctx *context.ExecuteContext, depsModified bool) (bool, error) {
	// Skip the run entirely when no dependency changed and the artifact
	// is still fresh with respect to its inputs.
	if !depsModified {
		stale, err := t.isStale(ctx)
		switch {
		case err != nil:
			return false, err
		case !stale:
			t.logger().Info("is fresh")
			return false, nil
		}
	}
	t.logger().Debug("is stale")

	t.logger().Info("Start")
	var err error
	if ctx.Settings.BindMount {
		err = t.runContainerWithBinds(ctx)
	} else {
		err = t.runWithBuildAndCopy(ctx)
	}
	if err != nil {
		return false, err
	}

	t.logger().Info("Done")
	return true, nil
}

// isStale reports whether the artifact needs rebuilding by comparing
// its last-modified time against sources, mounts, and the task image.
// nolint: gocyclo
func (t *Task) isStale(ctx *context.ExecuteContext) (bool, error) {
	if t.config.Artifact.Empty() {
		return true, nil
	}

	artifactLastModified, err := t.artifactLastModified(ctx.WorkingDir)
	if err != nil {
		t.logger().Warnf("Failed to get artifact last modified: %s", err)
		return true, err
	}

	if t.config.Sources.NoMatches() {
		t.logger().Warnf("No sources found matching: %s", &t.config.Sources)
		return true, nil
	}

	// When explicit source paths are configured, only they decide
	// staleness; mounts and the image are not consulted.
	if len(t.config.Sources.Paths()) != 0 {
		sourcesLastModified, err := fs.LastModified(&fs.LastModifiedSearch{
			Root:  ctx.WorkingDir,
			Paths: t.config.Sources.Paths(),
		})
		if err != nil {
			return true, err
		}
		if artifactLastModified.Before(sourcesLastModified) {
			t.logger().Debug("artifact older than sources")
			return true, nil
		}
		return false, nil
	}

	mountsLastModified, err := t.mountsLastModified(ctx)
	if err != nil {
		t.logger().Warnf("Failed to get mounts last modified: %s", err)
		return true, err
	}
	if artifactLastModified.Before(mountsLastModified) {
		t.logger().Debug("artifact older than mount files")
		return true, nil
	}

	imageName := ctx.Resources.Image(t.config.Use)
	taskImage, err := image.GetImage(ctx, imageName)
	if err != nil {
		return true, fmt.Errorf("failed to get image %q: %s", imageName, err)
	}
	if artifactLastModified.Before(taskImage.Created) {
		t.logger().Debug("artifact older than image")
		return true, nil
	}
	return false, nil
}

// artifactLastModified returns the newest modification time among the
// artifact paths; the zero time when no artifact path exists yet.
func (t *Task) artifactLastModified(workDir string) (time.Time, error) {
	paths := t.config.Artifact.Paths()
	// File or directory doesn't exist
	if len(paths) == 0 {
		return time.Time{}, nil
	}
	return fs.LastModified(&fs.LastModifiedSearch{Root: workDir, Paths: paths})
}

// mountsLastModified returns the newest modification time among the
// bind paths of the task's mounts.
// TODO: support a .mountignore file used to ignore mtime of files
func (t *Task) mountsLastModified(ctx *context.ExecuteContext) (time.Time, error) {
	mountPaths := []string{}
	ctx.Resources.EachMount(t.config.Mounts, func(name string, mount *config.MountConfig) {
		mountPaths = append(mountPaths, mount.Bind)
	})
	return fs.LastModified(&fs.LastModifiedSearch{Root: ctx.WorkingDir, Paths: mountPaths})
}

// runContainerWithBinds runs the job in a container using host bind
// mounts, and removes the container when done.
func (t *Task) runContainerWithBinds(ctx *context.ExecuteContext) error {
	name := containerName(ctx, t.name.Resource())
	imageName := image.GetImageName(ctx, ctx.Resources.Image(t.config.Use))
	options := t.createOptions(ctx, name, imageName)
	defer removeContainerWithLogging(t.logger(), ctx.Client, name)
	return t.runContainer(ctx, options)
}

// removeContainerWithLogging removes a container and warns when it no
// longer exists.
func removeContainerWithLogging(
	logger *log.Entry,
	client client.DockerClient,
	containerID string,
) {
	removed, err := removeContainer(logger, client, containerID)
	if !removed && err == nil {
		logger.WithFields(log.Fields{"container": containerID}).Warn(
			"Container does not exist")
	}
}

// runContainer creates the container, attaches streams, forwards
// signals, switches the terminal to raw mode for interactive jobs,
// starts the container, and waits for it to exit.
func (t *Task) runContainer(
	ctx *context.ExecuteContext,
	options docker.CreateContainerOptions,
) error {
	name := options.Name
	container, err := ctx.Client.CreateContainer(options)
	if err != nil {
		return fmt.Errorf("failed creating container %q: %s", name, err)
	}

	chanSig := t.forwardSignals(ctx.Client, container.ID)
	defer signal.Stop(chanSig)

	// Attach must happen before StartContainer so no output is missed.
	closeWaiter, err := ctx.Client.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
		Container:    container.ID,
		OutputStream: t.output(),
		ErrorStream:  os.Stderr,
		InputStream:  ioutil.NopCloser(os.Stdin),
		Stream:       true,
		Stdin:        t.config.Interactive,
		RawTerminal:  t.config.Interactive,
		Stdout:       true,
		Stderr:       true,
	})
	if err != nil {
		return fmt.Errorf("failed attaching to container %q: %s", name, err)
	}
	defer closeWaiter.Wait() // nolint: errcheck

	if t.config.Interactive {
		inFd, _ := term.GetFdInfo(os.Stdin)
		state, err := term.SetRawTerminal(inFd)
		if err != nil {
			return err
		}
		defer func() {
			if err := term.RestoreTerminal(inFd, state); err != nil {
				t.logger().Warnf("Failed to restore fd %v: %s", inFd, err)
			}
		}()
	}

	if err := ctx.Client.StartContainer(container.ID, nil); err != nil {
		return fmt.Errorf("failed starting container %q: %s", name, err)
	}

	initWindow(chanSig)
	return t.wait(ctx.Client, container.ID)
}

// output returns the writer container stdout is copied to.
func (t *Task) output() io.Writer {
	if t.outStream == nil {
		return os.Stdout
	}
	return io.MultiWriter(t.outStream, os.Stdout)
}

// createOptions translates the job config into docker container
// creation options.
func (t *Task) createOptions(
	ctx *context.ExecuteContext,
	name string,
	imageName string,
) docker.CreateContainerOptions {
	t.logger().Debugf("Image name %q", imageName)

	interactive := t.config.Interactive
	portBinds, exposedPorts := asPortBindings(t.config.Ports)
	// TODO: only set Tty if running in a tty
	opts := docker.CreateContainerOptions{
		Name: name,
		Config: &docker.Config{
			Cmd:          t.config.Command.Value(),
			Image:        imageName,
			User:         t.config.User,
			OpenStdin:    interactive,
			Tty:          interactive,
			AttachStdin:  interactive,
			StdinOnce:    interactive,
			Labels:       t.config.Labels,
			AttachStderr: true,
			AttachStdout: true,
			Env:          t.config.Env,
			Entrypoint:   t.config.Entrypoint.Value(),
			WorkingDir:   t.config.WorkingDir,
			ExposedPorts: exposedPorts,
		},
		HostConfig: &docker.HostConfig{
			Binds:        getMountsForHostConfig(ctx, t.config.Mounts),
			Privileged:   t.config.Privileged,
			NetworkMode:  t.config.NetMode,
			PortBindings: portBinds,
			Devices:      getDevices(t.config.Devices),
		},
	}
	if t.config.ProvideDocker {
		opts = provideDocker(opts)
	}
	return opts
}

// getMountsForHostConfig converts mount resources to docker bind
// strings, skipping bind mounts when bind mounting is disabled.
func getMountsForHostConfig(ctx *context.ExecuteContext, mounts []string) []string {
	binds := []string{}
	ctx.Resources.EachMount(mounts, func(name string, mountConfig *config.MountConfig) {
		if !ctx.Settings.BindMount && mountConfig.IsBind() {
			return
		}
		binds = append(binds, mount.AsBind(mountConfig, ctx.WorkingDir))
	})
	return binds
}

// getDevices maps config devices to docker devices, defaulting the
// container path to the host path and permissions to "rwm".
func getDevices(devices []config.Device) []docker.Device {
	var dockerdevices []docker.Device
	for _, dev := range devices {
		if dev.Container == "" {
			dev.Container = dev.Host
		}
		if dev.Permissions == "" {
			dev.Permissions = "rwm"
		}
		dockerdevices = append(dockerdevices, docker.Device{
			PathInContainer:   dev.Container,
			PathOnHost:        dev.Host,
			CgroupPermissions: dev.Permissions,
		})
	}
	return dockerdevices
}

// asPortBindings parses "host:container[/proto]" strings into docker
// port bindings and the matching exposed-port set.
func asPortBindings(ports []string) (map[docker.Port][]docker.PortBinding, map[docker.Port]struct{}) { // nolint: lll
	binds := make(map[docker.Port][]docker.PortBinding)
	exposed := make(map[docker.Port]struct{})
	for _, port := range ports {
		parts := strings.SplitN(port, ":", 2)
		proto, cport := nat.SplitProtoPort(parts[1])
		cport = cport + "/" + proto
		binds[docker.Port(cport)] = []docker.PortBinding{{HostPort: parts[0]}}
		exposed[docker.Port(cport)] = struct{}{}
	}
	return binds, exposed
}

// provideDocker exposes the host docker daemon to the container: the
// unix socket is bind mounted (unless DOCKER_HOST is set) and all
// DOCKER_* environment variables are passed through.
func provideDocker(opts docker.CreateContainerOptions) docker.CreateContainerOptions {
	if os.Getenv("DOCKER_HOST") == "" {
		path := DefaultUnixSocket
		opts.HostConfig.Binds = append(opts.HostConfig.Binds, path+":"+path)
	}
	for _, envVar := range os.Environ() {
		if strings.HasPrefix(envVar, "DOCKER_") {
			opts.Config.Env = append(opts.Config.Env, envVar)
		}
	}
	return opts
}

// wait blocks until the container exits and returns an error for a
// non-zero exit status.
func (t *Task) wait(client client.DockerClient, containerID string) error {
	status, err := client.WaitContainer(containerID)
	if err != nil {
		return fmt.Errorf("failed to wait on container exit: %s", err)
	}
	if status != 0 {
		return fmt.Errorf("exited with non-zero status code %d", status)
	}
	return nil
}

// forwardSignals relays SIGINT/SIGTERM to the container and resizes
// the container TTY on SIGWINCH; the caller stops the returned channel
// with signal.Stop.
func (t *Task) forwardSignals(
	client client.DockerClient,
	containerID string,
) chan<- os.Signal {
	chanSig := make(chan os.Signal, 128)

	signal.Notify(chanSig, syscall.SIGINT, syscall.SIGTERM, SIGWINCH)

	go func() {
		for sig := range chanSig {
			logger := t.logger().WithField("signal", sig)
			logger.Debug("received")

			sysSignal, ok := sig.(syscall.Signal)
			if !ok {
				logger.Warnf("Failed to convert signal from %T", sig)
				return
			}

			switch sysSignal {
			case SIGWINCH:
				handleWinSizeChangeSignal(logger, client, containerID)
			default:
				handleShutdownSignals(logger, client, containerID, sysSignal)
			}
		}
	}()
	return chanSig
}

// handleWinSizeChangeSignal propagates the host terminal size to the
// container TTY.
func handleWinSizeChangeSignal(
	logger log.FieldLogger,
	client client.DockerClient,
	containerID string,
) {
	winsize, err := term.GetWinsize(os.Stdin.Fd())
	if err != nil {
		logger.WithError(err).
			Error("Failed to get host's TTY window size")
		return
	}
	err = client.ResizeContainerTTY(containerID, int(winsize.Height), int(winsize.Width))
	if err != nil {
		logger.WithError(err).
			Error("Failed to set container's TTY window size")
	}
}

// handleShutdownSignals forwards a shutdown signal to the container.
func handleShutdownSignals(
	logger log.FieldLogger,
	client client.DockerClient,
	containerID string,
	sig syscall.Signal,
) {
	if err := client.KillContainer(docker.KillContainerOptions{
		ID:     containerID,
		Signal: docker.Signal(sig),
	}); err != nil {
		logger.WithError(err).
			Warn("Failed to forward signal")
	}
}
// Copyright 2016 Kranz. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package context import ( "fmt" "gopkg.in/macaron.v1" "github.com/rodkranz/fakeApi/modules/base" ) // type Context struct { *macaron.Context } // HasApiError return if has error or not func (ctx *Context) HasApiError() bool { hasErr, ok := ctx.Data["HasError"] if !ok { return false } return hasErr.(bool) } // GetErrMsg return error message func (ctx *Context) GetErrMsg() string { return ctx.Data["ErrorMsg"].(string) } // HasError returns true if error occurs in form validation. func (ctx *Context) HasError() bool { hasErr, ok := ctx.Data["HasError"] if !ok { return false } return hasErr.(bool) } // HasValue returns true if value of given name exists. func (ctx *Context) HasValue(name string) bool { _, ok := ctx.Data[name] return ok } // HTML calls Context.HTML and converts template name to string. func (ctx *Context) HTML(status int, name base.TplName) { ctx.Context.HTML(status, string(name)) } // Handle handles and logs error by given status. func (ctx *Context) Handle(status int, title string, err error) { if err != nil { if macaron.Env != macaron.PROD { ctx.Data["ErrorMsg"] = err } } switch status { case 404: ctx.Data["Title"] = "Page Not Found" case 500: ctx.Data["Title"] = "Internal Server Error" } ctx.HTML(status, base.TplName(fmt.Sprintf("status/%d", status))) } // HandleText handles simple text func (ctx *Context) HandleText(status int, title string) { ctx.PlainText(status, []byte(title)) } // Contexter initializes a classic context for a request. func Contexter() macaron.Handler { return func(c *macaron.Context) { ctx := &Context{ Context: c, } c.Map(ctx) } }
package constants

const (
	// EMP_BASE_DIR is the base directory that holds the employee
	// resource files used by the tutorial code.
	// NOTE(review): Go convention would be EmpBaseDir (no underscores),
	// but the name is kept to avoid breaking existing callers.
	EMP_BASE_DIR = "/Users/adeep/workspace/icode/golang-tutorials/resources/employee"
)
package sync

import (
	"sync"

	"github.com/kubernetes-sigs/aws-alb-ingress-controller/pkg/util/log"
)

// logger records lock activity so contention can be traced in debug output.
var logger *log.Logger

func init() {
	logger = log.New("sync")
}

// RWMutex wraps sync.RWMutex and logs every acquire/release at debug
// verbosity level 3, making lock waits visible in diagnostics.
type RWMutex struct {
	m sync.RWMutex
}

// RLock acquires the read lock, logging before and after the wait so a
// long gap between the two lines indicates contention.
func (r *RWMutex) RLock() {
	logger.DebugLevelf(3, "Requesting RLock.")
	r.m.RLock()
	logger.DebugLevelf(3, "RLock was available.")
}

// Lock acquires the write lock, logging before and after the wait.
func (r *RWMutex) Lock() {
	logger.DebugLevelf(3, "Requesting Lock.")
	r.m.Lock()
	logger.DebugLevelf(3, "Lock was available.")
}

// RUnlock releases the read lock and logs the release.
func (r *RWMutex) RUnlock() {
	r.m.RUnlock()
	logger.DebugLevelf(3, "RUnlock'd.")
}

// Unlock releases the write lock and logs the release.
func (r *RWMutex) Unlock() {
	r.m.Unlock()
	logger.DebugLevelf(3, "Unlock'd.")
}
package main

import (
	"fmt"

	"github.com/zealic/go2node"
)

// main demonstrates child-side IPC with a parent Node.js process: it
// reads one message from the parent, prints it, then replies with a
// JSON payload.
func main() {
	channel, err := go2node.RunAsNodeChild()
	if err != nil {
		panic(err)
	}

	// Golang will output: {"hello":"child"}
	msg, err := channel.Read()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg.Message))

	// Node will output: {"hello":'parent'}
	err = channel.Write(&go2node.NodeMessage{
		Message: []byte(`{"hello":"parent"}`),
	})
	if err != nil {
		panic(err)
	}
}
package http

import (
	"context"
	"fmt"
	"io"
	"os"
	"os/exec"
	"testing"

	"github.com/ovh/venom"
	"github.com/stretchr/testify/require"
)

// generateClientFile creates a throwaway RSA private key and a matching
// self-signed certificate by shelling out to the openssl CLI, and
// returns the two file names (key, cert). Requires openssl on PATH.
func generateClientFile(t *testing.T) (string, string) {
	TLSClientKey, err := os.CreateTemp(os.TempDir(), "TLSClientKey.*.key")
	require.NoError(t, err)
	TLSClientKeyFileName := TLSClientKey.Name()
	t.Logf("generating file %q", TLSClientKeyFileName)
	cmd := exec.Command("openssl", "genrsa", "-out", TLSClientKeyFileName, "2048")
	output, err := cmd.CombinedOutput()
	t.Log(string(output))
	require.NoError(t, err)

	TLSClientCert, err := os.CreateTemp(os.TempDir(), "TLSClientCert.*.crt")
	require.NoError(t, err)
	TLSClientCertFilename := TLSClientCert.Name()
	t.Logf("generating file %q", TLSClientCertFilename)
	cmd = exec.Command("openssl", "req", "-batch",
		"-subj", "/C=GB/ST=Yorks/L=York/O=MyCompany Ltd./OU=IT/CN=mysubdomain.mydomain.com",
		"-new", "-x509", "-sha256",
		"-key", TLSClientKeyFileName,
		"-out", TLSClientCertFilename,
		"-days", "365")
	output, err = cmd.CombinedOutput()
	t.Log(string(output))
	require.NoError(t, err)

	return TLSClientKeyFileName, TLSClientCertFilename
}

// TestExecutor_TLSOptions_From_File checks that TLS settings supplied
// as file paths yield three TLS options.
func TestExecutor_TLSOptions_From_File(t *testing.T) {
	TLSClientKeyFileName, TLSClientCertFilename := generateClientFile(t)

	e := Executor{
		IgnoreVerifySSL: true,
		TLSClientCert:   TLSClientCertFilename,
		TLSClientKey:    TLSClientKeyFileName,
		TLSRootCA:       "../../tests/http/tls/digicert-root-ca.crt",
	}
	opts, err := e.TLSOptions(context.Background())
	require.NoError(t, err)
	require.Len(t, opts, 3)
}

// TestExecutor_TLSOptions_From_String checks that TLS settings supplied
// as inline PEM content yield two TLS options.
func TestExecutor_TLSOptions_From_String(t *testing.T) {
	TLSClientKeyFileName, TLSClientCertFilename := generateClientFile(t)

	TLSClientCert, err := os.ReadFile(TLSClientCertFilename)
	require.NoError(t, err)
	TLSClientKey, err := os.ReadFile(TLSClientKeyFileName)
	require.NoError(t, err)
	TLSRootCA, err := os.ReadFile("../../tests/http/tls/digicert-root-ca.crt")
	require.NoError(t, err)

	e := Executor{
		TLSClientCert: string(TLSClientCert),
		TLSClientKey:  string(TLSClientKey),
		TLSRootCA:     string(TLSRootCA),
	}
	opts, err := e.TLSOptions(context.Background())
	require.NoError(t, err)
	require.Len(t, opts, 2)
}

// TestInterpolation_Of_String checks that variables placed in the
// context are interpolated into the request body file (including a
// variable that references another variable).
func TestInterpolation_Of_String(t *testing.T) {
	e := &Executor{
		Method:           "",
		URL:              "http://example.com",
		Path:             "",
		BodyFile:         "tests/http/bodyfile_with_interpolation",
		PreserveBodyFile: false,
		MultipartForm:    nil,
		Headers:          map[string]string{},
	}
	ctx := context.Background()
	keys := make(map[string]string)
	keys["fullName"] = "{{.name}} test"
	keys["name"] = "123"
	// venom reads the variable list from "vars" and each value from a
	// "var.<name>" context key.
	ctx = context.WithValue(ctx, venom.ContextKey("vars"), []string{"fullName", "name"})
	for k := range keys {
		ctx = context.WithValue(ctx, venom.ContextKey(fmt.Sprintf("var.%s", k)), keys[k])
	}
	vars := venom.AllVarsFromCtx(ctx)
	fmt.Println("vars: ", vars)
	require.Len(t, vars, 2)
	r, err := e.getRequest(ctx, "../../")
	require.NoError(t, err)
	defer r.Body.Close()
	b, err := io.ReadAll(r.Body)
	require.NoError(t, err)
	fmt.Printf("Output")
	fmt.Println(string(b))
	require.Equal(t, "{\n \"key\": \"123 test\"\n}", string(b))
}

// TestInterpolation_without_match_Of_String checks that an unresolved
// variable in the body file makes getRequest fail.
func TestInterpolation_without_match_Of_String(t *testing.T) {
	e := &Executor{
		Method:           "",
		URL:              "http://example.com",
		Path:             "",
		BodyFile:         "tests/http/bodyfile_with_interpolation",
		PreserveBodyFile: false,
		MultipartForm:    nil,
		Headers:          map[string]string{},
	}
	ctx := context.Background()
	keys := make(map[string]string)
	keys["fullName"] = "{{.name}} test"
	ctx = context.WithValue(ctx, venom.ContextKey("vars"), []string{"fullName"})
	for k := range keys {
		ctx = context.WithValue(ctx, venom.ContextKey(fmt.Sprintf("var.%s", k)), keys[k])
	}
	_, err := e.getRequest(ctx, "../../")
	require.Errorf(t, err, "unable to interpolate file due to unresolved variables {{.name}}")
}
package azure

import (
	"sync"

	"github.com/pkg/errors"

	typesazure "github.com/openshift/installer/pkg/types/azure"
)

// Metadata holds additional metadata for InstallConfig resources that
// does not need to be user-supplied (e.g. because it can be retrieved
// from external APIs).
type Metadata struct {
	session *Session
	client  *Client
	dnsCfg  *DNSConfig

	// CloudName indicates the Azure cloud environment (e.g. public, gov't).
	CloudName typesazure.CloudEnvironment `json:"cloudName,omitempty"`

	// ARMEndpoint indicates the resource management API endpoint used by AzureStack.
	ARMEndpoint string `json:"armEndpoint,omitempty"`

	// Credentials hold prepopulated Azure credentials.
	// At the moment the installer doesn't use it and reads credentials
	// from the file system, but external consumers of the package can
	// provide credentials. This is useful when we run the installer
	// as a service (Azure Red Hat OpenShift, for example): in this case
	// we do not want to rely on the filesystem or user input as we
	// serve multiple users with different credentials via a web server.
	Credentials *Credentials `json:"credentials,omitempty"`

	// mutex guards the lazy initialization of session, client and dnsCfg.
	mutex sync.Mutex
}

// NewMetadata initializes a new Metadata object.
func NewMetadata(cloudName typesazure.CloudEnvironment, armEndpoint string) *Metadata {
	return NewMetadataWithCredentials(cloudName, armEndpoint, nil)
}

// NewMetadataWithCredentials initializes a new Metadata object
// with prepopulated Azure credentials.
func NewMetadataWithCredentials(cloudName typesazure.CloudEnvironment, armEndpoint string, credentials *Credentials) *Metadata {
	return &Metadata{
		CloudName:   cloudName,
		ARMEndpoint: armEndpoint,
		Credentials: credentials,
	}
}

// Session holds an Azure session which can be used for Azure API calls
// during asset generation.
func (m *Metadata) Session() (*Session, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	return m.unlockedSession()
}

// unlockedSession lazily creates the session; m.mutex must be held by
// the caller.
func (m *Metadata) unlockedSession() (*Session, error) {
	if m.session == nil {
		var err error
		m.session, err = GetSessionWithCredentials(m.CloudName, m.ARMEndpoint, m.Credentials)
		if err != nil {
			return nil, errors.Wrap(err, "creating Azure session")
		}
	}
	return m.session, nil
}

// Client holds an Azure Client that implements calls to the Azure API.
func (m *Metadata) Client() (*Client, error) {
	// Hold the lock for the whole check-then-create so concurrent
	// callers cannot race on m.client (previously this field was
	// mutated without the lock). unlockedSession is used because
	// sync.Mutex is not reentrant - calling Session() here would
	// deadlock.
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if m.client == nil {
		ssn, err := m.unlockedSession()
		if err != nil {
			return nil, err
		}
		m.client = NewClient(ssn)
	}
	return m.client, nil
}

// DNSConfig holds an Azure DNSConfig Client that implements calls to the Azure API.
func (m *Metadata) DNSConfig() (*DNSConfig, error) {
	// Same locking discipline as Client: guard the lazy init of dnsCfg.
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if m.dnsCfg == nil {
		ssn, err := m.unlockedSession()
		if err != nil {
			return nil, err
		}
		m.dnsCfg = NewDNSConfig(ssn)
	}
	return m.dnsCfg, nil
}
package entity

import (
	"strconv"

	"boiler/pkg/entity"
)

// NewUser return a new User entity
func NewUser(u *entity.User) *User {
	id := strconv.FormatInt(u.ID, 10)
	user := User{
		ID:   id,
		Name: u.Name,
	}
	return &user
}

// NewEmail return a new Email entity
func NewEmail(e *entity.Email) *Email {
	owner := User{ID: strconv.FormatInt(e.UserID, 10)}
	email := Email{
		ID:      strconv.FormatInt(e.ID, 10),
		Address: e.Address,
		User:    &owner,
	}
	return &email
}
// Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.

package action

import (
	"math/big"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/pkg/errors"

	"github.com/iotexproject/iotex-core/pkg/keypair"
	"github.com/iotexproject/iotex-core/pkg/util/byteutil"
	"github.com/iotexproject/iotex-core/pkg/version"
	"github.com/iotexproject/iotex-core/protogen/iotextypes"
)

const (
	// VoteIntrinsicGas represents the intrinsic gas for vote
	VoteIntrinsicGas = uint64(10000)
)

// Compile-time check that *Vote satisfies hasDestination.
var _ hasDestination = (*Vote)(nil)

// Vote defines the struct of account-based vote
type Vote struct {
	AbstractAction

	timestamp *timestamp.Timestamp
	votee     string
}

// NewVote returns a Vote instance
func NewVote(nonce uint64, voteeAddress string, gasLimit uint64, gasPrice *big.Int) (*Vote, error) {
	return &Vote{
		AbstractAction: AbstractAction{
			version:  version.ProtocolVersion,
			nonce:    nonce,
			gasLimit: gasLimit,
			gasPrice: gasPrice,
		},
		votee: voteeAddress,
	}, nil
}

// VoterPublicKey returns the voter's public key
func (v *Vote) VoterPublicKey() keypair.PublicKey {
	return v.SrcPubkey()
}

// Votee returns the votee's address
func (v *Vote) Votee() string {
	return v.votee
}

// Destination returns the votee's address
func (v *Vote) Destination() string {
	return v.Votee()
}

// TotalSize returns the total size of this Vote
func (v *Vote) TotalSize() uint32 {
	return v.BasicActionSize() + uint32(8) // TimestampSizeInBytes
}

// ByteStream returns a raw byte stream of this Transfer
func (v *Vote) ByteStream() []byte {
	// TODO: remove pbVote.Timestamp from the proto because we never set it
	return byteutil.Must(proto.Marshal(v.Proto()))
}

// Proto converts Vote to protobuf's Action
func (v *Vote) Proto() *iotextypes.Vote {
	return &iotextypes.Vote{
		VoteeAddress: v.votee,
		Timestamp:    v.timestamp,
	}
}

// LoadProto converts a protobuf's Action to Vote
func (v *Vote) LoadProto(pbAct *iotextypes.Vote) error {
	if pbAct == nil {
		return errors.New("empty action proto to load")
	}
	if v == nil {
		return errors.New("nil action to load proto")
	}
	// Reset the receiver before copying fields from the proto message.
	*v = Vote{}

	v.votee = pbAct.GetVoteeAddress()
	v.timestamp = pbAct.GetTimestamp()
	return nil
}

// IntrinsicGas returns the intrinsic gas of a vote
func (v *Vote) IntrinsicGas() (uint64, error) {
	return VoteIntrinsicGas, nil
}

// Cost returns the total cost of a vote
func (v *Vote) Cost() (*big.Int, error) {
	intrinsicGas, err := v.IntrinsicGas()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get intrinsic gas for the vote")
	}
	// cost = gasPrice * intrinsicGas
	voteFee := big.NewInt(0).Mul(v.GasPrice(), big.NewInt(0).SetUint64(intrinsicGas))
	return voteFee, nil
}
package search

import (
	"encoding/json"
	"os"
)

// dataFile is the relative path to the JSON file containing the feeds.
const dataFile = "data/data.json"

// Feed describes one feed entry from the data file.
type Feed struct {
	Name string `json:"site"`
	URI  string `json:"link"`
	Type string `json:"type"`
}

// RetrieveFeeds reads and deserializes the feed data file.
func RetrieveFeeds() ([]*Feed, error) {
	file, err := os.Open(dataFile)
	if err != nil {
		return nil, err
	}

	// Schedule the file to be closed once when the function returns.
	defer file.Close()

	// Decode the file into a slice of pointers to Feed values.
	var feeds []*Feed
	err = json.NewDecoder(file).Decode(&feeds)

	// The caller is responsible for checking err.
	return feeds, err
}
/**
*@Author: haoxiongxiao
*@Date: 2019/3/18
*@Description: CREATE GO FILE main
*/
package main

import (
	"testing"
)

// Test_main is a placeholder; main currently has no observable
// behavior to assert.
func Test_main(t *testing.T) {

}
package has_cycle // ListNode provides interface for ListNode struct type ListNode interface { SetNext(*listNode) } type listNode struct { val int next *listNode } // SetNext sets $node to current listNode func (l *listNode) SetNext(node *listNode) { l.next = node } // NewListNode ... func NewListNode(val int) *listNode { return &listNode{ val: val, next: nil, } }
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os"
)

var data = `
{
	"user": "Name",
	"type": "deposit",
	"amount": 10.2
}
`

// Request is a bank transactions
// (struct fields must be exported for encoding/json; field tags map
// them to the wire names).
type Request struct {
	Login  string  `json:"user"`
	Type   string  `json:"type"`
	Amount float64 `json:"amount"`
}

// main2 decodes a request from an in-memory stream, computes the new
// balance, and encodes the response to stdout.
func main2() {
	// Simulate a file/socket carrying the request payload.
	reader := bytes.NewBufferString(data)

	// Decode request
	var req Request
	decoder := json.NewDecoder(reader)
	if err := decoder.Decode(&req); err != nil {
		log.Fatalf("error: cannot decode -%s", err)
	}
	fmt.Printf("got: %+v\n", &req)

	// Create response
	prevBalance := 850.0

	// A map of empty interface values can hold any type.
	resp := map[string]interface{}{
		"ok":      true,
		"balance": prevBalance + req.Amount,
	}

	// Encode response
	encoder := json.NewEncoder(os.Stdout)
	if err := encoder.Encode(resp); err != nil {
		log.Fatalf("error: cannot encode -%s", err)
	}
}
package user import ( "github.com/BRO3886/findvity-backend/pkg" "github.com/BRO3886/findvity-backend/pkg/group" ) //Gender for user type Gender string const ( //Male enum Male Gender = "Male" //Female enum Female = "Female" //NonBinary enum NonBinary = "Non-Binary" //Undisclosed enum Undisclosed = "Prefer not to disclose" ) //User struct for user details type User struct { pkg.Base Name string `json:"name"` Username string `json:"username"` Phone string `json:"phone"` Age int `json:"age"` Sex Gender `json:"gender"` Password string `json:"password"` ProfileImgURL string `json:"profile_img_url"` Verified bool `json:"verified"` Tags string `json:"tags"` Groups []group.Group `json:"groups"` }
package cmd

import (
	"context"
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/grrtrr/clcv2"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

// deleteFlags determine how to perform deletions
var deleteFlags struct {
	recurse bool // also delete sub-directories of a group
	keep    bool // keep specified group directories, delete contents only
}

func init() {
	delete.Flags().BoolVarP(&deleteFlags.recurse, "recurse", "r", true,
		"When deleting a group directory, also delete all of its sub-directories")
	delete.Flags().BoolVarP(&deleteFlags.keep, "keep-directory", "k", false,
		"Keep any specified group directories (only delete their contents)")
	Root.AddCommand(delete)
}

var delete = &cobra.Command{
	Use:     "rm [group|server [group|server]...]",
	Aliases: []string{"remove", "del", "delete", "clean-up", "rd"},
	Short:   "Delete server(s)/group(s) (CAUTION)",
	Long:    "Completely and irreversibly removes servers/groups - USE WITH CAUTION",
	PreRunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return errors.Errorf("Need at least 1 server or group to remove")
		}
		// An empty argument would match everything in the location.
		for _, arg := range args {
			if arg == "" {
				return errors.Errorf("%s: you requested %q, which means EVERYTHING IN %s - refusing to continue",
					cmd.Name(), arg, strings.ToUpper(conf.Location))
			}
		}
		return nil
	},
	RunE: func(cmd *cobra.Command, args []string) error {
		var root *clcv2.Group
		var eg = new(errgroup.Group)

		groups, servers, err := resolveNames(args)
		if err != nil {
			return errors.Errorf("%s: %s", cmd.Name(), err)
		}

		if len(groups) > 0 {
			if conf.Location == "" {
				return errors.Errorf("Location argument (-l) is required in order to traverse nested groups.")
			} else if root, err = client.GetGroups(conf.Location); err != nil {
				log.Fatalf("Failed to query group structure in %s: %s", conf.Location, err)
			}
		}

		for _, srv := range servers {
			deleteSingleServer(eg, srv)
		}

		for _, grp := range groups {
			grp := grp // capture a per-iteration copy for the goroutine
			eg.Go(func() error {
				if grp == "" {
					return errors.Errorf("Not accepting %q, as it means to delete everything in %s", grp, conf.Location)
				}
				// FIX: the group node was previously assigned to a
				// variable declared in RunE and shared by all
				// goroutines (a data race); it is now local to this
				// closure.
				g := clcv2.FindGroupNode(root, func(g *clcv2.Group) bool { return g.Id == grp })
				if g == nil {
					return errors.Errorf("Failed to look up group %q in %s - is the location correct?", grp, conf.Location)
				}

				groupDir, err := clcv2.WalkGroupHierarchy(context.TODO(), g, nil) // nil callback here, so will process fast
				if err != nil {
					return errors.Errorf("Failed to process %s group hierarchy in %s: %s", grp, conf.Location, err)
				}

				if !deleteFlags.keep && deleteFlags.recurse {
					// wipe this directory and all of its children
					deleteSingleGroup(eg, groupDir)
				} else if len(groupDir.Servers) == 0 && (!deleteFlags.recurse || len(groupDir.Groups) == 0) {
					if l := len(groupDir.Groups); l == 0 {
						log.Printf("Nothing to delete in %s - directory empty", groupDir.Name)
					} else if !deleteFlags.recurse {
						log.Printf("Nothing to delete in %s (will not delete %d subdirectories since --recurse=false)", groupDir.Name, l)
					}
				} else {
					for _, srv := range groupDir.Servers {
						deleteSingleServer(eg, srv)
					}
					if deleteFlags.recurse {
						for _, grp := range groupDir.Groups {
							deleteSingleGroup(eg, grp)
						}
					}
				}
				return nil
			})
		}
		if err = eg.Wait(); err != nil {
			fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
		}
		return nil
	},
}

// deleteSingleServer is a helper function to delete server @srv within error group @eg
func deleteSingleServer(eg *errgroup.Group, srv string) {
	eg.Go(func() error {
		if reqID, err := client.DeleteServer(srv); err != nil {
			fmt.Fprintf(os.Stderr, "ERROR deleting server %s: %s\n", srv, err)
		} else if reqID != "" {
			log.Printf("Deleting %s: %s", srv, reqID)
			client.PollStatusFn(reqID, intvl, func(s clcv2.QueueStatus) {
				log.Printf("Deleting %s: %s", srv, s)
			})
		} else {
			// FIX: replaced a leftover debug print ("schase") with a
			// meaningful message for the no-status-ID case.
			log.Printf("Deleting %s: no status ID returned", srv)
		}
		// For single items we do not feed back the error to the error group, just log it.
		return nil
	})
}

// deleteSingleGroup is analogous to deleteSingleServer group, deleting a group @g with all of its children
func deleteSingleGroup(eg *errgroup.Group, grp *clcv2.GroupInfo) {
	eg.Go(func() error {
		if reqID, err := client.DeleteGroup(grp.ID); err != nil {
			fmt.Fprintf(os.Stderr, "ERROR deleting group %s (%s): %s\n", grp.Name, grp.ID, err)
		} else if reqID != "" {
			log.Printf("Deleting %s (%s): %s", grp.Name, grp.ID, reqID)
			client.PollStatusFn(reqID, intvl, func(s clcv2.QueueStatus) {
				log.Printf("Deleting %s (%s): %s", grp.Name, grp.ID, s)
			})
		}
		// For single items we do not feed back the error to the error group, just log it.
		return nil
	})
}
package ehttp

import (
	"bytes"
	"errors"
	"fmt"
	"net/http"
	"strconv"
	"testing"
	"time"

	"encoding/json"
	"io/ioutil"

	"github.com/enjoy-web/ehttp/swagger"
	"github.com/gin-gonic/gin"
)

// ErrorMessage is the JSON error payload returned by the demo handlers.
type ErrorMessage struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Details string `json:"detail"`
}

// Error codes used by the demo handlers.
const (
	ErrorCodeParameter = iota
	ErrorCodeReadBody
	ErrorCodeUnmarshalJSON
)

// ErrorCodes maps each error code to its human-readable message.
var ErrorCodes = map[int]string{
	ErrorCodeParameter:     "Parameter error",
	ErrorCodeReadBody:      "Read body error",
	ErrorCodeUnmarshalJSON: "Unmarshal JSON error",
}

// newErrorMessage builds an ErrorMessage for code, attaching err's text when non-nil.
func newErrorMessage(code int, err error) *ErrorMessage {
	var errStr string
	if err != nil {
		errStr = err.Error()
	}
	return &ErrorMessage{code, ErrorCodes[code], errStr}
}

// newCodeErrorMarkDownDoc renders the error-code table as a markdown document.
// NOTE(review): map iteration order is random, so row order varies per run.
func newCodeErrorMarkDownDoc(errorCodes map[int]string) string {
	doc := "**Error code reference table**\n"
	doc += "| code | message |\n"
	doc += "| --- | --- |\n"
	for code, message := range errorCodes {
		doc += fmt.Sprintf("| %d | %s |\n", code, message)
	}
	return doc
}

// BookImageUrls holds cover images in three sizes.
// NOTE(review): Large is tagged json:"larger" — looks like a typo, but both
// ends of these tests share this struct, so the wire format is self-consistent.
type BookImageUrls struct {
	Small  string `json:"small"`
	Medium string `json:"medium"`
	Large  string `json:"larger"`
}

// Book is the demo resource served by the test API.
type Book struct {
	ID        string        `json:"id"`
	Title     string        `json:"title"`
	Summary   string        `json:"summary"`
	Authors   []string      `json:"authors"`
	Images    BookImageUrls `json:"images"`
	Pages     int           `json:"pages"`
	Price     float32       `json:"price"`
	HasReview bool          `json:"has_review"`
}

// Books is a paginated list of Book.
type Books struct {
	Total int64   `json:"total" desc:"total of zoos"`
	Start int64   `json:"start"`
	Count int64   `json:"count"`
	Books []*Book `json:"books" desc:"books"`
}

// getInt64InQuery reads query parameter name as an int64, failing when absent.
func getInt64InQuery(c *gin.Context, name string) (int64, error) {
	str, ok := c.GetQuery(name)
	if !ok {
		return 0, errors.New("miss parameter " + name + " InQuery")
	}
	return strconv.ParseInt(str, 10, 64)
}

// DocGETBook documents GET /books/:id.
var DocGETBook = &APIDocCommon{
	Summary:  "Get book info by id",
	Produces: []string{Application_Json},
	Parameters: map[string]Parameter{
		"id":      Parameter{InPath: &ValueInfo{Type: "string"}},
		"version": Parameter{InHeader: &ValueInfo{Type: "string", Desc: "the version of api"}},
	},
	Responses: map[int]Response{
		200: Response{
			Description: "successful operation",
			Model:       &Book{},
		},
		400: Response{
			Description: "failed operation",
			Model:       &ErrorMessage{},
		},
	},
}

// HandleGETBook returns a canned demo book for the requested id.
func HandleGETBook(c *gin.Context, err error) {
	if err != nil {
		c.JSON(400, &ErrorMessage{-1, "parameter error", err.Error()})
		return
	}
	id := c.Param("id")
	book := &Book{
		ID:      id,
		Title:   "Demo book",
		Summary: "This is a demo book",
		Authors: []string{"John"},
		Images: BookImageUrls{
			Small:  "small-url",
			Medium: "medium-url",
			Large:  "large-url",
		},
		Pages:     121,
		Price:     40.50,
		HasReview: true,
	}
	c.JSON(200, book)
}

// DocPostBook documents POST /books.
var DocPostBook = &APIDocCommon{
	Summary:  "new a book",
	Produces: []string{Application_Json},
	Consumes: []string{Application_Json},
	Parameters: map[string]Parameter{
		"version": Parameter{InHeader: &ValueInfo{Type: "string", Desc: "the version of api"}},
	},
	Request: &Request{
		Description: "the book info",
		Model:       &Book{},
	},
	Responses: map[int]Response{
		200: Response{
			Description: "successful operation",
			Model:       &Book{},
		},
		400: Response{
			Description: "failed operation",
			Model:       &ErrorMessage{},
		},
	},
}

// HandlePostBook echoes the posted book back to the caller.
func HandlePostBook(c *gin.Context, err error) {
	if err != nil {
		c.JSON(400, newErrorMessage(ErrorCodeParameter, err))
		return
	}
	body, err := ioutil.ReadAll(c.Request.Body)
	if err != nil {
		c.JSON(400, newErrorMessage(ErrorCodeReadBody, err))
		return
	}
	book := &Book{}
	err = json.Unmarshal(body, book)
	if err != nil {
		c.JSON(400, newErrorMessage(ErrorCodeUnmarshalJSON, err))
		return
	}
	c.JSON(200, book)
}

// DocDELETEBook documents DELETE /books/:id.
var DocDELETEBook = &APIDocCommon{
	Summary:  "delete book info by id",
	Produces: []string{Application_Json},
	Parameters: map[string]Parameter{
		"id":      Parameter{InPath: &ValueInfo{Type: "string"}},
		"version": Parameter{InHeader: &ValueInfo{Type: "string", Desc: "the version of api"}},
	},
	Responses: map[int]Response{
		200: Response{
			Description: "successful operation",
			Model:       &Book{},
		},
		400: Response{
			Description: "failed operation",
			Model:       &ErrorMessage{},
		},
	},
}

// HandleDELETEBook returns the (demo) book that would have been deleted.
func HandleDELETEBook(c *gin.Context, err error) {
	if err != nil {
		c.JSON(400, &ErrorMessage{-1, "parameter error", err.Error()})
		return
	}
	id := c.Param("id")
	book := &Book{
		ID:      id,
		Title:   "Demo book",
		Summary: "This is a demo book",
		Authors: []string{"John"},
		Images: BookImageUrls{
			Small:  "small-url",
			Medium: "medium-url",
			Large:  "large-url",
		},
		Pages:     121,
		Price:     40.50,
		HasReview: true,
	}
	c.JSON(200, book)
}

// DocGETBooks documents GET /books (list endpoint).
// NOTE(review): Summary reads "Get book info by id" — copy-paste from
// DocGETBook; left unchanged because it is emitted into the swagger output.
var DocGETBooks = &APIDocCommon{
	Summary:  "Get book info by id",
	Produces: []string{Application_Json},
	Parameters: map[string]Parameter{
		"version":   Parameter{InHeader: &ValueInfo{Type: "string", Desc: "the version of api"}},
		"limit":     Parameter{InQuery: &ValueInfo{Type: "int64", Min: "0", Max: "1000", Required: true, Desc: "the limit of searching"}},
		"offset":    Parameter{InQuery: &ValueInfo{Type: "int64", Required: true, Desc: "the offset of searching"}},
		"sort":      Parameter{InQuery: &ValueInfo{Type: "string", Enum: "id -id price -price", Desc: "sort of searching"}},
		"min_price": Parameter{InQuery: &ValueInfo{Type: "float32", Min: "0", Desc: "minimum price"}},
		// FIX: Desc previously said "minimum price" (copy-paste error).
		"max_price": Parameter{InQuery: &ValueInfo{Type: "float32", Min: "0", Desc: "maximum price"}},
	},
	Responses: map[int]Response{
		200: Response{
			Description: "successful operation",
			Model:       &Books{},
		},
		400: Response{
			Description: "failed operation",
			Model:       &ErrorMessage{},
		},
	},
}

// HandleGETBooks returns `limit` synthetic books starting at `offset`.
func HandleGETBooks(c *gin.Context, err error) {
	if err != nil {
		c.JSON(400, newErrorMessage(ErrorCodeParameter, err))
		return
	}
	limit, err := getInt64InQuery(c, "limit")
	if err != nil {
		c.JSON(400, newErrorMessage(ErrorCodeParameter, err))
		return
	}
	offset, err := getInt64InQuery(c, "offset")
	if err != nil {
		c.JSON(400, newErrorMessage(ErrorCodeParameter, err))
		return
	}
	_books := []*Book{}
	for i := int64(0); i < limit; i++ {
		_books = append(_books, &Book{
			ID:      fmt.Sprintf("book_%d", i+offset),
			Title:   "Demo book1",
			Summary: "This is a demo book",
			Authors: []string{"John"},
			Images: BookImageUrls{
				Small:  "small-url",
				Medium: "medium-url",
				Large:  "large-url",
			},
			Pages:     121,
			Price:     40.50,
			HasReview: true,
		})
	}
	c.JSON(200, &Books{
		Total: offset + limit + 100,
		Start: offset,
		Count: limit,
		Books: _books,
	})
}

var conf = &Config{
	Schemes:            []Scheme{SchemeHTTP, SchemeHTTPS},
	BasePath:           "/dev",
	Version:            "v1",
	Title:              " demo APIS",
	Description:        "demo APIS\n\n" + newCodeErrorMarkDownDoc(ErrorCodes),
	AllowOrigin:        true,
	OpenAPIDocumentURL: true,
}

func TestEngin(t *testing.T) {
	testEngin(t)
	testEnginWithOrigins(t)
}

// testEngin exercises the happy paths and the parameter-validation errors.
// FIX: every URL previously contained a stray ':' after the port number
// ("http://127.0.0.1:18000:/dev/..."), which is not a valid URL and makes
// http.Get fail before the request is ever sent.
func testEngin(t *testing.T) {
	runServer(t, conf, "127.0.0.1:18000")
	if err := testGetBook("http://127.0.0.1:18000/dev/books/123456"); err != nil {
		testError(t, err)
	}
	if err := testGetBooks("http://127.0.0.1:18000/dev/books?limit=2&offset=10&sort=id&min_price=9.99&max_price=30.99"); err != nil {
		testError(t, err)
	}
	if err := testPost("http://127.0.0.1:18000/dev/books"); err != nil {
		testError(t, err)
	}
	if err := testGetSwaggerJSON("http://127.0.0.1:18000/dev/docs/swagger.json"); err != nil {
		testError(t, err)
	}
	if err := testOptions(GET, "http://127.0.0.1:18000/dev/books/123456", "http://xxx.example"); err != nil {
		testError(t, err)
	}
	// "limit": Parameter{InQuery: &ValueInfo{Type: "int64", Min: "0", Max: "1000", Required: true, Desc: "the limit of searching"}},
	// err: parameter limit -1 < Min (0)
	getBooksURL := "http://127.0.0.1:18000/dev/books?limit=-1&offset=10"
	if err := testGetBooks(getBooksURL); err != nil {
		testLog(t, err)
	} else {
		testError(t, "testGetBooks("+getBooksURL+") err should not be nil")
	}
	// err: parameter limit 1001 > Max (1000)
	getBooksURL = "http://127.0.0.1:18000/dev/books?limit=1001&offset=10"
	if err := testGetBooks(getBooksURL); err != nil {
		testLog(t, err)
	} else {
		testError(t, "testGetBooks("+getBooksURL+") err should not be nil")
	}
	// "sort": Parameter{InQuery: &ValueInfo{Type: "string", Enum: "id -id price -price", Desc: "sort of searching"}},
	// err: paramter sort invalid enum type
	getBooksURL = "http://127.0.0.1:18000/dev/books?limit=5&offset=10&sort=title"
	if err := testGetBooks(getBooksURL); err != nil {
		testLog(t, err)
	} else {
		testError(t, "testGetBooks("+getBooksURL+") err should not be nil")
	}
}

// runServer registers the demo routes on a fresh engine and serves them in
// the background, giving the listener one second to come up.
func runServer(t *testing.T, conf *Config, addr ...string) {
	go func() {
		router := NewEngine(conf)
		err := router.GET("/books/:id", DocGETBook, HandleGETBook)
		if err != nil {
			testError(t, err)
		}
		err = router.GET("/books", DocGETBooks, HandleGETBooks)
		if err != nil {
			testError(t, err)
		}
		err = router.POST("/books", DocPostBook, HandlePostBook)
		if err != nil {
			testError(t, err)
		}
		// I'm lazy
		err = router.PUT("/books", DocPostBook, HandlePostBook)
		if err != nil {
			testError(t, err)
		}
		err = router.PATCH("/books", DocPostBook, HandlePostBook)
		if err != nil {
			testError(t, err)
		}
		err = router.DELETE("/books/:id", DocDELETEBook, HandleDELETEBook)
		if err != nil {
			testError(t, err)
		}
		router.Run(addr...)
	}()
	t.Log("waiting 1 second for server startup")
	time.Sleep(1 * time.Second)
}

// testEnginWithOrigins checks CORS preflight handling against the allow-list.
func testEnginWithOrigins(t *testing.T) {
	conf := &Config{
		Schemes:            []Scheme{SchemeHTTP, SchemeHTTPS},
		BasePath:           "/dev",
		Version:            "v1",
		Title:              " demo APIS",
		Description:        "demo APIS\n\n" + newCodeErrorMarkDownDoc(ErrorCodes),
		AllowOrigin:        true,
		Origins:            []string{"http://xxx.example"},
		OpenAPIDocumentURL: false,
	}
	runServer(t, conf, "127.0.0.1:18001")
	if err := testOptions(GET, "http://127.0.0.1:18001/dev/books/123456", "http://xxx.example"); err != nil {
		testError(t, err)
	}
	if err := testOptions(GET, "http://127.0.0.1:18001/dev/books/123456", "http://YYY.example"); err != nil {
		testLog(t, err)
	} else {
		testError(t, `testOptions("http://YYY.example") err should not be nil`)
	}
}

// testEnginWithOpenAPIDocumentURL checks a custom swagger document URL.
// NOTE(review): this helper is never invoked by any Test* function.
func testEnginWithOpenAPIDocumentURL(t *testing.T) {
	conf := &Config{
		Schemes:            []Scheme{SchemeHTTP, SchemeHTTPS},
		BasePath:           "/dev",
		Version:            "v1",
		Title:              " demo APIS",
		Description:        "demo APIS\n\n" + newCodeErrorMarkDownDoc(ErrorCodes),
		OpenAPIDocumentURL: true,
		APIDocumentURL:     "/swagger/doc.json",
	}
	runServer(t, conf, "127.0.0.1:18002")
	if err := testGetSwaggerJSON("http://127.0.0.1:18002/dev/swagger/doc.json"); err != nil {
		testError(t, err)
	}
}

// testGetBook GETs url and decodes either a Book (200) or an ErrorMessage (400).
func testGetBook(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil { // FIX: the read error was previously ignored
		return err
	}
	switch resp.StatusCode {
	case 200:
		book := &Book{}
		return json.Unmarshal(body, book)
	case 400:
		errMessage := &ErrorMessage{}
		if err := json.Unmarshal(body, errMessage); err != nil {
			return err
		}
		return errors.New(string(body))
	default:
		return errors.New("server error")
	}
}

// testGetBooks GETs url and decodes either a Books list (200) or an ErrorMessage (400).
func testGetBooks(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil { // FIX: the read error was previously ignored
		return err
	}
	switch resp.StatusCode {
	case 200:
		books := &Books{}
		return json.Unmarshal(body, books)
	case 400:
		errMessage := &ErrorMessage{}
		if err := json.Unmarshal(body, errMessage); err != nil {
			return err
		}
		return errors.New(string(body))
	default:
		return errors.New("server error")
	}
}

// testPost POSTs a demo book and verifies the echoed response.
func testPost(url string) error {
	newBook := &Book{
		ID:      "01213342",
		Title:   "Demo book",
		Summary: "This is a demo book",
		Authors: []string{"John"},
		Images: BookImageUrls{
			Small:  "small-url",
			Medium: "medium-url",
			Large:  "large-url",
		},
		Pages:     121,
		Price:     40.50,
		HasReview: true,
	}
	b, err := json.Marshal(newBook)
	if err != nil {
		return err
	}
	requestBody := bytes.NewBuffer(b)
	resp, err := http.Post(url, Application_Json_utf8, requestBody)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	responseBody, err := ioutil.ReadAll(resp.Body)
	if err != nil { // FIX: the read error was previously ignored
		return err
	}
	switch resp.StatusCode {
	case 200:
		book := &Book{}
		if err := json.Unmarshal(responseBody, book); err != nil {
			return err
		}
		return nil
	case 400:
		errMessage := &ErrorMessage{}
		if err := json.Unmarshal(responseBody, errMessage); err != nil {
			return err
		}
		return errors.New(string(responseBody))
	default:
		return errors.New("server error")
	}
}

// testGetSwaggerJSON fetches url and checks it decodes as a swagger document.
func testGetSwaggerJSON(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil { // FIX: the read error was previously ignored
		return err
	}
	switch resp.StatusCode {
	case 200:
		doc := &swagger.Swagger{}
		if err := json.Unmarshal(body, doc); err != nil {
			return err
		}
		return nil
	default:
		return fmt.Errorf("http statusCode should not be %d", resp.StatusCode)
	}
}

// testOptions sends a CORS preflight request for method/url from origin.
func testOptions(method, url, origin string) error {
	req, err := http.NewRequest(OPTIONS, url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
	req.Header.Set("Accept-Language", "en-us,en;q=0.5")
	req.Header.Set("Accept-Encoding", "gzip,deflate")
	req.Header.Set("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7")
	req.Header.Set("Connection", "keep-alive")
	req.Header.Set("Origin", origin)
	req.Header.Set("Access-Control-Request-Method", method)
	req.Header.Set("Access-Control-Request-Headers", "Content-Type, version")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return fmt.Errorf("resp.StatusCode(%d) != 200", resp.StatusCode)
	}
	return nil
}

// handleFunc is a do-nothing handler used by the invalid-doc tests.
func handleFunc(c *gin.Context, err error) {
}

func TestInvalidDoc(t *testing.T) {
	missParamterInDoc(t)
	missHandlerFunc(t)
}

// missParamterInDoc expects registration to fail when the doc omits the :id parameter.
func missParamterInDoc(t *testing.T) {
	router := NewEngine(conf)
	err := router.GET("/books/:id", &APIDocCommon{
		Summary:  "Get book info by id",
		Produces: []string{Application_Json},
		Responses: map[int]Response{
			200: Response{
				Description: "successful operation",
				Model:       &Book{},
			},
			400: Response{
				Description: "failed operation",
				Model:       &ErrorMessage{},
			},
		},
	}, handleFunc)
	if err != nil {
		testLog(t, err)
	} else {
		testError(t, "err should not be nil")
	}
}

// missHandlerFunc expects registration to fail when the handler is nil.
func missHandlerFunc(t *testing.T) {
	router := NewEngine(conf)
	err := router.GET("/books/:id", &APIDocCommon{
		Summary:  "Get book info by id",
		Produces: []string{Application_Json},
		Parameters: map[string]Parameter{
			"id":      Parameter{InPath: &ValueInfo{Type: "string"}},
			"version": Parameter{InHeader: &ValueInfo{Type: "string", Desc: "the version of api"}},
		},
		Responses: map[int]Response{
			200: Response{
				Description: "successful operation",
				Model:       &Book{},
			},
			400: Response{
				Description: "failed operation",
				Model:       &ErrorMessage{},
			},
		},
	}, nil)
	if err != nil {
		testLog(t, err)
	} else {
		testError(t, "err should not be nil")
	}
}

func TestEngine_GetSwaggerJSONDocument(t *testing.T) {
	router := NewEngine(conf)
	if _, err := router.GetSwaggerJSONDocument(); err != nil {
		testError(t, err)
	}
}

func TestEngine_GetSwaggerYAMLDocument(t *testing.T) {
	router := NewEngine(conf)
	if _, err := router.GetSwaggerYAMLDocument(); err != nil {
		testError(t, err)
	}
}
package dictparser

import (
	"regexp"
	"sort"
	"strings"
)

// pair couples a word with its occurrence count.
type pair struct {
	Key   string
	Value int
}

var regex *regexp.Regexp

func init() {
	regex = regexp.MustCompile("[^a-zA-Z0-9А-Яа-я]+")
}

// Top10 returns the (at most) ten most frequent words of input,
// most frequent first. Words are compared case-insensitively.
func Top10(input string) []string {
	// Count case-folded occurrences of every non-empty token.
	counts := map[string]int{}
	for _, token := range regex.Split(input, -1) {
		if token == "" {
			continue
		}
		counts[strings.ToLower(token)]++
	}

	// Rank words by descending count.
	ranked := make([]pair, 0, len(counts))
	for word, n := range counts {
		ranked = append(ranked, pair{word, n})
	}
	sort.Slice(ranked, func(a, b int) bool {
		return ranked[a].Value > ranked[b].Value
	})

	// Keep only the ten most frequent entries.
	if len(ranked) > 10 {
		ranked = ranked[:10]
	}

	top := make([]string, len(ranked))
	for i, p := range ranked {
		top[i] = p.Key
	}
	return top
}
package goSolution

import "sort"

// searchRange returns the first and last index of target in the sorted slice
// nums, or []int{-1, -1} when target is absent. It uses two binary searches,
// so it runs in O(log n).
//
// FIX: removed the dead `0 > l` check (sort.SearchInts never returns a
// negative index), dropped the redundant else-after-return, and skips the
// second search entirely when target is not present.
func searchRange(nums []int, target int) []int {
	left := sort.SearchInts(nums, target)
	if left == len(nums) || nums[left] != target {
		return []int{-1, -1}
	}
	// SearchInts(nums, target+1) is the index one past the last occurrence.
	right := sort.SearchInts(nums, target+1)
	return []int{left, right - 1}
}
// Package io provides small io.Writer helpers.
// NOTE(review): the package name shadows the standard library "io";
// consider renaming the package (interface-level change, not done here).
package io

import (
	"io"
)

// DebugWriter wraps w in a writer intended for debugging; it currently
// forwards every Write to w unchanged.
func DebugWriter(w io.Writer) io.Writer {
	return &debugWriter{w}
}

// debugWriter decorates an io.Writer.
// FIX: renamed from the non-idiomatic snake_case `debug_writer`
// (Go names use MixedCaps, never underscores).
type debugWriter struct {
	io.Writer
}

// Write forwards p to the wrapped writer and returns its result unchanged.
// It is the hook where debug instrumentation (logging, tracing) can be added.
func (d *debugWriter) Write(p []byte) (n int, err error) {
	return d.Writer.Write(p)
}
package main

import (
	"fmt"
	"io/ioutil"
	"nitlev/adventofcode2020/day4/validation"
	"strings"
)

// isSeparator reports whether r separates fields inside a passport record.
func isSeparator(r rune) bool {
	return r == '\n' || r == ' '
}

// mandatoryFields lists the keys every valid passport must carry.
var mandatoryFields = []string{
	"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid",
}

// validFields additionally allows the optional "cid" key.
var validFields = append(mandatoryFields, "cid")

// Passport holds a few data about a passport line from the input file
type Passport map[string]string

// readInput parses filename into passports. Records are separated by blank
// lines; fields by spaces or newlines; each field is "key:value".
func readInput(filename string) []Passport {
	f, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	lines := strings.Split(string(f), "\n\n")
	passports := make([]Passport, 0, len(lines))
	for _, line := range lines {
		fields := strings.FieldsFunc(line, isSeparator)
		p := make(map[string]string)
		for _, field := range fields {
			// FIX: SplitN keeps values that themselves contain ':' intact,
			// and the length guard prevents a panic on a field with no ':'
			// (the original indexed s[1] unconditionally).
			s := strings.SplitN(field, ":", 2)
			if len(s) != 2 {
				continue // malformed field: skip rather than panic
			}
			p[s[0]] = s[1]
		}
		passports = append(passports, p)
	}
	return passports
}

// IsValid checks that all required fields are found in the passport info
// and that each one passes the field-specific validation rules.
func IsValid(passport Passport) bool {
	for _, field := range mandatoryFields {
		value, found := passport[field]
		if !found {
			return false
		}
		if !validation.IsFieldValid(field, value) {
			return false
		}
	}
	return true
}

// countValidPassports returns how many of the passports pass IsValid.
func countValidPassports(passports []Passport) int {
	n := 0
	for _, p := range passports {
		if IsValid(p) {
			n++
		}
	}
	return n
}

func main() {
	passports := readInput("input.txt")
	n := countValidPassports(passports)
	fmt.Println(n)
}
package main // Add func Add(a, b int) int { if b == 0 { return a } return Add(a^b, (a&b)<<1) } // Minus func Minus(a, b int) int { var i int = 1 for i > 0 && ((b & i) == 0) { i <<= 1 } for i > 0 { i <<= 1 b ^= i } return Add(a, b) } // Multi func Multi(a, b int) int { var ( ans int = 0 i int = 1 ) for i > 0 { if b&i > 0 { ans = Add(ans, a) } i <<= 1 a <<= 1 } return ans } // Div func Div(a, b int) int { if a < b { return 0 } // simplify for b&1 == 0 { a >>= 1 b >>= 1 } var ( ans int = 0 i int = 1 db int = 1 ) for a >= b { b <<= 1 db = Add(db, 1) } b >>= 1 db = Minus(db, 1) for a >= i { i <<= 1 } i >>= 1 for db > 0 { if a >= b && a&i > 0 { a = Minus(a, b) ans = Add(ans, 1) } for i > a { i >>= 1 } b >>= 1 db = Minus(db, 1) ans <<= 1 } ans >>= 1 return ans } // Mod func Mod(a, b int) int { if a < b { return a } if a == b { return 0 } var ( db int = 1 i int = 1 ) for a >= b { b <<= 1 db = Add(db, 1) } db = Minus(db, 1) for a >= i { i <<= 1 } i >>= 1 for db >= 0 { if a >= b && a&i > 0 { a = Minus(a, b) } for i > a { i >>= 1 } b >>= 1 db = Minus(db, 1) } return a } // Pow func Pow(a, b int) int { var r int = 1 for { t := Mod(b, 2) b = Div(b, 2) if t == 1 { r *= a } if b == 0 { break } a *= a } return r }
package usecase import "BottleneckStudio/keepmotivat.in/models" // UpdatePost interface ... type UpdatePost interface { UpdatePost(*models.Post) error } // UpdatePostUsecase ... type UpdatePostUsecase struct { repo models.PostRepository } // NewUpdatePostUsecase ... func NewUpdatePostUsecase(repo models.PostRepository) UpdatePost { return UpdatePostUsecase{ repo: repo, } } // UpdatePost ... func (uc UpdatePostUsecase) UpdatePost(post *models.Post) error { err := uc.repo.Update(post) return err }
package main

import (
	"Edwardz43/tgbot/config"
	"Edwardz43/tgbot/crawl/ptt"
	"Edwardz43/tgbot/err"
	"Edwardz43/tgbot/log"
	"Edwardz43/tgbot/log/zaplogger"
	"Edwardz43/tgbot/message/from"
	"Edwardz43/tgbot/worker"
	"Edwardz43/tgbot/worker/rabbitmqworker"
	"regexp"
	"strings"
)

// Package-level collaborators, initialized in main.
var logger log.Logger
var jobWorker worker.Worker
var failOnError = err.FailOnError

func main() {
	logger = zaplogger.GetInstance()
	jobWorker = rabbitmqworker.GetInstance(logger)
	// Run the worker loop in the background and block main forever.
	go jobWorker.Do(CrawlPTT)
	select {}
	//serve()
}

// CrawlPTT crawls the target board from PTT.
// It expects arg[0] to be a *from.Result whose message text has the form
// "/Cxxx@<bot-id>"; any message that does not match is silently ignored
// (nil is returned for all non-error early exits).
func CrawlPTT(arg ...interface{}) error {
	result := arg[0].(*from.Result)
	// Message format: "<command>@<target bot id>".
	msg := strings.Split(result.Message.Text, "@")
	if len(msg) < 2 {
		return nil
	}
	cmd := msg[0]
	target := msg[1]
	// Ignore messages addressed to other bots.
	if target != config.GetBotID() {
		return nil
	}
	// Commands look like "/C" followed by lowercase letters.
	isCommand, err := regexp.MatchString(`^/C[a-z]+$`, cmd)
	failOnError(err, "error when regex tgbot message")
	if !isCommand {
		return nil
	}
	// Map the command onto a PTT board name; unknown commands are ignored.
	m := ptt.BoardMap
	var board string
	if value, ok := m[cmd]; ok {
		board = value
	} else {
		return nil
	}
	crawler := ptt.GetInstance(board)
	s := crawler.Get()
	c := &Command{
		ChatID:    result.Message.Chat.ID,
		Text:      s,
		ParseMode: "HTML",
	}
	// NOTE(review): c is already a *Command, so &c is a **Command —
	// confirm send's parameter type; this looks like an accidental
	// double pointer.
	return send(&c)
}
package main

import "fmt"

// An IntSet is a set of small non-negative integers.
// Its zero value represents the empty set.
// NOTE(review): despite the type comment, Remove operates on a word *index*,
// not on a set element; that mirrors the original demo behavior.
type IntSet struct {
	words []uint64
}

func main() {
	w := IntSet{[]uint64{0, 1, 2, 3, 4, 5, 2, 3, 4}}
	fmt.Printf("Before Remove: %d\n", w)
	fmt.Printf("Remove Point : %d\n", w.Remove(2))
	fmt.Printf("After Remove : %d\n", w)
	fmt.Printf("Before Copy  : %d\n", w)
	cp := w.Copy()
	fmt.Printf("After Copy   : %d\n", cp)
}

// Len returns the number of words currently stored.
func (s *IntSet) Len() int {
	return len(s.words)
}

// Remove deletes the word at index x and returns x, or -1 when x is out of
// range.
//
// FIX: the original bound check was `x > len(s.words)`, which let
// x == len(s.words) (and any negative x) through and then panicked while
// writing len items into a slice of length len-1. The check is now a proper
// `x < 0 || x >= len(s.words)` range test, and the element copy uses append
// instead of a hand-rolled index loop.
func (s *IntSet) Remove(x int) int {
	if x < 0 || x >= len(s.words) {
		return -1
	}
	t := make([]uint64, 0, 2*(len(s.words)-1))
	t = append(t, s.words[:x]...)
	t = append(t, s.words[x+1:]...)
	s.Clear() // preserves the original debug output on removal
	s.words = t
	return x
}

// Clear empties the set in place (keeping debug output, as in the original).
func (s *IntSet) Clear() {
	fmt.Printf("Before Clear -> %d\n", s.words)
	s.words = s.words[:0]
	fmt.Printf("After Clear -> %d\n", s.words)
}

// Copy returns a deep copy of the set; mutating the copy never affects s.
func (s *IntSet) Copy() *IntSet {
	t := make([]uint64, len(s.words), 2*(len(s.words)))
	copy(t, s.words)
	return &IntSet{words: t}
}
package models

import (
	"time"

	"github.com/rs/xid"
	"github.com/thebigear/database"
	"github.com/tuvistavie/structomap"
	"gopkg.in/mgo.v2/bson"
)

// DBTableExpressions collection name
const DBTableExpressions = "expressions"

// Expression structure. Persisted in MongoDB; URLToken (bson "token") is the
// external identifier used by Update/Delete, while ID is the internal _id.
// Pointer fields distinguish "unset" from the zero value under omitempty.
type Expression struct {
	ID                 bson.ObjectId `json:"-" bson:"_id,omitempty"`
	URLToken           string        `json:"-" bson:"token,omitempty"`
	PostID             int64         `json:"post_id,omitempty" bson:"post_id,omitempty"`
	FullText           string        `json:"full_text" bson:"full_text,omitempty"`
	CleanText          string        `json:"clean_text" bson:"clean_text,omitempty"`
	IsVerified         *bool         `json:"is_verified,omitempty" bson:"is_verified,omitempty"`
	HasAttachment      *bool         `json:"has_attachment,omitempty" bson:"has_attachment,omitempty"`
	Owner              string        `json:"owner,omitempty" bson:"owner,omitempty"`
	AttachmentLabels   *string       `json:"attachment_labels,omitempty" bson:"attachment_labels,omitempty"`
	MediaURL           string        `json:"media_url,omitempty" bson:"media_url,omitempty"`
	Followers          *int          `json:"followers,omitempty" bson:"followers,omitempty"`
	Following          *int          `json:"following,omitempty" bson:"following,omitempty"`
	PostCount          *int          `json:"post_count,omitempty" bson:"post_count,omitempty"`
	LastTenInteraction *int          `json:"last_ten_interaction,omitempty" bson:"last_ten_interaction,omitempty"`
	TotalInteraction   *int          `json:"total_interaction,omitempty" bson:"total_interaction,omitempty"`
	//Analysis Analysis `json:"analysis,omitempty" bson:"analysis,omitempty"`
	CreatedAt time.Time `json:"-" bson:"created_at,omitempty"`
	UpdatedAt time.Time `json:"-" bson:"updated_at,omitempty"`
	DeletedAt time.Time `json:"-" bson:"deleted_at,omitempty"`
}

// Expressions array representation of Expression
type Expressions []Expression

// ListExpressions lists all expressions matching query. When pagination is
// not supplied (or uses the "-_id" default), sorting falls back to created_at.
func ListExpressions(query database.Query, paginationParams *database.PaginationParams) (*Expressions, error) {
	var result Expressions
	if paginationParams == nil {
		paginationParams = database.NewPaginationParams()
		paginationParams.SortBy = "created_at"
	} else if paginationParams.SortBy == "-_id" {
		paginationParams.SortBy = "created_at"
	}
	err := database.Mongo.FindAll(DBTableExpressions, query, &result, paginationParams)
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// GetExpression an expression title with token
func GetExpression(query database.Query) (*Expression, error) {
	var result Expression
	err := database.Mongo.FindOne(DBTableExpressions, query, &result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// Create a new expression: assigns a fresh URL token and timestamps,
// then inserts the document.
func (expression *Expression) Create() (*Expression, error) {
	expression.URLToken = xid.New().String()
	expression.CreatedAt = time.Now()
	expression.UpdatedAt = expression.CreatedAt
	if err := database.Mongo.Insert(DBTableExpressions, expression); err != nil {
		return nil, err
	}
	return expression, nil
}

// Update an expression, matched by its URL token; returns the updated document.
func (expression *Expression) Update() (*Expression, error) {
	query := database.Query{}
	query["token"] = expression.URLToken
	expression.UpdatedAt = time.Now()
	change := database.DocumentChange{
		Update:    expression,
		ReturnNew: true,
	}
	result := &Expression{}
	err := database.Mongo.Update(DBTableExpressions, query, change, result)
	return result, err
}

// Delete an expression. This is a soft delete: it sets DeletedAt and
// updates the document rather than removing it.
func (expression *Expression) Delete() error {
	query := database.Query{}
	query["token"] = expression.URLToken
	expression.DeletedAt = time.Now()
	change := database.DocumentChange{
		Update:    expression,
		ReturnNew: true,
	}
	err := database.Mongo.Update(DBTableExpressions, query, change, nil)
	return err
}

// ExpressionSerializer used in constructing maps to output JSON
type ExpressionSerializer struct {
	*structomap.Base
}

// NewExpressionSerializer creates a new ExpressionSerializer.
// NOTE(review): several picked names (RawText, Source, Image, Positive,
// Polarity) do not exist on the Expression struct above — structomap
// resolves fields dynamically, so verify these against the actual model.
func NewExpressionSerializer() *ExpressionSerializer {
	s := &ExpressionSerializer{structomap.New()}
	s.Pick("RawText", "CleanText", "Source", "Image", "Owner", "Positive", "Polarity").
		PickFunc(func(t interface{}) interface{} {
			return t.(time.Time).Format(time.RFC3339)
		}, "CreatedAt", "UpdatedAt").
		AddFunc("ID", func(expression interface{}) interface{} {
			return expression.(Expression).URLToken
		})
	return s
}

// WithDeletedAt includes deletedAt field; the zero time serializes as nil.
func (s *ExpressionSerializer) WithDeletedAt() *ExpressionSerializer {
	s.PickFunc(func(t interface{}) interface{} {
		empty := time.Time{}
		if t.(time.Time) == empty {
			return nil
		}
		return t.(time.Time).Format(time.RFC3339)
	}, "DeletedAt")
	return s
}
package core

import (
	"fmt"
	"os"
	"strconv"
	"log"
	"flag"
)

// CLI is the command-line interface to the blockchain.
type CLI struct {
	Blockchain *Blockchain
}

// createBlockChain creates a new blockchain rewarding the given address,
// then closes the underlying database.
func (cli *CLI) createBlockChain(address string) {
	bc := createBlockChain(address) // create the blockchain
	bc.DB.Close()
	fmt.Println("创建成功", address)
}

// getBalance prints the balance of address by summing its unspent outputs.
func (cli *CLI) getBalance(address string) {
	bc := NewBlockchain(address) // open the chain for this address
	defer bc.DB.Close()
	balance := 0
	UTXOs := bc.FindUTXO(address) // find unspent transaction outputs
	for _, out := range UTXOs {
		balance += out.Value // accumulate the amounts
	}
	fmt.Printf("查询的金额如下%s :%d \n", address, balance)
}

// printUsage prints the supported sub-commands.
func (cli *CLI) printUsage() {
	fmt.Println("用法如下")
	fmt.Println("getbalance -address 你输入的地址 根据地址查询金额")
	fmt.Println("createblockchain -address 你输入的地址 根据地址创建区块链")
	// fmt.Println("addblock 向区块链增加块")
	fmt.Println("send -from From -to To -amount Amount 转账")
	fmt.Println("showchain 显示区块链")
}

// validateArgs exits with usage output when no sub-command was supplied.
func (cli *CLI) validateArgs() {
	if len(os.Args) < 2 {
		cli.printUsage() // show usage
		os.Exit(1)       // and quit
	}
}

// func (cli *CLI)addBlock(data string) {
//	cli.Blockchain.AddBlock(data) // add a block
//	fmt.Println("区块增加成功")
// }

// showBlockchain walks the chain from the tip back to the genesis block,
// printing each block's hashes and proof-of-work validity.
func (cli *CLI) showBlockchain() {
	bc := NewBlockchain("")
	defer bc.DB.Close()
	bci := bc.Iterator() // create the iterator for the loop
	for {
		block := bci.next() // fetch the next (older) block
		fmt.Printf("上一块hash:%x", block.PrevBlockHash)
		fmt.Println("\n")
		fmt.Printf("当前hash:%x", block.Hash)
		fmt.Println("\n")
		pow := NewProofOfWork(block)
		fmt.Printf("pow: %s", strconv.FormatBool(pow.Validate()))
		fmt.Println("\n")
		fmt.Println("\n")
		// fmt.Printf("数据:%s",block.Data)
		// fmt.Println("\n")
		if len(block.PrevBlockHash) == 0 { // reached the genesis block
			break
		}
	}
}

// send transfers amount from one address to another and mines a block to
// confirm the transaction.
func (cli *CLI) send(from, to string, amount int) {
	bc := NewBlockchain(from)
	defer bc.DB.Close()
	tx := NewUTXOTransaction(from, to, amount, bc) // build the transfer
	bc.MineBlock([]*Transaction{tx})               // mine to confirm it
	fmt.Println("交易成功")
}

// Run parses os.Args and dispatches to the matching sub-command.
func (cli *CLI) Run() {
	cli.validateArgs() // sanity-check the arguments
	// Define one flag set per sub-command.
	getbalancecmd := flag.NewFlagSet("getbalance", flag.ExitOnError)
	createblockchaincmd := flag.NewFlagSet("createblockchain", flag.ExitOnError)
	sendcmd := flag.NewFlagSet("send", flag.ExitOnError)
	// addblockcmd:=flag.NewFlagSet("addblock",flag.ExitOnError)
	showchaincmd := flag.NewFlagSet("showchain", flag.ExitOnError)
	getbalanceaddress := getbalancecmd.String("address", "", "查询地址")
	createblockchainaddress := createblockchaincmd.String("address", "", "地址")
	sendfrom := sendcmd.String("from", "", "谁给的")
	sendto := sendcmd.String("to", "", "给谁的")
	sendamount := sendcmd.Int("amount", 0, "金额")
	switch os.Args[1] {
	case "getbalance":
		err := getbalancecmd.Parse(os.Args[2:]) // parse the arguments
		if err != nil {
			log.Panic(err)
		}
	case "createblockchain":
		err := createblockchaincmd.Parse(os.Args[2:]) // parse the arguments
		if err != nil {
			log.Panic(err)
		}
	case "send":
		err := sendcmd.Parse(os.Args[2:]) // parse the arguments
		if err != nil {
			log.Panic(err)
		}
	case "showchain":
		err := showchaincmd.Parse(os.Args[2:]) // parse the arguments
		if err != nil {
			log.Panic(err)
		}
	default:
		cli.printUsage()
		os.Exit(1)
	}
	// if addblockcmd.Parsed(){
	//	if *addBlockData==""{
	//		addblockcmd.Usage()
	//		os.Exit(1)
	//	}else{
	//		cli.addBlock(*addBlockData) // add a block
	//	}
	// }
	if getbalancecmd.Parsed() {
		if *getbalanceaddress == "" {
			getbalancecmd.Usage()
			os.Exit(1)
		} else {
			cli.getBalance(*getbalanceaddress) // query the balance
		}
	}
	if createblockchaincmd.Parsed() {
		if *createblockchainaddress == "" {
			createblockchaincmd.Usage()
			os.Exit(1)
		} else {
			cli.createBlockChain(*createblockchainaddress) // create the chain
		}
	}
	if sendcmd.Parsed() {
		if *sendfrom == "" || *sendto == "" || *sendamount <= 0 {
			sendcmd.Usage()
			os.Exit(1)
		} else {
			cli.send(*sendfrom, *sendto, *sendamount) // transfer funds
		}
	}
	if showchaincmd.Parsed() {
		cli.showBlockchain() // display the chain
	}
}
// ˅ package main import ( "fmt" "strconv" "time" ) // ˄ // Display values with digits. type DigitObserver struct { // ˅ // ˄ // ˅ // ˄ } func NewDigitObserver() *DigitObserver { // ˅ return &DigitObserver{} // ˄ } func (self *DigitObserver) Update(number *Number) { // ˅ fmt.Println("Digit : " + strconv.Itoa(number.value)) time.Sleep(100 * time.Millisecond) // ˄ } // ˅ // ˄
package model

import (
	"github.com/dgrijalva/jwt-go"
	"github.com/gin-gonic/gin"
	"time"
)

// User table `user`
type User struct {
	Username string `form:"username" json:"username"`
	Password string `form:"password" json:"password"`
}

// JwtCustomClaims are custom claims extending default ones.
type JwtCustomClaims struct {
	Name  string `json:"name"`
	Admin bool   `json:"admin"`
	jwt.StandardClaims
}

// SignCode is the HMAC secret used to sign tokens.
// SECURITY(review): a hard-coded signing secret must not ship to production;
// load it from configuration or the environment instead.
var SignCode = "jwt_login"

// Login 登陆 — authenticates the posted credentials and, on success,
// returns a JWT signed with SignCode that is valid for 12 hours.
// SECURITY(review): credentials are hard-coded (admin/admin) — demo only.
func Login(c *gin.Context) {
	var signCode = []byte(SignCode)
	username := c.PostForm("username")
	password := c.PostForm("password")
	// Guard clause: reject anything but the demo credentials.
	if username != "admin" || password != "admin" {
		c.JSON(401, ErrMessage{Message: "登录失败!"})
		return
	}
	// Create the Claims
	claims := JwtCustomClaims{
		username,
		true,
		jwt.StandardClaims{
			ExpiresAt: time.Now().Add(time.Hour * 12).Unix(),
			Issuer:    "test",
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	t, err := token.SignedString(signCode)
	if err != nil {
		// FIX: a signing failure is a server-side error, not a client
		// error — respond 500 instead of the original 400.
		c.JSON(500, ErrMessage{Message: err.Error()})
		return
	}
	c.JSON(200, t)
}

// Logout 退出 — stateless JWT setup, so this simply returns an empty user.
func Logout(c *gin.Context) {
	c.JSON(200, &User{})
}
package types

import (
// HOFSTADTER_START import
// HOFSTADTER_END import
)

/*
Name: AuthBasicUserLoginRequest
About:
*/

// HOFSTADTER_START start
// HOFSTADTER_END start

// AuthBasicUserLoginRequest carries the credentials submitted for a basic
// email/password login. Both fields are required by the validate tags.
type AuthBasicUserLoginRequest struct {
	// Password is the plaintext password supplied by the user (required).
	Password string `json:"password" xml:"password" yaml:"password" form:"password" query:"password" validate:"required" `
	// Email must be a valid address between 1 and 64 characters (required).
	Email string `json:"email" xml:"email" yaml:"email" form:"email" query:"email" validate:"required|email|min=1|max=64" `
}

// HOFSTADTER_BELOW
package commands

import (
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestLoadXEnvCLIStringSliceValue checks the precedence rules of
// loadXEnvCLIStringSliceValue: an explicitly changed flag wins, then a
// non-empty environment variable, then the flag's default value.
func TestLoadXEnvCLIStringSliceValue(t *testing.T) {
	testCases := []struct {
		name                        string
		envKey, envValue, flagValue string
		flagDefault                 []string
		flag                        *pflag.Flag
		expected                    []string
		expectedResult              XEnvCLIResult
		expectedErr                 string
	}{
		{
			// Unchanged flag + env var set: the env value is used.
			"ShouldParseFromEnv",
			"EXAMPLE_ONE", "abc", "example-one", []string{"flagdef"},
			&pflag.Flag{Name: "example-one", Changed: false},
			[]string{"abc"}, XEnvCLIResultEnvironment, "",
		},
		{
			// Comma-separated env values are split into a slice.
			"ShouldParseMultipleFromEnv",
			"EXAMPLE_ONE", "abc,123", "example-one", []string{"flagdef"},
			&pflag.Flag{Name: "example-one", Changed: false},
			[]string{"abc", "123"}, XEnvCLIResultEnvironment, "",
		},
		{
			// Explicitly set flag takes precedence over the environment.
			"ShouldParseCLIExplicit",
			"EXAMPLE_ONE", "abc,123", "example-from-flag,123", []string{"flagdef"},
			&pflag.Flag{Name: "example-one", Changed: true},
			[]string{"example-from-flag", "123"}, XEnvCLIResultCLIExplicit, "",
		},
		{
			// Neither env nor flag set: the flag default is used implicitly.
			"ShouldParseCLIImplicit",
			"EXAMPLE_ONE", "", "example-one", []string{"example-from-flag-default", "123"},
			&pflag.Flag{Name: "example-one", Changed: false},
			[]string{"example-from-flag-default", "123"}, XEnvCLIResultCLIImplicit, "",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			cmd := &cobra.Command{}

			if tc.flag != nil {
				cmd.Flags().StringSlice(tc.flag.Name, tc.flagDefault, "")

				if tc.flag.Changed {
					require.NoError(t, cmd.Flags().Set(tc.flag.Name, tc.flagValue))
				}
			}

			if tc.envValue != "" {
				t.Setenv(tc.envKey, tc.envValue)
			}

			actual, actualResult, actualErr := loadXEnvCLIStringSliceValue(cmd, tc.envKey, tc.flag.Name)

			assert.Equal(t, tc.expected, actual)
			assert.Equal(t, tc.expectedResult, actualResult)

			if tc.expectedErr == "" {
				assert.NoError(t, actualErr)
			} else {
				assert.EqualError(t, actualErr, tc.expectedErr)
			}
		})
	}
}

// TestLoadXNormalizedPaths exercises loadXNormalizedPaths against a
// temporary tree of config files and directories, including non-existent
// paths and a file that overlaps a supplied directory.
func TestLoadXNormalizedPaths(t *testing.T) {
	root := t.TempDir()

	configdir := filepath.Join(root, "config")
	otherdir := filepath.Join(root, "other")

	require.NoError(t, os.Mkdir(configdir, 0700))
	require.NoError(t, os.Mkdir(otherdir, 0700))

	var (
		info os.FileInfo
		file *os.File
		err  error
	)

	// a.yml and b.yml live in configdir, c.yml in otherdir; d.yml is
	// deliberately never created so the not-exist cases can be tested.
	ayml := filepath.Join(configdir, "a.yml")
	byml := filepath.Join(configdir, "b.yml")
	cyml := filepath.Join(otherdir, "c.yml")
	dyml := filepath.Join(otherdir, "d.yml")

	file, err = os.Create(ayml)

	require.NoError(t, err)
	require.NoError(t, file.Close())

	file, err = os.Create(byml)

	require.NoError(t, err)
	require.NoError(t, file.Close())

	file, err = os.Create(cyml)

	require.NoError(t, err)
	require.NoError(t, file.Close())

	// Sanity-check the fixture before running the table.
	info, err = os.Stat(configdir)

	require.NoError(t, err)
	require.True(t, info.IsDir())

	info, err = os.Stat(otherdir)

	require.NoError(t, err)
	require.True(t, info.IsDir())

	info, err = os.Stat(ayml)

	require.NoError(t, err)
	require.False(t, info.IsDir())

	info, err = os.Stat(byml)

	require.NoError(t, err)
	require.False(t, info.IsDir())

	info, err = os.Stat(cyml)

	require.NoError(t, err)
	require.False(t, info.IsDir())

	testCases := []struct {
		name           string
		haveX          XEnvCLIResult
		have, expected []string
		expectedErr    string
	}{
		// Plain file paths pass through unchanged.
		{"ShouldAllowFiles", XEnvCLIResultCLIImplicit,
			[]string{ayml}, []string{ayml}, "",
		},
		// Implicitly supplied paths that do not exist are silently dropped.
		{"ShouldSkipFilesNotExistImplicit", XEnvCLIResultCLIImplicit,
			[]string{dyml}, []string(nil), "",
		},
		// Explicitly supplied paths are kept even when they do not exist.
		{"ShouldNotErrFilesNotExistExplicit", XEnvCLIResultCLIExplicit,
			[]string{dyml}, []string{dyml}, "",
		},
		{"ShouldAllowDirectories", XEnvCLIResultCLIImplicit,
			[]string{configdir}, []string{configdir}, "",
		},
		{"ShouldAllowFilesDirectories", XEnvCLIResultCLIImplicit,
			[]string{ayml, otherdir}, []string{ayml, otherdir}, "",
		},
		// A file given alongside the directory that contains it is an error.
		{"ShouldRaiseErrOnOverlappingFilesDirectories", XEnvCLIResultCLIImplicit,
			[]string{ayml, configdir}, nil,
			fmt.Sprintf("failed to load config directory '%s': the config file '%s' is in that directory which is not supported", configdir, ayml),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actual, actualErr := loadXNormalizedPaths(tc.have, tc.haveX)

			assert.Equal(t, tc.expected, actual)

			if tc.expectedErr == "" {
				assert.NoError(t, actualErr)
			} else {
				assert.EqualError(t, actualErr, tc.expectedErr)
			}
		})
	}
}
package client

// Error messages shared across the client package.
const (
	// ErrMsgMissingURL indicates no url was provided.
	ErrMsgMissingURL = "missing required url"
	// ErrMsgMissingAPIKey indicates no apiKey was provided.
	ErrMsgMissingAPIKey = "missing required apiKey"
	// ErrMsgInvalidClient indicates the client lacks required values.
	ErrMsgInvalidClient = "client missing required values"
	// ErrMsgRecordsNotFound indicates a lookup matched no records.
	ErrMsgRecordsNotFound = "records not found"
)
package zabbix

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// HostGroupGet looks a host group up by name via the Zabbix JSON-RPC API.
// It returns the first matching group object, or (nil, nil) when no group
// with that name exists.
func (api *API) HostGroupGet(name string) (map[string]interface{}, error) {
	payload := strings.NewReader(fmt.Sprintf(HostGroupGetTemplate, name, api.Session, api.ID))

	req, err := http.NewRequest("POST", api.URL, payload)
	if err != nil {
		return nil, err
	}
	req.Header.Add("content-type", "application/json")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	// Consistent with HostGroupCreate: reject non-200 replies early.
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("zabbix api return response code %v", res.StatusCode)
	}

	result := map[string]interface{}{}
	if err = json.NewDecoder(res.Body).Decode(&result); err != nil {
		return nil, err
	}

	// Surface an API-level error instead of panicking on the missing
	// "result" key below (the previous unchecked type assertion did).
	if errmsg, ok := result["error"]; ok {
		return nil, fmt.Errorf("%v", errmsg)
	}

	groups, ok := result["result"].([]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected zabbix api response: %v", result)
	}
	if len(groups) == 0 {
		return nil, nil
	}

	group, ok := groups[0].(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected zabbix api response: %v", result)
	}
	return group, nil
}

// HostGroupCreate creates a host group named name via the Zabbix JSON-RPC
// API. It returns an error on transport failure, a non-200 status, or an
// API-level error in the response body.
func (api *API) HostGroupCreate(name string) error {
	payload := strings.NewReader(fmt.Sprintf(HostGroupPostTemplate, name, api.Session, api.ID))

	req, err := http.NewRequest("POST", api.URL, payload)
	if err != nil {
		return err
	}
	req.Header.Add("content-type", "application/json")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		return fmt.Errorf("zabbix api return response code %v", res.StatusCode)
	}

	result := map[string]interface{}{}
	if err = json.NewDecoder(res.Body).Decode(&result); err != nil {
		return err
	}
	if errmsg, ok := result["error"]; ok {
		return fmt.Errorf("%v", errmsg)
	}
	return nil
}
package pie

import (
	"math/rand"
	"testing"
)

// Benchmarks for Float64s.Median at three input sizes.
func BenchmarkFloatMedianSmall(b *testing.B)  { benchmarkFloatMedian(b, 20) }
func BenchmarkFloatMedianMedium(b *testing.B) { benchmarkFloatMedian(b, 800) }
func BenchmarkFloatMedianLarge(b *testing.B)  { benchmarkFloatMedian(b, 1000000) }

// benchmarkFloatMedian times Median on a slice of `size` pseudo-random
// floats in [-0.5, 0.5); the fixed seed keeps runs comparable.
func benchmarkFloatMedian(b *testing.B, size int) {
	// Make the random numbers below deterministic
	rand.Seed(123)

	a := make(Float64s, size)
	for i := range a {
		// As many possible values as slots in the slice.
		// Positives and negatives.
		// Variety, with some duplicates.
		a[i] = -0.5 + rand.Float64()
	}

	// Exclude the setup above from the timed region.
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// m := a.MedianOld()
		// m := a.medianCheck()
		m := a.Median()
		sinkFloats += m
	}
}

// Prevent compiler from aggressively optimizing away the result
var sinkFloats float64
/* * Copyright 2012-2019 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package SpringBootStarterLogrus import ( "os" "strings" "github.com/sirupsen/logrus" "github.com/didi/go-spring/spring-core" "github.com/didi/go-spring/spring-logrus" "github.com/didi/go-spring/spring-logger" "github.com/go-spring/go-spring-boot/spring-boot" ) func init() { SpringLogger.SetLogger(logrus.StandardLogger()) logrus.SetFormatter(new(SpringLogrus.TextFormatter)) logrus.SetLevel(logrus.TraceLevel) output := SpringLogrus.NewNullOutput() logrus.SetOutput(output) SpringBoot.RegisterModule(func(context SpringCore.SpringContext) { properties := context.GetPrefixProperties("logger.appender.") appenderMap := make(map[string]SpringLogger.LoggerAppender) for key := range properties { ss := strings.Split(key, ".") appenderMap[ss[2]] = nil } for key := range appenderMap { if t, ok := properties["logger.appender."+key+".type"]; ok { switch t { case "ConsoleAppender": level := logrus.DebugLevel if l, ok := properties["logger.appender."+key+".level"]; ok { level, _ = logrus.ParseLevel(l) } appender := SpringLogger.NewConsoleAppender() appenderMap[key] = appender logrus.AddHook(SpringLogrus.NewSpringLogrusHook(appender, level)) case "FileAppender": level := logrus.DebugLevel if l, ok := properties["logger.appender."+key+".level"]; ok { level, _ = logrus.ParseLevel(l) } workDir, _ := os.Getwd() app := context.GetProperties("spring.application.name") filePath := workDir + "/log/" 
+ app + ".log" if pattern, ok := properties["logger.appender."+key+".pattern"]; ok { filePath = workDir + "/log/" + pattern } logFile, _ := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) appender := SpringLogger.NewFileAppender(logFile) appenderMap[key] = appender logrus.AddHook(SpringLogrus.NewSpringLogrusHook(appender, level)) } } } output.Output(appenderMap) }) }
package database

import (
	"log"

	"github.com/solrac97gr/cryptoAPI/models"
)

// SaveLog records one request (HTTP method and URL) as a new child of the
// "log" node in the database, using the package-level DatabaseClient and
// FirebaseCtx.
//
// NOTE(review): log.Fatalln terminates the whole process on any database
// error — confirm that is intended for a logging helper.
func SaveLog(method string, url string) {
	ref := DatabaseClient.NewRef("/")
	logRef := ref.Child("log")

	// Push creates a new child node; its value is filled in by Set below.
	newLog, err := logRef.Push(FirebaseCtx, nil)
	if err != nil {
		log.Fatalln("Error pushing child node:", err)
	}

	if err := newLog.Set(FirebaseCtx, &models.Log{
		Method: method,
		URL:    url,
	}); err != nil {
		log.Fatalln("Error setting value:", err)
	}
}
// NOTE(review): this file appears to be generated by goyacc from expr.y
// (see the //line directives) — prefer editing the grammar, not this file.

//line expr.y:17
package main

import __yyfmt__ "fmt"

//line expr.y:18
import (
	"bytes"
	"errors"
	"fmt"
	"log"
	"unicode/utf8"
)

// Result of the most recent parse; written by the grammar actions and by
// Error, read back by evaluateExpression.
var result_value int
var result_error string

//line expr.y:33
type exprSymType struct {
	yys int
	num int
}

const NUM = 57346

var exprToknames = []string{
	"'+'",
	"'-'",
	"'*'",
	"'/'",
	"'('",
	"')'",
	"NUM",
}
var exprStatenames = []string{}

const exprEofCode = 1
const exprErrCode = 2
const exprMaxDepth = 200

//line expr.y:92

// The parser expects the lexer to return 0 on EOF. Give it a name
// for clarity.
const eof = 0

// The parser uses the type <prefix>Lex as a lexer. It must provide
// the methods Lex(*<prefix>SymType) int and Error(string).
type exprLex struct {
	line []byte
	peek rune
}

// The parser calls this method to get each new token. This
// implementation returns operators and NUM.
func (x *exprLex) Lex(yylval *exprSymType) int {
	for {
		c := x.next()
		switch c {
		case eof:
			return eof
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return x.num(c, yylval)
		case '+', '-', '*', '/', '(', ')':
			return int(c)
		case ' ', '\t', '\n', '\r':
			// Whitespace is skipped.
		default:
			log.Printf("unrecognized character %q", c)
		}
	}
}

// Lex a number. Note the accepted digit set includes 'x' and 'a'-'f',
// so hex-looking literals are gathered and handed to Sscanf's %v.
func (x *exprLex) num(c rune, yylval *exprSymType) int {
	add := func(b *bytes.Buffer, c rune) {
		if _, err := b.WriteRune(c); err != nil {
			log.Fatalf("WriteRune: %s", err)
		}
	}
	var b bytes.Buffer
	add(&b, c)
L:
	for {
		c = x.next()
		switch c {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'x', 'a', 'b', 'c', 'd', 'e', 'f':
			add(&b, c)
		default:
			break L
		}
	}
	// Push back the rune that terminated the number.
	if c != eof {
		x.peek = c
	}
	value := 0
	if n, _ := fmt.Sscanf(b.String(), "%v", &value); n != 1 {
		log.Printf("bad number %q", b.String())
		return eof
	}
	yylval.num = value
	return NUM
}

// Return the next rune for the lexer.
// next returns the next rune from the input, honoring a pushed-back rune
// in x.peek and skipping invalid UTF-8 sequences.
func (x *exprLex) next() rune {
	if x.peek != eof {
		r := x.peek
		x.peek = eof
		return r
	}
	if len(x.line) == 0 {
		return eof
	}
	c, size := utf8.DecodeRune(x.line)
	x.line = x.line[size:]
	if c == utf8.RuneError && size == 1 {
		log.Print("invalid utf8")
		return x.next()
	}
	return c
}

// The parser calls this method on a parse error.
func (x *exprLex) Error(s string) {
	result_error = s
}

// evaluateExpression parses and evaluates one arithmetic expression,
// returning its integer value or the parser's error message.
func evaluateExpression(line string) (int, error) {
	result_value = -1
	result_error = ""
	exprParse(&exprLex{line: []byte(line)})
	if result_error == "" {
		return result_value, nil
	}
	return result_value, errors.New(result_error)
}

// NOTE(review): everything below is goyacc-generated parser machinery and
// state tables — do not edit by hand.

//line yacctab:1
var exprExca = []int{
	-1, 1,
	1, -1,
	-2, 0,
}

const exprNprod = 13
const exprPrivate = 57344

var exprTokenNames []string
var exprStates []string

const exprLast = 23

var exprAct = []int{
	7, 4, 5, 2, 21, 9, 6, 8, 12, 13,
	9, 1, 8, 16, 3, 19, 20, 17, 18, 14,
	15, 10, 11,
}
var exprPact = []int{
	-3, -1000, -1000, 17, -3, -3, 13, -1000, -1000, -3,
	2, 2, -1000, -1000, 2, 2, -5, 13, 13, -1000,
	-1000, -1000,
}
var exprPgo = []int{
	0, 3, 14, 6, 0, 11,
}
var exprR1 = []int{
	0, 5, 1, 1, 1, 2, 2, 2, 3, 3,
	3, 4, 4,
}
var exprR2 = []int{
	0, 1, 1, 2, 2, 1, 3, 3, 1, 3,
	3, 1, 3,
}
var exprChk = []int{
	-1000, -5, -1, -2, 4, 5, -3, -4, 10, 8,
	4, 5, -1, -1, 6, 7, -1, -3, -3, -4,
	-4, 9,
}
var exprDef = []int{
	0, -2, 1, 2, 0, 0, 5, 8, 11, 0,
	0, 0, 3, 4, 0, 0, 0, 6, 7, 9,
	10, 12,
}
var exprTok1 = []int{
	1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	8, 9, 6, 4, 3, 5, 3, 7,
}
var exprTok2 = []int{
	2, 3, 10,
}
var exprTok3 = []int{
	0,
}

//line yaccpar:1

/* parser for yacc output */

var exprDebug = 0

type exprLexer interface {
	Lex(lval *exprSymType) int
	Error(s string)
}

const exprFlag = -1000

// exprTokname returns a printable name for token c (debug output).
func exprTokname(c int) string {
	// 4 is TOKSTART above
	if c >= 4 && c-4 < len(exprToknames) {
		if exprToknames[c-4] != "" {
			return exprToknames[c-4]
		}
	}
	return __yyfmt__.Sprintf("tok-%v", c)
}

// exprStatname returns a printable name for parser state s (debug output).
func exprStatname(s int) string {
	if s >= 0 && s < len(exprStatenames) {
		if exprStatenames[s] != "" {
			return exprStatenames[s]
		}
	}
	return __yyfmt__.Sprintf("state-%v", s)
}

// exprlex1 fetches one token from the lexer and maps it through the token
// translation tables into the parser's internal token numbering.
func exprlex1(lex exprLexer, lval *exprSymType) int {
	c := 0
	char := lex.Lex(lval)
	if char <= 0 {
		c = exprTok1[0]
		goto out
	}
	if char < len(exprTok1) {
		c = exprTok1[char]
		goto out
	}
	if char >= exprPrivate {
		if char < exprPrivate+len(exprTok2) {
			c = exprTok2[char-exprPrivate]
			goto out
		}
	}
	for i := 0; i < len(exprTok3); i += 2 {
		c = exprTok3[i+0]
		if c == char {
			c = exprTok3[i+1]
			goto out
		}
	}
out:
	if c == 0 {
		c = exprTok2[1] /* unknown char */
	}
	if exprDebug >= 3 {
		__yyfmt__.Printf("lex %s(%d)\n", exprTokname(c), uint(char))
	}
	return c
}

// exprParse runs the LALR state machine over the tokens produced by
// exprlex, executing the grammar actions in the switch at the bottom.
func exprParse(exprlex exprLexer) int {
	var exprn int
	var exprlval exprSymType
	var exprVAL exprSymType
	exprS := make([]exprSymType, exprMaxDepth)

	Nerrs := 0   /* number of errors */
	Errflag := 0 /* error recovery flag */
	exprstate := 0
	exprchar := -1
	exprp := -1
	goto exprstack

ret0:
	return 0

ret1:
	return 1

exprstack:
	/* put a state and value onto the stack */
	if exprDebug >= 4 {
		__yyfmt__.Printf("char %v in %v\n", exprTokname(exprchar), exprStatname(exprstate))
	}

	exprp++
	if exprp >= len(exprS) {
		nyys := make([]exprSymType, len(exprS)*2)
		copy(nyys, exprS)
		exprS = nyys
	}
	exprS[exprp] = exprVAL
	exprS[exprp].yys = exprstate

exprnewstate:
	exprn = exprPact[exprstate]
	if exprn <= exprFlag {
		goto exprdefault /* simple state */
	}
	if exprchar < 0 {
		exprchar = exprlex1(exprlex, &exprlval)
	}
	exprn += exprchar
	if exprn < 0 || exprn >= exprLast {
		goto exprdefault
	}
	exprn = exprAct[exprn]
	if exprChk[exprn] == exprchar { /* valid shift */
		exprchar = -1
		exprVAL = exprlval
		exprstate = exprn
		if Errflag > 0 {
			Errflag--
		}
		goto exprstack
	}

exprdefault:
	/* default state action */
	exprn = exprDef[exprstate]
	if exprn == -2 {
		if exprchar < 0 {
			exprchar = exprlex1(exprlex, &exprlval)
		}

		/* look through exception table */
		xi := 0
		for {
			if exprExca[xi+0] == -1 && exprExca[xi+1] == exprstate {
				break
			}
			xi += 2
		}
		for xi += 2; ; xi += 2 {
			exprn = exprExca[xi+0]
			if exprn < 0 || exprn == exprchar {
				break
			}
		}
		exprn = exprExca[xi+1]
		if exprn < 0 {
			goto ret0
		}
	}
	if exprn == 0 {
		/* error ... attempt to resume parsing */
		switch Errflag {
		case 0: /* brand new error */
			exprlex.Error("syntax error")
			Nerrs++
			if exprDebug >= 1 {
				__yyfmt__.Printf("%s", exprStatname(exprstate))
				__yyfmt__.Printf(" saw %s\n", exprTokname(exprchar))
			}
			fallthrough

		case 1, 2: /* incompletely recovered error ... try again */
			Errflag = 3

			/* find a state where "error" is a legal shift action */
			for exprp >= 0 {
				exprn = exprPact[exprS[exprp].yys] + exprErrCode
				if exprn >= 0 && exprn < exprLast {
					exprstate = exprAct[exprn] /* simulate a shift of "error" */
					if exprChk[exprstate] == exprErrCode {
						goto exprstack
					}
				}

				/* the current p has no shift on "error", pop stack */
				if exprDebug >= 2 {
					__yyfmt__.Printf("error recovery pops state %d\n", exprS[exprp].yys)
				}
				exprp--
			}
			/* there is no state on the stack with an error shift ... abort */
			goto ret1

		case 3: /* no shift yet; clobber input char */
			if exprDebug >= 2 {
				__yyfmt__.Printf("error recovery discards %s\n", exprTokname(exprchar))
			}
			if exprchar == exprEofCode {
				goto ret1
			}
			exprchar = -1
			goto exprnewstate /* try again in the same state */
		}
	}

	/* reduction by production exprn */
	if exprDebug >= 2 {
		__yyfmt__.Printf("reduce %v in:\n\t%v\n", exprn, exprStatname(exprstate))
	}

	exprnt := exprn
	exprpt := exprp
	_ = exprpt // guard against "declared and not used"

	exprp -= exprR2[exprn]
	exprVAL = exprS[exprp+1]

	/* consult goto table to find next state */
	exprn = exprR1[exprn]
	exprg := exprPgo[exprn]
	exprj := exprg + exprS[exprp].yys + 1

	if exprj >= exprLast {
		exprstate = exprAct[exprg]
	} else {
		exprstate = exprAct[exprj]
		if exprChk[exprstate] != -exprn {
			exprstate = exprAct[exprg]
		}
	}
	// dummy call; replaced with literal code
	switch exprnt {

	case 1:
		//line expr.y:47
		{
			result_value = exprS[exprpt-0].num
		}
	case 2:
		exprVAL.num = exprS[exprpt-0].num
	case 3:
		//line expr.y:54
		{
			exprVAL.num = exprS[exprpt-0].num
		}
	case 4:
		//line expr.y:58
		{
			exprVAL.num = -exprS[exprpt-0].num
		}
	case 5:
		exprVAL.num = exprS[exprpt-0].num
	case 6:
		//line expr.y:65
		{
			exprVAL.num = exprS[exprpt-2].num + exprS[exprpt-0].num
		}
	case 7:
		//line expr.y:69
		{
			exprVAL.num = exprS[exprpt-2].num - exprS[exprpt-0].num
		}
	case 8:
		exprVAL.num = exprS[exprpt-0].num
	case 9:
		//line expr.y:76
		{
			exprVAL.num = exprS[exprpt-2].num * exprS[exprpt-0].num
		}
	case 10:
		//line expr.y:80
		{
			exprVAL.num = exprS[exprpt-2].num / exprS[exprpt-0].num
		}
	case 11:
		exprVAL.num = exprS[exprpt-0].num
	case 12:
		//line expr.y:87
		{
			exprVAL.num = exprS[exprpt-1].num
		}
	}
	goto exprstack /* stack new state and value */
}
package server

import (
	"net/http"
	"reflect"
	"strconv"

	"github.com/ItsJimi/casa/logger"
	"github.com/ItsJimi/casa/utils"
	"github.com/labstack/echo"
)

// addRoomReq is the request payload for room creation and update.
type addRoomReq struct {
	Name string
}

// AddRoom route create and add user to an room
func AddRoom(c echo.Context) error {
	req := new(addRoomReq)
	if err := c.Bind(req); err != nil {
		logger.WithFields(logger.Fields{"code": "CSRAR001"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code:    "CSRAR001",
			Message: "Wrong parameters",
		})
	}

	if err := utils.MissingFields(c, reflect.ValueOf(req).Elem(), []string{"Name"}); err != nil {
		logger.WithFields(logger.Fields{"code": "CSRAR002"}).Warnf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code:    "CSRAR002",
			Message: err.Error(),
		})
	}

	user := c.Get("user").(User)

	row, err := DB.Query("INSERT INTO rooms (id, name, home_id, creator_id) VALUES (generate_ulid(), $1, $2, $3) RETURNING id;", req.Name, c.Param("homeId"), user.ID)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRAR003"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code:    "CSRAR003",
			Message: "Room can't be created",
		})
	}
	// FIX: the result set was never closed, leaking a connection per call.
	defer row.Close()

	var roomID string
	// FIX: row.Next() was previously called without checking its result.
	if !row.Next() {
		logger.WithFields(logger.Fields{"code": "CSRAR004"}).Errorf("no id returned by insert")
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code:    "CSRAR004",
			Message: "Room can't be created",
		})
	}
	if err = row.Scan(&roomID); err != nil {
		logger.WithFields(logger.Fields{"code": "CSRAR004"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code:    "CSRAR004",
			Message: "Room can't be created",
		})
	}

	// The creator gets full permissions on the new room.
	newPermission := Permission{
		UserID: user.ID,
		Type:   "room",
		TypeID: roomID,
		Read:   true,
		Write:  true,
		Manage: true,
		Admin:  true,
	}
	_, err = DB.NamedExec("INSERT INTO permissions (id, user_id, type, type_id, read, write, manage, admin) VALUES (generate_ulid(), :user_id, :type, :type_id, :read, :write, :manage, :admin)", newPermission)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRAR005"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code:    "CSRAR005",
			Message: "Room can't be created",
		})
	}

	return c.JSON(http.StatusCreated, MessageResponse{
		Message: roomID,
	})
}

// UpdateRoom route update room
func UpdateRoom(c echo.Context) error {
	req := new(addRoomReq)
	if err := c.Bind(req); err != nil {
		logger.WithFields(logger.Fields{"code": "CSRUR001"}).Errorf("%s", err.Error())
		// FIX: return a structured error response like the sibling handlers
		// instead of the bare error.
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code:    "CSRUR001",
			Message: "Wrong parameters",
		})
	}

	if err := utils.MissingFields(c, reflect.ValueOf(req).Elem(), []string{"Name"}); err != nil {
		logger.WithFields(logger.Fields{"code": "CSRUR002"}).Warnf("%s", err.Error())
		return c.JSON(http.StatusBadRequest, ErrorResponse{
			Code:    "CSRUR002",
			Message: err.Error(),
		})
	}

	user := c.Get("user").(User)

	var permission Permission
	err := DB.Get(&permission, "SELECT * FROM permissions WHERE user_id=$1 AND type=$2 AND type_id=$3", user.ID, "room", c.Param("roomId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRUR003"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusNotFound, ErrorResponse{
			Code:    "CSRUR003",
			Message: "Room not found",
		})
	}

	// Updating requires at least manage (or admin) rights.
	if !permission.Manage && !permission.Admin {
		logger.WithFields(logger.Fields{"code": "CSRUR004"}).Warnf("Unauthorized")
		return c.JSON(http.StatusUnauthorized, ErrorResponse{
			Code:    "CSRUR004",
			Message: "Unauthorized modifications",
		})
	}

	_, err = DB.Exec("UPDATE rooms SET Name=$1 WHERE id=$2", req.Name, c.Param("roomId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRUR005"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code:    "CSRUR005",
			Message: "Room can't be updated",
		})
	}

	return c.JSON(http.StatusOK, MessageResponse{
		Message: "Room updated",
	})
}

// DeleteRoom route delete room
func DeleteRoom(c echo.Context) error {
	user := c.Get("user").(User)

	var permission Permission
	err := DB.Get(&permission, "SELECT * FROM permissions WHERE user_id=$1 AND type=$2 AND type_id=$3", user.ID, "room", c.Param("roomId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRDR001"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusNotFound, ErrorResponse{
			Code:    "CSRDR001",
			Message: "Room not found",
		})
	}

	// Deleting a room requires admin rights.
	if !permission.Admin {
		logger.WithFields(logger.Fields{"code": "CSRDR002"}).Warnf("Unauthorized")
		return c.JSON(http.StatusUnauthorized, ErrorResponse{
			Code:    "CSRDR002",
			Message: "Unauthorized modifications",
		})
	}

	_, err = DB.Exec("DELETE FROM rooms WHERE id=$1", c.Param("roomId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRDR003"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code:    "CSRDR003",
			Message: "Room can't be deleted",
		})
	}

	// Also remove the permissions that pointed at the deleted room.
	_, err = DB.Exec("DELETE FROM permissions WHERE type=$1 AND type_id=$2", "room", c.Param("roomId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRDR004"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code:    "CSRDR004",
			Message: "Room can't be deleted",
		})
	}

	return c.JSON(http.StatusOK, MessageResponse{
		Message: "Room deleted",
	})
}

// permissionRoom is the scan target for the permissions+users+rooms join.
type permissionRoom struct {
	Permission
	User
	RoomID        string `db:"r_id"`
	RoomName      string `db:"r_name"`
	RoomHomeID    string `db:"r_homeid"`
	RoomCreatedAt string `db:"r_createdat"`
	RoomUpdatedAt string `db:"r_updatedat"`
}

// roomRes is the JSON shape returned to clients for a room.
type roomRes struct {
	ID        string `json:"id"`
	Name      string `json:"name"`
	HomeID    string `json:"homeId"`
	CreatedAt string `json:"createdAt"`
	UpdatedAt string `json:"updatedAt"`
	Creator   User   `json:"creator"`
	Read      bool   `json:"read"`
	Write     bool   `json:"write"`
	Manage    bool   `json:"manage"`
	Admin     bool   `json:"admin"`
}

// GetRooms route get list of user rooms
func GetRooms(c echo.Context) error {
	user := c.Get("user").(User)

	rows, err := DB.Queryx(`
	SELECT permissions.*, users.*,
	rooms.id as r_id, rooms.name AS r_name, rooms.home_id AS r_homeid, rooms.created_at AS r_createdat, rooms.updated_at AS r_updatedat
	FROM permissions
	JOIN rooms ON permissions.type_id = rooms.id
	JOIN users ON rooms.creator_id = users.id
	WHERE permissions.type=$1 AND permissions.user_id=$2 AND rooms.home_id=$3 AND (permissions.read=true OR permissions.admin=true)
	`, "room", user.ID, c.Param("homeId"))
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRGRS001"}).Errorf("%s", err.Error())
		return c.JSON(http.StatusInternalServerError, ErrorResponse{
			Code:    "CSRGRS001",
			Message: "Rooms can't be retrieved",
		})
	}
	defer rows.Close()

	rooms := []roomRes{}
	for rows.Next() {
		var permission permissionRoom
		err := rows.StructScan(&permission)
		if err != nil {
			logger.WithFields(logger.Fields{"code": "CSRGRS002"}).Errorf("%s", err.Error())
			return c.JSON(http.StatusInternalServerError, ErrorResponse{
				Code:    "CSRGRS002",
				Message: "Rooms can't be retrieved",
			})
		}
		rooms = append(rooms, roomRes{
			ID:        permission.RoomID,
			Name:      permission.RoomName,
			HomeID:    permission.RoomHomeID,
			CreatedAt: permission.RoomCreatedAt,
			UpdatedAt: permission.RoomUpdatedAt,
			Creator:   permission.User,
			Read:      permission.Permission.Read,
			Write:     permission.Permission.Write,
			Manage:    permission.Permission.Manage,
			Admin:     permission.Permission.Admin,
		})
	}

	totalRooms := strconv.Itoa(len(rooms))
	c.Response().Header().Set("Content-Range", "0-"+totalRooms+"/"+totalRooms)
	return c.JSON(http.StatusOK, rooms)
}

// GetRoom route get specific room with id
func GetRoom(c echo.Context) error {
	user := c.Get("user").(User)

	var permission permissionRoom
	err := DB.QueryRowx(`
	SELECT permissions.*, users.*,
	rooms.id as r_id, rooms.name AS r_name, rooms.home_id AS r_homeid, rooms.created_at AS r_createdat, rooms.updated_at AS r_updatedat
	FROM permissions
	JOIN rooms ON permissions.type_id = rooms.id
	JOIN users ON rooms.creator_id = users.id
	WHERE permissions.type=$1 AND permissions.type_id=$2 AND permissions.user_id=$3
	`, "room", c.Param("roomId"), user.ID).StructScan(&permission)
	if err != nil {
		logger.WithFields(logger.Fields{"code": "CSRGR001"}).Errorf("QueryRowx: Select error")
		return c.JSON(http.StatusNotFound, ErrorResponse{
			Code:    "CSRGR001",
			Message: "Room can't be found",
		})
	}

	return c.JSON(http.StatusOK, roomRes{
		ID:        permission.RoomID,
		Name:      permission.RoomName,
		HomeID:    permission.RoomHomeID,
		CreatedAt: permission.RoomCreatedAt,
		UpdatedAt: permission.RoomUpdatedAt,
		Creator:   permission.User,
		Read:      permission.Permission.Read,
		Write:     permission.Permission.Write,
		Manage:    permission.Permission.Manage,
		Admin:     permission.Permission.Admin,
	})
}
package file

import (
	"io"
	"strconv"
	"time"
)

// Single returns a single file writer.
func Single(filename string) io.WriteCloser {
	return newSingle(filename)
}

// Rotate returns a rotating file writer.
func Rotate(rotator Rotator) io.WriteCloser {
	return newRotate(rotator)
}

// PrefixSuffix returns a rotator with prefix and suffix
func PrefixSuffix(prefix, suffix string, rotator Rotator) Rotator {
	return &decorateNameRotator{
		rot: rotator,
		decorate: func(name string) string {
			return prefix + name + suffix
		},
	}
}

// Prefix returns a rotator with prefix.
func Prefix(prefix string, rotator Rotator) Rotator {
	return &decorateNameRotator{
		rot: rotator,
		decorate: func(name string) string {
			return prefix + name
		},
	}
}

// Suffix returns a rotator with suffix.
func Suffix(suffix string, rotator Rotator) Rotator {
	return &decorateNameRotator{
		rot: rotator,
		decorate: func(name string) string {
			return name + suffix
		},
	}
}

// SecondRotator returns a rotator that rotates every second with file name as
// {UNIX_SECOND}.{IDX} and file size limited to `size` bytes.
func SecondRotator(size int) Rotator {
	return &baseRotator{
		// FormatInt keeps the full 64-bit Unix timestamp; the previous
		// strconv.Itoa(int(...)) truncated it on 32-bit platforms.
		next: func() string { return strconv.FormatInt(time.Now().Unix(), 10) },
		size: size,
	}
}

// dateFmt is the reference-time layout for DateRotator file names.
const dateFmt = "20060102"

// DateRotator returns a rotator that rotates every day with file name as
// {YYYYMMDD}.{IDX} and file size limited to `size` bytes.
func DateRotator(size int) Rotator {
	return &baseRotator{
		next: func() string { return time.Now().Format(dateFmt) },
		size: size,
	}
}
package interp

import (
	"fmt"

	"go.starlark.net/starlark"
)

// ExecFile executes the Starlark file at filename with the glob and
// register_object builtins predefined, returning any execution error.
func ExecFile(filename string) error {
	predefined := starlark.StringDict{
		"glob":            starlark.NewBuiltin("glob", FnGlob),
		"register_object": starlark.NewBuiltin("register_object", FnRegisterObject),
	}

	thread := &starlark.Thread{Name: filename, Print: printer, Load: loader()}
	_, err := starlark.ExecFile(thread, filename, nil, predefined)
	return err
}

// printer implements the Thread.Print hook, prefixing each message with
// the thread name. The Print hook receives msg without a trailing newline,
// so one is appended here to keep successive print() calls on separate
// lines.
func printer(thread *starlark.Thread, msg string) {
	fmt.Printf("[%s] %s\n", thread.Name, msg)
}
package linkedlist

import "fmt"

// ListNode is a node of a singly linked list.
type ListNode struct {
	Val  int
	Next *ListNode
}

// newListNodes builds a list from val; when cycle is true the last node
// points back to the head. A nil slice yields a nil list.
//
// NOTE(review): a single-element list never gets a cycle even when cycle
// is true — confirm that is intended.
func newListNodes(val []int, cycle bool) *ListNode {
	if val == nil {
		return nil
	}
	l := &ListNode{
		Val: val[0],
	}
	if len(val) == 1 {
		return l
	}
	pre := l
	vRemain := val[1:]
	for i, v := range vRemain {
		q := &ListNode{
			Val: v,
		}
		pre.Next = q
		pre = pre.Next
		if i == len(vRemain)-1 && cycle {
			q.Next = l
		}
	}
	return l
}

// printListNodes prints the list values on one line, or "nil list" for a
// nil list. It must not be called on a cyclic list (it would never stop).
func printListNodes(l *ListNode) {
	if l == nil {
		fmt.Println("nil list")
		// FIX: previously fell through and also printed the
		// "List printing:" header for a nil list.
		return
	}
	fmt.Println("List printing:")
	for l != nil {
		fmt.Printf("%d ", l.Val)
		l = l.Next
	}
	fmt.Println()
}

// equalTwoList reports whether two (acyclic) lists hold the same values in
// the same order. Two nil lists are equal.
func equalTwoList(l1 *ListNode, l2 *ListNode) bool {
	if l1 == nil && l2 == nil {
		return true
	}
	for l1 != nil {
		if l2 == nil {
			return false
		}
		if l1.Val != l2.Val {
			return false
		}
		l1 = l1.Next
		l2 = l2.Next
	}
	if l2 != nil {
		return false
	}
	return true
}
package facades import ( "github.com/gophergala2016/source/core/foundation" "github.com/gophergala2016/source/core/models" "github.com/gophergala2016/source/internal/services" ) type TagFacade struct { RootFacade } func NewTagFacade(ctx foundation.Context) TagFacade { return TagFacade{ RootFacade: NewRootFacade(ctx), } } func (f TagFacade) FindPopularTag(limit int) ([]models.Tag, error) { tagService := services.NewTagService(f.ctx) return tagService.FindPopularTagByCollection(limit, 0) } func (f TagFacade) CreateTag(name, color string, score uint) (*models.Tag, error) { tagService := services.NewTagService(f.ctx) return tagService.CreateTag(name, color, score) } func (f TagFacade) FindTagByIDs(ids []uint64) ([]models.Tag, error) { tagService := services.NewTagService(f.ctx) return tagService.FindTagByIDs(ids) } func (f TagFacade) FindTagByScore(userID uint64, limit int) ([]models.Tag, error) { userTagService := services.NewUserTagService(f.ctx) userTags, err := userTagService.FindLatestByUserIDAndCollection(userID, limit) if err != nil { return nil, err } tagIDs := make([]uint64, len(userTags)) for i, userTag := range userTags { tagIDs[i] = userTag.TagID } tagService := services.NewTagService(f.ctx) return tagService.FindTagByIDs(tagIDs) } func (f TagFacade) ScoringTag(userID, tagID uint64) (*models.UserTag, error) { userTagService := services.NewUserTagService(f.ctx) userTag, err := userTagService.GetFirstOrCreate(userID, tagID) if err != nil { return nil, err } userTag.Score = userTag.Score + 1 return userTagService.UpdateByID(userTag) }
package main

import (
	"sync"
	"sync/atomic"

	"github.com/BorisBorshevsky/GolangDemos/common"
)

func main() {
	Track(1000)
}

// Track times the different counter-synchronization strategies with n
// goroutines doing n increments each.
func Track(n int) {
	//common.TimeTrack(simpleRun, n, "simple")
	common.TimeTrack(mutexRun, n, "mutex")
	common.TimeTrack(atomicRun, n, "atomic")
	common.TimeTrack(semaphoreRun, n, "sem")
}

// simpleRun increments a shared counter with no synchronization at all
// (deliberately racy — kept as the baseline, see the commented call above).
func simpleRun(n int) int64 {
	var counter int64
	var wg sync.WaitGroup

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < n; j++ {
				counter++
			}
		}()
	}
	wg.Wait()
	return counter
}

// mutexRun guards every increment with a sync.Mutex.
func mutexRun(n int) int64 {
	var counter int64
	var mu sync.Mutex
	var wg sync.WaitGroup

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < n; j++ {
				mu.Lock()
				counter++
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	return counter
}

// atomicRun performs lock-free increments via sync/atomic.
func atomicRun(n int) int64 {
	var counter int64
	var wg sync.WaitGroup

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < n; j++ {
				atomic.AddInt64(&counter, 1)
			}
		}()
	}
	wg.Wait()
	return counter
}

// semaphoreRun serializes increments through a one-slot channel semaphore.
func semaphoreRun(n int) int64 {
	var counter int64
	sem := make(chan int, 1)
	var wg sync.WaitGroup

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			defer wg.Done()
			for j := 0; j < n; j++ {
				sem <- 1
				counter++
				<-sem
			}
		}()
	}
	wg.Wait()
	return counter
}
package schedule

import (
	"time"

	"github.com/gorhill/cronexpr"
)

// CronSchedule schedules runs according to a parsed cron expression.
type CronSchedule struct {
	Expression *cronexpr.Expression
}

// Cron returns a CronSchedule using the cron expression giving as parameter of the function.
// Because it uses cronexpr.MustParse, it panics (per the Must convention)
// when the expression is not a valid cron expression.
func Cron(expression string) CronSchedule {
	expr := cronexpr.MustParse(expression)

	return CronSchedule{
		Expression: expr,
	}
}

// Next returns the first activation time of the expression strictly after
// current.
func (c CronSchedule) Next(current time.Time) time.Time {
	return c.Expression.Next(current)
}
// Copyright 2016 The Chromium Authors, 2018 Elco Industrie Automation GmbH. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package internal

// SymbolType classifies the symbols used in the solver tableau.
// NOTE(review): the Slack/Error/Dummy vocabulary suggests a Cassowary-style
// linear constraint solver — confirm against the rest of the package.
type SymbolType int

const (
	Invalid SymbolType = iota
	External
	Slack
	Error
	Dummy
)

// Symbol is a solver variable; identity (pointer) distinguishes instances.
type Symbol struct {
	Type SymbolType
}

// Tag pairs the two symbols associated with a constraint.
type Tag struct {
	Marker *Symbol
	Other  *Symbol
}

// FromTag returns a shallow copy of tag (the Symbol pointers are shared).
func FromTag(tag *Tag) *Tag {
	return &Tag{
		Marker: tag.Marker,
		Other:  tag.Other,
	}
}

// Row is a linear expression: Constant plus the sum of coefficient*symbol
// over Cells.
type Row struct {
	Cells    map[*Symbol]float64
	Constant float64
}

// NewRow returns a Row with constant c and no symbol terms.
func NewRow(c float64) *Row {
	return &Row{
		Cells:    make(map[*Symbol]float64),
		Constant: c,
	}
}

// SolveForSymbol rewrites the row (assumed to represent "row == 0") so that
// symbol is expressed in terms of the remaining cells: symbol is removed and
// every other term (and the constant) is scaled by -1/coefficient.
// Panics if symbol is not present in the row.
func (row *Row) SolveForSymbol(symbol *Symbol) {
	if _, ok := row.Cells[symbol]; !ok {
		panic("Symbol not contained by Row")
	}
	coefficient := -1.0 / row.Cells[symbol]
	delete(row.Cells, symbol)
	row.Constant *= coefficient
	for key, value := range row.Cells {
		row.Cells[key] = value * coefficient
	}
}

// Substitute replaces every occurrence of symbol in this row with secRow,
// scaled by the symbol's coefficient. No-op if the symbol is absent.
func (row *Row) Substitute(symbol *Symbol, secRow *Row) {
	coefficient, ok := row.Cells[symbol]
	if !ok {
		return
	}
	delete(row.Cells, symbol)
	row.InsertRow(secRow, coefficient)
}

// InsertRow adds secRow (scaled by coefficient) term-by-term into this row.
func (row *Row) InsertRow(secRow *Row, coefficient float64) {
	row.Constant += secRow.Constant * coefficient
	for symbol, v := range secRow.Cells {
		row.InsertSymbol(symbol, v*coefficient)
	}
}

// ReverseSign negates the constant and every coefficient in the row.
func (row *Row) ReverseSign() {
	row.Constant = -row.Constant
	for key, val := range row.Cells {
		row.Cells[key] = -val
	}
}

// InsertSymbol adds coefficient to the symbol's existing coefficient,
// removing the cell entirely when the result is (near) zero.
func (row *Row) InsertSymbol(symbol *Symbol, coefficient float64) {
	var val float64 = 0
	if oldVal, ok := row.Cells[symbol]; ok {
		val = oldVal
	}
	val += coefficient
	if IsNearZero(val) {
		delete(row.Cells, symbol)
	} else {
		row.Cells[symbol] = val
	}
}

// Add adds val to the row's constant and returns the new constant.
func (row *Row) Add(val float64) float64 {
	row.Constant += val
	return row.Constant
}

// SolveForSymbols rewrites the row so that rhs is expressed in terms of lhs
// and the remaining cells (inserts lhs with coefficient -1, then solves for
// rhs).
func (row *Row) SolveForSymbols(lhs *Symbol, rhs *Symbol) {
	row.InsertSymbol(lhs, -1)
	row.SolveForSymbol(rhs)
}

// CoefficientForSymbol returns the symbol's coefficient, or 0 if absent.
func (row *Row) CoefficientForSymbol(symbol *Symbol) float64 {
	if val, ok := row.Cells[symbol]; ok {
		return val
	}
	return 0
}

// IsNearZero reports whether |value| < 1e-8.
func IsNearZero(value float64) bool {
	const epsilon = 1.0e-8
	if value < 0 {
		return -value < epsilon
	} else {
		return value < epsilon
	}
}

// AnyPivotableSymbol returns some Slack or Error symbol from the row, or a
// fresh Invalid symbol when none exists. Map iteration order is random, so
// the choice among candidates is arbitrary.
func AnyPivotableSymbol(row *Row) *Symbol {
	for symbol := range row.Cells {
		if symbol.Type == Slack || symbol.Type == Error {
			return symbol
		}
	}
	return &Symbol{Invalid}
}

// CheckIfAllDummiesInRow reports whether every symbol in the row is Dummy
// (vacuously true for an empty row).
func CheckIfAllDummiesInRow(row *Row) bool {
	for symbol := range row.Cells {
		if symbol.Type != Dummy {
			return false
		}
	}
	return true
}

// CopyRow returns a deep copy of srcRow's cells and constant (Symbol
// pointers themselves are shared).
func CopyRow(srcRow *Row) *Row {
	result := NewRow(srcRow.Constant)
	for key, val := range srcRow.Cells {
		result.Cells[key] = val
	}
	return result
}
// Package controller implements the HTTP handlers of the zombie-survival API
// (survivors, inventories and item trades backed by a MySQL database).
package controller

import (
	"aplicacoes/projeto-zumbie/config"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// APP describes the API version metadata returned by HomeAPI.
type APP struct {
	Versao     int64  `json:"versao"`
	Descricao  string `json:"descrisao"`
	Data       string `json:"data"`
	Linguagem  string `json:"linguagem"`
}

// Sobreviventes is a survivor together with their inventory.
type Sobreviventes struct {
	Codigo     uint        `json:"codigosobrevivente"`
	Nome       string      `json:"nome"`
	Idade      int         `json:"idade"`
	Genero     string      `json:"genero"`
	Infectado  bool        `json:"infectado"`
	Inventario Inventarios `json:"inventario"`
}

// Inventarios is the per-survivor item counts.
type Inventarios struct {
	Agua        int `json:"agua"`
	Comida      int `json:"comida"`
	Medicamento int `json:"medicamento"`
	Municao     int `json:"municao"`
}

// Trocas is a proposed trade between two survivors.
type Trocas struct {
	Sobrevivente1 Sobreviventes
	Sobrevivente2 Sobreviventes
}

// ErroTroca is the error payload returned when a trade cannot proceed.
type ErroTroca struct {
	NomeSobrevivente string `json:"nomesobrevivente"`
	Mensagem         string `json:"mensagem"`
}

// NOTE(review): all handler state below is package-level and mutated per
// request; concurrent HTTP requests will race on these slices/values.
var app []APP
var sobreviventes []Sobreviventes
var inventarios []Inventarios
var errotroca []ErroTroca
var inventarioS1 int
var inventarioS2 int
var trocas []Trocas
var db = config.DB // database handle
var versao int64
var descricao, data, linguagem string
var query string

// AtualizaAPP appends a new version-metadata entry to the app list.
func AtualizaAPP(v int64, d string, data string, l string) {
	app = append(app, APP{
		Versao:    v,
		Descricao: d,
		Data:      data,
		Linguagem: l,
	})
}

// HomeAPI writes the hard-coded API metadata as JSON.
// NOTE(review): each call appends another copy to the global app slice, so
// repeated requests return a growing list.
func HomeAPI(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	versao = 1
	descricao = "Aplicação Sobrevivência Zumbi"
	data = "25/07/2018"
	linguagem = "Go"
	AtualizaAPP(versao, descricao, data, linguagem)
	json.NewEncoder(w).Encode(app)
}

// BuscarTodosSobrevivente returns every survivor joined with their inventory.
func BuscarTodosSobrevivente(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	query = "SELECT sobrevivente.codigo, nome, idade, genero, infectado, " +
		"agua, comida, medicamento, municao " +
		"FROM sobrevivente, inventario" +
		" WHERE " +
		"sobrevivente.codigo = inventario.codigosobrevivente"
	rows, err := db.Query(query)
	CheckError(err)
	sobreviventes = sobreviventes[:0]
	for rows.Next() {
		sobrevivente := Sobreviventes{}
		// NOTE(review): the Scan error is ignored here; a failed scan leaves
		// a zero-valued survivor in the response.
		rows.Scan(
			&sobrevivente.Codigo,
			&sobrevivente.Nome,
			&sobrevivente.Idade,
			&sobrevivente.Genero,
			&sobrevivente.Infectado,
			&sobrevivente.Inventario.Agua,
			&sobrevivente.Inventario.Comida,
			&sobrevivente.Inventario.Medicamento,
			&sobrevivente.Inventario.Municao)
		sobreviventes = append(sobreviventes, sobrevivente)
	}
	json.NewEncoder(w).Encode(sobreviventes)
}

// AdicionarNovoSobrevivente decodes a survivor from the request body and
// inserts it plus its inventory (linked via LAST_INSERT_ID()).
func AdicionarNovoSobrevivente(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	sobreviventes = sobreviventes[:0]
	inventarios = inventarios[:0]
	sobrevivente := Sobreviventes{}
	_ = json.NewDecoder(r.Body).Decode(&sobrevivente)
	sobreviventes = append(sobreviventes, sobrevivente)
	nome := sobrevivente.Nome
	idade := sobrevivente.Idade
	genero := sobrevivente.Genero
	infectado := sobrevivente.Infectado
	agua := sobrevivente.Inventario.Agua
	comida := sobrevivente.Inventario.Comida
	medicamento := sobrevivente.Inventario.Medicamento
	municao := sobrevivente.Inventario.Municao
	sobreviventes = sobreviventes[:0]
	inventarios = inventarios[:0]
	querySobrevivente := "INSERT INTO sobrevivente (nome,idade,genero,infectado) VALUES(?,?,?,?)"
	queryInventario := "INSERT INTO inventario (codigosobrevivente, agua, comida, medicamento, municao) VALUES (LAST_INSERT_ID(), ?, ?, ?, ?);"
	stmt, err := db.Prepare(querySobrevivente)
	CheckError(err)
	_, err = stmt.Exec(nome, idade, genero, infectado)
	CheckError(err)
	stmt, err = db.Prepare(queryInventario)
	CheckError(err)
	// NOTE(review): this final Exec error is assigned but never checked —
	// a failed inventory insert is silently reported as success.
	_, err = stmt.Exec(agua, comida, medicamento, municao)
	json.NewEncoder(w).Encode("Sobrevivente adicionado !!")
}

// BuscarSobreviventes returns the two survivors named in the URL, each
// joined with their inventory (UNION of two filtered selects).
func BuscarSobreviventes(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	sobrevivente1 := mux.Vars(r)["sobrevivente1"]
	sobrevivente2 := mux.Vars(r)["sobrevivente2"]
	query = "(SELECT sobrevivente.codigo, nome, idade, genero, infectado, " +
		"agua, comida, medicamento, municao " +
		"FROM sobrevivente, inventario" +
		" WHERE " +
		"sobrevivente.codigo = ? AND inventario.codigosobrevivente = ?) " +
		"UNION" +
		" (SELECT sobrevivente.codigo, nome, idade, genero, infectado, " +
		"agua, comida, medicamento, municao " +
		"FROM sobrevivente, inventario" +
		" WHERE " +
		"sobrevivente.codigo = ? AND inventario.codigosobrevivente = ?) "
	rows, err := db.Query(query, sobrevivente1, sobrevivente1, sobrevivente2, sobrevivente2)
	CheckError(err)
	sobreviventes = sobreviventes[:0]
	sobrevivente := Sobreviventes{}
	for rows.Next() {
		rows.Scan(&sobrevivente.Codigo,
			&sobrevivente.Nome,
			&sobrevivente.Idade,
			&sobrevivente.Genero,
			&sobrevivente.Infectado,
			&sobrevivente.Inventario.Agua,
			&sobrevivente.Inventario.Comida,
			&sobrevivente.Inventario.Medicamento,
			&sobrevivente.Inventario.Municao)
		sobreviventes = append(sobreviventes, sobrevivente)
	}
	json.NewEncoder(w).Encode(sobreviventes)
}

// RealizarTroca validates a proposed trade between two survivors against the
// (previously loaded) global sobreviventes slice.
// NOTE(review): after encoding an ErroTroca the function keeps running (no
// early return), so several JSON values may be written to one response.
func RealizarTroca(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	trocas = trocas[:0]
	errotroca = errotroca[:0]
	troca := Trocas{}
	erroTroca := ErroTroca{}
	// sobrevivente := Sobreviventes{}
	_ = json.NewDecoder(r.Body).Decode(&troca)
	trocas = append(trocas, troca)
	codigoSobrevivente1 := troca.Sobrevivente1.Codigo
	codigoSobrevivente2 := troca.Sobrevivente2.Codigo
	s1 := troca.Sobrevivente1
	s2 := troca.Sobrevivente2
	for _, s := range sobreviventes {
		if s.Codigo == codigoSobrevivente1 {
			// Here I'm trying to retrieve the item amounts...
			if s1.Inventario.Agua > 0 {
				fmt.Println("Agua será trocada !!")
				inventarioS1 = s1.Inventario.Agua
				// NOTE(review): this water check compares against the
				// survivor's Comida (food) stock — looks like a copy-paste
				// slip; presumably s.Inventario.Agua was intended. Confirm
				// before changing.
				if inventarioS1 > s.Inventario.Comida {
					erroTroca.NomeSobrevivente = s.Nome
					erroTroca.Mensagem = "Não possui água suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
			if s1.Inventario.Comida > 0 {
				fmt.Println("comida será trocada !!")
				inventarioS1 = s1.Inventario.Comida
				if inventarioS1 > s.Inventario.Comida {
					erroTroca.NomeSobrevivente = s.Nome
					erroTroca.Mensagem = "Não possui comida suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
			if s1.Inventario.Medicamento > 0 {
				fmt.Println("Medicamento será trocada !!")
				inventarioS1 = s1.Inventario.Medicamento
				if inventarioS1 > s.Inventario.Medicamento {
					erroTroca.NomeSobrevivente = s.Nome
					erroTroca.Mensagem = "Não possui medicamento suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
			if s1.Inventario.Municao > 0 {
				fmt.Println("Munição será trocada !!")
				inventarioS1 = s1.Inventario.Municao
				if inventarioS1 > s.Inventario.Municao {
					erroTroca.NomeSobrevivente = s.Nome
					erroTroca.Mensagem = "Não possui munição suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
		}
	}
	for _, s := range sobreviventes {
		if s.Codigo == codigoSobrevivente2 {
			// Here I'm trying to retrieve the item amounts...
			if s2.Inventario.Agua > 0 {
				fmt.Println("Agua será trocada !!")
				inventarioS2 = s2.Inventario.Agua
				// NOTE(review): same water-vs-food comparison slip as above.
				if inventarioS2 > s.Inventario.Comida {
					erroTroca.NomeSobrevivente = s2.Nome
					erroTroca.Mensagem = "Não possui água suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
			if s2.Inventario.Comida > 0 {
				fmt.Println("comida será trocada !!")
				inventarioS2 = s2.Inventario.Comida
				if inventarioS2 > s.Inventario.Comida {
					erroTroca.NomeSobrevivente = s2.Nome
					erroTroca.Mensagem = "Não possui comida suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
			if s2.Inventario.Medicamento > 0 {
				fmt.Println("Medicamento será trocada !!")
				inventarioS2 = s2.Inventario.Medicamento
				if inventarioS2 > s.Inventario.Medicamento {
					erroTroca.NomeSobrevivente = s2.Nome
					erroTroca.Mensagem = "Não possui medicamento suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
			if s2.Inventario.Municao > 0 {
				fmt.Println("Munição será trocada !!")
				inventarioS2 = s2.Inventario.Municao
				if inventarioS2 > s.Inventario.Municao {
					erroTroca.NomeSobrevivente = s2.Nome
					erroTroca.Mensagem = "Não possui munição suficiente !! ;("
					json.NewEncoder(w).Encode(erroTroca)
				}
			}
		}
	}
	if ComparaTroca(trocas) {
		fmt.Println("ok vamos trocar...")
	} else {
		fmt.Println("não vamos.")
	}
}

// ComparaTroca reports whether both sides of the trade carry equal total
// value.
// NOTE(review): the commented-out table suggests intended weights of
// agua=4, comida=3, medicamento=2, municao=1, but every item is actually
// weighted 2 — making all item kinds equally valuable. Confirm intent.
func ComparaTroca(trocas []Trocas) bool {
	// agua := 4
	// comida := 3
	// medicamento := 2
	// municao := 1
	var s1Agua int
	var s1Comida int
	var s1Med int
	var s1Mun int
	var s2Agua int
	var s2Comida int
	var s2Med int
	var s2Mun int
	var somaS1 int
	var somaS2 int
	for _, s := range trocas {
		s1Agua = s.Sobrevivente1.Inventario.Agua * 2
		s1Comida = s.Sobrevivente1.Inventario.Comida * 2
		s1Med = s.Sobrevivente1.Inventario.Medicamento * 2
		s1Mun = s.Sobrevivente1.Inventario.Municao * 2
		somaS1 = s1Agua + s1Comida + s1Med + s1Mun
		s2Agua = s.Sobrevivente2.Inventario.Agua * 2
		s2Comida = s.Sobrevivente2.Inventario.Comida * 2
		s2Med = s.Sobrevivente2.Inventario.Medicamento * 2
		s2Mun = s.Sobrevivente2.Inventario.Municao * 2
		somaS2 = s2Agua + s2Comida + s2Med + s2Mun
	}
	if somaS1 != somaS2 {
		return false
	}
	return true
}

// CheckError panics on a non-nil error, aborting the current request.
func CheckError(err error) {
	if err != nil {
		panic(err.Error())
	}
}
package todo func NewList() *todoList { return &todoList{} } func NewTodo(title string) *todoModel { return &todoModel{ title: title, } }
package minedive type MinediveState int const ( MinediveStateNew MinediveState = iota + 1 MinediveStateConnecting ) const ( MinediveStateNewStr = "new" MinediveStateConnectingStr = "connecting" ) func (t MinediveState) String() string { switch t { case MinediveStateNew: return MinediveStateNewStr case MinediveStateConnecting: return MinediveStateConnectingStr default: return ErrUnknownType.Error() } }
package main

import "fmt"

/*
An interface defines a behavioral contract for an object: it only declares the
contract, and concrete objects supply the implementation details.

An interface (interface) is an abstract type — a named set of methods.
Declared as:
	type InterfaceName interface{
		Method1(params1) results1
		...
	}
Interface names conventionally end in "er", e.g. Writer, Stringer.

Satisfying an interface:
	an object implements an interface once it implements all of its methods.

An interface-typed variable can store any instance whose type implements the
interface.

Value receivers: both a struct value and a struct pointer may be assigned to
the interface variable.
Pointer receivers: only a struct pointer may be assigned.

One type may implement several interfaces.

Interface embedding — new interfaces can be composed from existing ones:
	type Sayer interface{ say() }
	type Mover interface{ move() }
	type animal interface{ Sayer; Mover }

Empty interface: an interface with no methods, so every type implements it;
a variable of empty-interface type can store a value of any type.
	var x interface{}
	x = ...
Typical uses: empty interface as a function parameter; as a map value type.

Type assertion: x.(T)
	x: a variable of type interface{}
	T: the type x may hold
It yields two results: x converted to T, and a bool that is true when the
assertion succeeds and false when it fails.
*/

type Sayer interface {
	say() // no parameters, no return value
}

type Cat struct{}

func (c Cat) say() { // implements Sayer (value receiver)
	fmt.Println("喵喵喵")
}

type Dog struct{}

func (d *Dog) say() { // implements Sayer (pointer receiver)
	fmt.Println("汪汪汪")
}

func main() {
	var say Sayer
	// Either of these works for a value-receiver implementation:
	say = Cat{}
	say = &Cat{} // Go's syntactic sugar automatically evaluates through the pointer (*Cat)
	say.say()
	// Note this:
	// say = Dog{} // would not compile: pointer receiver needs a pointer
	say = &Dog{}
	say.say()

	// Empty interface can hold a value of any type.
	var x interface{}
	s := "Hello 沙河"
	x = s
	fmt.Printf("type:%T value:%v\n", x, x) //string
	i := 100
	x = i
	fmt.Printf("type:%T value:%v\n", x, x) //int
	b := true
	x = b
	fmt.Printf("type:%T value:%v\n", x, x) //bool

	// Empty interface as a map value type.
	var m = make(map[string]interface{})
	m["name"] = "hello"
	m["age"] = 18
	m["married"] = false
	fmt.Println(m)

	// Type assertion: x currently holds a bool.
	v, ok := x.(bool)
	if ok {
		fmt.Printf("它是个布尔类型,值为%v\n", v)
	} else {
		fmt.Println("断言失败")
	}
	justifyType(x)
}

// justifyType prints which concrete type the empty-interface value holds.
func justifyType(i interface{}) {
	switch v := i.(type) { // a type switch is the usual way to test several types
	case string:
		fmt.Printf("x is a string,value is %v\n", v)
	case int:
		fmt.Printf("x is a int is %v\n", v)
	case bool:
		fmt.Printf("x is a bool is %v\n", v)
	default:
		fmt.Println("unsupport type!")
	}
}
package rakuten

import (
	"context"
)

// travelHotelChainPath is the fixed, versioned endpoint path of the Rakuten
// Travel GetHotelChainList API. The original code built it with a no-argument
// fmt.Sprintf, which go vet flags as a constant-format call; a plain constant
// says the same thing without the dead fmt dependency.
const travelHotelChainPath = "Travel/GetHotelChainList/20131024?"

// TravelHotelChainParams holds the (currently empty) query parameters for the
// hotel-chain listing call.
type TravelHotelChainParams struct{}

// TravelHotelChainResponse mirrors the JSON payload returned by the
// GetHotelChainList endpoint: hotel chains grouped into large classes.
type TravelHotelChainResponse struct {
	LargeClasses []struct {
		LargeClass []struct {
			LargeClassCode string `json:"largeClassCode"`
			HotelChains    []struct {
				HotelChain struct {
					HotelChainCode     string `json:"hotelChainCode"`
					HotelChainName     string `json:"hotelChainName"`
					HotelChainNameKana string `json:"hotelChainNameKana"`
					HotelChainComment  string `json:"hotelChainComment"`
				} `json:"hotelChain"`
			} `json:"hotelChains"`
		} `json:"largeClass"`
	} `json:"largeClasses"`
}

// HotelChain fetches the hotel-chain list. It returns the decoded payload,
// the raw API response (which may be non-nil even on error), and any
// request-building or transport error.
func (s *TravelService) HotelChain(ctx context.Context, opt *TravelHotelChainParams) (*TravelHotelChainResponse, *Response, error) {
	req, err := s.client.NewRequest("GET", travelHotelChainPath, opt, nil)
	if err != nil {
		return nil, nil, err
	}

	respBody := &TravelHotelChainResponse{}
	resp, err := s.client.Do(ctx, req, respBody)
	if err != nil {
		return nil, resp, err
	}
	return respBody, resp, nil
}
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE(review): this file follows the tcecloud SDK generated-code pattern;
// avoid hand-editing beyond comments.
package v20200107

import (
	"encoding/json"

	tchttp "github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/http"
)

// GetStatDayRequest is the request payload for the GetStatDay API.
type GetStatDayRequest struct {
	*tchttp.BaseRequest

	// COS region the bucket resides in.
	CosRegion *string `json:"CosRegion,omitempty" name:"CosRegion"`

	// Bucket name.
	Bucket *string `json:"Bucket,omitempty" name:"Bucket"`

	// Date of the statistics.
	Date *string `json:"Date,omitempty" name:"Date"`

	// Storage type. 1: standard storage, 2: infrequent-access storage.
	StorageType *int64 `json:"StorageType,omitempty" name:"StorageType"`
}

// ToJsonString serializes the request to JSON, ignoring marshal errors
// (SDK convention).
func (r *GetStatDayRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString populates the request from a JSON string.
// NOTE(review): &r passes a **GetStatDayRequest; encoding/json follows the
// extra pointer level, so this works, but plain r would be conventional.
func (r *GetStatDayRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// GetStatDayResponse is the response payload for the GetStatDay API.
type GetStatDayResponse struct {
	*tchttp.BaseResponse
	Response *struct {

		// Unique request ID, returned with every response; provide it when
		// reporting issues.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString serializes the response to JSON, ignoring marshal errors
// (SDK convention).
func (r *GetStatDayResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString populates the response from a JSON string.
func (r *GetStatDayResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
package block

import (
	"time"
)

// Chain is a singly linked blockchain rooted at Genesis. Difficulty is the
// proof-of-work difficulty used when mining appended blocks; numberOfBlocks
// counts blocks added after the genesis block.
type Chain struct {
	Created        time.Time
	Genesis        *Block
	Difficulty     int
	numberOfBlocks int
}

// IncrNumberOfBlocks bumps the block counter and returns the new count,
// which doubles as the index of the next appended block.
// NOTE(review): not safe for concurrent use; no locking anywhere in Chain.
func (c *Chain) IncrNumberOfBlocks() int {
	c.numberOfBlocks = c.numberOfBlocks + 1
	return c.numberOfBlocks
}

// CreateChain builds a chain with the given difficulty and a hashed genesis
// block whose previous hash is the sentinel "0000".
func CreateChain(difficulty int) *Chain {
	chain := &Chain{
		Created:    time.Now(),
		Difficulty: difficulty,
	}
	b := NewBlock(map[string]string{
		"Name": "Genesis Block",
	})
	b.PreviousHash = "0000"
	b.Hash = b.GenerateHash()
	chain.Genesis = b
	return chain
}

// AddBlock walks to the tail of the chain, links b after it with the tail's
// hash as PreviousHash, assigns the next index, and mines b at the chain's
// difficulty before attaching it.
func (c *Chain) AddBlock(b *Block) {
	block := c.Genesis
	prevHash := block.Hash
	for block.Next != nil {
		block = block.Next
		prevHash = block.Hash
	}
	b.PreviousHash = prevHash
	b.Idx = c.IncrNumberOfBlocks()
	b.MineBlock(c.Difficulty)
	block.Next = b
}

// Validate verifies every block and returns the last invalid one found, or
// nil when the whole chain verifies.
func (c *Chain) Validate() *Block {
	var foundInvalid *Block = nil
	c.EveryBlock(func(b *Block) {
		if !b.Verify() {
			foundInvalid = b
		}
	})
	return foundInvalid
}

// EveryBlock calls fn on each block from Genesis to the tail, inclusive.
func (c *Chain) EveryBlock(fn func(b *Block)) {
	cur := c.Genesis
	for cur.Next != nil {
		fn(cur)
		cur = cur.Next
	}
	fn(cur)
}
/*
 * Copyright (c) 2020, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * WSO2 Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package cmd

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"encoding/xml"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
)

// Parts is the OIDC protocol settings of a service provider.
type Parts struct {
	GrantTypes   []string `json:"grantTypes"`
	CallbackURLs []string `json:"callbackURLs"`
	PublicClient bool     `json:"publicClient"`
}

// Parts1 wraps the OIDC settings under the inbound-protocol key.
type Parts1 struct {
	Oidc Parts `json:"oidc"`
}

// ServiceProviderOAuth is the JSON body posted to the applications API when
// creating an OAuth service provider.
type ServiceProviderOAuth struct {
	Name                         string `json:"name"`
	Description                  string `json:"description"`
	InboundProtocolConfiguration Parts1 `json:"inboundProtocolConfiguration"`
}

// ServiceProviderXml mirrors the XML document returned by the application
// export endpoint.
type ServiceProviderXml struct {
	XMLName                     xml.Name `xml:"ServiceProvider"`
	Text                        string   `xml:",chardata"`
	ApplicationName             string   `xml:"ApplicationName"`
	Description                 string   `xml:"Description"`
	JwksUri                     string   `xml:"JwksUri"`
	InboundAuthenticationConfig struct {
		Text                                string `xml:",chardata"`
		InboundAuthenticationRequestConfigs struct {
			Text                               string `xml:",chardata"`
			InboundAuthenticationRequestConfig []struct {
				Text                 string `xml:",chardata"`
				InboundAuthKey       string `xml:"InboundAuthKey"`
				InboundAuthType      string `xml:"InboundAuthType"`
				InboundConfigType    string `xml:"InboundConfigType"`
				Properties           string `xml:"Properties"`
				InboundConfiguration string `xml:"inboundConfiguration"`
			} `xml:"InboundAuthenticationRequestConfig"`
		} `xml:"InboundAuthenticationRequestConfigs"`
	} `xml:"InboundAuthenticationConfig"`
	LocalAndOutBoundAuthenticationConfig struct {
		Text                                  string `xml:",chardata"`
		AuthenticationSteps                   string `xml:"AuthenticationSteps"`
		AuthenticationType                    string `xml:"AuthenticationType"`
		AlwaysSendBackAuthenticatedListOfIdPs string `xml:"alwaysSendBackAuthenticatedListOfIdPs"`
		UseTenantDomainInUsername             string `xml:"UseTenantDomainInUsername"`
		UseUserstoreDomainInRoles             string `xml:"UseUserstoreDomainInRoles"`
		UseUserstoreDomainInUsername          string `xml:"UseUserstoreDomainInUsername"`
		SkipConsent                           string `xml:"SkipConsent"`
		SkipLogoutConsent                     string `xml:"skipLogoutConsent"`
		EnableAuthorization                   string `xml:"EnableAuthorization"`
	} `xml:"LocalAndOutBoundAuthenticationConfig"`
	RequestPathAuthenticatorConfigs string `xml:"RequestPathAuthenticatorConfigs"`
	InboundProvisioningConfig       struct {
		Text                  string `xml:",chardata"`
		ProvisioningUserStore string `xml:"ProvisioningUserStore"`
		IsProvisioningEnabled string `xml:"IsProvisioningEnabled"`
		IsDumbModeEnabled     string `xml:"IsDumbModeEnabled"`
	} `xml:"InboundProvisioningConfig"`
	OutboundProvisioningConfig struct {
		Text                          string `xml:",chardata"`
		ProvisioningIdentityProviders string `xml:"ProvisioningIdentityProviders"`
	} `xml:"OutboundProvisioningConfig"`
	ClaimConfig struct {
		Text                           string `xml:",chardata"`
		RoleClaimURI                   string `xml:"RoleClaimURI"`
		LocalClaimDialect              string `xml:"LocalClaimDialect"`
		IdpClaim                       string `xml:"IdpClaim"`
		ClaimMappings                  string `xml:"ClaimMappings"`
		AlwaysSendMappedLocalSubjectId string `xml:"AlwaysSendMappedLocalSubjectId"`
		SPClaimDialects                string `xml:"SPClaimDialects"`
	} `xml:"ClaimConfig"`
	PermissionAndRoleConfig struct {
		Text         string `xml:",chardata"`
		Permissions  string `xml:"Permissions"`
		RoleMappings string `xml:"RoleMappings"`
		IdpRoles     string `xml:"IdpRoles"`
	} `xml:"PermissionAndRoleConfig"`
	IsSaaSApp      string `xml:"IsSaaSApp"`
	ImageUrl       string `xml:"ImageUrl"`
	AccessUrl      string `xml:"AccessUrl"`
	IsDiscoverable string `xml:"IsDiscoverable"`
}

// Export carries an application ID for export requests.
type Export struct {
	ApplicationID string `json:"applicationId"`
}

// createSPOauthApplication creates an OAuth service provider on the identity
// server, then (on success) exports it and prints the generated consumer
// key/secret. On a 401 it prompts for credentials and retries recursively.
//
// NOTE(review): several spots here deserve a second look before touching:
//   - TLS verification is disabled globally (InsecureSkipVerify on the
//     default transport) — affects every client in the process.
//   - `defer req.Body.Close()` closes the *request* body, which net/http
//     already handles; the *response* body of the inner export call is never
//     closed, leaking the connection.
//   - The inner req/resp in the 201 branch shadow the outer ones.
func createSPOauthApplication(oauthAppName string,
	description string, callbackURLs string, grantTypes []string) {
	SERVER, CLIENTID, CLIENTSECRET, TENANTDOMAIN = readSPConfig()
	var ADDAPPURL = SERVER + "/t/" + TENANTDOMAIN + "/api/server/v1/applications"
	var err error
	var status int
	var xmlData ServiceProviderXml

	token := readFile()

	// Build the JSON creation payload from the CLI arguments.
	toJson := ServiceProviderOAuth{
		Name:        oauthAppName,
		Description: description,
		InboundProtocolConfiguration: Parts1{
			Parts{
				grantTypes,
				[]string{callbackURLs},
				false,
			},
		},
	}
	jsonData, err := json.Marshal(toJson)
	if err != nil {
		log.Fatalln(err)
	}
	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	req, err := http.NewRequest("POST", ADDAPPURL, bytes.NewBuffer(jsonData))
	if err != nil {
		log.Fatalln(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("accept", "*/*")
	req.Header.Set("Content-Type", "application/json")
	defer req.Body.Close()

	httpClient := &http.Client{}
	resp, err := httpClient.Do(req)
	if err != nil {
		log.Fatalln(err)
	}
	status = resp.StatusCode
	defer resp.Body.Close()

	if status == 401 {
		// Token rejected: re-authenticate interactively and retry.
		fmt.Println("Unauthorized access.\nPlease enter your UserName and password for server.")
		setServerWithInit(SERVER)
		createSPOauthApplication(oauthAppName, description, callbackURLs, grantTypes)
	} else if status == 400 {
		fmt.Println("Provided parameters are not in correct format.")
	} else if status == 403 {
		fmt.Println("Forbidden")
	} else if status == 201 {
		fmt.Println("Successfully created the service provider named '" + oauthAppName + "' at " + resp.Header.Get("Date"))

		// The new application's ID is the tail of the Location header.
		location := resp.Header.Get("Location")
		splits := strings.SplitAfter(location, "applications/")
		serviceProviderID := splits[1]

		// Export the application (including secrets) to recover the
		// generated OAuth consumer key/secret from the XML payload.
		http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		req, err := http.NewRequest("GET", ADDAPPURL+"/"+serviceProviderID+"/export", bytes.NewBuffer(nil))
		query := req.URL.Query()
		query.Add("exportSecrets", "true")
		req.URL.RawQuery = query.Encode()
		req.Header.Set("Authorization", "Bearer "+token)
		req.Header.Set("accept", "*/*")
		defer req.Body.Close()

		httpClient := &http.Client{}
		resp, err := httpClient.Do(req)
		if err != nil {
			log.Fatalln(err)
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Fatalln(err)
		}
		err = xml.Unmarshal(body, &xmlData)
		if err != nil {
			log.Fatalln(err)
		}
		configuration := xmlData.InboundAuthenticationConfig.InboundAuthenticationRequestConfigs.InboundAuthenticationRequestConfig[0].InboundConfiguration
		fmt.Println("oauthConsumerKey: " + between(configuration, "<oauthConsumerKey>", "</oauthConsumerKey>"))
		fmt.Println("oauthConsumerSecret: " + between(configuration, "<oauthConsumerSecret>", "</oauthConsumerSecret>"))
	} else if status == 409 {
		fmt.Println("Already exists an application with same name:" + oauthAppName)
	}
}

// between returns the substring of fullString strictly between the first
// occurrences of start and end, or "" when the markers are missing or
// mis-ordered.
func between(fullString string, start string, end string) string {
	// Get substring between two strings.
	posFirst := strings.Index(fullString, start)
	if posFirst == -1 {
		return ""
	}
	posLast := strings.Index(fullString, end)
	if posLast == -1 {
		return ""
	}
	posFirstAdjusted := posFirst + len(start)
	if posFirstAdjusted >= posLast {
		return ""
	}
	return fullString[posFirstAdjusted:posLast]
}
// Good morning! Here's your coding interview problem for today. // This problem was asked by Microsoft. // Given a dictionary of words and a string made up of those words (no spaces), return the original sentence in a list. If there is more than one possible reconstruction, return any of them. If there is no possible reconstruction, then return null. // For example, given the set of words 'quick', 'brown', 'the', 'fox', and the string "thequickbrownfox", you should return ['the', 'quick', 'brown', 'fox']. // Given the set of words 'bed', 'bath', 'bedbath', 'and', 'beyond', and the string "bedbathandbeyond", return either ['bed', 'bath', 'and', 'beyond] or ['bedbath', 'and', 'beyond']. package twentytwo import ( "strings" ) // Twentytwo returns the strings from dict that make up compositeWord, or nil if the word cannot be composed from dict. func Twentytwo(dict map[string]bool, compositeWord string) []string { if len(compositeWord) == 0 { return []string{} } builder := strings.Builder{} for _, r := range compositeWord { builder.WriteRune(r) if _, ok := dict[builder.String()]; ok { words := Twentytwo(dict, strings.TrimPrefix(compositeWord, builder.String())) if words != nil { return append(words, builder.String()) } } } return nil }
// Tests for the upload task scheduler's XID resource bookkeeping and its
// pid/fid-restricted dispatch policies.
// NOTE(review): the "./../" relative imports are GOPATH-era style and will
// not build under Go modules.
package task

import (
	trans "./../"
	"./../models"
	"cydex"
	"cydex/transfer"
	. "github.com/smartystreets/goconvey/convey"
	"testing"
	"time"
)

// Test_XidResource checks that entries expire by their TTL: an un-expired
// entry survives DelExpired, an expired one is removed.
func Test_XidResource(t *testing.T) {
	Convey("Test XidResource", t, func() {
		X := NewXidResource()
		X.Add("x1", "n1", 1*time.Second)
		So(X.Len(), ShouldEqual, 1)
		// Sleep(5) is 5 *nanoseconds* — far below the 1s TTL, so the entry
		// must still be present after DelExpired.
		time.Sleep(5)
		X.DelExpired()
		So(X.Len(), ShouldEqual, 1)
		// Now wait past the TTL; the entry must be purged.
		time.Sleep(1100 * time.Millisecond)
		X.DelExpired()
		So(X.Len(), ShouldEqual, 0)
	})
}

// Test_RestrictUploadScheduler exercises DispatchUpload under both restrict
// modes: by package id (pid) and by file id (fid), with and without free
// storage constraints on the node.
func Test_RestrictUploadScheduler(t *testing.T) {
	// var err error

	Convey("Test Restrict", t, func() {
		Convey("Test restrict by pid", func() {
			S := NewRestrictUploadScheduler(TASK_RESTRICT_BY_PID)
			node := &trans.Node{
				Nid: "n1",
				Info: trans.NodeInfo{
					FreeStorage: 10000,
				},
			}
			trans.NodeMgr.AddNode(node)
			// Existing upload task pins pid "…122222" to node n1.
			t1 := &Task{
				Task: &models.Task{
					TaskId: "t0",
					JobId:  "jobid",
					Pid:    "1234567890ab1111122222",
					Type:   cydex.UPLOAD,
					NodeId: "n1",
					Fid:    "1234567890ab111112222201",
				},
			}
			S.AddTask(t1)
			// Same pid: must be routed to the same node.
			r1 := &UploadReq{
				UploadTaskReq: &transfer.UploadTaskReq{
					TaskId: "t1",
					Uid:    "1234567890ab",
					Pid:    "1234567890ab1111122222",
					Fid:    "1234567890ab111112222202",
				},
			}
			n, err := S.DispatchUpload(r1)
			So(err, ShouldBeNil)
			So(n, ShouldEqual, node)
			// Different pid: no node is assigned.
			r2 := &UploadReq{
				UploadTaskReq: &transfer.UploadTaskReq{
					TaskId: "t2",
					Uid:    "1234567890ab",
					Pid:    "1234567890ab1111122223",
					Fid:    "1234567890ab111112222301",
				},
			}
			n, err = S.DispatchUpload(r2)
			So(err, ShouldBeNil)
			So(n, ShouldBeNil)
		})
		Convey("Test restrict by fid", func() {
			S := NewRestrictUploadScheduler(TASK_RESTRICT_BY_FID)
			node := &trans.Node{
				Nid: "n1",
				Info: trans.NodeInfo{
					FreeStorage: 10000,
				},
			}
			trans.NodeMgr.AddNode(node)
			t1 := &Task{
				Task: &models.Task{
					TaskId: "t0",
					Type:   cydex.UPLOAD,
					Pid:    "pid",
					Fid:    "1234567890ab111112222201",
					NodeId: "n1",
				},
			}
			S.AddTask(t1)
			// Different fid than the pinned one: no node.
			r1 := &UploadReq{
				UploadTaskReq: &transfer.UploadTaskReq{
					TaskId: "t1",
					Uid:    "1234567890ab",
					Fid:    "1234567890ab111112222202",
				},
			}
			n, err := S.DispatchUpload(r1)
			So(err, ShouldBeNil)
			So(n, ShouldBeNil)
		})
		Convey("Test restrict by fid with size", func() {
			S := NewRestrictUploadScheduler(TASK_RESTRICT_BY_FID)
			// Node with only 128 bytes free: a 127-byte file fits, a
			// 129-byte one does not.
			node := &trans.Node{
				Nid: "n1",
				Info: trans.NodeInfo{
					FreeStorage: 128,
				},
			}
			trans.NodeMgr.AddNode(node)
			t1 := &Task{
				Task: &models.Task{
					TaskId: "t0",
					JobId:  "jobid",
					Fid:    "1234567890ab111112222201",
					Type:   cydex.UPLOAD,
					NodeId: "n1",
				},
			}
			S.AddTask(t1)
			r1 := &UploadReq{
				LeftPkgSize: 1234,
				FileSize:    127,
				UploadTaskReq: &transfer.UploadTaskReq{
					TaskId: "t1",
					Uid:    "1234567890ab",
					Fid:    "1234567890ab111112222201",
					Size:   127,
				},
			}
			n, err := S.DispatchUpload(r1)
			So(err, ShouldBeNil)
			So(n, ShouldEqual, node)
			// Dispatch should stamp the restrict mode onto the request.
			So(r1.restrict_mode, ShouldEqual, TASK_RESTRICT_BY_FID)
			r2 := &UploadReq{
				LeftPkgSize: 1234,
				FileSize:    129,
				UploadTaskReq: &transfer.UploadTaskReq{
					TaskId: "t2",
					Uid:    "1234567890ab",
					Fid:    "1234567890ab111112222201",
					Size:   129,
				},
			}
			n, err = S.DispatchUpload(r2)
			So(err, ShouldBeNil)
			So(n, ShouldBeNil)
		})
		Convey("Test restrict by pid with size", func() {
			S := NewRestrictUploadScheduler(TASK_RESTRICT_BY_PID)
			// In pid mode the whole remaining package must fit: 999 <= 1000
			// free passes, 1001 does not.
			node := &trans.Node{
				Nid: "n1",
				Info: trans.NodeInfo{
					FreeStorage: 1000,
				},
			}
			trans.NodeMgr.AddNode(node)
			t1 := &Task{
				Task: &models.Task{
					TaskId: "t0",
					JobId:  "jobid",
					Fid:    "1234567890ab111112222201",
					Type:   cydex.UPLOAD,
					NodeId: "n1",
				},
			}
			S.AddTask(t1)
			r1 := &UploadReq{
				LeftPkgSize: 999,
				FileSize:    127,
				UploadTaskReq: &transfer.UploadTaskReq{
					TaskId: "t1",
					Uid:    "1234567890ab",
					Fid:    "1234567890ab111112222201",
					Size:   127,
				},
			}
			So(r1.restrict_mode, ShouldEqual, 0)
			n, err := S.DispatchUpload(r1)
			So(err, ShouldBeNil)
			So(n, ShouldEqual, node)
			So(r1.restrict_mode, ShouldEqual, TASK_RESTRICT_BY_PID)
			r2 := &UploadReq{
				LeftPkgSize: 1001,
				FileSize:    129,
				UploadTaskReq: &transfer.UploadTaskReq{
					TaskId: "t2",
					Uid:    "1234567890ab",
					Fid:    "1234567890ab111112222201",
					Size:   129,
				},
			}
			n, err = S.DispatchUpload(r2)
			So(err, ShouldBeNil)
			So(n, ShouldBeNil)
		})
	})
}
package yolopb func AllModels() []interface{} { return []interface{}{ // remote data &MergeRequest{}, &Commit{}, &Build{}, &Artifact{}, &Release{}, &Entity{}, &Project{}, // internal &Download{}, } }
package atomic

import "sync/atomic"

// IncWrapInt64 atomically increments a 64-bit signed integer, wrapping
// around zero.
//
// Specifically if p points to a value of math.MaxInt64 the result of calling
// IncWrapInt64 will be 0: the increment overflows to a negative value, and
// any negative result is clamped to zero before being stored.
func IncWrapInt64(p *int64) int64 {
	for {
		cur := atomic.LoadInt64(p)
		next := cur + 1
		if next < 0 {
			// Overflow past MaxInt64 (or a negative starting value): wrap to 0.
			next = 0
		}
		// Retry the CAS loop until no other writer has raced us.
		if atomic.CompareAndSwapInt64(p, cur, next) {
			return next
		}
	}
}
package contact

// Repository abstracts persistent storage for Contact records.
type Repository interface {
	// New stores a new contact and returns the persisted record.
	New(*Contact) (*Contact, error)
	// Update replaces an existing contact and returns the updated record.
	Update(*Contact) (*Contact, error)
	// Delete removes the given contact.
	Delete(*Contact) error
	// Get fetches a contact by its numeric ID.
	Get(uint) (*Contact, error)
	// List returns every stored contact.
	List() ([]*Contact, error)
	// Close releases any resources held by the repository.
	// NOTE(review): no error return, so implementations cannot report a
	// failed shutdown.
	Close()
}
// A minimal blog server: serves the post list on "/" from a MySQL table.
package main

import (
	"fmt"
	"net/http"
	"html/template"
	"database/sql"
	_ "github.com/go-sql-driver/mysql"
	//"math/rand"
	"log"
)

const conn_string = "root:imonomy@/goblog?charset=utf8"
const driver_name = "mysql"

// db is shared by all handlers; top_error is checked once in main.
var db, top_error = sql.Open(driver_name, conn_string)

// indexPage renders the "home" template with every row of blog_post.
//
// Bugs fixed relative to the original:
//   - the original spawned a goroutine that sent each row over an unbuffered
//     channel but performed only ONE receive, so only the first post was
//     rendered and the goroutine blocked forever on the second send (leak);
//   - a single map was reused for every row, so even the received post could
//     be clobbered by the producer (data race / aliasing);
//   - the template parse error was stored but rows were queried before it
//     was acted upon.
// The commented-out `posts` channel of []map in the original shows the
// intent was to hand the template the full post list; that is what we do
// now, synchronously.
func indexPage(resp http.ResponseWriter, req *http.Request) {
	log.Println(req.Method)

	t, err := template.ParseFiles("D:\\Golangprojects\\test\\src\\main\\home")
	if err != nil {
		panic("Something went wrong with template")
	}

	res, err := db.Query("SELECT id, title, text FROM blog_post")
	if err != nil {
		panic("Something went wrong with DB query")
	}
	defer res.Close()

	var posts []map[string]string
	for res.Next() {
		var (
			id    int
			title string
			text  string
		)
		if err := res.Scan(&id, &title, &text); err != nil {
			log.Fatal(err)
		}
		// Fresh map per row so posts don't alias each other.
		posts = append(posts, map[string]string{
			"title": title,
			"text":  text,
		})
	}
	if err := res.Err(); err != nil {
		log.Fatal(err)
	}

	t.ExecuteTemplate(resp, "home", posts)
}

// contactPage is a stub that just echoes the request method.
func contactPage(resp http.ResponseWriter, req *http.Request) {
	fmt.Fprintf(resp, "You request is %s", req.Method)
}

func main() {
	if top_error != nil {
		panic("Error connecting to database")
	}
	var port = ":8000"
	http.HandleFunc("/", indexPage)
	http.HandleFunc("/contact", contactPage)
	fmt.Printf("Server started, port %s", port)
	http.ListenAndServe(port, nil)
}
package robot

import (
	"fmt"

	"github.com/ev3go/ev3dev"
)

// Indexes into the slice returned by GetSensors for each sensor kind found.
var TOUCH int
var COLOR int
var IR int

// saveIndex records at which slice position a sensor with driver name st was
// stored; unknown driver names are ignored.
func saveIndex(st string, ind int) {
	switch st {
	case "lego-ev3-touch":
		TOUCH = ind
	case "lego-ev3-color":
		COLOR = ind
	case "lego-ev3-ir":
		IR = ind
	}
}

// GetSensors probes every EV3 input port for the known sensor drivers and
// returns the sensors found, recording each kind's slice index in the
// package-level TOUCH/COLOR/IR variables. At most one sensor per port is
// detected (probing a port stops at the first matching driver).
func GetSensors() []*ev3dev.Sensor {
	fmt.Println("sensorFactory:: GetSensors()")

	ports := []string{"in1", "in2", "in3", "in4"}
	drivers := []string{"lego-ev3-touch", "lego-ev3-color", "lego-ev3-ir"}

	found := []*ev3dev.Sensor{}
	for _, port := range ports {
		for _, driver := range drivers {
			fmt.Println("Searching " + port + " for " + driver + "...")
			sensor, err := ev3dev.SensorFor(port, driver)
			if err != nil {
				fmt.Println("Not found")
				continue
			}
			fmt.Println("Found " + driver + " connected to " + port)
			// The sensor's index is its position before appending.
			saveIndex(driver, len(found))
			found = append(found, sensor)
			break
		}
	}
	return found
}
package main

import (
	"fmt"
	"math"
	"os"
	"strconv"
)

// quantize maps an 8-bit channel value to the nearest 5-bit value
// (i.e. rounds to the nearest multiple of 8, divided by 8).
func quantize(c uint64) uint64 {
	return uint64(math.Floor(float64(c)/8 + 0.5))
}

// pack combines three 5-bit channels into one 15-bit word
// (r in bits 0-4, g in bits 5-9, b in bits 10-14).
func pack(r, g, b uint64) uint64 {
	return r + (g+b*0x20)*0x20
}

// main converts an RR GG BB hex triple from the command line into the
// packed palette word and prints it in several assembler-source forms.
// Fixes vs. the original: each ParseUint error is now checked (previously
// only the last assignment to err was inspected), and the third Printf of
// the lossy branch had a broken "%02" verb (missing the trailing 'x').
func main() {
	if len(os.Args) != 4 {
		fmt.Println("Usage: palette RR GG BB")
		return
	}
	r, err := strconv.ParseUint(os.Args[1], 16, 8)
	if err != nil {
		fmt.Println(err)
		return
	}
	g, err := strconv.ParseUint(os.Args[2], 16, 8)
	if err != nil {
		fmt.Println(err)
		return
	}
	b, err := strconv.ParseUint(os.Args[3], 16, 8)
	if err != nil {
		fmt.Println(err)
		return
	}

	R := quantize(r)
	G := quantize(g)
	B := quantize(b)
	rgb := pack(R, G, B)

	if R*8 != r || G*8 != g || B*8 != b {
		// Quantization was lossy: annotate output with the original color.
		// NOTE(review): this branch emits the db bytes high-then-low while
		// the exact-match branch emits low-then-high — confirm which order
		// the target assembler expects.
		fmt.Printf(" dw %%%016b ; #%x%x%x (from #%02x%02x%02x)\n", rgb, R*8, G*8, B*8, r, g, b)
		fmt.Printf(" db %%%08b, %%%08b ; #%x%x%x (from #%02x%02x%02x)\n", rgb>>8, rgb&0xff, R*8, G*8, B*8, r, g, b)
		fmt.Printf(" ld [hl],%%%08b ; #%x%x%x (from #%02x%02x%02x)\n", rgb&0xff, R*8, G*8, B*8, r, g, b)
		fmt.Printf(" ld [hl],%%%08b\n", rgb>>8)
		return
	}
	fmt.Printf(" dw %%%016b ; #%x%x%x\n", rgb, r, g, b)
	fmt.Printf(" db %%%08b, %%%08b ; #%x%x%x\n", rgb&0xff, rgb>>8, r, g, b)
	fmt.Printf(" ld [hl],%%%08b ; #%x%x%x\n", rgb&0xff, R*8, G*8, B*8)
	fmt.Printf(" ld [hl],%%%08b\n", rgb>>8)
}
package controller

import (
	"gopkg.in/go-playground/validator.v9"
)

// AddQuestionParam is a request payload with form/json bindings.
// The `binding` tags are presumably enforced by a gin-style binder —
// TODO confirm against the consuming handler.
type AddQuestionParam struct {
	// Name is required on bind.
	Name        string `form:"name" json:"name" binding:"required"`
	NestedParam NestedParam
}

// NestedParam demonstrates validation of a nested struct field.
type NestedParam struct {
	Nested1 string `json:"nested1"`
	// Nested2 is required and must be strictly positive (gt=0).
	Nested2 int `form:"n2" json:"nested2" binding:"required,gt=0"`
}

//to use this function
//validate.RegisterValidation("is-unique", ValidateUnique)

// ValidateUnique is a custom validator intended to be registered under the
// "is-unique" tag. NOTE(review): it is currently a stub that accepts every
// value — no uniqueness check is performed.
func ValidateUnique(fl validator.FieldLevel) bool {
	return true
}
package main import ( "context" "flag" "fmt" "os" "testing" "time" runner "github.com/SentientTechnologies/studio-go-runner/internal/runner" "github.com/karlmutch/envflag" "github.com/karlmutch/errors" // MIT License ) var ( parsedFlags = false TestStopC = make(chan bool) TestRunMain string useGPU = flag.Bool("no-gpu", false, "Used to skip test and other initialization GPU hardware code") // cleanupDirs is a list of working directories that need to be expunged when the test is finally all over // within this package cleanupDirs = []string{} // InitError is used to track an failures occurring during static initialization InitError errors.Error // TestOptions are externally visible symbols that this package is asking the unit test suite to pickup and use // when the testing is managed by an external entity, this allows build level variations that include or // exclude GPUs for example to run their tests appropriately. It also allows the top level build logic // to inspect source code for executables and run their testing without knowledge of how they work. DuatTestOptions = [][]string{ {"-cache-dir=/tmp/cache-runner", "-cache-size=1Gib", "--cache-create"}, } ) // When the runner tests are done we need to build the scenarios we want tested // and their command line options for each case func init() { cleanupDirs = append(cleanupDirs, "/tmp/cache-runner") } func cleanup() { for _, tmpDir := range cleanupDirs { os.RemoveAll(tmpDir) } } // TestRunMain can be used to run the server in production mode as opposed to // funit or unit testing mode. Traditionally gathering coverage data and running // in production are done separately. This unit test allows the runner to do // both at the same time. To do this a test binary is generated using the command // // cd $(GOROOT)/src/github.com/SentientTechnologies/studio-go-runner // go test -coverpkg="." 
-c -o bin/runner-cpu-run-coverage -tags 'NO_CUDA' cmd/runner/*.go // // Then the resulting /bin/runner-cpu-run-coverage binary is run as through it were a traditional // server binary for the go runner using the command below. The difference being that the // binary now has coverage instrumentation. In order to collect the coverage run any production // workload and use cases you need then CTRL-C the server. // // ./bin/runner-cpu-run-coverage -test.run "^TestRunMain$" -test.coverprofile=system.out // // As an additional feature coverage files have is that they can also be merged using // commands similar to the following: // // $ go get github.com/wadey/gocovmerge // $ gocovmerge unit.out system.out > all.out // $ go tool cover -html all.out // // Using the coverage merge tool testing done using a fully deployed system with // real projects, proxies, projects, and workloads along with integration testing can be merged // together from different test steps in an integration and test pipeline. // // TestMain is invoked by the GoLang entry point for the runtime of compiled GoLang // programs when the compiled and linked image has been run using the 'go test' // command // // This function will invoke the applications entry point to initiate the normal execution flow // of the server with the tests remaining under the scheduling control of the // GoLang test runtime. For more information please read https://golang.org/pkg/testing/ // func TestMain(m *testing.M) { defer cleanup() TestMode = true if InitError != nil { fmt.Fprintln(os.Stderr, InitError) } // Only perform this Parsed check inside the test framework. 
Do not be tempted // to do this in the main of our production package // if !flag.Parsed() { envflag.Parse() } parsedFlags = true runner.UseGPU = useGPU quitCtx, quit := context.WithCancel(context.Background()) initializedC := make(chan struct{}) // Start Rabbit MQ test queues client for testing purposes, essentially a real // server being used in what would otherwise be a mocking context. This can fail // if the context the tests are run in dont allow for test deployments of // RabbitMQ. This is OK as the tests are responsible for determining if // they should run and if they would fail due to the initialization error here. runner.PingRMQServer(*amqpURL) resultCode := -1 { // Start the server under test go func() { logger.Info("starting server") if errs := EntryPoint(quitCtx, quit, initializedC); len(errs) != 0 { for _, err := range errs { logger.Error(err.Error()) } logger.Fatal("test setup failed, aborting all testing") } <-quitCtx.Done() // When using benchmarking in production mode, that is no tests running the // user can park the server on a single unit test that only completes when this // channel is close, which happens only when there is a quitCtx from the application // due to a CTRL-C key sequence or kill -n command // // If the test was not selected for by the tester then this will be essentially a // NOP // close(TestStopC) logger.Info("forcing test mode server down") func() { defer func() { recover() }() quit() }() }() // The initialization is done inline so that we know the test S3 server is // running prior to any testing starting logger.Info("starting interfaces such as minio (S3), and message queuing") errC := runner.LocalMinio(quitCtx) go func() { // Wait for any errors from the S3 server and log them, continuing until // the testing stops for { select { case err := <-errC: if err != nil { logger.Error(err.Error()) } case <-quitCtx.Done(): break } } }() // Wait for the server to signal it is ready for work <-initializedC // If there are any 
tests to be done we now start them if len(TestRunMain) != 0 { <-TestStopC } else { resultCode = m.Run() quit() } } logger.Info("waiting for server down to complete") // Wait until the main server is shutdown <-quitCtx.Done() time.Sleep(2 * time.Second) if resultCode != 0 { os.Exit(resultCode) } }
package api import ( "encoding/json" "github.com/CuCTeMeH/gopher_translate/translator" "github.com/go-chi/render" "net/http" ) func postWord(w http.ResponseWriter, r *http.Request) { var word map[string]string err := json.NewDecoder(r.Body).Decode(&word) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } gopherWord, err := translator.TranslateWord(word["english-word"]) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } result := map[string]string{"gopher-word": gopherWord} render.JSON(w, r, result) } func postSentence(w http.ResponseWriter, r *http.Request) { var sentence map[string]string err := json.NewDecoder(r.Body).Decode(&sentence) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } gopherSentence, err := translator.TranslateSentence(sentence["english-sentence"]) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } result := map[string]string{"gopher-sentence": gopherSentence} render.JSON(w, r, result) // A chi router helper for serializing and returning json }
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

package cmd

import (
	"fmt"
	"testing"

	"github.com/Azure/aks-engine/pkg/api/common"

	"github.com/Azure/aks-engine/pkg/api"
	"github.com/Azure/aks-engine/pkg/armhelpers"
	. "github.com/onsi/gomega"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// validVersionsBackup holds the package-global supported-versions table
// while a test temporarily replaces it via setupValidVersions.
var validVersionsBackup map[string]bool

// setupValidVersions swaps in a test-controlled table of supported
// Kubernetes versions; always pair with a later resetValidVersions.
func setupValidVersions(validVersions map[string]bool) {
	validVersionsBackup = common.AllKubernetesSupportedVersions
	common.AllKubernetesSupportedVersions = validVersions
}

// resetValidVersions restores the table saved by setupValidVersions.
func resetValidVersions() {
	common.AllKubernetesSupportedVersions = validVersionsBackup
}

// TestUpgradeCommandShouldBeValidated exercises upgradeCmd.validate for each
// missing/ambiguous required flag and for a fully valid configuration.
func TestUpgradeCommandShouldBeValidated(t *testing.T) {
	g := NewGomegaWithT(t)
	r := &cobra.Command{}
	cases := []struct {
		uc          *upgradeCmd
		expectedErr error
		name        string
	}{
		{
			uc: &upgradeCmd{
				resourceGroupName:           "",
				apiModelPath:                "./not/used",
				deploymentDirectory:         "",
				upgradeVersion:              "1.8.9",
				location:                    "centralus",
				timeoutInMinutes:            60,
				cordonDrainTimeoutInMinutes: 60,
			},
			expectedErr: errors.New("--resource-group must be specified"),
			name:        "NeedsResourceGroup",
		},
		{
			uc: &upgradeCmd{
				resourceGroupName:           "test",
				apiModelPath:                "./not/used",
				deploymentDirectory:         "",
				upgradeVersion:              "1.8.9",
				location:                    "",
				timeoutInMinutes:            60,
				cordonDrainTimeoutInMinutes: 60,
			},
			expectedErr: errors.New("--location must be specified"),
			name:        "NeedsLocation",
		},
		{
			uc: &upgradeCmd{
				resourceGroupName:           "test",
				apiModelPath:                "./not/used",
				deploymentDirectory:         "",
				upgradeVersion:              "",
				location:                    "southcentralus",
				timeoutInMinutes:            60,
				cordonDrainTimeoutInMinutes: 60,
			},
			expectedErr: errors.New("--upgrade-version must be specified"),
			name:        "NeedsUpgradeVersion",
		},
		{
			uc: &upgradeCmd{
				resourceGroupName:           "test",
				apiModelPath:                "",
				deploymentDirectory:         "",
				upgradeVersion:              "1.9.0",
				location:                    "southcentralus",
				timeoutInMinutes:            60,
				cordonDrainTimeoutInMinutes: 60,
			},
			expectedErr: errors.New("--api-model must be specified"),
			name:        "NeedsAPIModel",
		},
		{
			// Both an api-model and a deployment directory were given.
			uc: &upgradeCmd{
				resourceGroupName:           "test",
				apiModelPath:                "./somefile",
				deploymentDirectory:         "aDir/anotherDir",
				upgradeVersion:              "1.9.0",
				location:                    "southcentralus",
				timeoutInMinutes:            60,
				cordonDrainTimeoutInMinutes: 60,
			},
			expectedErr: errors.New("ambiguous, please specify only one of --api-model and --deployment-dir"),
			name:        "NeedsNonAmbiguous",
		},
		{
			uc: &upgradeCmd{
				resourceGroupName:   "test",
				apiModelPath:        "./not/used",
				deploymentDirectory: "",
				upgradeVersion:      "1.9.0",
				location:            "southcentralus",
			},
			expectedErr: nil,
			name:        "IsValid",
		},
	}

	for _, tc := range cases {
		c := tc // capture range variable for the parallel subtest
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()
			err := c.uc.validate(r)
			if c.expectedErr != nil && err != nil {
				g.Expect(err.Error()).To(Equal(c.expectedErr.Error()))
			} else {
				g.Expect(err).To(BeNil())
				g.Expect(c.expectedErr).To(BeNil())
			}
		})
	}
}

// TestCreateUpgradeCommand checks the cobra command's metadata and flags and
// that executing with no arguments fails.
func TestCreateUpgradeCommand(t *testing.T) {
	t.Parallel()
	g := NewGomegaWithT(t)
	command := newUpgradeCmd()

	g.Expect(command.Use).Should(Equal(upgradeName))
	g.Expect(command.Short).Should(Equal(upgradeShortDescription))
	g.Expect(command.Long).Should(Equal(upgradeLongDescription))
	g.Expect(command.Flags().Lookup("location")).NotTo(BeNil())
	g.Expect(command.Flags().Lookup("resource-group")).NotTo(BeNil())
	g.Expect(command.Flags().Lookup("api-model")).NotTo(BeNil())
	g.Expect(command.Flags().Lookup("upgrade-version")).NotTo(BeNil())

	command.SetArgs([]string{})
	if err := command.Execute(); err == nil {
		t.Fatalf("expected an error when calling upgrade with no arguments")
	}
}

// TestUpgradeShouldFailForSameVersion upgrades a cluster to the version it is
// already running and expects initialize to reject it.
func TestUpgradeShouldFailForSameVersion(t *testing.T) {
	versionToUse := common.RationalizeReleaseAndVersion(api.Kubernetes, "", "", false, false, false)
	setupValidVersions(map[string]bool{
		versionToUse: true,
	})
	g := NewGomegaWithT(t)
	upgradeCmd := &upgradeCmd{
		resourceGroupName:           "rg",
		apiModelPath:                "./not/used",
		upgradeVersion:              versionToUse,
		location:                    "centralus",
		timeoutInMinutes:            60,
		cordonDrainTimeoutInMinutes: 60,
		client:                      &armhelpers.MockAKSEngineClient{},
	}

	containerServiceMock := api.CreateMockContainerService("testcluster", versionToUse, 3, 2, false)
	containerServiceMock.Location = "centralus"
	upgradeCmd.containerService = containerServiceMock
	err := upgradeCmd.initialize()
	g.Expect(err).To(HaveOccurred())
	g.Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("upgrading from Kubernetes version %s to version %s is not supported", versionToUse, versionToUse)))
	resetValidVersions()
}

// TestUpgradeShouldFailForInvalidUpgradePath targets a version that the
// (test-controlled) table marks unsupported.
func TestUpgradeShouldFailForInvalidUpgradePath(t *testing.T) {
	setupValidVersions(map[string]bool{
		"1.10.13": false,
		"1.10.12": true,
	})
	g := NewGomegaWithT(t)
	upgradeCmd := &upgradeCmd{
		resourceGroupName:           "rg",
		apiModelPath:                "./not/used",
		upgradeVersion:              "1.10.13",
		location:                    "centralus",
		timeoutInMinutes:            60,
		cordonDrainTimeoutInMinutes: 60,
		client:                      &armhelpers.MockAKSEngineClient{},
	}

	containerServiceMock := api.CreateMockContainerService("testcluster", "1.10.12", 3, 2, false)
	containerServiceMock.Location = "centralus"
	upgradeCmd.containerService = containerServiceMock
	err := upgradeCmd.initialize()
	g.Expect(err).To(HaveOccurred())
	g.Expect(err.Error()).To(ContainSubstring("upgrading from Kubernetes version 1.10.12 to version 1.10.13 is not supported"))
	resetValidVersions()
}

// NOTE(review): "Suceed" is a typo for "Succeed" in the test name.
func TestUpgradeShouldSuceedForValidUpgradePath(t *testing.T) {
	setupValidVersions(map[string]bool{
		"1.10.13": true,
		"1.10.12": true,
	})
	g := NewGomegaWithT(t)
	upgradeCmd := &upgradeCmd{
		resourceGroupName:           "rg",
		apiModelPath:                "./not/used",
		upgradeVersion:              "1.10.13",
		location:                    "centralus",
		timeoutInMinutes:            60,
		cordonDrainTimeoutInMinutes: 60,
		client:                      &armhelpers.MockAKSEngineClient{},
	}

	containerServiceMock := api.CreateMockContainerService("testcluster", "1.10.12", 3, 2, false)
	containerServiceMock.Location = "centralus"
	upgradeCmd.containerService = containerServiceMock
	err := upgradeCmd.initialize()
	g.Expect(err).NotTo(HaveOccurred())
	resetValidVersions()
}

// NOTE(review): the name says "FailWith..." but the test asserts success, and
// it calls resetValidVersions without a matching setupValidVersions (so the
// global table is overwritten with a possibly-nil backup) — confirm intent.
func TestUpgradeFailWithPathWhenAzureDeployJsonIsInvalid(t *testing.T) {
	g := NewGomegaWithT(t)
	upgradeCmd := &upgradeCmd{
		resourceGroupName:           "rg",
		apiModelPath:                "./not/used",
		upgradeVersion:              "1.13.3",
		location:                    "centralus",
		timeoutInMinutes:            60,
		cordonDrainTimeoutInMinutes: 60,
		force:                       true,
		client:                      &armhelpers.MockAKSEngineClient{},
	}

	containerServiceMock := api.CreateMockContainerService("testcluster", "1.13.2", 3, 2, false)
	containerServiceMock.Location = "centralus"
	upgradeCmd.containerService = containerServiceMock
	err := upgradeCmd.initialize()
	g.Expect(err).NotTo(HaveOccurred())
	resetValidVersions()
}

// TestUpgradeForceSameVersionShouldSucceed verifies that --force bypasses the
// same-version (and supported-version) checks.
func TestUpgradeForceSameVersionShouldSucceed(t *testing.T) {
	setupValidVersions(map[string]bool{
		"1.10.13": false,
	})
	g := NewGomegaWithT(t)
	upgradeCmd := &upgradeCmd{
		resourceGroupName:           "rg",
		apiModelPath:                "./not/used",
		upgradeVersion:              "1.10.13",
		location:                    "centralus",
		timeoutInMinutes:            60,
		cordonDrainTimeoutInMinutes: 60,
		client:                      &armhelpers.MockAKSEngineClient{},
	}

	containerServiceMock := api.CreateMockContainerService("testcluster", "1.10.13", 3, 2, false)
	containerServiceMock.Location = "centralus"
	upgradeCmd.containerService = containerServiceMock
	upgradeCmd.force = true
	err := upgradeCmd.initialize()
	g.Expect(err).NotTo(HaveOccurred())
	resetValidVersions()
}

// TestUpgradeForceDowngradeShouldSetVersionOnContainerService verifies that a
// forced downgrade writes the requested version into the container service.
func TestUpgradeForceDowngradeShouldSetVersionOnContainerService(t *testing.T) {
	setupValidVersions(map[string]bool{
		"1.10.12": true,
		"1.10.13": true,
	})
	g := NewGomegaWithT(t)
	upgradeCmd := &upgradeCmd{
		resourceGroupName:           "rg",
		apiModelPath:                "./not/used",
		upgradeVersion:              "1.10.12",
		location:                    "centralus",
		timeoutInMinutes:            60,
		cordonDrainTimeoutInMinutes: 60,
		client:                      &armhelpers.MockAKSEngineClient{},
	}

	containerServiceMock := api.CreateMockContainerService("testcluster", "1.10.13", 3, 2, false)
	containerServiceMock.Location = "centralus"
	upgradeCmd.containerService = containerServiceMock
	upgradeCmd.force = true
	err := upgradeCmd.initialize()
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(upgradeCmd.containerService.Properties.OrchestratorProfile.OrchestratorVersion).To(Equal("1.10.12"))
	resetValidVersions()
}

// TestIsVMSSNameInAgentPoolsArray checks VMSS-name matching against the agent
// pool profiles of an api model, across single/multiple pools, unrecognized
// names, empty pools, and availability-set pools.
func TestIsVMSSNameInAgentPoolsArray(t *testing.T) {
	cases := []struct {
		vmssName string
		cs       *api.ContainerService
		expected bool
		name     string
	}{
		{
			vmssName: "k8s-agentpool1-41325566-vmss",
			cs: &api.ContainerService{
				Properties: &api.Properties{
					OrchestratorProfile: &api.OrchestratorProfile{
						OrchestratorType:    api.Kubernetes,
						OrchestratorVersion: "1.15.4",
						KubernetesConfig: &api.KubernetesConfig{
							ContainerRuntime: api.Docker,
						},
					},
					AgentPoolProfiles: []*api.AgentPoolProfile{
						{
							Name:                "agentpool1",
							Count:               1,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
					},
				},
			},
			expected: true,
			name:     "vmss is in the api model spec",
		},
		{
			vmssName: "my-vmss",
			cs: &api.ContainerService{
				Properties: &api.Properties{
					OrchestratorProfile: &api.OrchestratorProfile{
						OrchestratorType:    api.Kubernetes,
						OrchestratorVersion: "1.15.4",
						KubernetesConfig: &api.KubernetesConfig{
							ContainerRuntime: api.Docker,
						},
					},
					AgentPoolProfiles: []*api.AgentPoolProfile{
						{
							Name:                "agentpool1",
							Count:               1,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
					},
				},
			},
			expected: false,
			name:     "vmss unrecognized",
		},
		{
			vmssName: "k8s-frontendpool-41325566-vmss",
			cs: &api.ContainerService{
				Properties: &api.Properties{
					OrchestratorProfile: &api.OrchestratorProfile{
						OrchestratorType:    api.Kubernetes,
						OrchestratorVersion: "1.15.4",
						KubernetesConfig: &api.KubernetesConfig{
							ContainerRuntime: api.Docker,
						},
					},
					AgentPoolProfiles: []*api.AgentPoolProfile{
						{
							Name:                "frontendpool",
							Count:               30,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
						{
							Name:                "backendpool",
							Count:               7,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
						{
							Name:                "canary",
							Count:               5,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
					},
				},
			},
			expected: true,
			name:     "multiple pools, frontendpool vmss is in spec",
		},
		{
			vmssName: "k8s-backendpool-41325566-vmss",
			cs: &api.ContainerService{
				Properties: &api.Properties{
					OrchestratorProfile: &api.OrchestratorProfile{
						OrchestratorType:    api.Kubernetes,
						OrchestratorVersion: "1.15.4",
						KubernetesConfig: &api.KubernetesConfig{
							ContainerRuntime: api.Docker,
						},
					},
					AgentPoolProfiles: []*api.AgentPoolProfile{
						{
							Name:                "frontendpool",
							Count:               30,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
						{
							Name:                "backendpool",
							Count:               7,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
						{
							Name:                "canary",
							Count:               5,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
					},
				},
			},
			expected: true,
			name:     "multiple pools, backendpool vmss is in spec",
		},
		{
			vmssName: "k8s-canary-41325566-vmss",
			cs: &api.ContainerService{
				Properties: &api.Properties{
					OrchestratorProfile: &api.OrchestratorProfile{
						OrchestratorType:    api.Kubernetes,
						OrchestratorVersion: "1.15.4",
						KubernetesConfig: &api.KubernetesConfig{
							ContainerRuntime: api.Docker,
						},
					},
					AgentPoolProfiles: []*api.AgentPoolProfile{
						{
							Name:                "frontendpool",
							Count:               30,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
						{
							Name:                "backendpool",
							Count:               7,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
						{
							Name:                "canary",
							Count:               5,
							AvailabilityProfile: api.VirtualMachineScaleSets,
						},
					},
				},
			},
			expected: true,
			name:     "multiple pools, canary vmss is in spec",
		},
		{
			vmssName: "k8s-canary-41325566-vmss",
			cs: &api.ContainerService{
				Properties: &api.Properties{
					OrchestratorProfile: &api.OrchestratorProfile{
						OrchestratorType:    api.Kubernetes,
						OrchestratorVersion: "1.15.4",
						KubernetesConfig: &api.KubernetesConfig{
							ContainerRuntime: api.Docker,
						},
					},
					AgentPoolProfiles: []*api.AgentPoolProfile{},
				},
			},
			expected: false,
			name:     "no pools",
		},
		{
			vmssName: "k8s-canary-41325566-vmss",
			cs: &api.ContainerService{
				Properties: &api.Properties{
					OrchestratorProfile: &api.OrchestratorProfile{
						OrchestratorType:    api.Kubernetes,
						OrchestratorVersion: "1.15.4",
						KubernetesConfig: &api.KubernetesConfig{
							ContainerRuntime: api.Docker,
						},
					},
					AgentPoolProfiles: []*api.AgentPoolProfile{
						{
							Name:                "canary",
							Count:               1,
							AvailabilityProfile: api.AvailabilitySet,
						},
					},
				},
			},
			expected: false,
			name:     "availability set",
		},
	}

	for _, tc := range cases {
		c := tc // capture range variable for the parallel subtest
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()
			ret := isVMSSNameInAgentPoolsArray(c.vmssName, c.cs)
			if ret != c.expected {
				t.Errorf("expected %t to be %t", ret, c.expected)
			}
		})
	}
}
package boltrepo

import (
	"bytes"
	"encoding/binary"
	"encoding/json"

	"github.com/boltdb/bolt"
	"github.com/scjalliance/drivestream/binpath"
	"github.com/scjalliance/drivestream/driveversion"
	"github.com/scjalliance/drivestream/resource"
)

// Compile-time check that DriveVersions implements driveversion.Sequence.
var _ driveversion.Sequence = (*DriveVersions)(nil)

// DriveVersions accesses a sequence of drive versions in a bolt repository.
type DriveVersions struct {
	db    *bolt.DB
	drive resource.ID
}

// Path returns the path of the drive versions.
func (ref DriveVersions) Path() binpath.Text {
	return binpath.Text{RootBucket, DriveBucket, ref.drive.String(), VersionBucket}
}

// Next returns the next version number in the sequence.
//
// It inspects the last key in the version bucket; version keys are 8-byte
// big-endian integers, so the next version is last+1. A missing or empty
// bucket yields version 0.
func (ref DriveVersions) Next() (n resource.Version, err error) {
	err = ref.db.View(func(tx *bolt.Tx) error {
		versions := driveVersionsBucket(tx, ref.drive)
		if versions == nil {
			// No bucket yet: the sequence starts at zero.
			return nil
		}
		cursor := versions.Cursor()
		k, _ := cursor.Last()
		if k == nil {
			// Bucket exists but holds no versions.
			return nil
		}
		if len(k) != 8 {
			// Keys must be exactly 8 bytes; anything else is corruption.
			key := append(k[:0:0], k...) // Copy key bytes
			return BadDriveVersionKey{Drive: ref.drive, BadKey: key}
		}
		n = resource.Version(binary.BigEndian.Uint64(k)) + 1
		return nil
	})
	return n, err
}

// Read reads drive data for a range of drive versions starting at the
// given version number. Up to len(p) entries will be returned in p.
// The number of entries is returned as n.
//
// Reading stops early (without error) when the keys stop forming a
// contiguous sequence from start.
func (ref DriveVersions) Read(start resource.Version, p []resource.DriveData) (n int, err error) {
	err = ref.db.View(func(tx *bolt.Tx) error {
		versions := driveVersionsBucket(tx, ref.drive)
		if versions == nil {
			return driveversion.NotFound{Drive: ref.drive, Version: start}
		}
		cursor := versions.Cursor()
		pos := start
		key := makeVersionKey(pos)
		k, v := cursor.Seek(key[:])
		if k == nil || !bytes.Equal(key[:], k) {
			// The requested starting version is not present.
			return driveversion.NotFound{Drive: ref.drive, Version: start}
		}
		for n < len(p) {
			if v == nil {
				return driveversion.InvalidData{Drive: ref.drive, Version: pos} // All versions must be non-nil
			}
			if err := json.Unmarshal(v, &p[n]); err != nil {
				// TODO: Wrap the error in InvalidData?
				return err
			}
			n++
			k, v = cursor.Next()
			if k == nil {
				break
			}
			if len(k) != 8 {
				key := append(k[:0:0], k...) // Copy key bytes
				return BadDriveVersionKey{Drive: ref.drive, BadKey: key}
			}
			pos = start + resource.Version(n)
			key = makeVersionKey(pos)
			if !bytes.Equal(key[:], k) {
				// The next key doesn't match the expected sequence number
				// TODO: Consider returning an error here?
				break
			}
		}
		return nil
	})
	return n, err
}

// Ref returns a drive version reference for the version number.
func (ref DriveVersions) Ref(v resource.Version) driveversion.Reference {
	return DriveVersion{
		db:      ref.db,
		drive:   ref.drive,
		version: v,
	}
}
package proc

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"strconv"
	"syscall"
	"time"

	"github.com/DemoHn/obsidian-panel/infra"
	"github.com/DemoHn/obsidian-panel/util"
	"github.com/moby/moby/pkg/reexec"
)

const (
	// ipcPipe is the child-side fd number of the IPC pipe.
	// NOTE(review): not referenced in this file — confirm it is used by the
	// child worker code elsewhere in the package.
	ipcPipe = 4
)

// ipcMessage is the JSON message the child worker sends back over the IPC
// pipe to report its startup status.
type ipcMessage struct {
	Status  string `json:"status"`
	Message string `json:"message"`
}

// StartDaemon - start child worker
// there're 2 types to start <obs-daemon> (i.e. the child worker):
//
// 1. run worker foreground -
//
// In foreground mode the worker runs in this process; otherwise a detached
// child is re-exec'ed, its pid recorded, and we wait up to 3s for its IPC
// status message.
func StartDaemon(rootPath string, debug bool, foreground bool) error {
	pidFile := fmt.Sprintf("%s/proc/obs-daemon.pid", rootPath)
	// DO NOT start daemon TWICE
	if exists, pid := daemonExists(pidFile); exists {
		if isPidRunning(pid) {
			infra.Log.Info("obs-daemon has been started")
			return nil
		}
	}
	// remove os-daemon.sock
	os.Remove(fmt.Sprintf("%s/proc/obs-daemon.sock", rootPath))
	// 0. For foreground process, just call core function directly
	if foreground {
		// the existance of pidFile is unnecessary but confusing - thus for FG mode,
		// we delete it to ensure no background process spawned.
		// (and we ignore the errors here)
		removePidFile(pidFile)
		return childCoreWorker(workerEnv{rootPath, debug}, nil)
	}
	// I. start worker (background)
	infra.Log.Info("start obs worker...")
	rp, cmd, err := registerCmd(rootPath, debug)
	if err != nil {
		return err
	}
	// II. start cmd
	if err := cmd.Start(); err != nil {
		return err
	}
	doneErr := make(chan error, 1)
	// wait for background
	// NOTE(review): the error returned by writePid is discarded here.
	writePid(pidFile, cmd.Process.Pid)
	go handleIpcMessageBG(rp, doneErr)
	select {
	case <-time.After(3 * time.Second):
		infra.Log.Info("wait for child worker response timeout (3s)")
	case err := <-doneErr:
		return err
	}
	return nil
}

// KillDaemon - sends SIGTERM to the recorded worker pid, then polls (5s max,
// 200ms interval) for the socket file to disappear as the shutdown signal.
func KillDaemon(rootPath string) error {
	pidFile := fmt.Sprintf("%s/proc/obs-daemon.pid", rootPath)
	sockFile := fmt.Sprintf("%s/proc/obs-daemon.sock", rootPath)

	exists, pid := daemonExists(pidFile)
	if !exists {
		infra.Log.Warn("could not kill a non-existing worker process")
		return nil
	}
	// I. kill process - send SIGTERM signal
	if err := syscall.Kill(pid, syscall.SIGTERM); err != nil {
		return err
	}
	// II. check if sock file has been deleted (for 5 seconds)
	countDown := 25
	for {
		if countDown == 0 {
			infra.Log.Error("kill daemon timeout (after 5s)")
			return nil
		}
		if !util.FileExists(sockFile) {
			infra.Log.Info("kill worker success")
			return nil
		}
		time.Sleep(200 * time.Millisecond)
		countDown = countDown - 1
	}
}

// registerCmd - only for background process
// It builds the re-exec'ed <obs-daemon> command: wires the IPC pipe, passes
// config via environment variables, and detaches it into its own session.
func registerCmd(rootPath string, debug bool) (*os.File, *exec.Cmd, error) {
	cmd := reexec.Command("<obs-daemon>")

	var rp *os.File
	var err error
	if rp, err = setProcPipeBG(rootPath, cmd); err != nil {
		return nil, nil, err
	}
	// II. set env
	cmd.Env = append(os.Environ(),
		fmt.Sprintf("OBS_DAEMON_ROOTPATH=%s", rootPath),
		fmt.Sprintf("OBS_DAEMON_DEBUG_MODE=%s", bool2str(debug)),
	)
	infra.Log.Debugf("obs worker env: %+v", cmd.Env)
	// set daemon flags
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Foreground: false,
		Setsid:     true,
	}
	return rp, cmd, nil
}

// DaemonExists - if obs-daemon has been started already
// Notice: any errors occured during reading pidFile
// will return false directly!
func daemonExists(pidFile string) (bool, int) {
	data, err := ioutil.ReadFile(pidFile)
	if err != nil {
		return false, 0
	}
	// II. get pid
	pid, err := strconv.Atoi(string(data))
	if err != nil {
		return false, 0
	}
	// III. find process
	if !isPidRunning(pid) {
		return false, 0
	}
	return true, pid
}

// set stdin/stdout/stderr pipe (for background processes)
// Returns the parent's read end of the IPC pipe; the write end is handed to
// the child as an extra fd, and the child's stdout/stderr go to the log file.
func setProcPipeBG(rootPath string, cmd *exec.Cmd) (*os.File, error) {
	logFile := fmt.Sprintf("%s/log/obs-daemon.log", rootPath)
	rp, wp, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	// redirect stdout/stderr to log file
	fi, err := util.OpenFileNS(logFile, true)
	infra.Log.Debugf("going to open %s", logFile)
	if err != nil {
		infra.Log.Info("open obs-worker logFile failed")
		return nil, err
	}
	// redirect stdout/stderr to file
	cmd.Stdin = os.Stdin
	cmd.Stdout = fi
	cmd.Stderr = fi
	cmd.ExtraFiles = []*os.File{nil, wp}
	return rp, nil
}

// handleIpcMessageBG - when child worker started at background
// we could wait for a moment to recv message from child worker to
// indicate its status - thus we can easily realize whether it starts fail or success.
func handleIpcMessageBG(rp *os.File, doneErr chan error) {
	// for background - recv data from child proc's ipc channel
	dec := json.NewDecoder(rp)
	for {
		var msg ipcMessage
		if err := dec.Decode(&msg); err != nil {
			doneErr <- err
			return
		}
		// handle message
		if msg.Status == "ok-start" {
			infra.Log.Info("start worker success")
			doneErr <- nil
			return
		}
		// or return fail message
		infra.Log.Info("start child worker failed:", msg.Message)
		doneErr <- fmt.Errorf(msg.Message)
		return
	}
}

// sendIpcMessage encodes a status/message pair onto the IPC channel.
func sendIpcMessage(enc *json.Encoder, status string, message string) error {
	ipcMsg := ipcMessage{
		Status:  status,
		Message: message,
	}
	return enc.Encode(&ipcMsg)
}

//// helpers

// bool2str renders a bool as "1"/"0" for environment variables.
func bool2str(data bool) string {
	if data {
		return "1"
	}
	return "0"
}

// writePid records the worker's pid in pidFile.
func writePid(pidFile string, pid int) error {
	infra.Log.Debugf("start daemon pid: %d", pid)
	return util.WriteFileNS(pidFile, false, []byte(strconv.Itoa(pid)))
}

// removePidFile deletes pidFile, ignoring errors (best effort).
func removePidFile(pidFile string) {
	os.Remove(pidFile)
}
// Copyright 2021 BoCloud // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "github.com/bep/debounce" "github.com/fsnotify/fsnotify" "github.com/spf13/viper" "k8s.io/klog/v2" "time" ) func eventOpIs(ent fsnotify.Event, Op fsnotify.Op) bool { return ent.Op&Op == Op } func onConfigFileChange(fileToWatch string, callback func()) { watcher, err := fsnotify.NewWatcher() if err != nil { klog.Errorf("failed to initialize fsnotify: %s", err) return } defer func() { if err = watcher.Close(); err != nil { klog.Errorf("failed to close fsnotify watcher: %s", err) } }() if err = watcher.Add(fileToWatch); err != nil { klog.Errorf("failed to monitor %s. Error: %s", fileToWatch, err) return } // use debounce to avoid too much fsnotify events debounced := debounce.New(viper.GetDuration("debounceDuration")) for { select { case event, _ := <-watcher.Events: klog.Infof("network configuration may changed. event: %s", event) switch { case eventOpIs(event, fsnotify.Create): fallthrough case eventOpIs(event, fsnotify.Write): fallthrough case eventOpIs(event, fsnotify.Rename): debounced(callback) default: // fsnotify monitors the inode, but vi change the inode. // try to add it back, after removed/renamed time.Sleep(1 * time.Second) // wait fs to ready, maybe not needed if err = watcher.Add(fileToWatch); err != nil { klog.Errorf("failed to monitor %s. 
Error: %s", fileToWatch, err) return } } case err, _ = <-watcher.Errors: klog.Errorf("fsnotify has an error: %s", err) // not encounter it so far, hope it can be recovered after some time time.Sleep(5 * time.Minute) if err = watcher.Add(fileToWatch); err != nil { klog.Errorf("failed to monitor %s. Error: %s", fileToWatch, err) return } } } }
package main

import "fmt"

// check returns param squared, or an error when the square would overflow
// uint8 (param > 15, since 16*16 == 256 wraps to 0).
//
// The original guard `param < 0` was always false for an unsigned type
// (flagged by `go vet`); the real hazard is multiplication overflow, which
// previously wrapped silently.
func check(param uint8) (uint8, error) {
	if param > 15 {
		return 0, fmt.Errorf("param %d squared overflows uint8", param)
	}
	return param * param, nil
}

func main() {
	m := 2
	result, err := check(uint8(m))
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	fmt.Println(result)
}
package resolver import ( "github.com/taktakty/netlabi/testdata" "github.com/stretchr/testify/require" "strings" "testing" ) func TestHostQueries(t *testing.T) { testData := hostTestData t.Run("GetSingle", func(t *testing.T) { p := string(testData[0].ID) q := strings.Join([]string{`query {getHost(input:{id:"`, p, `"})`, testdata.HostResp, "}"}, "") var resp struct { GetHost testdata.HostRespStruct } c.MustPost(q, &resp) require.Equal(t, p, resp.GetHost.ID) require.Equal(t, testData[0].Name, resp.GetHost.Name) require.Equal(t, testData[0].Status, resp.GetHost.Status) require.Equal(t, testData[0].Protocol, resp.GetHost.Protocol) require.Equal(t, testData[0].Note, resp.GetHost.Note) }) t.Run("GetMultiple", func(t *testing.T) { forSearchTestData := testData[5:8] q := strings.Join([]string{`query {getHosts(input:{name:"for search"})`, testdata.HostResp, "}"}, "") var resp struct { GetHosts []testdata.HostRespStruct } c.MustPost(q, &resp) require.Len(t, resp.GetHosts, len(forSearchTestData)) for i, r := range resp.GetHosts { require.Equal(t, forSearchTestData[i].Name, r.Name) require.Equal(t, forSearchTestData[i].Status, r.Status) require.Equal(t, forSearchTestData[i].Protocol, r.Protocol) require.Equal(t, forSearchTestData[i].Note, r.Note) } }) }
var _ = Resource("subtemplates", func() { DefaultMedia(SubtemplateMedia) BasePath("/api/subtemplate") Action("show", func() { Description("Get subtemplate") Routing(GET("/:subTemplateID")) Params(func() { Param("subTemplateID", Integer) }) Response(OK, SubtemplateMedia) Response(NotFound) Response(BadRequest, ErrorMedia) }) // ... })
// Copyright 2020 The go-bindata Authors. All rights reserved. // Use of this source code is governed by a CC0 1.0 Universal (CC0 1.0) // Public Domain Dedication license that can be found in the LICENSE file. package bindata import ( "testing" ) func TestNewInputConfig(t *testing.T) { tests := []struct { desc string path string exp *InputConfig }{{ desc: `With suffix /...`, path: `./...`, exp: &InputConfig{ Path: `.`, Recursive: true, }, }, { desc: `Without suffix /...`, path: `.`, exp: &InputConfig{ Path: `.`, }, }} for _, test := range tests { t.Log(test.desc) got := newInputConfig(test.path) assert(t, test.exp, got, true) } }
package delivery

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// positionServiceTestSuite groups position-related service tests around the
// shared mock HTTP transport helpers provided by baseTestSuite.
type positionServiceTestSuite struct {
	baseTestSuite
}

// TestPositionService runs the suite under `go test`.
func TestPositionService(t *testing.T) {
	suite.Run(t, new(positionServiceTestSuite))
}

// TestChangeLeverage verifies the change-leverage request's signed form params
// and that the JSON response is decoded into SymbolLeverage.
func (s *positionServiceTestSuite) TestChangeLeverage() {
	data := []byte(`{
		"leverage": 21,
		"maxQty": "1000",
		"symbol": "BTCUSD_200925"
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "BTCUSD_200925"
	leverage := 21
	s.assertReq(func(r *request) {
		e := newSignedRequest().setFormParams(params{
			"symbol":   symbol,
			"leverage": leverage,
		})
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewChangeLeverageService().Symbol(symbol).Leverage(leverage).Do(newContext())
	s.r().NoError(err)
	e := &SymbolLeverage{
		Symbol:      symbol,
		Leverage:    leverage,
		MaxQuantity: "1000",
	}
	s.r().Equal(e.Symbol, res.Symbol, "Symbol")
	s.r().Equal(e.Leverage, res.Leverage, "Leverage")
	s.r().Equal(e.MaxQuantity, res.MaxQuantity, "MaxQuantity")
}

// TestChangeMarginType verifies the margin-type change request; the endpoint
// only returns a status payload, so just the absence of an error is checked.
func (s *positionServiceTestSuite) TestChangeMarginType() {
	data := []byte(`{
		"code": 200,
		"msg": "success"
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "BTCUSDT"
	marginType := MarginTypeIsolated
	s.assertReq(func(r *request) {
		e := newSignedRequest().setFormParams(params{
			"symbol":     symbol,
			"marginType": marginType,
		})
		s.assertRequestEqual(e, r)
	})
	err := s.client.NewChangeMarginTypeService().Symbol(symbol).MarginType(marginType).Do(newContext())
	s.r().NoError(err)
}

// TestUpdatePositionMargin verifies the position-margin update request,
// including the positionSide and type (1 = add margin) form params.
func (s *positionServiceTestSuite) TestUpdatePositionMargin() {
	data := []byte(`{
		"amount": 100.0,
		"code": 200,
		"msg": "Successfully modify position margin.",
		"type": 1
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "BTCUSDT"
	positionSide := PositionSideTypeLong
	amount := "100.0"
	actionType := 1
	s.assertReq(func(r *request) {
		e := newSignedRequest().setFormParams(params{
			"symbol":       symbol,
			"positionSide": positionSide,
			"amount":       amount,
			"type":         actionType,
		})
		s.assertRequestEqual(e, r)
	})
	err := s.client.NewUpdatePositionMarginService().Symbol(symbol).
		PositionSide(positionSide).Amount(amount).Type(actionType).Do(newContext())
	s.r().NoError(err)
}

// TestChangePositionMode verifies that enabling dual-side (hedge) mode sends
// dualSidePosition=true as a signed form param.
func (s *positionServiceTestSuite) TestChangePositionMode() {
	data := []byte(`{
		"code": 200,
		"msg": "success"
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	s.assertReq(func(r *request) {
		e := newSignedRequest().setFormParams(params{
			"dualSidePosition": "true",
		})
		s.assertRequestEqual(e, r)
	})
	err := s.client.NewChangePositionModeService().DualSide(true).Do(newContext())
	s.r().NoError(err)
}

// TestGetPositionMode verifies the position-mode query (no params) and that
// the dualSidePosition flag is decoded from the response.
func (s *positionServiceTestSuite) TestGetPositionMode() {
	data := []byte(`{
		"dualSidePosition": true
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	s.assertReq(func(r *request) {
		e := newSignedRequest().setFormParams(params{})
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewGetPositionModeService().Do(newContext())
	s.r().NoError(err)
	s.r().Equal(res.DualSidePosition, true)
}
package bot func (r *Reinforcement) Init(Alpha, Gamma, RandomProb, TempDelta float64) { r.Alpha = Alpha r.Gamma = Gamma r.RandomProb = RandomProb r.TempDelta = TempDelta r.Rewards = make([][]float64, 22) for i := range r.Rewards { r.Rewards[i] = make([]float64, 3) } // If your score is 21, your best option is to stand. r.Rewards[21][1] = 100000 }
package domain import "gopkg.in/mgo.v2/bson" //Video ... type Video struct { VideoURL string `json:"video_url" bson:"video_url"` ThumbnailURL string `json:"thumbnail_url" bson:"thumbnail_url"` UserID bson.ObjectId `json:"user_id" bson:"user_id"` ChannelIDS []ChannelID `json:"channel_ids" bson:"channel_ids"` } //ChannelID ... type ChannelID struct { ID bson.ObjectId `json:"_id" bson:"_id"` }
// +build !windows package term import ( "io" "os" "os/exec" "strconv" "strings" ) // ClearLines will move the cursor up and clear the line out for re-rendering func ClearLines(out io.Writer, linecount int) { out.Write([]byte(strings.Repeat("\x1b[0G\x1b[1A\x1b[0K", linecount))) } const ( defaultTermWidth = 80 defaultTermHeight = 60 ) func size() (width, height int) { cmd := exec.Command("stty", "size") cmd.Stdin = os.Stdin out, err := cmd.Output() if err != nil { return defaultTermWidth, defaultTermHeight } parts := strings.Split(strings.TrimRight(string(out), "\n"), " ") height, err = strconv.Atoi(parts[0]) if err != nil { return defaultTermWidth, defaultTermHeight } width, err = strconv.Atoi(parts[1]) if err != nil { return defaultTermWidth, defaultTermHeight } return width, height } // Width returns the column width of the terminal func Width() int { w, _ := size() return w } // Height returns the row size of the terminal func Height() int { _, h := size() return h }
package remote import ( "testing" "github.com/stretchr/testify/assert" redfish "opendev.org/airship/airshipctl/pkg/remote/redfish" ) func TestUnknownRemoteType(t *testing.T) { rdCfg := RemoteDirectConfig{ RemoteType: "new-remote", RemoteURL: "http://localhost:8000", EphemeralNodeId: "test-node", IsoPath: "/test.iso", } err := DoRemoteDirect(rdCfg) _, ok := err.(*RemoteDirectError) assert.True(t, ok) } func TestRedfishRemoteDirectWithBogusConfig(t *testing.T) { rdCfg := RemoteDirectConfig{ RemoteType: "redfish", RemoteURL: "http://nolocalhost:8888", EphemeralNodeId: "test-node", IsoPath: "/test.iso", } err := DoRemoteDirect(rdCfg) _, ok := err.(*redfish.RedfishClientError) assert.True(t, ok) } func TestRedfishRemoteDirectWithEmptyURL(t *testing.T) { rdCfg := RemoteDirectConfig{ RemoteType: "redfish", RemoteURL: "", EphemeralNodeId: "test-node", IsoPath: "/test.iso", } err := DoRemoteDirect(rdCfg) _, ok := err.(*redfish.RedfishConfigError) assert.True(t, ok) } func TestRedfishRemoteDirectWithEmptyNodeID(t *testing.T) { rdCfg := RemoteDirectConfig{ RemoteType: "redfish", RemoteURL: "http://nolocalhost:8888", EphemeralNodeId: "", IsoPath: "/test.iso", } err := DoRemoteDirect(rdCfg) _, ok := err.(*redfish.RedfishConfigError) assert.True(t, ok) } func TestRedfishRemoteDirectWithEmptyIsoPath(t *testing.T) { rdCfg := RemoteDirectConfig{ RemoteType: "redfish", RemoteURL: "http://nolocalhost:8888", EphemeralNodeId: "123", IsoPath: "", } err := DoRemoteDirect(rdCfg) _, ok := err.(*redfish.RedfishConfigError) assert.True(t, ok) }
package main import ( "./lib" ) func main() { var a float64 = 5 var b float64 = 5 lib.Sum(a, b) }
// +build windows,!dockerless /* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package dockershim import ( "context" "time" "github.com/Microsoft/hcsshim" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" ) func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { info, err := ds.client.Info() if err != nil { return nil, err } hcsshim_container, err := hcsshim.OpenContainer(containerID) if err != nil { // As we moved from using Docker stats to hcsshim directly, we may query HCS with already exited container IDs. // That will typically happen with init-containers in Exited state. Docker still knows about them but the HCS does not. // As we don't want to block stats retrieval for other containers, we only log errors. 
if !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyStopped(err) { klog.Errorf("Error opening container (stats will be missing) '%s': %v", containerID, err) } return nil, nil } defer func() { closeErr := hcsshim_container.Close() if closeErr != nil { klog.Errorf("Error closing container '%s': %v", containerID, closeErr) } }() stats, err := hcsshim_container.Statistics() if err != nil { return nil, err } containerJSON, err := ds.client.InspectContainerWithSize(containerID) if err != nil { return nil, err } statusResp, err := ds.ContainerStatus(context.Background(), &runtimeapi.ContainerStatusRequest{ContainerId: containerID}) if err != nil { return nil, err } status := statusResp.GetStatus() timestamp := time.Now().UnixNano() containerStats := &runtimeapi.ContainerStats{ Attributes: &runtimeapi.ContainerAttributes{ Id: containerID, Metadata: status.Metadata, Labels: status.Labels, Annotations: status.Annotations, }, Cpu: &runtimeapi.CpuUsage{ Timestamp: timestamp, // have to multiply cpu usage by 100 since stats units is in 100's of nano seconds for Windows UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: stats.Processor.TotalRuntime100ns * 100}, }, Memory: &runtimeapi.MemoryUsage{ Timestamp: timestamp, WorkingSetBytes: &runtimeapi.UInt64Value{Value: stats.Memory.UsagePrivateWorkingSetBytes}, }, WritableLayer: &runtimeapi.FilesystemUsage{ Timestamp: timestamp, FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: info.DockerRootDir}, UsedBytes: &runtimeapi.UInt64Value{Value: uint64(*containerJSON.SizeRw)}, }, } return containerStats, nil }
package LiquidCrystal

import (
	"fmt"
	"time"

	"github.com/hybridgroup/gobot"
	"github.com/hybridgroup/gobot/platforms/i2c"
)

const (
	// commands
	LCD_CLEARDISPLAY   byte = 0x01
	LCD_RETURNHOME     byte = 0x02
	LCD_ENTRYMODESET   byte = 0x04
	LCD_DISPLAYCONTROL byte = 0x08
	LCD_CURSORSHIFT    byte = 0x10
	LCD_FUNCTIONSET    byte = 0x20
	LCD_SETCGRAMADDR   byte = 0x40
	LCD_SETDDRAMADDR   byte = 0x80

	// flags for display entry mode
	LCD_ENTRYRIGHT          byte = 0x00
	LCD_ENTRYLEFT           byte = 0x02
	LCD_ENTRYSHIFTINCREMENT byte = 0x01
	LCD_ENTRYSHIFTDECREMENT byte = 0x00

	// flags for display on/off control
	LCD_DISPLAYON  byte = 0x04
	LCD_DISPLAYOFF byte = 0x00
	LCD_CURSORON   byte = 0x02
	LCD_CURSOROFF  byte = 0x00
	LCD_BLINKON    byte = 0x01
	LCD_BLINKOFF   byte = 0x00

	// flags for display/cursor shift
	LCD_DISPLAYMOVE byte = 0x08
	LCD_CURSORMOVE  byte = 0x00
	LCD_MOVERIGHT   byte = 0x04
	LCD_MOVELEFT    byte = 0x00

	// flags for function set
	LCD_8BITMODE byte = 0x10
	LCD_4BITMODE byte = 0x00
	LCD_2LINE    byte = 0x08
	LCD_1LINE    byte = 0x00
	LCD_5x10DOTS byte = 0x04
	LCD_5x8DOTS  byte = 0x00

	// flags for backlight control
	LCD_BACKLIGHT   byte = 0x08
	LCD_NOBACKLIGHT byte = 0x00

	En byte = 1 << 2 // Enable bit
	Rw byte = 1 << 1 // Read/Write bit
	Rs byte = 1 << 0 // Register select bit
)

// compile-time check that LiquidCrystalDriver satisfies gobot.Driver
var _ gobot.Driver = (*LiquidCrystalDriver)(nil)

// LiquidCrystalDriver drives an HD44780-compatible character LCD attached
// behind an I2C port expander, operating the controller in 4-bit mode.
type LiquidCrystalDriver struct {
	name        string
	connection  i2c.I2c
	addr        int  // I2C address of the expander
	backlight   byte // current backlight flag (LCD_BACKLIGHT or LCD_NOBACKLIGHT)
	cols        int
	rows        int
	charsize    int // 0 means default 5x8 font
	displayfunc byte
	displayctrl byte
	displaymode byte
}

// NewLiquidCrystalDriver creates a new driver with specified name and i2c interface
func NewLiquidCrystalDriver(a i2c.I2c, name string, addr int, cols int, rows int) *LiquidCrystalDriver {
	return &LiquidCrystalDriver{
		name:       name,
		connection: a,
		addr:       addr,
		backlight:  LCD_BACKLIGHT,
		cols:       cols,
		rows:       rows,
		charsize:   int(LCD_5x8DOTS),
	}
}

// SetCharSize overrides the default 5x8 character size (see LCD_5x10DOTS).
func (h *LiquidCrystalDriver) SetCharSize(size int) {
	h.charsize = size
}

// Name returns the driver name.
func (h *LiquidCrystalDriver) Name() string { return h.name }

// Connection returns the underlying I2C connection as a gobot.Connection.
func (h *LiquidCrystalDriver) Connection() gobot.Connection {
	return h.connection.(gobot.Connection)
}

// Start initializes the LCD controller per the HD44780 datasheet power-on
// sequence and leaves the display cleared, on, and in left-to-right mode.
// (The original comment said "LIDAR", which was a copy-paste error.)
func (h *LiquidCrystalDriver) Start() (errs []error) {
	if err := h.connection.I2cStart(h.addr); err != nil {
		return []error{err}
	}

	h.displayfunc = LCD_4BITMODE | LCD_1LINE | LCD_5x8DOTS
	if h.rows > 1 {
		h.displayfunc |= LCD_2LINE
	}

	// for some 1 line displays you can select a 10 pixel high font
	if h.charsize != 0 && h.rows == 1 {
		h.displayfunc |= LCD_5x10DOTS
	}

	// SEE PAGE 45/46 FOR INITIALIZATION SPECIFICATION!
	// according to datasheet, we need at least 40ms after power rises above 2.7V
	// before sending commands. Arduino can turn on way befer 4.5V so we'll wait 50
	<-time.After(50 * time.Millisecond)

	// Now we pull both RS and R/W low to begin commands
	h.expanderWrite(h.backlight) // reset expanderand turn backlight off (Bit 8 =1)
	<-time.After(1 * time.Second)

	//put the LCD into 4 bit mode
	// this is according to the hitachi HD44780 datasheet
	// figure 24, pg 46

	// we start in 8bit mode, try to set 4 bit mode
	h.write4bits(0x03 << 4)
	<-time.After(4500 * time.Microsecond) // wait min 4.1ms

	// second try
	h.write4bits(0x03 << 4)
	<-time.After(4500 * time.Microsecond) // wait min 4.1ms

	// third go!
	h.write4bits(0x03 << 4)
	<-time.After(150 * time.Microsecond)

	// finally, set to 4-bit interface
	h.write4bits(0x02 << 4)

	// set # lines, font size, etc.
	h.command(LCD_FUNCTIONSET | h.displayfunc)

	// turn the display on with no cursor or blinking default
	h.displayctrl = LCD_DISPLAYON | LCD_CURSOROFF | LCD_BLINKOFF
	h.Display()

	// clear it off
	h.Clear()

	// Initialize to default text direction (for roman languages)
	h.displaymode = LCD_ENTRYLEFT | LCD_ENTRYSHIFTDECREMENT
	// set the entry mode
	h.command(LCD_ENTRYMODESET | h.displaymode)

	h.Home()
	return
}

// Halt returns true if devices is halted successfully
// NOTE(review): errors from the shutdown commands are discarded; errs is
// always returned empty — confirm whether that is intentional.
func (h *LiquidCrystalDriver) Halt() (errs []error) {
	h.Clear()
	h.NoBacklight()
	h.NoCursor()
	h.NoDisplay()
	return
}

/********** high level commands, for the user! */

// Clear blanks the display and homes the cursor.
func (h *LiquidCrystalDriver) Clear() error {
	var err = h.command(LCD_CLEARDISPLAY) // clear display, set cursor position to zero
	<-time.After(2 * time.Millisecond)    // this command takes a long time!
	return err
}

// Home returns the cursor to position (0, 0) without clearing.
func (h *LiquidCrystalDriver) Home() error {
	var err = h.command(LCD_RETURNHOME) // set cursor position to zero
	<-time.After(2 * time.Millisecond)  // this command takes a long time!
	return err
}

// SetCursor moves the cursor to the given zero-based column and row.
// NOTE(review): the clamp uses `row > h.rows` — for row == h.rows this
// indexes row_offsets[h.rows], which looks like an off-by-one (expected
// `row >= h.rows`); confirm before relying on out-of-range rows.
func (h *LiquidCrystalDriver) SetCursor(col int, row int) error {
	var row_offsets = []byte{0x00, 0x40, 0x14, 0x54}
	if row > h.rows {
		row = h.rows - 1 // we count rows starting w/0
	}
	return h.command(LCD_SETDDRAMADDR | (byte(col) + row_offsets[row]))
}

// Turn the display on/off (quickly)
func (h *LiquidCrystalDriver) NoDisplay() error {
	h.displayctrl &= ^LCD_DISPLAYON
	return h.command(LCD_DISPLAYCONTROL | h.displayctrl)
}

// Display switches the display output on.
func (h *LiquidCrystalDriver) Display() error {
	h.displayctrl |= LCD_DISPLAYON
	return h.command(LCD_DISPLAYCONTROL | h.displayctrl)
}

// Turns the underline cursor on/off
func (h *LiquidCrystalDriver) NoCursor() error {
	h.displayctrl &= ^LCD_CURSORON
	return h.command(LCD_DISPLAYCONTROL | h.displayctrl)
}

// Cursor shows the underline cursor.
func (h *LiquidCrystalDriver) Cursor() error {
	h.displayctrl |= LCD_CURSORON
	return h.command(LCD_DISPLAYCONTROL | h.displayctrl)
}

// Turn on and off the blinking cursor
func (h *LiquidCrystalDriver) NoBlink() error {
	h.displayctrl &= ^LCD_BLINKON
	return h.command(LCD_DISPLAYCONTROL | h.displayctrl)
}

// Blink enables the blinking block cursor.
func (h *LiquidCrystalDriver) Blink() error {
	h.displayctrl |= LCD_BLINKON
	return h.command(LCD_DISPLAYCONTROL | h.displayctrl)
}

// These commands scroll the display without changing the RAM
func (h *LiquidCrystalDriver) ScrollDisplayLeft() error {
	return h.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | LCD_MOVELEFT)
}

// ScrollDisplayRight shifts the visible window one position right.
func (h *LiquidCrystalDriver) ScrollDisplayRight() error {
	return h.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | LCD_MOVERIGHT)
}

// This is for text that flows Left to Right
func (h *LiquidCrystalDriver) LeftToRight() error {
	h.displaymode |= LCD_ENTRYLEFT
	return h.command(LCD_ENTRYMODESET | h.displaymode)
}

// This is for text that flows Right to Left
func (h *LiquidCrystalDriver) RightToLeft() error {
	h.displaymode &= ^LCD_ENTRYLEFT
	return h.command(LCD_ENTRYMODESET | h.displaymode)
}

// This will 'right justify' text from the cursor
func (h *LiquidCrystalDriver) Autoscroll() error {
	h.displaymode |= LCD_ENTRYSHIFTINCREMENT
	return h.command(LCD_ENTRYMODESET | h.displaymode)
}

// This will 'left justify' text from the cursor
func (h *LiquidCrystalDriver) NoAutoscroll() error {
	h.displaymode &= ^LCD_ENTRYSHIFTDECREMENT
	return h.command(LCD_ENTRYMODESET | h.displaymode)
}

// Allows us to fill the first 8 CGRAM locations
// with custom characters
// NOTE(review): errors from the per-byte h.Write calls are discarded; only
// the initial command's error is returned — confirm whether that matters.
func (h *LiquidCrystalDriver) CreateChar(location byte, charmap []byte) error {
	location &= 0x7 // we only have 8 locations 0-7
	err := h.command(LCD_SETCGRAMADDR | (location << 3))
	for _, char := range charmap {
		h.Write(char)
	}
	return err
}

// Turn the (optional) backlight off/on
func (h *LiquidCrystalDriver) NoBacklight() {
	h.backlight = LCD_NOBACKLIGHT
	h.expanderWrite(0)
}

// Backlight turns the backlight on.
func (h *LiquidCrystalDriver) Backlight() {
	h.backlight = LCD_BACKLIGHT
	h.expanderWrite(0)
}

/*********** mid level commands, for sending data/cmds */

// command sends a controller command (RS low).
func (h *LiquidCrystalDriver) command(value byte) error {
	return h.send(value, 0)
}

// Write sends one data byte (RS high), i.e. a character to display.
func (h *LiquidCrystalDriver) Write(value byte) (int, error) {
	if err := h.send(value, Rs); err != nil {
		return 0, err
	}
	return 1, nil
}

/************ low level data pushing commands **********/

// expanderWrite pushes a raw byte to the I2C expander, OR-ing in the
// current backlight flag.
func (h *LiquidCrystalDriver) expanderWrite(data byte) error {
	return h.connection.I2cWrite(h.addr, []byte{data | h.backlight})
}

// pulseEnable strobes the En line so the controller latches the nibble
// currently on the data pins.
func (h *LiquidCrystalDriver) pulseEnable(data byte) (err error) {
	if err = h.expanderWrite(data | En); err != nil { // En high
		return
	}
	<-time.After(1 * time.Microsecond) // enable pulse must be >450ns

	if err = h.expanderWrite(data & ^En); err != nil { // En low
		return
	}
	<-time.After(50 * time.Microsecond) // commands need > 37us to settle
	return
}

// write4bits places one nibble on the expander and strobes it in.
func (h *LiquidCrystalDriver) write4bits(value byte) (err error) {
	if err = h.expanderWrite(value); err != nil {
		return
	}
	if err = h.pulseEnable(value); err != nil {
		return
	}
	return
}

// send transmits a full byte as two 4-bit transfers, high nibble first,
// with mode selecting command (0) or data (Rs).
func (h *LiquidCrystalDriver) send(value, mode byte) (err error) {
	var highnib = value & 0xf0
	var lownib = (value << 4) & 0xf0
	if err = h.write4bits(highnib | mode); err != nil {
		return
	}
	if err = h.write4bits(lownib | mode); err != nil {
		return
	}
	return
}

// LoadCustomCharacter is an alias for CreateChar (error discarded).
func (h *LiquidCrystalDriver) LoadCustomCharacter(char_num byte, rows []byte) {
	h.CreateChar(char_num, rows)
}

// SetBacklight switches the backlight according to new_val.
func (h *LiquidCrystalDriver) SetBacklight(new_val bool) {
	if new_val {
		h.Backlight() // turn backlight on
	} else {
		h.NoBacklight() // turn backlight off
	}
}

// Print writes the string to the display at the current cursor position.
func (h *LiquidCrystalDriver) Print(str string) {
	var charmap = []byte(str)
	for _, char := range charmap {
		h.Write(char)
	}
}

// Printf formats per fmt.Sprintf and prints the result to the display.
func (h *LiquidCrystalDriver) Printf(str string, v ...interface{}) {
	h.Print(fmt.Sprintf(str, v...))
}
package ghosts type Banshee struct { } func (b Banshee) Name() string { return "Banshee" } func (b Banshee) Evidence() [3]string { return [3]string{"Freezing", "EMF 5", "Fingerprints"} }
package service

import (
	"go.uber.org/zap"
	"mix/test/codes"
	dto "mix/test/dto/core/transaction"
	entity "mix/test/entity/core/transaction"
	"mix/test/pb/core/transaction"
	"mix/test/utils/status"
)

// createHotAccount persists a new hot account from the request fields, then
// re-reads the stored row so the response reflects DB-generated values.
func (p *Transaction) createHotAccount(ctx *Context, in *transaction.CreateHotAccountInput, out *transaction.HotAccountOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "createHotAccount"))
	db := ctx.db

	hotAccountEntity := new(entity.HotAccount)
	hotAccountEntity.MerchantId = in.MerchantId
	hotAccountEntity.AccountId = in.AccountId
	hotAccountEntity.Hash = in.Hash

	id, err := p.dao.CreateHotAccount(logger, db, hotAccountEntity)
	if err != nil {
		return
	}

	// Re-fetch so out includes DB-assigned fields (id, timestamps, ...).
	hotAccountEntity, err = p.dao.MustGetHotAccount(logger, db, id)
	if err != nil {
		return
	}

	dto.ToHotAccountOutput(hotAccountEntity, out)
	return
}

// getHotAccount loads a single hot account by id, mapping a missing row to
// the HotAccountNotFound status code.
func (p *Transaction) getHotAccount(ctx *Context, in *transaction.GetHotAccountInput, out *transaction.HotAccountOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "getHotAccount"))
	db := ctx.db

	hotAccountEntity, err := p.dao.GetHotAccount(logger, db, in.Id)
	if err != nil {
		return
	}
	if hotAccountEntity == nil {
		err = status.Code(codes.HotAccountNotFound)
		return
	}

	dto.ToHotAccountOutput(hotAccountEntity, out)
	return
}

// getHotAccountList returns all hot accounts.
func (p *Transaction) getHotAccountList(ctx *Context, in *transaction.Empty, out *transaction.HotAccountListOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "getHotAccountList"))
	db := ctx.db

	hotAccountEntities, err := p.dao.GetHotAccountList(logger, db)
	if err != nil {
		return
	}

	dto.ToHotAccountListOutput(hotAccountEntities, out)
	return
}

// removeHotAccount deletes a hot account by id; a missing row yields
// HotAccountNotFound rather than a silent no-op.
func (p *Transaction) removeHotAccount(ctx *Context, in *transaction.RemoveHotAccountInput, out *transaction.Empty) (err error) {
	logger := ctx.logger.With(zap.String("func", "removeHotAccount"))
	db := ctx.db

	hotAccountEntity, err := p.dao.GetHotAccount(logger, db, in.Id)
	if err != nil {
		return
	}
	if hotAccountEntity == nil {
		err = status.Code(codes.HotAccountNotFound)
		return
	}

	err = p.dao.RemoveHotAccount(logger, db, in.Id)
	if err != nil {
		return
	}
	return
}

// updateHotAccount overwrites a hot account's fields from the request and
// echoes the locally-built entity back (unlike create, it does not re-fetch).
func (p *Transaction) updateHotAccount(ctx *Context, in *transaction.UpdateHotAccountInput, out *transaction.HotAccountOutput) (err error) {
	logger := ctx.logger.With(zap.String("func", "updateHotAccount"))
	db := ctx.db

	hotAccountEntity := new(entity.HotAccount)
	hotAccountEntity.Id = in.Id
	hotAccountEntity.MerchantId = in.MerchantId
	hotAccountEntity.AccountId = in.AccountId
	hotAccountEntity.Hash = in.Hash

	// NOTE(review): this call passes both p.db and db, unlike every other dao
	// call in this file which passes only ctx.db — looks like a copy-paste
	// slip; confirm the UpdateHotAccount dao signature and intent.
	err = p.dao.UpdateHotAccount(logger, p.db, db, hotAccountEntity)
	if err != nil {
		return
	}

	dto.ToHotAccountOutput(hotAccountEntity, out)
	return
}
package 数组 import ( "sort" ) func numSmallerByFrequency(queries []string, words []string) []int { queryFrequencyArray := getFrequencyArray(queries) wordsFrequencyArray := getFrequencyArray(words) sort.Ints(wordsFrequencyArray) result := make([]int, 0) for i := 0; i < len(queryFrequencyArray); i++ { result = append(result, getCountOfGreaterElement(wordsFrequencyArray, queryFrequencyArray[i])) } return result } func getCountOfGreaterElement(nums []int, ref int) int { return len(nums) - getIndexOfFirstGreater(nums, ref) } func getIndexOfFirstGreater(nums []int, ref int) int { left, right := 0, len(nums)-1 for left <= right { mid := (left + right) / 2 if nums[mid] > ref { right = mid - 1 } else { left = mid + 1 } } return left } func getFrequencyArray(words []string) []int { array := make([]int, 0) for _, word := range words { array = append(array, getFrequencyOfMinChar(word)) } return array } func getFrequencyOfMinChar(word string) int { countOfChar := make(map[byte]int) for i := 0; i < len(word); i++ { countOfChar[word[i]]++ } var minChar byte = 'z' + 1 frequency := 0 for char, count := range countOfChar { if minChar > char { frequency = count minChar = char } } return frequency } /* 题目链接: https://leetcode-cn.com/problems/compare-strings-by-frequency-of-the-smallest-character/ 总结 1. 这题题意有点难懂,操作还是很简单的 */
package main import "fmt" func main() { fmt.Println(verifyPostorder([]int{1, 6, 3, 2, 5})) fmt.Println(verifyPostorder([]int{1, 3, 2, 6, 5})) } func verifyPostorder(postorder []int) bool { var verify func(left, right int) bool verify = func(left, right int) bool { if left >= right { return true } rootValue := postorder[right] k := left for k < right && postorder[k] < rootValue { k++ } // k 为临界值,比 root 大 // 比 k 小的都是左子树的 // 大于等于 k 的位置都是右子树的 for i := k; i < right; i++ { // 右子树的值原则上应该比 root 大 if postorder[i] < rootValue { return false } } if !verify(left, k-1) { return false } if !verify(k, right-1) { return false } return true } return verify(0, len(postorder)-1) }
/*
Copyright 2021 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package addon

import (
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/stretchr/testify/assert"
	"helm.sh/helm/v3/pkg/chartutil"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/yaml"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	velatypes "github.com/oam-dev/kubevela/apis/types"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/oam/util"
)

// Ginkgo specs: these run against the envtest cluster (k8sClient/ctx/cfg are
// provided by the suite setup elsewhere in this package).
var _ = Describe("Test definition check", func() {
	var compDef v1beta1.ComponentDefinition
	var traitDef v1beta1.TraitDefinition
	var wfStepDef v1beta1.WorkflowStepDefinition

	// Seed one definition of each kind; AlreadyExist is tolerated so the
	// specs can re-run against a shared cluster.
	BeforeEach(func() {
		compDef = v1beta1.ComponentDefinition{}
		traitDef = v1beta1.TraitDefinition{}
		wfStepDef = v1beta1.WorkflowStepDefinition{}
		Expect(yaml.Unmarshal([]byte(compDefYaml), &compDef)).Should(BeNil())
		Expect(k8sClient.Create(ctx, &compDef)).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
		Expect(yaml.Unmarshal([]byte(traitDefYaml), &traitDef)).Should(BeNil())
		Expect(k8sClient.Create(ctx, &traitDef)).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
		Expect(yaml.Unmarshal([]byte(wfStepDefYaml), &wfStepDef)).Should(BeNil())
		Expect(k8sClient.Create(ctx, &wfStepDef)).Should(SatisfyAny(BeNil(), util.AlreadyExistMatcher{}))
	})

	// Verifies passDefInAppAnnotation records each definition name under its
	// kind-specific annotation on the addon Application.
	It("Test pass def to app annotation", func() {
		c := v1beta1.ComponentDefinition{TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "ComponentDefinition"}}
		c.SetName("my-comp")
		t := v1beta1.TraitDefinition{TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "TraitDefinition"}}
		t.SetName("my-trait")
		w := v1beta1.WorkflowStepDefinition{TypeMeta: metav1.TypeMeta{APIVersion: "core.oam.dev/v1beta1", Kind: "WorkflowStepDefinition"}}
		w.SetName("my-wfstep")
		var defs []*unstructured.Unstructured
		cDef, err := util.Object2Unstructured(c)
		Expect(err).Should(BeNil())
		defs = append(defs, cDef)
		tDef, err := util.Object2Unstructured(t)
		defs = append(defs, tDef)
		Expect(err).Should(BeNil())
		wDef, err := util.Object2Unstructured(w)
		Expect(err).Should(BeNil())
		defs = append(defs, wDef)
		addonApp := v1beta1.Application{ObjectMeta: metav1.ObjectMeta{Name: "addon-app", Namespace: velatypes.DefaultKubeVelaNS}}
		err = passDefInAppAnnotation(defs, &addonApp)
		Expect(err).Should(BeNil())
		anno := addonApp.GetAnnotations()
		Expect(len(anno)).Should(BeEquivalentTo(3))
		Expect(anno[compDefAnnotation]).Should(BeEquivalentTo("my-comp"))
		Expect(anno[traitDefAnnotation]).Should(BeEquivalentTo("my-trait"))
		Expect(anno[workflowStepDefAnnotation]).Should(BeEquivalentTo("my-wfstep"))
	})

	// Creates four consumer applications and checks that all of them are
	// reported as depending on the addon.
	It("Test checkAddonHasBeenUsed func", func() {
		addonApp := v1beta1.Application{}
		Expect(yaml.Unmarshal([]byte(addonAppYaml), &addonApp)).Should(BeNil())

		app1 := v1beta1.Application{}
		Expect(yaml.Unmarshal([]byte(testApp1Yaml), &app1)).Should(BeNil())
		Expect(k8sClient.Create(ctx, &app1)).Should(BeNil())

		app2 := v1beta1.Application{}
		Expect(yaml.Unmarshal([]byte(testApp2Yaml), &app2)).Should(BeNil())
		Expect(k8sClient.Create(ctx, &app2)).Should(BeNil())

		// NOTE(review): this Expect has no matcher (.Should(...) missing), so
		// a failed namespace creation is silently ignored — confirm intent.
		Expect(k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}}))

		app3 := v1beta1.Application{}
		Expect(yaml.Unmarshal([]byte(testApp3Yaml), &app3)).Should(BeNil())
		Expect(k8sClient.Create(ctx, &app3)).Should(BeNil())

		app4 := v1beta1.Application{}
		Expect(yaml.Unmarshal([]byte(testApp4Yaml), &app4)).Should(BeNil())
		Expect(k8sClient.Create(ctx, &app4)).Should(BeNil())

		usedApps, err := checkAddonHasBeenUsed(ctx, k8sClient, "my-addon", addonApp, cfg)
		Expect(err).Should(BeNil())
		Expect(len(usedApps)).Should(BeEquivalentTo(4))
	})
})

// TestMerge2Map verifies that merge2DefMap accumulates comma-separated
// definition names from all three annotation kinds into one set.
func TestMerge2Map(t *testing.T) {
	res := make(map[string]bool)
	merge2DefMap(compDefAnnotation, "my-comp1,my-comp2", res)
	merge2DefMap(traitDefAnnotation, "my-trait1,my-trait2", res)
	merge2DefMap(workflowStepDefAnnotation, "my-wfStep1,my-wfStep2", res)
	assert.Equal(t, 6, len(res))
}

// TestUsingAddonInfo checks the wording of the "addon is in use" error for
// varying numbers of dependent applications (truncation after three).
func TestUsingAddonInfo(t *testing.T) {
	apps := []v1beta1.Application{
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-1", Name: "app-1"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-2", Name: "app-2"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-1", Name: "app-3"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-3", Name: "app-3"}},
	}
	res := appsDependsOnAddonErrInfo(apps)
	assert.Contains(t, res, "and other 1 more applications. Please delete all of them before removing.")

	apps = []v1beta1.Application{
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-1", Name: "app-1"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-2", Name: "app-2"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-1", Name: "app-3"}},
	}
	res = appsDependsOnAddonErrInfo(apps)
	assert.Contains(t, res, "Please delete all of them before removing.")

	apps = []v1beta1.Application{
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-1", Name: "app-1"}},
	}
	res = appsDependsOnAddonErrInfo(apps)
	assert.Contains(t, res, "this addon is being used by: namespace-1/app-1 applications. Please delete all of them before removing.")

	apps = []v1beta1.Application{
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-1", Name: "app-1"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-2", Name: "app-2"}},
	}
	res = appsDependsOnAddonErrInfo(apps)
	assert.Contains(t, res, ". Please delete all of them before removing.")
}

// TestIsAddonDir walks IsAddonDir through each rejection case in turn —
// missing dir, non-dir, absent/empty/incomplete metadata.yaml, missing
// template files — and finally a scaffold that passes all checks.
func TestIsAddonDir(t *testing.T) {
	var isAddonDir bool
	var err error
	var meta *Meta
	var metaYaml []byte

	// Non-existent dir
	isAddonDir, err = IsAddonDir("non-existent-dir")
	assert.Equal(t, isAddonDir, false)
	assert.Error(t, err)

	// Not a directory (a file)
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "local", "metadata.yaml"))
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), "not a directory")

	// No metadata.yaml
	isAddonDir, err = IsAddonDir(".")
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), "exists in directory")

	// Empty metadata.yaml
	err = os.MkdirAll(filepath.Join("testdata", "testaddon"), 0700)
	assert.NoError(t, err)
	defer func() {
		os.RemoveAll(filepath.Join("testdata", "testaddon"))
	}()
	err = os.WriteFile(filepath.Join("testdata", "testaddon", MetadataFileName), []byte{}, 0644)
	assert.NoError(t, err)
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "testaddon"))
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), "missing")

	// Empty addon name
	meta = &Meta{}
	metaYaml, err = yaml.Marshal(meta)
	assert.NoError(t, err)
	err = os.WriteFile(filepath.Join("testdata", "testaddon", MetadataFileName), metaYaml, 0644)
	assert.NoError(t, err)
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "testaddon"))
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), "addon name is empty")

	// Empty addon version
	meta = &Meta{
		Name: "name",
	}
	metaYaml, err = yaml.Marshal(meta)
	assert.NoError(t, err)
	err = os.WriteFile(filepath.Join("testdata", "testaddon", MetadataFileName), metaYaml, 0644)
	assert.NoError(t, err)
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "testaddon"))
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), "addon version is empty")

	// No metadata.yaml
	meta = &Meta{
		Name:    "name",
		Version: "1.0.0",
	}
	metaYaml, err = yaml.Marshal(meta)
	assert.NoError(t, err)
	err = os.WriteFile(filepath.Join("testdata", "testaddon", MetadataFileName), metaYaml, 0644)
	assert.NoError(t, err)
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "testaddon"))
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), "exists in directory")

	// Empty template.yaml
	err = os.WriteFile(filepath.Join("testdata", "testaddon", TemplateFileName), []byte{}, 0644)
	assert.NoError(t, err)
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "testaddon"))
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), "missing")

	// Empty template.cue
	err = os.WriteFile(filepath.Join("testdata", "testaddon", AppTemplateCueFileName), []byte{}, 0644)
	assert.NoError(t, err)
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "testaddon"))
	assert.Equal(t, isAddonDir, false)
	assert.Contains(t, err.Error(), renderOutputCuePath)

	// Pass all checks
	cmd := InitCmd{
		Path:      filepath.Join("testdata", "testaddon2"),
		AddonName: "testaddon2",
	}
	err = cmd.CreateScaffold()
	assert.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(filepath.Join("testdata", "testaddon2"))
	}()
	isAddonDir, err = IsAddonDir(filepath.Join("testdata", "testaddon2"))
	assert.Equal(t, isAddonDir, true)
	assert.NoError(t, err)
}

// TestMakeChart checks MakeChartCompatible both on a fresh addon scaffold
// (converted to a chart) and on a directory that is already a chart.
func TestMakeChart(t *testing.T) {
	var err error

	// Not a addon dir
	err = MakeChartCompatible(".", true)
	assert.Contains(t, err.Error(), "not an addon dir")

	// Valid addon dir
	cmd := InitCmd{
		Path:      filepath.Join("testdata", "testaddon"),
		AddonName: "testaddon",
	}
	err = cmd.CreateScaffold()
	assert.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(filepath.Join("testdata", "testaddon"))
	}()
	err = MakeChartCompatible(filepath.Join("testdata", "testaddon"), true)
	assert.NoError(t, err)
	isChartDir, err := chartutil.IsChartDir(filepath.Join("testdata", "testaddon"))
	assert.NoError(t, err)
	assert.Equal(t, isChartDir, true)

	// Already a chart dir
	err = MakeChartCompatible(filepath.Join("testdata", "testaddon"), false)
	assert.NoError(t, err)
	isChartDir, err = chartutil.IsChartDir(filepath.Join("testdata", "testaddon"))
	assert.NoError(t, err)
	assert.Equal(t, isChartDir, true)
}

// TestCheckObjectBindingComponent covers checkBondComponentExist for the
// current binding annotation, the legacy annotation, and no annotation.
func TestCheckObjectBindingComponent(t *testing.T) {
	existingBindingDef := unstructured.Unstructured{}
	existingBindingDef.SetAnnotations(map[string]string{oam.AnnotationAddonDefinitionBondCompKey: "kustomize"})
	emptyAnnoDef := unstructured.Unstructured{}
	emptyAnnoDef.SetAnnotations(map[string]string{"test": "onlyForTest"})
	legacyAnnoDef := unstructured.Unstructured{}
	legacyAnnoDef.SetAnnotations(map[string]string{oam.AnnotationIgnoreWithoutCompKey: "kustomize"})
	testCases := map[string]struct {
		object unstructured.Unstructured
		app    v1beta1.Application
		res    bool
	}{
		"bindingExist": {object: existingBindingDef, app: v1beta1.Application{Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{{Name: "kustomize"}}}}, res: true},
		"NotExisting":  {object: existingBindingDef, app: v1beta1.Application{Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{{Name: "helm"}}}}, res: false},
		"NoBidingAnnotation": {object: emptyAnnoDef, app: v1beta1.Application{Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{{Name: "kustomize"}}}}, res: true},
		"EmptyApp":           {object: existingBindingDef, app: v1beta1.Application{Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{}}}, res: false},
		"LegacyApp": {object: legacyAnnoDef, app: v1beta1.Application{Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{{Name: "kustomize"}}}},
			res: true,
		},
		"LegacyAppWithoutComp": {object: legacyAnnoDef, app: v1beta1.Application{Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{{}}}},
			res: false,
		},
	}
	for _, s := range testCases {
		result := checkBondComponentExist(s.object, s.app)
		assert.Equal(t, result, s.res)
	}
}

// TestFilterDependencyRegistries verifies the registry at `index` is removed,
// out-of-range indexes are no-ops, and the input slice is never mutated.
func TestFilterDependencyRegistries(t *testing.T) {
	testCases := []struct {
		registries []Registry
		index      int
		res        []Registry
		origin     []Registry
	}{
		{
			registries: []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
			index:      0,
			res:        []Registry{{Name: "r2"}, {Name: "r3"}},
			origin:     []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
		},
		{
			registries: []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
			index:      1,
			res:        []Registry{{Name: "r1"}, {Name: "r3"}},
			origin:     []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
		},
		{
			registries: []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
			index:      2,
			res:        []Registry{{Name: "r1"}, {Name: "r2"}},
			origin:     []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
		},
		{
			registries: []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
			index:      3,
			res:        []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
			origin:     []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
		},
		{
			registries: []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
			index:      -1,
			res:        []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
			origin:     []Registry{{Name: "r1"}, {Name: "r2"}, {Name: "r3"}},
		},
		{
			registries: []Registry{},
			index:      0,
			res:        []Registry{},
			origin:     []Registry{},
		},
	}
	for _, testCase := range testCases {
		res := FilterDependencyRegistries(testCase.index, testCase.registries)
		assert.Equal(t, res, testCase.res)
		assert.Equal(t, testCase.registries, testCase.origin)
	}
}

// TestCheckAddonPackageValid checks validateAddonPackage's required-field
// errors for metadata presence, name, and version.
func TestCheckAddonPackageValid(t *testing.T) {
	testCases := []struct {
		testCase Meta
		err      error
	}{{
		testCase: Meta{},
		err:      fmt.Errorf("the addon package doesn't have `metadata.yaml`"),
	}, {
		testCase: Meta{Version: "v1.4.0"},
		err:      fmt.Errorf("`matadata.yaml` must define the name of addon"),
	}, {
		testCase: Meta{Name: "test-addon"},
		err:      fmt.Errorf("`matadata.yaml` must define the version of addon"),
	}, {
		testCase: Meta{Name: "test-addon", Version: "1.4.5"},
		err:      nil,
	},
	}
	for _, testCase := range testCases {
		err := validateAddonPackage(&InstallPackage{Meta: testCase.testCase})
		assert.Equal(t, reflect.DeepEqual(err, testCase.err), true)
	}
}

const (
	compDefYaml = `
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
  name: my-comp
  namespace: vela-system
`
	traitDefYaml = `
apiVersion: 
core.oam.dev/v1beta1 kind: TraitDefinition metadata: name: my-trait namespace: vela-system ` wfStepDefYaml = ` apiVersion: core.oam.dev/v1beta1 kind: WorkflowStepDefinition metadata: name: my-wfstep namespace: vela-system ` ) const ( addonAppYaml = ` apiVersion: core.oam.dev/v1beta1 kind: Application metadata: labels: addons.oam.dev/name: myaddon addons.oam.dev/registry: KubeVela annotations: addon.oam.dev/componentDefinitions: "my-comp" addon.oam.dev/traitDefinitions: "my-trait" addon.oam.dev/workflowStepDefinitions: "my-wfstep" addon.oam.dev/policyDefinitions: "my-policy" name: addon-myaddon namespace: vela-system spec: ` testApp1Yaml = ` apiVersion: core.oam.dev/v1beta1 kind: Application metadata: labels: name: app-1 namespace: default spec: components: - name: comp1 type: my-comp traits: - type: my-trait ` testApp2Yaml = ` apiVersion: core.oam.dev/v1beta1 kind: Application metadata: labels: name: app-2 namespace: default spec: components: - name: comp2 type: webservice traits: - type: my-trait ` testApp3Yaml = ` apiVersion: core.oam.dev/v1beta1 kind: Application metadata: name: app-3 namespace: test-ns spec: components: - name: podinfo type: webservice workflow: steps: - type: my-wfstep name: deploy ` testApp4Yaml = ` apiVersion: core.oam.dev/v1beta1 kind: Application metadata: name: app-4 namespace: test-ns spec: components: - name: podinfo type: webservice policies: - type: my-policy name: topology ` registryCmYaml = ` apiVersion: v1 data: registries: '{ "KubeVela":{ "name": "KubeVela", "oss": { "end_point": "TEST_SERVER_URL", "bucket": "", "path": "" } } }' kind: ConfigMap metadata: name: vela-addon-registry namespace: vela-system ` addonDisableTestAppYaml = ` apiVersion: core.oam.dev/v1beta1 kind: Application metadata: name: addon-test-disable-addon namespace: vela-system labels: addons.oam.dev/name: test-disable-addon addons.oam.dev/registry: KubeVela spec: components: - name: podinfo type: webservice ` )
package db

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/danielkvist/botio/proto"

	// postgres driver: the stdlib adapter is what registers a
	// database/sql driver (named "pgx"); the bare v4 package does not.
	_ "github.com/jackc/pgx/v4/stdlib"
)

// Postgres wraps a sql.DB client for PostgreSQL and
// satisfies the DB interface.
type Postgres struct {
	Host            string
	Port            string
	User            string
	Password        string
	DB              string
	Table           string
	client          *sql.DB
	MaxConns        int
	MaxConnLifetime time.Duration
}

// table returns the configured table name quoted as a PostgreSQL identifier.
// Bind parameters ($1, $2, ...) can only stand in for values, never for
// identifiers, so the table name has to be interpolated into each statement.
// %q quoting guards against malformed names; the table name is expected to
// come from trusted configuration, not from user input.
func (ps *Postgres) table() string {
	return fmt.Sprintf("%q", ps.Table)
}

// Connect tries to connect to a PostgreSQL database. If it fails it returns
// a non-nil error. It also tries to create a table for the commands if not exist.
func (ps *Postgres) Connect() error {
	psqlInfo := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", ps.Host, ps.Port, ps.User, ps.Password, ps.DB)

	// "pgx" is the name registered by github.com/jackc/pgx/v4/stdlib; the
	// previous "postgres" name was not registered by any imported driver.
	client, err := sql.Open("pgx", psqlInfo)
	if err != nil {
		return fmt.Errorf("while validating arguments to connect to DB: %v", err)
	}

	client.SetMaxOpenConns(ps.MaxConns)
	client.SetConnMaxLifetime(ps.MaxConnLifetime)
	ps.client = client

	// sql.Open does not dial; Ping is what actually opens a connection.
	if err := ps.client.Ping(); err != nil {
		return fmt.Errorf("while opening a connection to DB: %v", err)
	}

	// "CREATE IF NOT EXISTS" is not valid SQL; it must be
	// "CREATE TABLE IF NOT EXISTS <identifier> (...)".
	statement := fmt.Sprintf(`
	CREATE TABLE IF NOT EXISTS %s (
		command TEXT NOT NULL PRIMARY KEY,
		response TEXT NOT NULL
	);`, ps.table())
	if _, err := ps.client.Exec(statement); err != nil {
		return fmt.Errorf("while creating a table for commands: %v", err)
	}

	return nil
}

// Add receives a *proto.BotCommand and adds it to the
// table designated. If something goes wrong executing the
// SQL statement it returns a non-nil error.
func (ps *Postgres) Add(cmd *proto.BotCommand) error {
	statement := fmt.Sprintf(`INSERT INTO %s (command, response) VALUES ($1, $2);`, ps.table())
	el := cmd.GetCmd().GetCommand()
	val := cmd.GetResp().GetResponse()
	if _, err := ps.client.Exec(statement, el, val); err != nil {
		return fmt.Errorf("while adding command %q: %v", el, err)
	}

	return nil
}

// Get receives a *proto.Command and returns the respective *proto.BotCommand
// if exists in the designated table. If not or there is any problem
// while executing the SQL statement it returns a non-nil error.
func (ps *Postgres) Get(cmd *proto.Command) (*proto.BotCommand, error) {
	el := cmd.GetCommand()
	statement := fmt.Sprintf(`SELECT command, response FROM %s WHERE command=$1;`, ps.table())
	row := ps.client.QueryRow(statement, el)

	// Scan into plain strings and build the message afterwards; the previous
	// code scanned through a nil *proto.BotCommand, which panics.
	var command, response string
	if err := row.Scan(&command, &response); err != nil {
		return nil, fmt.Errorf("while getting command %q: %v", el, err)
	}

	return &proto.BotCommand{
		Cmd:  &proto.Command{Command: command},
		Resp: &proto.Response{Response: response},
	}, nil
}

// GetAll ranges over all the entries of the designated table for the commands
// and returns a *proto.BotCommands with all the *proto.BotCommand found.
// If something goes wrong while executing the SQL statement or while
// getting some command it returns a non-nil error.
func (ps *Postgres) GetAll() (*proto.BotCommands, error) {
	statement := fmt.Sprintf(`SELECT command, response FROM %s;`, ps.table())
	rows, err := ps.client.Query(statement)
	if err != nil {
		return nil, fmt.Errorf("while getting commands from DB: %v", err)
	}
	defer rows.Close()

	var commands []*proto.BotCommand
	for rows.Next() {
		var command, response string
		if err := rows.Scan(&command, &response); err != nil {
			return nil, fmt.Errorf("while getting command: %v", err)
		}

		commands = append(commands, &proto.BotCommand{
			Cmd:  &proto.Command{Command: command},
			Resp: &proto.Response{Response: response},
		})
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("while getting commands: %v", err)
	}

	return &proto.BotCommands{
		Commands: commands,
	}, nil
}

// Remove removes a *proto.BotCommand from the designated table. It returns a
// non-nil error if there is some problem while executing the
// SQL statement or deleting the command.
func (ps *Postgres) Remove(cmd *proto.Command) error {
	el := cmd.GetCommand()
	statement := fmt.Sprintf(`DELETE FROM %s WHERE command=$1;`, ps.table())
	if _, err := ps.client.Exec(statement, el); err != nil {
		return fmt.Errorf("while removing command %q: %v", el, err)
	}

	return nil
}

// Update updates the Response of an existing *proto.BotCommand
// with the Response of the received *proto.BotCommand. If
// there is any error while executing the SQL statement
// it returns a non-nil error.
func (ps *Postgres) Update(cmd *proto.BotCommand) error {
	el := cmd.GetCmd().GetCommand()
	val := cmd.GetResp().GetResponse()

	// The previous statement had three placeholders but only two arguments
	// were passed to Exec; the table is now interpolated instead.
	statement := fmt.Sprintf(`UPDATE %s SET response=$1 WHERE command=$2;`, ps.table())
	if _, err := ps.client.Exec(statement, val, el); err != nil {
		return fmt.Errorf("while updating command %q: %v", el, err)
	}

	return nil
}

// Close tries to close the connection to the PostgreSQL database.
// If fails it returns a non-nil error.
func (ps *Postgres) Close() error {
	if err := ps.client.Close(); err != nil {
		return fmt.Errorf("while closing connection to DB: %v", err)
	}

	return nil
}
package app import "github.com/bryanl/dolb/entity" // Cluster manages load balancer agent clusters. type Cluster interface { Bootstrap(lb *entity.LoadBalancer, bootstrapConfig *BootstrapConfig) (chan int, error) }
/* * Minio Cloud Storage, (C) 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "net/url" "path" "github.com/minio/cli" "github.com/minio/mc/pkg/console" ) var serviceCmd = cli.Command{ Name: "service", Usage: "Service command line to manage Minio server.", Action: serviceControl, Flags: globalFlags, CustomHelpTemplate: `NAME: minio control {{.Name}} - {{.Usage}} USAGE: minio control {{.Name}} [status|restart|stop] http[s]://[access_key[:secret_key]@]server_ip:port/ FLAGS: {{range .Flags}}{{.}} {{end}} EXAMPLES: 1. Prints current status information of the cluster. $ minio control service status http://10.1.10.92:9000/ 2. Restarts the url and all the servers in the cluster. $ minio control service restart http://localhost:9000/ 3. Shuts down the url and all the servers in the cluster. $ minio control service stop http://localhost:9000/ `, } // "minio control service" entry point. 
func serviceControl(c *cli.Context) { if !c.Args().Present() && len(c.Args()) != 2 { cli.ShowCommandHelpAndExit(c, "service", 1) } var signal serviceSignal switch c.Args().Get(0) { case "status": signal = serviceStatus case "restart": signal = serviceRestart case "stop": signal = serviceStop default: fatalIf(errInvalidArgument, "Unrecognized service %s", c.Args().Get(0)) } parsedURL, err := url.Parse(c.Args().Get(1)) fatalIf(err, "Unable to parse URL %s", c.Args().Get(1)) accessKey := serverConfig.GetCredential().AccessKeyID secretKey := serverConfig.GetCredential().SecretAccessKey // Username and password specified in URL will override prior configuration if parsedURL.User != nil { accessKey = parsedURL.User.Username() if key, set := parsedURL.User.Password(); set { secretKey = key } } authCfg := &authConfig{ accessKey: accessKey, secretKey: secretKey, secureConn: parsedURL.Scheme == "https", address: parsedURL.Host, path: path.Join(reservedBucket, controlPath), loginMethod: "Control.LoginHandler", } client := newAuthClient(authCfg) args := &ServiceArgs{ Signal: signal, } // This is necessary so that the remotes, // don't end up sending requests back and forth. args.Remote = true reply := &ServiceReply{} err = client.Call("Control.ServiceHandler", args, reply) fatalIf(err, "Service command %s failed for %s", c.Args().Get(0), parsedURL.Host) if signal == serviceStatus { console.Println(getStorageInfoMsg(reply.StorageInfo)) } }
// Package generate models JSON schema documents for code generation.
package generate

import (
	"encoding/json"
	"errors"
	"net/url"
)

// AdditionalProperties handles additional properties present in the JSON schema.
type AdditionalProperties Schema

// Schema represents JSON schema.
type Schema struct {
	// SchemaType identifies the schema version.
	// http://json-schema.org/draft-07/json-schema-core.html#rfc.section.7
	SchemaType string `json:"$schema"`

	// ID{04,06} is the schema URI identifier.
	// http://json-schema.org/draft-07/json-schema-core.html#rfc.section.8.2
	ID04 string `json:"id"`  // up to draft-04
	ID06 string `json:"$id"` // from draft-06 onwards

	// Title and Description state the intent of the schema.
	Title       string
	Description string

	// TypeValue is the schema instance type.
	// http://json-schema.org/draft-07/json-schema-validation.html#rfc.section.6.1.1
	TypeValue interface{} `json:"type"`

	// Definitions are inline re-usable schemas.
	// http://json-schema.org/draft-07/json-schema-validation.html#rfc.section.9
	Definitions map[string]*Schema

	// Properties, Required and AdditionalProperties describe an object's child instances.
	// http://json-schema.org/draft-07/json-schema-validation.html#rfc.section.6.5
	Properties map[string]*Schema
	Required   []string

	// "additionalProperties": {...}
	AdditionalProperties *AdditionalProperties

	// "additionalProperties": false
	AdditionalPropertiesBool *bool `json:"-"`

	AnyOf []*Schema
	AllOf []*Schema
	OneOf []*Schema

	// Default can be used to supply a default JSON value associated with a particular schema.
	// http://json-schema.org/draft-07/json-schema-validation.html#rfc.section.10.2
	Default interface{}

	// Examples ...
	// http://json-schema.org/draft-07/json-schema-validation.html#rfc.section.10.4
	Examples []interface{}

	// Reference is a URI reference to a schema.
	// http://json-schema.org/draft-07/json-schema-core.html#rfc.section.8
	Reference string `json:"$ref"`

	// Items represents the types that are permitted in the array.
	// http://json-schema.org/draft-07/json-schema-validation.html#rfc.section.6.4
	Items *Schema

	// NameCount is the number of times the instance name was encountered across the schema.
	NameCount int `json:"-" `

	// Parent schema
	Parent *Schema `json:"-" `

	// Key of this schema i.e. { "JSONKey": { "type": "object", ....
	JSONKey string `json:"-" `

	// path element - for creating a path by traversing back to the root element
	PathElement string `json:"-"`

	// calculated struct name of this object, cached here
	GeneratedType string `json:"-"`
}

// UnmarshalJSON handles unmarshalling AdditionalProperties from JSON.
// It accepts, in order of preference: a bare boolean, an object whose only
// keys are anyOf/allOf/oneOf arrays, or a full schema object.
func (ap *AdditionalProperties) UnmarshalJSON(data []byte) error {
	// "additionalProperties": true/false
	var b bool
	if err := json.Unmarshal(data, &b); err == nil {
		*ap = (AdditionalProperties)(Schema{AdditionalPropertiesBool: &b})
		return nil
	}

	// support anyOf, allOf, oneOf
	a := map[string][]*Schema{}
	if err := json.Unmarshal(data, &a); err == nil {
		for k, v := range a {
			switch k {
			case "oneOf":
				ap.OneOf = append(ap.OneOf, v...)
			case "allOf":
				ap.AllOf = append(ap.AllOf, v...)
			case "anyOf":
				ap.AnyOf = append(ap.AnyOf, v...)
			}
		}
		return nil
	}

	// Fall back to decoding a full schema object.
	s := Schema{}
	err := json.Unmarshal(data, &s)
	if err == nil {
		*ap = AdditionalProperties(s)
	}
	return err
}

// ID returns the schema URI id.
func (schema *Schema) ID() string {
	// prefer "$id" over "id"
	if schema.ID06 == "" && schema.ID04 != "" {
		return schema.ID04
	}
	return schema.ID06
}

// Type returns the type which is permitted or an empty string if the type field is missing.
// The 'type' field in JSON schema also allows for a single string value or an array of strings.
// Examples:
// "a" => "a", false
// [] => "", false
// ["a"] => "a", false
// ["a", "b"] => "a", true
func (schema *Schema) Type() (firstOrDefault string, multiple bool) {
	// We've got a single value, e.g. { "type": "object" }
	if ts, ok := schema.TypeValue.(string); ok {
		firstOrDefault = ts
		multiple = false
		return
	}

	// We could have multiple types in the type value, e.g. { "type": [ "object", "array" ] }
	if a, ok := schema.TypeValue.([]interface{}); ok {
		multiple = len(a) > 1
		// Return the first string element found.
		for _, n := range a {
			if s, ok := n.(string); ok {
				firstOrDefault = s
				return
			}
		}
	}

	return "", multiple
}

// MultiType returns "type" as an array
func (schema *Schema) MultiType() ([]string, bool) {
	// We've got a single value, e.g. { "type": "object" }
	if ts, ok := schema.TypeValue.(string); ok {
		return []string{ts}, false
	}

	// We could have multiple types in the type value, e.g. { "type": [ "object", "array" ] }
	if a, ok := schema.TypeValue.([]interface{}); ok {
		rv := []string{}
		for _, n := range a {
			if s, ok := n.(string); ok {
				rv = append(rv, s)
			}
		}
		return rv, len(rv) > 1
	}

	return nil, false
}

// GetRoot returns the root schema.
func (schema *Schema) GetRoot() *Schema {
	if schema.Parent != nil {
		return schema.Parent.GetRoot()
	}
	return schema
}

// Parse parses a JSON schema from a string.
func Parse(schema string, uri *url.URL) (*Schema, error) {
	return ParseWithSchemaKeyRequired(schema, uri, true)
}

// ParseWithSchemaKeyRequired parses a JSON schema from a string with a flag to set whether the schema key is required.
func ParseWithSchemaKeyRequired(schema string, uri *url.URL, schemaKeyRequired bool) (*Schema, error) {
	s := &Schema{}
	err := json.Unmarshal([]byte(schema), s)
	if err != nil {
		return s, err
	}

	// Default the identifier to the document URI when the schema omits it.
	if s.ID() == "" {
		s.ID06 = uri.String()
	}
	// NOTE(review): this error fires when schemaKeyRequired is true, but the
	// message says "unless schemaKeyRequired flag is set" — the wording
	// appears inverted; confirm the intended message.
	if schemaKeyRequired && s.SchemaType == "" {
		return s, errors.New("JSON schema must have a $schema key unless schemaKeyRequired flag is set")
	}

	// validate root URI, it MUST be an absolute URI
	abs, err := url.Parse(s.ID())
	if err != nil {
		return nil, errors.New("error parsing $id of document \"" + uri.String() + "\": " + err.Error())
	}
	if !abs.IsAbs() {
		return nil, errors.New("$id of document not absolute URI: \"" + uri.String() + "\": \"" + s.ID() + "\"")
	}

	s.Init()
	return s, nil
}

// Init schema.
func (schema *Schema) Init() {
	root := schema.GetRoot()
	root.updateParentLinks()
	// NOTE(review): ensureSchemaKeyword returns an error that is silently
	// discarded here, so invalid nested "$schema" keywords go unreported.
	root.ensureSchemaKeyword()
	root.updatePathElements()
}

// updatePathElements assigns each schema node its path segment
// ("#", "definitions/<k>", "properties/<k>", "additionalProperties",
// "items") and recurses into children.
func (schema *Schema) updatePathElements() {
	if schema.IsRoot() {
		schema.PathElement = "#"
	}

	for k, d := range schema.Definitions {
		d.PathElement = "definitions/" + k
		d.updatePathElements()
	}

	for k, p := range schema.Properties {
		p.PathElement = "properties/" + k
		p.updatePathElements()
	}

	if schema.AdditionalProperties != nil {
		schema.AdditionalProperties.PathElement = "additionalProperties"
		(*Schema)(schema.AdditionalProperties).updatePathElements()
	}

	if schema.Items != nil {
		schema.Items.PathElement = "items"
		schema.Items.updatePathElements()
	}
}

// updateParentLinks wires up Parent pointers and JSON keys for all child
// schemas so paths can later be built by walking back to the root.
func (schema *Schema) updateParentLinks() {
	for k, d := range schema.Definitions {
		d.JSONKey = k
		d.Parent = schema
		d.updateParentLinks()
	}

	for k, p := range schema.Properties {
		p.JSONKey = k
		p.Parent = schema
		p.updateParentLinks()
	}
	if schema.AdditionalProperties != nil {
		schema.AdditionalProperties.Parent = schema
		(*Schema)(schema.AdditionalProperties).updateParentLinks()
	}
	if schema.Items != nil {
		schema.Items.Parent = schema
		schema.Items.updateParentLinks()
	}
}

// ensureSchemaKeyword rejects any nested schema that carries its own
// "$schema" keyword (only the root may declare one).
func (schema *Schema) ensureSchemaKeyword() error {
	check := func(k string, s *Schema) error {
		if s.SchemaType != "" {
			return errors.New("invalid $schema keyword: " + k)
		}
		return s.ensureSchemaKeyword()
	}
	for k, d := range schema.Definitions {
		if err := check(k, d); err != nil {
			return err
		}
	}
	for k, d := range schema.Properties {
		if err := check(k, d); err != nil {
			return err
		}
	}
	if schema.AdditionalProperties != nil {
		if err := check("additionalProperties", (*Schema)(schema.AdditionalProperties)); err != nil {
			return err
		}
	}
	if schema.Items != nil {
		if err := check("items", schema.Items); err != nil {
			return err
		}
	}
	return nil
}

// FixMissingTypeValue is backwards compatible, guessing the users intention when they didn't specify a type.
func (schema *Schema) FixMissingTypeValue() {
	if schema.TypeValue == nil {
		// Properties present (and no $ref) implies an object.
		if schema.Reference == "" && len(schema.Properties) > 0 {
			schema.TypeValue = "object"
			return
		}
		// An items schema implies an array.
		if schema.Items != nil {
			schema.TypeValue = "array"
			return
		}
	}
}

// IsRoot returns true when the schema is the root.
func (schema *Schema) IsRoot() bool {
	return schema.Parent == nil
}
package openapi import "C" import ( "context" "encoding/json" "github.com/getkin/kin-openapi/openapi3filter" "github.com/gin-gonic/gin" "net/http" ) func ValidateRequests(path string) gin.HandlerFunc { spec := openapi3filter.NewRouter().WithSwaggerFromFile(path) errorEncoder := &openapi3filter.ValidationErrorEncoder{ Encoder: errorEncoder, } return func(c *gin.Context) { httpReq := c.Request route, pathParams, err := spec.FindRoute(httpReq.Method, httpReq.URL) if err != nil { errorEncoder.Encode(c, err, c.Writer) c.Abort() return } requestValidationInput := &openapi3filter.RequestValidationInput{ Request: httpReq, PathParams: pathParams, Route: route, } if err := openapi3filter.ValidateRequest(c.Request.Context(), requestValidationInput); err != nil { errorEncoder.Encode(c, err, c.Writer) c.Abort() return } c.Next() } } func statusCode(err error) int { code := http.StatusInternalServerError if sc, ok := err.(openapi3filter.StatusCoder); ok { code = sc.StatusCode() } return code } func errorEncoder(ctx context.Context, err error, w http.ResponseWriter) { if headerer, ok := err.(openapi3filter.Headerer); ok { for k, values := range headerer.Headers() { for _, v := range values { w.Header().Add(k, v) } } } code := statusCode(err) w.WriteHeader(code) if vErr, ok := err.(*openapi3filter.ValidationError); ok { w.Header().Set("Content-Type", "application/json; charset=utf-8\"") json.NewEncoder(w).Encode(vErr) } else { body := []byte(err.Error()) json.NewEncoder(w).Encode(&openapi3filter.ValidationError{ Status: 500, Title:string(body), }) } }
package dcmdata

import (
	"github.com/grayzone/godcm/ofstd"
)

/** a class handling the DICOM dataset format (files without meta header) */
type DcmDataset struct {
	DcmItem
	OriginalXfer E_TransferSyntax /// original transfer syntax of the dataset
	CurrentXfer  E_TransferSyntax /// current transfer syntax of the dataset
}

// NewDcmDataset returns a dataset initialised as an undefined-length item
// with an unknown original transfer syntax and the platform's explicit-VR
// syntax as the current one.
func NewDcmDataset() *DcmDataset {
	var result DcmDataset
	result.DcmItem = *NewDcmItem(DCM_ItemTag, DCM_UndefinedLength)
	result.OriginalXfer = EXS_Unknown
	// the default transfer syntax is explicit VR with local endianness
	if GLocalByteOrder == EBO_BigEndian {
		result.CurrentXfer = EXS_BigEndianExplicit
	} else {
		result.CurrentXfer = EXS_LittleEndianExplicit
	}
	return &result
}

/** load object from a DICOM file.
 *  This method only supports DICOM objects stored as a dataset, i.e. without meta header.
 *  Use DcmFileFormat::loadFile() to load files with meta header.
 *  @param fileName name of the file to load (may contain wide chars if support enabled).
 *    Since there are various constructors for the OFFilename class, a "char *", "OFString"
 *    or "wchar_t *" can also be passed directly to this parameter.
 *  @param readXfer transfer syntax used to read the data (auto detection if EXS_Unknown)
 *  @param groupLength flag, specifying how to handle the group length tags
 *  @param maxReadLength maximum number of bytes to be read for an element value.
 *    Element values with a larger size are not loaded until their value is retrieved
 *    (with getXXX()) or loadAllDataIntoMemory() is called.
 *  @return status, EC_Normal if successful, an error code otherwise
 */
func (ds *DcmDataset) LoadFile(filename string, readXfer E_TransferSyntax, groupLength E_GrpLenEncoding, maxReadLength uint32) ofstd.OFCondition {
	// Guard against an empty filename before touching the filesystem.
	err := ofstd.EC_InvalidFilename
	if len(filename) == 0 {
		return err
	}
	fileStream := NewDcmInputFileStream(filename, 0)
	// NOTE(review): fileStream is never closed on any path below; if
	// DcmInputFileStream holds an OS file handle this leaks it — confirm
	// whether a Close/cleanup call is needed (the commented-out line at the
	// bottom suggests unfinished cleanup code).
	err = fileStream.Status()
	if !err.Good() {
		return err
	}
	// Reset any previously loaded contents before reading.
	err = ds.Clear()
	if !err.Good() {
		return err
	}
	ds.TransferInit()
	err = ds.Read(fileStream, readXfer, groupLength, maxReadLength)
	ds.TransferEnd()
	//	err = fileStream
	return err
}

// Read pulls the dataset from instream. End-of-stream is treated as a normal
// completion: the transfer state is marked ready and EC_Normal is returned.
func (ds *DcmDataset) Read(instream *DcmInputFileStream, xfer E_TransferSyntax, glenc E_GrpLenEncoding, maxReadLength uint32) ofstd.OFCondition {
	ds.errorFlag = instream.Status()

	if ds.errorFlag.Good() && instream.Eos() {
		ds.errorFlag = EC_EndOfStream
	} else if ds.errorFlag.Good() && ds.GetTransferState() != ERW_ready {
		// NOTE(review): this branch is intentionally (?) empty — presumably a
		// placeholder for the actual element-parsing loop; confirm.
	}

	if ds.errorFlag.Good() || ds.errorFlag == EC_EndOfStream {
		ds.errorFlag = ofstd.EC_Normal
		ds.SetTransferState(ERW_ready)
	}
	return ds.errorFlag
}
package practice01 import ( "os" "fmt" ) //获取操作系统名称和path环境变量 func GetOSInfo() { var goos string= os.Getenv("GOOS") fmt.Printf("the operating system is: %s \n", goos) path := os.Getenv("PATH") fmt.Printf("the os path is: %s \n", path) }
package tomlconf

import (
	"fmt"
)

// Game holds the config for a Game instance
type Game struct {
	Name        string
	AutoStart   bool   `toml:"auto_start"`
	AutoRestart int    `toml:"auto_restart"`
	Comment     string `comment:"A message to be added to the status line of this Game"`

	Transport ConfigHolder

	PreRoll struct {
		Regexp  string
		Replace string
	} `comment:"regex to be applied to all outgoing lines before\n they are parsed by the below regexps"`

	Chat Chat `comment:"Configuration for how this game will interact with chat"`

	CommandImports []string           `toml:"import_commands"`
	Commands       map[string]Command `comment:"Commands that can be executed on the chat side"`

	RegexpImports []string `toml:"import_regexps"`
	Regexps       []Regexp `toml:"regexp"`
}

// Chat is a config for game.Chat
type Chat struct {
	BridgedChannel string `toml:"bridged_channel" comment:"The channel to bridge chat between"`

	// string ptr to check for null
	ImportFormat *string `toml:"import_format"`
	Formats      FormatSet

	BridgeChat    bool `toml:"bridge_chat" default:"true" comment:"Should this game bridge its chat (default true)"`
	DumpStdout    bool `toml:"dump_stdout" comment:"Dump stdout to the bridged channel (This is a spammy debug option)"`
	DumpStderr    bool `toml:"dump_stderr" comment:"Dump stdout to the bridged channel (This is a spammy debug option)"`
	AllowForwards bool `toml:"allow_forwards" default:"true" comment:"Allow messages from other games (default true)"`

	Transformer *ConfigHolder `comment:"How to transform messages to and from this game. (leave out for StripTransformer)"`
}

// Command holds commands that can be executed by users
type Command struct {
	Format        string `comment:"go template based formatter"`
	Help          string `comment:"help for the command"`
	RequiresAdmin int    `toml:"requires_admin" comment:"the admin level required to execute this command (0 for none)"`
}

// Regexp is a representation of a game regexp
type Regexp struct {
	Name   string
	Regexp string
	// TODO: maybe do a string pointer here, then checking in game will be easier when it comes to regexps
	// TODO: designed to eat things
	Format   string
	Priority int  `toml:",omitempty"`
	Eat      bool `default:"true" comment:"Stop processing regexps after this is matched. (default true)"`

	SendToChan   bool `toml:"send_to_chan" default:"true" comment:"Send the formatted message to the bridged channel (default true)"`     //nolint:lll // Cant shorten them
	SendToOthers bool `toml:"send_to_others" default:"true" comment:"Send the formatted message to other running games (default true)"`   //nolint:lll // Cant shorten them
	SendToLocal  bool `toml:"send_to_local" comment:"Send the formatted message to the game it came from (default false)"`
}

// FormatSet holds a set of formatters to be converted to a format.Format
type FormatSet struct {
	// string ptr to allow to check for null
	Message  *string
	Join     *string
	Part     *string
	Nick     *string
	Quit     *string
	Kick     *string
	External *string
	Extra    map[string]string
}

// Indices into a FormatSet
const (
	MESSAGE = iota
	JOIN
	PART
	NICK
	QUIT
	KICK
	EXTERNAL
)

// index maps one of the FormatSet index constants to the matching field;
// it panics on an unknown index (programmer error).
func (f *FormatSet) index(i int) *string {
	switch i {
	case MESSAGE:
		return f.Message
	case JOIN:
		return f.Join
	case PART:
		return f.Part
	case NICK:
		return f.Nick
	case QUIT:
		return f.Quit
	case KICK:
		return f.Kick
	case EXTERNAL:
		return f.External
	default:
		panic(fmt.Sprintf("Unexpected index %d into FormatSet", i))
	}
}

// setIndex assigns s to the field selected by one of the FormatSet index
// constants; it panics on an unknown index (programmer error).
func (f *FormatSet) setIndex(i int, s *string) {
	switch i {
	case MESSAGE:
		f.Message = s
	case JOIN:
		f.Join = s
	case PART:
		f.Part = s
	case NICK:
		f.Nick = s
	case QUIT:
		f.Quit = s
	case KICK:
		f.Kick = s
	case EXTERNAL:
		f.External = s
	default:
		panic(fmt.Sprintf("Unexpected index %d into FormatSet", i))
	}
}

// resolveImports expands this game's format, regexp, and command template
// imports from the given Config, in that order.
func (g *Game) resolveImports(c *Config) error {
	if err := g.resolveFormatImports(c); err != nil {
		return err
	}

	if err := g.resolveRegexpImports(c); err != nil {
		return err
	}

	if err := g.resolveCommandImports(c); err != nil {
		return err
	}

	return nil
}

// resolveFormatImports replaces this game's format set with the imported
// template, then re-applies any formats the game set explicitly (explicit
// values override the template).
func (g *Game) resolveFormatImports(c *Config) error {
	// TODO: store the current Chat.Formats to allow for overrides
	if g.Chat.ImportFormat == nil {
		return nil
	}

	currentFormats := g.Chat.Formats

	fmtTemplate, exists := c.FormatTemplates[*g.Chat.ImportFormat]
	if !exists {
		return fmt.Errorf(
			"could not resolve format import %q as it does not exist",
			*g.Chat.ImportFormat,
		)
	}

	g.Chat.Formats = fmtTemplate

	// Re-apply explicitly set formats on top of the template.
	for i := MESSAGE; i <= EXTERNAL; i++ {
		if str := currentFormats.index(i); str != nil {
			g.Chat.Formats.setIndex(i, str)
		}
	}

	return nil
}

// resolveRegexpImports appends every imported regexp template's entries to
// this game's regexp list.
func (g *Game) resolveRegexpImports(c *Config) error {
	for _, templateName := range g.RegexpImports {
		importedRegexps, exists := c.RegexpTemplates[templateName]
		if !exists {
			return fmt.Errorf(
				"could not resolve regexp import %q as it does not exist",
				templateName,
			)
		}

		g.Regexps = append(g.Regexps, importedRegexps...)
	}

	return nil
}

// resolveCommandImports merges every imported command template into this
// game's command map (later imports overwrite same-named commands).
func (g *Game) resolveCommandImports(c *Config) error {
	for _, templateName := range g.CommandImports {
		commands, exists := c.CommandTemplates[templateName]
		if !exists {
			return fmt.Errorf(
				"could not resolve command import %q as it does not exist",
				templateName,
			)
		}

		if g.Commands == nil {
			g.Commands = make(map[string]Command)
		}

		for name, command := range commands {
			g.Commands[name] = command
		}
	}

	return nil
}
package main import ( "fmt" _ "github.com/go-sql-driver/mysql" "github.com/jmoiron/sqlx" ) var db *sqlx.DB //是一个连接池对象 func initDb() (err error) { //数据库信息 dsn := "root:root@tcp(127.0.0.1:3306)/goday10" //连接数据库 //db 全局的db db, err = sqlx.Connect("mysql", dsn) if err != nil { return } db.SetMaxOpenConns(10) //设置数据库连接池的最大连接数 db.SetConnMaxLifetime(5) //设置最大空闲连接数 return } //查询 type user struct { Id int `json:"id"` Name string `json:"name"` Age int `json:"age"` } //Go连接mysql示例 func main() { err := initDb() if err != nil { fmt.Println("init db failed,", err) return } fmt.Println("连接数据库成功!") sqlStr1 := `select id ,name,age from user where id=1` var u user db.Get(&u, sqlStr1) fmt.Println(u) var userList = make([]user, 0) sqlStr2 := `select id ,name,age from user` db.Select(&userList, sqlStr2) fmt.Println(userList) }
package service import ( . "CoffeeMachineDunzo/domain" "fmt" ) type CoffeeMachineService struct { coffeeMachine *CoffeeMachine } func NewCoffeeMachineService(machine *CoffeeMachine) *CoffeeMachineService { return &CoffeeMachineService{ coffeeMachine: machine, } } func (service *CoffeeMachineService) MakeBeverage(beverage Beverage, selectedOutlet int) ResponseMessage { err := service.coffeeMachine.MakeBeverage(beverage, selectedOutlet) if err != nil { return ResponseMessage(fmt.Sprintf("%s cannot be prepared because %s", beverage, err)) } return ResponseMessage(fmt.Sprintf("%s is prepared", string(beverage))) } type ResponseMessage string
package suites import ( "github.com/go-rod/rod" "github.com/stretchr/testify/suite" ) func NewRodSuite(name string) *RodSuite { return &RodSuite{ BaseSuite: &BaseSuite{ Name: name, }, } } // RodSuite is a go-rod suite. type RodSuite struct { *BaseSuite *RodSession *rod.Page } type BaseSuite struct { suite.Suite Name string } // CommandSuite is a command line interface suite. type CommandSuite struct { *BaseSuite *DockerEnvironment }
package http import ( "net/http" "time" ) type Client interface { Do(r *http.Request) (*http.Response, error) } func NewDefaultClient() *http.Client { return NewClient(5 * time.Second) } func NewClient(timeout time.Duration) *http.Client { return &http.Client{ Timeout: timeout, } }
package pgsql import ( "testing" ) func TestMoney(t *testing.T) { testlist2{{ valuer: MoneyFromInt64, scanner: MoneyToInt64, data: []testdata{ {input: int64(0), output: int64(0)}, {input: int64(99), output: int64(99)}, {input: int64(120), output: int64(120)}, }, }, { data: []testdata{ {input: string(`$0.00`), output: string(`$0.00`)}, {input: string(`$0.99`), output: string(`$0.99`)}, {input: string(`$1.20`), output: string(`$1.20`)}, }, }, { data: []testdata{ {input: []byte(`$0.00`), output: []byte(`$0.00`)}, {input: []byte(`$0.99`), output: []byte(`$0.99`)}, {input: []byte(`$1.20`), output: []byte(`$1.20`)}, }, }}.execute(t, "money") }
package main import ( "bytes" "crypto/tls" "io" "log" "mime" "net" "net/http" "path" "sync" "time" "github.com/remogatto/ftpget" ) // A TLSRedialTransport is an http.RoundTripper that sends all the requests // over a TLS connection to one server. It will automatically reconnect to the // server as needed. type TLSRedialTransport struct { http.Transport // ServerConn is the initial connection to the server. ServerConn *tls.Conn // ServerName is the SNI to send when reconnecting. ServerName string serverAddr string publicKey []byte once sync.Once timeout *time.Timer } func NewTLSRedialTransport(conn *tls.Conn, serverName string) *TLSRedialTransport { t := &TLSRedialTransport{ ServerConn: conn, ServerName: serverName, serverAddr: conn.RemoteAddr().String(), publicKey: conn.ConnectionState().PeerCertificates[0].RawSubjectPublicKeyInfo, } t.Dial = t.dial t.timeout = time.AfterFunc(10*time.Second, t.CloseIdleConnections) return t } func (t *TLSRedialTransport) dial(network, addr string) (conn net.Conn, err error) { t.once.Do(func() { conn = t.ServerConn }) if conn != nil { return conn, nil } newConn, err := tls.Dial("tcp", t.serverAddr, &tls.Config{ ServerName: t.ServerName, InsecureSkipVerify: true, }) if err != nil { return nil, err } if !bytes.Equal(t.publicKey, newConn.ConnectionState().PeerCertificates[0].RawSubjectPublicKeyInfo) { newConn.Close() log.Printf("TLS private key at %s changed", t.ServerName) // Our little certificate-pinning trick failed because the server changed // certificates (or we've been MITM'd). See if the server has a valid // certificate, even if it's not the same one. return tls.Dial("tcp", t.serverAddr, &tls.Config{ServerName: t.ServerName}) } return newConn, nil } func (t *TLSRedialTransport) CloseIdleConnections() { // If the Once hasn't fired yet, the original connection hasn't been used. 
t.once.Do(func() { t.ServerConn.Close() }) t.Transport.CloseIdleConnections() } func (t *TLSRedialTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { // Temporarily change the scheme to HTTP, since we're taking care of TLS // and we don't want the underlying Transport to try to do TLS too. realScheme := req.URL.Scheme req.URL.Scheme = "http" resp, err = t.Transport.RoundTrip(req) req.URL.Scheme = realScheme if !t.timeout.Reset(10 * time.Second) { t.timeout = time.AfterFunc(10*time.Second, t.CloseIdleConnections) } return } // An FTPTransport fetches files via FTP. type FTPTransport struct{} func (FTPTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { if req.Method != "GET" { return &http.Response{ StatusCode: http.StatusMethodNotAllowed, Request: req, }, nil } fullPath := req.URL.Host + req.URL.Path r, w := io.Pipe() xfer, err := ftp.GetAsync(fullPath, w) if err != nil { return nil, err } go func() { for stat := range xfer.Status { switch stat { case ftp.COMPLETED: w.Close() return case ftp.ERROR: err := <-xfer.Error log.Printf("FTP: error downloading %v: %v", req.URL, err) w.CloseWithError(err) return } } }() resp = &http.Response{ StatusCode: 200, ProtoMajor: 1, ProtoMinor: 1, Request: req, Body: r, Header: make(http.Header), } ext := path.Ext(req.URL.Path) if ext != "" { ct := mime.TypeByExtension(ext) if ct != "" { resp.Header.Set("Content-Type", ct) } } return resp, nil }
package fetchall import ( "net/http" "os" "testing" ) func TestRunFetchAll(t *testing.T) { // URLs for requesting urls := []string{"https://tanaikech.github.io/"} p := &Params{} for _, e := range urls { req, err := http.NewRequest("GET", e, nil) if err != nil { os.Exit(1) } r := &Request{ Request: req, Client: &http.Client{}, } p.Requests = append(p.Requests, *r) } // Run fetchall res := Do(p) // Show result for _, e := range res { t.Log(e.Response.Status) } }