text stringlengths 11 4.05M |
|---|
// This basic example illustrates how to setup a Self-Service client to make a simple
// API call. The reference for the API can be found at
// http://reference.rightscale.com/selfservice/manager/index.html#/
package main
import (
"flag"
"fmt"
"io"
"os"
"sort"
"strings"
"text/tabwriter"
"github.com/rightscale/rsc/httpclient"
"github.com/rightscale/rsc/rsapi"
"github.com/rightscale/rsc/ss"
"github.com/rightscale/rsc/ss/ssm"
)
// For testing
var osStdout io.Writer = os.Stdout
// main authenticates against the RightScale Self-Service API with the
// credentials supplied on the command line, fetches the account's
// executions, and prints them as a sorted, tab-aligned table to osStdout.
func main() {
	// 1. Retrieve login and endpoint information
	email := flag.String("e", "", "Login email")
	pwd := flag.String("p", "", "Login password")
	account := flag.Int("a", 0, "Account id")
	host := flag.String("h", "us-3.rightscale.com", "RightScale API host")
	insecure := flag.Bool("insecure", false, "Use HTTP instead of HTTPS - used for testing")
	flag.Parse()
	if *email == "" {
		fail("Login email required")
	}
	if *pwd == "" {
		fail("Login password required")
	}
	if *account == 0 {
		fail("Account id required")
	}
	if *host == "" {
		fail("Host required")
	}

	// 2. Setup client using basic auth
	auth := rsapi.NewBasicAuthenticator(*email, *pwd, *account)
	ssAuth := rsapi.NewSSAuthenticator(auth, *account)
	client := ssm.New(*host, ssAuth)
	if *insecure {
		httpclient.Insecure = true
	}
	if err := client.CanAuthenticate(); err != nil {
		fail("invalid credentials: %s", err)
	}

	// 3. Make execution index call using expanded view
	l := client.ExecutionLocator(fmt.Sprintf("/api/manager/projects/%d/executions", *account))
	execs, err := l.Index(rsapi.APIParams{})
	if err != nil {
		fail("failed to list executions: %s", err)
	}
	sort.Sort(ByName(execs))

	// 4. Print the executions, each with a deep link into the dashboard.
	w := new(tabwriter.Writer)
	w.Init(osStdout, 5, 0, 1, ' ', 0)
	fmt.Fprintln(w, "Name\tState\tBy\tLink")
	fmt.Fprintln(w, "-----\t-----\t-----\t-----")
	for _, e := range execs {
		link := fmt.Sprintf("https://%s/api/manager/projects/#/executions/%s",
			ss.HostFromLogin(client.Host), e.Id)
		// fmt.Fprintf replaces the original fmt.Fprintln(fmt.Sprintf(...)),
		// which allocated an intermediate string for no benefit.
		fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", e.Name, e.Status, e.CreatedBy.Email, link)
	}
	w.Flush()
}
// ByName makes it possible to sort executions by name via sort.Sort.
type ByName []*ssm.Execution

// Len reports the number of executions.
func (b ByName) Len() int { return len(b) }

// Swap exchanges the executions at indexes i and j.
func (b ByName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

// Less orders executions lexicographically by their Name field.
func (b ByName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// fail prints the formatted error message and exits with code 1.
// It is a package variable so tests can override it.
var fail = func(format string, v ...interface{}) {
	// Guarantee exactly one trailing newline. The original used
	// fmt.Println(fmt.Sprintf(...)), which appended a second newline
	// after the one added here; fmt.Printf fixes the duplicate.
	if !strings.HasSuffix(format, "\n") {
		format += "\n"
	}
	fmt.Printf(format, v...)
	os.Exit(1)
}
|
package controllers
import (
	"os"
	"path/filepath"
	"time"

	"ss-backend/models"

	"github.com/astaxie/beego"
)
type (
	// ReportController handles the report HTTP endpoints (stock value
	// and sales, as JSON or CSV). It embeds beego.Controller for access
	// to the request context and response output.
	ReportController struct {
		beego.Controller
	}
)
// GetNilaiBarang serves every nilai-barang (stock value) report row as
// a JSON response; model errors are reported in the Error field rather
// than aborting the request.
func (c *ReportController) GetNilaiBarang() {
	var (
		resp RespData
		rep  models.ReportNilaiBarang
	)
	if res, errGet := rep.GetAll(); errGet != nil {
		resp.Error = errGet
	} else {
		resp.Body = res
	}
	if err := c.Ctx.Output.JSON(resp, false, false); err != nil {
		panic("ERROR OUTPUT JSON LEVEL MIDDLEWARE")
	}
}
// GetPenjualan serves the sales report, filtered by the fromDate,
// toDate, and query request parameters, as a JSON response; model
// errors are reported in the Error field rather than aborting.
func (c *ReportController) GetPenjualan() {
	var (
		resp RespData
		rep  models.ReportPenjualan
	)
	filter := models.RequestGet{
		FromDate: c.Ctx.Input.Query("fromDate"),
		ToDate:   c.Ctx.Input.Query("toDate"),
		Query:    c.Ctx.Input.Query("query"),
	}
	if res, errGet := rep.GetAll(filter); errGet != nil {
		resp.Error = errGet
	} else {
		resp.Body = res
	}
	if err := c.Ctx.Output.JSON(resp, false, false); err != nil {
		panic("ERROR OUTPUT JSON LEVEL MIDDLEWARE")
	}
}
// GetNilaiBarangCSV writes the stock-value report to a dated CSV file
// under the project's storages directory and streams it to the client
// as a download named report_nilai_barang<yyyymmdd>.csv.
func (c *ReportController) GetNilaiBarangCSV() {
	var rep models.ReportNilaiBarang
	// filepath.Join replaces manual "/" concatenation so the path is
	// built correctly on every OS.
	nameFl := "report_nilai_barang" + time.Now().Format("20060102") + ".csv"
	path := filepath.Join(os.Getenv("GOPATH"), "src", "ss-backend", "storages", nameFl)
	if errGet := rep.GetAllAndWriteCSV(path); errGet != nil {
		// Best effort: log and still attempt the download, matching the
		// original behavior.
		beego.Debug("Error get csv", errGet)
	}
	c.Ctx.Output.Download(path, nameFl)
}
// GetPenjualanCSV writes the sales report (filtered by the fromDate,
// toDate, and query parameters) to a dated CSV file under the project's
// storages directory and streams it to the client as a download named
// report_penjualan<yyyymmdd>.csv.
func (c *ReportController) GetPenjualanCSV() {
	var rep models.ReportPenjualan
	reqDt := models.RequestGet{
		FromDate: c.Ctx.Input.Query("fromDate"),
		ToDate:   c.Ctx.Input.Query("toDate"),
		Query:    c.Ctx.Input.Query("query"),
	}
	// filepath.Join replaces manual "/" concatenation so the path is
	// built correctly on every OS.
	nameFl := "report_penjualan" + time.Now().Format("20060102") + ".csv"
	path := filepath.Join(os.Getenv("GOPATH"), "src", "ss-backend", "storages", nameFl)
	if errGet := rep.GetAllAndWriteCSV(reqDt, path); errGet != nil {
		// Best effort: log and still attempt the download, matching the
		// original behavior.
		beego.Debug("Error get csv", errGet)
	}
	c.Ctx.Output.Download(path, nameFl)
}
|
/*
You are given an array A of N elements. For any ordered triplet (i,j,k) such that i, j, and k are pairwise distinct and 1≤i,j,k≤N, the value of this triplet is (Ai−Aj)⋅Ak.
You need to find the maximum value among all possible ordered triplets.
Note: Two ordered triplets (a,b,c) and (d,e,f) are only equal when a=d and b=e and c=f. As an example, (1,2,3) and (2,3,1) are two different ordered triplets.
Input Format
The first line of the input contains a single integer T - the number of test cases. The test cases then follow.
The first line of each test case contains an integer N.
The second line of each test case contains N space-separated integers A1,A2,…,AN.
Output Format
For each test case, output the maximum value among all different ordered triplets.
Constraints
1≤T≤100
3≤N≤3⋅10^5
1≤Ai≤10^6
Sum of N over all testcases won't exceed 3⋅10^5
*/
package main
import "sort"
// main exercises triplet against three known answers and panics (via
// assert) if any of them is wrong.
func main() {
	cases := []struct {
		in   []int
		want int
	}{
		{[]int{1, 1, 3}, 2},
		{[]int{3, 4, 4, 1, 2}, 12},
		{[]int{23, 17, 21, 18, 19}, 126},
	}
	for _, c := range cases {
		assert(triplet(c.in) == c.want)
	}
}
// assert panics when the supplied condition is false; it is the tiny
// self-test helper used by main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// triplet returns the maximum value of (A[i]-A[j])*A[k] over all
// ordered triplets of pairwise-distinct indexes, for the problem's
// constraint of strictly positive elements: the optimum is then always
// (max - min) * secondMax, obtained here by sorting.
// Note: the slice is sorted in place (mutating the caller's data),
// exactly like the original implementation. Fewer than three elements
// yields 0.
func triplet(a []int) int {
	if len(a) < 3 {
		return 0
	}
	sort.Ints(a)
	last := len(a) - 1
	return (a[last] - a[0]) * a[last-1]
}
|
package di
import (
"fmt"
"reflect"
"github.com/goava/di/internal/reflection"
)
// providerInterface adapts an existing provider so that its result is
// registered and resolved under an interface the result type implements.
type providerInterface struct {
	res id // identity (provider name + interface type) this adapter is registered as
	provider provider // the underlying provider that builds the concrete value
}
// newProviderInterface wraps provider so it resolves as the interface
// pointed to by as. It returns an error when as is not a pointer to an
// interface, or when the provider's result type does not implement
// that interface.
func newProviderInterface(provider provider, as interface{}) (*providerInterface, error) {
	i, err := reflection.InspectInterfacePtr(as)
	if err != nil {
		return nil, err
	}
	if !provider.ID().Type.Implements(i.Type) {
		// Grammar fix: the original message read "not implement".
		return nil, fmt.Errorf("%s does not implement %s", provider.ID(), i.Type)
	}
	return &providerInterface{
		res: id{
			Name: provider.ID().Name, // keep the provider's name, swap the type
			Type: i.Type,
		},
		provider: provider,
	}, nil
}
// ID returns the identity (interface type plus underlying provider
// name) under which this adapter is registered.
func (i *providerInterface) ID() id {
	return i.res
}
// ParameterList reports this adapter's single dependency: the
// underlying provider's own identity, which must be resolved first.
func (i *providerInterface) ParameterList() parameterList {
	underlying := i.provider.ID()
	return parameterList{
		{
			name:     underlying.Name,
			typ:      underlying.Type,
			optional: false,
			embed:    false,
		},
	}
}
// Provide returns the already-built underlying value unchanged; the
// container passes the dependency resolved from ParameterList as
// values[0]. No cleanup is needed, so the cleanup func is nil.
// NOTE(review): this assumes the container always supplies at least one
// value; an empty call would panic on values[0].
func (i *providerInterface) Provide(values ...reflect.Value) (reflect.Value, func(), error) {
	return values[0], nil, nil
}
|
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/Sirupsen/logrus"
"github.com/spf13/afero"
)
// appFs is an afero filesystem. It is stored in a variable so that unit
// tests can replace it with an in-memory filesystem.
var appFs = afero.NewOsFs()
// logger bundles the per-run log destinations: the run's root web
// directory, file loggers for container command output and for the
// tester itself, and the machine IP used to build result URLs.
type logger struct {
	rootDir string
	cmdLogger fileLogger
	testerLogger fileLogger
	ip string
}
// testLogger returns a fileLogger whose output file is named after the
// test and lives in the "passed" or "failed" subdirectory of the run's
// root, depending on the outcome.
func (l logger) testLogger(passed bool, testName string) fileLogger {
	subdir := "failed"
	if passed {
		subdir = "passed"
	}
	return fileLogger(filepath.Join(l.rootDir, subdir, testName+".txt"))
}
// url returns the HTTP address at which this run's output directory is
// served, derived from the machine IP and the run directory name.
func (l logger) url() string {
	runDir := filepath.Base(l.rootDir)
	return "http://" + l.ip + "/" + runDir
}
// newLogger creates a timestamped run directory tree under webRoot
// (passed/, failed/, log/), copies the build info into it, repoints the
// "latest" symlink at the new run, and returns a logger wired to those
// paths.
func newLogger(myIP string) (logger, error) {
	webDir := filepath.Join(webRoot, time.Now().Format("02-01-2006_15h04m05s"))
	passedDir := filepath.Join(webDir, "passed")
	failedDir := filepath.Join(webDir, "failed")
	logDir := filepath.Join(webDir, "log")
	// One loop instead of three copies of the same MkdirAll+check.
	for _, dir := range []string{logDir, passedDir, failedDir} {
		if err := os.MkdirAll(dir, 0755); err != nil {
			return logger{}, err
		}
	}

	// Best effort: a missing build info file should not abort the run.
	buildinfoPath := filepath.Join(webDir, "buildinfo")
	if err := exec.Command("cp", "/buildinfo", buildinfoPath).Run(); err != nil {
		logrus.WithError(err).Error("Failed to copy build info.")
	}

	// Replace the "latest" symlink. Remove's error is deliberately
	// ignored: the link legitimately may not exist on the first run.
	latestSymlink := filepath.Join(webRoot, "latest")
	os.Remove(latestSymlink)
	if err := os.Symlink(webDir, latestSymlink); err != nil {
		return logger{}, err
	}

	return logger{
		ip:           myIP,
		rootDir:      webDir,
		testerLogger: fileLogger(filepath.Join(logDir, "quilt-tester.log")),
		cmdLogger:    fileLogger(filepath.Join(logDir, "container.log")),
	}, nil
}
// fileLogger is the path of a log file; its methods append formatted
// messages to that file through the package filesystem.
type fileLogger string
// infoln appends a timestamped "=== msg ===" banner line to the log
// file, reporting (but not propagating) any write failure.
func (l fileLogger) infoln(msg string) {
	banner := "\n" + time.Now().Format("[15:04:05] ") + "=== " + msg + " ===\n"
	if err := writeTo(string(l), banner); err != nil {
		logrus.WithError(err).Errorf("Failed to write %s to %s.", msg, string(l))
	}
}
// errorln appends msg under an "=== Error Text ===" header, reporting
// (but not propagating) any write failure.
func (l fileLogger) errorln(msg string) {
	err := writeTo(string(l), "\n=== Error Text ===\n"+msg+"\n")
	if err != nil {
		logrus.WithError(err).Errorf("Failed to write %s to %s.", msg, string(l))
	}
}
// println appends msg plus a newline to the log file, reporting (but
// not propagating) any write failure.
func (l fileLogger) println(msg string) {
	err := writeTo(string(l), msg+"\n")
	if err != nil {
		logrus.WithError(err).Errorf("Failed to write %s to %s.", msg, string(l))
	}
}
// writeTo appends message to file (creating it if needed), going
// through the package's afero filesystem so tests can substitute an
// in-memory FS.
func writeTo(file string, message string) error {
	fs := afero.Afero{Fs: appFs}
	f, err := fs.OpenFile(file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		logrus.WithError(err).Errorf("Couldn't open %s for writing", file)
		return err
	}
	defer f.Close()
	_, err = f.WriteString(message)
	return err
}
// overwrite replaces the contents of file with message, creating the
// file if necessary.
func overwrite(file string, message string) error {
	fs := afero.Afero{Fs: appFs}
	return fs.WriteFile(file, []byte(message), 0666)
}
// fileContents returns the full contents of file as a string.
func fileContents(file string) (string, error) {
	fs := afero.Afero{Fs: appFs}
	data, err := fs.ReadFile(file)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// message is one field of a Slack attachment, serialized into the
// webhook payload built by slack().
type message struct {
	Title string `json:"title"`
	Short bool   `json:"short"`
	Value string `json:"value"`
}
// slackPost is the JSON body of a Slack incoming-webhook request, as
// posted by slack().
type slackPost struct {
	Channel   string    `json:"channel"`
	Color     string    `json:"color"`
	Fields    []message `json:"fields"`
	Pretext   string    `json:"pretext"`
	Username  string    `json:"username"`
	Iconemoji string    `json:"icon_emoji"`
}
// slack posts p to the given Slack webhook URL. It returns an error for
// marshalling failures, transport failures, or any non-200 response,
// using the response body as the error text in the last case.
func slack(hookurl string, p slackPost) error {
	body, err := json.Marshal(p)
	if err != nil {
		return err
	}
	resp, err := http.Post(hookurl, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// http.StatusOK replaces the bare 200 literal.
	if resp.StatusCode != http.StatusOK {
		t, _ := ioutil.ReadAll(resp.Body)
		return errors.New(string(t))
	}
	return nil
}
// updateNamespace rewrites specfile in place so that the global
// deployment uses the given namespace, by appending a namespace
// assignment to the end of the spec.
func updateNamespace(specfile string, namespace string) error {
	spec, err := fileContents(specfile)
	if err != nil {
		return err
	}
	assignment := fmt.Sprintf("; deployment.namespace = %q;", namespace)
	return overwrite(specfile, spec+assignment)
}
|
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package drpcmux
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/textproto"
"reflect"
"github.com/zeebo/errs"
"storj.io/drpc"
"storj.io/drpc/drpcerr"
)
// ServeHTTP handles unitary rpcs over an http request. The rpcs are hosted at a
// path based on their name, like `/service.Server/Method` and accept the request
// protobuf in json. The response will either be of the form
//
// {
// "status": "ok",
// "response": ...
// }
//
// if the request was successful, or
//
// {
// "status": "error",
// "error": ...,
// "code": ...
// }
//
// where error is a textual description of the error, and code is the numeric code
// that was set with drpcerr, if any.
//
// Metadata can be attached by adding the "X-Drpc-Metadata" header to the request
// possibly multiple times. The format is
//
// X-Drpc-Metadata: percentEncode(key)=percentEncode(value)
//
// where percentEncode is the encoding used for query strings. Only the '%' and '='
// characters are necessary to be escaped.
func (m *Mux) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// Decode any X-Drpc-Metadata headers (percent-encoded key=value
	// pairs) into the request context before dispatching.
	ctx, err := buildContext(req.Context(), headerValues(req.Header, "X-Drpc-Metadata"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Dispatch the rpc named by the URL path; data and err are then
	// deliberately reused to hold the JSON envelope and its marshal
	// error.
	data, err := m.serveHTTP(ctx, req.URL.Path, req.Body)
	if err != nil {
		// rpc failed: wrap the error text and its drpcerr code.
		data, err = json.MarshalIndent(map[string]interface{}{
			"status": "error",
			"error":  err.Error(),
			"code":   drpcerr.Code(err),
		}, "", "    ")
	} else {
		// rpc succeeded: embed the raw JSON response unchanged.
		data, err = json.MarshalIndent(map[string]interface{}{
			"status":   "ok",
			"response": json.RawMessage(data),
		}, "", "    ")
	}
	// A failure to build the envelope itself becomes a 500.
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write(data)
}
// serveHTTP dispatches a unitary rpc by name: it decodes the JSON body
// into the rpc's input message, invokes the handler, and returns the
// JSON-encoded output (nil for a nil output). Protocol errors are
// returned for unknown or streaming rpcs and for oversized or malformed
// bodies.
func (m *Mux) serveHTTP(ctx context.Context, rpc string, body io.Reader) ([]byte, error) {
	rpcData, ok := m.rpcs[rpc]
	switch {
	case !ok:
		return nil, drpc.ProtocolError.New("unknown rpc: %q", rpc)
	case !rpcData.unitary:
		return nil, drpc.ProtocolError.New("non-unitary rpc: %q", rpc)
	}

	// Read at most maxSize bytes; reaching the limit exactly is treated
	// as overflow because we cannot tell whether more data remained.
	const maxSize = 4 << 20
	payload, err := ioutil.ReadAll(io.LimitReader(body, maxSize))
	switch {
	case err != nil:
		return nil, err
	case len(payload) >= maxSize:
		return nil, drpc.ProtocolError.New("incoming message size limit exceeded")
	}

	msg, ok := reflect.New(rpcData.in1.Elem()).Interface().(drpc.Message)
	if !ok {
		return nil, drpc.InternalError.New("invalid rpc input type")
	}
	if err := rpcData.enc.JSONUnmarshal(payload, msg); err != nil {
		return nil, drpc.ProtocolError.Wrap(err)
	}

	out, err := rpcData.receiver(rpcData.srv, ctx, msg, nil)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	if out == nil {
		return nil, nil
	}

	encoded, err := rpcData.enc.JSONMarshal(out)
	if err != nil {
		return nil, drpc.InternalError.Wrap(err)
	}
	return encoded, nil
}
func headerValues(h http.Header, key string) []string {
if h == nil {
return nil
}
return h[textproto.CanonicalMIMEHeaderKey(key)]
}
|
package main
import (
"io/ioutil"
parser "github.com/romshark/llparser"
"github.com/romshark/llparser/misc"
)
// FragKind represents a dick-lang fragment kind
type FragKind parser.FragmentKind

const (
	// Kinds start right after the last kind declared by the misc
	// package so they never collide with the lexer's built-in kinds.
	_ = misc.FrSign + iota
	// FrBalls represents the balls
	FrBalls
	// FrShaft represents the shaft
	FrShaft
	// FrHead represents the head
	FrHead
	// FrDick represents the entire dick
	FrDick
)
// Parse parses a dick-lang file.
//
// It reads the file at filePath, declares the grammar (a "dick" is a
// pair of balls, a shaft of '=', ':' or 'x' runs, and a head, facing
// either right like "8==>" or left like "<==8"), then runs the lexer
// and parser over the source. Every match invokes onDickDetected on the
// returned ModelDicks, which also keeps the root fragment in Frag.
func Parse(filePath string) (*ModelDicks, error) {
	// Read the source file into memory
	bt, err := ioutil.ReadFile(filePath)
	if err != nil {
		return nil, err
	}

	// Initialize model
	mod := &ModelDicks{}

	// Define the grammar

	// shaft: one or more '=', ':' or 'x' tokens.
	ruleShaft := &parser.Rule{
		Designation: "shaft",
		Kind:        FrShaft,
		Pattern: parser.OneOrMore{
			Pattern: parser.Either{
				parser.TermExact{Kind: misc.FrSign, Expectation: []rune("=")},
				parser.TermExact{Kind: misc.FrSign, Expectation: []rune(":")},
				parser.TermExact{Kind: misc.FrWord, Expectation: []rune("x")},
			},
		},
	}
	// right-facing dick: balls ('8' or 'B'), shaft, then head '>'.
	ruleDickRight := &parser.Rule{
		Designation: "dick(right)",
		Kind:        FrDick,
		Pattern: parser.Sequence{
			parser.Either{
				parser.TermExact{Kind: FrBalls, Expectation: []rune("8")},
				parser.TermExact{Kind: FrBalls, Expectation: []rune("B")},
			},
			ruleShaft,
			parser.TermExact{Kind: FrHead, Expectation: []rune(">")},
		},
		Action: mod.onDickDetected,
	}
	// left-facing dick: head '<', shaft, then balls ('8' or '3' — the
	// mirrored glyph set differs from the right-facing one).
	ruleDickLeft := &parser.Rule{
		Designation: "dick(left)",
		Kind:        FrDick,
		Pattern: parser.Sequence{
			parser.TermExact{Kind: FrHead, Expectation: []rune("<")},
			ruleShaft,
			parser.Either{
				parser.TermExact{Kind: FrBalls, Expectation: []rune("8")},
				parser.TermExact{Kind: FrBalls, Expectation: []rune("3")},
			},
		},
		Action: mod.onDickDetected,
	}
	// file: optional leading whitespace, then any number of dicks, each
	// optionally followed by whitespace.
	ruleFile := &parser.Rule{
		Designation: "file",
		Pattern: parser.Sequence{
			parser.Optional{Pattern: parser.Term(misc.FrSpace)},
			parser.ZeroOrMore{
				Pattern: parser.Sequence{
					parser.Either{
						ruleDickLeft,
						ruleDickRight,
					},
					parser.Optional{Pattern: parser.Term(misc.FrSpace)},
				},
			},
		},
	}

	// Initialize lexer and parser
	par := parser.NewParser()
	lex := misc.NewLexer(&parser.SourceFile{
		Name: filePath,
		Src:  []rune(string(bt)),
	})

	// Parse the source file
	mainFrag, err := par.Parse(lex, ruleFile)
	if err != nil {
		return nil, err
	}
	mod.Frag = mainFrag
	return mod, nil
}
|
package c38_simplified_srp
import (
"crypto/sha256"
"math/big"
)
// Client is the client side of the simplified SRP exchange (Cryptopals
// challenge 38, per the package name). Unexported fields hold
// per-session handshake state filled in by receivePub and computeK.
type Client struct {
	Email    []byte   // identity sent to the server by sendPub
	Password []byte   // secret folded into x = SHA256(salt | password)
	salt     []byte   // server-provided salt, set by receivePub
	Key      []byte   // derived session key, set by computeK
	u        *big.Int // server-chosen scrambling parameter
	priv     *big.Int // client private exponent a
	Pub      *big.Int // client public value A = g^a mod N, lazily set by sendPub
	sPub     *big.Int // server public value B
}
// computeK derives the shared session key on the client side:
//
//	x = SHA256(salt | password)
//	S = B^(a + u*x) mod N
//	K = SHA256(S)
//
// where B is the server public value (obj.sPub), a the client private
// key (obj.priv), and u the server-chosen scrambling parameter. N is a
// package-level group modulus declared elsewhere in this package.
func (obj *Client) computeK() {
	xH := sha256.Sum256(append(obj.salt, obj.Password...))
	x := new(big.Int).SetBytes(xH[:])
	// S = B**(a + ux) % n
	s := new(big.Int).Mul(obj.u, x)
	s.Add(obj.priv, s)
	s.Exp(obj.sPub, s, N)
	key := sha256.Sum256(s.Bytes())
	obj.Key = key[:]
}
// sendPub lazily computes the client public value A = g^priv mod N
// (g and N are package-level group parameters declared elsewhere) and
// hands it, together with the client email, to the authenticator s.
func (obj *Client) sendPub(s auther) {
	if obj.Pub == nil {
		obj.Pub = new(big.Int).Exp(g, obj.priv, N)
	}
	s.receivePub(obj.Email, obj.Pub)
}
// receivePub records the server's half of the handshake: the stored
// salt, the server public value B, and the scrambling parameter u.
func (obj *Client) receivePub(salt []byte, pk, u *big.Int) {
	obj.u = u
	obj.sPub = pk
	obj.salt = salt
}
// initClient builds a Client holding the given credentials and a fresh
// random private key; the remaining fields are populated during the
// handshake.
func initClient(email, password []byte) Client {
	c := Client{
		Email:    email,
		Password: password,
	}
	c.priv = privKey()
	return c
}
|
package main
import (
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"sort"
"strings"
"testing"
"github.com/bradleyjkemp/cupaloy/v2"
"golang.org/x/tools/go/packages/packagestest"
)
// CreateTestContext builds a TestContext for the given dependency mode
// and prepares its sandbox; debug keeps the sandbox on disk afterwards.
func CreateTestContext(t *testing.T, mode TestMode, debug bool) *TestContext {
	t.Helper()
	tc := &TestContext{
		mode:  mode,
		debug: debug,
	}
	tc.setupSandbox(t)
	return tc
}
// TestContext owns a packagestest sandbox for integration tests: the
// dependency-management mode under test, the exported module layout, a
// debug flag that preserves the sandbox after the run, and cleanup
// callbacks executed by Close in LIFO order.
type TestContext struct {
	mode     TestMode
	exported *packagestest.Exported
	debug    bool
	closers  []func(t *testing.T)
}
// setupSandbox exports a minimal "sampleapp" module into a temp sandbox
// for tc.mode, registers its cleanup (skipped in debug mode so the
// sandbox can be inspected), and runs any mode-specific initialization
// (dep init for TestModeDep).
func (tc *TestContext) setupSandbox(t *testing.T) {
	t.Helper()
	tc.exported = packagestest.Export(t, tc.mode.Exporter(), []packagestest.Module{
		{Name: "sampleapp", Files: map[string]interface{}{".keep": "", "main.go": "package main"}},
	})
	tc.closers = append(tc.closers, func(t *testing.T) {
		if tc.debug {
			t.Log("Keep the test environment on debug mode")
			return
		}
		tc.exported.Cleanup()
	})
	// dep operates inside the module directory itself, not the GOPATH
	// root the exporter produced.
	if tc.mode == TestModeDep {
		tc.exported.Config.Dir = filepath.Join(tc.exported.Config.Dir, "sampleapp")
	}
	t.Logf("root directory: %s", tc.rootDir())
	switch tc.mode {
	case TestModeMod:
		// no-op
	case TestModeDep:
		tc.ExecCmd(t, "dep", "init", "-v")
	default:
		panic("unreachable")
	}
}
// Close runs the registered cleanup callbacks in LIFO order, mirroring
// defer semantics.
func (tc *TestContext) Close(t *testing.T) {
	last := len(tc.closers) - 1
	for i := range tc.closers {
		tc.closers[last-i](t)
	}
}
// rootDir returns the sandbox directory commands are run in.
func (tc *TestContext) rootDir() string {
	return tc.exported.Config.Dir
}
// binDir returns the sandbox directory generated binaries land in.
func (tc *TestContext) binDir() string {
	return filepath.Join(tc.rootDir(), "bin")
}
// environ returns the sandbox environment with GOPROXY entries removed
// and, for dep mode on macOS, /var GOPATH prefixes rewritten to
// /private/var so symlinked temp directories resolve consistently.
func (tc *TestContext) environ() []string {
	src := tc.exported.Config.Env
	env := make([]string, 0, len(src))
	for _, kv := range src {
		if strings.HasPrefix(kv, "GOPROXY=") {
			continue
		}
		if tc.mode == TestModeDep && runtime.GOOS == "darwin" && strings.HasPrefix(kv, "GOPATH=/var") {
			kv = strings.Replace(kv, "GOPATH=/var", "GOPATH=/private/var", 1)
		}
		env = append(env, kv)
	}
	return env
}
// SnapshotManifest compares the generated tools.go against its cupaloy
// snapshot under a "tools.go" subtest.
func (tc *TestContext) SnapshotManifest(t *testing.T) {
	t.Helper()
	t.Run("tools.go", func(t *testing.T) {
		manifest := filepath.Join(tc.rootDir(), "tools.go")
		data, err := ioutil.ReadFile(manifest)
		tc.checkErr(t, err)
		cupaloy.SnapshotT(t, string(data))
	})
}
// CheckBinaries asserts that the files present in the sandbox bin
// directory are exactly wantBins, order-insensitively (directories are
// ignored). Note wantBins is sorted in place.
func (tc *TestContext) CheckBinaries(t *testing.T, wantBins []string) {
	t.Helper() // added for consistency with the other test helpers
	files, err := ioutil.ReadDir(tc.binDir())
	tc.checkErr(t, err)
	var gotBins []string
	for _, f := range files {
		if f.IsDir() {
			continue
		}
		gotBins = append(gotBins, f.Name())
	}
	// Sort both sides so the comparison ignores ordering.
	sort.Strings(gotBins)
	sort.Strings(wantBins)
	if got, want := gotBins, wantBins; !reflect.DeepEqual(got, want) {
		t.Errorf("generated bins list is %v, want %v", got, want)
	}
}
// RemoveBinaries deletes the sandbox bin directory and everything in
// it, failing the test on error.
func (tc *TestContext) RemoveBinaries(t *testing.T) {
	tc.checkErr(t, os.RemoveAll(tc.binDir()))
}
// Bin returns the sandbox path of the named generated binary.
func (tc *TestContext) Bin(name string) string {
	return filepath.Join(tc.binDir(), name)
}
// ExecCmd runs the command in the sandbox, streaming both stdout and
// stderr into the test log via ExecCmdWithOut.
func (tc *TestContext) ExecCmd(t *testing.T, name string, args ...string) {
	tc.ExecCmdWithOut(t, name, args, NewTestWriter(t), NewTestWriter(t))
}
// ExecCmdWithOut runs name with args inside the sandbox root using the
// sandbox environment, streaming stdout and stderr to the supplied
// writers; any execution error fails the test.
func (tc *TestContext) ExecCmdWithOut(t *testing.T, name string, args []string, outW, errW io.Writer) {
	cmd := exec.Command(name, args...)
	cmd.Dir, cmd.Env = tc.rootDir(), tc.environ()
	cmd.Stdout, cmd.Stderr = outW, errW
	tc.checkErr(t, cmd.Run())
}
// checkErr fails the test immediately when err is non-nil.
func (tc *TestContext) checkErr(t *testing.T, err error) {
	t.Helper()
	if err == nil {
		return
	}
	t.Fatalf("unexpected error: %v", err)
}
|
package main
import (
"encoding/json"
log "github.com/Sirupsen/logrus"
"github.com/gorilla/websocket"
"github.com/satori/go.uuid"
)
// Conn represents one websocket signaling session and its outbound
// message queue.
// NOTE(review): the struct tags on the unexported fields (ws, outChan)
// are inert — encoding/json ignores unexported fields entirely.
type Conn struct {
	ws      *websocket.Conn `json:"-"`
	SessID  string          `json:"peerId"` // random session/peer id assigned by newConn
	Name    string          `json:"-"`      // display name, excluded from JSON
	Room    string          `json:"-"`      // room name
	outChan chan []byte     `json:"-"`      // channel for data to be written
}
// newConn wraps a websocket connection with a fresh random session id
// and a buffered outbound channel (capacity 2) drained by sendLoop.
func newConn(ws *websocket.Conn) *Conn {
	c := &Conn{
		ws:      ws,
		outChan: make(chan []byte, 2),
	}
	c.SessID = uuid.NewV4().String()
	return c
}
// String identifies the connection by its session id in logs.
func (c *Conn) String() string {
	return c.SessID
}
// mainLoop drives the connection: it starts the writer goroutine and
// blocks in ReadLoop until the socket errors or an invalid event
// arrives, after which cleanup notifies the peer's room.
func (c *Conn) mainLoop() {
	defer c.cleanup()
	go c.sendLoop()
	//go c.readPubsubLoop()
	c.ReadLoop()
}
// cleanup removes the connection from its room, if it joined one, and
// broadcasts a peer_leave_room event to the remaining members.
func (c *Conn) cleanup() {
	room := rooms.getRoom(c.Room)
	if room == nil {
		return
	}
	room.remove(c)
	leave := Event{
		Name: "peer_leave_room",
		Data: EventData{
			PeerID: c.SessID,
			Name:   c.Name,
		},
	}
	room.broadcast(c, &leave, true)
}
// send marshals evt to JSON and queues it for delivery; only a
// marshalling failure is reported (queueing itself cannot fail, though
// it may block — see sendRaw).
func (c *Conn) send(evt *Event) error {
	payload, err := json.Marshal(evt)
	if err != nil {
		return err
	}
	c.sendRaw(payload)
	return nil
}
// sendRaw queues b on the outbound channel. This blocks if the buffer
// (capacity 2) is full and sendLoop has fallen behind.
func (c *Conn) sendRaw(b []byte) {
	c.outChan <- b
}
// sendLoop drains the outbound channel, writing each payload as a text
// frame; it exits when outChan is closed. Write failures are now logged
// instead of silently dropped (the read side will notice a broken
// socket and tear the peer down).
func (c *Conn) sendLoop() {
	for b := range c.outChan {
		log.Debugf("sending =%v", string(b))
		if err := c.ws.WriteMessage(websocket.TextMessage, b); err != nil {
			log.Errorf("sendLoop(): write error. peer = %v, err = %v", c.String(), err)
		}
	}
}
// handlers maps incoming event names to their handler functions; an
// event name not present here causes ReadLoop to drop the connection.
var handlers = map[string]func(*Conn, Event, []byte){
	"join_room":     joinRoom,
	"offer":         handleOffer,
	"answer":        handleAnswer,
	"ice_candidate": handleIceCandidate,
}
// ReadLoop pumps messages from the websocket until a read error or an
// unknown event name occurs, dispatching each decoded event to its
// registered handler. Malformed JSON is logged and skipped; an unknown
// event terminates the loop (and so triggers cleanup in mainLoop).
func (c *Conn) ReadLoop() {
	for {
		// read message
		_, msg, err := c.ws.ReadMessage()
		if err != nil {
			log.Errorf("ReadLoop():read error. peer = %v, err = %v", c.String(), err)
			break
		}
		log.Infof("[ReadLoop] msg = %v", string(msg))
		// unmarshal
		var evt Event
		if err := json.Unmarshal(msg, &evt); err != nil {
			// typo fix: log message previously read "perr".
			log.Errorf("ReadLoop():failed to unmarshal peer = %v, message=%v, err=%v", c.String(), string(msg), err)
			continue
		}
		// handle
		fn, ok := handlers[evt.Name]
		if !ok {
			log.Errorf("invalid event:%v, peer = %v", evt.Name, c.String())
			break
		}
		fn(c, evt, msg)
	}
	log.Info("exiting ReadLoop")
}
|
/***************************************************************
*
* Copyright (c) 2016, Menglong TAN <tanmenglong@gmail.com>
*
* This program is free software; you can redistribute it
* and/or modify it under the terms of the BSD licence
*
**************************************************************/
/**
*
*
* @file memobird.go
* @author Menglong TAN <tanmenglong@gmail.com>
* @date Wed Mar 9 19:19:09 2016
*
**/
package memobird
import (
//"fmt"
)
//===================================================================
// Public APIs
//===================================================================
// Result is the outcome of a memobird API call.
// NOTE(review): semantics inferred from the field names only —
// presumably ReturnCode/ReturnMessage mirror the remote API's status
// fields and Data carries the payload lines; confirm against the
// memobird API documentation.
type Result struct {
	ReturnCode    int
	ReturnMessage string
	Data          []string
}
//===================================================================
// Private
//===================================================================
|
package engine
import (
"bytes"
"encoding/json"
"errors"
"fmt"
)
const (
	// telemetryInfoVersion1 tags the first (and currently only) wire
	// format, instanceTelemetryInfoV1.
	telemetryInfoVersion1 = "v1"
)
// ErrInvalidInstanceTelemetryInfo is returned (wrapped) when serialized
// telemetry info is missing its version, carries an unknown version, or
// fails strict decoding.
var ErrInvalidInstanceTelemetryInfo = errors.New("invalid instance telemetry info")
// InstanceTelemetryInfo keeps information useful to our telemetry logic.
type InstanceTelemetryInfo struct {
	Version string // to let us identify and correct outdated versions of this struct
	TraceID string // distributed-tracing trace id
	SpanID string // distributed-tracing span id
	CallPath string
	NamespaceName string
}
// MarshalJSON serializes the telemetry info in the v1 wire format. A
// nil receiver is valid and produces a v1 envelope with empty fields.
func (info *InstanceTelemetryInfo) MarshalJSON() ([]byte, error) {
	v1 := instanceTelemetryInfoV1{Version: telemetryInfoVersion1}
	if info != nil {
		v1.TraceID = info.TraceID
		v1.SpanID = info.SpanID
		v1.CallPath = info.CallPath
		v1.NamespaceName = info.NamespaceName
	}
	return json.Marshal(&v1)
}
// instanceTelemetryInfoV1 is the v1 JSON wire form of
// InstanceTelemetryInfo.
// NOTE(review): NamespaceName carries no json tag, so it marshals as
// "NamespaceName" while the other fields are snake_case. This looks
// accidental, but adding a tag now would make LoadInstanceTelemetryInfo
// (which decodes with DisallowUnknownFields) reject previously stored
// payloads — confirm the compatibility story before changing it.
type instanceTelemetryInfoV1 struct {
	Version       string `json:"version"`
	TraceID       string `json:"trace_id"`
	SpanID        string `json:"span_id"`
	CallPath      string `json:"call_path"`
	NamespaceName string
}
//nolint:dupl
// LoadInstanceTelemetryInfo deserializes telemetry info produced by
// MarshalJSON. It first decodes loosely into a map to discover the
// "version" key, then re-decodes strictly (unknown fields rejected)
// using the schema that matches the version. A missing or unknown
// version yields a wrapped ErrInvalidInstanceTelemetryInfo.
func LoadInstanceTelemetryInfo(data []byte) (*InstanceTelemetryInfo, error) {
	m := make(map[string]interface{})
	err := json.Unmarshal(data, &m)
	if err != nil {
		return nil, err
	}
	version, defined := m["version"]
	if !defined {
		return nil, fmt.Errorf("failed to load instance telemetry info: %w: missing version", ErrInvalidInstanceTelemetryInfo)
	}
	var info *InstanceTelemetryInfo
	// Second pass: strict decode of the full payload for the detected
	// version.
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	switch version {
	case telemetryInfoVersion1:
		var v1 instanceTelemetryInfoV1
		err = dec.Decode(&v1)
		if err != nil {
			return nil, fmt.Errorf("failed to load instance telemetry info: %w: %w", ErrInvalidInstanceTelemetryInfo, err)
		}
		info = &InstanceTelemetryInfo{
			Version:       v1.Version,
			TraceID:       v1.TraceID,
			SpanID:        v1.SpanID,
			CallPath:      v1.CallPath,
			NamespaceName: v1.NamespaceName,
		}
	default:
		return nil, fmt.Errorf("failed to load instance telemetry info: %w: unknown version", ErrInvalidInstanceTelemetryInfo)
	}
	return info, nil
}
|
package main
import (
"compress/gzip"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
)
// port is the TCP port the HTTP server listens on, taken from the
// optional "-port=" command-line argument (default 4500).
var port string
// main wires up the file-browser HTTP server: static assets at "/",
// JSON directory listings at /files/, raw file downloads at /getfile/,
// all gzip-compressed when the client supports it.
func main() {
	assetServer := http.NewServeMux() // Create a separate ServeMux for the asset files
	// BUG FIX: the original condition was len(os.Args) < 1, which is
	// never true (os.Args always contains the program name), so the
	// -port= argument was silently ignored.
	if len(os.Args) > 1 && strings.Contains(os.Args[1], "-port=") {
		port = strings.Replace(os.Args[1], "-port=", "", 1)
	} else {
		port = "4500" // The Port the server will run on
	}
	assetServer.Handle("/", http.StripPrefix("/", http.FileServer(http.Dir("./assets")))) // Handle and Serve Static files from the assetServer ServeMux
	http.HandleFunc("/", serveGZIP(func(res http.ResponseWriter, req *http.Request) {
		res.Header().Set("Cache-Control", "public ,max-age=0")
		if strings.Contains(req.URL.Path, ".") {
			assetServer.ServeHTTP(res, req)
		} else if req.URL.Path == "/" {
			readCopyServe("./assets/filebrowser.html", res, req)
		} else {
			errorCodeHandler(res, req, http.StatusNotFound)
		}
	}))
	http.HandleFunc("/files/", serveGZIP(func(res http.ResponseWriter, req *http.Request) {
		// "~!" is the client-side placeholder for spaces in paths.
		Dir := strings.Replace(req.URL.Query().Get("dir"), "~!", " ", -1)
		if exists(Dir) {
			res.Write(getDirInfoList(Dir))
		} else {
			res.Write([]byte("Invalid Path"))
		}
	}))
	http.HandleFunc("/getfile/", serveGZIP(func(res http.ResponseWriter, req *http.Request) {
		Dir := strings.Replace(req.URL.Query().Get("dir"), "~!", " ", -1)
		if Dir != "" && existsAS(Dir) == "file" {
			file, err := os.Open(Dir)
			if err != nil {
				errorCodeHandler(res, req, http.StatusNotFound)
				return // BUG FIX: previously fell through and copied from a failed open
			}
			io.Copy(res, file)
			file.Close()
		} else {
			res.Write([]byte("Invalid : " + existsAS(Dir)))
		}
	}))
	fmt.Println("Go File-Browser Running on Port: " + port)
	// Listen on Chosen Port and check for errors
	errHTTP := http.ListenAndServe(":"+port, nil)
	check(errHTTP)
}
// readCopyServe streams the named file to the client, responding with
// 404 when the file cannot be opened.
func readCopyServe(filename string, res http.ResponseWriter, req *http.Request) {
	file, err := os.Open(filename)
	if err != nil {
		errorCodeHandler(res, req, http.StatusNotFound)
		return // BUG FIX: previously fell through and copied from a failed open
	}
	defer file.Close()
	io.Copy(res, file)
}
// getLocalpath Simple function to get the Script/binarry's Current Directory path as a String
func getLocalpath() string {
localpath, err := os.Getwd()
if err != nil {
panic(err)
}
return localpath
}
func exists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
func existsAS(name string) string {
Stat, err := os.Stat(name)
if err == nil && !os.IsNotExist(err) {
if Stat.Mode().IsDir() {
return "directory"
} else if Stat.Mode().IsRegular() {
return "file"
}
}
return "invalid"
}
// FileInfo is the JSON-serialized summary of one directory entry
// returned by the /files/ listing endpoint.
type FileInfo struct {
	Name    string      // base name of the entry
	Size    int64       // length in bytes
	Mode    os.FileMode // permission and mode bits
	ModTime time.Time   // last modification time
	IsDir   bool        // true for directories
}
// getDirInfoList returns the JSON-encoded FileInfo list for every entry
// directly inside Directory, panicking (via check) on any failure.
func getDirInfoList(Directory string) []byte {
	dir, err := os.Open(Directory)
	check(err)
	defer dir.Close() // BUG FIX: the directory handle was never closed
	entries, err := dir.Readdir(0)
	check(err)
	// Pre-size the result; every entry produces one element.
	list := make([]FileInfo, 0, len(entries))
	for _, entry := range entries {
		list = append(list, FileInfo{
			Name:    entry.Name(),
			Size:    entry.Size(),
			Mode:    entry.Mode(),
			ModTime: entry.ModTime(),
			IsDir:   entry.IsDir(),
		})
	}
	output, err := json.Marshal(list)
	check(err)
	return output
}
func check(e error) {
if e != nil {
panic(e)
}
}
// gzipResponseWriter wraps a ResponseWriter so that bytes written by a
// handler pass through a gzip writer while headers still reach the
// underlying response.
type gzipResponseWriter struct {
	io.Writer           // the gzip.Writer payload bytes go through
	http.ResponseWriter // headers and status codes go straight through
}
// Write sets the Content-Type (sniffed from the uncompressed bytes) if
// none is set yet, then writes b through the gzip writer.
func (w gzipResponseWriter) Write(b []byte) (int, error) {
	if w.Header().Get("Content-Type") == "" {
		// Sniff the raw body: sniffing after compression would report
		// gzip instead of the real content type.
		w.Header().Set("Content-Type", http.DetectContentType(b))
	}
	return w.Writer.Write(b)
}
// serveGZIP wraps fn so that responses are gzip-compressed whenever the
// client advertises gzip support; otherwise fn runs unchanged.
func serveGZIP(fn http.HandlerFunc) http.HandlerFunc {
	return func(res http.ResponseWriter, req *http.Request) {
		accepted := req.Header.Get("Accept-Encoding")
		if !strings.Contains(accepted, "gzip") {
			fn(res, req)
			return
		}
		res.Header().Set("Content-Encoding", "gzip")
		gz := gzip.NewWriter(res)
		defer gz.Close()
		fn(gzipResponseWriter{Writer: gz, ResponseWriter: res}, req)
	}
}
func errorCodeHandler(res http.ResponseWriter, r *http.Request, status int) {
res.WriteHeader(status)
if status == http.StatusNotFound {
fmt.Fprint(res, "Error 404 , What you're lookin for... , well it ain't here")
}
}
|
package main
import (
"flag"
"io"
"log"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"strings"
".."
)
var (
	// stderr selects the ERROR log level: set when this agent instance
	// relays a process's stderr rather than its stdout.
	stderr = flag.Bool("stderr", false, "true if this logs messages from stderr instead of stdout")
	// x enables the logger's internal diagnostics.
	x = flag.Bool("X", false, "show internal logs")
)
// init parses the command-line flags before main runs.
// NOTE(review): flag.Parse in init is unconventional — flags registered
// by other packages' init functions may not exist yet at this point;
// confirm nothing else registers flags before relying on this.
func init() {
	flag.Parse()
}
// main reads log lines from stdin and ships them to CloudWatch Logs,
// using the Boxfuse environment variables to derive the log group
// (boxfuse/<env>) and stream (app) names. The -stderr flag tags the
// lines as ERROR instead of INFO.
func main() {
	version := "1.0.2"
	instance, _ := os.Hostname() // best effort; an empty instance name is tolerated
	envVar := os.Getenv("BOXFUSE_ENV")
	if envVar == "" {
		log.Fatal("Missing BOXFUSE_ENV environment variable")
	}
	env := "boxfuse/" + envVar
	image := os.Getenv("BOXFUSE_IMAGE_COORDINATES")
	if image == "" {
		log.Fatal("Missing BOXFUSE_IMAGE_COORDINATES environment variable")
	}
	app := strings.Split(image, ":")[0]
	// An explicit endpoint selects a fixed-region session with dummy
	// credentials, used for testing against a local endpoint.
	endpoint := os.Getenv("BOXFUSE_CLOUDWATCHLOGS_ENDPOINT")
	endpointMsg := "" // gofmt: stray trailing semicolons removed throughout
	var awsSession *session.Session
	if endpoint != "" {
		endpointMsg = " at " + endpoint
		awsSession = session.New(&aws.Config{Region: aws.String("us-east-1"), Credentials: credentials.NewStaticCredentials("dummy", "dummy", "")})
	} else {
		awsSession = session.New()
	}
	level := "INFO"
	if *stderr {
		level = "ERROR"
	}
	log.Println("Boxfuse CloudWatch Logs Agent " + version + " redirecting " + level + " logs for " + image + " to CloudWatch Logs" + endpointMsg + " (group: " + env + ", stream: " + app + ") ...")
	logger1, err := logger.NewLogger(awsSession, endpoint, env, app, level, time.Second, image, instance, x)
	if err != nil {
		log.Fatal(err)
	}
	// Pump stdin into the CloudWatch logger until EOF, then flush.
	if _, err := io.Copy(logger1, os.Stdin); err != nil {
		log.Println("copy err", err)
	}
	if err := logger1.Close(); err != nil {
		log.Println(err)
	}
	log.Println("Exiting...")
	os.Exit(0)
}
|
package rpc
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
"github.com/packethost/pkg/log/logr"
"github.com/philippgille/gokv"
"github.com/philippgille/gokv/freecache"
v1 "github.com/tinkerbell/pbnj/api/v1"
"github.com/tinkerbell/pbnj/grpc/persistence"
"github.com/tinkerbell/pbnj/grpc/taskrunner"
"github.com/tinkerbell/pbnj/pkg/logging"
"github.com/tinkerbell/pbnj/pkg/zaplog"
)
// tempIPMITool is the path of the empty executable stub written by
// setup when ipmitool is not installed, so PATH lookups still succeed.
const tempIPMITool = "/tmp/ipmitool"

var (
	log        logging.Logger     // shared test logger, set by setup
	ctx        context.Context    // base context carrying the zap logger
	taskRunner *taskrunner.Runner // background task runner under test
	bmcService BmcService         // service instance exercised by the tests
)
// TestMain builds the shared fixtures, runs the suite, removes the
// ipmitool stub, and exits with the suite's status code.
func TestMain(m *testing.M) {
	setup()
	code := m.Run()
	teardown()
	os.Exit(code)
}
// setup builds the package-level fixtures shared by every test: a
// zap-backed logger stored in ctx, an in-memory gokv store for task
// persistence, the task runner, and the BmcService under test. If
// ipmitool is not on PATH, an empty executable stub is written to /tmp
// and /tmp is appended to PATH so vendor probing succeeds.
func setup() {
	ctx = context.Background()
	// Logger construction errors are deliberately ignored; NewPacketLogr
	// is expected to succeed in the test environment.
	l, zapLogger, _ := logr.NewPacketLogr()
	log = zaplog.RegisterLogger(l)
	ctx = ctxzap.ToContext(ctx, zapLogger)
	f := freecache.NewStore(freecache.DefaultOptions)
	s := gokv.Store(f)
	repo := &persistence.GoKV{
		Store: s,
		Ctx:   ctx,
	}
	taskRunner = &taskrunner.Runner{
		Repository: repo,
		Ctx:        ctx,
		Log:        log,
	}
	bmcService = BmcService{
		Log:                    log,
		TaskRunner:             taskRunner,
		UnimplementedBMCServer: v1.UnimplementedBMCServer{},
	}
	_, err := exec.LookPath("ipmitool")
	if err != nil {
		// Write an empty, executable stub so later PATH lookups succeed.
		err := ioutil.WriteFile(tempIPMITool, []byte{}, 0o777)
		if err != nil {
			fmt.Println("didnt find ipmitool in PATH and couldnt create one in /tmp")
			os.Exit(3) //nolint:revive // deep-exit here is OK
		}
		path := os.Getenv("PATH")
		os.Setenv("PATH", fmt.Sprintf("%v:/tmp", path))
	}
}
// teardown removes the ipmitool stub created by setup, if one was written.
func teardown() {
	_ = os.Remove(tempIPMITool)
}
// TestConfigNetworkSource verifies that NetworkSource is currently
// unimplemented: it must return a nil response and the sentinel error.
func TestConfigNetworkSource(t *testing.T) {
	testCases := []struct {
		name        string
		req         *v1.NetworkSourceRequest
		message     string
		expectedErr error
	}{
		{
			name: "status good",
			req: &v1.NetworkSourceRequest{
				Authn: &v1.Authn{
					Authn: &v1.Authn_DirectAuthn{
						DirectAuthn: &v1.DirectAuthn{
							Host: &v1.Host{
								Host: "127.0.0.1",
							},
							Username: "ADMIN",
							Password: "ADMIN",
						},
					},
				},
				Vendor: &v1.Vendor{
					Name: "",
				},
				NetworkSource: 0,
			},
			message:     "good",
			expectedErr: errors.New("not implemented"),
		},
	}

	for _, tc := range testCases {
		testCase := tc
		t.Run(testCase.name, func(t *testing.T) {
			response, err := bmcService.NetworkSource(ctx, testCase.req)
			if response != nil {
				t.Fatalf("response should be nil, got: %v", response)
			}
			// Guard against a nil error before calling Error(); previously
			// this would have panicked if the method unexpectedly succeeded.
			if err == nil {
				t.Fatalf("expected error %q, got nil", testCase.expectedErr)
			}
			// Use the captured testCase consistently (the body previously
			// mixed testCase and the loop variable tc).
			if diff := cmp.Diff(testCase.expectedErr.Error(), err.Error()); diff != "" {
				t.Fatal(diff)
			}
		})
	}
}
// newResetRequest builds a ResetRequest against the "local" vendor.
// When authErr is true the host is left empty so request validation fails.
func newResetRequest(authErr bool) *v1.ResetRequest {
	host := "127.0.0.1"
	if authErr {
		host = ""
	}
	return &v1.ResetRequest{
		Authn: &v1.Authn{
			Authn: &v1.Authn_DirectAuthn{
				DirectAuthn: &v1.DirectAuthn{
					Host:     &v1.Host{Host: host},
					Username: "ADMIN",
					Password: "ADMIN",
				},
			},
		},
		Vendor: &v1.Vendor{
			Name: "local",
		},
		ResetKind: 0,
	}
}
// TestReset checks that a valid reset request yields a task id and that
// a request with an empty host fails validation with the expected error.
func TestReset(t *testing.T) {
	testCases := []struct {
		name        string
		expectedErr error
		in          *v1.ResetRequest
		out         *v1.ResetResponse
	}{
		{"success", nil, newResetRequest(false), &v1.ResetResponse{TaskId: ""}},
		{"missing auth err", errors.New("input arguments are invalid: invalid field Authn.DirectAuthn.Host.Host: value '' must not be an empty string"), newResetRequest(true), nil},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			response, err := bmcService.Reset(ctx, tc.in)
			switch {
			case err != nil:
				// Guard against a nil expectedErr before calling Error();
				// previously an unexpected error in the success case panicked.
				if tc.expectedErr == nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if diff := cmp.Diff(tc.expectedErr.Error(), err.Error()); diff != "" {
					t.Fatal(diff)
				}
			case tc.expectedErr != nil:
				// Previously a missing expected error passed silently.
				t.Fatalf("expected error %q, got nil", tc.expectedErr)
			case response.TaskId == "":
				t.Fatal("expected taskId, got:", response.TaskId)
			}
		})
	}
}
// newCreateUserRequest builds a CreateUserRequest against the "local"
// vendor; authErr leaves the host empty so request validation fails.
func newCreateUserRequest(authErr bool) *v1.CreateUserRequest {
	host := "127.0.0.1"
	if authErr {
		host = ""
	}
	return &v1.CreateUserRequest{
		Authn: &v1.Authn{
			Authn: &v1.Authn_DirectAuthn{
				DirectAuthn: &v1.DirectAuthn{
					Host:     &v1.Host{Host: host},
					Username: "ADMIN",
					Password: "ADMIN",
				},
			},
		},
		Vendor: &v1.Vendor{
			Name: "local",
		},
		UserCreds: &v1.UserCreds{
			Username: "",
			Password: "",
			UserRole: 0,
		},
	}
}
// TestCreateUser checks that a valid request yields a task id and that a
// request with an empty host fails validation with the expected error.
func TestCreateUser(t *testing.T) {
	testCases := []struct {
		name        string
		expectedErr error
		in          *v1.CreateUserRequest
		out         *v1.CreateUserResponse
	}{
		{"success", nil, newCreateUserRequest(false), &v1.CreateUserResponse{TaskId: ""}},
		{"missing auth err", errors.New("input arguments are invalid: invalid field Authn.DirectAuthn.Host.Host: value '' must not be an empty string"), newCreateUserRequest(true), nil},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			response, err := bmcService.CreateUser(ctx, tc.in)
			switch {
			case err != nil:
				// Guard against a nil expectedErr before calling Error();
				// previously an unexpected error in the success case panicked.
				if tc.expectedErr == nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if diff := cmp.Diff(tc.expectedErr.Error(), err.Error()); diff != "" {
					t.Fatal(diff)
				}
			case tc.expectedErr != nil:
				// Previously a missing expected error passed silently.
				t.Fatalf("expected error %q, got nil", tc.expectedErr)
			case response.TaskId == "":
				t.Fatal("expected taskId, got:", response.TaskId)
			}
		})
	}
}
// newUpdateUserRequest builds an UpdateUserRequest against the "local"
// vendor; authErr leaves the host empty so request validation fails.
func newUpdateUserRequest(authErr bool) *v1.UpdateUserRequest {
	host := "127.0.0.1"
	if authErr {
		host = ""
	}
	return &v1.UpdateUserRequest{
		Authn: &v1.Authn{
			Authn: &v1.Authn_DirectAuthn{
				DirectAuthn: &v1.DirectAuthn{
					Host:     &v1.Host{Host: host},
					Username: "ADMIN",
					Password: "ADMIN",
				},
			},
		},
		Vendor: &v1.Vendor{
			Name: "local",
		},
		UserCreds: &v1.UserCreds{
			Username: "",
			Password: "",
			UserRole: 0,
		},
	}
}
// TestUpdateUser checks that a valid request yields a task id and that a
// request with an empty host fails validation with the expected error.
func TestUpdateUser(t *testing.T) {
	testCases := []struct {
		name        string
		expectedErr error
		in          *v1.UpdateUserRequest
		out         *v1.UpdateUserResponse
	}{
		{"success", nil, newUpdateUserRequest(false), &v1.UpdateUserResponse{TaskId: ""}},
		{"missing auth err", errors.New("input arguments are invalid: invalid field Authn.DirectAuthn.Host.Host: value '' must not be an empty string"), newUpdateUserRequest(true), nil},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			response, err := bmcService.UpdateUser(ctx, tc.in)
			switch {
			case err != nil:
				// Guard against a nil expectedErr before calling Error();
				// previously an unexpected error in the success case panicked.
				if tc.expectedErr == nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if diff := cmp.Diff(tc.expectedErr.Error(), err.Error()); diff != "" {
					t.Fatal(diff)
				}
			case tc.expectedErr != nil:
				// Previously a missing expected error passed silently.
				t.Fatalf("expected error %q, got nil", tc.expectedErr)
			case response.TaskId == "":
				t.Fatal("expected taskId, got:", response.TaskId)
			}
		})
	}
}
// newDeleteUserRequest builds a DeleteUserRequest against the "local"
// vendor; authErr leaves the host empty so request validation fails.
func newDeleteUserRequest(authErr bool) *v1.DeleteUserRequest {
	host := "127.0.0.1"
	if authErr {
		host = ""
	}
	return &v1.DeleteUserRequest{
		Authn: &v1.Authn{
			Authn: &v1.Authn_DirectAuthn{
				DirectAuthn: &v1.DirectAuthn{
					Host:     &v1.Host{Host: host},
					Username: "ADMIN",
					Password: "ADMIN",
				},
			},
		},
		Vendor: &v1.Vendor{
			Name: "local",
		},
		Username: "blah",
	}
}
// TestDeleteUser checks that a valid request yields a task id and that a
// request with an empty host fails validation with the expected error.
func TestDeleteUser(t *testing.T) {
	testCases := []struct {
		name        string
		expectedErr error
		in          *v1.DeleteUserRequest
		// NOTE(review): out is unused and typed *v1.UpdateUserResponse —
		// looks copy-pasted from TestUpdateUser; confirm and retype.
		out *v1.UpdateUserResponse
	}{
		{"success", nil, newDeleteUserRequest(false), &v1.UpdateUserResponse{TaskId: ""}},
		{"missing auth err", errors.New("input arguments are invalid: invalid field Authn.DirectAuthn.Host.Host: value '' must not be an empty string"), newDeleteUserRequest(true), nil},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			response, err := bmcService.DeleteUser(ctx, tc.in)
			switch {
			case err != nil:
				// Guard against a nil expectedErr before calling Error();
				// previously an unexpected error in the success case panicked.
				if tc.expectedErr == nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if diff := cmp.Diff(tc.expectedErr.Error(), err.Error()); diff != "" {
					t.Fatal(diff)
				}
			case tc.expectedErr != nil:
				// Previously a missing expected error passed silently.
				t.Fatalf("expected error %q, got nil", tc.expectedErr)
			case response.TaskId == "":
				t.Fatal("expected taskId, got:", response.TaskId)
			}
		})
	}
}
|
package entity
// BlockInfoEntity mirrors the JSON shape of a blockchain block record.
// Field semantics follow the JSON tags; ID and BlockNum identify the
// block, the remaining fields carry producer and content metadata.
type BlockInfoEntity struct {
	Previous              string        `json:"previous"`
	Timestamp             string        `json:"timestamp"`
	TransactionMerkleRoot string        `json:"transaction_merkle_root"`
	Producer              string        `json:"producer"`
	ProducerChanges       []interface{} `json:"producer_changes"`
	ProducerSignature     string        `json:"producer_signature"`
	Cycles                []interface{} `json:"cycles"`
	ID                    string        `json:"id"`
	BlockNum              int           `json:"block_num"`
	RefBlockPrefix        int           `json:"ref_block_prefix"`
}
package main
import (
"fmt"
"regexp"
"github.com/qaisjp/go-discord-irc/irc/format"
)
// colorRegexRepl matches mIRC colour escapes: \x03 followed by optional
// foreground digits and an optional ,background part (or \x02\x02).
var colorRegexRepl = regexp.MustCompile(`\x03\d{0,2}(,\d{0,2}|\x02\x02)?`)

// msg is a sample message exercising bold, italic, colour, underline,
// reverse and reset IRC control characters.
var msg = "Hello, \u0002Wor\x1dld\u000304,07\x1d!\u000f My name is \x1fqais\x1f patankar. Testing reset\x1f\x1d\x02\x16ONETWO\x0fTHREE. And \x16reverse\x16!"
// main strips colour codes from the sample message, then prints the
// parsed format blocks followed by the markdown rendering.
func main() {
	stripped := colorRegexRepl.ReplaceAllString(msg, "")

	// fmt.Print keeps the original output byte-for-byte while avoiding
	// go vet's "Println arg list ends with redundant newline" warning.
	fmt.Print("Blocks:\n\n")
	for _, block := range ircf.Parse(stripped) {
		fmt.Printf("%+v\n", *block)
	}

	fmt.Print("\nMarkdown:\n\n")
	fmt.Println(ircf.IRCToMarkdown(stripped))
}
|
package main
// Configuration management for the http server part
import (
"encoding/json"
"os"
)
// Main configuration struct
type Configuration struct {
	// IP on which the webserver should listen on. By default it's 127.0.0.1.
	// Name in the config file: listener_ip
	IP string `json:"listener_ip"`
	// Port on which the webserver should listen on. By default it's 8080.
	// Name in the config file: listener_port
	Port uint16 `json:"listener_port"`
	// Comma-separated list of IPs to only listen from. Queries from other
	// IPs will be discarded. Empty to serve all the clients without restrictions,
	// which is the default value.
	// For the list of the current IPs Catchpoint is pushing from, see:
	// https://support.catchpoint.com/hc/en-us/articles/202459889-Alerts-API
	// Name in the config file: authorized_ips
	AuthIPs string `json:"authorized_ips"`
	// The number of concurrent CPUs the HTTP server is allowed to use.
	// This sets GOMAXPROCS at the run time.
	// If the GOMAXPROCS environment variable is set and to a bigger value than
	// this number, the value of the environment variable will be taken.
	// For more details, see https://golang.org/src/runtime/debug.go?s=995:1021#L16
	// By default this configuration value is set to 1.
	// Name in the config file: max_procs
	Procs int `json:"max_procs"`
	// Path to the log file you want to have the output to. Keep it empty if you
	// want to log to the console.
	// Defaults to empty so console logging.
	LogFile string `json:"log_file"`
	// Endpoints list specifying which plugin should handle which endpoint
	Endpoints []Endpoint `json:"endpoints"`
	// Emitter. Listener to give results back.
	// Contains: whether the emitter is enabled, templateDir,
	// templateName, listenerURI.
	Emitter Emitter `json:"emitter"`
	// Configuration of the nsca plugin
	NSCA Nsca `json:"nsca"`
}

// Endpoint maps one URI path to the plugin that handles it.
type Endpoint struct {
	// The definition of the Path of the endpoint (for example "/catchpoint/alerts")
	URIPath string `json:"uri_path"`
	// The name of the plugin that is supposed to handle this endpoint.
	// Currently supported values:
	// - catchpoint_alerts
	PluginName string `json:"plugin_name"`
}

// Nsca holds the configuration of the NSCA alert-forwarding plugin.
type Nsca struct {
	// Whether or not we want to send alerts with this method. If empty the default
	// value will be false (as it is the default value of a boolean in Go)
	Enabled bool `json:"enabled"`
	// The name of the NSCA server to send the data to. No default value.
	Server string `json:"server"`
	// Full path of the send_nsca command on the system.
	// Defaults to "/usr/sbin/send_nsca"
	OsCommand string `json:"os_command_path"`
	// Configuration file path for the send_nsca command.
	// Defaults to "/etc/send_nsca.cfg"
	ConfigFile string `json:"config_file"`
	// The name of the host you want to use when sending the nsca messages.
	// Defaults to the local hostname.
	ClientHost string `json:"client_host"`
}

// Emitter configures the listener used to give results back.
type Emitter struct {
	// Whether or not to switch on the alerts emitter for TM Health
	Enabled bool `json:"enabled"`
	// Queue for checks.
	// Actually a placeholder for host value. Defaults to "Catchpoint".
	Queue string `json:"queue"`
	// URIs for health check emission. Defaults to a single "/api/reports" entry.
	URI []Listener `json:"uri"`
	// Directory for templates.
	// Templates are used for TM Health check
	TemplateDir string `json:"template_dir"`
	// Template name
	Template string `json:"template"`
}

// Listener describes one emitter endpoint.
type Listener struct {
	// Path to the emitter endpoint
	// For example, /catchpoint/health
	URIPath string `json:"uri_path"`
}
// loadConfig reads the JSON configuration file at confFilePath into cfg
// and then fills in default values for any unset fields. It returns an
// error if the file cannot be opened, decoded, or (when client_host is
// unset) the local hostname cannot be determined.
func (cfg *Configuration) loadConfig(confFilePath string) error {
	file, oserr := os.Open(confFilePath)
	if oserr != nil {
		return oserr
	}
	// Previously the file handle was leaked; close it when done.
	defer file.Close()

	if err := json.NewDecoder(file).Decode(cfg); err != nil {
		return err
	}

	// Loading the default configurations
	if cfg.IP == "" {
		cfg.IP = "127.0.0.1"
	}
	if cfg.Port == 0 {
		cfg.Port = 8080
	}
	if cfg.Procs == 0 {
		cfg.Procs = 1
	}
	if len(cfg.NSCA.ClientHost) == 0 {
		// Previously this branch returned immediately (even on success),
		// skipping every default below it. Only bail out on a real error.
		host, err := os.Hostname()
		if err != nil {
			return err
		}
		cfg.NSCA.ClientHost = host
	}
	if len(cfg.NSCA.OsCommand) == 0 {
		cfg.NSCA.OsCommand = "/usr/sbin/send_nsca"
	}
	if len(cfg.NSCA.ConfigFile) == 0 {
		cfg.NSCA.ConfigFile = "/etc/send_nsca.cfg"
	}
	if len(cfg.Emitter.Queue) == 0 {
		cfg.Emitter.Queue = "Catchpoint"
	}
	if len(cfg.Emitter.URI) == 0 {
		u := Listener{URIPath: "/api/reports"}
		cfg.Emitter.URI = append(cfg.Emitter.URI, u)
	}
	return nil
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"github.com/phayes/freeport"
"github.com/pingcap/log"
"go.uber.org/zap"
)
// count is the number of free ports to find and print, set via -count.
var (
	count uint
)

// init registers the command-line flags before main calls flag.Parse.
func init() {
	flag.UintVar(&count, "count", 1, "number of generated ports")
}
// generatePorts asks the OS for count currently-unused ports, aborting
// the process if not enough free ports are available.
func generatePorts(count int) []int {
	ports, err := freeport.GetFreePorts(count)
	if err != nil {
		log.Fatal("no more free ports", zap.Error(err))
	}
	return ports
}
// main parses flags and prints one free port per line.
func main() {
	flag.Parse()
	ports := generatePorts(int(count))
	for _, p := range ports {
		fmt.Println(p)
	}
}
|
package cluster
import (
"fmt"
"github.com/nokamoto/grpc-proxy/codec"
"github.com/nokamoto/grpc-proxy/server"
"github.com/nokamoto/grpc-proxy/yaml"
"golang.org/x/net/context"
"google.golang.org/grpc"
"sync"
)
// roundRobin is a Cluster implementation that rotates calls across its
// proxies in order.
type roundRobin struct {
	mu      sync.Mutex // guards next
	proxies []*proxy   // backends; never empty (enforced by NewRoundRobin)
	next    int        // index of the most recently used proxy
}
// NewRoundRobin returns Cluster with round robin load balancing.
func NewRoundRobin(c yaml.Cluster) (Cluster, error) {
	var proxies []*proxy
	for _, address := range c.RoundRobin {
		p, err := newProxy(address)
		if err != nil {
			return nil, err
		}
		proxies = append(proxies, p)
	}
	if len(proxies) == 0 {
		return nil, fmt.Errorf("cluster %s empty round robin", c.Name)
	}
	return &roundRobin{proxies: proxies}, nil
}
// nextProxy advances the rotation and returns the proxy to use,
// serialised by the mutex so concurrent callers get distinct backends.
func (c *roundRobin) nextProxy() *proxy {
	c.mu.Lock()
	defer c.mu.Unlock()
	idx := (c.next + 1) % len(c.proxies)
	c.next = idx
	return c.proxies[idx]
}
// InvokeUnary forwards a unary call to the next proxy in rotation.
func (c *roundRobin) InvokeUnary(ctx context.Context, m *codec.RawMessage, method string) (*codec.RawMessage, error) {
	return c.nextProxy().invokeUnary(ctx, m, method)
}

// InvokeStreamC forwards the stream to the next proxy in rotation.
func (c *roundRobin) InvokeStreamC(stream server.RawServerStreamC, desc *grpc.StreamDesc, method string) error {
	return c.nextProxy().invokeStreamC(stream, desc, method)
}

// InvokeStreamS forwards the stream to the next proxy in rotation.
func (c *roundRobin) InvokeStreamS(stream server.RawServerStreamS, desc *grpc.StreamDesc, method string) error {
	return c.nextProxy().invokeStreamS(stream, desc, method)
}

// InvokeStreamB forwards the stream to the next proxy in rotation.
func (c *roundRobin) InvokeStreamB(stream server.RawServerStreamB, desc *grpc.StreamDesc, method string) error {
	return c.nextProxy().invokeStreamB(stream, desc, method)
}
|
package kit
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// FatalOn be careful with it in production,
// since it uses os.Exit(1) which affects the control flow.
// use pattern:
//  if err != nil {
//  ....
//  }
func FatalOn(err error, str ...string) {
	if err == nil {
		return
	}
	_, fn, line, _ := runtime.Caller(1)
	log.Fatalf("%s:%d %s -> %s", fn, line, str, err.Error())
}
// B2S converts b to a string with all leading and trailing white
// space removed, as defined by Unicode.
func B2S(b []byte) string {
	s := string(b)
	return strings.TrimSpace(s)
}
// FindExecutable looks for fileName first inside dir, then on the PATH
// environment variable, and returns its absolute path. It returns an
// error when the file is found in neither location.
func FindExecutable(fileName string, dir string) (string, error) {
	dirPath, err := filepath.Abs(dir)
	if err != nil {
		return dirPath, err
	}
	// filepath.Join builds an OS-correct path; the previous manual
	// separator concatenation was not.
	bin, err := exec.LookPath(filepath.Join(dir, fileName))
	if err != nil {
		bin, err = exec.LookPath(fileName)
		if err != nil {
			return "", fmt.Errorf("%s binary not found in PATH or %s", fileName, dirPath)
		}
	}
	return filepath.Abs(bin)
}
|
/**
* Author: Admiral Helmut
* Created: 12.06.2019
*
* (C)
**/
package classes
import "time"
// Firmware models one firmware record, carrying identity, binwalk
// analysis output, size and creation time.
// NOTE(review): all db-tagged fields except Created are unexported;
// reflection-based DB mappers generally cannot populate unexported
// fields — confirm how these structs are actually scanned.
type Firmware struct {
	firmware_id   int       `db:"firmware_id"`
	name          string    `db:"name"`
	version       string    `db:"version"`
	binwalkOutput string    `db:"binwalkOutput"`
	sizeInBytes   int       `db:"sizeInBytes"`
	project_id    int       `db:"project_id"`
	Created       time.Time `db:"created"`
	msg           string // transient message, no db tag; not persisted
}
// Accessors for Firmware fields. NewFirmware populates every persisted
// field; the transient msg field stays empty until SetMsg is called.

// Msg returns the transient message attached to this firmware.
func (f *Firmware) Msg() string {
	return f.msg
}

// SetMsg attaches a transient message to this firmware.
func (f *Firmware) SetMsg(msg string) {
	f.msg = msg
}

// NewFirmware builds a Firmware with all persisted fields set.
func NewFirmware(firmware_id int, name string, version string, binwalkOutput string, sizeInBytes int, project_id int, created time.Time) *Firmware {
	return &Firmware{firmware_id: firmware_id, name: name, version: version, binwalkOutput: binwalkOutput, sizeInBytes: sizeInBytes, project_id: project_id, Created: created}
}

// Firmware_id returns the firmware's database id.
func (f *Firmware) Firmware_id() int {
	return f.firmware_id
}

// SetFirmware_id sets the firmware's database id.
func (f *Firmware) SetFirmware_id(firmware_id int) {
	f.firmware_id = firmware_id
}

// SizeInBytes returns the firmware image size in bytes.
func (f *Firmware) SizeInBytes() int {
	return f.sizeInBytes
}

// SetSizeInBytes sets the firmware image size in bytes.
func (f *Firmware) SetSizeInBytes(sizeInBytes int) {
	f.sizeInBytes = sizeInBytes
}

// BinwalkOutput returns the stored binwalk analysis output.
func (f *Firmware) BinwalkOutput() string {
	return f.binwalkOutput
}

// SetBinwalkOutput stores the binwalk analysis output.
func (f *Firmware) SetBinwalkOutput(binwalkOutput string) {
	f.binwalkOutput = binwalkOutput
}

// Name returns the firmware name.
func (f *Firmware) Name() string {
	return f.name
}

// SetName sets the firmware name.
func (f *Firmware) SetName(name string) {
	f.name = name
}

// Version returns the firmware version string.
func (f *Firmware) Version() string {
	return f.version
}

// SetVersion sets the firmware version string.
func (f *Firmware) SetVersion(version string) {
	f.version = version
}

// SetCreated sets the creation timestamp.
func (f *Firmware) SetCreated(created time.Time) {
	f.Created = created
}
|
package tracing
import (
"testing"
"github.com/DavidCai1993/request"
"github.com/opentracing/basictracer-go"
"github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/assert"
"github.com/teambition/gear"
"github.com/teambition/gear/middleware/requestid"
)
// TestGearSession verifies that the tracing middleware exposes an
// opentracing span through the request context and that the request
// still completes with the handler's status code.
func TestGearSession(t *testing.T) {
	t.Run("should work", func(t *testing.T) {
		assert := assert.New(t)
		// Install an in-memory tracer so span creation needs no backend.
		opentracing.SetGlobalTracer(basictracer.New(basictracer.NewInMemoryRecorder()))
		app := gear.New()
		app.Use(requestid.New())

		router := gear.NewRouter()
		router.Use(New())
		router.Get("/", func(ctx *gear.Context) error {
			// The middleware must have stored a span on the context.
			span := opentracing.SpanFromContext(ctx)
			assert.NotNil(span)
			span.SetTag("testing", "testing")
			return ctx.End(204)
		})
		app.UseHandler(router)
		srv := app.Start()
		defer srv.Close()

		url := "http://" + srv.Addr().String()
		res, err := request.Get(url).End()
		assert.Nil(err)
		assert.Equal(204, res.StatusCode)
	})
}
|
// Copyright © 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package standard
import (
"context"
"crypto/sha256"
"encoding/binary"
"fmt"
"sync"
"time"
eth2client "github.com/attestantio/go-eth2-client"
"github.com/attestantio/go-eth2-client/spec/altair"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/attestantio/vouch/services/accountmanager"
"github.com/attestantio/vouch/services/chaintime"
"github.com/attestantio/vouch/services/metrics"
"github.com/attestantio/vouch/services/signer"
"github.com/attestantio/vouch/services/submitter"
"github.com/attestantio/vouch/services/synccommitteeaggregator"
"github.com/attestantio/vouch/services/synccommitteemessenger"
"github.com/pkg/errors"
"github.com/rs/zerolog"
zerologger "github.com/rs/zerolog/log"
e2wtypes "github.com/wealdtech/go-eth2-wallet-types/v2"
"golang.org/x/sync/semaphore"
)
// Service is a sync committee messenger (the previous comment said
// "beacon block attester", which contradicts both New's documentation
// and the "synccommitteemessenger" service name used for logging).
type Service struct {
	monitor                           metrics.SyncCommitteeMessageMonitor
	processConcurrency                int64 // max parallel signing goroutines in Message
	slotsPerEpoch                     uint64
	syncCommitteeSize                 uint64
	syncCommitteeSubnetCount          uint64
	targetAggregatorsPerSyncCommittee uint64
	chainTimeService                  chaintime.Service
	syncCommitteeAggregator           synccommitteeaggregator.Service
	validatingAccountsProvider        accountmanager.ValidatingAccountsProvider
	beaconBlockRootProvider           eth2client.BeaconBlockRootProvider
	syncCommitteeMessagesSubmitter    submitter.SyncCommitteeMessagesSubmitter
	syncCommitteeSelectionSigner      signer.SyncCommitteeSelectionSigner
	syncCommitteeRootSigner           signer.SyncCommitteeRootSigner
}
// module-wide log; configured by New with the service name and level.
var log zerolog.Logger
// New creates a new sync committee messenger. It validates the supplied
// parameters, configures the module logger, pulls the required numeric
// constants from the chain spec, and returns the assembled Service.
func New(ctx context.Context, params ...Parameter) (*Service, error) {
	parameters, err := parseAndCheckParameters(params...)
	if err != nil {
		return nil, errors.Wrap(err, "problem with parameters")
	}

	// Set logging.
	log = zerologger.With().Str("service", "synccommitteemessenger").Str("impl", "standard").Logger()
	if parameters.logLevel != log.GetLevel() {
		log = log.Level(parameters.logLevel)
	}

	// Fetch the spec constants this service depends on.
	spec, err := parameters.specProvider.Spec(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain spec")
	}
	slotsPerEpoch, err := specUint64(spec, "SLOTS_PER_EPOCH")
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain SLOTS_PER_EPOCH from spec")
	}
	syncCommitteeSize, err := specUint64(spec, "SYNC_COMMITTEE_SIZE")
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain SYNC_COMMITTEE_SIZE from spec")
	}
	syncCommitteeSubnetCount, err := specUint64(spec, "SYNC_COMMITTEE_SUBNET_COUNT")
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain SYNC_COMMITTEE_SUBNET_COUNT from spec")
	}
	targetAggregatorsPerSyncCommittee, err := specUint64(spec, "TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE")
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE from spec")
	}

	s := &Service{
		monitor:                           parameters.monitor,
		processConcurrency:                parameters.processConcurrency,
		slotsPerEpoch:                     slotsPerEpoch,
		syncCommitteeSize:                 syncCommitteeSize,
		syncCommitteeSubnetCount:          syncCommitteeSubnetCount,
		targetAggregatorsPerSyncCommittee: targetAggregatorsPerSyncCommittee,
		chainTimeService:                  parameters.chainTimeService,
		syncCommitteeAggregator:           parameters.syncCommitteeAggregator,
		validatingAccountsProvider:        parameters.validatingAccountsProvider,
		beaconBlockRootProvider:           parameters.beaconBlockRootProvider,
		syncCommitteeMessagesSubmitter:    parameters.syncCommitteeMessagesSubmitter,
		syncCommitteeSelectionSigner:      parameters.syncCommitteeSelectionSigner,
		syncCommitteeRootSigner:           parameters.syncCommitteeRootSigner,
	}

	return s, nil
}
// Prepare prepares in advance of a sync committee message, deciding for
// each of the duty's validators whether it is an aggregator on each of
// its subcommittees and recording the selection proofs on the duty.
func (s *Service) Prepare(ctx context.Context, data interface{}) error {
	started := time.Now()

	duty, ok := data.(*synccommitteemessenger.Duty)
	if !ok {
		// duty is nil after a failed type assertion, so it must not be
		// dereferenced here (the previous code called duty.ValidatorIndices()).
		s.monitor.SyncCommitteeMessagesCompleted(started, 0, "failed")
		return errors.New("passed invalid data structure")
	}

	// Decide if we are an aggregator.
	for _, validatorIndex := range duty.ValidatorIndices() {
		// Deduplicate subcommittees across this validator's contribution indices.
		subcommittees := make(map[uint64]bool)
		for _, contributionIndex := range duty.ContributionIndices()[validatorIndex] {
			subcommittee := uint64(contributionIndex) / (s.syncCommitteeSize / s.syncCommitteeSubnetCount)
			subcommittees[subcommittee] = true
		}
		for subcommittee := range subcommittees {
			isAggregator, sig, err := s.isAggregator(ctx, duty.Account(validatorIndex), duty.Slot(), subcommittee)
			if err != nil {
				return errors.Wrap(err, "failed to calculate if this is an aggregator")
			}
			if isAggregator {
				duty.SetAggregatorSubcommittees(validatorIndex, subcommittee, sig)
			}
		}
	}

	return nil
}
// Message generates and broadcasts sync committee messages for a slot.
// It returns a list of messages made.
func (s *Service) Message(ctx context.Context, data interface{}) ([]*altair.SyncCommitteeMessage, error) {
	started := time.Now()

	duty, ok := data.(*synccommitteemessenger.Duty)
	if !ok {
		// duty is nil after a failed type assertion, so it must not be
		// dereferenced here (the previous code called duty.ValidatorIndices()).
		s.monitor.SyncCommitteeMessagesCompleted(started, 0, "failed")
		return nil, errors.New("passed invalid data structure")
	}

	// Fetch the beacon block root.
	beaconBlockRoot, err := s.beaconBlockRootProvider.BeaconBlockRoot(ctx, "head")
	if err != nil {
		s.monitor.SyncCommitteeMessagesCompleted(started, len(duty.ValidatorIndices()), "failed")
		return nil, errors.Wrap(err, "failed to obtain beacon block root")
	}
	log.Trace().Dur("elapsed", time.Since(started)).Msg("Obtained beacon block root")
	s.syncCommitteeAggregator.SetBeaconBlockRoot(duty.Slot(), *beaconBlockRoot)

	// Sign in parallel, bounded by processConcurrency.
	msgs := make([]*altair.SyncCommitteeMessage, 0, len(duty.ContributionIndices()))
	var msgsMu sync.Mutex
	validatorIndices := make([]phase0.ValidatorIndex, 0, len(duty.ContributionIndices()))
	for validatorIndex := range duty.ContributionIndices() {
		validatorIndices = append(validatorIndices, validatorIndex)
	}
	sem := semaphore.NewWeighted(s.processConcurrency)
	var wg sync.WaitGroup
	for i := range validatorIndices {
		wg.Add(1)
		go func(ctx context.Context,
			sem *semaphore.Weighted,
			wg *sync.WaitGroup,
			i int,
		) {
			defer wg.Done()
			// Acquire the semaphore so at most processConcurrency signing
			// operations run at once; previously the semaphore was created
			// but never used, leaving concurrency unbounded.
			if err := sem.Acquire(ctx, 1); err != nil {
				log.Error().Err(err).Msg("Failed to acquire semaphore")
				return
			}
			defer sem.Release(1)
			sig, err := s.contribute(ctx, duty.Account(validatorIndices[i]), s.chainTimeService.SlotToEpoch(duty.Slot()), *beaconBlockRoot)
			if err != nil {
				log.Error().Err(err).Msg("Failed to sign sync committee message")
				return
			}
			log.Trace().Uint64("slot", uint64(duty.Slot())).Uint64("validator_index", uint64(validatorIndices[i])).Str("signature", fmt.Sprintf("%#x", sig)).Msg("Signed sync committee message")
			msg := &altair.SyncCommitteeMessage{
				Slot:            duty.Slot(),
				BeaconBlockRoot: *beaconBlockRoot,
				ValidatorIndex:  validatorIndices[i],
				Signature:       sig,
			}
			msgsMu.Lock()
			msgs = append(msgs, msg)
			msgsMu.Unlock()
		}(ctx, sem, &wg, i)
	}
	wg.Wait()

	if err := s.syncCommitteeMessagesSubmitter.SubmitSyncCommitteeMessages(ctx, msgs); err != nil {
		log.Trace().Dur("elapsed", time.Since(started)).Err(err).Msg("Failed to submit sync committee messages")
		s.monitor.SyncCommitteeMessagesCompleted(started, len(msgs), "failed")
		return nil, errors.Wrap(err, "failed to submit sync committee messages")
	}
	log.Trace().Dur("elapsed", time.Since(started)).Msg("Submitted sync committee messages")

	return msgs, nil
}
// contribute signs the given beacon block root for the account at the
// given epoch, returning the resulting sync committee message signature.
func (s *Service) contribute(ctx context.Context,
	account e2wtypes.Account,
	epoch phase0.Epoch,
	root phase0.Root,
) (
	phase0.BLSSignature,
	error,
) {
	sig, err := s.syncCommitteeRootSigner.SignSyncCommitteeRoot(ctx, account, epoch, root)
	if err != nil {
		return phase0.BLSSignature{}, err
	}
	// Return an explicit nil rather than the known-nil err.
	return sig, nil
}
// isAggregator decides whether the account is a sync committee aggregator
// for the slot and subcommittee: it signs the selection, hashes the
// signature, and tests the first 8 bytes of the digest against the modulo
// derived from the committee size and aggregator target. It returns the
// decision together with the selection signature.
func (s *Service) isAggregator(ctx context.Context, account e2wtypes.Account, slot phase0.Slot, subcommitteeIndex uint64) (bool, phase0.BLSSignature, error) {
	modulo := s.syncCommitteeSize / s.syncCommitteeSubnetCount / s.targetAggregatorsPerSyncCommittee
	if modulo < 1 {
		modulo = 1
	}

	// Sign the slot.
	signature, err := s.syncCommitteeSelectionSigner.SignSyncCommitteeSelection(ctx, account, slot, subcommitteeIndex)
	if err != nil {
		return false, phase0.BLSSignature{}, errors.Wrap(err, "failed to sign the slot")
	}

	// Hash the signature. sha256.Sum256 replaces the previous
	// hash.Hash.Write sequence and its error checks: per the hash package
	// documentation, Write never returns an error, so those branches were dead.
	hash := sha256.Sum256(signature[:])
	return binary.LittleEndian.Uint64(hash[:8])%modulo == 0, signature, nil
}
// specUint64 fetches the named item from the chain spec, ensuring it is
// present and carries a uint64 value.
func specUint64(spec map[string]interface{}, item string) (uint64, error) {
	tmp, exists := spec[item]
	if !exists {
		return 0, fmt.Errorf("%s not found in spec", item)
	}
	if val, ok := tmp.(uint64); ok {
		return val, nil
	}
	return 0, fmt.Errorf("%s of unexpected type", item)
}
|
package ginkgotest_test
import (
"bytes"
"testing"
"github.com/mumoshu/gosh"
"github.com/mumoshu/gosh/examples/ginkgotest"
"github.com/mumoshu/gosh/goshtest"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// app is the shell application under test; tt holds the suite's *testing.T
// so the JustBeforeEach hook below can pass it to app.Run.
var app *gosh.Shell
var tt *testing.T
// TestAcc boots the example app and hands control to Ginkgo, which runs
// the specs registered below against it.
func TestAcc(t *testing.T) {
	app = ginkgotest.New()

	goshtest.Run(t, app, func() {
		tt = t
		RegisterFailHandler(Fail)
		RunSpecs(t, "Your App's Suite")
	})
}
// Specs for the "hello" command: each Context sets up the arguments in
// BeforeEach, JustBeforeEach runs the command once, and the It blocks
// assert on the captured stdout and error.
var _ = Describe("Your App", func() {
	var (
		config struct {
			cmd  string
			args []interface{}
		}
		err    error
		stdout string
	)

	JustBeforeEach(func() {
		// This doesn't work as then we have no way to "hook" into the test framework
		// for handling indirectly run commands.
		//
		// sh := ginkgotest.New()
		var stdoutBuf bytes.Buffer
		var args []interface{}
		args = append(args, tt)
		args = append(args, config.cmd)
		args = append(args, config.args...)
		args = append(args, gosh.WriteStdout(&stdoutBuf))
		err = app.Run(args...)
		stdout = stdoutBuf.String()
	})

	Describe("hello", func() {
		BeforeEach(func() {
			config.cmd = "hello"
		})

		Context("world", func() {
			BeforeEach(func() {
				config.args = []interface{}{"world"}
			})

			It("should output \"hello world\"", func() {
				Expect(stdout).To(Equal("hello world\n"))
			})

			It("should not error", func() {
				Expect(err).ToNot(HaveOccurred())
			})
		})

		Context("sekai", func() {
			BeforeEach(func() {
				config.args = []interface{}{"sekai"}
			})

			It("should output \"hello sekai\"", func() {
				Expect(stdout).To(Equal("hello sekai\n"))
			})

			It("should not error", func() {
				Expect(err).ToNot(HaveOccurred())
			})
		})
	})
})
|
package acmt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00100107 is the XML document wrapper for the acmt.001.001.07
// AccountOpeningInstruction message.
type Document00100107 struct {
	XMLName xml.Name                      `xml:"urn:iso:std:iso:20022:tech:xsd:acmt.001.001.07 Document"`
	Message *AccountOpeningInstructionV07 `xml:"AcctOpngInstr"`
}
// AddMessage allocates a fresh AccountOpeningInstructionV07, attaches it
// to the document, and returns it for further population.
func (d *Document00100107) AddMessage() *AccountOpeningInstructionV07 {
	msg := new(AccountOpeningInstructionV07)
	d.Message = msg
	return msg
}
// Scope
// The AccountOpeningInstruction message is sent by an account owner, for example, an investor or its designated agent to the account servicer, for example, a registrar, transfer agent, custodian or securities depository, to instruct the opening of an account or the opening of an account and the establishment of an investment plan.
// Usage
// The AccountOpeningInstruction is used to open an account directly or indirectly with the account servicer or an intermediary.
// In some markets, for example, Australia, and for some products in the United Kingdom, a first order (also known as a deposit instruction) is placed at the same time as the account opening. To cater for this scenario, an order message can be linked (via references in the message) to the AccountOpeningInstruction message when needed.
// Execution of the AccountOpeningInstruction is confirmed via an AccountDetailsConfirmation message.
type AccountOpeningInstructionV07 struct {
	// Reference that uniquely identifies the message from a business application standpoint.
	MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"`
	// Identifies a related order or settlement transaction.
	OrderReference *iso20022.InvestmentFundOrder4 `xml:"OrdrRef,omitempty"`
	// Reference to a linked message that was previously sent.
	PreviousReference *iso20022.AdditionalReference6 `xml:"PrvsRef,omitempty"`
	// Information about the opening instruction.
	InstructionDetails *iso20022.InvestmentAccountOpening3 `xml:"InstrDtls"`
	// Detailed information about the account to be opened.
	InvestmentAccount *iso20022.InvestmentAccount61 `xml:"InvstmtAcct"`
	// Information related to parties that are related to the account, for example, primary account owner.
	AccountParties *iso20022.AccountParties15 `xml:"AcctPties"`
	// Intermediary or other party related to the management of the account.
	Intermediaries []*iso20022.Intermediary36 `xml:"Intrmies,omitempty"`
	// Referral information.
	Placement *iso20022.ReferredAgent2 `xml:"Plcmnt,omitempty"`
	// Eligibility conditions applicable when there is an allocation of new issues for hedge fund account opening.
	NewIssueAllocation *iso20022.NewIssueAllocation2 `xml:"NewIsseAllcn,omitempty"`
	// Plan that allows individuals to set aside a fixed amount of money at specified intervals, usually for a special purpose, for example, retirement.
	SavingsInvestmentPlan []*iso20022.InvestmentPlan14 `xml:"SvgsInvstmtPlan,omitempty"`
	// Plan through which holdings are depleted through regular withdrawals at specified intervals.
	WithdrawalInvestmentPlan []*iso20022.InvestmentPlan14 `xml:"WdrwlInvstmtPlan,omitempty"`
	// Cash settlement standing instruction associated to transactions on the account.
	CashSettlement []*iso20022.CashSettlement1 `xml:"CshSttlm,omitempty"`
	// Identifies documents to be provided for the account opening.
	ServiceLevelAgreement []*iso20022.DocumentToSend3 `xml:"SvcLvlAgrmt,omitempty"`
	// Additional information such as remarks or notes that must be conveyed about the account management activity and or any limitations and restrictions.
	AdditionalInformation []*iso20022.AdditiononalInformation12 `xml:"AddtlInf,omitempty"`
	// Identifies the market practice to which the message conforms.
	MarketPracticeVersion *iso20022.MarketPracticeVersion1 `xml:"MktPrctcVrsn,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
func (a *AccountOpeningInstructionV07) AddMessageIdentification() *iso20022.MessageIdentification1 {
a.MessageIdentification = new(iso20022.MessageIdentification1)
return a.MessageIdentification
}
// AddOrderReference initialises the OrderReference field and returns it for population.
func (a *AccountOpeningInstructionV07) AddOrderReference() *iso20022.InvestmentFundOrder4 {
	a.OrderReference = new(iso20022.InvestmentFundOrder4)
	return a.OrderReference
}
// AddPreviousReference initialises the PreviousReference field and returns it for population.
func (a *AccountOpeningInstructionV07) AddPreviousReference() *iso20022.AdditionalReference6 {
	a.PreviousReference = new(iso20022.AdditionalReference6)
	return a.PreviousReference
}
// AddInstructionDetails initialises the InstructionDetails field and returns it for population.
func (a *AccountOpeningInstructionV07) AddInstructionDetails() *iso20022.InvestmentAccountOpening3 {
	a.InstructionDetails = new(iso20022.InvestmentAccountOpening3)
	return a.InstructionDetails
}
// AddInvestmentAccount initialises the InvestmentAccount field and returns it for population.
func (a *AccountOpeningInstructionV07) AddInvestmentAccount() *iso20022.InvestmentAccount61 {
	a.InvestmentAccount = new(iso20022.InvestmentAccount61)
	return a.InvestmentAccount
}
// AddAccountParties initialises the AccountParties field and returns it for population.
func (a *AccountOpeningInstructionV07) AddAccountParties() *iso20022.AccountParties15 {
	a.AccountParties = new(iso20022.AccountParties15)
	return a.AccountParties
}
// AddIntermediaries appends a new empty element to the Intermediaries slice and returns it for population.
func (a *AccountOpeningInstructionV07) AddIntermediaries() *iso20022.Intermediary36 {
	newValue := new(iso20022.Intermediary36)
	a.Intermediaries = append(a.Intermediaries, newValue)
	return newValue
}
// AddPlacement initialises the Placement field and returns it for population.
func (a *AccountOpeningInstructionV07) AddPlacement() *iso20022.ReferredAgent2 {
	a.Placement = new(iso20022.ReferredAgent2)
	return a.Placement
}
// AddNewIssueAllocation initialises the NewIssueAllocation field and returns it for population.
func (a *AccountOpeningInstructionV07) AddNewIssueAllocation() *iso20022.NewIssueAllocation2 {
	a.NewIssueAllocation = new(iso20022.NewIssueAllocation2)
	return a.NewIssueAllocation
}
// AddSavingsInvestmentPlan appends a new empty element to the SavingsInvestmentPlan slice and returns it for population.
func (a *AccountOpeningInstructionV07) AddSavingsInvestmentPlan() *iso20022.InvestmentPlan14 {
	newValue := new(iso20022.InvestmentPlan14)
	a.SavingsInvestmentPlan = append(a.SavingsInvestmentPlan, newValue)
	return newValue
}
// AddWithdrawalInvestmentPlan appends a new empty element to the WithdrawalInvestmentPlan slice and returns it for population.
func (a *AccountOpeningInstructionV07) AddWithdrawalInvestmentPlan() *iso20022.InvestmentPlan14 {
	newValue := new(iso20022.InvestmentPlan14)
	a.WithdrawalInvestmentPlan = append(a.WithdrawalInvestmentPlan, newValue)
	return newValue
}
// AddCashSettlement appends a new empty element to the CashSettlement slice and returns it for population.
func (a *AccountOpeningInstructionV07) AddCashSettlement() *iso20022.CashSettlement1 {
	newValue := new(iso20022.CashSettlement1)
	a.CashSettlement = append(a.CashSettlement, newValue)
	return newValue
}
// AddServiceLevelAgreement appends a new empty element to the ServiceLevelAgreement slice and returns it for population.
func (a *AccountOpeningInstructionV07) AddServiceLevelAgreement() *iso20022.DocumentToSend3 {
	newValue := new(iso20022.DocumentToSend3)
	a.ServiceLevelAgreement = append(a.ServiceLevelAgreement, newValue)
	return newValue
}
// AddAdditionalInformation appends a new empty element to the AdditionalInformation slice and returns it for population.
func (a *AccountOpeningInstructionV07) AddAdditionalInformation() *iso20022.AdditiononalInformation12 {
	newValue := new(iso20022.AdditiononalInformation12)
	a.AdditionalInformation = append(a.AdditionalInformation, newValue)
	return newValue
}
// AddMarketPracticeVersion initialises the MarketPracticeVersion field and returns it for population.
func (a *AccountOpeningInstructionV07) AddMarketPracticeVersion() *iso20022.MarketPracticeVersion1 {
	a.MarketPracticeVersion = new(iso20022.MarketPracticeVersion1)
	return a.MarketPracticeVersion
}
// AddExtension appends a new empty element to the Extension slice and returns it for population.
func (a *AccountOpeningInstructionV07) AddExtension() *iso20022.Extension1 {
	newValue := new(iso20022.Extension1)
	a.Extension = append(a.Extension, newValue)
	return newValue
}
|
package main
import (
"encoding/json"
"os"
"github.com/mattermost/mattermost-cloud-database-factory/model"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// init registers the command-line flags for the cluster commands and attaches
// the "create" subcommand to the parent "cluster" command.
func init() {
	// --server is persistent so it is inherited by all cluster subcommands.
	clusterCmd.PersistentFlags().String("server", "http://localhost:8077", "The DB factory server whose API will be queried.")
	clusterCreateCmd.Flags().String("vpc-id", "", "The VPC id to create a RDS Aurora Cluster")
	clusterCreateCmd.Flags().String("cluster-id", "", "A random 8 character identifier of the Aurora cluster")
	clusterCreateCmd.Flags().String("environment", "dev", "The environment used for the deployment. Can be dev, test, staging or prod")
	clusterCreateCmd.Flags().String("state-store", "terraform-database-factory-state-bucket-dev", "The s3 bucket to store the terraform state")
	clusterCreateCmd.Flags().Bool("apply", false, "If disabled, only a Terraform plan will run instead of Terraform apply")
	clusterCreateCmd.Flags().String("instance-type", "db.r4.large", "The instance type used for Aurora cluster replicas")
	clusterCreateCmd.Flags().String("backup-retention-period", "15", "The retention period for the DB instance backups")
	clusterCmd.AddCommand(clusterCreateCmd)
}
// clusterCmd is the parent "cluster" command that groups the RDS cluster
// operations.
var clusterCmd = &cobra.Command{
	Use:   "cluster",
	Short: "Manipulate RDS clusters managed by the database factory server.",
}
// clusterCreateCmd implements "cluster create": it reads the flag values,
// sends a CreateCluster request to the database factory server and prints
// the resulting cluster as indented JSON.
var clusterCreateCmd = &cobra.Command{
	Use:   "create",
	Short: "Create a RDS Aurora cluster.",
	RunE: func(command *cobra.Command, args []string) error {
		command.SilenceUsage = true

		flags := command.Flags()
		serverAddress, _ := flags.GetString("server")
		client := model.NewClient(serverAddress)

		vpcID, _ := flags.GetString("vpc-id")
		clusterID, _ := flags.GetString("cluster-id")
		environment, _ := flags.GetString("environment")
		stateStore, _ := flags.GetString("state-store")
		apply, _ := flags.GetBool("apply")
		instanceType, _ := flags.GetString("instance-type")
		backupRetentionPeriod, _ := flags.GetString("backup-retention-period")

		request := &model.CreateClusterRequest{
			VPCID:                 vpcID,
			ClusterID:             clusterID,
			Environment:           environment,
			StateStore:            stateStore,
			Apply:                 apply,
			InstanceType:          instanceType,
			BackupRetentionPeriod: backupRetentionPeriod,
		}
		cluster, err := client.CreateCluster(request)
		if err != nil {
			return errors.Wrap(err, "failed to create RDS cluster")
		}
		return printJSON(cluster)
	},
}
// printJSON writes data to stdout as indented JSON, returning any encoding
// error.
func printJSON(data interface{}) error {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	return enc.Encode(data)
}
|
package main
import (
"strings"
)
// isAnagram reports whether str1 and str2 contain exactly the same bytes
// (an anagram check at the byte level). Two empty strings are anagrams.
func isAnagram(str1 string, str2 string) bool {
	if len(str1) != len(str2) {
		return false
	}
	// One counter per possible byte value. The previous table had only 150
	// slots, so any byte >= 150 (every non-ASCII UTF-8 byte) caused an
	// index-out-of-range panic.
	var count [256]int
	for i := 0; i < len(str1); i++ {
		count[str1[i]]++
		count[str2[i]]--
	}
	for _, c := range count {
		if c != 0 {
			return false
		}
	}
	return true
}
// main prints every candidate string that is an anagram of the target,
// comparing case-insensitively.
func main() {
	candidates := []string{"Madam Curie!", "ABC", "madiur mace!"}
	target := "Radium came!"
	loweredTarget := strings.ToLower(target)
	for _, candidate := range candidates {
		if isAnagram(strings.ToLower(candidate), loweredTarget) {
			println(candidate)
		}
	}
}
|
package repository
import (
. "2019_2_IBAT/pkg/pkg/models"
"fmt"
"reflect"
"testing"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
sqlmock "gopkg.in/DATA-DOG/go-sqlmock.v1"
)
// TestDBUserStorage_GetVacancies_Correct checks that GetVacancies maps the
// queried rows into the expected Vacancy slice.
func TestDBUserStorage_GetVacancies_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	rows := sqlmock.
		NewRows([]string{"id", "own_id", "company_name", "experience",
			"position", "tasks", "requirements", "wage_from", "wage_to", "conditions", "about",
		})
	expect := []Vacancy{
		{
			ID:           uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"),
			OwnerID:      uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d"),
			CompanyName:  "MC",
			Experience:   "7 years",
			Position:     "cleaner",
			Tasks:        "cleaning rooms",
			Requirements: "work for 24 hours per week",
			WageFrom:     "100 500 руб",
			WageTo:       "101 500.00 руб",
			Conditions:   "Nice geolocation",
			About:        "Hello employer",
		},
	}
	for _, item := range expect {
		rows = rows.AddRow(item.ID.String(), item.OwnerID.String(), item.CompanyName, item.Experience,
			item.Position, item.Tasks, item.Requirements,
			item.WageFrom, item.WageTo, item.Conditions, item.About,
		)
	}
	mock.
		ExpectQuery("SELECT v.id, v.own_id, c.company_name, v.experience," +
			"v.position, v.tasks, v.requirements, v.wage_from, v.wage_to, v.conditions, v.about, " +
			"v.region, v.type_of_employment, v.work_schedule " +
			" FROM vacancies AS v JOIN companies AS c ON v.own_id = c.own_id;").
		WithArgs().
		WillReturnRows(rows)
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	dummyMap := make(map[string]interface{})
	// Lower-cased to match the naming used by the sibling tests.
	authRec := AuthStorageValue{
		ID: uuid.New(),
	}
	vacancies, err := repo.GetVacancies(authRec, dummyMap)
	if err != nil {
		t.Errorf("unexpected err: %s", err)
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
	if !reflect.DeepEqual(vacancies, expect) {
		t.Errorf("results not match,\n want\n%v,\n have\n %v\n", expect, vacancies)
		return
	}
}
// TestDBUserStorage_GetVacancies_Fail checks that GetVacancies surfaces a
// query error.
func TestDBUserStorage_GetVacancies_Fail(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	mock.
		ExpectQuery("SELECT v.id, v.own_id, c.company_name, v.experience," +
			"v.position, v.tasks, v.requirements, v.wage_from, v.wage_to, v.conditions, v.about, " +
			"v.region, v.type_of_employment, v.work_schedule " +
			" FROM vacancies AS v JOIN companies AS c ON v.own_id = c.own_id;").
		WithArgs().
		WillReturnError(errors.New("GetVacancies: error while querying"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	dummyMap := make(map[string]interface{})
	authRec := AuthStorageValue{
		ID: uuid.New(),
	}
	vacancies, err := repo.GetVacancies(authRec, dummyMap)
	fmt.Println(vacancies)
	if err == nil {
		t.Errorf("Expected err")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
}
// TestDBUserStorage_GetVacancy_Correct checks that GetVacancy maps a single
// queried row into the expected Vacancy.
func TestDBUserStorage_GetVacancy_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	rows := sqlmock.
		NewRows([]string{"id", "own_id", "company_name", "experience",
			"position", "tasks", "requirements", "wage_from", "wage_to", "conditions", "about",
		})
	expect := []Vacancy{
		{
			ID:           uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"),
			OwnerID:      uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d"),
			CompanyName:  "MC",
			Experience:   "7 years",
			Position:     "mid",
			Tasks:        "cleaning rooms",
			Requirements: "work for 24 hours per week",
			WageFrom:     "100 500.00 руб",
			WageTo:       "101 500.00 руб",
			Conditions:   "Nice geolocation",
			About:        "Hello employer",
		},
	}
	for _, item := range expect {
		rows = rows.AddRow(item.ID.String(), item.OwnerID.String(), item.CompanyName, item.Experience,
			item.Position, item.Tasks, item.Requirements,
			item.WageFrom, item.WageTo, item.Conditions, item.About,
		)
	}
	mock.
		ExpectQuery("SELECT v.id, v.own_id, c.company_name, v.experience," +
			"v.position, v.tasks, v.requirements, v.wage_from, v.wage_to, v.conditions, v.about, " +
			"v.region, v.type_of_employment, v.work_schedule FROM vacancies AS v JOIN companies AS c ON v.own_id = c.own_id WHERE").
		WithArgs().
		WillReturnRows(rows)
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a")
	userID := uuid.New()
	item, err := repo.GetVacancy(id, userID)
	if err != nil {
		t.Errorf("unexpected err: %s", err)
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
	if !reflect.DeepEqual(item, expect[0]) {
		t.Errorf("results not match,\n want\n%v,\n have\n %v\n", expect[0], item)
		return
	}
}
// TestDBUserStorage_GetVacancy_Fail checks that GetVacancy surfaces a query
// error.
func TestDBUserStorage_GetVacancy_Fail(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642bbba")
	mock.
		ExpectQuery("SELECT v.id, v.own_id, c.company_name, v.experience," +
			"v.position, v.tasks, v.requirements, v.wage_from, v.wage_to, v.conditions, v.about, " +
			"v.region, v.type_of_employment, v.work_schedule FROM vacancies AS v JOIN companies AS c ON v.own_id = c.own_id WHERE").
		WithArgs(id).
		WillReturnError(errors.New("GetVacancy: error while querying"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	userID := uuid.New()
	vacancy, err := repo.GetVacancy(id, userID)
	fmt.Println(vacancy)
	if err == nil {
		t.Errorf("Expected err")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
}
// TestDBUserStorage_CreateVacancy_Correct checks the happy path of
// CreateVacancy against the expected INSERT.
func TestDBUserStorage_CreateVacancy_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	vacancy := Vacancy{
		ID:               uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"),
		OwnerID:          uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d"),
		CompanyName:      "MC",
		Experience:       "7 years",
		Position:         "mid",
		Tasks:            "cleaning rooms",
		Requirements:     "work for 24 hours per week",
		WageFrom:         "100 500.00 руб",
		WageTo:           "120 500.00 руб",
		Conditions:       "Nice geolocation",
		About:            "Hello employer",
		TypeOfEmployment: "someType",
		WorkSchedule:     "WorkSchedule",
		Region:           "Moscow",
	}
	mock.
		ExpectExec(`INSERT INTO vacancies`).
		WithArgs(
			vacancy.ID, vacancy.OwnerID, vacancy.Experience, vacancy.Position, vacancy.Tasks,
			vacancy.Requirements, vacancy.Conditions, vacancy.WageFrom, vacancy.WageTo, vacancy.About,
			vacancy.Region, vacancy.TypeOfEmployment, vacancy.WorkSchedule,
		).
		WillReturnResult(sqlmock.NewResult(1, 1))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.CreateVacancy(vacancy)
	if !ok {
		t.Error("Failed to create vacancy\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// TestDBUserStorage_CreateVacancy_False checks that CreateVacancy reports
// failure when the INSERT errors.
func TestDBUserStorage_CreateVacancy_False(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	vacancy := Vacancy{
		ID:               uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"),
		OwnerID:          uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d"),
		CompanyName:      "MC",
		Experience:       "7 years",
		Position:         "mid",
		Tasks:            "cleaning rooms",
		Requirements:     "work for 24 hours per week",
		WageFrom:         "100 500.00 руб",
		WageTo:           "120 500.00 руб",
		Conditions:       "Nice geolocation",
		About:            "Hello employer",
		TypeOfEmployment: "someType",
		WorkSchedule:     "WorkSchedule",
		Region:           "Moscow",
	}
	mock.
		ExpectExec(`INSERT INTO vacancies`).
		WithArgs(
			vacancy.ID, vacancy.OwnerID, vacancy.Experience, vacancy.Position, vacancy.Tasks,
			vacancy.Requirements, vacancy.Conditions, vacancy.WageFrom, vacancy.WageTo, vacancy.About,
			vacancy.Region, vacancy.TypeOfEmployment, vacancy.WorkSchedule,
		).
		WillReturnError(fmt.Errorf("bad query"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.CreateVacancy(vacancy)
	if ok {
		t.Errorf("expected false, got true")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// TestDBUserStorage_DeleteVacancy_Correct checks that DeleteVacancy issues
// the DELETE and that a subsequent lookup of the same id fails as expected.
func TestDBUserStorage_DeleteVacancy_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a")
	mock.
		ExpectExec(`DELETE FROM vacancies`).
		WithArgs(id).
		WillReturnResult(sqlmock.NewResult(1, 1))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	err = repo.DeleteVacancy(id)
	if err != nil {
		t.Errorf("unexpected err: %s", err)
		return
	}
	mock.
		ExpectQuery("SELECT v.id, v.own_id, c.company_name, v.experience," +
			"v.position, v.tasks, v.requirements, v.wage_from, v.wage_to, v.conditions, v.about, " +
			"v.region, v.type_of_employment, v.work_schedule FROM vacancies AS v JOIN companies AS c ON v.own_id = c.own_id WHERE").
		WithArgs(id).
		WillReturnError(errors.New("GetVacancy: error while querying"))
	userID := uuid.New()
	_, err = repo.GetVacancy(id, userID)
	if err == nil {
		t.Errorf("Expected err")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
}
// TestDBUserStorage_DeleteVacancy_False checks that DeleteVacancy surfaces an
// exec error.
func TestDBUserStorage_DeleteVacancy_False(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	id := uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a")
	mock.
		ExpectExec(`DELETE FROM vacancies`).
		WithArgs(id).
		WillReturnError(errors.Errorf("error"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	err = repo.DeleteVacancy(id)
	if err == nil {
		t.Errorf("Expected err")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
		return
	}
}
// TestDBUserStorage_PutVacancy_Correct checks the happy path of PutVacancy
// against the expected UPDATE.
func TestDBUserStorage_PutVacancy_Correct(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	vacancy := Vacancy{
		ID:               uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"),
		OwnerID:          uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d"),
		CompanyName:      "MC",
		Experience:       "7 years",
		Position:         "mid",
		Tasks:            "cleaning rooms",
		Requirements:     "work for 24 hours per week",
		WageFrom:         "100 500.00 руб",
		WageTo:           "101 500.00 руб",
		Conditions:       "Nice geolocation",
		About:            "Hello employer",
		TypeOfEmployment: "someType",
		WorkSchedule:     "WorkSchedule",
		Region:           "Moscow",
	}
	mock.
		ExpectExec(`UPDATE vacancies SET`).
		WithArgs(
			vacancy.Experience, vacancy.Position, vacancy.Tasks, vacancy.Requirements,
			vacancy.WageFrom, vacancy.WageTo, vacancy.Conditions, vacancy.About,
			vacancy.Region, vacancy.TypeOfEmployment, vacancy.WorkSchedule,
			vacancy.ID, vacancy.OwnerID,
		).
		WillReturnResult(sqlmock.NewResult(1, 1))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.PutVacancy(vacancy, vacancy.OwnerID, vacancy.ID)
	if !ok {
		t.Error("Failed to put vacancy\n")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
// TestDBUserStorage_PutVacancy_False checks that PutVacancy reports failure
// when the UPDATE errors.
func TestDBUserStorage_PutVacancy_False(t *testing.T) {
	db, mock, err := sqlmock.New()
	// Check the creation error before deferring Close: on failure db may be
	// nil, and the deferred Close would panic while t.Fatalf unwinds.
	if err != nil {
		t.Fatalf("cant create mock: %s", err)
	}
	defer db.Close()
	sqlxDB := sqlx.NewDb(db, "sqlmock")
	defer sqlxDB.Close()
	vacancy := Vacancy{
		ID:               uuid.MustParse("f14c6104-3430-413b-ab4e-e31c8642ad8a"), //invalid id
		OwnerID:          uuid.MustParse("92b77a73-bac7-4597-ab71-7b5fbe53052d"),
		CompanyName:      "MC",
		Experience:       "7 years",
		Position:         "cleaner",
		Tasks:            "cleaning rooms",
		Requirements:     "work for 24 hours per week",
		WageFrom:         "100 500.00 руб",
		WageTo:           "101 500.00 руб",
		Conditions:       "Nice geolocation",
		About:            "Hello employer",
		TypeOfEmployment: "someType",
		WorkSchedule:     "WorkSchedule",
		Region:           "Moscow",
	}
	mock.
		ExpectExec(`UPDATE vacancies SET`).
		WithArgs(
			vacancy.Experience, vacancy.Position, vacancy.Tasks, vacancy.Requirements,
			vacancy.WageFrom, vacancy.WageTo, vacancy.Conditions, vacancy.About,
			vacancy.Region, vacancy.TypeOfEmployment, vacancy.WorkSchedule,
			vacancy.ID, vacancy.OwnerID,
		).
		WillReturnError(fmt.Errorf("bad query"))
	repo := DBUserStorage{
		DbConn: sqlxDB,
	}
	ok := repo.PutVacancy(vacancy, vacancy.OwnerID, vacancy.ID)
	if ok {
		t.Errorf("expected false, got true")
		return
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %s", err)
	}
}
|
package table
// Args holds the command-line arguments controlling table output.
type Args struct {
	// Quiet restricts output to IDs only.
	Quiet bool `name:"quiet" usage:"only print ID" short:"q"`
	// Format selects the output encoding (yaml/json/jsoncompact/raw).
	Format string `name:"format" usage:"format(yaml/json/jsoncompact/raw)"`
}
|
package auth
import "time"
/**
 * @author Moriaty (CS class of 2016)
 * @version 1.0
 * @copyright Moriaty, all rights reserved © 2020
 * @date 2020/4/6 19:35
 * @Description TODO
 * Auth interface
 */
// Auth is a token store: implementations can look up, save and remove tokens
// by their code.
type Auth interface {
	// CheckToken returns the token stored under tokenCode.
	CheckToken(tokenCode string) *Token
	// PutTokenStorage stores token and returns a string — presumably the
	// token code; confirm against the implementation.
	PutTokenStorage(token Token) string
	// RemoveToken deletes the token identified by tokenCode.
	RemoveToken(tokenCode string)
}
// NewAuth returns the Auth implementation registered under name, or nil when
// the name is unknown. Only "memory" is currently supported.
func NewAuth(name string) Auth {
	if name == "memory" {
		return newMemoryAuth()
	}
	return nil
}
// Token is an authentication token record held by an Auth implementation.
type Token struct {
	// Code is the token string itself.
	Code string `json:"code"`
	// EffectiveTime is the validity period in hours; -1 means stored forever.
	EffectiveTime int16 `json:"effective_time"`
	// GenerationTime records when the token was created.
	GenerationTime time.Time `json:"generation_time"`
	// Level is the permission level.
	Level uint8 `json:"level"`
	// Data is the arbitrary payload attached to the token.
	Data interface{} `json:"data"`
}
|
package command
import (
"errors"
"flag"
"fmt"
"math"
"strconv"
"strings"
"rsc.io/getopt"
"github.com/Vovan-VE/maze-go/internal/cli"
"github.com/Vovan-VE/maze-go/pkg/maze"
"github.com/Vovan-VE/maze-go/pkg/maze/format"
)
// Generate is the "gen" cli command: it generates a maze and exports it to
// stdout.
type Generate struct {
	*command
}
// NewGenerate creates an instance of Generate bound to the given pool under
// the given command name.
func NewGenerate(pool Pooler, name string) *Generate {
	return &Generate{newCommand(pool, name)}
}
// Run implements the Commander interface: it parses args into a generation
// config, generates a maze and writes the exported result to stdout.
// Returns the process exit code alongside any error.
func (g *Generate) Run(args []string) (int, error) {
	config, err := initConfig(g.Name(), args)
	if err != nil {
		return 1, err
	}
	exporter := format.NewExporter(config.Format)
	exporter.ConfigureExport(config.Options)
	// Named m so the local no longer shadows the imported maze package.
	m := maze.NewGenerator(config).Generate()
	result := exporter.ExportMaze(m)
	fmt.Fprintln(stdout, result)
	return 0, nil
}
// Usage implements Commander interface.
// Returns the full help text for the gen command; the string is emitted
// verbatim, so keep its formatting intact.
func (g *Generate) Usage() string {
	return `maze [gen] [options]
Generate a maze and export it to stdout.
Since 'gen' is the default command, the command name 'gen' is optional.
Options:
    -W <WIDTH>, --width=<WIDTH>
        Maze width in number of CELLs. Default is 30.
    -H <HEIGHT>, --height=<HEIGHT>
        Maze height in number of CELLs. Default is 10.
        Notice about uppercase -H to not mix with -h which is "help".
    -s <SIZE>, --size=<SIZE>
        Alternative way to set both width and height at once. The SIZE
        must be in form <WIDTH>x<HEIGHT>. So, the default size is 30x10.
    -B <BL>, --branch-length=<BL>
        The "branch length" option for generation. BL can be an integer > 1
        (a number of CELLs), string 'max' (which is WIDTH * HEIGHT'), or
        decimal from 0 to 1 as fraction of max (for example, 0.2 is
        round(0.2 * W * H)). Default is 10.
    -f <FORMAT>, --format=<FORMAT>
        Output format. Can be one of 'json' or 'text'. The default is 'text' to
        be human readable.
    -c <NAME>=<VALUE>
        Output format option. The '<NAME>' depends on chosen format in '-f'.
`
}
// initConfig parses the gen command's arguments into a maze.Config,
// validating width/height/branch-length and the export format name.
func initConfig(name string, args []string) (*maze.Config, error) {
	config := maze.NewConfig()
	var bl, size string
	options := cli.NewMapValue()
	fs := getopt.NewFlagSet(name, flag.ExitOnError)
	fs.IntVar(&config.Width, "width", 30, "maze width in number of CELLs")
	fs.IntVar(&config.Height, "height", 10, "maze height in number of CELLs")
	fs.StringVar(&bl, "branch-length", "10", "\"branch length\" option")
	fs.StringVar(&size, "size", "", "both width and height together")
	// "outout" -> "output": help-text typo fix.
	fs.StringVar(&config.Format, "format", "text", "output format name")
	fs.Var(options, "c", "output options depending of given format")
	fs.Alias("W", "width")
	fs.Alias("H", "height")
	fs.Alias("B", "branch-length")
	fs.Alias("s", "size")
	fs.Alias("f", "format")
	fs.SetOutput(stderr)
	if err := fs.Parse(args); err != nil {
		return nil, err
	}
	if size != "" {
		// TODO: check mixing -s with -W -H
		// SplitN with n=3 so a third "x" segment is detected as malformed.
		parts := strings.SplitN(size, "x", 3)
		if len(parts) != 2 {
			return nil, errors.New("size must be in form `<WIDTH>x<HEIGHT>`, like `30x10`")
		}
		widthStr, heightStr := parts[0], parts[1]
		var err error
		if config.Width, err = parsePlusIntValue(widthStr); err != nil {
			return nil, fmt.Errorf("<WIDTH> %s in `-s` (`--size`)", err.Error())
		}
		if config.Height, err = parsePlusIntValue(heightStr); err != nil {
			return nil, fmt.Errorf("<HEIGHT> %s in `-s` (`--size`)", err.Error())
		}
	} else {
		if err := validatePlusIntValue(config.Width); err != nil {
			return nil, fmt.Errorf("<WIDTH> %s in `-W` (`--width`)", err.Error())
		}
		if err := validatePlusIntValue(config.Height); err != nil {
			return nil, fmt.Errorf("<HEIGHT> %s in `-H` (`--height`)", err.Error())
		}
	}
	if bl != "" {
		var err error
		if config.BranchLength, err = parseBranchLength(bl, config.Width*config.Height); err != nil {
			return nil, fmt.Errorf("<BL> %s in `-B` (`--branch-length`)", err.Error())
		}
	}
	if !format.HasExporter(config.Format) {
		// The flag is registered as -f/--format, so report `-f` (was `-F`).
		return nil, errors.New("unknown format name in `-f` (`--format`)")
	}
	config.Options = options.Values()
	return config, nil
}
// parseBranchLength converts the raw -B/--branch-length argument into a cell
// count: "max" yields max, a positive integer is clamped to max, and a
// decimal in [0, 1] is treated as a fraction of max.
func parseBranchLength(input string, max int) (int, error) {
	if input == "max" {
		return max, nil
	}
	if n, err := parsePlusIntValue(input); err == nil {
		if n > max {
			n = max
		}
		return n, nil
	}
	// strconv.ParseFloat only accepts bit sizes 32 or 64; the previous
	// value of 16 relied on unspecified behaviour.
	if f, err := strconv.ParseFloat(input, 64); err == nil && f >= 0 && f <= 1 {
		return int(math.Round(f * float64(max))), nil
	}
	return 0, errors.New(
		// "then" -> "than": user-facing message typo fix.
		"must be either integer greater than 1 (number of CELLs), " +
			"a string `max` to set <WIDTH>*<HEIGHT>, " +
			"or decimal from 0 to 1 as fraction of max",
	)
}
// parsePlusIntValue parses input as a positive 16-bit integer, translating
// strconv's sentinel errors into user-facing messages.
func parsePlusIntValue(input string) (int, error) {
	n, err := strconv.ParseUint(input, 10, 16)
	if err == nil {
		i := int(n)
		if err := validatePlusIntValue(i); err != nil {
			return 0, err
		}
		return i, nil
	}
	// errors.Is is the idiomatic sentinel check and also matches wrapped
	// errors; ParseUint failures are *strconv.NumError wrapping these.
	if errors.Is(err, strconv.ErrSyntax) {
		return 0, errors.New("must be positive integer")
	}
	if errors.Is(err, strconv.ErrRange) {
		return 0, errors.New("is out of range")
	}
	return 0, err
}
// validatePlusIntValue checks that n is a usable maze dimension: a positive
// integer no larger than 0xFFFF.
func validatePlusIntValue(n int) error {
	if n <= 0 {
		// "then" -> "than": user-facing message typo fix.
		return errors.New("must be greater than zero")
	}
	if n > 0xFFFF {
		return errors.New("is too big")
	}
	return nil
}
|
package kafka
import (
"fmt"
"github.com/bsm/sarama-cluster"
"os"
"os/signal"
)
/*
Kafka consumer: wraps a sarama-cluster consumer together with the per-topic
message handlers.
*/
type KafkaConsumer struct {
	// consumer is the underlying cluster consumer.
	consumer *cluster.Consumer
	// subscribers maps a topic name to the handler invoked per message.
	subscribers map[string]func(msg string) error
}
// NewKafkaConsumer creates a Kafka consumer instance: it collects the topics
// that have a registered handler, builds a cluster consumer for them and
// starts consuming in a background goroutine. It fails when no subscriber
// defines both a topic name and a handler.
func NewKafkaConsumer(conf KafkaConsumerConf) (*KafkaConsumer, error) {
	if conf.Config == nil {
		conf.DefaultConsumerConfig()
	}
	topics := make([]string, 0)
	// Superfluous zero size hint dropped from the map allocation.
	sbs := make(map[string]func(msg string) error)
	for _, subscriber := range conf.Subscribers {
		if len(subscriber.TopicName) > 0 && subscriber.HandleMsg != nil {
			sbs[subscriber.TopicName] = subscriber.HandleMsg
			topics = append(topics, subscriber.TopicName)
		}
	}
	if len(topics) == 0 {
		return nil, fmt.Errorf("no subscribe topics")
	}
	// Create the consuming instance.
	consumer, err := cluster.NewConsumer(conf.BrokerServers, conf.GroupId, topics, conf.Config)
	if err != nil {
		return nil, err
	}
	kc := &KafkaConsumer{consumer: consumer, subscribers: sbs}
	// Start consuming in the background.
	go kc.start()
	return kc, nil
}
// start runs the consume loop: it spawns one goroutine per partition and
// dispatches each message to the handler registered for its topic. The loop
// exits when the partition channel closes or on SIGINT.
func (c *KafkaConsumer) start() {
	// trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	// consume partitions
	for {
		select {
		case part, ok := <-c.consumer.Partitions():
			if !ok {
				return
			}
			// start a separate goroutine to consume messages
			go func(pc cluster.PartitionConsumer) {
				for msg := range pc.Messages() {
					if invokeHandle, ok := c.subscribers[msg.Topic]; ok {
						// Invoke the message callback.
						er := invokeHandle(string(msg.Value))
						if er == nil {
							// Mark the message as processed only when the
							// handler succeeded.
							c.consumer.MarkOffset(msg, "")
						}
					}
				}
			}(part)
		case <-signals:
			return
		}
	}
}
/* Destroy releases resources by closing the underlying cluster consumer. */
func (c *KafkaConsumer) Destroy() error {
	return c.consumer.Close()
}
|
package rkt
import (
"github.com/r3boot/rkt-registrator/utils"
"os"
)
var (
	// Log is the package-level logger, set by Setup.
	Log utils.Log
	// Rkt_dir is the rkt data directory, set by Setup.
	Rkt_dir string
	// Cni_dir is the CNI configuration directory, set by Setup.
	Cni_dir string
)
// Setup wires the package-level logger and directory paths after sanity
// checking that the rkt and CNI directories (and rkt's pods subdirectory)
// exist.
// NOTE(review): Log.Fatal presumably terminates the process; if it returns
// instead, execution continues with the stat error still held in the named
// return — confirm against utils.Log.
func Setup(l utils.Log, rkt_dir string, cni_dir string) (err error) {
	Log = l
	// Sanity check
	if _, err = os.Stat(rkt_dir); err != nil {
		Log.Fatal(rkt_dir + " does not exist")
	}
	if _, err = os.Stat(cni_dir); err != nil {
		Log.Fatal(cni_dir + " does not exist")
	}
	if _, err = os.Stat(rkt_dir + "/pods"); err != nil {
		Log.Fatal(rkt_dir + "/pods does not exist, forgot to run setup-data-dir.sh?")
	}
	Rkt_dir = rkt_dir
	Cni_dir = cni_dir
	return
}
|
// Copyright (c) 2019 Chair of Applied Cryptography, Technische Universität
// Darmstadt, Germany. All rights reserved. This file is part of go-perun. Use
// of this source code is governed by a MIT-style license that can be found in
// the LICENSE file.
// +build !wrap_test
package wallet // import "perun.network/go-perun/backend/sim/wallet"
import (
"perun.network/go-perun/wallet"
"perun.network/go-perun/wallet/test"
)
// init registers the simulated backend and its randomizer with the go-perun
// wallet and wallet/test packages.
func init() {
	wallet.SetBackend(new(Backend))
	test.SetRandomizer(newRandomizer())
}
|
package main
import (
"github.com/KyriakosMilad/hello-go/functions"
)
// main runs the currently active example; previously explored examples are
// kept commented out for reference.
func main() {
	//hello.SayHello()
	//variables.Variables()
	//primitives.Primitives()
	//constants.Constants()
	//arrays.Arrays()
	//slices.Slices()
	//maps.Maps()
	//structs.Structs()
	//conditions.Conditions()
	//loops.Loops()
	//controls.Controls()
	//pointers.Pointers()
	functions.Functions()
}
|
package main
import "github.com/golang/protobuf/proto"
// msg is a raw framed message: a numeric type tag, a session identifier and
// the serialized payload bytes.
type msg struct {
	t       uint32
	session uint32
	data    []byte
}
// msgCB is the callback invoked after a message payload has been decoded.
type msgCB func(*Agent, proto.Message)

// msgHandler pairs a prototype message (the unmarshal target) with the
// callback to invoke.
type msgHandler struct {
	p  proto.Message
	cb msgCB
}

// handlers maps a message type tag to its registered handler.
var handlers = make(map[uint32]msgHandler)
// registerHandler records the prototype message and callback for type tag t.
func registerHandler(t uint32, p proto.Message, cb msgCB) {
	handlers[t] = msgHandler{p: p, cb: cb}
}
// dispatchOutsideMsg looks up the handler registered for m.t, unmarshals the
// payload into the handler's prototype message and invokes the callback.
// Unknown types and unmarshal failures are logged and dropped.
func dispatchOutsideMsg(agent *Agent, m *msg) {
	h, ok := handlers[m.t]
	// Idiomatic negation instead of `ok != true`.
	if !ok {
		log(ERROR, "msg[%d] handler not found\n", m.t)
		return
	}
	// NOTE(review): h.p is the single prototype stored at registration, so
	// concurrent dispatches of the same type would share it — confirm that
	// dispatch is single-threaded.
	if err := proto.Unmarshal(m.data, h.p); err != nil {
		log(ERROR, "msg[%d] Unmarshal failed: %s\n", m.t, err)
		return
	}
	h.cb(agent, h.p)
}
|
// Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netconfig
import (
"fmt"
"sort"
)
// remoteProvider is an immutable lookup of Remote configurations keyed by
// address; update operations return new providers rather than mutating.
type remoteProvider struct {
	addressToRemote map[string]Remote
}
// newRemoteProvider builds a remoteProvider from the external remote
// configuration, rejecting addresses that appear more than once.
func newRemoteProvider(externalRemotes []ExternalRemote) (*remoteProvider, error) {
	byAddress := make(map[string]Remote, len(externalRemotes))
	for _, ext := range externalRemotes {
		remote, err := newRemote(ext.Address, ext.Login.Token)
		if err != nil {
			return nil, err
		}
		address := remote.Address()
		if _, exists := byAddress[address]; exists {
			return nil, fmt.Errorf("remote address %s is specified more than once", address)
		}
		byAddress[address] = remote
	}
	return newRemoteProviderInternal(byAddress), nil
}
// newRemoteProviderInternal wraps an already-validated address map in a
// remoteProvider.
func newRemoteProviderInternal(addressToRemote map[string]Remote) *remoteProvider {
	provider := &remoteProvider{addressToRemote: addressToRemote}
	return provider
}
// GetRemote returns the Remote stored for address and whether it exists.
func (r *remoteProvider) GetRemote(address string) (Remote, bool) {
	found, ok := r.addressToRemote[address]
	return found, ok
}
// WithUpdatedRemote returns a new provider in which address maps to a remote
// rebuilt with updatedToken; all other entries are carried over unchanged.
func (r *remoteProvider) WithUpdatedRemote(address string, updatedToken string) (RemoteProvider, error) {
	replacement, err := newRemote(address, updatedToken)
	if err != nil {
		return nil, err
	}
	merged := make(map[string]Remote, len(r.addressToRemote)+1)
	for existingAddress, existingRemote := range r.addressToRemote {
		merged[existingAddress] = existingRemote
	}
	// Overwrites any previous entry for address with the rebuilt remote.
	merged[address] = replacement
	return newRemoteProviderInternal(merged), nil
}
// WithoutRemote returns a provider without the entry for address and whether
// the entry existed; when it did not, the receiver itself is returned.
func (r *remoteProvider) WithoutRemote(address string) (RemoteProvider, bool) {
	if _, ok := r.addressToRemote[address]; !ok {
		return r, false
	}
	remaining := make(map[string]Remote, len(r.addressToRemote))
	for curAddress, curRemote := range r.addressToRemote {
		if curAddress != address {
			remaining[curAddress] = curRemote
		}
	}
	return newRemoteProviderInternal(remaining), true
}
// ToExternalRemotes converts every stored remote back to its external form,
// sorted by address for deterministic output.
func (r *remoteProvider) ToExternalRemotes() []ExternalRemote {
	out := make([]ExternalRemote, 0, len(r.addressToRemote))
	for _, remote := range r.addressToRemote {
		out = append(out, remote.toExternalRemote())
	}
	sort.Slice(out, func(i, j int) bool {
		return out[i].Address < out[j].Address
	})
	return out
}
|
package main
import (
"testing"
"github.com/jackytck/projecteuler/tools"
)
// TestP64 checks solve against the recorded expected result: input 10000
// must yield 1322 (Project Euler problem 64).
func TestP64(t *testing.T) {
	cases := []tools.TestCase{
		{In: 10000, Out: 1322},
	}
	tools.TestIntInt(t, cases, solve, "P64")
}
|
package lawin
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// TransformStrToMap parses a JSON document into a generic map. The parse
// error is deliberately discarded, so malformed input yields a nil map.
func TransformStrToMap(jsonstr string) map[string]interface{} {
	var parsed map[string]interface{}
	// Best-effort contract: on failure parsed stays nil.
	_ = json.Unmarshal([]byte(jsonstr), &parsed)
	return parsed
}
// ExtractRespBody : Extract response body and transform to map
// resp - http response
func ExtractRespBody(resp *http.Response) string {
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println("Payload Transform Fail")
}
defer resp.Body.Close()
bodyString := string(bodyBytes)
return bodyString
}
// HTTPGet : http GET Request
// URL - http URL
// queryString - query string params
// headers - request headers
// NOTE(review): errors from client.Do are only printed; on failure the
// returned *http.Response is nil, so callers must guard against that. The
// client has no timeout configured.
func HTTPGet(URL string, queryString map[string]interface{},
	headers map[string]string) *http.Response {
	client := &http.Client{}
	req, _ := http.NewRequest("GET", URL, nil)
	if headers != nil {
		for key, value := range headers {
			req.Header.Add(key, value)
		}
	}
	if queryString != nil {
		q := req.URL.Query()
		for key, value := range queryString {
			// A non-string value fails the type assertion and is sent as "".
			strValue, _ := value.(string)
			q.Add(key, strValue)
		}
		req.URL.RawQuery = q.Encode()
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("HTTP GET Failed")
	}
	return resp
}
// HTTPPost performs a POST request against URL with payload serialized as a
// JSON body and the given headers applied, returning the raw response. A
// transport failure is reported on stdout and yields a nil response.
//
// Fix: removed a leftover debug fmt.Println of the serialized payload,
// which polluted stdout and risked leaking sensitive request data to logs.
// URL - http URL
// payload - request body, marshalled to JSON
// headers - request headers
func HTTPPost(URL string, payload map[string]interface{},
	headers map[string]string) *http.Response {
	client := &http.Client{}
	jsonPayload, _ := json.Marshal(payload)
	req, _ := http.NewRequest("POST", URL, bytes.NewBuffer(jsonPayload))
	if headers != nil {
		for key, value := range headers {
			req.Header.Add(key, value)
		}
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("HTTP POST Failed")
	}
	return resp
}
|
package command
import (
"github.com/mitchellh/cli"
"github.com/romantomjak/b2/version"
)
// Commands returns the mapping of CLI commands for B2
func Commands(ui cli.Ui) map[string]cli.CommandFactory {
	// All commands share a single base carrying the UI.
	base := &baseCommand{ui: ui}
	return map[string]cli.CommandFactory{
		"create": func() (cli.Command, error) {
			return &CreateBucketCommand{baseCommand: base}, nil
		},
		"list": func() (cli.Command, error) {
			return &ListCommand{baseCommand: base}, nil
		},
		"get": func() (cli.Command, error) {
			return &GetCommand{baseCommand: base}, nil
		},
		"put": func() (cli.Command, error) {
			return &PutCommand{baseCommand: base}, nil
		},
		"version": func() (cli.Command, error) {
			return &VersionCommand{
				baseCommand: base,
				Version:     version.FullVersion(),
			}, nil
		},
	}
}
|
package main
// main is intentionally a no-op entry point.
func main() {
}
// reverseWords reverses the characters of each space-separated word in s
// while keeping the words themselves — and any runs of spaces between or
// around them — in their original positions.
func reverseWords(s string) string {
	n := len(s)
	out := make([]byte, 0, n)
	i := 0
	for i < n {
		// Find the end of the current word.
		j := i
		for j < n && s[j] != ' ' {
			j++
		}
		// Emit the word reversed.
		for k := j - 1; k >= i; k-- {
			out = append(out, s[k])
		}
		// Copy any run of spaces through verbatim.
		i = j
		for i < n && s[i] == ' ' {
			out = append(out, ' ')
			i++
		}
	}
	return string(out)
}
|
package sessionmanager
import (
"context"
"testing"
"time"
bssrs "gx/ipfs/QmYJ48z7NEzo3u2yCvUvNtBQ7wJWd5dX2nxxc7FeA6nHq1/go-bitswap/sessionrequestsplitter"
bssession "gx/ipfs/QmYJ48z7NEzo3u2yCvUvNtBQ7wJWd5dX2nxxc7FeA6nHq1/go-bitswap/session"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
blocks "gx/ipfs/QmWoXtvgC8inqFkAATB7cp2Dax7XBi9VDvSg9RCCZufmRk/go-block-format"
)
// fakeSession is a test double for a bitswap session. It records which
// notification methods were invoked so tests can assert on delivery.
type fakeSession struct {
	interested            bool
	receivedBlock         bool
	updateReceiveCounters bool
	id                    uint64
	pm                    *fakePeerManager
	srs                   *fakeRequestSplitter
}

// GetBlock is a stub; it always returns (nil, nil).
func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) {
	return nil, nil
}

// GetBlocks is a stub; it always returns (nil, nil).
func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) {
	return nil, nil
}

// InterestedIn reports the canned interest flag regardless of the CID.
func (fs *fakeSession) InterestedIn(cid.Cid) bool { return fs.interested }

// ReceiveBlockFrom records that a block was delivered to this session.
func (fs *fakeSession) ReceiveBlockFrom(peer.ID, blocks.Block) { fs.receivedBlock = true }

// UpdateReceiveCounters records that receive counters were updated.
func (fs *fakeSession) UpdateReceiveCounters(blocks.Block) { fs.updateReceiveCounters = true }

// fakePeerManager is a no-op peer manager carrying only the id it was
// created with, letting tests match sessions to their managers.
type fakePeerManager struct {
	id uint64
}

func (*fakePeerManager) FindMorePeers(context.Context, cid.Cid)  {}
func (*fakePeerManager) GetOptimizedPeers() []peer.ID            { return nil }
func (*fakePeerManager) RecordPeerRequests([]peer.ID, []cid.Cid) {}
func (*fakePeerManager) RecordPeerResponse(peer.ID, cid.Cid)     {}

// fakeRequestSplitter is a no-op request splitter.
type fakeRequestSplitter struct {
}

func (frs *fakeRequestSplitter) SplitRequest(peers []peer.ID, keys []cid.Cid) []*bssrs.PartialRequest {
	return nil
}
func (frs *fakeRequestSplitter) RecordDuplicateBlock() {}
func (frs *fakeRequestSplitter) RecordUniqueBlock()    {}
// nextInterestedIn controls the interest flag of the next fakeSession built
// by sessionFactory; tests set it immediately before creating a session.
var nextInterestedIn bool

// sessionFactory builds a fakeSession wired to the supplied fake peer
// manager and request splitter (both are type-asserted to the fakes).
func sessionFactory(ctx context.Context, id uint64, pm bssession.PeerManager, srs bssession.RequestSplitter) Session {
	return &fakeSession{
		interested:    nextInterestedIn,
		receivedBlock: false,
		id:            id,
		pm:            pm.(*fakePeerManager),
		srs:           srs.(*fakeRequestSplitter),
	}
}

// peerManagerFactory builds a fakePeerManager carrying the session id.
func peerManagerFactory(ctx context.Context, id uint64) bssession.PeerManager {
	return &fakePeerManager{id}
}

// requestSplitterFactory builds a no-op fakeRequestSplitter.
func requestSplitterFactory(ctx context.Context) bssession.RequestSplitter {
	return &fakeRequestSplitter{}
}
// TestAddingSessions verifies that NewSession hands out sequential ids,
// pairs each session with a peer manager carrying the same id, and that
// ReceiveBlockFrom fans a block out to every interested session. It also
// checks that GetNextSessionID consumes ids from the same sequence.
func TestAddingSessions(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory)
	p := peer.ID(123)
	block := blocks.NewBlock([]byte("block"))
	// we'll be interested in all blocks for this test
	nextInterestedIn = true
	currentID := sm.GetNextSessionID()
	firstSession := sm.NewSession(ctx).(*fakeSession)
	if firstSession.id != firstSession.pm.id ||
		firstSession.id != currentID+1 {
		t.Fatal("session does not have correct id set")
	}
	secondSession := sm.NewSession(ctx).(*fakeSession)
	if secondSession.id != secondSession.pm.id ||
		secondSession.id != firstSession.id+1 {
		t.Fatal("session does not have correct id set")
	}
	// Burn one id so the next session's id should jump by two.
	sm.GetNextSessionID()
	thirdSession := sm.NewSession(ctx).(*fakeSession)
	if thirdSession.id != thirdSession.pm.id ||
		thirdSession.id != secondSession.id+2 {
		t.Fatal("session does not have correct id set")
	}
	sm.ReceiveBlockFrom(p, block)
	if !firstSession.receivedBlock ||
		!secondSession.receivedBlock ||
		!thirdSession.receivedBlock {
		t.Fatal("should have received blocks but didn't")
	}
}
// TestReceivingBlocksWhenNotInterested verifies that ReceiveBlockFrom only
// notifies sessions whose InterestedIn reports true.
func TestReceivingBlocksWhenNotInterested(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory)
	p := peer.ID(123)
	block := blocks.NewBlock([]byte("block"))
	// Interest alternates: only the second session is interested.
	nextInterestedIn = false
	firstSession := sm.NewSession(ctx).(*fakeSession)
	nextInterestedIn = true
	secondSession := sm.NewSession(ctx).(*fakeSession)
	nextInterestedIn = false
	thirdSession := sm.NewSession(ctx).(*fakeSession)
	sm.ReceiveBlockFrom(p, block)
	if firstSession.receivedBlock ||
		!secondSession.receivedBlock ||
		thirdSession.receivedBlock {
		t.Fatal("did not receive blocks only for interested sessions")
	}
}
// TestRemovingPeersWhenManagerContextCancelled verifies that cancelling the
// manager's context stops block delivery to every session.
func TestRemovingPeersWhenManagerContextCancelled(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory)
	p := peer.ID(123)
	block := blocks.NewBlock([]byte("block"))
	// we'll be interested in all blocks for this test
	nextInterestedIn = true
	firstSession := sm.NewSession(ctx).(*fakeSession)
	secondSession := sm.NewSession(ctx).(*fakeSession)
	thirdSession := sm.NewSession(ctx).(*fakeSession)
	cancel()
	// wait for sessions to get removed
	time.Sleep(10 * time.Millisecond)
	sm.ReceiveBlockFrom(p, block)
	if firstSession.receivedBlock ||
		secondSession.receivedBlock ||
		thirdSession.receivedBlock {
		t.Fatal("received blocks for sessions after manager is shutdown")
	}
}
// TestRemovingPeersWhenSessionContextCancelled verifies that cancelling a
// single session's context removes only that session from delivery.
func TestRemovingPeersWhenSessionContextCancelled(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	sm := New(ctx, sessionFactory, peerManagerFactory, requestSplitterFactory)
	p := peer.ID(123)
	block := blocks.NewBlock([]byte("block"))
	// we'll be interested in all blocks for this test
	nextInterestedIn = true
	firstSession := sm.NewSession(ctx).(*fakeSession)
	// Only the second session gets its own cancellable context.
	sessionCtx, sessionCancel := context.WithCancel(ctx)
	secondSession := sm.NewSession(sessionCtx).(*fakeSession)
	thirdSession := sm.NewSession(ctx).(*fakeSession)
	sessionCancel()
	// wait for sessions to get removed
	time.Sleep(10 * time.Millisecond)
	sm.ReceiveBlockFrom(p, block)
	if !firstSession.receivedBlock ||
		secondSession.receivedBlock ||
		!thirdSession.receivedBlock {
		t.Fatal("received blocks for sessions that are canceled")
	}
}
|
package models
import (
"encoding/xml"
"reflect"
"testing"
"github.com/k0kubun/pp"
"github.com/ymomoi/goval-parser/oval"
)
// TestWalkDebian feeds a representative Debian OVAL definition through
// collectDebianPacks and checks that each (release, package, fixed-version)
// triple is extracted. The fixture nests criteria by release, then by
// architecture, mirroring real Debian OVAL feeds; the last criteria group
// has no explicit release criterion and is expected to inherit "9.0".
func TestWalkDebian(t *testing.T) {
	var tests = []struct {
		oval     string
		expected []distroPackage
	}{
		{
			oval: `
<?xml version="1.0" ?>
<oval_definitions>
<generator>
<oval:product_name>Debian</oval:product_name>
<oval:schema_version>5.3</oval:schema_version>
<oval:timestamp>2017-04-07T03:47:55.188-04:00</oval:timestamp>
</generator>
<definitions>
<definition class="vulnerability" id="oval:org.debian:def:20140001" version="1">
<metadata>
<title>CVE-2014-0001</title>
<affected family="unix">
<platform>Debian GNU/Linux 7.0</platform>
<platform>Debian GNU/Linux 8.2</platform>
<platform>Debian GNU/Linux 9.0</platform>
<product>mysql-5.5</product>
</affected>
<reference ref_id="CVE-2014-0001" ref_url="http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-0001" source="CVE"/>
<description>Buffer overflow in client/mysql.cc in Oracle MySQL and MariaDB before 5.5.35 allows remote database servers to cause a denial of service (crash) and possibly execute arbitrary code via a long server version string.</description>
<debian>
<date>2014-05-03</date>
<moreinfo>
DSA-2919
Several issues have been discovered in the MySQL database server. The
vulnerabilities are addressed by upgrading MySQL to the new upstream
version 5.5.37. Please see the MySQL 5.5 Release Notes and Oracle's
Critical Patch Update advisory for further details:
</moreinfo>
</debian>
</metadata>
<criteria comment="Platform section" operator="OR">
<criteria comment="Release section" operator="AND">
<criterion comment="Debian 7.0 is installed" test_ref="oval:org.debian.oval:tst:1"/>
<criteria comment="Architecture section" operator="OR">
<criteria comment="Architecture independent section" operator="AND">
<criterion comment="all architecture" test_ref="oval:org.debian.oval:tst:2"/>
<criterion comment="mysql-5.5 DPKG is earlier than 5.5.37-0+wheezy1" test_ref="oval:org.debian.oval:tst:3"/>
</criteria>
</criteria>
</criteria>
<criteria comment="Release section" operator="AND">
<criterion comment="Debian 8.2 is installed" test_ref="oval:org.debian.oval:tst:4"/>
<criteria comment="Architecture section" operator="OR">
<criteria comment="Architecture independent section" operator="AND">
<criterion comment="all architecture" test_ref="oval:org.debian.oval:tst:2"/>
<criterion comment="mysql-5.5 DPKG is earlier than 5.5.37-1" test_ref="oval:org.debian.oval:tst:5"/>
</criteria>
</criteria>
</criteria>
<criteria comment="Release section" operator="AND">
<criterion comment="Debian 9.0 is installed" test_ref="oval:org.debian.oval:tst:6"/>
<criteria comment="Architecture section" operator="OR">
<criteria comment="Architecture independent section" operator="AND">
<criterion comment="all architecture" test_ref="oval:org.debian.oval:tst:2"/>
<criterion comment="mysql-5.5 DPKG is earlier than 5.5.37-1" test_ref="oval:org.debian.oval:tst:7"/>
</criteria>
</criteria>
<criteria comment="Architecture section" operator="OR">
<criteria comment="Architecture independent section" operator="AND">
<criterion comment="all architecture" test_ref="oval:org.debian.oval:tst:2"/>
<criterion comment="mysql-5.6 DPKG is earlier than 5.6.37-1" test_ref="oval:org.debian.oval:tst:7"/>
</criteria>
</criteria>
</criteria>
</criteria>
</definition>
</definitions>
</oval_definitions>
`,
			expected: []distroPackage{
				{
					osVer: "7.0",
					pack: Package{
						Name:    "mysql-5.5",
						Version: "5.5.37-0+wheezy1",
					},
				},
				{
					osVer: "8.2",
					pack: Package{
						Name:    "mysql-5.5",
						Version: "5.5.37-1",
					},
				},
				{
					osVer: "9.0",
					pack: Package{
						Name:    "mysql-5.5",
						Version: "5.5.37-1",
					},
				},
				{
					osVer: "9.0",
					pack: Package{
						Name:    "mysql-5.6",
						Version: "5.6.37-1",
					},
				},
			},
		},
	}
	for i, tt := range tests {
		var root *oval.Root
		if err := xml.Unmarshal([]byte(tt.oval), &root); err != nil {
			t.Errorf("[%d] marshall error", i)
		}
		// Walk only the first definition's criteria tree.
		c := root.Definitions.Definitions[0].Criteria
		actual := collectDebianPacks(c)
		if !reflect.DeepEqual(tt.expected, actual) {
			e := pp.Sprintf("%v", tt.expected)
			a := pp.Sprintf("%v", actual)
			t.Errorf("[%d]: expected: %s\n, actual: %s\n", i, e, a)
		}
	}
}
|
package realm_test
import (
"testing"
"github.com/10gen/realm-cli/internal/cloud/realm"
u "github.com/10gen/realm-cli/internal/utils/test"
"github.com/10gen/realm-cli/internal/utils/test/assert"
)
// TestRealmLogs exercises the Logs endpoint against a locally running Realm
// server (skipped otherwise): a freshly created app is expected to have no
// log entries.
func TestRealmLogs(t *testing.T) {
	u.SkipUnlessRealmServerRunning(t)
	t.Run("with an active session", func(t *testing.T) {
		client := newAuthClient(t)
		groupID := u.CloudGroupID()
		// Create a throwaway app to query logs against.
		app, teardown := setupTestApp(t, client, groupID, "logs-test")
		defer teardown()
		t.Run("getting logs should return an empty list if there are none", func(t *testing.T) {
			logs, err := client.Logs(groupID, app.ID, realm.LogsOptions{})
			assert.Nil(t, err)
			assert.Equal(t, 0, len(logs))
		})
	})
}
|
package ch06
// these functions are used across examples and exercises

// less reports whether value1 orders before value2 (ascending comparator).
func less(value1, value2 int) bool {
	return value2 > value1
}
// more reports whether value1 orders after value2 (descending comparator).
func more(value1, value2 int) bool {
	return value1 > value2
} |
package main
import (
"bytes"
"fmt"
"os"
"sort"
"strings"
)
// Args holds key–value mappings for the representation of client and
// server options. Each key maps to a list of values, similar to url.Values.
type Args map[string][]string

// Get returns the first value associated with key. ok reports whether the
// key had at least one value; when false, value is "". Use the map directly
// when access to every value is needed.
func (args Args) Get(key string) (value string, ok bool) {
	if args == nil {
		return "", false
	}
	if vals := args[key]; len(vals) > 0 {
		return vals[0], true
	}
	return "", false
}

// Add appends value to the list of values stored for key.
func (args Args) Add(key, value string) {
	args[key] = append(args[key], value)
}
// indexUnescaped returns the index of the first unescaped byte of s that is
// in the term set, or len(s) when no terminator occurs, together with the
// unescaped text preceding that index. A backslash escapes the following
// byte; a trailing backslash with nothing after it is an error.
func indexUnescaped(s string, term []byte) (int, string, error) {
	var out []byte
	i := 0
	for ; i < len(s); i++ {
		c := s[i]
		if bytes.IndexByte(term, c) >= 0 {
			break // unescaped terminator: stop here
		}
		if c == '\\' {
			// Take the next byte literally.
			i++
			if i >= len(s) {
				return 0, "", fmt.Errorf("nothing following final escape in %q", s)
			}
			c = s[i]
		}
		out = append(out, c)
	}
	return i, string(out), nil
}
// parseEnv reads the SS_* plugin configuration from environment variables
// and returns it as an Args map with "listen" (remote host:port) and
// "target" (local host:port) entries, merged with any options from
// SS_PLUGIN_OPTIONS. When any of the four required variables is unset, the
// (empty) option set is returned without error.
//
// Bug fix: the final presence check previously tested SS_LOCAL_HOST a
// second time instead of SS_LOCAL_PORT, so an unset SS_LOCAL_PORT slipped
// through and produced a malformed "target" address ending in ":".
func parseEnv() (opts Args, err error) {
	opts = make(Args)
	ssRemoteHost := os.Getenv("SS_REMOTE_HOST")
	ssRemotePort := os.Getenv("SS_REMOTE_PORT")
	ssLocalHost := os.Getenv("SS_LOCAL_HOST")
	ssLocalPort := os.Getenv("SS_LOCAL_PORT")
	if len(ssRemoteHost) == 0 {
		return
	}
	if len(ssRemotePort) == 0 {
		return
	}
	if len(ssLocalHost) == 0 {
		return
	}
	if len(ssLocalPort) == 0 { // was: len(ssLocalHost) — duplicated check
		return
	}
	opts.Add("listen", ssRemoteHost+":"+ssRemotePort)
	opts.Add("target", ssLocalHost+":"+ssLocalPort)
	// Optional extra options, e.g. "secret=nou;cache=/tmp/cache".
	ssPluginOptions := os.Getenv("SS_PLUGIN_OPTIONS")
	if len(ssPluginOptions) > 0 {
		otherOpts, err := parsePluginOptions(ssPluginOptions)
		if err != nil {
			return nil, err
		}
		for k, v := range otherOpts {
			opts[k] = v
		}
	}
	return opts, nil
}
// Parse a name–value mapping as from SS_PLUGIN_OPTIONS.
//
// "<value> is a k=v string value with options that are to be passed to the
// transport. semicolons, equal signs and backslashes must be escaped
// with a backslash."
// Example: secret=nou;cache=/tmp/cache;secret=yes
//
// A key that appears without "=value" is stored with the value "1".
// Repeated keys accumulate multiple values.
func parsePluginOptions(s string) (opts Args, err error) {
	opts = make(Args)
	if len(s) == 0 {
		return
	}
	i := 0
	for {
		var key, value string
		var offset, begin int
		if i >= len(s) {
			break
		}
		begin = i
		// Read the key.
		offset, key, err = indexUnescaped(s[i:], []byte{'=', ';'})
		if err != nil {
			return
		}
		if len(key) == 0 {
			err = fmt.Errorf("empty key in %q", s[begin:i])
			return
		}
		i += offset
		// End of string or no equals sign?
		if i >= len(s) || s[i] != '=' {
			// Bare key: record it with the sentinel value "1".
			opts.Add(key, "1")
			// Skip the semicolon.
			i++
			continue
		}
		// Skip the equals sign.
		i++
		// Read the value.
		offset, value, err = indexUnescaped(s[i:], []byte{';'})
		if err != nil {
			return
		}
		i += offset
		opts.Add(key, value)
		// Skip the semicolon.
		i++
	}
	return opts, nil
}
// backslashEscape returns s with a backslash inserted before every
// backslash and before every byte contained in set.
func backslashEscape(s string, set []byte) string {
	var out bytes.Buffer
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '\\' || bytes.IndexByte(set, c) >= 0 {
			out.WriteByte('\\')
		}
		out.WriteByte(c)
	}
	return out.String()
}
// encodeSmethodArgs serializes args for the ARGS option of an SMETHOD line:
// key=value pairs joined by commas, sorted by key, with equal signs, commas
// and backslashes escaped by a backslash. The "ARGS:" prefix is not added;
// a nil map encodes to "".
func encodeSmethodArgs(args Args) string {
	if args == nil {
		return ""
	}
	esc := func(s string) string {
		return backslashEscape(s, []byte{'=', ','})
	}
	keys := make([]string, 0, len(args))
	for k := range args {
		keys = append(keys, k)
	}
	// Sorting keys makes the encoding deterministic.
	sort.Strings(keys)
	var pairs []string
	for _, k := range keys {
		for _, v := range args[k] {
			pairs = append(pairs, esc(k)+"="+esc(v))
		}
	}
	return strings.Join(pairs, ",")
}
|
package benchhash2
import (
"sync"
)
// HashMap is a minimal concurrent uint32→uint32 map.
type HashMap interface {
	Get(key uint32) (uint32, bool)
	Put(key, val uint32)
}

// GoMap is a HashMap backed by one Go map guarded by a single RWMutex.
type GoMap struct {
	sync.RWMutex
	m map[uint32]uint32
}

// Get returns the value stored for key and whether it was present.
func (g *GoMap) Get(key uint32) (uint32, bool) {
	g.RLock()
	v, ok := g.m[key]
	g.RUnlock()
	return v, ok
}

// Put stores val under key.
func (g *GoMap) Put(key, val uint32) {
	g.Lock()
	g.m[key] = val
	g.Unlock()
}

// NewGoMap returns an empty mutex-guarded HashMap.
func NewGoMap() HashMap {
	return &GoMap{m: make(map[uint32]uint32)}
}
// ShardedGoMap spreads keys over numShards independently locked GoMaps to
// reduce lock contention; the shard is chosen by key modulo shard count.
type ShardedGoMap struct {
	numShards int
	m         []HashMap
}

// Get looks the key up in its shard.
func (s *ShardedGoMap) Get(key uint32) (uint32, bool) {
	return s.m[key%uint32(s.numShards)].Get(key)
}

// Put stores the pair in the key's shard.
func (s *ShardedGoMap) Put(key, val uint32) {
	s.m[key%uint32(s.numShards)].Put(key, val)
}

// NewShardedGoMap builds a sharded HashMap with numShards shards.
func NewShardedGoMap(numShards int) HashMap {
	shards := make([]HashMap, numShards)
	for i := range shards {
		shards[i] = NewGoMap()
	}
	return &ShardedGoMap{numShards: numShards, m: shards}
}

// Convenience constructors for fixed shard counts.
func NewShardedGoMap4() HashMap  { return NewShardedGoMap(4) }
func NewShardedGoMap8() HashMap  { return NewShardedGoMap(8) }
func NewShardedGoMap16() HashMap { return NewShardedGoMap(16) }
func NewShardedGoMap32() HashMap { return NewShardedGoMap(32) }
func NewShardedGoMap64() HashMap { return NewShardedGoMap(64) }
|
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/whosonfirst/go-whosonfirst-geojson-v2/feature"
"github.com/whosonfirst/go-whosonfirst-geojson-v2/properties/whosonfirst"
"github.com/whosonfirst/go-whosonfirst-uri"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
)
// Point is a latitude/longitude coordinate pair.
type Point struct {
	Latitude  float64
	Longitude float64
}
// NewPointFromWOFId fetches the Who's On First GeoJSON document for id from
// whosonfirst.mapzen.com and returns its centroid as a Point. Note the
// centroid's Y coordinate maps to latitude and X to longitude.
func NewPointFromWOFId(id int64) (*Point, error) {
	// Resolve the id to the document's absolute URL.
	url, err := uri.Id2AbsPath("https://whosonfirst.mapzen.com/data/", id)
	if err != nil {
		return nil, err
	}
	rsp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer rsp.Body.Close()
	f, err := feature.LoadFeatureFromReader(rsp.Body)
	if err != nil {
		return nil, err
	}
	c, err := whosonfirst.Centroid(f)
	if err != nil {
		return nil, err
	}
	coord := c.Coord()
	pt := Point{
		Latitude:  coord.Y,
		Longitude: coord.X,
	}
	return &pt, nil
}
// NewPointFromString parses a "lat,lon" string into a Point.
func NewPointFromString(str_latlon string) (*Point, error) {
	parts := strings.Split(str_latlon, ",")
	if len(parts) != 2 {
		return nil, errors.New("Invalid lat,lon string")
	}
	lat, err := strconv.ParseFloat(parts[0], 64)
	if err != nil {
		return nil, err
	}
	lon, err := strconv.ParseFloat(parts[1], 64)
	if err != nil {
		return nil, err
	}
	return &Point{Latitude: lat, Longitude: lon}, nil
}
// Location is a single waypoint in a Valhalla routing request.
type Location struct {
	Latitude  float64 `json:"lat"`
	Longitude float64 `json:"lon"`
	Type      string  `json:"type"`
}

// Journey is the JSON request document for a Valhalla /route call.
type Journey struct {
	Locations []Location `json:"locations"`
	Costing   string     `json:"costing"`
}

// Valhalla is a client for a Valhalla routing endpoint.
type Valhalla struct {
	Endpoint string
	ApiKey   string
}

// Route asks the Valhalla service for a route from from to to using the
// given costing model and returns the raw JSON response body. A non-200
// response is reported as an error carrying the HTTP status text.
func (v *Valhalla) Route(from *Point, to *Point, costing string) ([]byte, error) {
	journey := Journey{
		Locations: []Location{
			{Latitude: from.Latitude, Longitude: from.Longitude, Type: "break"},
			{Latitude: to.Latitude, Longitude: to.Longitude, Type: "break"},
		},
		Costing: costing,
	}
	payload, err := json.Marshal(journey)
	if err != nil {
		return nil, err
	}
	// The whole request document travels in the "json" query parameter.
	query := url.Values{}
	query.Set("json", string(payload))
	query.Set("api_key", v.ApiKey)
	u := url.URL{
		Scheme:   "https",
		Host:     v.Endpoint,
		Path:     "/route",
		RawQuery: query.Encode(),
	}
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}
	rsp, err := (&http.Client{}).Do(req)
	if err != nil {
		return nil, err
	}
	defer rsp.Body.Close()
	if rsp.StatusCode != 200 {
		return nil, errors.New(rsp.Status)
	}
	return ioutil.ReadAll(rsp.Body)
}
// main routes between two points via the Valhalla API and prints the raw
// JSON response. Each endpoint of the journey may be given either as a
// "lat,lon" string (-from/-to) or as a Who's On First ID
// (-from-wofid/-to-wofid); the WOF id takes precedence when non-zero.
func main() {
	api_key := flag.String("api-key", "mapzen-xxxxxx", "A valid Mapzen API key.")
	endpoint := flag.String("endpoint", "valhalla.mapzen.com", "A valid Valhalla API endpoint.")
	costing := flag.String("costing", "auto", "A valid Valhalla costing.")
	str_from := flag.String("from", "", "Starting latitude,longitude position.")
	str_to := flag.String("to", "", "Destination latitude,longitude position.")
	from_wofid := flag.Int64("from-wofid", 0, "Starting Who's On First ID.")
	to_wofid := flag.Int64("to-wofid", 0, "Destination Who's On First ID.")
	flag.Parse()
	var from *Point
	var to *Point
	// Resolve the starting point, preferring the WOF id when provided.
	if *from_wofid != 0 {
		f, err := NewPointFromWOFId(*from_wofid)
		if err != nil {
			log.Fatal(err)
		}
		from = f
	} else {
		f, err := NewPointFromString(*str_from)
		if err != nil {
			log.Fatal(err)
		}
		from = f
	}
	// Resolve the destination the same way.
	if *to_wofid != 0 {
		t, err := NewPointFromWOFId(*to_wofid)
		if err != nil {
			log.Fatal(err)
		}
		to = t
	} else {
		t, err := NewPointFromString(*str_to)
		if err != nil {
			log.Fatal(err)
		}
		to = t
	}
	v := Valhalla{
		Endpoint: *endpoint,
		ApiKey:   *api_key,
	}
	b, err := v.Route(from, to, *costing)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
|
package main
import (
"os"
"github.com/codegangsta/cli"
)
// main configures and runs the nomadpanel CLI: global flags (log level,
// Nomad address/region, wait time, API listener) plus the "start" and
// "version" subcommands.
func main() {
	app := cli.NewApp()
	app.Name = "nomadpanel"
	app.Usage = "nomadpanel"
	app.Version = serviceVersion
	app.Author = "antoniofernandezvara@gmail.com"
	app.Email = "antoniofernandezvara@gmail.com"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "log-level",
			Value:  "info",
			Usage:  "Log level (options: debug, info, warn, error, fatal, panic)",
			EnvVar: "LOG_LEVEL",
		},
		cli.StringFlag{
			Name:   "nomad-address",
			Usage:  "Nomad Address",
			EnvVar: "NOMAD_ADDRESS",
			Value:  "http://127.0.0.1:4646",
		},
		cli.StringFlag{
			Name:   "nomad-region",
			Usage:  "Nomad Region",
			EnvVar: "NOMAD_REGION",
			Value:  "global",
		},
		cli.IntFlag{
			Name:   "wait-time",
			Usage:  "WaitTime",
			EnvVar: "WAIT_TIME",
			Value:  3600,
		},
		cli.StringFlag{
			Name:   "api-addr",
			Usage:  "Listener address",
			EnvVar: "API_ADDR",
			Value:  "0.0.0.0:8000",
		},
	}
	app.Commands = []cli.Command{
		{
			Name:   "start",
			Usage:  "service start",
			Action: cliStart,
		},
		{
			Name:   "version",
			Usage:  "service version",
			Action: cliVersion,
		},
	}
	// NOTE(review): the error returned by app.Run is silently discarded;
	// consider logging it or exiting non-zero on failure.
	app.Run(os.Args)
}
|
package primitives
import (
"encoding/xml"
"github.com/plandem/ooxml/ml"
)
//FontSchemeType is a type to encode XSD ST_FontScheme
type FontSchemeType ml.Property

//MarshalXML marshal FontSchemeType by delegating to the underlying ml.Property
func (t *FontSchemeType) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return (*ml.Property)(t).MarshalXML(e, start)
}

//UnmarshalXML unmarshal FontSchemeType by delegating to the underlying ml.Property
func (t *FontSchemeType) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	return (*ml.Property)(t).UnmarshalXML(d, start)
}
|
package machine
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/asset/installconfig"
"github.com/openshift/installer/pkg/asset/tls"
"github.com/openshift/installer/pkg/ipnet"
"github.com/openshift/installer/pkg/types"
"github.com/openshift/installer/pkg/types/aws"
)
// TestWorkerGenerate tests generating the worker asset: given a minimal AWS
// install config and a generated root CA, the worker asset should produce
// exactly one file named worker.ign.
func TestWorkerGenerate(t *testing.T) {
	installConfig := installconfig.MakeAsset(
		&types.InstallConfig{
			Networking: &types.Networking{
				ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("10.0.1.0/24")},
			},
			Platform: types.Platform{
				AWS: &aws.Platform{
					Region: "us-east",
				},
			},
		})
	rootCA := &tls.RootCA{}
	err := rootCA.Generate(nil)
	assert.NoError(t, err, "unexpected error generating root CA")
	// Wire the worker's parent assets.
	parents := asset.Parents{}
	parents.Add(installConfig, rootCA)
	worker := &Worker{}
	err = worker.Generate(parents)
	assert.NoError(t, err, "unexpected error generating worker asset")
	actualFiles := worker.Files()
	assert.Equal(t, 1, len(actualFiles), "unexpected number of files in worker state")
	assert.Equal(t, "worker.ign", actualFiles[0].Filename, "unexpected name for worker ignition config")
}
|
package main
import (
"log"
"os"
"github.com/tarm/goserial"
)
// nLEDs is the number of LEDs on the strip.
const nLEDs = 92

// main sets every LED on the serially attached strip to the mid-intensity
// value 0x80 on all three channels.
//
// Usage: prog <serial-device>
//
// Fixes: guard against a missing command-line argument (previously an index
// panic), check the Write error, and close the port when done.
func main() {
	if len(os.Args) < 2 {
		log.Fatalln("usage:", os.Args[0], "<serial-device>")
	}
	// Open the serial port
	c := &serial.Config{Name: os.Args[1], Baud: 115200}
	ser, err := serial.OpenPort(c)
	if err != nil {
		log.Fatalln("Trouble opening serial: ", err)
	}
	defer ser.Close()
	// Frame layout: one header byte (0x84) then 3 bytes per LED.
	buf := make([]byte, (nLEDs*3 + 1))
	buf[0] = 0x84
	for i := 0; i < nLEDs; i++ {
		buf[1+i*3], buf[2+i*3], buf[3+i*3] = 0x80, 0x80, 0x80
	}
	if _, err := ser.Write(buf); err != nil {
		log.Fatalln("Trouble writing serial: ", err)
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//28. Implement strStr()
//Implement strStr().
//Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
//Example 1:
//Input: haystack = "hello", needle = "ll"
//Output: 2
//Example 2:
//Input: haystack = "aaaaa", needle = "bba"
//Output: -1
//Clarification:
//What should we return when needle is an empty string? This is a great question to ask during an interview.
//For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent to C's strstr() and Java's indexOf().
//func strStr(haystack string, needle string) int {
//}
// Time Is Money |
// timer project main.go
package main
import (
"fmt"
"time"
)
// main runs a periodic "daka" (clock-in) loop forever, printing the current
// and next firing times on each iteration.
func main() {
	fmt.Println("Hello World!")
	// Scheduled task begins.
	for {
		now := time.Now()
		fmt.Println("daka——", now.Format("2006-01-02 15:04:05"))
		// Compute the next firing time: 70 seconds ahead, truncated to the
		// whole minute — i.e. the minute boundary 1-2 minutes from now.
		// NOTE(review): the original comment said "next zero point"
		// (midnight); the code actually targets a minute boundary — confirm
		// which was intended.
		next := now.Add(time.Second * 70)
		next = time.Date(next.Year(), next.Month(), next.Day(), next.Hour(), next.Minute(), 0, 0, next.Location())
		fmt.Println("next daka——", next.Format("2006-01-02 15:04:05"))
		// Sleep until the computed instant.
		timer := time.NewTimer(next.Sub(now))
		<-timer.C
	}
	// Scheduled task ends.
}
|
package open_im_sdk
// triggerCmdFriend is a placeholder; it currently does nothing.
func triggerCmdFriend() error {
	return nil
}

// triggerCmdBlackList is a placeholder; it currently does nothing.
func triggerCmdBlackList() error {
	return nil
}

// triggerCmdFriendApplication is a placeholder; it currently does nothing.
func triggerCmdFriendApplication() error {
	return nil
}

// deleteConNode identifies a conversation whose messages are to be deleted.
type deleteConNode struct {
	SourceID       string
	ConversationID string
	SessionType    int
}

// triggerCmdDeleteConversationAndMessage enqueues a CmdDeleteConversation
// command on the user's conversation channel.
// NOTE(review): the trailing argument 1 to sendCmd is assumed to be a
// timeout/retry parameter — confirm against sendCmd's definition.
func (u *UserRelated) triggerCmdDeleteConversationAndMessage(sourceID, conversationID string, sessionType int) error {
	c2v := cmd2Value{
		Cmd:   CmdDeleteConversation,
		Value: deleteConNode{SourceID: sourceID, ConversationID: conversationID, SessionType: sessionType},
	}
	return sendCmd(u.ConversationCh, c2v, 1)
}

/*
func triggerCmdGetLoginUserInfo() error {
	c2v := cmd2Value{
		Cmd: CmdGeyLoginUserInfo,
	}
	return sendCmd(InitCh, c2v, 1)
}
*/

// updateConNode describes a conversation-update command payload.
type updateConNode struct {
	ConId  string
	Action int //1 Delete the conversation; 2 Update the latest news in the conversation or add a conversation; 3 Put a conversation on the top;
	// 4 Cancel a conversation on the top, 5 Messages are not read and set to 0, 6 New conversations
	Args interface{}
}

// triggerCmdNewMsgCome enqueues a CmdNewMsgCome command carrying msg on the
// user's conversation channel.
func (u *UserRelated) triggerCmdNewMsgCome(msg ArrMsg) error {
	c2v := cmd2Value{
		Cmd:   CmdNewMsgCome,
		Value: msg,
	}
	sdkLog("send cmd: ", u.ConversationCh)
	return sendCmd(u.ConversationCh, c2v, 1)
}

// triggerCmdAcceptFriend is a placeholder; it currently does nothing.
func triggerCmdAcceptFriend(sendUid string) error {
	return nil
}

// triggerCmdRefuseFriend is a placeholder; it currently does nothing.
func triggerCmdRefuseFriend(receiveUid string) error {
	return nil
}

// triggerCmdUpdateConversation enqueues a CmdUpdateConversation command
// carrying node on the user's conversation channel.
func (u *UserRelated) triggerCmdUpdateConversation(node updateConNode) error {
	c2v := cmd2Value{
		Cmd:   CmdUpdateConversation,
		Value: node,
	}
	return sendCmd(u.ConversationCh, c2v, 1)
}

// unInitAll enqueues CmdUnInit, asking the conversation listener loop to
// exit; the send result is deliberately discarded.
func (u *UserRelated) unInitAll() {
	c2v := cmd2Value{Cmd: CmdUnInit}
	_ = sendCmd(u.ConversationCh, c2v, 1)
}

// goroutine is implemented by command-processing loops: getCh exposes the
// command channel and work handles a single command.
type goroutine interface {
	work(cmd cmd2Value)
	getCh() chan cmd2Value
}
// doListener runs the command loop for Li, dispatching every command
// received on Li's channel to Li.work. It returns when a CmdUnInit command
// arrives.
//
// Improvement: a select with a single case is equivalent to a direct
// channel receive (staticcheck S1000), so the select was dropped.
func doListener(Li goroutine) {
	sdkLog("doListener start.", Li.getCh())
	for {
		sdkLog("doListener for.")
		cmd := <-Li.getCh()
		if cmd.Cmd == CmdUnInit {
			sdkLog("doListener goroutine.")
			return
		}
		sdkLog("doListener work.")
		Li.work(cmd)
	}
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// dayOne repeatedly applies the signed frequency changes in data (lines of
// the form "+3"/"-7"), printing the pass number each sweep, until some
// running total repeats. It then prints the running total at the moment of
// the repeat, followed by the first repeated total itself.
func dayOne(data []string) {
	seen := make(map[int]int)
	total := 0
	firstDupe := 0
	found := false
	pass := 0
	for !found {
		fmt.Println(pass)
		for _, line := range data {
			plus := strings.Contains(line, "+")
			n, _ := strconv.Atoi(strings.TrimLeft(line, "+-"))
			if plus {
				total += n
			} else {
				total -= n
			}
			seen[total]++
			if seen[total] >= 2 {
				firstDupe = total
				found = true
				break
			}
		}
		pass++
	}
	fmt.Println(total)
	fmt.Println(firstDupe)
}
|
package main
import "fmt"
import "strconv"
// main sums the letter counts of the English names of the numbers 1..1000
// (Project Euler 17 style): hyphens and spaces are not counted, and "and"
// contributes 3 letters for numbers like "one hundred and ten".
func main() {
	fmt.Println("Hello, playground")
	// Dictionary of base number words whose lengths are composed below.
	number := make(map[int]string)
	number[1] = "one"
	number[2] = "two"
	number[3] = "three"
	number[4] = "four"
	number[5] = "five"
	number[6] = "six"
	number[7] = "seven"
	number[8] = "eight"
	number[9] = "nine"
	number[10] = "ten"
	number[11] = "eleven"
	number[12] = "twelve"
	number[13] = "thirteen"
	number[14] = "fourteen"
	number[15] = "fifteen"
	number[16] = "sixteen"
	number[17] = "seventeen"
	number[18] = "eighteen"
	number[19] = "nineteen"
	number[20] = "twenty"
	number[30] = "thirty"
	number[40] = "forty"
	number[50] = "fifty"
	number[60] = "sixty"
	number[70] = "seventy"
	number[80] = "eighty"
	number[90] = "ninety"
	number[100] = "hundred"
	number[1000] = "thousand"
	tally := 0 // running total of letters
	k := 0     // letter count of the current number
	for n := 1; n <= 1000; n++ {
		// Work digit-by-digit on the decimal string of n.
		x := strconv.FormatInt(int64(n), 10)
		if len(x) == 1 {
			// The first 9 numbers are 1 digit -- in the dictionary
			k = len(number[n])
			fmt.Printf("n = %d k = %d\n", n, k)
		} else if len(x) == 2 {
			x1 := x[0:1]
			x2 := x[1:2]
			x2_i, _ := strconv.Atoi(x2)
			if n < 20 {
				k = len(number[n])
				fmt.Printf("n = %d k = %d\n", n, k)
			} else {
				if x2 == "0" {
					// The numbers under 100 and greater than 19 -- in the
					// dictionary ending in '0', (20, 30, 40 ....)
					k = len(number[n])
					fmt.Printf("n = %d k = %d\n", n, k)
				} else {
					// The other numbers under 100 greater than 19
					// e.g. 42 = "forty" + "two".
					x1a := x1 + "0"
					x1a_i, _ := strconv.Atoi(x1a)
					k = len(number[x1a_i]) + len(number[x2_i])
					fmt.Printf("n = %d k = %d\n", n, k)
				}
			}
		} else if len(x) == 3 {
			// add 3 for 'and' i.e. -- two-hundred and ten
			x1 := x[0:1]
			x2 := x[1:2]
			x3 := x[2:3]
			x1a := x2 + "0"   // tens part as a round number, e.g. "2","3" -> "20"
			x1aa := x2 + x3   // last two digits, e.g. teens
			x1_i, _ := strconv.Atoi(x1)
			x3_i, _ := strconv.Atoi(x3)
			x1a_i, _ := strconv.Atoi(x1a)
			x1aa_i, _ := strconv.Atoi(x1aa)
			if x1 == "1" && x2 == "0" && x3 == "0" {
				// 100 -- in the dictionary
				k = len(number[1]) + len(number[100])
				fmt.Printf("n = %d k = %d\n", n, k)
			} else if x2 == "0" && x3 == "0" {
				// Consider 200, 300, 400, 500, 600, 700, 800, and 900
				k = len(number[x1_i]) + len(number[100])
				fmt.Printf("n = %d k = %d\n", n, k)
			} else if x2 == "0" && x3 != "0" {
				// Consider 101, 102 ... 109, 201, 202, ... 209 etc.
				k = 3 + len(number[x1_i]) + len(number[100]) + len(number[x3_i])
				fmt.Printf("n = %d k = %d\n", n, k)
			} else if x2 != "0" && x3 == "0" {
				// Consider 110, 120, ... 190, 210, 220, ... 290 etc.
				k = 3 + len(number[x1_i]) + len(number[100]) + len(number[x1a_i])
				fmt.Printf("n = %d k = %d\n", n, k)
			} else if x2 == "1" && x3 != "0" {
				// Consider the teens 111, 112, ... 119, 211, 212, ... 219 etc.
				k = 3 + len(number[x1_i]) + len(number[100]) + len(number[x1aa_i])
				fmt.Printf("n = %d k = %d\n", n, k)
			} else {
				// Consider all the other numbers
				k = 3 + len(number[x1_i]) + len(number[100]) + len(number[x1a_i]) + len(number[x3_i])
				fmt.Printf("n = %d k = %d\n", n, k)
			}
		} else {
			// 1000 -- two parts (one and thousand) -- in the dictionary
			k = len(number[1]) + len(number[1000])
			fmt.Printf("n = %d k = %d\n", n, k)
		}
		tally += k
	}
	fmt.Println(tally)
}
|
package main
import (
"fmt"
shortString "sberCloudTest/pkg"
)
// main demonstrates the shortString package: first a single string is
// shortened, then a batch of strings is processed in parallel and the
// results are printed.
func main() {
	sample := "aaabbccccdeeeff"
	shortened := shortString.Get(sample)
	fmt.Printf("Результат сокращения строки %s: %s\n\n", sample, shortened)

	inputs := []string{
		"aaabbccccdeeeff",
		"jjjsllkkkkklllllll",
		"kkkkkffffffjjjjrrrr",
		"nnnnttttlttttbbbbbb",
		"zzzzzzzzgggggeeeeeeeellll[[[[[",
		"tttttmmmmmggggggg",
		"ooooooooooeeeeeekkkkknnnnnccccc",
		"fffffiiiiiinnnnrrrrrrrrmmmmmmmmm",
		"tttttttggggggnnnnnnnneeeeeee",
	}
	results := make(map[string]string)
	shortString.GetParallelStringsProcessing(inputs, results)
	fmt.Println("Результат параллельной обработки строк:")
	for input, output := range results {
		fmt.Printf("%s -> %s\n", input, output)
	}
}
|
package routes
import (
"github.com/danyalov/shebeke/db"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// NewConnection builds the Echo server: logging/recovery middleware plus
// the contract read endpoints backed by db.
func NewConnection(db *db.DB) *echo.Echo {
	server := echo.New()
	env := Env{db}

	server.Use(middleware.Logger())
	server.Use(middleware.Recover())

	server.GET("/contracts", env.GetContracts)
	server.GET("/contract/:id", env.GetContractByID)

	return server
}
// Env bundles the dependencies the route handlers need; cd provides
// contract data access.
type Env struct {
	cd db.ContractDao
}
|
package t2m
// Version is the current release version of the t2m package.
const Version = "0.0.7"
|
package log_test
import (
"bytes"
"encoding/json"
"fmt"
. "github.com/nomkhonwaan/myblog/pkg/log"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"regexp"
"testing"
"time"
)
type mockTimer time.Time
func (timer mockTimer) Now() time.Time {
return time.Time(timer)
}
// mockOutputer captures the last formatted log line so tests can inspect it.
type mockOutputer string

// Printf records the rendered message instead of writing it anywhere.
func (o *mockOutputer) Printf(format string, args ...interface{}) {
	rendered := fmt.Sprintf(format, args...)
	*o = mockOutputer(rendered)
}
// TestHandler verifies that the logging interceptor passes the request
// through to the wrapped handler and emits a log line with method, URI,
// status, user agent and remote address.
func TestHandler(t *testing.T) {
	// Given
	timer := mockTimer(time.Now())
	outputer := new(mockOutputer)
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("POST", "https://api.nomkhonwaan.com/graphql", bytes.NewBufferString("{ categories { name slug } }"))
	r.RequestURI = "https://api.nomkhonwaan.com/graphql"
	r.RemoteAddr = "localhost"

	// When
	NewLoggingInterceptor(timer, outputer).
		Handler(
			http.HandlerFunc(
				func(w http.ResponseWriter, r *http.Request) {
					data, _ := json.Marshal([]map[string]interface{}{
						{
							"name": "Web Development",
							"slug": "web-development-1",
						},
					})
					// Headers and status must be set BEFORE the first Write:
					// on a real server anything set afterwards is ignored.
					w.Header().Set("Content-Type", "application/json")
					w.WriteHeader(http.StatusOK)
					_, _ = w.Write(data)
				},
			),
		).
		ServeHTTP(w, r)

	// Then
	assert.Equal(t, `[{"name":"Web Development","slug":"web-development-1"}]`, w.Body.String())
	assert.Regexp(t, regexp.MustCompile(fmt.Sprintf(`"%s %s" %d %q %q "(\d+|\.)(.*)"`, r.Method, r.RequestURI, http.StatusOK, r.UserAgent(), r.RemoteAddr)), string(*outputer))
}
// TestDefault verifies that Default() wires up the stock Timer and
// Outputer implementations.
func TestDefault(t *testing.T) {
	// Given
	// When
	l := Default()
	// Then
	assert.IsType(t, &DefaultTimer{}, l.Timer)
	assert.IsType(t, DefaultOutputer{}, l.Outputer)
}
|
package main
import (
"fmt"
"testing"
"log"
"os"
"os/signal"
"github.com/Shopify/sarama"
)
// TestProducer connects to Kafka and publishes one test message to the
// "nginx_log" topic, printing the partition and offset it landed at.
func TestProducer(t *testing.T) {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true

	msg := &sarama.ProducerMessage{}
	msg.Topic = "nginx_log"
	msg.Value = sarama.StringEncoder("this is a good test, my message is good")

	client, err := sarama.NewSyncProducer([]string{"dev-7:9092"}, config)
	if err != nil {
		// Report through the testing API so the test actually fails instead
		// of silently passing after an fmt.Println.
		t.Fatalf("producer close, err: %v", err)
	}
	defer client.Close()

	pid, offset, err := client.SendMessage(msg)
	if err != nil {
		t.Fatalf("send message failed, %v", err)
	}
	fmt.Printf("pid:%v offset:%v\n", pid, offset)
}
// TestConsume connects to Kafka and consumes messages from partition 0 of
// "target_index1" until interrupted with SIGINT, then reports the count.
func TestConsume(t *testing.T) {
	config := sarama.NewConfig()
	// NOTE(review): these are producer settings on a consumer config —
	// presumably copied from TestProducer; confirm they are needed here.
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true
	// msg := &sarama.ProducerMessage{}
	// msg.Topic = "target_index1"
	// msg.Value = sarama.StringEncoder("this is a good test, my message is good")
	consumer, err := sarama.NewConsumer([]string{"118.31.50.72:9092"}, config)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()
	// Offset -1 presumably means "newest" (sarama.OffsetNewest) — prefer the
	// named constant; confirm against the sarama version in use.
	partitionConsumer, err := consumer.ConsumePartition("target_index1", 0, -1)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()
	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	consumed := 0
ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
			log.Printf("Consumed message : %s\n", string(msg.Value))
			consumed++
		case <-signals:
			break ConsumerLoop
		}
	}
	log.Printf("Consumed: %d\n", consumed)
}
|
package genserver
import (
"github.com/Azer0s/quacktors"
"github.com/Azer0s/quacktors/typeregister"
)
// init registers every GenServer protocol message type with the global
// type register so they can be looked up by their Type() name.
func init() {
	typeregister.Store(callMessage{}.Type(), callMessage{})
	typeregister.Store(castMessage{}.Type(), castMessage{})
	typeregister.Store(ReceivedMessage{}.Type(), ReceivedMessage{})
	typeregister.Store(ResponseMessage{}.Type(), ResponseMessage{})
}
// callMessage wraps a synchronous Call request together with the pid that
// expects the reply.
type callMessage struct {
	sender *quacktors.Pid
	message quacktors.Message
}

// Type returns the registered type name "quacktors/CallMessage".
func (c callMessage) Type() string {
	return "quacktors/CallMessage"
}

// castMessage wraps an asynchronous Cast request together with the pid
// that expects the acknowledgement.
type castMessage struct {
	sender *quacktors.Pid
	message quacktors.Message
}

// Type returns the registered type name "quacktors/CastMessage".
func (c castMessage) Type() string {
	return "quacktors/CastMessage"
}
// The ReceivedMessage struct is the acknowledgement
// a Cast operation returns when the GenServer has
// received a message.
type ReceivedMessage struct {
}

// Type of ReceivedMessage returns "quacktors/ReceivedMessage".
func (r ReceivedMessage) Type() string {
	return "quacktors/ReceivedMessage"
}

// The ResponseMessage struct is returned as the result
// type of a Call operation on a GenServer.
type ResponseMessage struct {
	quacktors.Message
	Error error
}

// Type of ResponseMessage returns "quacktors/ResponseMessage".
func (r ResponseMessage) Type() string {
	return "quacktors/ResponseMessage"
}
|
/*
Write a function that returns true if two arrays, when combined, form a consecutive sequence.
Examples
consecutiveCombo([7, 4, 5, 1], [2, 3, 6]) ➞ true
consecutiveCombo([1, 4, 6, 5], [2, 7, 8, 9]) ➞ false
consecutiveCombo([1, 4, 5, 6], [2, 3, 7, 8, 10]) ➞ false
consecutiveCombo([44, 46], [45]) ➞ true
Notes
The input arrays will have unique values.
The input arrays can be in any order.
A consecutive sequence is a sequence without any gaps in the integers, e.g. 1, 2, 3, 4, 5 is a consecutive sequence, but 1, 2, 4, 5 is not.
*/
package main
import (
"sort"
)
// main exercises consecutive() against a table of fixed cases, panicking
// via assert on the first mismatch.
func main() {
	cases := []struct {
		a, b []int
		want bool
	}{
		{[]int{1, 4, 5, 7}, []int{2, 3, 6}, true},
		{[]int{1, 4, 5, 6}, []int{2, 7, 8, 9}, false},
		{[]int{1, 4, 5, 6}, []int{2, 3, 7, 8, 10}, false},
		{[]int{7, 5, 4, 1}, []int{2, 3, 6, 8}, true},
		{[]int{33, 34, 40}, []int{39, 38, 37, 36, 35, 32, 31, 30}, true},
		{[]int{1, 4, 5, 6}, []int{2, 3, 7, 8, 10}, false},
		{[]int{44, 46}, []int{45}, true},
		{[]int{4, 3, 1}, []int{2, 5}, true},
		{[]int{4, 3, 1}, []int{2, 5, 7, 6}, true},
		{[]int{4, 3, 1}, []int{7, 6, 5}, false},
		{[]int{4, 3, 1}, []int{0, 7, 6, 5}, false},
		{[]int{44, 46}, []int{45}, true},
	}
	for _, c := range cases {
		assert(consecutive(c.a, c.b) == c.want)
	}
}
func consecutive(a, b []int) bool {
var p []int
p = append(p, a...)
p = append(p, b...)
sort.Ints(p)
for i := 1; i < len(p); i++ {
if p[i]-1 != p[i-1] {
return false
}
}
return true
}
// assert panics when the supplied condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
|
package api
import (
	"context"
	"database/sql"
	"errors"
	"log"
	"net/http"
	"time"

	"My-project/conf"

	"github.com/go-chi/chi"
)
// API - type for dependency injection
type API struct {
	// cnf holds the service configuration (address, timeouts).
	cnf *conf.Conf
	// ctx is the application context.
	// NOTE(review): storing a context in a struct is discouraged by the Go
	// context docs — consider passing it explicitly instead.
	ctx context.Context
	db *sql.DB
	//hc *http.Client
	// Hs is the configured HTTP server, exported so callers can manage it.
	Hs *http.Server
}
// New initialize api with routes
func New(ctx context.Context, cnf conf.Conf, dbConn *sql.DB) *API {
	a := &API{
		cnf: &cnf,
		ctx: ctx,
		db:  dbConn,
	}
	a.initRouter()
	return a
}
// initRouter builds the chi router (JSON content type, /v1/users routes)
// and stores a fully-configured http.Server on api.Hs.
func (api *API) initRouter() {
	router := chi.NewRouter()
	router.Use(setContentType)
	router.Route("/v1", func(r chi.Router) {
		r.Route("/users", api.initUsersRoutes)
	})

	readTimeout := time.Duration(api.cnf.HTTPReadTimeout) * time.Second
	writeTimeout := time.Duration(api.cnf.HTTPWriteTimeout) * time.Second
	api.Hs = &http.Server{
		Addr:           api.cnf.Addr,
		Handler:        router,
		ReadTimeout:    readTimeout,
		WriteTimeout:   writeTimeout,
		MaxHeaderBytes: 1 << 20, // 1 Mb
	}
}
// Start runs the HTTP server and blocks until it stops. A graceful
// shutdown (http.ErrServerClosed) is logged and returns normally; any
// other error panics.
func (api *API) Start() {
	log.Println("launching the My-project service at", api.cnf.Addr)
	err := api.Hs.ListenAndServe()
	// Compare against the sentinel error instead of matching the error
	// string "http: Server closed", which is brittle.
	if errors.Is(err, http.ErrServerClosed) {
		log.Println("api port is closed")
		return
	}
	log.Panic(err)
}
// Stop will call Shutdown function, allowing in-flight requests up to
// five seconds to complete.
func (api *API) Stop() error {
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return api.Hs.Shutdown(shutdownCtx)
}
func setContentType(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
h.ServeHTTP(w, r)
})
}
|
package cmd
import (
"github.com/niclabs/dhsm-signer/signer"
"github.com/spf13/cobra"
)
// init declares the reset-keys command flags; only the PKCS#11 library
// path is mandatory.
func init() {
	resetKeysCmd.Flags().StringP("p11lib", "p", "", "Full path to PKCS11 lib file")
	resetKeysCmd.Flags().StringP("user-key", "k", "1234", "HSM User Login Key (default is 1234)")
	resetKeysCmd.Flags().StringP("key-label", "l", "dHSM-signer", "Label of HSM Signer Key")
	// MarkFlagRequired only fails for unknown flag names, which cannot
	// happen here, so the error is deliberately discarded.
	_ = resetKeysCmd.MarkFlagRequired("p11lib")
}
// resetKeysCmd destroys every key registered in the HSM under the
// configured key label, using the PKCS#11 library given by --p11lib.
var resetKeysCmd = &cobra.Command{
	Use: "reset-keys",
	Short: "Deletes all the keys registered in the HSM with specified key label",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Flag lookups cannot fail for flags declared in init, so the
		// errors are discarded.
		p11lib, _ := cmd.Flags().GetString("p11lib")
		key, _ := cmd.Flags().GetString("user-key")
		label, _ := cmd.Flags().GetString("key-label")
		// Fail early if the PKCS#11 library file does not exist.
		if err := signer.FilesExist(p11lib); err != nil {
			return err
		}
		s, err := signer.NewSession(p11lib, key, label, Log)
		if err != nil {
			return err
		}
		defer s.End()
		if err := s.DestroyAllKeys(); err != nil {
			return err
		}
		Log.Printf("All keys destroyed.")
		return nil
	},
}
|
package main
import "fmt"
// main demonstrates closures: the function returned by Counter keeps its
// own private counter between calls.
func main() {
	myfunc := Counter()
	fmt.Printf("%T\n", myfunc)
	fmt.Println(myfunc())
	fmt.Println(myfunc())
	fmt.Println(myfunc())
	// A second Counter() call would yield an independent closure whose
	// counter starts again from 1.
}
func Counter() func() int {
i := 0
res := func() int {
i += 1
return i
}
//fmt.Println("Counter res=", res)
fmt.Printf("%T,%v\n", res, res)
return res
}
|
package modules
// User is the primary user entity.
type User struct {
	Id int
	Name string
	Age int
	Address string
	Pic string
	Phone string
}

// UserInfo mirrors User field-for-field.
// NOTE(review): it duplicates User exactly — presumably intended as a
// separate DTO; confirm whether both types are needed.
type UserInfo struct {
	Id int
	Name string
	Age int
	Address string
	Pic string
	Phone string
}

// DBUserInfo is a reduced projection (id and name only).
type DBUserInfo struct {
	Id int
	Name string
}

// DBXXXUserInfo is another id/name projection.
// NOTE(review): identical to DBUserInfo — confirm it is actually used.
type DBXXXUserInfo struct {
	Id int
	Name string
}
package main
import "fmt"
// Title is the heading printed before the person details.
const Title = "Person Details"

// Country is the country reported for the person.
var Country = "USA"
// main prints a fixed set of person details under a heading.
func main() {
	firstName, lastName := "Changxue", "Fan"
	age := 35

	fmt.Println(Title)
	fmt.Println("First Name: ", firstName)
	fmt.Println("Last Name: ", lastName)
	fmt.Println("Age: ", age)
	fmt.Println("Country: ", Country)
}
|
package main
import "github.com/regit/cmd"
// main delegates to the root command defined in the cmd package.
func main() {
	cmd.Execute()
}
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"net"
"time"
)
// main accepts TCP connections on localhost:8000 forever, serving each
// client in its own goroutine.
func main() {
	listener, err := net.Listen("tcp", "localhost:8000")
	if err != nil {
		log.Fatal(err)
	}
	for {
		fmt.Println("Hey, I'm listening on port 8000...")
		c, acceptErr := listener.Accept()
		if acceptErr != nil {
			log.Print(acceptErr) // e.g., connection aborted
			continue
		}
		go handleConn(c) // serve each client concurrently
	}
}
// handleConn serves one client connection: each iteration sends the
// current time, waits a second, reads one newline-terminated line from
// the client, and echoes it back. The loop ends on the first I/O error.
func handleConn(c net.Conn) {
	defer c.Close()
	fmt.Println("new client connection: ESTABLISHED")
	// Create the buffered reader ONCE: constructing a new bufio.Reader per
	// iteration could silently discard bytes already buffered from c.
	br := bufio.NewReader(c)
	for {
		// write current time to the client
		_, err := io.WriteString(c, time.Now().Format("15:04:05\n"))
		if ok := testError(err); ok {
			break
		}
		time.Sleep(1 * time.Second)
		// read from connection until it reaches '\n' single char
		dataBytes, err := br.ReadBytes('\n')
		if ok := testError(err); ok {
			break
		}
		// write data sent by the client back to the client
		// (bug fix: this condition was inverted — `!ok` — which broke the
		// loop after the first SUCCESSFUL echo and kept looping on errors)
		_, err = io.WriteString(c, string(dataBytes))
		if ok := testError(err); ok {
			break
		}
	}
	fmt.Println("client connection: DISCONNECTED")
}
func testError(err error) bool {
if err != nil {
return true
}
return false
}
|
package iirepo_contents
// name is the fixed directory name for repo contents.
const name string = "contents"

// Name returns the name of the repo contents directory.
func Name() string {
	return name
}
|
package authorization
import (
"net"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/utils"
)
// NewAccessControlRules converts a schema.AccessControl into an AccessControlRule slice.
func NewAccessControlRules(config schema.AccessControl) (rules []*AccessControlRule) {
	networksMap, networksCacheMap := parseSchemaNetworks(config.Networks)

	for i := range config.Rules {
		// Rule positions are 1-based.
		rules = append(rules, NewAccessControlRule(i+1, config.Rules[i], networksMap, networksCacheMap))
	}

	return rules
}
// NewAccessControlRule parses a schema ACL and generates an internal ACL.
func NewAccessControlRule(pos int, rule schema.AccessControlRule, networksMap map[string][]*net.IPNet, networksCacheMap map[string]*net.IPNet) *AccessControlRule {
	r := &AccessControlRule{
		Position: pos,
		Query:    NewAccessControlQuery(rule.Query),
		Methods:  schemaMethodsToACL(rule.Methods),
		Networks: schemaNetworksToACL(rule.Networks, networksMap, networksCacheMap),
		Subjects: schemaSubjectsToACL(rule.Subjects),
		Policy:   NewLevel(rule.Policy),
	}

	// Cache whether any subject constraints exist for fast matching later.
	r.HasSubjects = len(r.Subjects) > 0

	ruleAddDomain(rule.Domains, r)
	ruleAddDomainRegex(rule.DomainsRegex, r)
	ruleAddResources(rule.Resources, r)

	return r
}
// AccessControlRule controls and represents an ACL internally.
type AccessControlRule struct {
	// HasSubjects caches whether Subjects is non-empty.
	HasSubjects bool

	Position int
	Domains []AccessControlDomain
	Resources []AccessControlResource
	Query []AccessControlQuery
	Methods []string
	Networks []*net.IPNet
	Subjects []AccessControlSubjects
	Policy Level
}
// IsMatch returns true if all elements of an AccessControlRule match the object and subject.
// The individual conditions are evaluated left-to-right with short-circuiting,
// exactly mirroring the sequential checks they replace.
func (acr *AccessControlRule) IsMatch(subject Subject, object Object) (match bool) {
	return acr.MatchesDomains(subject, object) &&
		acr.MatchesResources(subject, object) &&
		acr.MatchesQuery(object) &&
		acr.MatchesMethods(object) &&
		acr.MatchesNetworks(subject) &&
		acr.MatchesSubjects(subject)
}
// MatchesDomains returns true if the rule matches the domains.
func (acr *AccessControlRule) MatchesDomains(subject Subject, object Object) (matches bool) {
	// An empty domain list matches everything.
	if len(acr.Domains) == 0 {
		return true
	}

	for i := range acr.Domains {
		if acr.Domains[i].IsMatch(subject, object) {
			return true
		}
	}

	return false
}

// MatchesResources returns true if the rule matches the resources.
func (acr *AccessControlRule) MatchesResources(subject Subject, object Object) (matches bool) {
	// An empty resource list matches everything.
	if len(acr.Resources) == 0 {
		return true
	}

	for i := range acr.Resources {
		if acr.Resources[i].IsMatch(subject, object) {
			return true
		}
	}

	return false
}

// MatchesQuery returns true if the rule matches the query arguments.
func (acr *AccessControlRule) MatchesQuery(object Object) (match bool) {
	// An empty query list matches everything.
	if len(acr.Query) == 0 {
		return true
	}

	for i := range acr.Query {
		if acr.Query[i].IsMatch(object) {
			return true
		}
	}

	return false
}

// MatchesMethods returns true if the rule matches the method.
func (acr *AccessControlRule) MatchesMethods(object Object) (match bool) {
	// An empty method list matches every method.
	return len(acr.Methods) == 0 || utils.IsStringInSlice(object.Method, acr.Methods)
}

// MatchesNetworks returns true if the rule matches the networks.
func (acr *AccessControlRule) MatchesNetworks(subject Subject) (match bool) {
	// An empty network list matches every source address.
	if len(acr.Networks) == 0 {
		return true
	}

	for i := range acr.Networks {
		if acr.Networks[i].Contains(subject.IP) {
			return true
		}
	}

	return false
}

// MatchesSubjects returns true if the rule matches the subjects.
func (acr *AccessControlRule) MatchesSubjects(subject Subject) (match bool) {
	// Anonymous subjects always pass this check; exact matching is handled
	// separately by MatchesSubjectExact.
	if subject.IsAnonymous() {
		return true
	}

	return acr.MatchesSubjectExact(subject)
}

// MatchesSubjectExact returns true if the rule matches the subjects exactly.
func (acr *AccessControlRule) MatchesSubjectExact(subject Subject) (match bool) {
	switch {
	case len(acr.Subjects) == 0:
		// No subject constraints: always a match.
		return true
	case subject.IsAnonymous():
		// Anonymous users can never satisfy explicit subject constraints.
		return false
	}

	for i := range acr.Subjects {
		if acr.Subjects[i].IsMatch(subject) {
			return true
		}
	}

	return false
}
|
/*
Create a function that expands a decimal number into a string as shown below:
25.24 ➞ "20 + 5 + 2/10 + 4/100"
70701.05 ➞ "70000 + 700 + 1 + 5/100"
685.27 ➞ "600 + 80 + 5 + 2/10 + 7/100"
Examples
expandedForm(87.04) ➞ "80 + 7 + 4/100"
expandedForm(123.025) ➞ "100 + 20 + 3 + 2/100 + 5/1000"
expandedForm(50.270) ➞ "50 + 2/10 + 7/100"
Notes
N/A
*/
package main
import (
"bytes"
"fmt"
"math/big"
"strings"
)
// main runs expand() against a table of fixed cases via test(), which
// panics on the first mismatch.
func main() {
	cases := []struct {
		in   float64
		want string
	}{
		{87.04, "80 + 7 + 4/100"},
		{123.025, "100 + 20 + 3 + 2/100 + 5/1000"},
		{50.270, "50 + 2/10 + 7/100"},
		{207.333, "200 + 7 + 3/10 + 3/100 + 3/1000"},
		{0.57, "5/10 + 7/100"},
		{140.023, "100 + 40 + 2/100 + 3/1000"},
		{90.99, "90 + 9/10 + 9/100"},
		{84.5, "80 + 4 + 5/10"},
		{76.02, "70 + 6 + 2/100"},
		{44.5, "40 + 4 + 5/10"},
		{37.49, "30 + 7 + 4/10 + 9/100"},
		{60.0007, "60 + 7/10000"},
		{29.22, "20 + 9 + 2/10 + 2/100"},
		{10.99, "10 + 9/10 + 9/100"},
		{63.09, "60 + 3 + 9/100"},
		{37.022, "30 + 7 + 2/100 + 2/1000"},
		{43.21, "40 + 3 + 2/10 + 1/100"},
		{309.028, "300 + 9 + 2/100 + 8/1000"},
		{447.33, "400 + 40 + 7 + 3/10 + 3/100"},
		{65.05, "60 + 5 + 5/100"},
		{47.34, "40 + 7 + 3/10 + 4/100"},
		{68.699, "60 + 8 + 6/10 + 9/100 + 9/1000"},
		{1.24, "1 + 2/10 + 4/100"},
		{4.28, "4 + 2/10 + 8/100"},
		{7.304, "7 + 3/10 + 4/1000"},
		{0.04, "4/100"},
		{0.1234, "1/10 + 2/100 + 3/1000 + 4/10000"},
		{0, "0"},
		{10, "10"},
		{1051, "1000 + 50 + 1"},
	}
	for _, c := range cases {
		test(c.in, c.want)
	}
}
// test expands x, echoes the result, and asserts it equals r.
func test(x float64, r string) {
	got := expand(x)
	fmt.Printf("%v %q\n", x, got)
	assert(got == r)
}
// assert panics when the supplied condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// expand writes x in expanded decimal form, e.g. 87.04 -> "80 + 7 + 4/100".
// Zero digits are skipped; a value with no non-zero digits yields "0".
func expand(x float64) string {
	s := big.NewFloat(x).String()

	// Split into integer and fractional digit strings around the dot.
	intPart, fracPart := s, ""
	if dot := strings.IndexRune(s, '.'); dot >= 0 {
		intPart, fracPart = s[:dot], s[dot+1:]
	}

	var buf bytes.Buffer
	for i := 0; i < len(intPart); i++ {
		if intPart[i] == '0' {
			continue
		}
		// Digit followed by its positional zeros, e.g. '8' at tens -> "80".
		fmt.Fprintf(&buf, "%c%s + ", intPart[i], strings.Repeat("0", len(intPart)-i-1))
	}
	for i := 0; i < len(fracPart); i++ {
		if fracPart[i] == '0' {
			continue
		}
		// Fractional digit as a fraction, e.g. '4' at position 2 -> "4/100".
		fmt.Fprintf(&buf, "%c/1%s + ", fracPart[i], strings.Repeat("0", i+1))
	}

	out := strings.TrimSuffix(buf.String(), " + ")
	if out == "" {
		return "0"
	}
	return out
}
|
package main
// NodeType distinguishes the kinds of filesystem nodes.
type NodeType int64

const (
	// Directory identifies a directory node.
	Directory NodeType = 0
	// File identifies a regular file node.
	File NodeType = 1
)

// String renders the NodeType as a short human-readable tag; values
// outside the known set render as "unknown".
func (n NodeType) String() string {
	switch n {
	case Directory:
		return "dir"
	case File:
		return "file"
	default:
		return "unknown"
	}
}
|
package main
// findRedundantConnection returns the last edge in edges whose addition
// closes a cycle (LeetCode 684), tracked with a union-find set over the
// 1-based node labels.
func findRedundantConnection(edges [][]int) []int {
	ufs := NewUnionFindSet(len(edges) + 1)
	result := make([]int, 2)
	for _, edge := range edges {
		u, v := ufs.getNode(edge[0]), ufs.getNode(edge[1])
		if u.find() == v.find() {
			// Endpoints already connected: this edge is redundant.
			result[0], result[1] = edge[0], edge[1]
		} else {
			ufs.union(u, v)
		}
	}
	return result
}
// Node is a union-find tree node; a root is its own parent.
type Node struct {
	parent *Node
}

// NewNode returns a fresh singleton set whose node is its own root.
func NewNode() *Node {
	n := &Node{}
	n.parent = n
	return n
}

// find walks parent pointers to the set representative. A nil receiver
// (or a nil parent anywhere along the chain) yields nil.
func (n *Node) find() *Node {
	cur := n
	for cur != nil && cur != cur.parent {
		cur = cur.parent
	}
	return cur
}
// UnionFindSet is a lazily-populated disjoint-set over integer labels.
type UnionFindSet struct {
	m []*Node
}

// NewUnionFindSet returns a set able to hold labels 0..n-1.
func NewUnionFindSet(n int) *UnionFindSet {
	return &UnionFindSet{m: make([]*Node, n)}
}

// getNode returns the node for label n, creating it on first use.
func (s *UnionFindSet) getNode(n int) *Node {
	if s.m[n] == nil {
		s.m[n] = NewNode()
	}
	return s.m[n]
}

// union merges the sets containing a and b (no-op if already joined).
func (s *UnionFindSet) union(a, b *Node) {
	rootA, rootB := a.find(), b.find()
	if rootA == rootB {
		return
	}
	rootA.parent = rootB
}
|
package payapi
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"tpay_backend/utils"
)
// Tpay is a client for the Tpay payment gateway.
type Tpay struct {
	Host      string // gateway base URL
	AccNo     string // merchant account id
	SecretKey string // merchant signing secret
}

// NewTpay builds a Tpay client for the given gateway host and merchant
// credentials.
func NewTpay(host, appId, appSecret string) *Tpay {
	client := Tpay{
		Host:      host,
		AccNo:     appId,
		SecretKey: appSecret,
	}
	return &client
}
// CommonRequest carries the fields every gateway request must include.
type CommonRequest struct {
	MerchantNo string `json:"merchant_no"`
	Timestamp int64 `json:"timestamp"`
}
//--------------------------Pay---------------------------------------------
// PayReq is the request payload for creating a payment order.
type PayReq struct {
	CommonRequest
	Subject string `json:"subject"`
	Amount int64 `json:"amount"`
	Currency string `json:"currency"`
	MchOrderNo string `json:"mch_order_no"`
	TradeType string `json:"trade_type"`
	NotifyUrl string `json:"notify_url"`
	ReturnUrl string `json:"return_url"`
	Attach string `json:"attach"`
}

// PayRes is the gateway response for Pay.
// Example error response:
// {"code":"AA300098","msg":"duplicate order number"}
type PayRes struct {
	Code string `json:"code"`
	Msg string `json:"msg"`
	Data struct {
		CodeUrl string `json:"code_url"`
	}
}
// Pay creates a payment order: the request is JSON-encoded, signed with
// the merchant secret, POSTed to <host>/pay, and the response body is
// decoded into a PayRes.
func (t *Tpay) Pay(req PayReq) (*PayRes, error) {
	req.CommonRequest.MerchantNo = t.AccNo
	req.CommonRequest.Timestamp = time.Now().Unix()
	dataByte, jerr := json.Marshal(req)
	if jerr != nil {
		return nil, jerr
	}
	data := url.Values{}
	data.Set("data", string(dataByte))
	// Sign the payload with MD5(data + secret).
	data.Set("sign", utils.Md5(string(dataByte)+t.SecretKey))
	// POST form request.
	body, resErr := utils.PostForm(strings.TrimRight(t.Host, "/")+"/pay", data)
	if resErr != nil {
		return nil, resErr
	}
	fmt.Println("body:", string(body))
	// Decode the response instead of discarding it (was `return nil, nil`).
	res := &PayRes{}
	if err := json.Unmarshal(body, res); err != nil {
		return nil, err
	}
	return res, nil
}
//--------------------------PayOrderQuery-----------------------------------
// PayOrderQueryReq is the request payload for querying a payment order,
// by merchant order number and/or gateway order number.
type PayOrderQueryReq struct {
	CommonRequest
	MchOrderNo string `json:"mch_order_no"`
	OrderNo string `json:"order_no"`
}

// PayOrderQueryRes is the gateway response for PayOrderQuery.
type PayOrderQueryRes struct {
	Code string `json:"code"`
	Msg string `json:"msg"`
	Data struct {
		CodeUrl string `json:"code_url"`
		OrderNo string `json:"order_no"`
	}
}
// PayOrderQuery looks up a payment order: the request is JSON-encoded,
// signed, POSTed to <host>/pay-order-query, and the response decoded.
func (t *Tpay) PayOrderQuery(req PayOrderQueryReq) (*PayOrderQueryRes, error) {
	req.CommonRequest.MerchantNo = t.AccNo
	req.CommonRequest.Timestamp = time.Now().Unix()
	dataByte, jerr := json.Marshal(req)
	if jerr != nil {
		return nil, jerr
	}
	data := url.Values{}
	data.Set("data", string(dataByte))
	data.Set("sign", utils.Md5(string(dataByte)+t.SecretKey))
	// POST form request.
	body, resErr := utils.PostForm(strings.TrimRight(t.Host, "/")+"/pay-order-query", data)
	if resErr != nil {
		return nil, resErr
	}
	fmt.Println("body:", string(body))
	// Decode the response instead of discarding it (was `return nil, nil`).
	res := &PayOrderQueryRes{}
	if err := json.Unmarshal(body, res); err != nil {
		return nil, err
	}
	return res, nil
}
//--------------------------Transfer----------------------------------------
// TransferReq is the request payload for a bank transfer (payout).
type TransferReq struct {
	CommonRequest
	Amount int64 `json:"amount"` // order amount
	Currency string `json:"currency"` // currency code
	MchOrderNo string `json:"mch_order_no"` // external order number (merchant system)
	TradeType string `json:"trade_type"` // trade type
	NotifyUrl string `json:"notify_url"` // asynchronous notification URL
	ReturnUrl string `json:"return_url"` // synchronous redirect URL
	Attach string `json:"attach"` // echoed back unchanged
	BankName string `json:"bank_name"` // receiving bank name
	BankCardHolderName string `json:"bank_card_holder_name"` // card holder name
	BankCardNo string `json:"bank_card_no"` // bank card number
	BankBranchName string `json:"bank_branch_name,optional"` // receiving branch name
}

// TransferRes is the gateway response for Transfer.
// Example error response:
// {"code":"AA300098","msg":"duplicate order number"}
type TransferRes struct {
	Code string `json:"code"`
	Msg string `json:"msg"`
	Data struct {
		CodeUrl string `json:"code_url"`
	}
}
// Transfer creates a payout order: the request is JSON-encoded, signed,
// POSTed to <host>/transfer, and the response decoded into a TransferRes.
func (t *Tpay) Transfer(req TransferReq) (*TransferRes, error) {
	req.CommonRequest.MerchantNo = t.AccNo
	req.CommonRequest.Timestamp = time.Now().Unix()
	dataByte, jerr := json.Marshal(req)
	if jerr != nil {
		return nil, jerr
	}
	data := url.Values{}
	data.Set("data", string(dataByte))
	data.Set("sign", utils.Md5(string(dataByte)+t.SecretKey))
	// POST form request.
	body, resErr := utils.PostForm(strings.TrimRight(t.Host, "/")+"/transfer", data)
	if resErr != nil {
		return nil, resErr
	}
	fmt.Println("body:", string(body))
	// Decode the response instead of discarding it (was `return nil, nil`).
	res := &TransferRes{}
	if err := json.Unmarshal(body, res); err != nil {
		return nil, err
	}
	return res, nil
}
//--------------------------TransferOrderQuery-----------------------------
// TransferOrderQueryReq is the request payload for querying a transfer
// order by merchant order number and/or gateway order number.
type TransferOrderQueryReq struct {
	CommonRequest
	MchOrderNo string `json:"mch_order_no"`
	OrderNo string `json:"order_no"`
}

// TransferOrderQueryRes is the gateway response for TransferOrderQuery.
type TransferOrderQueryRes struct {
	Code string `json:"code"`
	Msg string `json:"msg"`
	Data struct {
		CodeUrl string `json:"code_url"`
	}
}
// TransferOrderQuery looks up a transfer order: the request is
// JSON-encoded, signed, POSTed to <host>/transfer-order-query, and the
// response decoded.
func (t *Tpay) TransferOrderQuery(req TransferOrderQueryReq) (*TransferOrderQueryRes, error) {
	req.CommonRequest.MerchantNo = t.AccNo
	req.CommonRequest.Timestamp = time.Now().Unix()
	dataByte, jerr := json.Marshal(req)
	if jerr != nil {
		return nil, jerr
	}
	data := url.Values{}
	data.Set("data", string(dataByte))
	data.Set("sign", utils.Md5(string(dataByte)+t.SecretKey))
	// POST form request.
	body, resErr := utils.PostForm(strings.TrimRight(t.Host, "/")+"/transfer-order-query", data)
	if resErr != nil {
		return nil, resErr
	}
	fmt.Println("body:", string(body))
	// Decode the response instead of discarding it (was `return nil, nil`).
	res := &TransferOrderQueryRes{}
	if err := json.Unmarshal(body, res); err != nil {
		return nil, err
	}
	return res, nil
}
//--------------------------QueryBalance-----------------------------------
// QueryBalanceRes is the gateway response for a balance query.
// NOTE(review): QueryBalance currently declares *PayOrderQueryRes as its
// return type, so this struct appears unused — confirm intent.
type QueryBalanceRes struct {
	Code string `json:"code"`
	Msg string `json:"msg"`
	Data struct {
		Balance int64 `json:"balance"`
		Currency string `json:"currency"`
	}
}
// QueryBalance fetches the merchant account balance from
// <host>/query-balance.
//
// NOTE(review): the declared return type is *PayOrderQueryRes even though
// QueryBalanceRes exists; the signature is kept for caller compatibility —
// confirm which type the endpoint actually returns.
func (t *Tpay) QueryBalance() (*PayOrderQueryRes, error) {
	req := CommonRequest{}
	req.MerchantNo = t.AccNo
	req.Timestamp = time.Now().Unix()
	dataByte, jerr := json.Marshal(req)
	if jerr != nil {
		return nil, jerr
	}
	data := url.Values{}
	data.Set("data", string(dataByte))
	data.Set("sign", utils.Md5(string(dataByte)+t.SecretKey))
	// POST form request.
	body, resErr := utils.PostForm(strings.TrimRight(t.Host, "/")+"/query-balance", data)
	if resErr != nil {
		return nil, resErr
	}
	fmt.Println("body:", string(body))
	// Decode the response instead of discarding it (was `return nil, nil`).
	res := &PayOrderQueryRes{}
	if err := json.Unmarshal(body, res); err != nil {
		return nil, err
	}
	return res, nil
}
//--------------------------------------------------------------------------
|
package main
import (
"github.com/Jenkins/router"
"github.com/Jenkins/setting"
"github.com/phjt-go/logger"
)
// main loads the logging configuration and then starts the HTTP router.
func main() {
	// Load the logger configuration path from settings.
	logger.SetLogger(setting.GetString("logger_jsonFile"))
	// Start the router (blocks).
	router.Run()
}
|
package accumulate
// Accumulate applies converter to every element of list and returns the
// converted values in their original order.
func Accumulate(list []string, converter func(string) string) []string {
	// Pre-size to the exact length; an empty input still yields a non-nil,
	// empty slice like the original.
	result := make([]string, 0, len(list))
	for _, item := range list {
		result = append(result, converter(item))
	}
	return result
}
package middleware
import (
response "github.com/anraku/echo-sample/response"
"net/http"
"github.com/labstack/echo"
)
// ServerHeader middleware adds a `Server` header to the response.
func ServerHeader() echo.MiddlewareFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(ctx echo.Context) error {
			// Stamp the response before delegating down the chain.
			ctx.Response().Header().Set(echo.HeaderServer, "Echo/3.0")
			return next(ctx)
		}
	}
}
// AuthToken validates the "auth-token" request header and rejects the
// request with 401 when it does not match.
//
// NOTE(review): the expected token is hard-coded to "abc" — replace with
// a configurable secret before any production use.
func AuthToken() echo.MiddlewareFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			token := c.Request().Header.Get("auth-token")
			if token != "abc" {
				return c.JSON(http.StatusUnauthorized, response.CreateResponse(
					map[string]interface{}{
						"message": "error response",
					},
				))
			}
			return next(c)
		}
	}
}
|
package main
import (
"context"
"fmt"
"log"
"net"
"os"
"os/signal"
"syscall"
"github.com/bobrovka/calendar/internal"
app "github.com/bobrovka/calendar/internal/calendar-app"
"github.com/bobrovka/calendar/internal/scheduler"
"github.com/bobrovka/calendar/internal/scheduler/producer"
"github.com/bobrovka/calendar/internal/service"
pg "github.com/bobrovka/calendar/internal/storage/storage-pg"
"github.com/bobrovka/calendar/pkg/calendar/api"
"github.com/go-errors/errors"
"github.com/heetch/confita"
"github.com/heetch/confita/backend/file"
_ "github.com/jackc/pgx/v4/stdlib"
flag "github.com/spf13/pflag"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// configPath holds the value of the --config/-c flag.
var configPath string

// ErrOSTerminated signals a shutdown triggered by SIGINT/SIGTERM.
var ErrOSTerminated = errors.New("os terminated")
// init registers the --config/-c command-line flag.
func init() {
	flag.StringVarP(&configPath, "config", "c", "", "path to config file")
}
// main wires up logging, storage, the gRPC event service and the RabbitMQ
// scheduler, then blocks until the gRPC server fails, the scheduler
// stops, or the OS asks for termination; it then shuts everything down.
func main() {
	flag.Parse()
	cfg := getConfig()

	logCfg := zap.NewDevelopmentConfig()
	logCfg.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	logCfg.EncoderConfig.EncodeTime = zapcore.EpochMillisTimeEncoder
	logCfg.OutputPaths = []string{cfg.LogFile}
	logger, err := logCfg.Build()
	failOnError(err, "cant create logger")
	defer logger.Sync()
	sugaredLogger := logger.Sugar()

	storage, err := pg.NewStoragePg(cfg.PgUser, cfg.PgPassword, cfg.PgHost, cfg.PgPort, cfg.PgName)
	failOnError(err, "cannot create storage")
	app, err := app.NewCalendar(storage, sugaredLogger)
	failOnError(err, "cannot create app instance")
	eventService := service.NewEventService(app, sugaredLogger)

	// Create grpc server
	grpcServer := grpc.NewServer()
	reflection.Register(grpcServer)
	api.RegisterEventsServer(grpcServer, eventService)
	lis, err := net.Listen("tcp", cfg.HTTPListen)
	failOnError(err, fmt.Sprint("cannot listen ", cfg.HTTPListen))

	exitChannel := make(chan error)
	go func() {
		// start grpc server
		exitChannel <- grpcServer.Serve(lis)
	}()
	go func() {
		// signal.Notify requires a buffered channel: with an unbuffered
		// one, a signal delivered before the receive is ready is dropped.
		termChan := make(chan os.Signal, 1)
		signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM)
		<-termChan
		exitChannel <- ErrOSTerminated
	}()

	producer := producer.NewProducerMQ(fmt.Sprintf(
		"amqp://%s:%s@%s:%d",
		cfg.RabbitUser,
		cfg.RabbitPassword,
		cfg.RabbitHost,
		cfg.RabbitPort,
	), "event.exchange", "direct", "event.queue", "event.notification")
	sched := scheduler.NewScheduler(producer, storage, sugaredLogger)
	go func() {
		exitChannel <- sched.Run()
	}()

	// Block until any component stops, then shut down gracefully.
	err = <-exitChannel
	log.Println("stopped with err: ", err)
	grpcServer.GracefulStop()
	err = sched.Stop()
	if err != nil {
		log.Println("cannot gracefully stop scheduler, err: ", err)
	}
}
// failOnError aborts the process with msg and the error when err is
// non-nil; it is a no-op otherwise.
func failOnError(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%s: %s", msg, err)
}
// getConfig loads the service configuration from the file named by the
// --config flag, applying defaults for the listen address and log level.
// It terminates the process when the flag is missing or the file cannot
// be read.
func getConfig() *internal.Config {
	if configPath == "" {
		log.Fatal("no config file")
	}
	// Defaults, overridden by whatever the config file provides.
	cfg := &internal.Config{
		HTTPListen: "127.0.0.1:50051",
		LogLevel: "debug",
	}
	loader := confita.NewLoader(
		file.NewBackend(configPath),
	)
	err := loader.Load(context.Background(), cfg)
	failOnError(err, "cannot read config")
	fmt.Println(cfg)
	return cfg
}
|
package copyfile
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// tests is the table driving TestCopy. Each case optionally materialises
// a source file (fromFileName/fromFileData), runs Copy with the given
// offset/limit, expects an error iff err is true, and — when toFileData
// is set — compares the destination file content afterwards.
var tests = []struct {
	fromFileName, toFileName string
	offset, limit int
	err bool
	fromFileData, toFileData []byte
}{
	// Invalid arguments: negative offset / negative limit.
	{
		offset: -1,
		err: true,
	},
	{
		limit: -1,
		err: true,
	},
	// Missing source file.
	{
		fromFileName: "notfound.txt",
		err: true,
	},
	// Unwritable destination.
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbob"),
		toFileName: "/root/1.txt",
		err: true,
	},
	// Offset / limit ranges outside the 7-byte source.
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbob"),
		toFileName: "/dev/null",
		offset: 100,
		err: true,
	},
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbob"),
		toFileName: "/dev/null",
		limit: 100,
		err: true,
	},
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbob"),
		toFileName: "/dev/null",
		offset: 5,
		limit: 5,
		err: true,
	},
	// Successful copies (content discarded via /dev/null).
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbob"),
		toFileName: "/dev/null",
		err: false,
	},
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbob"),
		toFileName: "/dev/null",
		offset: 1,
		limit: 1,
		err: false,
	},
	// Successful copies with verified destination content.
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbob"),
		toFileName: "result.txt",
		toFileData: []byte("testbob"),
		err: false,
	},
	{
		fromFileName: "found.txt",
		fromFileData: []byte("testbobasd345"),
		toFileName: "result.txt",
		toFileData: []byte("stboba"),
		offset: 2,
		limit: 6,
		err: false,
	},
}
// TestCopy runs Copy over every case in the tests table, checking the
// error expectation and (when provided) the destination file content.
func TestCopy(t *testing.T) {
	for _, tc := range tests {
		t.Run("", func(t *testing.T) {
			// Materialise the source file when the case provides content.
			if tc.fromFileData != nil {
				require.NoError(t, ioutil.WriteFile(tc.fromFileName, tc.fromFileData, 0600))
			}

			err := Copy(tc.fromFileName, tc.toFileName, tc.limit, tc.offset)
			if tc.err {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}

			// Compare destination content when the case expects output.
			if tc.toFileData != nil {
				buffer, readErr := ioutil.ReadFile(tc.toFileName)
				require.NoError(t, readErr)
				assert.Equal(t, tc.toFileData, buffer)
			}
		})
	}
}
|
// Package poll provides an easy way to create polls
package poll
import "errors"
// Sentinel errors returned by Poll.Vote.
// NOTE(review): Go convention would name these ErrOptionNotFound /
// ErrVoterAlreadyVoted with lowercase messages; renaming would break
// callers, so the deviation is only flagged here.
var (
	ErrorOptionNotFound = errors.New("Option not found")
	ErrorVoterAlreadyVoted = errors.New("Voter already voted")
)
// Poll represents a single-question poll: Votes maps each answer option to
// its current tally, and Voters records everyone who has already voted.
type Poll struct {
	Question string
	Votes map[string]int
	Voters []string
}
// New instantiates a poll for the given question, registering every option
// with an initial tally of zero votes.
func New(question string, options []string) *Poll {
	votes := make(map[string]int)
	for _, option := range options {
		votes[option] = 0
	}
	return &Poll{Question: question, Votes: votes}
}
// Vote records one vote for option on behalf of voter. It rejects voters
// that have already voted (ErrorVoterAlreadyVoted) and options that were
// not registered at construction time (ErrorOptionNotFound).
func (p *Poll) Vote(option string, voter string) error {
	if hasVoter(p, voter) {
		return ErrorVoterAlreadyVoted
	}
	_, known := p.Votes[option]
	if !known {
		return ErrorOptionNotFound
	}
	p.Votes[option]++
	p.Voters = append(p.Voters, voter)
	return nil
}
// hasVoter reports whether voter has already cast a vote on poll p.
func hasVoter(p *Poll, voter string) bool {
	for i := range p.Voters {
		if p.Voters[i] == voter {
			return true
		}
	}
	return false
}
|
package lingua
import (
"bytes"
"errors"
"io/ioutil"
"net/http"
"reflect"
"testing"
)
// MockClient satisfies the HTTP-client interface Lingua expects, delegating
// every request to the package-level MockedDoFunction test hook.
type MockClient struct{}
// MockedDoFunction is the behaviour of MockClient.Do. It is mutable
// package-level state that subtests overwrite, so these tests must not run
// in parallel. The default fails every request.
var MockedDoFunction = func(req *http.Request) (*http.Response, error) {
	return nil, errors.New("boom")
}
// Do implements the client interface by forwarding to MockedDoFunction.
func (mc *MockClient) Do(req *http.Request) (*http.Response, error) {
	return MockedDoFunction(req)
}
// TestDefine covers Lingua.Define end to end against a mocked HTTP client:
// transport errors, outgoing request headers, and parsing of a canned API
// response into a Summary. Each subtest reassigns MockedDoFunction, so the
// subtests are order-dependent and must not run in parallel.
func TestDefine(t *testing.T) {
	t.Run("When an errors occurs requesting from Lingua", func(t *testing.T) {
		linguaClient := Lingua{HttpClient: &MockClient{}}
		_, err := linguaClient.Define("jejune")
		if err == nil {
			t.Errorf("Expected to receive an error, but did not")
		}
	})
	t.Run("It sends the correct headers", func(t *testing.T) {
		word := "jejune"
		expectedHeadersAndValues := map[string]string{
			"x-rapidapi-key":  "test-key",
			"x-rapidapi-host": ApiHost,
			"x-debug":         word,
		}
		// The assertions run inside the mocked Do so they can inspect the
		// outgoing *http.Request before any response handling happens.
		MockedDoFunction = func(req *http.Request) (*http.Response, error) {
			for key, value := range expectedHeadersAndValues {
				got := req.Header.Get(key)
				if got != value {
					t.Errorf("Header \"%s\": got %s, want %s", key, got, value)
				}
			}
			return nil, errors.New("boom")
		}
		linguaClient := Lingua{HttpClient: &MockClient{}, ApiKey: "test-key"}
		// Return values are deliberately ignored: the checks live in the mock.
		linguaClient.Define(word)
	})
	t.Run("It returns a valid summary", func(t *testing.T) {
		MockedDoFunction = func(req *http.Request) (*http.Response, error) {
			return &http.Response{
				StatusCode: 200,
				Body: ioutil.NopCloser(
					bytes.NewReader(
						[]byte(`{
  "entries": [
    {
      "entry": "jejune",
      "lexemes": [
        {
          "partOfSpeech": "adjective",
          "senses": [
            {
              "definition": ""
            }
          ]
        }
      ],
      "pronunciations": [
        {
          "context": {
            "regions": [
              "United States"
            ]
          },
          "transcriptions": [
            {
              "notation": "IPA",
              "transcription": "/jay-june/"
            }
          ]
        }
      ]
    }
  ]
}`))),
			}, nil
		}
		client := Lingua{HttpClient: &MockClient{}}
		want := Summary{
			Word:          "jejune",
			Pronunciation: "/jay-june/",
			Definitions: []Definition{
				{
					Meaning:      "",
					PartOfSpeech: "adjective",
				},
			},
		}
		got, err := client.Define("jejune")
		if err != nil {
			t.Errorf("Did not expect error, but got %v", err)
		}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %q, want %q", got, want)
		}
	})
}
|
package main
import (
"fmt"
"math"
)
// main is a teaching walkthrough of Go constants: typed vs untyped
// constants, default types, and compile-time evaluation.
// NOTE(review): this file intentionally does NOT compile — several lines
// are deliberate errors kept to illustrate the rules (reassigning x, the
// runtime-evaluated const z, cross-type assignments, and the second
// declaration of a). Comment those lines out to run the rest.
func main() {
	const a = 50
	fmt.Println(a)
	const (
		name = "John"
		age = 50
		country = "Canada"
	)
	fmt.Println(name, age, country)
	const x = 66 //allowed
	x = 86 // reassignment not allowed
	var y = math.Sqrt(5) //allowed
	const z = math.Sqrt(5) //not allowed, since the assignment
	//happens at runtime which is not possible
	//because it is a constant
	const hello1 = "Hello world" //untyped string constant
	const typedHello1 string = "Hello world" //typed string constant
	//One way to think about untyped constants is that they
	//live in a kind of ideal space of values, a space less
	//restrictive than Go’s full type system. But to do
	//anything with them, we need to assign them to variables,
	//and when that happens the variable (not the constant
	//itself) needs a type, and the constant can tell the
	//variable what type it should have. In this example,
	//str becomes a value of type string because the untyped
	//string constant gives the declaration its default type, string.
	const sam = "Sam"
	var varSam = sam
	fmt.Printf("type %T value %v", varSam, varSam) //output:- type string value Sam
	//**following has nothing to do with typed and untyped as
	//**they are for constants, although similar properties
	//**will help understand typed and untyped constants better
	var defaultName = "Sam" //allowed
	type myString string //aliasing
	var customName myString = "Sam" //allowed
	customName = defaultName //not allowed, because although both are fundamentally strings
	//but different types of strings(one is string and another is myString),
	//Go compiler considers them of different types
	//boolean constants
	const trueConst = true
	var defaultbool = trueConst //allowed
	type mybool bool
	var custombool mybool = trueConst //allowed
	defaultbool = custombool //not allowed
	//numeric constants
	var i = 5
	var j = 5.5
	var k = 5 + 6i
	fmt.Printf("i is of type %T, j is of type %T, k is of type %T", i, j, k)
	//output:- i is of type int, j is of type float64, c is of type complex128
	//therefore, variables assume the type by the values supplied to them
	//untyped constants act in similar way, they are like values with implicit types
	const a = 5
	var intVar int = a
	var int32Var int32 = a
	var float64Var float64 = a
	var complex64Var complex64 = a
	fmt.Printf("intVar", intVar, "\nint32Var", int32Var, "\nfloat64Var", float64Var, "\ncomplex64Var", complex64Var)
	//output:- intVar 5
	// int32Var 5
	// float64Var 5
	// complex64Var (5 + 0i)
	//therefore untyped constants assume their default type depending upon context and hence
	//act as values supplied above in the line 62 to line 64
	//example to reiterate what is written in line 79 and 80
	const X1 = 5.9
	const Y1 = 8
	var a = X1 / Y1
	var b = 5.9 / 8
	if a == b {
		fmt.Println("True")
	}
	//output:- True
}
|
package parser
// scanner represents a sequence-recording lexing source-code scanner.
// Records accumulates the fragments read so far so that Fragment can later
// merge them into one composite fragment.
type scanner struct {
	Lexer *lexer
	Records []Fragment
}
// newScanner creates a new scanner instance backed by the given lexer.
// It panics when lexer is nil because a scanner cannot operate without one.
func newScanner(lexer *lexer) *scanner {
	if lexer == nil {
		panic("missing lexer during scanner initialization")
	}
	sc := &scanner{Lexer: lexer}
	return sc
}
// New creates a new scanner succeeding the original one
// dropping its record history. The underlying lexer is shared (aliased),
// not copied, so both scanners advance the same cursor.
func (sc *scanner) New() *scanner {
	return &scanner{Lexer: sc.Lexer}
}
// ReadExact advances the scanner by 1 exact token returning either the read
// fragment or nil if the expectation didn't match. Any successfully read,
// non-nil token is also appended to the record history.
func (sc *scanner) ReadExact(
	expectation []rune,
	kind FragmentKind,
) (tk *Token, match bool, err error) {
	tk, match, err = sc.Lexer.ReadExact(expectation, kind)
	if err == nil && tk != nil {
		sc.Records = append(sc.Records, tk)
	}
	return tk, match, err
}
// ReadUntil advances the scanner until fn signals the end of the token,
// returning either the read fragment or nil if nothing was read.
// (The previous comment was copy-pasted from ReadExact and described the
// wrong behavior.) A non-nil token is appended to the record history.
func (sc *scanner) ReadUntil(
	fn func(index uint, cursor Cursor) bool,
	kind FragmentKind,
) (tk *Token, err error) {
	tk, err = sc.Lexer.ReadUntil(fn, kind)
	if err != nil || tk == nil {
		return
	}
	sc.Records = append(sc.Records, tk)
	return
}
// Append appends a fragment to the records when the pattern is a *Rule,
// either directly or through its terminal pattern. Nil fragments and
// non-rule patterns are ignored.
func (sc *scanner) Append(
	pattern Pattern,
	fragment Fragment,
) {
	if fragment == nil {
		return
	}
	isRule := false
	if _, ok := pattern.(*Rule); ok {
		isRule = true
	} else if termPt := pattern.TerminalPattern(); termPt != nil {
		_, isRule = termPt.(*Rule)
	}
	if isRule {
		sc.Records = append(sc.Records, fragment)
	}
}
// Fragment returns a typed composite fragment spanning all recorded
// fragments. With an empty record history it returns a zero-width
// fragment anchored at the lexer's current position.
func (sc *scanner) Fragment(kind FragmentKind) Fragment {
	if len(sc.Records) == 0 {
		cursor := sc.Lexer.cr
		return &Construct{
			Token: &Token{
				VBegin: cursor,
				VEnd:   cursor,
				VKind:  kind,
			},
		}
	}
	first := sc.Records[0]
	last := sc.Records[len(sc.Records)-1]
	return &Construct{
		Token: &Token{
			VBegin: first.Begin(),
			VEnd:   last.End(),
			VKind:  kind,
		},
		VElements: sc.Records,
	}
}
// Set sets the scanner's lexer position and tidies up the record history,
// dropping any records that begin at or after the new cursor position.
func (sc *scanner) Set(cursor Cursor) {
	sc.Lexer.cr = cursor
	sc.TidyUp()
}
// TidyUp removes all records that begin at or after the lexer's current
// position, returning how many were dropped.
// NOTE(review): the backwards scan stops at the first record that starts
// before the cursor — this assumes Records are ordered by begin index
// (appended in source order); confirm against ReadExact/ReadUntil/Append.
func (sc *scanner) TidyUp() (removed int) {
	pos := sc.Lexer.cr
	for ix := len(sc.Records) - 1; ix >= 0; ix-- {
		if sc.Records[ix].Begin().Index < pos.Index {
			break
		}
		removed++
	}
	// Remove the last n records
	sc.Records = sc.Records[:len(sc.Records)-removed]
	return
}
|
// Licensed to SolID under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. SolID licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package token
import (
"context"
"fmt"
"testing"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
corev1 "zntr.io/solid/api/gen/go/oidc/core/v1"
"zntr.io/solid/api/oidc"
generatormock "zntr.io/solid/pkg/sdk/generator/mock"
"zntr.io/solid/pkg/sdk/rfcerrors"
"zntr.io/solid/pkg/server/storage"
storagemock "zntr.io/solid/pkg/server/storage/mock"
)
// Test_service_Revoke table-tests the token revocation service: request
// validation failures (nil request/client/token), client and token storage
// lookup failures, a failing revoke call, and the successful path. Each
// case's prepare hook arms the client-reader and token-storage mocks.
func Test_service_Revoke(t *testing.T) {
	type args struct {
		ctx context.Context
		req *corev1.TokenRevocationRequest
	}
	tests := []struct {
		name    string
		args    args
		prepare func(*storagemock.MockClientReader, *storagemock.MockToken)
		want    *corev1.TokenRevocationResponse
		wantErr bool
	}{
		{
			name: "nil request",
			args: args{
				ctx: context.Background(),
			},
			wantErr: true,
			want: &corev1.TokenRevocationResponse{
				Error: rfcerrors.InvalidRequest().Build(),
			},
		},
		{
			name: "nil client authentication",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{},
			},
			wantErr: true,
			want: &corev1.TokenRevocationResponse{
				Error: rfcerrors.InvalidClient().Build(),
			},
		},
		{
			name: "nil token",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{},
				},
			},
			wantErr: true,
			want: &corev1.TokenRevocationResponse{
				Error: rfcerrors.InvalidRequest().Build(),
			},
		},
		{
			name: "empty token",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{},
					Token:  "",
				},
			},
			wantErr: true,
			want: &corev1.TokenRevocationResponse{
				Error: rfcerrors.InvalidRequest().Build(),
			},
		},
		// ---------------------------------------------------------------------
		{
			name: "client not found",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{
						ClientId: "s6BhdRkqt3",
					},
					Token: "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				},
			},
			prepare: func(clients *storagemock.MockClientReader, tokens *storagemock.MockToken) {
				clients.EXPECT().Get(gomock.Any(), "s6BhdRkqt3").Return(nil, storage.ErrNotFound)
			},
			wantErr: true,
			want: &corev1.TokenRevocationResponse{
				Error: rfcerrors.InvalidClient().Build(),
			},
		},
		{
			name: "client storage error",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{
						ClientId: "s6BhdRkqt3",
					},
					Token: "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				},
			},
			prepare: func(clients *storagemock.MockClientReader, tokens *storagemock.MockToken) {
				clients.EXPECT().Get(gomock.Any(), "s6BhdRkqt3").Return(nil, fmt.Errorf("foo"))
			},
			wantErr: true,
			want: &corev1.TokenRevocationResponse{
				Error: rfcerrors.ServerError().Build(),
			},
		},
		// ---------------------------------------------------------------------
		{
			name: "token not found",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{
						ClientId: "s6BhdRkqt3",
					},
					Token: "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				},
			},
			prepare: func(clients *storagemock.MockClientReader, tokens *storagemock.MockToken) {
				clients.EXPECT().Get(gomock.Any(), "s6BhdRkqt3").Return(&corev1.Client{
					GrantTypes: []string{oidc.GrantTypeClientCredentials},
				}, nil)
				tokens.EXPECT().GetByValue(gomock.Any(), "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo").Return(nil, storage.ErrNotFound)
			},
			wantErr: true,
			want:    &corev1.TokenRevocationResponse{},
		},
		{
			name: "token storage error",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{
						ClientId: "s6BhdRkqt3",
					},
					Token: "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				},
			},
			prepare: func(clients *storagemock.MockClientReader, tokens *storagemock.MockToken) {
				clients.EXPECT().Get(gomock.Any(), "s6BhdRkqt3").Return(&corev1.Client{
					GrantTypes: []string{oidc.GrantTypeClientCredentials},
				}, nil)
				tokens.EXPECT().GetByValue(gomock.Any(), "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo").Return(nil, fmt.Errorf("foo"))
			},
			wantErr: true,
			want:    &corev1.TokenRevocationResponse{},
		},
		{
			name: "revoke storage error",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{
						ClientId: "s6BhdRkqt3",
					},
					Token: "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				},
			},
			prepare: func(clients *storagemock.MockClientReader, tokens *storagemock.MockToken) {
				clients.EXPECT().Get(gomock.Any(), "s6BhdRkqt3").Return(&corev1.Client{}, nil)
				tokens.EXPECT().GetByValue(gomock.Any(), "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo").Return(&corev1.Token{
					Status:  corev1.TokenStatus_TOKEN_STATUS_ACTIVE,
					TokenId: "123456789",
					Value:   "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				}, nil)
				tokens.EXPECT().Revoke(gomock.Any(), "123456789").Return(fmt.Errorf("foo"))
			},
			wantErr: true,
			want:    &corev1.TokenRevocationResponse{},
		},
		// ---------------------------------------------------------------------
		{
			name: "valid",
			args: args{
				ctx: context.Background(),
				req: &corev1.TokenRevocationRequest{
					Client: &corev1.Client{
						ClientId: "s6BhdRkqt3",
					},
					Token: "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				},
			},
			prepare: func(clients *storagemock.MockClientReader, tokens *storagemock.MockToken) {
				clients.EXPECT().Get(gomock.Any(), "s6BhdRkqt3").Return(&corev1.Client{}, nil)
				tokens.EXPECT().GetByValue(gomock.Any(), "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo").Return(&corev1.Token{
					Status:  corev1.TokenStatus_TOKEN_STATUS_ACTIVE,
					TokenId: "123456789",
					Value:   "cwE.HcbVtkyQCyCUfjxYvjHNODfTbVpSlmyo",
				}, nil)
				tokens.EXPECT().Revoke(gomock.Any(), "123456789").Return(nil)
			},
			wantErr: false,
			want:    &corev1.TokenRevocationResponse{},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			// Arm mocks
			clients := storagemock.NewMockClientReader(ctrl)
			accessTokens := generatormock.NewMockToken(ctrl)
			idTokens := generatormock.NewMockIdentity(ctrl)
			tokens := storagemock.NewMockToken(ctrl)
			authorizationRequests := storagemock.NewMockAuthorizationRequest(ctrl)
			authorizationCodeSessions := storagemock.NewMockAuthorizationCodeSession(ctrl)
			deviceCodeSessions := storagemock.NewMockDeviceCodeSession(ctrl)
			// Prepare them
			if tt.prepare != nil {
				tt.prepare(clients, tokens)
			}
			// instantiate service
			underTest := New(accessTokens, idTokens, clients, authorizationRequests, authorizationCodeSessions, deviceCodeSessions, tokens)
			got, err := underTest.Revoke(tt.args.ctx, tt.args.req)
			if (err != nil) != tt.wantErr {
				t.Errorf("service.Revoke() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if diff := cmp.Diff(got, tt.want, cmpOpts...); diff != "" {
				t.Errorf("service.Revoke() res = %s", diff)
			}
		})
	}
}
|
package rados
/*
#cgo LDFLAGS: -lrados
#include "stdlib.h"
#include "rados/librados.h"
*/
import "C"
import (
"fmt"
"unsafe"
)
// Context represents a RADOS IO context for the pool Pool.
// ctx holds the underlying librados io-context handle created by
// rados_ioctx_create and released by Release.
type Context struct {
	Pool string
	ctx C.rados_ioctx_t
}
// NewContext creates a new RADOS IO context for a given pool, which is used
// to do IO operations. The pool must exist (see Rados.PoolCreate()).
func (r *Rados) NewContext(pool string) (*Context, error) {
	if r.rados == nil {
		return nil, fmt.Errorf("RADOS not connected")
	}
	ioctx := &Context{Pool: pool}
	cpool := C.CString(pool)
	defer C.free(unsafe.Pointer(cpool))
	if ret := C.rados_ioctx_create(r.rados, cpool, &ioctx.ctx); ret < 0 {
		return nil, radosReturnCodeError(ret)
	}
	return ioctx, nil
}
// Release this RADOS IO context.
//
// TODO: track all uncompleted async operations before calling
// rados_ioctx_destroy(), because it doesn't do that itself.
func (c *Context) Release() error {
	C.rados_ioctx_destroy(c.ctx)
	// The destroy call yields no status; the error return is always nil,
	// presumably kept for API symmetry with other Context methods.
	return nil
}
// PoolInfo provides usage information about a pool. The fields mirror
// librados' rados_pool_stat_t: space usage, object counts (including
// clones, copies, missing/unfound/degraded objects), and cumulative
// read/write statistics.
type PoolInfo struct {
	BytesUsed uint64
	KBytesUsed uint64
	NObjects uint64
	NObjectClones uint64
	NObjectCopies uint64
	NObjectsMissingOnPrimary uint64
	NObjectsUnfound uint64
	NObjectsDegraded uint64
	BytesRead uint64
	BytesWritten uint64
	KBytesRead uint64
	KBytesWritten uint64
}
// PoolStat retrieves the current usage for pool referenced by the
// given context and returns them in the PoolInfo structure.
// Each C struct field is widened to uint64 for the Go-side PoolInfo.
func (c *Context) PoolStat() (*PoolInfo, error) {
	var pstat C.struct_rados_pool_stat_t
	if cerr := C.rados_ioctx_pool_stat(c.ctx, &pstat); cerr < 0 {
		return nil, radosReturnCodeError(cerr)
	}
	info := &PoolInfo{
		BytesUsed: uint64(pstat.num_bytes),
		KBytesUsed: uint64(pstat.num_kb),
		NObjects: uint64(pstat.num_objects),
		NObjectClones: uint64(pstat.num_object_clones),
		NObjectCopies: uint64(pstat.num_object_copies),
		NObjectsMissingOnPrimary: uint64(pstat.num_objects_missing_on_primary),
		NObjectsUnfound: uint64(pstat.num_objects_unfound),
		NObjectsDegraded: uint64(pstat.num_objects_degraded),
		BytesRead: uint64(pstat.num_rd),
		BytesWritten: uint64(pstat.num_wr),
		KBytesRead: uint64(pstat.num_rd_kb),
		KBytesWritten: uint64(pstat.num_wr_kb),
	}
	return info, nil
}
// LocatorSetKey sets the locator key or resets it if an empty key is
// provided. The C string is freed once librados has consumed it.
func (c *Context) LocatorSetKey(key string) error {
	if key == "" {
		C.rados_ioctx_locator_set_key(c.ctx, nil)
		return nil
	}
	ckey := C.CString(key)
	defer C.free(unsafe.Pointer(ckey))
	C.rados_ioctx_locator_set_key(c.ctx, ckey)
	return nil
}
|
// ˅
package main
// ˄
// PaintingTarget is the surface a painter draws on: Paint renders at the
// given x/y position and Clear erases previous output. The ˅/˄ marker
// comments look like generated-code anchors and are left in place.
type PaintingTarget interface {
	Paint(paintingPosX int, paintingPosY int)
	Clear()
	// ˅
	// ˄
}
// ˅
// ˄
|
package drouter_test
import "errors"
// successError is a sentinel error whose message is "success".
// NOTE(review): the semantics (signalling deliberate early termination to
// be treated as success) are inferred from the name — confirm at the call
// sites in this package.
var successError = errors.New("success")
|
package p09
// diStringMatch builds a permutation of 0..len(S) matching the pattern S:
// at position k, the byte 'I' demands d[k] < d[k+1] and any other byte
// ('D') demands d[k] > d[k+1]. Greedily taking the smallest unused value
// on 'I' and the largest on 'D' always satisfies the next comparison; the
// final slot receives the single remaining value.
func diStringMatch(S string) []int {
	low, high := 0, len(S)
	result := make([]int, 0, len(S)+1)
	for k := 0; k < len(S); k++ {
		if S[k] == 'I' {
			result = append(result, low)
			low++
		} else {
			result = append(result, high)
			high--
		}
	}
	// low == high here: exactly one value is left for the last position.
	return append(result, high)
}
|
package ginlogrus
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"time"
)
// Logger returns a gin middleware that logs every request through the given
// logrus logger, falling back to the standard logger when l is nil.
// Requests that accumulated private gin errors are logged as errors; all
// others are logged at a level derived from the response status code.
func Logger(l logrus.FieldLogger) gin.HandlerFunc {
	if l == nil {
		l = logrus.StandardLogger()
	}
	return func(c *gin.Context) {
		// Capture the path up front: downstream handlers may rewrite it.
		path := c.Request.URL.Path
		start := time.Now()
		c.Next()
		elapsed := time.Since(start)
		status := c.Writer.Status()
		if len(c.Errors) > 0 {
			l.Error(c.Errors.ByType(gin.ErrorTypePrivate).String())
			return
		}
		msg := fmt.Sprintf("%s %s %s [%d] %d %s %s (%s)",
			c.ClientIP(), c.Request.Method, path, status,
			c.Writer.Size(), c.Request.Referer(), c.Request.UserAgent(), elapsed)
		switch {
		case status > 499:
			l.Error(msg)
		case status > 399:
			l.Warn(msg)
		default:
			l.Info(msg)
		}
	}
}
|
package api
import "github.com/PhongVX/taskmanagement/internal/app/sprint"
// newSprintHandler wires up the sprint HTTP handler: it dials the default
// MongoDB session, builds the repository and service on top of it, and
// returns the resulting handler.
func newSprintHandler() (*sprint.Handler, error) {
	session, err := dialDefaultMongoDB()
	if err != nil {
		return nil, err
	}
	service := sprint.NewService(sprint.NewMongoDBRepository(session))
	return sprint.NewHTTPHandler(*service), nil
}
|
package utils
import (
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/layer5io/meshery/mesheryctl/internal/cli/root/config"
)
// getFixturesDirectory resolves the absolute path of the "fixtures"
// directory that sits next to this source file, using the caller's file
// location (runtime.Caller) rather than the process working directory.
func getFixturesDirectory() string {
	_, sourceFile, _, ok := runtime.Caller(0)
	if !ok {
		log.Fatal("helpers_test.go: Cannot get current working directory")
	}
	// get the fixtures file directory
	return filepath.Join(filepath.Dir(sourceFile), "fixtures")
}
// fixturesDir is the shared fixtures directory used by all tests in this file.
var fixturesDir = getFixturesDirectory()
// TestBackupConfigFile writes a throwaway config fixture, backs it up, and
// asserts the backup file appears on disk.
// NOTE(review): neither the fixture nor the backup file is cleaned up.
func TestBackupConfigFile(t *testing.T) {
	const name = "config.yaml"
	configFilePath := filepath.Join(fixturesDir, name)
	backupConfigFilePath := filepath.Join(fixturesDir, "config.bak.yaml")
	// creates a config file
	NewGoldenFile(t, name, fixturesDir).Write("mesheryctl")
	BackupConfigFile(configFilePath)
	// check if backup file is present or not
	if _, err := os.Stat(backupConfigFilePath); err != nil {
		t.Errorf("BackupConfigFile error = %v", err)
	}
}
// TestStringWithCharset checks only the length of the generated string,
// since the content is random on every call.
func TestStringWithCharset(t *testing.T) {
	const wantLen = 10
	if got := StringWithCharset(wantLen); len(got) != wantLen {
		t.Errorf("StringWithCharset got = %v want = %v", len(got), wantLen)
	}
}
// TestPrereq verifies that the environment prerequisite check succeeds.
func TestPrereq(t *testing.T) {
	if _, _, err := prereq(); err != nil {
		t.Errorf("prereq error = %v", err)
	}
}
// TestSetFileLocation exercises SetFileLocation while guarding the
// package-level path variables it mutates, restoring them afterwards so
// later tests see the original values.
func TestSetFileLocation(t *testing.T) {
	savedMesheryFolder := MesheryFolder
	savedDockerComposeFile := DockerComposeFile
	savedAuthConfigFile := AuthConfigFile
	savedDefaultConfigPath := DefaultConfigPath
	defer func() {
		MesheryFolder = savedMesheryFolder
		DockerComposeFile = savedDockerComposeFile
		AuthConfigFile = savedAuthConfigFile
		DefaultConfigPath = savedDefaultConfigPath
	}()
	if err := SetFileLocation(); err != nil {
		t.Errorf("SetFileLocation error = %v", err)
	}
}
// TestNavigateToBrowser checks NavigateToBrowser against a real URL.
// NOTE(review): this opens a visible browser window on the developer's
// machine and requires a browser to be available; consider guarding it
// with testing.Short().
func TestNavigateToBrowser(t *testing.T) {
	// opens up a browser window whenever this test runs
	err := NavigateToBrowser("https://www.layer5.io")
	if err != nil {
		t.Errorf("NavigateToBrowser error: %v", err)
	}
}
// TestUploadFileWithParams builds a multipart upload request from an
// arbitrary fixture file and asserts that request construction succeeds.
func TestUploadFileWithParams(t *testing.T) {
	// any arbitrary fixture file works here; only request building is tested
	uploadFilePath := filepath.Join(fixturesDir, "platform", "listmanifest.api.response.golden")
	// returns *http.Request
	if _, err := UploadFileWithParams("https://www.layer5.io", nil, "meshery", uploadFilePath); err != nil {
		t.Errorf("UploadFileWithParams error = %v", err)
	}
}
// TestContentTypeIsHTML table-tests the Content-Type sniffing helper with
// an HTML response, a response lacking the header, and a non-HTML type.
func TestContentTypeIsHTML(t *testing.T) {
	tests := []struct {
		name           string
		response       *http.Response
		expectedOutput bool
	}{
		{
			name: "correct content-type",
			response: &http.Response{
				Header: http.Header{
					"Content-Type": []string{"text/html"},
				},
			},
			expectedOutput: true,
		},
		{
			name: "empty content-type",
			response: &http.Response{
				Header: http.Header{
					"Content-Type": []string{},
				},
			},
			expectedOutput: false,
		},
		{
			name: "incorrect content-type",
			response: &http.Response{
				Header: http.Header{
					"Content-Type": []string{"multipart/form-data"},
				},
			},
			expectedOutput: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ContentTypeIsHTML(tt.response)
			if got != tt.expectedOutput {
				t.Errorf("ContentTypeIsHTML error = %v want = %v", got, tt.expectedOutput)
			}
		})
	}
}
// TestAskForConfirmation feeds canned answers to AskForConfirmation by
// replacing os.Stdin with the read end of a pipe. The input must be fully
// written and the write end closed before the prompt runs, since the
// function reads until it gets an answer.
func TestAskForConfirmation(t *testing.T) {
	tests := []struct {
		name     string
		question string
		input    string
		want     bool
	}{
		{
			name:     "test with input 'yes'",
			question: "question?",
			input:    "yes\n",
			want:     true,
		},
		{
			name:     "test with input 'no'",
			question: "question?",
			input:    "no\n",
			want:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// mocking stdio
			// see https://stackoverflow.com/a/64518829
			r, w, err := os.Pipe()
			if err != nil {
				t.Fatal(err)
			}
			_, err = w.WriteString(tt.input)
			if err != nil {
				t.Error(err)
			}
			w.Close()
			stdin := os.Stdin
			defer func() { os.Stdin = stdin }()
			os.Stdin = r
			got := AskForConfirmation(tt.question)
			if got != tt.want {
				t.Errorf("AskForConfirmation got = %v want = %v", got, tt.want)
			}
		})
	}
}
func TestCreateConfigFile(t *testing.T) {
home, err := os.UserHomeDir()
if err != nil {
t.Error(err, "failed to get users home directory")
}
originalDefaultConfigPath := DefaultConfigPath
defer func() { DefaultConfigPath = originalDefaultConfigPath }()
DefaultConfigPath = filepath.Join(home, "config.yaml")
err = CreateConfigFile()
if err != nil {
t.Errorf("CreateConfigFile error = %v", err)
}
}
// TestValidateURL table-tests URL validation: a valid URL, an unsupported
// scheme, and a scheme-less string. Bug fixed: the original only compared
// messages when an error was returned, so a case expecting an error that
// got nil passed silently.
func TestValidateURL(t *testing.T) {
	tests := []struct {
		name    string
		url     string
		wantErr string // empty means no error is expected
	}{
		{
			name: "Correct URL",
			url:  "https://www.layer5.io",
		},
		{
			name:    "Unsupported scheme",
			url:     "mqtt://www.layer5.io",
			wantErr: "mqtt is not a supported protocol",
		},
		{
			name:    "invalid URL",
			url:     "layer5.io",
			wantErr: "parse \"layer5.io\": invalid URI for request",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := ValidateURL(tt.url)
			if tt.wantErr == "" {
				if err != nil {
					t.Errorf("ValidateURL unexpected error = %v", err)
				}
				return
			}
			if err == nil {
				// Previously this expected-but-missing error passed silently.
				t.Errorf("ValidateURL error = nil, want = %v", tt.wantErr)
			} else if err.Error() != tt.wantErr {
				t.Errorf("ValidateURL error = %v want = %v", err, tt.wantErr)
			}
		})
	}
}
func TestReadToken(t *testing.T) {
tests := []struct {
name string
fixture string
want map[string]string
}{
{
name: "with valid JSON",
fixture: "readtoken.golden",
want: map[string]string{
"message": "meshery",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fixtureFilePath := filepath.Join(fixturesDir, tt.fixture)
got, err := ReadToken(fixtureFilePath)
if err != nil {
t.Errorf("ReadToken error = %v", err)
}
// checking map equality
eq := reflect.DeepEqual(got, tt.want)
if !eq {
t.Errorf("ReadToken got = %v want = %v", got, tt.want)
}
})
}
}
// TestTruncateID verifies that TruncateID keeps the first eight characters.
func TestTruncateID(t *testing.T) {
	const id = "1234567890"
	const want = "12345678"
	if got := TruncateID(id); got != want {
		t.Errorf("TruncateID got = %v want = %v", got, want)
	}
}
// TestPrintToTable redirects stdout through a pipe, invokes PrintToTable,
// and compares the captured output against the golden file. Fixes: the
// errors from os.Pipe and ReadAll were ignored, and the stdout-restoring
// defer was registered only after reading the pipe, so a panic inside
// PrintToTable would leave os.Stdout pointing at the closed pipe.
func TestPrintToTable(t *testing.T) {
	// mocking Stdout
	// https://stackoverflow.com/a/29339052
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	os.Stdout = w
	defer func() { os.Stdout = old }()
	// prints to stdout
	PrintToTable([]string{"firstheader", "secondheader"}, [][]string{{"data1", "data2"}, {"data3", "data4"}})
	w.Close()
	// read from stdout
	out, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}
	got := string(out)
	want := NewGoldenFile(t, "printToTable.golden", fixturesDir).Load()
	if got != want {
		t.Errorf("PrintToTable got = %v want = %v", got, want)
	}
}
// TestPrintToTableWithFooter captures stdout via a pipe and compares the
// rendered table (including footer) to the golden file. Same fixes as
// TestPrintToTable: pipe/read errors are now checked and stdout is
// restored by a defer registered before the code that can fail.
func TestPrintToTableWithFooter(t *testing.T) {
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	os.Stdout = w
	defer func() { os.Stdout = old }()
	// prints to stdout
	PrintToTableWithFooter([]string{"firstheader", "secondheader"}, [][]string{{"data1", "data2"}, {"data3", "data4"}}, []string{"footer1", "footer2"})
	w.Close()
	// read from stdout
	out, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}
	got := string(out)
	want := NewGoldenFile(t, "printToTableWithFooter.golden", fixturesDir).Load()
	if got != want {
		t.Errorf("PrintToTableWithFooter got = %v want = %v", got, want)
	}
}
// TestStringContainedInSlice checks the index-returning membership helper:
// a present string yields its index, an absent string yields -1.
func TestStringContainedInSlice(t *testing.T) {
	tests := []struct {
		name  string
		str   string
		slice []string
		want  int
	}{
		{
			name:  "test with present string",
			str:   "data2",
			slice: []string{"data1", "data2"},
			want:  1,
		},
		{
			name:  "test with absent string",
			str:   "data3",
			slice: []string{"data1", "data2"},
			want:  -1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := StringContainedInSlice(tt.str, tt.slice)
			if got != tt.want {
				t.Errorf("StringContainedInSlice got = %v want = %v", got, tt.want)
			}
		})
	}
}
// TestStringInSlice checks the boolean membership helper with a present
// and an absent needle.
func TestStringInSlice(t *testing.T) {
	tests := []struct {
		name  string
		str   string
		slice []string
		want  bool
	}{
		{
			name:  "test with string present",
			str:   "data1",
			slice: []string{"data1", "data2"},
			want:  true,
		},
		{
			name:  "test with string absent",
			str:   "data3",
			slice: []string{"data1", "data2"},
			want:  false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := StringInSlice(tt.str, tt.slice)
			if got != tt.want {
				t.Errorf("StringInSlice got = %v want = %v", got, tt.want)
			}
		})
	}
}
// TestAskForInput pipes a canned answer into os.Stdin and checks that
// AskForInput echoes back the selected option. As with
// TestAskForConfirmation, the input is fully written and the write end
// closed before the prompt runs.
func TestAskForInput(t *testing.T) {
	input := "data1"
	// mocking stdio
	// see https://stackoverflow.com/a/64518829
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	_, err = w.WriteString(input + "\n")
	if err != nil {
		t.Error(err)
	}
	w.Close()
	stdin := os.Stdin
	defer func() { os.Stdin = stdin }()
	os.Stdin = r
	got := AskForInput("Prompt", []string{"data1", "data2"})
	if got != input {
		t.Errorf("AskForInput got = %v want = %v", got, input)
	}
}
// TestParseURLGithub table-tests GitHub URL parsing for a non-GitHub URL
// (which must error), a github.com blob URL, and a raw.githubusercontent
// URL. Bug fixed: when expectedError was set but ParseURLGithub returned
// nil, the original test passed silently; that case now fails.
func TestParseURLGithub(t *testing.T) {
	tests := []struct {
		name          string
		url           string
		rawRepoOutput string
		pathOutput    string
		expectedError string // empty means no error is expected
	}{
		{
			name:          "test with non-github url",
			url:           "https://www.layer5.io",
			rawRepoOutput: "https://www.layer5.io",
			pathOutput:    "",
			expectedError: "only github urls are supported",
		},
		{
			name:          "test with github.com",
			url:           "https://github.com/layer5io/meshery/blob/master/.goreleaser.yml",
			rawRepoOutput: "https://github.com/layer5io/meshery/master",
			pathOutput:    ".goreleaser.yml",
		},
		{
			name:          "test with raw.githubusercontent.com",
			url:           "https://raw.githubusercontent.com/layer5io/meshery/master/.goreleaser.yml",
			rawRepoOutput: "https://raw.githubusercontent.com/layer5io/meshery/master/.goreleaser.yml",
			pathOutput:    "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotRawRepoOutput, gotPathOutput, gotError := ParseURLGithub(tt.url)
			if gotRawRepoOutput != tt.rawRepoOutput {
				t.Errorf("ParseURLGithub got = %v, want = %v", gotRawRepoOutput, tt.rawRepoOutput)
			}
			if gotPathOutput != tt.pathOutput {
				t.Errorf("ParseURLGithub got = %v, want = %v", gotPathOutput, tt.pathOutput)
			}
			if gotError != nil {
				if gotError.Error() != tt.expectedError {
					t.Errorf("ParseURLGithub error = %v, want = %v", gotError, tt.expectedError)
				}
			} else if tt.expectedError != "" {
				// Previously an expected-but-missing error passed silently.
				t.Errorf("ParseURLGithub error = nil, want = %v", tt.expectedError)
			}
		})
	}
}
// TestPrintToTableInStringFormat compares the rendered table string
// against the golden file contents.
func TestPrintToTableInStringFormat(t *testing.T) {
	want := NewGoldenFile(t, "PrintToTableInStringFormat.golden", fixturesDir).Load()
	headers := []string{"firstheader", "secondheader"}
	rows := [][]string{{"data1", "data2"}, {"data3", "data4"}}
	if got := PrintToTableInStringFormat(headers, rows); got != want {
		t.Errorf("PrintToTableInStringFormat got = %v want = %v", got, want)
	}
}
// TestCreateDefaultSpinner checks only the Suffix and FinalMSG fields of
// the constructed spinner; the remaining spinner configuration is not
// asserted here.
func TestCreateDefaultSpinner(t *testing.T) {
	// only checking for Suffix and FinalMSG
	got := CreateDefaultSpinner("suffix", "message")
	want := struct {
		Suffix   string
		FinalMsg string
	}{
		Suffix:   " suffix", // The leading space is intentional
		FinalMsg: "message\n",
	}
	if want.Suffix != got.Suffix {
		t.Errorf("CreateDefaultSpinner got = %v want = %v", got.Suffix, want.Suffix)
	}
	if want.FinalMsg != got.FinalMSG {
		t.Errorf("CreateDefaultSpinner got = %v want = %v", got.FinalMSG, want.FinalMsg)
	}
}
// TestContainsStringPrefix checks the prefix-membership helper with a
// matching and a non-matching string.
func TestContainsStringPrefix(t *testing.T) {
	tests := []struct {
		name  string
		slice []string
		str   string
		want  bool
	}{
		{
			name:  "str is present in the slice",
			slice: []string{"data1", "data2"},
			str:   "data2",
			want:  true,
		},
		{
			name:  "str is not present in the slice",
			slice: []string{"data1", "data2"},
			str:   "data3",
			want:  false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ContainsStringPrefix(tt.slice, tt.str)
			if got != tt.want {
				t.Errorf("ContainsStringPrefix got = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestSetOverrideValues checks the helm override values generated from a
// context: each known adapter is enabled only when listed in ctx.Adapters,
// and the image tag is "<channel>-<mesheryImageVersion>".
//
// Fixes: the failure message previously said "ReadToken" (copy-paste from an
// unrelated test); the loop now also runs each case as a named subtest via
// t.Run, consistent with the other table tests in this file.
func TestSetOverrideValues(t *testing.T) {
	testChannel := "testChannel"
	tests := []struct {
		name                string
		ctx                 *config.Context
		mesheryImageVersion string
		want                map[string]interface{}
	}{
		{
			name: "Context contains no adapters and no meshery image version",
			ctx: &config.Context{
				Adapters: nil,
				Channel:  testChannel,
			},
			mesheryImageVersion: "",
			want: map[string]interface{}{
				"meshery-istio": map[string]interface{}{
					"enabled": false,
				},
				"meshery-linkerd": map[string]interface{}{
					"enabled": false,
				},
				"meshery-consul": map[string]interface{}{
					"enabled": false,
				},
				"meshery-kuma": map[string]interface{}{
					"enabled": false,
				},
				"meshery-osm": map[string]interface{}{
					"enabled": false,
				},
				"meshery-nsm": map[string]interface{}{
					"enabled": false,
				},
				"meshery-nginx-sm": map[string]interface{}{
					"enabled": false,
				},
				"meshery-traefik-mesh": map[string]interface{}{
					"enabled": false,
				},
				"meshery-cpx": map[string]interface{}{
					"enabled": false,
				},
				"image": map[string]interface{}{
					"tag": testChannel + "-",
				},
			},
		},
		{
			name: "Context contains part of all available adapters and meshery image version",
			ctx: &config.Context{
				Adapters: []string{"meshery-istio", "meshery-osm", "meshery-nsm"},
				Channel:  testChannel,
			},
			mesheryImageVersion: "testImageVersion",
			want: map[string]interface{}{
				"meshery-istio": map[string]interface{}{
					"enabled": true,
				},
				"meshery-linkerd": map[string]interface{}{
					"enabled": false,
				},
				"meshery-consul": map[string]interface{}{
					"enabled": false,
				},
				"meshery-kuma": map[string]interface{}{
					"enabled": false,
				},
				"meshery-osm": map[string]interface{}{
					"enabled": true,
				},
				"meshery-nsm": map[string]interface{}{
					"enabled": true,
				},
				"meshery-nginx-sm": map[string]interface{}{
					"enabled": false,
				},
				"meshery-traefik-mesh": map[string]interface{}{
					"enabled": false,
				},
				"meshery-cpx": map[string]interface{}{
					"enabled": false,
				},
				"image": map[string]interface{}{
					"tag": testChannel + "-testImageVersion",
				},
			},
		},
		{
			name: "Context contains all available adapters and meshery image version",
			ctx: &config.Context{
				Adapters: []string{"meshery-istio", "meshery-linkerd", "meshery-consul", "meshery-kuma",
					"meshery-osm", "meshery-nsm", "meshery-nginx-sm", "meshery-traefik-mesh", "meshery-cpx"},
				Channel: testChannel,
			},
			mesheryImageVersion: "testImageVersion",
			want: map[string]interface{}{
				"meshery-istio": map[string]interface{}{
					"enabled": true,
				},
				"meshery-linkerd": map[string]interface{}{
					"enabled": true,
				},
				"meshery-consul": map[string]interface{}{
					"enabled": true,
				},
				"meshery-kuma": map[string]interface{}{
					"enabled": true,
				},
				"meshery-osm": map[string]interface{}{
					"enabled": true,
				},
				"meshery-nsm": map[string]interface{}{
					"enabled": true,
				},
				"meshery-nginx-sm": map[string]interface{}{
					"enabled": true,
				},
				"meshery-traefik-mesh": map[string]interface{}{
					"enabled": true,
				},
				"meshery-cpx": map[string]interface{}{
					"enabled": true,
				},
				"image": map[string]interface{}{
					"tag": testChannel + "-testImageVersion",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := SetOverrideValues(tt.ctx, tt.mesheryImageVersion)
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("SetOverrideValues got = %v want = %v", got, tt.want)
			}
		})
	}
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package traitdefinition
import (
"context"
"fmt"
"github.com/crossplane/crossplane-runtime/pkg/event"
ctrlrec "github.com/kubevela/pkg/controller/reconciler"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/condition"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
oamctrl "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev"
coredef "github.com/oam-dev/kubevela/pkg/controller/core.oam.dev/v1beta1/core"
"github.com/oam-dev/kubevela/pkg/controller/utils"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/version"
)
// Reconciler reconciles a TraitDefinition object
type Reconciler struct {
	client.Client
	// Scheme is the runtime scheme used by the manager's client.
	Scheme *runtime.Scheme
	// record emits Kubernetes events for reconcile warnings.
	record event.Recorder
	// options are the controller settings parsed from oamctrl.Args.
	options
}

// options holds the tunable settings for this controller; see parseOptions.
type options struct {
	// defRevLimit caps how many definition revisions are kept.
	defRevLimit int
	// concurrentReconciles is passed to controller.Options.MaxConcurrentReconciles.
	concurrentReconciles int
	// ignoreDefNoCtrlReq, when true, reconciles definitions that declare no
	// controller requirement (see MatchControllerRequirement).
	ignoreDefNoCtrlReq bool
	// controllerVersion is this controller's version, matched against the
	// definition's controller requirement.
	controllerVersion string
}
// Reconcile is the main logic for TraitDefinition controller
//
// Flow: fetch the TraitDefinition, reconcile its DefinitionRevision, store the
// definition's OpenAPI schema parameters in a ConfigMap, and record the
// ConfigMap name in status.ConfigMapRef.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	ctx, cancel := ctrlrec.NewReconcileContext(ctx)
	defer cancel()
	klog.InfoS("Reconcile traitDefinition", "traitDefinition", klog.KRef(req.Namespace, req.Name))
	var traitDefinition v1beta1.TraitDefinition
	if err := r.Get(ctx, req.NamespacedName, &traitDefinition); err != nil {
		// Ignore not-found errors: the object may have been deleted already.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// this is a placeholder for finalizer here in the future
	if traitDefinition.DeletionTimestamp != nil {
		klog.InfoS("The TraitDefinition is being deleted", "traitDefinition", klog.KRef(req.Namespace, req.Name))
		return ctrl.Result{}, nil
	}
	// Skip definitions whose controller requirement does not match this
	// controller's version (subject to the ignoreDefNoCtrlReq option).
	if !coredef.MatchControllerRequirement(&traitDefinition, r.controllerVersion, r.ignoreDefNoCtrlReq) {
		klog.InfoS("skip definition: not match the controller requirement of definition", "traitDefinition", klog.KObj(&traitDefinition))
		return ctrl.Result{}, nil
	}
	// Reconcile the definition revision; the callback persists the newly
	// created revision into the definition's status.
	defRev, result, err := coredef.ReconcileDefinitionRevision(ctx, r.Client, r.record, &traitDefinition, r.defRevLimit, func(revision *common.Revision) error {
		traitDefinition.Status.LatestRevision = revision
		if err := r.UpdateStatus(ctx, &traitDefinition); err != nil {
			return err
		}
		return nil
	})
	// A non-nil result means revision reconciliation decided the outcome
	// (e.g. requeue); return it as-is together with any error.
	if result != nil {
		return *result, err
	}
	if err != nil {
		return ctrl.Result{}, err
	}
	def := utils.NewCapabilityTraitDef(&traitDefinition)
	def.Name = req.NamespacedName.Name
	// Store the parameter of traitDefinition to configMap
	cmName, err := def.StoreOpenAPISchema(ctx, r.Client, req.Namespace, req.Name, defRev.Name)
	if err != nil {
		klog.InfoS("Could not store capability in ConfigMap", "err", err)
		r.record.Event(&(traitDefinition), event.Warning("Could not store capability in ConfigMap", err))
		return ctrl.Result{}, util.PatchCondition(ctx, r, &traitDefinition,
			condition.ReconcileError(fmt.Errorf(util.ErrStoreCapabilityInConfigMap, traitDefinition.Name, err)))
	}
	// Only touch status when the ConfigMap reference actually changed.
	if traitDefinition.Status.ConfigMapRef != cmName {
		traitDefinition.Status.ConfigMapRef = cmName
		// Override the conditions, which maybe include the error info.
		traitDefinition.Status.Conditions = []condition.Condition{condition.ReconcileSuccess()}
		if err := r.UpdateStatus(ctx, &traitDefinition); err != nil {
			klog.ErrorS(err, "Could not update TraitDefinition Status", "traitDefinition", klog.KRef(req.Namespace, req.Name))
			r.record.Event(&traitDefinition, event.Warning("Could not update TraitDefinition Status", err))
			return ctrl.Result{}, util.PatchCondition(ctx, r, &traitDefinition,
				condition.ReconcileError(fmt.Errorf(util.ErrUpdateTraitDefinition, traitDefinition.Name, err)))
		}
		klog.InfoS("Successfully updated the status.configMapRef of the TraitDefinition", "traitDefinition",
			klog.KRef(req.Namespace, req.Name), "status.configMapRef", cmName)
	}
	return ctrl.Result{}, nil
}
// UpdateStatus updates v1beta1.TraitDefinition's Status with retry.RetryOnConflict.
// On each attempt the object is re-read to pick up the latest resourceVersion,
// then the previously captured status is written back.
func (r *Reconciler) UpdateStatus(ctx context.Context, def *v1beta1.TraitDefinition, opts ...client.SubResourceUpdateOption) error {
	// Capture the desired status before refreshing the object.
	desired := def.DeepCopy().Status
	key := client.ObjectKey{Namespace: def.Namespace, Name: def.Name}
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		if err := r.Get(ctx, key, def); err != nil {
			return err
		}
		def.Status = desired
		return r.Status().Update(ctx, def, opts...)
	})
}
// SetupWithManager will setup with event recorder
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	recorder := event.NewAPIRecorder(mgr.GetEventRecorderFor("TraitDefinition"))
	r.record = recorder.WithAnnotations("controller", "TraitDefinition")
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1beta1.TraitDefinition{}).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: r.concurrentReconciles,
		}).
		Complete(r)
}
// Setup adds a controller that reconciles TraitDefinition.
func Setup(mgr ctrl.Manager, args oamctrl.Args) error {
	reconciler := Reconciler{
		Client:  mgr.GetClient(),
		Scheme:  mgr.GetScheme(),
		options: parseOptions(args),
	}
	return reconciler.SetupWithManager(mgr)
}
// parseOptions extracts this controller's settings from the shared OAM
// controller arguments; the controller version comes from the build info.
func parseOptions(args oamctrl.Args) options {
	var opts options
	opts.defRevLimit = args.DefRevisionLimit
	opts.concurrentReconciles = args.ConcurrentReconciles
	opts.ignoreDefNoCtrlReq = args.IgnoreDefinitionWithoutControllerRequirement
	opts.controllerVersion = version.VelaVersion
	return opts
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"cmp"
"container/heap"
"context"
"fmt"
"runtime/trace"
"slices"
"sort"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/executor/internal/builder"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
plannerutil "github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/channel"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var (
	// Compile-time assertion that IndexMergeReaderExecutor satisfies exec.Executor.
	_ exec.Executor = &IndexMergeReaderExecutor{}
	// IndexMergeCancelFuncForTest is used just for test
	IndexMergeCancelFuncForTest func()
)

// Worker-type labels, used to identify which worker goroutine panicked
// (passed to handleWorkerPanic and trace regions).
const (
	partialIndexWorkerType        = "IndexMergePartialIndexWorker"
	partialTableWorkerType        = "IndexMergePartialTableWorker"
	processWorkerType             = "IndexMergeProcessWorker"
	partTblIntersectionWorkerType = "IndexMergePartTblIntersectionWorker"
	tableScanWorkerType           = "IndexMergeTableScanWorker"
)
// IndexMergeReaderExecutor accesses a table with multiple index/table scan.
// There are three types of workers:
// 1. partialTableWorker/partialIndexWorker, which are used to fetch the handles
// 2. indexMergeProcessWorker, which is used to do the `Union` operation.
// 3. indexMergeTableScanWorker, which is used to get the table tuples with the given handles.
//
// The execution flow is really like IndexLookUpReader. However, it uses multiple index scans
// or table scans to get the handles:
// 1. use the partialTableWorkers and partialIndexWorkers to fetch the handles (a batch per time)
// and send them to the indexMergeProcessWorker.
// 2. indexMergeProcessWorker do the `Union` operation for a batch of handles it have got.
// For every handle in the batch:
// 1. check whether it has been accessed.
// 2. if not, record it and send it to the indexMergeTableScanWorker.
// 3. if accessed, just ignore it.
type IndexMergeReaderExecutor struct {
	exec.BaseExecutor

	table table.Table
	// indexes[i] is nil when the i-th partial plan is a table scan.
	indexes []*model.IndexInfo
	// descs[i] records whether the i-th partial scan is descending.
	descs  []bool
	ranges [][]*ranger.Range
	// dagPBs[i] is the DAG request for the i-th partial plan.
	dagPBs       []*tipb.DAGRequest
	startTS      uint64
	tableRequest *tipb.DAGRequest

	keepOrder   bool
	pushedLimit *plannercore.PushedDownLimit
	byItems     []*plannerutil.ByItems

	// columns are only required by union scan.
	columns []*model.ColumnInfo
	*dataReaderBuilder

	// fields about accessing partition tables
	partitionTableMode bool                  // if this IndexMerge is accessing a partition table
	prunedPartitions   []table.PhysicalTable // pruned partition tables need to access
	partitionKeyRanges [][][]kv.KeyRange     // [partitionIdx][partialIndex][ranges]

	// All fields above are immutable.

	tblWorkerWg     sync.WaitGroup
	idxWorkerWg     sync.WaitGroup
	processWorkerWg sync.WaitGroup
	// finished is closed to tell all workers to stop.
	finished chan struct{}

	workerStarted bool
	keyRanges     [][]kv.KeyRange

	resultCh   chan *indexMergeTableTask
	resultCurr *indexMergeTableTask

	// memTracker is used to track the memory usage of this executor.
	memTracker *memory.Tracker
	paging     bool

	// checkIndexValue is used to check the consistency of the index data.
	*checkIndexValue // nolint:unused

	partialPlans        [][]plannercore.PhysicalPlan
	tblPlans            []plannercore.PhysicalPlan
	partialNetDataSizes []float64
	dataAvgRowSize      float64

	handleCols plannercore.HandleCols
	stats      *IndexMergeRuntimeStat

	// Indicates whether there is correlated column in filter or table/index range.
	// We need to refresh dagPBs before send DAGReq to storage.
	isCorColInPartialFilters []bool
	isCorColInTableFilter    bool
	isCorColInPartialAccess  []bool

	// Whether it's intersection or union.
	isIntersection bool
}
// indexMergeTableTask is a batch of handles produced by a partial worker and
// consumed by the process/table-scan workers.
type indexMergeTableTask struct {
	lookupTableTask

	// parTblIdx are only used in indexMergeProcessWorker.fetchLoopIntersection.
	parTblIdx int

	// partialPlanID are only used for indexMergeProcessWorker.fetchLoopUnionWithOrderBy.
	partialPlanID int
}
// Table implements the dataSourceExecutor interface.
// It returns the table this IndexMerge reader accesses.
func (e *IndexMergeReaderExecutor) Table() table.Table {
	return e.table
}
// Open implements the Executor Open interface
//
// It rebuilds DAG executors / ranges that contain correlated columns, builds
// the key ranges (per pruned partition in partition-table mode), and sets up
// the finished channel, result channel and memory tracker. It does not start
// any worker goroutines itself.
func (e *IndexMergeReaderExecutor) Open(_ context.Context) (err error) {
	e.keyRanges = make([][]kv.KeyRange, 0, len(e.partialPlans))
	e.initRuntimeStats()
	// Refresh the table-side executors when the table filter references a
	// correlated column.
	if e.isCorColInTableFilter {
		e.tableRequest.Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.tblPlans)
		if err != nil {
			return err
		}
	}
	if err = e.rebuildRangeForCorCol(); err != nil {
		return err
	}
	if !e.partitionTableMode {
		if e.keyRanges, err = e.buildKeyRangesForTable(e.table); err != nil {
			return err
		}
	} else {
		// One key-range set per pruned partition: [partitionIdx][partialIndex][ranges].
		e.partitionKeyRanges = make([][][]kv.KeyRange, len(e.prunedPartitions))
		for i, p := range e.prunedPartitions {
			if e.partitionKeyRanges[i], err = e.buildKeyRangesForTable(p); err != nil {
				return err
			}
		}
	}
	e.finished = make(chan struct{})
	e.resultCh = make(chan *indexMergeTableTask, atomic.LoadInt32(&LookupTableTaskChannelSize))
	// Reuse an existing tracker across re-opens; otherwise create one.
	if e.memTracker != nil {
		e.memTracker.Reset()
	} else {
		e.memTracker = memory.NewTracker(e.ID(), -1)
	}
	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
	return nil
}
// rebuildRangeForCorCol re-evaluates the scan ranges of every partial plan
// whose access condition references a correlated column, so the ranges reflect
// the current outer-row values.
func (e *IndexMergeReaderExecutor) rebuildRangeForCorCol() (err error) {
	len1 := len(e.partialPlans)
	len2 := len(e.isCorColInPartialAccess)
	// The two slices must be parallel; a mismatch indicates a planner bug.
	if len1 != len2 {
		return errors.Errorf("unexpect length for partialPlans(%d) and isCorColInPartialAccess(%d)", len1, len2)
	}
	for i, plan := range e.partialPlans {
		if e.isCorColInPartialAccess[i] {
			// The first plan in each partial chain is the scan operator.
			switch x := plan[0].(type) {
			case *plannercore.PhysicalIndexScan:
				e.ranges[i], err = rebuildIndexRanges(e.Ctx(), x, x.IdxCols, x.IdxColLens)
			case *plannercore.PhysicalTableScan:
				e.ranges[i], err = x.ResolveCorrelatedColumns()
			default:
				err = errors.Errorf("unsupported plan type %T", plan[0])
			}
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// buildKeyRangesForTable converts every partial plan's logical ranges into KV
// key ranges for the given (physical) table: index ranges for index scans and
// handle ranges for table scans.
func (e *IndexMergeReaderExecutor) buildKeyRangesForTable(tbl table.Table) (ranges [][]kv.KeyRange, err error) {
	sc := e.Ctx().GetSessionVars().StmtCtx
	for i, plan := range e.partialPlans {
		_, ok := plan[0].(*plannercore.PhysicalIndexScan)
		if !ok {
			// Table-scan partial plan: split handle ranges at the int64
			// boundary, convert both halves, and concatenate them.
			firstPartRanges, secondPartRanges := distsql.SplitRangesAcrossInt64Boundary(e.ranges[i], false, e.descs[i], tbl.Meta().IsCommonHandle)
			firstKeyRanges, err := distsql.TableHandleRangesToKVRanges(sc, []int64{getPhysicalTableID(tbl)}, tbl.Meta().IsCommonHandle, firstPartRanges)
			if err != nil {
				return nil, err
			}
			secondKeyRanges, err := distsql.TableHandleRangesToKVRanges(sc, []int64{getPhysicalTableID(tbl)}, tbl.Meta().IsCommonHandle, secondPartRanges)
			if err != nil {
				return nil, err
			}
			keyRanges := append(firstKeyRanges.FirstPartitionRange(), secondKeyRanges.FirstPartitionRange()...)
			ranges = append(ranges, keyRanges)
			continue
		}
		// Index-scan partial plan.
		keyRange, err := distsql.IndexRangesToKVRanges(sc, getPhysicalTableID(tbl), e.indexes[i].ID, e.ranges[i])
		if err != nil {
			return nil, err
		}
		ranges = append(ranges, keyRange.FirstPartitionRange())
	}
	return ranges, nil
}
// startWorkers launches the whole worker pipeline: the process worker, one
// partial worker per partial plan, a goroutine that closes fetchCh once all
// partial workers finish, and finally the table-scan workers.
func (e *IndexMergeReaderExecutor) startWorkers(ctx context.Context) error {
	exitCh := make(chan struct{})
	workCh := make(chan *indexMergeTableTask, 1)
	fetchCh := make(chan *indexMergeTableTask, len(e.keyRanges))
	e.startIndexMergeProcessWorker(ctx, workCh, fetchCh)
	var err error
	for i := 0; i < len(e.partialPlans); i++ {
		e.idxWorkerWg.Add(1)
		// A nil index means this partial plan is a table scan.
		if e.indexes[i] != nil {
			err = e.startPartialIndexWorker(ctx, exitCh, fetchCh, i)
		} else {
			err = e.startPartialTableWorker(ctx, exitCh, fetchCh, i)
		}
		if err != nil {
			// The worker was never started, so release its WaitGroup slot.
			e.idxWorkerWg.Done()
			break
		}
	}
	go e.waitPartialWorkersAndCloseFetchChan(fetchCh)
	if err != nil {
		// Tell the already-started partial workers to exit.
		close(exitCh)
		return err
	}
	e.startIndexMergeTableScanWorker(ctx, workCh)
	e.workerStarted = true
	return nil
}
// waitPartialWorkersAndCloseFetchChan blocks until every partial worker has
// finished, then closes fetchCh so the process worker can drain and exit.
func (e *IndexMergeReaderExecutor) waitPartialWorkersAndCloseFetchChan(fetchCh chan *indexMergeTableTask) {
	e.idxWorkerWg.Wait()
	close(fetchCh)
}
// startIndexMergeProcessWorker starts the goroutine that merges handle batches
// from `fetch` and forwards deduplicated tasks to `workCh`. The merge strategy
// depends on the plan: intersection, union with ORDER BY, or plain union.
func (e *IndexMergeReaderExecutor) startIndexMergeProcessWorker(ctx context.Context, workCh chan<- *indexMergeTableTask, fetch <-chan *indexMergeTableTask) {
	idxMergeProcessWorker := &indexMergeProcessWorker{
		indexMerge: e,
		stats:      e.stats,
	}
	e.processWorkerWg.Add(1)
	go func() {
		defer trace.StartRegion(ctx, "IndexMergeProcessWorker").End()
		util.WithRecovery(
			func() {
				if e.isIntersection {
					// Intersection cannot be combined with a pushed-down limit
					// or keep-order; the planner should never produce that.
					if e.pushedLimit != nil || e.keepOrder {
						panic("Not support intersection with pushedLimit or keepOrder = true")
					}
					idxMergeProcessWorker.fetchLoopIntersection(ctx, fetch, workCh, e.resultCh, e.finished)
				} else if len(e.byItems) != 0 {
					idxMergeProcessWorker.fetchLoopUnionWithOrderBy(ctx, fetch, workCh, e.resultCh, e.finished)
				} else {
					idxMergeProcessWorker.fetchLoopUnion(ctx, fetch, workCh, e.resultCh, e.finished)
				}
			},
			handleWorkerPanic(ctx, e.finished, e.resultCh, nil, processWorkerType),
		)
		e.processWorkerWg.Done()
	}()
}
// startPartialIndexWorker starts the goroutine that runs the workID-th partial
// plan as an index scan: it builds one distsql request per key-range set (one
// per pruned partition in partition-table mode) and streams the fetched
// handles to fetchCh.
func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *indexMergeTableTask, workID int) error {
	failpoint.Inject("testIndexMergeResultChCloseEarly", func(_ failpoint.Value) {
		// Wait for processWorker to close resultCh.
		time.Sleep(time.Second * 2)
		// Should use fetchCh instead of resultCh to send error.
		syncErr(ctx, e.finished, fetchCh, errors.New("testIndexMergeResultChCloseEarly"))
	})
	if e.RuntimeStats() != nil {
		collExec := true
		e.dagPBs[workID].CollectExecutionSummaries = &collExec
	}
	var keyRanges [][]kv.KeyRange
	if e.partitionTableMode {
		for _, pKeyRanges := range e.partitionKeyRanges { // get all keyRanges related to this PartialIndex
			keyRanges = append(keyRanges, pKeyRanges[workID])
		}
	} else {
		keyRanges = [][]kv.KeyRange{e.keyRanges[workID]}
	}
	failpoint.Inject("startPartialIndexWorkerErr", func() error {
		return errors.New("inject an error before start partialIndexWorker")
	})
	go func() {
		defer trace.StartRegion(ctx, "IndexMergePartialIndexWorker").End()
		defer e.idxWorkerWg.Done()
		util.WithRecovery(
			func() {
				failpoint.Inject("testIndexMergePanicPartialIndexWorker", nil)
				is := e.partialPlans[workID][0].(*plannercore.PhysicalIndexScan)
				worker := &partialIndexWorker{
					stats:              e.stats,
					idxID:              e.getPartitalPlanID(workID),
					sc:                 e.Ctx(),
					dagPB:              e.dagPBs[workID],
					plan:               e.partialPlans[workID],
					batchSize:          e.MaxChunkSize(),
					maxBatchSize:       e.Ctx().GetSessionVars().IndexLookupSize,
					maxChunkSize:       e.MaxChunkSize(),
					memTracker:         e.memTracker,
					partitionTableMode: e.partitionTableMode,
					prunedPartitions:   e.prunedPartitions,
					byItems:            is.ByItems,
					pushedLimit:        e.pushedLimit,
				}
				if e.isCorColInPartialFilters[workID] {
					// We got correlated column, so need to refresh Selection operator.
					var err error
					if e.dagPBs[workID].Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.partialPlans[workID]); err != nil {
						syncErr(ctx, e.finished, fetchCh, err)
						return
					}
				}
				var builder distsql.RequestBuilder
				builder.SetDAGRequest(e.dagPBs[workID]).
					SetStartTS(e.startTS).
					SetDesc(e.descs[workID]).
					SetKeepOrder(e.keepOrder).
					SetTxnScope(e.txnScope).
					SetReadReplicaScope(e.readReplicaScope).
					SetIsStaleness(e.isStaleness).
					SetFromSessionVars(e.Ctx().GetSessionVars()).
					SetMemTracker(e.memTracker).
					SetPaging(e.paging).
					SetFromInfoSchema(e.Ctx().GetInfoSchema()).
					SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &builder.Request, e.partialNetDataSizes[workID])).
					SetConnID(e.Ctx().GetSessionVars().ConnectionID)
				tps := worker.getRetTpsForIndexScan(e.handleCols)
				results := make([]distsql.SelectResult, 0, len(keyRanges))
				defer func() {
					// To make sure SelectResult.Close() is called even got panic in fetchHandles().
					for _, result := range results {
						if err := result.Close(); err != nil {
							logutil.Logger(ctx).Error("close Select result failed", zap.Error(err))
						}
					}
				}()
				for _, keyRange := range keyRanges {
					// check if this executor is closed
					select {
					case <-ctx.Done():
						return
					case <-e.finished:
						return
					default:
					}
					// init kvReq and worker for this partition
					// The key ranges should be ordered.
					slices.SortFunc(keyRange, func(i, j kv.KeyRange) int {
						return bytes.Compare(i.StartKey, j.StartKey)
					})
					kvReq, err := builder.SetKeyRanges(keyRange).Build()
					if err != nil {
						syncErr(ctx, e.finished, fetchCh, err)
						return
					}
					result, err := distsql.SelectWithRuntimeStats(ctx, e.Ctx(), kvReq, tps, getPhysicalPlanIDs(e.partialPlans[workID]), e.getPartitalPlanID(workID))
					if err != nil {
						syncErr(ctx, e.finished, fetchCh, err)
						return
					}
					results = append(results, result)
					failpoint.Inject("testIndexMergePartialIndexWorkerCoprLeak", nil)
				}
				worker.batchSize = mathutil.Min(e.MaxChunkSize(), worker.maxBatchSize)
				if len(results) > 1 && len(e.byItems) != 0 {
					// e.Schema() not the output schema for partialIndexReader, and we put byItems related column at first in `buildIndexReq`, so use nil here.
					ssr := distsql.NewSortedSelectResults(results, nil, e.byItems, e.memTracker)
					results = []distsql.SelectResult{ssr}
				}
				ctx1, cancel := context.WithCancel(ctx)
				// this error is reported in fetchHandles(), so ignore it here.
				_, _ = worker.fetchHandles(ctx1, results, exitCh, fetchCh, e.finished, e.handleCols, workID)
				cancel()
			},
			handleWorkerPanic(ctx, e.finished, fetchCh, nil, partialIndexWorkerType),
		)
	}()
	return nil
}
// startPartialTableWorker starts the goroutine that runs the workID-th partial
// plan as a table scan. It opens one TableReaderExecutor per target table
// (every pruned partition, or the single base table) and streams the fetched
// handles to fetchCh.
func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *indexMergeTableTask, workID int) error {
	ts := e.partialPlans[workID][0].(*plannercore.PhysicalTableScan)
	tbls := make([]table.Table, 0, 1)
	if e.partitionTableMode && len(e.byItems) == 0 {
		// Scan each pruned partition separately.
		for _, p := range e.prunedPartitions {
			tbls = append(tbls, p)
		}
	} else {
		tbls = append(tbls, e.table)
	}
	go func() {
		defer trace.StartRegion(ctx, "IndexMergePartialTableWorker").End()
		defer e.idxWorkerWg.Done()
		util.WithRecovery(
			func() {
				failpoint.Inject("testIndexMergePanicPartialTableWorker", nil)
				var err error
				partialTableReader := &TableReaderExecutor{
					BaseExecutor:     exec.NewBaseExecutor(e.Ctx(), ts.Schema(), e.getPartitalPlanID(workID)),
					dagPB:            e.dagPBs[workID],
					startTS:          e.startTS,
					txnScope:         e.txnScope,
					readReplicaScope: e.readReplicaScope,
					isStaleness:      e.isStaleness,
					plans:            e.partialPlans[workID],
					ranges:           e.ranges[workID],
					netDataSize:      e.partialNetDataSizes[workID],
					keepOrder:        ts.KeepOrder,
					byItems:          ts.ByItems,
				}
				worker := &partialTableWorker{
					stats:              e.stats,
					sc:                 e.Ctx(),
					batchSize:          e.MaxChunkSize(),
					maxBatchSize:       e.Ctx().GetSessionVars().IndexLookupSize,
					maxChunkSize:       e.MaxChunkSize(),
					tableReader:        partialTableReader,
					memTracker:         e.memTracker,
					partitionTableMode: e.partitionTableMode,
					prunedPartitions:   e.prunedPartitions,
					byItems:            ts.ByItems,
					pushedLimit:        e.pushedLimit,
				}
				if len(e.prunedPartitions) != 0 && len(e.byItems) != 0 {
					// Sort partitions by physical ID so the key-range builder
					// visits them in a deterministic order.
					slices.SortFunc(worker.prunedPartitions, func(i, j table.PhysicalTable) int {
						return cmp.Compare(i.GetPhysicalID(), j.GetPhysicalID())
					})
					partialTableReader.kvRangeBuilder = kvRangeBuilderFromRangeAndPartition{
						sctx:       e.Ctx(),
						partitions: worker.prunedPartitions,
					}
				}
				if e.isCorColInPartialFilters[workID] {
					// Correlated columns require rebuilding the DAG executors.
					if e.dagPBs[workID].Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.partialPlans[workID]); err != nil {
						syncErr(ctx, e.finished, fetchCh, err)
						return
					}
					partialTableReader.dagPB = e.dagPBs[workID]
				}
				var tableReaderClosed bool
				defer func() {
					// To make sure SelectResult.Close() is called even got panic in fetchHandles().
					if !tableReaderClosed {
						terror.Call(worker.tableReader.Close)
					}
				}()
				for parTblIdx, tbl := range tbls {
					// check if this executor is closed
					select {
					case <-ctx.Done():
						return
					case <-e.finished:
						return
					default:
					}
					// init partialTableReader and partialTableWorker again for the next table
					partialTableReader.table = tbl
					if err = partialTableReader.Open(ctx); err != nil {
						logutil.Logger(ctx).Error("open Select result failed:", zap.Error(err))
						syncErr(ctx, e.finished, fetchCh, err)
						break
					}
					failpoint.Inject("testIndexMergePartialTableWorkerCoprLeak", nil)
					tableReaderClosed = false
					worker.batchSize = e.MaxChunkSize()
					if worker.batchSize > worker.maxBatchSize {
						worker.batchSize = worker.maxBatchSize
					}
					// fetch all handles from this table
					ctx1, cancel := context.WithCancel(ctx)
					_, fetchErr := worker.fetchHandles(ctx1, exitCh, fetchCh, e.finished, e.handleCols, parTblIdx, workID)
					// release related resources
					cancel()
					tableReaderClosed = true
					if err = worker.tableReader.Close(); err != nil {
						logutil.Logger(ctx).Error("close Select result failed:", zap.Error(err))
					}
					// this error is reported in fetchHandles(), so ignore it here.
					if fetchErr != nil {
						break
					}
				}
			},
			handleWorkerPanic(ctx, e.finished, fetchCh, nil, partialTableWorkerType),
		)
	}()
	return nil
}
// initRuntimeStats creates the runtime statistics collector, but only when the
// session is collecting execution stats for this executor.
func (e *IndexMergeReaderExecutor) initRuntimeStats() {
	if e.RuntimeStats() == nil {
		return
	}
	e.stats = &IndexMergeRuntimeStat{
		Concurrency: e.Ctx().GetSessionVars().IndexLookupConcurrency(),
	}
}
// getPartitalPlanID returns the ID of the last plan in the workID-th partial
// plan chain, or 0 when that chain is empty.
func (e *IndexMergeReaderExecutor) getPartitalPlanID(workID int) int {
	plans := e.partialPlans[workID]
	if n := len(plans); n > 0 {
		return plans[n-1].ID()
	}
	return 0
}
// getTablePlanRootID returns the ID of the root (last) table-side plan,
// falling back to the executor's own ID when there are no table plans.
func (e *IndexMergeReaderExecutor) getTablePlanRootID() int {
	if len(e.tblPlans) == 0 {
		return e.ID()
	}
	return e.tblPlans[len(e.tblPlans)-1].ID()
}
// partialTableWorker fetches handles for one table-scan partial plan and
// packs them into indexMergeTableTasks.
type partialTableWorker struct {
	stats *IndexMergeRuntimeStat
	sc    sessionctx.Context
	// batchSize is the current handle batch size; it grows up to maxBatchSize
	// in extractTaskHandles.
	batchSize    int
	maxBatchSize int
	maxChunkSize int
	// tableReader is the underlying TableReaderExecutor driving the scan.
	tableReader        exec.Executor
	memTracker         *memory.Tracker
	partitionTableMode bool
	prunedPartitions   []table.PhysicalTable
	byItems            []*plannerutil.ByItems
	// scannedKeys counts rows scanned so far; compared against pushedLimit.
	scannedKeys uint64
	pushedLimit *plannercore.PushedDownLimit
}
// needPartitionHandle indicates whether we need create a partitonHandle or not.
// If the schema from planner part contains ExtraPhysTblID,
// we need create a partitionHandle, otherwise create a normal handle.
// In TableRowIDScan, the partitionHandle will be used to create key ranges.
func (w *partialTableWorker) needPartitionHandle() (bool, error) {
	cols := w.tableReader.(*TableReaderExecutor).plans[0].Schema().Columns
	outputOffsets := w.tableReader.(*TableReaderExecutor).dagPB.OutputOffsets
	// The last output column is where ExtraPhysTblID would appear.
	col := cols[outputOffsets[len(outputOffsets)-1]]
	needPartitionHandle := w.partitionTableMode && len(w.byItems) > 0
	// no ExtraPidColID here, because a clustered index couldn't be a global index.
	hasExtraCol := col.ID == model.ExtraPhysTblID
	// There will be two needPartitionHandle != hasExtraCol situations.
	// Only `needPartitionHandle` == true and `hasExtraCol` == false are not allowed.
	// `ExtraPhysTblID` will be used in `SelectLock` when `needPartitionHandle` == false and `hasExtraCol` == true.
	if needPartitionHandle && !hasExtraCol {
		return needPartitionHandle, errors.Errorf("Internal error, needPartitionHandle != ret")
	}
	return needPartitionHandle, nil
}
// fetchHandles repeatedly extracts a batch of handles from the table reader,
// wraps each batch in an indexMergeTableTask, and sends it to fetchCh until
// the scan is exhausted, an error occurs, or the executor is shut down.
// It returns the number of handles fetched.
func (w *partialTableWorker) fetchHandles(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *indexMergeTableTask,
	finished <-chan struct{}, handleCols plannercore.HandleCols, parTblIdx int, partialPlanIndex int) (count int64, err error) {
	chk := w.sc.GetSessionVars().GetNewChunkWithCapacity(w.getRetTpsForTableScan(), w.maxChunkSize, w.maxChunkSize, w.tableReader.Base().AllocPool)
	for {
		start := time.Now()
		handles, retChunk, err := w.extractTaskHandles(ctx, chk, handleCols)
		if err != nil {
			// Propagate the error to the process worker before returning.
			syncErr(ctx, finished, fetchCh, err)
			return count, err
		}
		// No more handles: the scan is done.
		if len(handles) == 0 {
			return count, nil
		}
		count += int64(len(handles))
		task := w.buildTableTask(handles, retChunk, parTblIdx, partialPlanIndex)
		if w.stats != nil {
			atomic.AddInt64(&w.stats.FetchIdxTime, int64(time.Since(start)))
		}
		// Send the task unless the executor is cancelled, closed, or finished.
		select {
		case <-ctx.Done():
			return count, ctx.Err()
		case <-exitCh:
			return count, nil
		case <-finished:
			return count, nil
		case fetchCh <- task:
		}
	}
}
// getRetTpsForTableScan returns the field types produced by the wrapped table
// reader; these are the column types of the chunks read in fetchHandles.
func (w *partialTableWorker) getRetTpsForTableScan() []*types.FieldType {
	return retTypes(w.tableReader)
}
// extractTaskHandles reads rows from the table reader until it has collected
// up to batchSize handles (or the pushed-down limit / end of data is reached).
// When byItems is set it also returns the raw rows in retChk, which are needed
// later for the ORDER BY merge. The batch size is doubled after each call, up
// to maxBatchSize.
func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, handleCols plannercore.HandleCols) (
	handles []kv.Handle, retChk *chunk.Chunk, err error) {
	handles = make([]kv.Handle, 0, w.batchSize)
	if len(w.byItems) != 0 {
		retChk = chunk.NewChunkWithCapacity(w.getRetTpsForTableScan(), w.batchSize)
	}
	var memUsage int64
	var chunkRowOffset int
	// Release everything consumed in this call when it returns.
	defer w.memTracker.Consume(-memUsage)
	for len(handles) < w.batchSize {
		requiredRows := w.batchSize - len(handles)
		if w.pushedLimit != nil {
			// Stop once Offset+Count rows have been scanned.
			if w.pushedLimit.Offset+w.pushedLimit.Count <= w.scannedKeys {
				return handles, retChk, nil
			}
			requiredRows = mathutil.Min(int(w.pushedLimit.Offset+w.pushedLimit.Count-w.scannedKeys), requiredRows)
		}
		chk.SetRequiredRows(requiredRows, w.maxChunkSize)
		start := time.Now()
		err = errors.Trace(w.tableReader.Next(ctx, chk))
		if err != nil {
			return nil, nil, err
		}
		if be := w.tableReader.Base(); be != nil && be.RuntimeStats() != nil {
			be.RuntimeStats().Record(time.Since(start), chk.NumRows())
		}
		// An empty chunk means the underlying scan is exhausted.
		if chk.NumRows() == 0 {
			failpoint.Inject("testIndexMergeErrorPartialTableWorker", func(v failpoint.Value) {
				failpoint.Return(handles, nil, errors.New(v.(string)))
			})
			return handles, retChk, nil
		}
		memDelta := chk.MemoryUsage()
		memUsage += memDelta
		w.memTracker.Consume(memDelta)
		for chunkRowOffset = 0; chunkRowOffset < chk.NumRows(); chunkRowOffset++ {
			if w.pushedLimit != nil {
				w.scannedKeys++
				if w.scannedKeys > (w.pushedLimit.Offset + w.pushedLimit.Count) {
					// Skip the handles after Offset+Count.
					break
				}
			}
			var handle kv.Handle
			ok, err1 := w.needPartitionHandle()
			if err1 != nil {
				return nil, nil, err1
			}
			if ok {
				handle, err = handleCols.BuildPartitionHandleFromIndexRow(chk.GetRow(chunkRowOffset))
			} else {
				handle, err = handleCols.BuildHandleFromIndexRow(chk.GetRow(chunkRowOffset))
			}
			if err != nil {
				return nil, nil, err
			}
			handles = append(handles, handle)
		}
		// used for order by
		if len(w.byItems) != 0 {
			retChk.Append(chk, 0, chunkRowOffset)
		}
	}
	// Grow the batch for the next call, capped at maxBatchSize.
	w.batchSize *= 2
	if w.batchSize > w.maxBatchSize {
		w.batchSize = w.maxBatchSize
	}
	return handles, retChk, nil
}
// buildTableTask packs a batch of handles (plus the chunk rows retained for
// ORDER BY, if any) into an indexMergeTableTask ready for the process worker.
func (w *partialTableWorker) buildTableTask(handles []kv.Handle, retChk *chunk.Chunk, parTblIdx int, partialPlanID int) *indexMergeTableTask {
	var lookup lookupTableTask
	lookup.handles = handles
	lookup.idxRows = retChk
	if w.prunedPartitions != nil {
		lookup.partitionTable = w.prunedPartitions[parTblIdx]
	}
	// Buffered so the producer never blocks when reporting completion.
	lookup.doneCh = make(chan error, 1)
	return &indexMergeTableTask{
		lookupTableTask: lookup,
		parTblIdx:       parTblIdx,
		partialPlanID:   partialPlanID,
	}
}
// startIndexMergeTableScanWorker starts IndexLookupConcurrency() worker
// goroutines that consume table tasks from workCh, look up the final rows for
// each task's handles, and deliver the per-task result via task.doneCh.
func (e *IndexMergeReaderExecutor) startIndexMergeTableScanWorker(ctx context.Context, workCh <-chan *indexMergeTableTask) {
	lookupConcurrencyLimit := e.Ctx().GetSessionVars().IndexLookupConcurrency()
	e.tblWorkerWg.Add(lookupConcurrencyLimit)
	for i := 0; i < lookupConcurrencyLimit; i++ {
		worker := &indexMergeTableScanWorker{
			stats:          e.stats,
			workCh:         workCh,
			finished:       e.finished,
			indexMergeExec: e,
			tblPlans:       e.tblPlans,
			memTracker:     e.memTracker,
		}
		// Each worker gets its own cancelable context so its in-flight work
		// can be stopped independently when the goroutine exits.
		ctx1, cancel := context.WithCancel(ctx)
		go func() {
			defer trace.StartRegion(ctx, "IndexMergeTableScanWorker").End()
			var task *indexMergeTableTask
			util.WithRecovery(
				// Note we use the address of `task` as the argument of both `pickAndExecTask` and `handleTableScanWorkerPanic`
				// because `task` is expected to be assigned in `pickAndExecTask`, and this assignment should also be visible
				// in `handleTableScanWorkerPanic` since it will get `doneCh` from `task`. Golang always pass argument by value,
				// so if we don't use the address of `task` as the argument, the assignment to `task` in `pickAndExecTask` is
				// not visible in `handleTableScanWorkerPanic`
				func() { worker.pickAndExecTask(ctx1, &task) },
				worker.handleTableScanWorkerPanic(ctx1, e.finished, &task, tableScanWorkerType),
			)
			cancel()
			e.tblWorkerWg.Done()
		}()
	}
}
// buildFinalTableReader constructs the TableReaderExecutor that fetches the
// final rows for the given handles from tbl (a single partition when running
// in partition-table mode).
func (e *IndexMergeReaderExecutor) buildFinalTableReader(ctx context.Context, tbl table.Table, handles []kv.Handle) (_ exec.Executor, err error) {
	tableReaderExec := &TableReaderExecutor{
		BaseExecutor:     exec.NewBaseExecutor(e.Ctx(), e.Schema(), e.getTablePlanRootID()),
		table:            tbl,
		dagPB:            e.tableRequest,
		startTS:          e.startTS,
		txnScope:         e.txnScope,
		readReplicaScope: e.readReplicaScope,
		isStaleness:      e.isStaleness,
		columns:          e.columns,
		plans:            e.tblPlans,
		netDataSize:      e.dataAvgRowSize * float64(len(handles)),
	}
	tableReaderExec.buildVirtualColumnInfo()
	// Reorder handles because SplitKeyRangesByLocations() requires startKey of kvRanges is ordered.
	// Also it's good for performance.
	tableReader, err := e.dataReaderBuilder.buildTableReaderFromHandles(ctx, tableReaderExec, handles, true)
	if err != nil {
		logutil.Logger(ctx).Error("build table reader from handles failed", zap.Error(err))
		return nil, err
	}
	return tableReader, nil
}
// Next implements Executor Next interface.
// It fills req from finished result tasks until the chunk is full or no more
// tasks remain; a task's rows can span several Next calls via task.cursor.
func (e *IndexMergeReaderExecutor) Next(ctx context.Context, req *chunk.Chunk) error {
	// Workers are started lazily on the first Next call.
	if !e.workerStarted {
		if err := e.startWorkers(ctx); err != nil {
			return err
		}
	}
	req.Reset()
	for {
		resultTask, err := e.getResultTask(ctx)
		if err != nil {
			return errors.Trace(err)
		}
		if resultTask == nil {
			// All tasks consumed; an empty req signals EOF to the caller.
			return nil
		}
		if resultTask.cursor < len(resultTask.rows) {
			// Append at most the remaining chunk capacity; the cursor keeps
			// track of leftovers for the next call.
			numToAppend := mathutil.Min(len(resultTask.rows)-resultTask.cursor, e.MaxChunkSize()-req.NumRows())
			req.AppendRows(resultTask.rows[resultTask.cursor : resultTask.cursor+numToAppend])
			resultTask.cursor += numToAppend
			if req.NumRows() >= e.MaxChunkSize() {
				return nil
			}
		}
	}
}
// getResultTask returns the current task if it still has unread rows;
// otherwise it receives the next task from resultCh and waits for its table
// scan to finish. (nil, nil) means resultCh is closed and all rows were read.
func (e *IndexMergeReaderExecutor) getResultTask(ctx context.Context) (*indexMergeTableTask, error) {
	failpoint.Inject("testIndexMergeMainReturnEarly", func(_ failpoint.Value) {
		// To make sure processWorker make resultCh to be full.
		// When main goroutine close finished, processWorker may be stuck when writing resultCh.
		time.Sleep(time.Second * 20)
		failpoint.Return(nil, errors.New("failpoint testIndexMergeMainReturnEarly"))
	})
	if e.resultCurr != nil && e.resultCurr.cursor < len(e.resultCurr.rows) {
		return e.resultCurr, nil
	}
	task, ok := <-e.resultCh
	if !ok {
		return nil, nil
	}
	// Block until the table-scan worker completes this task or ctx is canceled.
	select {
	case <-ctx.Done():
		return nil, errors.Trace(ctx.Err())
	case err := <-task.doneCh:
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	// Release the memory usage of last task before we handle a new task.
	if e.resultCurr != nil {
		e.resultCurr.memTracker.Consume(-e.resultCurr.memUsage)
	}
	e.resultCurr = task
	return e.resultCurr, nil
}
// handleWorkerPanic returns a recover handler for the named worker kind. On
// panic it converts the recovered value into an error-carrying task and sends
// it to ch so the main goroutine observes the failure; extraNotifyCh (may be
// nil) is signaled first so sibling workers can stop early.
func handleWorkerPanic(ctx context.Context, finished <-chan struct{}, ch chan<- *indexMergeTableTask, extraNotifyCh chan bool, worker string) func(r interface{}) {
	return func(r interface{}) {
		if worker == processWorkerType {
			// There is only one processWorker, so it's safe to close here.
			// No need to worry about "close on closed channel" error.
			defer close(ch)
		}
		if r == nil {
			logutil.BgLogger().Debug("worker finish without panic", zap.Any("worker", worker))
			return
		}
		if extraNotifyCh != nil {
			extraNotifyCh <- true
		}
		err4Panic := errors.Errorf("%s: %v", worker, r)
		logutil.Logger(ctx).Error(err4Panic.Error())
		// doneCh is buffered with capacity 1, so this send never blocks.
		doneCh := make(chan error, 1)
		doneCh <- err4Panic
		task := &indexMergeTableTask{
			lookupTableTask: lookupTableTask{
				doneCh: doneCh,
			},
		}
		// Guard the send with ctx/finished so a shut-down reader can't hang us.
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case ch <- task:
			return
		}
	}
}
// Close implements Exec Close interface.
// Closing e.finished broadcasts the stop signal to all worker goroutines;
// the three WaitGroups are then drained before the executor is reset so it
// can be reopened.
func (e *IndexMergeReaderExecutor) Close() error {
	if e.stats != nil {
		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
	}
	if e.finished == nil {
		// Workers were never started, or Close already ran.
		return nil
	}
	close(e.finished)
	e.tblWorkerWg.Wait()
	e.idxWorkerWg.Wait()
	e.processWorkerWg.Wait()
	e.finished = nil
	e.workerStarted = false
	return nil
}
// indexMergeProcessWorker merges the partial results coming from the partial
// index/table workers (union or intersection) and builds final table tasks.
type indexMergeProcessWorker struct {
	indexMerge *IndexMergeReaderExecutor
	stats      *IndexMergeRuntimeStat
}
// rowIdx locates one buffered row inside handleHeap.taskMap: the partial plan
// it came from, the task index within that plan, and the row offset within
// that task.
type rowIdx struct {
	partialID int
	taskID    int
	rowID     int
}
// handleHeap keeps at most requiredCnt buffered rows ordered by byItems; it
// is used by fetchLoopUnionWithOrderBy to retain only the top Offset+Count
// rows when a limit is pushed down.
type handleHeap struct {
	// requiredCnt == 0 means need all handles
	requiredCnt uint64
	tracker     *memory.Tracker
	taskMap     map[int][]*indexMergeTableTask
	idx         []rowIdx
	compareFunc []chunk.CompareFunc
	byItems     []*plannerutil.ByItems
}
// Len implements sort.Interface; the heap size is the number of buffered row
// indexes.
func (h handleHeap) Len() int {
	return len(h.idx)
}
// Less implements sort.Interface. It compares the two referenced rows column
// by column following byItems; the comparison is negated for ascending items,
// which makes the heap keep its largest element on top (a max-heap suitable
// for retaining the smallest top-N rows).
func (h handleHeap) Less(i, j int) bool {
	rowI := h.taskMap[h.idx[i].partialID][h.idx[i].taskID].idxRows.GetRow(h.idx[i].rowID)
	rowJ := h.taskMap[h.idx[j].partialID][h.idx[j].taskID].idxRows.GetRow(h.idx[j].rowID)
	for col, compFunc := range h.compareFunc {
		cmp := compFunc(rowI, col, rowJ, col)
		if !h.byItems[col].Desc {
			cmp = -cmp
		}
		switch {
		case cmp < 0:
			return true
		case cmp > 0:
			return false
		}
	}
	return false
}
// Swap implements sort.Interface by exchanging the two row indexes.
func (h handleHeap) Swap(i, j int) {
	h.idx[i], h.idx[j] = h.idx[j], h.idx[i]
}
// Push implements heap.Interface: it appends one rowIdx and charges the
// tracker. unsafe.Sizeof(h.idx) is the size of the slice header, used here as
// the per-entry cost — presumably intended to approximate sizeof(rowIdx);
// both are 24 bytes on 64-bit platforms.
func (h *handleHeap) Push(x interface{}) {
	idx := x.(rowIdx)
	h.idx = append(h.idx, idx)
	if h.tracker != nil {
		h.tracker.Consume(int64(unsafe.Sizeof(h.idx)))
	}
}
// Pop implements heap.Interface: it removes and returns the last element and
// refunds the memory charged by Push.
func (h *handleHeap) Pop() interface{} {
	idxRet := h.idx[len(h.idx)-1]
	h.idx = h.idx[:len(h.idx)-1]
	if h.tracker != nil {
		h.tracker.Consume(-int64(unsafe.Sizeof(h.idx)))
	}
	return idxRet
}
// NewHandleHeap builds a handleHeap over taskMap. When a limit is pushed
// down, requiredCnt caps the heap at Offset+Count rows; a requiredCnt of 0
// means every row must be kept.
func (w *indexMergeProcessWorker) NewHandleHeap(taskMap map[int][]*indexMergeTableTask, memTracker *memory.Tracker) *handleHeap {
	compareFuncs := make([]chunk.CompareFunc, 0, len(w.indexMerge.byItems))
	for _, item := range w.indexMerge.byItems {
		keyType := item.Expr.GetType()
		compareFuncs = append(compareFuncs, chunk.GetCompareFunc(keyType))
	}
	requiredCnt := uint64(0)
	if w.indexMerge.pushedLimit != nil {
		requiredCnt = mathutil.Max(requiredCnt, w.indexMerge.pushedLimit.Count+w.indexMerge.pushedLimit.Offset)
	}
	return &handleHeap{
		requiredCnt: requiredCnt,
		tracker:     memTracker,
		taskMap:     taskMap,
		idx:         make([]rowIdx, 0, requiredCnt),
		compareFunc: compareFuncs,
		byItems:     w.indexMerge.byItems,
	}
}
// pruneTableWorkerTaskIdxRows prune idxRows and only keep columns that will be used in byItems.
// e.g. the common handle is (`b`, `c`) and order by with column `c`, we should make column `c` at the first.
func (w *indexMergeProcessWorker) pruneTableWorkerTaskIdxRows(task *indexMergeTableTask) {
	// Nothing to prune when no order-by rows were collected for this task.
	if task.idxRows == nil {
		return
	}
	// IndexScan no need to prune retChk, Columns required by byItems are always first.
	if plan, ok := w.indexMerge.partialPlans[task.partialPlanID][0].(*plannercore.PhysicalTableScan); ok {
		prune := make([]int, 0, len(w.indexMerge.byItems))
		for _, item := range plan.ByItems {
			c, _ := item.Expr.(*expression.Column)
			idx := plan.Schema().ColumnIndex(c)
			// couldn't equals to -1 here, if idx == -1, just let it panic
			prune = append(prune, idx)
		}
		task.idxRows = task.idxRows.Prune(prune)
	}
}
// fetchLoopUnionWithOrderBy handles the union case with ORDER BY: it
// deduplicates handles from all partial workers, retains at most
// Offset+Count rows in a max-heap, then emits the surviving handles in sorted
// order as batched table tasks on workCh/resultCh.
func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderBy(ctx context.Context, fetchCh <-chan *indexMergeTableTask,
	workCh chan<- *indexMergeTableTask, resultCh chan<- *indexMergeTableTask, finished <-chan struct{}) {
	memTracker := memory.NewTracker(w.indexMerge.ID(), -1)
	memTracker.AttachTo(w.indexMerge.memTracker)
	defer memTracker.Detach()
	defer close(workCh)
	if w.stats != nil {
		start := time.Now()
		defer func() {
			w.stats.IndexMergeProcess += time.Since(start)
		}()
	}
	distinctHandles := kv.NewHandleMap()
	taskMap := make(map[int][]*indexMergeTableTask)
	// uselessMap records partial plans whose further output can no longer
	// enter the top-N heap; their subsequent tasks are skipped.
	uselessMap := make(map[int]struct{})
	taskHeap := w.NewHandleHeap(taskMap, memTracker)
	for task := range fetchCh {
		select {
		case err := <-task.doneCh:
			// If got error from partialIndexWorker/partialTableWorker, stop processing.
			if err != nil {
				syncErr(ctx, finished, resultCh, err)
				return
			}
		default:
		}
		if _, ok := uselessMap[task.partialPlanID]; ok {
			continue
		}
		if _, ok := taskMap[task.partialPlanID]; !ok {
			taskMap[task.partialPlanID] = make([]*indexMergeTableTask, 0, 1)
		}
		w.pruneTableWorkerTaskIdxRows(task)
		taskMap[task.partialPlanID] = append(taskMap[task.partialPlanID], task)
		for i, h := range task.handles {
			if _, ok := distinctHandles.Get(h); !ok {
				distinctHandles.Set(h, true)
				heap.Push(taskHeap, rowIdx{task.partialPlanID, len(taskMap[task.partialPlanID]) - 1, i})
				if int(taskHeap.requiredCnt) != 0 && taskHeap.Len() > int(taskHeap.requiredCnt) {
					// Heap is over capacity: evict the current maximum (Less
					// is inverted, so the heap top is the largest row). If
					// the evicted entry is the row just pushed, no later row
					// from this partial plan can qualify either, so the rest
					// of its output is marked useless.
					top := heap.Pop(taskHeap).(rowIdx)
					if top.partialID == task.partialPlanID && top.taskID == len(taskMap[task.partialPlanID])-1 && top.rowID == i {
						uselessMap[task.partialPlanID] = struct{}{}
						task.handles = task.handles[:i]
						break
					}
				}
			}
			memTracker.Consume(int64(h.MemUsage()))
		}
		memTracker.Consume(task.idxRows.MemoryUsage())
		if len(uselessMap) == len(w.indexMerge.partialPlans) {
			// consume reset tasks
			go func() {
				channel.Clear(fetchCh)
			}()
			break
		}
	}
	needCount := taskHeap.Len()
	if w.indexMerge.pushedLimit != nil {
		needCount = mathutil.Max(0, taskHeap.Len()-int(w.indexMerge.pushedLimit.Offset))
	}
	if needCount == 0 {
		return
	}
	// Pop from the max-heap back to front so fhs ends up in ascending order.
	fhs := make([]kv.Handle, needCount)
	for i := needCount - 1; i >= 0; i-- {
		idx := heap.Pop(taskHeap).(rowIdx)
		fhs[i] = taskMap[idx.partialID][idx.taskID].handles[idx.rowID]
	}
	batchSize := w.indexMerge.Ctx().GetSessionVars().IndexLookupSize
	tasks := make([]*indexMergeTableTask, 0, len(fhs)/batchSize+1)
	for len(fhs) > 0 {
		l := mathutil.Min(len(fhs), batchSize)
		// Save the index order.
		indexOrder := kv.NewHandleMap()
		for i, h := range fhs[:l] {
			indexOrder.Set(h, i)
		}
		tasks = append(tasks, &indexMergeTableTask{
			lookupTableTask: lookupTableTask{
				handles:    fhs[:l],
				indexOrder: indexOrder,
				doneCh:     make(chan error, 1),
			},
		})
		fhs = fhs[l:]
	}
	for _, task := range tasks {
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case resultCh <- task:
			failpoint.Inject("testCancelContext", func() {
				IndexMergeCancelFuncForTest()
			})
			// resultCh first, then workCh; both sends are guarded so a
			// canceled consumer cannot deadlock this goroutine.
			select {
			case <-ctx.Done():
				return
			case <-finished:
				return
			case workCh <- task:
				continue
			}
		}
	}
}
// fetchLoopUnion handles the union case without ORDER BY: handles from all
// partial workers are deduplicated per physical table and forwarded as table
// tasks, honoring a pushed-down LIMIT (offset skipping + count trimming).
func (w *indexMergeProcessWorker) fetchLoopUnion(ctx context.Context, fetchCh <-chan *indexMergeTableTask,
	workCh chan<- *indexMergeTableTask, resultCh chan<- *indexMergeTableTask, finished <-chan struct{}) {
	failpoint.Inject("testIndexMergeResultChCloseEarly", func(_ failpoint.Value) {
		failpoint.Return()
	})
	memTracker := memory.NewTracker(w.indexMerge.ID(), -1)
	memTracker.AttachTo(w.indexMerge.memTracker)
	defer memTracker.Detach()
	defer close(workCh)
	failpoint.Inject("testIndexMergePanicProcessWorkerUnion", nil)
	// Work on a clone so the executor's pushedLimit is not mutated.
	var pushedLimit *plannercore.PushedDownLimit
	if w.indexMerge.pushedLimit != nil {
		pushedLimit = w.indexMerge.pushedLimit.Clone()
	}
	// One HandleMap per physical table ID: handles are only unique within a
	// single partition.
	distinctHandles := make(map[int64]*kv.HandleMap)
	for {
		var ok bool
		var task *indexMergeTableTask
		if pushedLimit != nil && pushedLimit.Count == 0 {
			// Limit exhausted; stop consuming early.
			return
		}
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case task, ok = <-fetchCh:
			if !ok {
				return
			}
		}
		select {
		case err := <-task.doneCh:
			// If got error from partialIndexWorker/partialTableWorker, stop processing.
			if err != nil {
				syncErr(ctx, finished, resultCh, err)
				return
			}
		default:
		}
		start := time.Now()
		handles := task.handles
		fhs := make([]kv.Handle, 0, 8)
		memTracker.Consume(int64(cap(task.handles) * 8))
		var tblID int64
		if w.indexMerge.partitionTableMode {
			tblID = getPhysicalTableID(task.partitionTable)
		} else {
			tblID = getPhysicalTableID(w.indexMerge.table)
		}
		if _, ok := distinctHandles[tblID]; !ok {
			distinctHandles[tblID] = kv.NewHandleMap()
		}
		hMap := distinctHandles[tblID]
		for _, h := range handles {
			if _, ok := hMap.Get(h); !ok {
				fhs = append(fhs, h)
				hMap.Set(h, true)
			}
		}
		if len(fhs) == 0 {
			continue
		}
		if pushedLimit != nil {
			fhsLen := uint64(len(fhs))
			// The number of handles is less than the offset, discard all handles.
			if fhsLen <= pushedLimit.Offset {
				pushedLimit.Offset -= fhsLen
				continue
			}
			fhs = fhs[pushedLimit.Offset:]
			pushedLimit.Offset = 0
			fhsLen = uint64(len(fhs))
			// The number of handles is greater than the limit, only keep limit count.
			if fhsLen > pushedLimit.Count {
				fhs = fhs[:pushedLimit.Count]
			}
			pushedLimit.Count -= mathutil.Min(pushedLimit.Count, fhsLen)
		}
		task = &indexMergeTableTask{
			lookupTableTask: lookupTableTask{
				handles:        fhs,
				doneCh:         make(chan error, 1),
				partitionTable: task.partitionTable,
			},
		}
		if w.stats != nil {
			w.stats.IndexMergeProcess += time.Since(start)
		}
		failpoint.Inject("testIndexMergeProcessWorkerUnionHang", func(_ failpoint.Value) {
			for i := 0; i < cap(resultCh); i++ {
				select {
				case resultCh <- &indexMergeTableTask{}:
				default:
				}
			}
		})
		// Send to resultCh first, then to workCh; both guarded against
		// cancellation so the loop cannot block forever.
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case resultCh <- task:
			failpoint.Inject("testCancelContext", func() {
				IndexMergeCancelFuncForTest()
			})
			select {
			case <-ctx.Done():
				return
			case <-finished:
				return
			case workCh <- task:
			}
		}
	}
}
// intersectionProcessWorker intersects the handle sets produced by the
// partial workers for the partitions routed to it (by parTblIdx modulo the
// worker count).
type intersectionProcessWorker struct {
	// key: parTblIdx, val: HandleMap
	// Value of MemAwareHandleMap is *int to avoid extra Get().
	handleMapsPerWorker map[int]*kv.MemAwareHandleMap[*int]
	workerID            int
	workerCh            chan *indexMergeTableTask
	indexMerge          *IndexMergeReaderExecutor
	memTracker          *memory.Tracker
	batchSize           int
	// When rowDelta == memConsumeBatchSize, Consume(memUsage)
	rowDelta      int64
	mapUsageDelta int64
}
// consumeMemDelta flushes the accumulated map/row memory deltas to the
// tracker in one batched Consume call and resets the counters.
func (w *intersectionProcessWorker) consumeMemDelta() {
	w.memTracker.Consume(w.mapUsageDelta + w.rowDelta*int64(unsafe.Sizeof(int(0))))
	w.mapUsageDelta = 0
	w.rowDelta = 0
}
// doIntersectionPerPartition counts, per partition, how many partial plans
// produced each handle; handles seen by every partial plan form the
// intersection and are emitted as batched table tasks after workerCh closes.
func (w *intersectionProcessWorker) doIntersectionPerPartition(ctx context.Context, workCh chan<- *indexMergeTableTask, resultCh chan<- *indexMergeTableTask, finished <-chan struct{}) {
	failpoint.Inject("testIndexMergePanicPartitionTableIntersectionWorker", nil)
	defer w.memTracker.Detach()
	for task := range w.workerCh {
		var ok bool
		var hMap *kv.MemAwareHandleMap[*int]
		if hMap, ok = w.handleMapsPerWorker[task.parTblIdx]; !ok {
			hMap = kv.NewMemAwareHandleMap[*int]()
			w.handleMapsPerWorker[task.parTblIdx] = hMap
		}
		var mapDelta int64
		var rowDelta int64
		for _, h := range task.handles {
			// Use *int to avoid Get() again.
			if cntPtr, ok := hMap.Get(h); ok {
				(*cntPtr)++
			} else {
				cnt := 1
				mapDelta += hMap.Set(h, &cnt) + int64(h.ExtraMemSize())
				rowDelta++
			}
		}
		logutil.BgLogger().Debug("intersectionProcessWorker handle tasks", zap.Int("workerID", w.workerID),
			zap.Int("task.handles", len(task.handles)), zap.Int64("rowDelta", rowDelta))
		w.mapUsageDelta += mapDelta
		w.rowDelta += rowDelta
		// Batch the memory accounting to avoid a tracker call per row.
		if w.rowDelta >= int64(w.batchSize) {
			w.consumeMemDelta()
		}
		failpoint.Inject("testIndexMergeIntersectionWorkerPanic", nil)
	}
	if w.rowDelta > 0 {
		w.consumeMemDelta()
	}
	// We assume the result of intersection is small, so no need to track memory.
	intersectedMap := make(map[int][]kv.Handle, len(w.handleMapsPerWorker))
	for parTblIdx, hMap := range w.handleMapsPerWorker {
		hMap.Range(func(h kv.Handle, val *int) bool {
			if *(val) == len(w.indexMerge.partialPlans) {
				// Means all partial paths have this handle.
				intersectedMap[parTblIdx] = append(intersectedMap[parTblIdx], h)
			}
			return true
		})
	}
	tasks := make([]*indexMergeTableTask, 0, len(w.handleMapsPerWorker))
	for parTblIdx, intersected := range intersectedMap {
		// Split intersected[parTblIdx] to avoid task is too large.
		for len(intersected) > 0 {
			length := w.batchSize
			if length > len(intersected) {
				length = len(intersected)
			}
			task := &indexMergeTableTask{
				lookupTableTask: lookupTableTask{
					handles: intersected[:length],
					doneCh:  make(chan error, 1),
				},
			}
			intersected = intersected[length:]
			if w.indexMerge.partitionTableMode {
				task.partitionTable = w.indexMerge.prunedPartitions[parTblIdx]
			}
			tasks = append(tasks, task)
			logutil.BgLogger().Debug("intersectionProcessWorker build tasks",
				zap.Int("parTblIdx", parTblIdx), zap.Int("task.handles", len(task.handles)))
		}
	}
	failpoint.Inject("testIndexMergeProcessWorkerIntersectionHang", func(_ failpoint.Value) {
		for i := 0; i < cap(resultCh); i++ {
			select {
			case resultCh <- &indexMergeTableTask{}:
			default:
			}
		}
	})
	for _, task := range tasks {
		// Note: here workCh is sent first, then resultCh — the reverse of
		// the union workers' order.
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case workCh <- task:
			select {
			case <-ctx.Done():
				return
			case <-finished:
				return
			case resultCh <- task:
			}
		}
	}
}
// fetchLoopIntersection dispatches partial-result tasks to a bounded pool of
// intersectionProcessWorkers.
// For each partition(dynamic mode), a map is used to do intersection. Key of the map is handle, and value is the number of times it occurs.
// If the value of handle equals the number of partial paths, it should be sent to final_table_scan_worker.
// To avoid too many goroutines, each intersectionProcessWorker can handle multiple partitions.
func (w *indexMergeProcessWorker) fetchLoopIntersection(ctx context.Context, fetchCh <-chan *indexMergeTableTask,
	workCh chan<- *indexMergeTableTask, resultCh chan<- *indexMergeTableTask, finished <-chan struct{}) {
	defer close(workCh)
	if w.stats != nil {
		start := time.Now()
		defer func() {
			w.stats.IndexMergeProcess += time.Since(start)
		}()
	}
	failpoint.Inject("testIndexMergePanicProcessWorkerIntersection", nil)
	// One goroutine may handle one or multiple partitions.
	// Max number of partition number is 8192, we use ExecutorConcurrency to avoid too many goroutines.
	maxWorkerCnt := w.indexMerge.Ctx().GetSessionVars().IndexMergeIntersectionConcurrency()
	maxChannelSize := atomic.LoadInt32(&LookupTableTaskChannelSize)
	batchSize := w.indexMerge.Ctx().GetSessionVars().IndexLookupSize
	partCnt := 1
	if w.indexMerge.partitionTableMode {
		partCnt = len(w.indexMerge.prunedPartitions)
	}
	workerCnt := mathutil.Min(partCnt, maxWorkerCnt)
	failpoint.Inject("testIndexMergeIntersectionConcurrency", func(val failpoint.Value) {
		con := val.(int)
		if con != workerCnt {
			panic(fmt.Sprintf("unexpected workerCnt, expect %d, got %d", con, workerCnt))
		}
	})
	workers := make([]*intersectionProcessWorker, 0, workerCnt)
	wg := util.WaitGroupWrapper{}
	errCh := make(chan bool, workerCnt)
	for i := 0; i < workerCnt; i++ {
		tracker := memory.NewTracker(w.indexMerge.ID(), -1)
		tracker.AttachTo(w.indexMerge.memTracker)
		worker := &intersectionProcessWorker{
			workerID:            i,
			handleMapsPerWorker: make(map[int]*kv.MemAwareHandleMap[*int]),
			workerCh:            make(chan *indexMergeTableTask, maxChannelSize),
			indexMerge:          w.indexMerge,
			memTracker:          tracker,
			batchSize:           batchSize,
		}
		wg.RunWithRecover(func() {
			defer trace.StartRegion(ctx, "IndexMergeIntersectionProcessWorker").End()
			worker.doIntersectionPerPartition(ctx, workCh, resultCh, finished)
		}, handleWorkerPanic(ctx, finished, resultCh, errCh, partTblIntersectionWorkerType))
		workers = append(workers, worker)
	}
	// Closing the per-worker channels lets the workers drain and exit; wait
	// for them here so the deferred close(workCh) above (which runs after
	// this defer, LIFO) only fires once no worker can still send on it.
	defer func() {
		for _, processWorker := range workers {
			close(processWorker.workerCh)
		}
		wg.Wait()
	}()
	for {
		var ok bool
		var task *indexMergeTableTask
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case task, ok = <-fetchCh:
			if !ok {
				return
			}
		}
		select {
		case err := <-task.doneCh:
			// If got error from partialIndexWorker/partialTableWorker, stop processing.
			if err != nil {
				syncErr(ctx, finished, resultCh, err)
				return
			}
		default:
		}
		// Route by partition index so one partition always lands on the same
		// worker.
		select {
		case <-ctx.Done():
			return
		case <-finished:
			return
		case workers[task.parTblIdx%workerCnt].workerCh <- task:
		case <-errCh:
			// If got error from intersectionProcessWorker, stop processing.
			return
		}
	}
}
// partialIndexWorker fetches row handles from one partial index scan and
// groups them into tasks for the process worker; batchSize grows up to
// maxBatchSize as scanning progresses.
type partialIndexWorker struct {
	stats              *IndexMergeRuntimeStat
	sc                 sessionctx.Context
	idxID              int
	batchSize          int
	maxBatchSize       int
	maxChunkSize       int
	memTracker         *memory.Tracker
	partitionTableMode bool
	prunedPartitions   []table.PhysicalTable
	byItems            []*plannerutil.ByItems
	scannedKeys        uint64
	pushedLimit        *plannercore.PushedDownLimit
	dagPB              *tipb.DAGRequest
	plan               []plannercore.PhysicalPlan
}
// syncErr wraps err into a task whose buffered doneCh already carries the
// error and pushes that task onto errCh, so the consumer observes the
// failure. The send is guarded by ctx/finished so a shutting-down reader can
// never block this goroutine.
func syncErr(ctx context.Context, finished <-chan struct{}, errCh chan<- *indexMergeTableTask, err error) {
	logutil.BgLogger().Error("IndexMergeReaderExecutor.syncErr", zap.Error(err))
	doneCh := make(chan error, 1)
	doneCh <- err
	task := &indexMergeTableTask{
		lookupTableTask: lookupTableTask{doneCh: doneCh},
	}
	// ctx.Done and finished is to avoid write channel is stuck.
	select {
	case <-ctx.Done():
	case <-finished:
	case errCh <- task:
	}
}
// needPartitionHandle indicates whether we need create a partitonHandle or not.
// If the schema from planner part contains ExtraPidColID or ExtraPhysTblID,
// we need create a partitionHandle, otherwise create a normal handle.
// In TableRowIDScan, the partitionHandle will be used to create key ranges.
func (w *partialIndexWorker) needPartitionHandle() (bool, error) {
	cols := w.plan[0].Schema().Columns
	outputOffsets := w.dagPB.OutputOffsets
	// Only the last output column is inspected for the extra pid /
	// physical-table-ID column.
	col := cols[outputOffsets[len(outputOffsets)-1]]
	needPartitionHandle := w.partitionTableMode && len(w.byItems) > 0
	hasExtraCol := col.ID == model.ExtraPidColID || col.ID == model.ExtraPhysTblID
	// There will be two needPartitionHandle != hasExtraCol situations.
	// Only `needPartitionHandle` == true and `hasExtraCol` == false are not allowed.
	// `ExtraPhysTblID` will be used in `SelectLock` when `needPartitionHandle` == false and `hasExtraCol` == true.
	if needPartitionHandle && !hasExtraCol {
		return needPartitionHandle, errors.Errorf("Internal error, needPartitionHandle != ret")
	}
	return needPartitionHandle, nil
}
// fetchHandles drains every partial SelectResult in order, batching the
// extracted handles into tasks pushed onto fetchCh. It returns the total
// number of handles fetched.
func (w *partialIndexWorker) fetchHandles(
	ctx context.Context,
	results []distsql.SelectResult,
	exitCh <-chan struct{},
	fetchCh chan<- *indexMergeTableTask,
	finished <-chan struct{},
	handleCols plannercore.HandleCols,
	partialPlanIndex int) (count int64, err error) {
	tps := w.getRetTpsForIndexScan(handleCols)
	chk := chunk.NewChunkWithCapacity(tps, w.maxChunkSize)
	// i only advances when results[i] is exhausted (returns zero handles).
	for i := 0; i < len(results); {
		start := time.Now()
		handles, retChunk, err := w.extractTaskHandles(ctx, chk, results[i], handleCols)
		if err != nil {
			syncErr(ctx, finished, fetchCh, err)
			return count, err
		}
		if len(handles) == 0 {
			i++
			continue
		}
		count += int64(len(handles))
		task := w.buildTableTask(handles, retChunk, i, partialPlanIndex)
		if w.stats != nil {
			atomic.AddInt64(&w.stats.FetchIdxTime, int64(time.Since(start)))
		}
		select {
		case <-ctx.Done():
			return count, ctx.Err()
		case <-exitCh:
			return count, nil
		case <-finished:
			return count, nil
		case fetchCh <- task:
		}
	}
	return count, nil
}
// getRetTpsForIndexScan returns the field types produced by the partial index
// scan: the order-by columns first (if any), then the handle columns, then an
// extra int64 column when a partition handle is required.
func (w *partialIndexWorker) getRetTpsForIndexScan(handleCols plannercore.HandleCols) []*types.FieldType {
	var tps []*types.FieldType
	// Ranging over an empty byItems slice is a no-op, so no guard is needed.
	for _, item := range w.byItems {
		tps = append(tps, item.Expr.GetType())
	}
	tps = append(tps, handleCols.GetFieldsTypes()...)
	if ok, _ := w.needPartitionHandle(); ok {
		tps = append(tps, types.NewFieldType(mysql.TypeLonglong))
	}
	return tps
}
// extractTaskHandles reads up to batchSize handles from idxResult. When
// byItems is set, the corresponding index rows are also collected into retChk
// so the process worker can sort by them. The batch size doubles after each
// full batch, capped at maxBatchSize.
func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult distsql.SelectResult, handleCols plannercore.HandleCols) (
	handles []kv.Handle, retChk *chunk.Chunk, err error) {
	handles = make([]kv.Handle, 0, w.batchSize)
	if len(w.byItems) != 0 {
		retChk = chunk.NewChunkWithCapacity(w.getRetTpsForIndexScan(handleCols), w.batchSize)
	}
	var memUsage int64
	var chunkRowOffset int
	// Chunk memory charged below is transient and released on return; the
	// extracted handles are accounted elsewhere.
	defer w.memTracker.Consume(-memUsage)
	for len(handles) < w.batchSize {
		requiredRows := w.batchSize - len(handles)
		if w.pushedLimit != nil {
			if w.pushedLimit.Offset+w.pushedLimit.Count <= w.scannedKeys {
				// Offset+Count keys already scanned: nothing more is needed.
				return handles, retChk, nil
			}
			requiredRows = mathutil.Min(int(w.pushedLimit.Offset+w.pushedLimit.Count-w.scannedKeys), requiredRows)
		}
		chk.SetRequiredRows(requiredRows, w.maxChunkSize)
		start := time.Now()
		err = errors.Trace(idxResult.Next(ctx, chk))
		if err != nil {
			return nil, nil, err
		}
		if w.stats != nil && w.idxID != 0 {
			w.sc.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(w.idxID).Record(time.Since(start), chk.NumRows())
		}
		if chk.NumRows() == 0 {
			failpoint.Inject("testIndexMergeErrorPartialIndexWorker", func(v failpoint.Value) {
				failpoint.Return(handles, nil, errors.New(v.(string)))
			})
			return handles, retChk, nil
		}
		memDelta := chk.MemoryUsage()
		memUsage += memDelta
		w.memTracker.Consume(memDelta)
		for chunkRowOffset = 0; chunkRowOffset < chk.NumRows(); chunkRowOffset++ {
			if w.pushedLimit != nil {
				w.scannedKeys++
				if w.scannedKeys > (w.pushedLimit.Offset + w.pushedLimit.Count) {
					// Skip the handles after Offset+Count.
					break
				}
			}
			var handle kv.Handle
			ok, err1 := w.needPartitionHandle()
			if err1 != nil {
				return nil, nil, err1
			}
			if ok {
				handle, err = handleCols.BuildPartitionHandleFromIndexRow(chk.GetRow(chunkRowOffset))
			} else {
				handle, err = handleCols.BuildHandleFromIndexRow(chk.GetRow(chunkRowOffset))
			}
			if err != nil {
				return nil, nil, err
			}
			handles = append(handles, handle)
		}
		// used for order by
		if len(w.byItems) != 0 {
			retChk.Append(chk, 0, chunkRowOffset)
		}
	}
	w.batchSize *= 2
	if w.batchSize > w.maxBatchSize {
		w.batchSize = w.maxBatchSize
	}
	return handles, retChk, nil
}
// buildTableTask wraps a batch of handles (plus the optional order-by rows in
// retChk) into an indexMergeTableTask ready for the table-scan workers.
func (w *partialIndexWorker) buildTableTask(handles []kv.Handle, retChk *chunk.Chunk, parTblIdx int, partialPlanID int) *indexMergeTableTask {
	task := &indexMergeTableTask{
		lookupTableTask: lookupTableTask{
			handles: handles,
			idxRows: retChk,
			// Buffered so the worker reporting the result never blocks.
			doneCh: make(chan error, 1),
		},
		parTblIdx:     parTblIdx,
		partialPlanID: partialPlanID,
	}
	if w.prunedPartitions != nil {
		task.partitionTable = w.prunedPartitions[parTblIdx]
	}
	return task
}
// indexMergeTableScanWorker executes table tasks from workCh: it looks up the
// final table rows for each task's handles.
type indexMergeTableScanWorker struct {
	stats          *IndexMergeRuntimeStat
	workCh         <-chan *indexMergeTableTask
	finished       <-chan struct{}
	indexMergeExec *IndexMergeReaderExecutor
	tblPlans       []plannercore.PhysicalPlan
	// memTracker is used to track the memory usage of this executor.
	memTracker *memory.Tracker
}
// pickAndExecTask receives tasks from workCh, runs the table lookup for each,
// and reports the per-task result through task.doneCh. It returns when workCh
// closes or the executor is canceled/finished. task is a pointer-to-pointer
// so the panic handler can see the task picked here (see the comment in
// startIndexMergeTableScanWorker).
func (w *indexMergeTableScanWorker) pickAndExecTask(ctx context.Context, task **indexMergeTableTask) {
	var ok bool
	for {
		waitStart := time.Now()
		select {
		case <-ctx.Done():
			return
		case <-w.finished:
			return
		case *task, ok = <-w.workCh:
			if !ok {
				return
			}
		}
		// Make sure panic failpoint is after fetch task from workCh.
		// Otherwise cannot send error to task.doneCh.
		failpoint.Inject("testIndexMergePanicTableScanWorker", nil)
		execStart := time.Now()
		err := w.executeTask(ctx, *task)
		if w.stats != nil {
			atomic.AddInt64(&w.stats.WaitTime, int64(execStart.Sub(waitStart)))
			atomic.AddInt64(&w.stats.FetchRow, int64(time.Since(execStart)))
			atomic.AddInt64(&w.stats.TableTaskNum, 1)
		}
		failpoint.Inject("testIndexMergePickAndExecTaskPanic", nil)
		select {
		case <-ctx.Done():
			return
		case <-w.finished:
			return
		case (*task).doneCh <- err:
		}
	}
}
// handleTableScanWorkerPanic returns a recover handler that forwards a panic
// in the table-scan worker to the current task's doneCh (if a task was
// already picked), so the consumer waiting on that task sees the failure
// instead of hanging.
func (*indexMergeTableScanWorker) handleTableScanWorkerPanic(ctx context.Context, finished <-chan struct{}, task **indexMergeTableTask, worker string) func(r interface{}) {
	return func(r interface{}) {
		if r == nil {
			logutil.BgLogger().Debug("worker finish without panic", zap.Any("worker", worker))
			return
		}
		err4Panic := errors.Errorf("%s: %v", worker, r)
		logutil.Logger(ctx).Error(err4Panic.Error())
		if *task != nil {
			select {
			case <-ctx.Done():
				return
			case <-finished:
				return
			case (*task).doneCh <- err4Panic:
				return
			}
		}
	}
}
// executeTask builds a table reader over the task's handles, collects every
// produced row into task.rows (re-sorting into the saved index order when
// keepOrder is set), and charges the rows' memory to the task tracker.
func (w *indexMergeTableScanWorker) executeTask(ctx context.Context, task *indexMergeTableTask) error {
	tbl := w.indexMergeExec.table
	if w.indexMergeExec.partitionTableMode && task.partitionTable != nil {
		tbl = task.partitionTable
	}
	tableReader, err := w.indexMergeExec.buildFinalTableReader(ctx, tbl, task.handles)
	if err != nil {
		logutil.Logger(ctx).Error("build table reader failed", zap.Error(err))
		return err
	}
	defer terror.Call(tableReader.Close)
	task.memTracker = w.memTracker
	memUsage := int64(cap(task.handles) * 8)
	task.memUsage = memUsage
	task.memTracker.Consume(memUsage)
	handleCnt := len(task.handles)
	task.rows = make([]chunk.Row, 0, handleCnt)
	for {
		chk := tryNewCacheChunk(tableReader)
		err = Next(ctx, tableReader, chk)
		if err != nil {
			logutil.Logger(ctx).Error("table reader fetch next chunk failed", zap.Error(err))
			return err
		}
		if chk.NumRows() == 0 {
			break
		}
		memUsage = chk.MemoryUsage()
		task.memUsage += memUsage
		task.memTracker.Consume(memUsage)
		iter := chunk.NewIterator4Chunk(chk)
		for row := iter.Begin(); row != iter.End(); row = iter.Next() {
			task.rows = append(task.rows, row)
		}
	}
	if w.indexMergeExec.keepOrder {
		// Because len(outputOffsets) == tableScan.Schema().Len(),
		// so we could use row.GetInt64(idx) to get partition ID here.
		// TODO: We could add plannercore.PartitionHandleCols to unify them.
		physicalTableIDIdx := -1
		for i, c := range w.indexMergeExec.Schema().Columns {
			if c.ID == model.ExtraPhysTblID {
				physicalTableIDIdx = i
				break
			}
		}
		task.rowIdx = make([]int, 0, len(task.rows))
		for _, row := range task.rows {
			handle, err := w.indexMergeExec.handleCols.BuildHandle(row)
			if err != nil {
				return err
			}
			if w.indexMergeExec.partitionTableMode && physicalTableIDIdx != -1 {
				handle = kv.NewPartitionHandle(row.GetInt64(physicalTableIDIdx), handle)
			}
			// indexOrder maps handle -> position in the original handle
			// batch; the sort below restores that order.
			rowIdx, _ := task.indexOrder.Get(handle)
			task.rowIdx = append(task.rowIdx, rowIdx.(int))
		}
		sort.Sort(task)
	}
	memUsage = int64(cap(task.rows)) * int64(unsafe.Sizeof(chunk.Row{}))
	task.memUsage += memUsage
	task.memTracker.Consume(memUsage)
	// With a single table plan, every handle must yield exactly one row.
	if handleCnt != len(task.rows) && len(w.tblPlans) == 1 {
		return errors.Errorf("handle count %d isn't equal to value count %d", handleCnt, len(task.rows))
	}
	return nil
}
// IndexMergeRuntimeStat record the indexMerge runtime stat
type IndexMergeRuntimeStat struct {
	IndexMergeProcess time.Duration // time spent merging partial results
	FetchIdxTime      int64         // cumulative handle-fetch time, nanoseconds
	WaitTime          int64         // table workers' wait-for-task time, nanoseconds
	FetchRow          int64         // table workers' row-fetch time, nanoseconds
	TableTaskNum      int64         // number of table tasks executed
	Concurrency       int
}
// String formats the collected runtime statistics for EXPLAIN ANALYZE output.
// Sections with zero values are omitted entirely.
func (e *IndexMergeRuntimeStat) String() string {
	var buf bytes.Buffer
	if e.FetchIdxTime != 0 {
		fmt.Fprintf(&buf, "index_task:{fetch_handle:%s", time.Duration(e.FetchIdxTime))
		if e.IndexMergeProcess != 0 {
			fmt.Fprintf(&buf, ", merge:%s", e.IndexMergeProcess)
		}
		buf.WriteByte('}')
	}
	if e.FetchRow != 0 {
		if buf.Len() > 0 {
			buf.WriteByte(',')
		}
		fmt.Fprintf(&buf, " table_task:{num:%d, concurrency:%d, fetch_row:%s, wait_time:%s}", e.TableTaskNum, e.Concurrency, time.Duration(e.FetchRow), time.Duration(e.WaitTime))
	}
	return buf.String()
}
// Clone implements the RuntimeStats interface.
// A shallow copy suffices because every field is a scalar.
func (e *IndexMergeRuntimeStat) Clone() execdetails.RuntimeStats {
	newRs := *e
	return &newRs
}
// Merge implements the RuntimeStats interface.
// It accumulates the stats collected by another IndexMergeRuntimeStat into
// the receiver; values of a different concrete type are ignored.
func (e *IndexMergeRuntimeStat) Merge(other execdetails.RuntimeStats) {
	tmp, ok := other.(*IndexMergeRuntimeStat)
	if !ok {
		return
	}
	e.IndexMergeProcess += tmp.IndexMergeProcess
	e.FetchIdxTime += tmp.FetchIdxTime
	e.FetchRow += tmp.FetchRow
	// Bug fix: this previously read `e.WaitTime += e.WaitTime`, which doubled
	// the receiver's own wait time instead of adding the merged stat's.
	e.WaitTime += tmp.WaitTime
	e.TableTaskNum += tmp.TableTaskNum
}
// Tp implements the RuntimeStats interface.
// It identifies this stats object as index-merge runtime statistics.
func (*IndexMergeRuntimeStat) Tp() int {
	return execdetails.TpIndexMergeRunTimeStats
}
|
package appmanager
import (
"context"
"errors"
"github.com/fintechstudios/ververica-platform-k8s-operator/controllers/utils"
appmanager "github.com/fintechstudios/ververica-platform-k8s-operator/appmanager-api-client"
)
// GetDeploymentByName fetches a deployment from the VP by namespace and name
// GetDeploymentByName fetches a deployment from the VP by namespace and name.
// It lists every deployment in the namespace and returns the first one whose
// metadata name matches; a utils.DeploymentNotFoundError is returned when
// none matches.
func GetDeploymentByName(ctx context.Context, apiClient *appmanager.APIClient, namespace string, name string) (appmanager.Deployment, error) {
	// zero is the zero-valued result returned alongside every error, so a
	// caller who ignores the error can't mistake a stale struct for a match.
	var zero appmanager.Deployment
	if len(namespace) == 0 || len(name) == 0 {
		return zero, errors.New("namespace and name must not be empty")
	}
	deploymentsList, _, err := apiClient.DeploymentsApi.GetDeployments(ctx, namespace, nil)
	if err != nil {
		return zero, err
	}
	for _, deployment := range deploymentsList.Items {
		if deployment.Metadata.Name == name {
			return deployment, nil
		}
	}
	// Bug fix: previously the last-iterated deployment leaked out together
	// with the not-found error because the loop reused the result variable.
	return zero, utils.DeploymentNotFoundError{Namespace: namespace, Name: name}
}
|
package Core
import (
"sync"
"fmt"
"com/pdool/DataStruct"
)
// eventMgrInstance holds the process-wide singleton; eventLock guards its
// lazy initialization.
var eventMgrInstance *EventMgr
var eventLock = &sync.Mutex{}

// GetEventMgr returns the EventMgr singleton, creating it on first use.
// Every call takes the lock, so concurrent first calls are safe.
func GetEventMgr() *EventMgr {
	eventLock.Lock()
	defer eventLock.Unlock()
	if eventMgrInstance == nil {
		eventMgrInstance = &EventMgr{msgQueue: &DataStruct.Queue{}}
	}
	return eventMgrInstance
}
// EventMgr buffers events pushed by producers and dispatches them when Start
// is called.
type EventMgr struct {
	msgQueue *DataStruct.Queue
}
// Start drains the currently queued events, dispatching each on its own
// goroutine, and returns once the queue is empty — it does not keep waiting
// for future events.
// NOTE(review): assumes DataStruct.Queue is safe for concurrent
// Enqueue/Dequeue — confirm; otherwise Push racing with Start is unsafe.
func (eventMgr *EventMgr) Start() {
	for eventMgr.msgQueue.Size() > 0 {
		ele := eventMgr.msgQueue.Dequeue()
		event := ele.(*Event)
		go dealEvent(event)
	}
}
// Push enqueues an event for later dispatch by Start.
func (eventMgr *EventMgr) Push(event *Event) {
	eventMgr.msgQueue.Enqueue(event)
}
// dealEvent logs the event's component name, ID and message, one per line.
func dealEvent(event *Event) {
	fmt.Println(event.GetComponentName())
	fmt.Println(event.GetEventId())
	fmt.Println(event.GetMsg())
}
|
package config
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
)
// Config is the state persisted as JSON in the kubectx config file
// (see getCfgPath).
type Config struct {
	LastNamespace string `json:"last_namespace"`
	LastContext   string `json:"last_context"`
}
// getCfgPath returns the path of the kubectx config file
// ($UserConfigDir/kubectx/config.json), creating the directory and seeding a
// default config file on first use.
func getCfgPath() (string, error) {
	base, err := os.UserConfigDir()
	if err != nil {
		return "", err
	}
	dir := filepath.Join(base, "kubectx")
	if err = os.MkdirAll(dir, 0777); err != nil {
		return "", err
	}
	cfgPath := filepath.Join(dir, "config.json")
	// First run: write a default configuration. (The redundant shadowing
	// `var err error` from the original branch has been removed.)
	if _, err = os.Lstat(cfgPath); os.IsNotExist(err) {
		defaultCfg := Config{
			LastNamespace: "default",
			LastContext:   "",
		}
		buf := &bytes.Buffer{}
		// Encoder (rather than Marshal) keeps the original on-disk format,
		// including its trailing newline.
		if err = json.NewEncoder(buf).Encode(&defaultCfg); err != nil {
			return "", err
		}
		if err = ioutil.WriteFile(cfgPath, buf.Bytes(), 0666); err != nil {
			return "", err
		}
	}
	return cfgPath, nil
}
// WriteCfg serializes cfg to JSON and writes it to the config file path.
func WriteCfg(cfg *Config) error {
	cfgPath, err := getCfgPath()
	if err != nil {
		return err
	}
	data, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(cfgPath, data, 0600)
}
// ReadCfg loads the config file (creating it with defaults via getCfgPath if
// necessary). A corrupt config file is tolerated: the unmarshal error is
// deliberately ignored and zero-valued fields are returned instead.
func ReadCfg() (*Config, error) {
	cfg := &Config{}
	cfgPath, err := getCfgPath()
	if err != nil {
		return nil, err
	}
	b, err := ioutil.ReadFile(cfgPath)
	if err != nil {
		return nil, err
	}
	// Best effort: keep the zero values when the file content is not valid
	// JSON. (The original also had a dead `if err != nil` check here that
	// re-tested the already-nil ReadFile error; it has been removed along
	// with commented-out code.)
	_ = json.Unmarshal(b, cfg)
	return cfg, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.