text
stringlengths 11
4.05M
|
|---|
package main
/*
123
++ package dùng để nhóm 1 hoặc nhiều tập tin có liên quan đến nhau
++ tên package sử dụng in thường, định nghĩa ở đầu chương trình
++ tệp để chạy chương trình thì tên là package main, đồng thời phải khởi tạo func main
trình Go compiler sẽ tìm func main để run
*/
/*
++ import package để sử dụng 1 package khác
++ "fmt" : package formated input/output xử lý nhập xuất
*/
import "fmt"
// main is the program entry point: it prints a greeting and then the
// sum of the integers 0..4 (sum(5) == 10).
func main() {
	fmt.Println("hello duong")
	fmt.Println(sum(5))
}
// sum returns the total of all non-negative integers strictly below
// number, i.e. 0 + 1 + ... + (number-1). For number <= 0 it returns 0.
func sum(number int) int {
	total := 0
	// Count down instead of up; the set of added terms is identical.
	for n := number - 1; n >= 0; n-- {
		total += n
	}
	return total
}
|
package ch06
// BubbleSort sorts arr in place. comp(a, b) reports whether a and b are
// out of order (e.g. pass func(a, b int) bool { return a > b } for an
// ascending sort).
//
// Bug fix: the inner bound was len(arr)-1-i-1, which skipped the final
// adjacent pair on every pass, so the slice was never fully sorted.
func BubbleSort(arr []int, comp func(int, int) bool) {
	for i := 0; i < len(arr)-1; i++ {
		// After i passes the last i elements are already in their final
		// positions, so the scan stops at len(arr)-1-i.
		for j := 0; j < len(arr)-1-i; j++ {
			if comp(arr[j], arr[j+1]) {
				// swap
				arr[j+1], arr[j] = arr[j], arr[j+1]
			}
		}
	}
}
|
package backup
import (
"context"
"github.com/cbochs/spotify-backup-api/schema"
"github.com/cbochs/spotify-backup-api/spotify"
"github.com/cbochs/spotify-backup-api/spotify/options"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// Client performs Spotify backups for a single user.
type Client struct {
	ID   primitive.ObjectID // database id of the backed-up user
	User schema.SpotifyUser // the Spotify user whose data is backed up
	// service provides database access (ListeningHistory, Users).
	service *Service
	// sp is the authenticated Spotify API client; replaceable via replaceClient.
	sp *spotify.Client
}
// NewClient builds a backup Client for the given user.
//
// Bug fix: the user argument was previously dropped, leaving Client.User
// zero-valued even though every BackupRecentlyPlayed record is tagged
// with it.
func NewClient(id primitive.ObjectID, user schema.SpotifyUser, s *Service, sp *spotify.Client) *Client {
	return &Client{
		ID:      id,
		User:    user,
		service: s,
		sp:      sp,
	}
}
// replaceClient swaps in a new Spotify API client, e.g. after a token refresh.
func (c *Client) replaceClient(sp *spotify.Client) {
	c.sp = sp
}
// BackupRecentlyPlayed fetches the user's recently-played tracks from
// Spotify, paging through the cursor until exhausted, converts them into
// schema.PlayHistory records, saves them, and touches the user record.
func (c *Client) BackupRecentlyPlayed(ctx context.Context) error {
	// First page, at the maximum page size of 50 items.
	cur, err := c.sp.RecentlyPlayedOpt(options.Query().Limit(50))
	if err != nil {
		return err
	}
	srp := cur.Items
	// Follow the cursor while it advertises a next page; each sp.Next call
	// presumably refills cur.Items with the next page — TODO confirm.
	for {
		if cur.Next() == "" {
			break
		}
		if err := c.sp.Next(cur); err != nil {
			return err
		}
		srp = append(srp, cur.Items...)
	}
	// Convert every play-history item into the storage schema.
	rp := make([]schema.PlayHistory, len(srp))
	for i, ph := range srp {
		track := c.service.db.ListeningHistory().Default()
		track.User = c.User
		track.PlayedAt = ph.PlayedAt.Time
		// NOTE: the inner i deliberately shadows the outer loop index.
		artists := make([]schema.BasicArtist, len(ph.Track.Artists))
		for i, artist := range ph.Track.Artists {
			artists[i] = schema.BasicArtist{
				Name: artist.Name,
				ID:   artist.ID,
			}
		}
		track.Track = schema.BasicTrack{
			Name:    ph.Track.Name,
			ID:      ph.Track.ID,
			Artists: artists,
		}
		rp[i] = track
	}
	// Persist the batch, then bump the user's last-backed-up marker.
	if _, err := c.service.db.ListeningHistory().Save(ctx, rp); err != nil {
		return err
	}
	if err := c.service.db.Users().Touch(ctx, c.ID); err != nil {
		return err
	}
	return nil
}
|
package main
import (
"fmt"
"os"
"strings"
"time"
"encoding/json"
"github.com/kazu69/hosts_file_manager"
"github.com/ttacon/chalk"
"github.com/urfave/cli"
)
var (
	// version is the binary version string; presumably injected at build
	// time via -ldflags — TODO confirm against the build script.
	version string
)
// main wires up the hfm command-line application and dispatches to the
// add/remove/update/list sub-commands.
//
// Fixes: corrected user-facing typos ("Hostds File Maneger", the broken
// "[--format jsos" usage line, "lits"), removed a stale err check in the
// list action (it re-tested the NewHosts error, which is always nil by
// that point), and surfaced errors from app.Run instead of dropping them.
func main() {
	// Load the hosts file up front; every sub-command operates on it.
	hfm, err := hfm.NewHosts()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	app := cli.NewApp()
	app.Name = "hfm - Hosts File Manager"
	app.Version = version
	app.Compiled = time.Now()
	app.Authors = []cli.Author{
		{
			Name: "kazu69",
		},
	}
	app.Usage = "hosts file management"
	app.UsageText = `
add (a) - hfm add <IP> <HOSTS...> [--format json]
remove (r) - hfm remove <IP> [--format json]
update (u) - hfm update <IP> <HOSTS...> [--format json]
list (l) - hfm list [--format json] `
	app.Commands = []cli.Command{
		{
			Name:    "add",
			Aliases: []string{"a"},
			Usage:   "add a hosts record to hosts file",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "format, f",
					Usage: "output format type",
				},
			},
			Action: func(c *cli.Context) error {
				ip := c.Args().Get(0)
				hosts := c.Args()[1:]
				record, err := hfm.Add(ip, hosts...)
				if err != nil {
					return err
				}
				format := c.String("format")
				if format == "json" {
					json := ToJSON(record)
					fmt.Println(json)
				} else {
					cyan := chalk.Cyan.NewStyle()
					fmt.Printf("%sAdded %s %s\n", cyan, record.IP, strings.Join(record.Hosts, " "))
				}
				return nil
			},
		},
		{
			Name:    "remove",
			Aliases: []string{"r"},
			Usage:   "remove a hosts record to hosts file",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "format, f",
					Usage: "output format type",
				},
			},
			Action: func(c *cli.Context) error {
				ip := c.Args().Get(0)
				record, err := hfm.Remove(ip)
				if err != nil {
					return err
				}
				format := c.String("format")
				if format == "json" {
					json := ToJSON(record)
					fmt.Println(json)
				} else {
					red := chalk.Red.NewStyle()
					fmt.Printf("%sRemoved %s %s\n", red, record.IP, strings.Join(record.Hosts, " "))
				}
				return nil
			},
		},
		{
			Name:    "update",
			Aliases: []string{"u"},
			Usage:   "update a hosts record to hosts file",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "format, f",
					Usage: "output format type",
				},
			},
			Action: func(c *cli.Context) error {
				ip := c.Args().Get(0)
				hosts := c.Args()[1:]
				record, err := hfm.Update(ip, hosts...)
				if err != nil {
					return err
				}
				format := c.String("format")
				if format == "json" {
					json := ToJSON(record)
					fmt.Println(json)
				} else {
					green := chalk.Green.NewStyle()
					fmt.Printf("%sUpdated %s %s\n", green, record.IP, strings.Join(record.Hosts, " "))
				}
				return nil
			},
		},
		{
			Name:    "list",
			Aliases: []string{"l"},
			Usage:   "list hosts records of hosts file",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "format, f",
					Usage: "output format type",
				},
			},
			Action: func(c *cli.Context) error {
				records := hfm.List()
				format := c.String("format")
				if format == "json" {
					json := ToJSON(records)
					fmt.Println(json)
				} else {
					for _, r := range records {
						fmt.Printf("%s %s\n", r.IP, strings.Join(r.Hosts, " "))
					}
				}
				return nil
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// ToJSON renders records as a JSON string. Marshal failures are
// deliberately swallowed, yielding the empty string (best-effort output
// for CLI display).
func ToJSON(records interface{}) string {
	encoded, _ := json.Marshal(records)
	return string(encoded)
}
|
package db
import (
"github.com/evansmwendwa/uxp/model"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite" // Required for sqlite connection
)
// session is the package-level DB handle exposed via Session().
var session *gorm.DB

// NOTE(review): this package-level err is shadowed by the := inside
// init and is never assigned; it appears to be vestigial.
var err error

// init opens the sqlite database, auto-migrates the Speaker model, and
// stores the connection for Session(). A connection failure is fatal.
func init() {
	db, err := gorm.Open("sqlite3", "data/data.sqlite")
	if err != nil {
		panic("DB connection error")
	}
	db.AutoMigrate(&model.Speaker{})
	session = db
}
// Session - Export a db session for use by other packages.
// It returns the gorm.DB opened during init (init panics on failure, so
// a non-nil handle is returned in any surviving process).
func Session() *gorm.DB {
	return session
}
|
package gormSupported
import (
"github.com/jinzhu/gorm"
"github.com/mukesh0513/RxSecure/internal/model"
)
// GormConnection is a placeholder receiver type for the commented-out
// ISqlDatabase implementation below.
type GormConnection struct{}

var (
	// gormConn holds the singleton connection assigned by Initialize.
	gormConn *gorm.DB
	//GormConnProvider sqlData.ISqlDatabase
)

// Initialize stores the given gorm connection on first call (later calls
// are no-ops), sets SQL logging, and auto-migrates the EncKeys and Data
// models. Not safe for concurrent first-time callers (unsynchronized
// check-then-set) — presumably called once from startup.
func Initialize(db *gorm.DB, logging bool) {
	if gormConn == nil {
		gormConn = db
		//GormConnProvider = GormConnection{}
		db.LogMode(logging)
		db.AutoMigrate(&model.EncKeys{})
		db.AutoMigrate(&model.Data{})
	}
}
//func (db GormConnection) Find(model interface{}, args interface{}) (interface{}, error) {
//
//}
//
//func (db GormConnection) Create(model interface{}) error {
//
//}
//
//func (db GormConnection) Delete(model interface{}, args interface{}) error {
// var err error
// err = gormConn.Where(args).Delete(&model).Error
//
// return err
//}
//func GetMysqlConn() *gorm.DB {
// return gormConn
//}
|
package routers
import (
"bubble/controller"
"github.com/gin-gonic/gin"
)
// SetRouters builds the gin engine: static assets, HTML templates, the
// index page, and the /v1 todo CRUD routes.
func SetRouters() *gin.Engine {
	r := gin.Default()
	// Static asset mount.
	r.Static("/static", "static")
	// HTML template set.
	r.LoadHTMLGlob("template/*")
	r.GET("/", controller.IndexHandle)
	// v1: route-group prefix for the todo API.
	v1Group := r.Group("v1")
	{
		// Todo items.
		// Create.
		v1Group.POST("/todo", controller.CreateTodo)
		// List all.
		v1Group.GET("/todo", controller.GetTodoList)
		// Fetch one (currently disabled).
		//v1Group.GET("/todo/:id", controller.GetOneTodo)
		// Update.
		v1Group.PUT("/todo/:id", controller.UpdateOneTodo)
		// Delete.
		v1Group.DELETE("/todo/:id", controller.DeleteOneTodo)
	}
	return r
}
|
package wire
import (
"net/url"
"sort"
"strings"
"fmt"
"net/http"
)
// Mapping binds a form field to a destination string, recording whether
// the field is mandatory.
type Mapping struct {
	field    *string // destination written by setValueOnRequest
	required bool    // when true, a blank value is reported as missing
}
// Required returns a Mapping whose form field must be non-blank.
func Required(field *string) *Mapping {
	m := &Mapping{field: field}
	m.required = true
	return m
}
// Optional returns a Mapping whose form field may be absent or blank.
func Optional(field *string) *Mapping {
	m := &Mapping{field: field}
	m.required = false
	return m
}
// ParseRequest copies form values from httpRequest into the destinations
// described by fields. It returns an error listing (sorted, for
// determinism) any required fields that were blank or absent.
//
// Fix: the error returned by ParseForm was previously discarded, so a
// malformed body silently produced an empty form.
func ParseRequest(httpRequest http.Request, fields map[string]*Mapping) error {
	missingFields := []string{}
	if err := httpRequest.ParseForm(); err != nil {
		return err
	}
	for key, mapping := range fields {
		setValueOnRequest(key, httpRequest.Form, &missingFields, mapping)
	}
	if len(missingFields) > 0 {
		sort.Strings(missingFields)
		return fmt.Errorf("The following fields are required: %s", missingFields)
	}
	return nil
}
// setValueOnRequest copies the named form value into field's destination,
// appending fieldName to missingFields when a required value is blank.
// The (possibly empty) value is always written through.
func setValueOnRequest(fieldName string, values url.Values, missingFields *[]string, field *Mapping) {
	v := values.Get(fieldName)
	if field.required && strings.TrimSpace(v) == "" {
		*missingFields = append(*missingFields, fieldName)
	}
	*field.field = v
}
|
package agency
import (
"github.com/bububa/oppo-omni/core"
"github.com/bububa/oppo-omni/model"
"github.com/bububa/oppo-omni/model/communal/agency"
)
// Balance queries the agency account balance (代理商余额查询) through the
// "communal" resource's "agency/balance" action and returns the list of
// balance accounts.
func Balance(clt *core.SDKClient) ([]agency.BalanceAccount, error) {
	var req model.BaseRequest
	req.SetResourceName("communal")
	req.SetResourceAction("agency/balance")
	var ret agency.BalanceResponse
	// POST the request; ret is decoded in place by the SDK client.
	err := clt.Post(req, &ret)
	if err != nil {
		return nil, err
	}
	return ret.Data, nil
}
|
package main
import "fmt"
// main exercises isMatch against a few wildcard patterns and prints the
// boolean result of each, in order.
func main() {
	cases := []struct{ s, p string }{
		{"aa", "*"},
		{"zacabz", "*a?b*"},
		{"aa", "a"},
		{"aaabbbaabaaaaababaabaaabbabbbbbbbbaabababbabbbaaaabaa", "a*******b"},
		{"babbbbaabababaabbababaababaabbaabababbaaababbababaaaaaabbabaaaabababbabbababbbaaaababbbabbbbbbbbbbaabbb", "b**bb**a**bba*b**a*bbb**aba***babbb*aa****aabb*bbb***a"},
	}
	for _, c := range cases {
		fmt.Println(isMatch(c.s, c.p))
	}
}
// isMatch reports whether s matches pattern p, where '?' matches any
// single character and '*' matches any (possibly empty) sequence.
// Greedy two-pointer algorithm: on mismatch after a '*', back up and let
// the star absorb one more character of s.
func isMatch(s string, p string) bool {
	si, pi := 0, 0
	lastStar := -1  // index in p of the most recent '*'
	backtrack := -1 // position in s where that star's match began
	for si < len(s) {
		switch {
		case pi < len(p) && (p[pi] == s[si] || p[pi] == '?'):
			si++
			pi++
		case pi < len(p) && p[pi] == '*':
			lastStar = pi
			backtrack = si
			pi++
		case lastStar >= 0:
			// Extend the last star by one character and retry.
			backtrack++
			si = backtrack
			pi = lastStar + 1
		default:
			return false
		}
	}
	// Only trailing stars may remain in the pattern.
	for pi < len(p) && p[pi] == '*' {
		pi++
	}
	return pi == len(p)
}
|
package _862_Shortest_Subarray_with_Sum_at_Least_K
import "testing"
// TestShortestSubarray checks the trivial case: a one-element array that
// already meets the target sum has length 1.
func TestShortestSubarray(t *testing.T) {
	if got := shortestSubarray([]int{1}, 1); got != 1 {
		t.Errorf("wrong res=%d", got)
	}
}
|
package utils
import (
"path/filepath"
"reflect"
"sort"
)
// GetParentDir returns the steps-th parent directory of fullPath.
// The path is cleaned first; steps <= 0 returns the cleaned path itself.
func GetParentDir(fullPath string, steps int) string {
	p := filepath.Clean(fullPath)
	for i := 0; i < steps; i++ {
		p = filepath.Dir(p)
	}
	return p
}
// mapKeyWithString pairs a reflect map key with its precomputed string
// form, so sorting does not re-stringify keys on every comparison.
type mapKeyWithString struct {
	MapKey    reflect.Value
	StringKey string
}
// SortKeys returns the given reflect map keys sorted ascending by their
// String() rendering, each paired with that rendering.
func SortKeys(mapKeys []reflect.Value) []mapKeyWithString {
	out := make([]mapKeyWithString, 0, len(mapKeys))
	for _, k := range mapKeys {
		out = append(out, mapKeyWithString{MapKey: k, StringKey: k.String()})
	}
	sort.Slice(out, func(a, b int) bool {
		return out[a].StringKey < out[b].StringKey
	})
	return out
}
|
package piper
import (
"context"
"errors"
"fmt"
"math/rand"
"strconv"
"testing"
)
// testBatchExecAllSucceedFn is a batch executor stub that reports success
// (a nil error) for every datum.
type testBatchExecAllSucceedFn struct {
}

// Execute maps every datum's id to nil, signalling total success.
func (fn *testBatchExecAllSucceedFn) Execute(ctx context.Context, datum []DataIF) (map[string]error, error) {
	results := make(map[string]error, len(datum))
	for _, d := range datum {
		results[d.(*testData).id] = nil
	}
	return results, nil
}
// testBatchExecAllFailFn is a batch executor stub that reports a per-item
// failure for every datum.
type testBatchExecAllFailFn struct {
}

// Execute maps every datum's id to an "Error#<value>" error.
func (fn *testBatchExecAllFailFn) Execute(ctx context.Context, datum []DataIF) (map[string]error, error) {
	results := make(map[string]error, len(datum))
	for _, d := range datum {
		td := d.(*testData)
		results[td.id] = fmt.Errorf("Error#%d", td.value)
	}
	return results, nil
}
// testBatchExecEvensFailFn is a batch executor stub that fails items with
// even values and succeeds items with odd values.
type testBatchExecEvensFailFn struct {
}

// Execute maps even-valued datum ids to "Error#<value>" and odd-valued
// ids to nil.
func (fn *testBatchExecEvensFailFn) Execute(ctx context.Context, datum []DataIF) (map[string]error, error) {
	results := make(map[string]error, len(datum))
	for _, d := range datum {
		td := d.(*testData)
		var e error
		if td.value%2 == 0 {
			e = fmt.Errorf("Error#%d", td.value)
		}
		results[td.id] = e
	}
	return results, nil
}
// testBatchExecErrorsFn is a batch executor stub whose Execute fails as a
// whole (batch-level error, empty per-item map).
type testBatchExecErrorsFn struct {
}

// Execute returns an empty result map alongside a batch-level error.
func (fn *testBatchExecErrorsFn) Execute(ctx context.Context, datum []DataIF) (map[string]error, error) {
	return make(map[string]error), errors.New("Test error")
}
// newTestJobs builds numJobs jobs wrapping test data values 0..numJobs-1.
func newTestJobs(numJobs int) []*job {
	jobs := make([]*job, 0, numJobs)
	for i := 0; i < numJobs; i++ {
		jobs = append(jobs, newJob(newTestData(i)))
	}
	return jobs
}
// TestBatch_NewBatch verifies that newBatch constructs a non-nil batch
// even with a zero capacity.
func TestBatch_NewBatch(t *testing.T) {
	if b := newBatch(0); b == nil {
		t.Fatal("newBatch returned nil")
	}
}
// TestBatch_Add adds jobs in mixed arities (one, two, variadic rest) and
// checks both internal collections track the total count.
func TestBatch_Add(t *testing.T) {
	b := newBatch(10)
	// create new jobs (random count in [5, 10])
	total := rand.Intn(6) + 5
	jobs := newTestJobs(total)
	// add one job, then two jobs, then the remainder at once
	b.add(jobs[0])
	b.add(jobs[1], jobs[2])
	b.add(jobs[3:]...)
	if len(b.jobsMap) != total {
		t.Fatalf("jobsMap length invalid: wanted [%d], got [%d]", total, len(b.jobsMap))
	}
	if len(b.datum) != total {
		t.Fatalf("datum length invalid: wanted [%d], got [%d]", total, len(b.datum))
	}
}
// TestBatch_Size checks size() is zero on a fresh batch and equals the
// number of jobs after adding.
func TestBatch_Size(t *testing.T) {
	b := newBatch(10)
	if b.size() > 0 {
		t.Fatal("wrong initial size")
	}
	// create and add new jobs (random count in [5, 10])
	total := rand.Intn(6) + 5
	b.add(newTestJobs(total)...)
	if b.size() != total {
		t.Fatalf("size invalid: wanted [%d], got [%d]", total, b.size())
	}
}
// TestBatch_UpdateSuccess records a failure and a success and verifies
// both land in successMap with the right boolean.
func TestBatch_UpdateSuccess(t *testing.T) {
	b := newBatch(10)
	b.add(newTestJobs(2)...)
	// mark job "0" failed
	b.updateSuccess("0", false)
	if b.successMap["0"] == nil {
		t.Fatal("successMap unexpected nil value")
	}
	if *b.successMap["0"] {
		t.Fatalf("successMap invalid: wanted [%t], got [%t]", false, *b.successMap["0"])
	}
	// mark job "1" succeeded
	b.updateSuccess("1", true)
	if b.successMap["1"] == nil {
		t.Fatal("successMap unexpected nil value")
	}
	if !*b.successMap["1"] {
		t.Fatalf("successMap invalid: wanted [%t], got [%t]", true, *b.successMap["1"])
	}
}
// TestBatch_ExecuteSuccess runs the evens-fail executor and checks the
// success map: even ids failed, odd ids succeeded.
func TestBatch_ExecuteSuccess(t *testing.T) {
	b := newBatch(10)
	b.add(newTestJobs(10)...)
	fn := testBatchExecEvensFailFn{}
	if err := b.execute(context.TODO(), fn.Execute); err != nil {
		t.Fatal("unexpected error ", err)
	}
	for k, success := range b.successMap {
		if success == nil {
			t.Fatal("successMap unexpected nil value")
		}
		id, _ := strconv.Atoi(k)
		even := id%2 == 0
		if even && *success {
			t.Fatalf("successMap invalid: wanted [%t], got [%t]", false, *success)
		}
		if !even && !*success {
			t.Fatalf("successMap invalid: wanted [%t], got [%t]", true, !*success)
		}
	}
}
// TestBatch_ExecuteFailure runs the batch-level-error executor and
// expects execute to surface that error.
func TestBatch_ExecuteFailure(t *testing.T) {
	b := newBatch(10)
	b.add(newTestJobs(10)...)
	fn := testBatchExecErrorsFn{}
	if err := b.execute(context.TODO(), fn.Execute); err == nil {
		t.Fatal("expected error but got nil")
	}
}
|
package route
import (
"bytes"
"fmt"
"net/http"
"path/filepath"
"text/template"
"github.com/Sirupsen/logrus"
"github.com/gorilla/mux"
)
// Route path prefixes for the static assets, websocket, and download
// endpoints.
const (
	pathStatic   = "/_static"
	pathWS       = "/_ws"
	pathDownload = "/_dl"
)

var (
	// indexTemplate is the SPA index page; parse failure aborts startup.
	indexTemplate = template.Must(template.ParseFiles("./client/dist/index.html"))
	// log is this package's tagged logger.
	log = logrus.WithField("pkg", "router")
	// static serves the built client assets from disk.
	static = http.FileServer(http.Dir("./client/dist"))
)
// Config controls path handling for the routes mounted by this package.
type Config struct {
	// RootPath is defined for changing the root path of serving. This is useful
	// if the server is behind a proxy that changes the root path.
	RootPath string `json:"root_path"`
	// BasePath is to change the base path after the root path.
	// It is used for dynamic mode where we have different locations for the index page.
	BasePath string `json:"base_path"`
}
// Static mounts the static-asset file server under the pathStatic prefix,
// stripping that prefix before lookup on disk.
func Static(r *mux.Router) {
	prefix := pathStatic + "/"
	r.PathPrefix(prefix).Handler(http.StripPrefix(pathStatic, static))
}
// Index mounts serving of index.html on a path prefix. A prefix is used
// because page reloads anywhere under it must return the same index.html
// for the javascript frontend. The template is rendered once, up front,
// with the given Config.
func Index(r *mux.Router, pathPrefix string, c Config) error {
	if c.BasePath == "" && c.RootPath != "" {
		c.BasePath = c.RootPath
	}
	index := &bytes.Buffer{}
	if err := indexTemplate.Execute(index, c); err != nil {
		return fmt.Errorf("executing index template: %s", err)
	}
	page := index.Bytes()
	r.PathPrefix(pathPrefix).HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusOK)
		if _, err := w.Write(page); err != nil {
			log.WithError(err).Errorf("Writing index to response")
		}
	})
	return nil
}
// Engine mounts the websocket engine handler at <basePath>/_ws.
func Engine(r *mux.Router, basePath string, engine http.Handler) {
	wsPath := filepath.Join(basePath, pathWS)
	log.Debugf("Adding engine route on %s", wsPath)
	r.Path(wsPath).Handler(engine)
}
// Download mounts the download handler under <basePath>/_dl/, stripping
// that prefix before delegating to h.
func Download(r *mux.Router, basePath string, h http.Handler) {
	dlPath := filepath.Join(basePath, pathDownload)
	log.Debugf("Adding download route on %s", dlPath)
	r.PathPrefix(dlPath + "/").Handler(http.StripPrefix(dlPath, h))
}
// Redirect mounts a handler that strips the proxy root path from incoming
// URLs via a temporary redirect. It is a no-op when no RootPath is set.
func Redirect(r *mux.Router, c Config) {
	if c.RootPath == "" {
		return
	}
	root := c.RootPath
	r.PathPrefix(root + "/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		dest := req.URL.Path[len(root):]
		http.Redirect(w, req, dest, http.StatusTemporaryRedirect)
	})
}
|
package article
import (
"context"
"database/sql"
"time"
)
// Article is the persisted representation of a blog article row.
type Article struct {
	Id        string       `db:"id"`
	Body      string       `db:"body"`
	Title     string       `db:"title"`
	Preface   string       `db:"preface"`
	UserId    string       `db:"user_id"`
	CreatedAt time.Time    `db:"created_at"`
	// UpdatedAt/DeletedAt are nullable: unset until first update / delete.
	UpdatedAt sql.NullTime `db:"updated_at"`
	DeletedAt sql.NullTime `db:"deleted_at"`
}
// Storage abstracts article persistence: CRUD plus period-based lookups.
type Storage interface {
	SaveArticle(ctx context.Context, article *Article) (id string, err error)
	UpdateArticle(ctx context.Context, article *Article) (id string, err error)
	GetArticleById(ctx context.Context, id string) (*Article, error)
	// Period queries select by creation time in [from, to] — presumably
	// inclusive; confirm against the implementation.
	GetArticleIdsByPeriod(ctx context.Context, from, to time.Time) ([]string, error)
	GetArticlesByPeriod(ctx context.Context, from, to time.Time) ([]Article, error)
	DeleteArticleById(ctx context.Context, id string) error
}
|
package common
import "encoding/json"
// ToJson renders v as a JSON string, swallowing marshal errors
// (best-effort: an unmarshalable value yields the empty string).
func ToJson(v interface{}) string {
	encoded, _ := json.Marshal(v)
	return string(encoded)
}
|
package main
import (
"fmt"
)
// dropEmety removes empty strings from s in place (the input's backing
// array is reused and overwritten) and returns the compacted slice.
func dropEmety(s []string) []string {
	kept := s[:0]
	for _, v := range s {
		if v != "" {
			kept = append(kept, v)
		}
	}
	return kept
}
// dropSame collapses consecutive duplicate strings, returning a new slice
// with one element per run.
//
// Bug fix: the previous version seeded the output with the zero-value
// sentinel k[0] == "" and returned it, so every result carried a spurious
// leading empty string, and inputs beginning with "" were mishandled.
func dropSame(s []string) (k []string) {
	for i, v := range s {
		// Keep the first element and any element differing from its predecessor.
		if i == 0 || v != s[i-1] {
			k = append(k, v)
		}
	}
	return k
}
// main demonstrates dropEmety and dropSame on two sample slices and
// prints the results.
func main() {
	s1 := []string{"test", "", "test1"}
	s2 := []string{"1", "1", "2", "3", "3", "4", "4", "5"}
	s1, s2 = dropEmety(s1), dropSame(s2)
	//fmt.Println("b is ", b)
	fmt.Println("s1 is ", s1)
	fmt.Println("s2 is ", s2)
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
goctx "context"
"database/sql"
"database/sql/driver"
"flag"
"fmt"
"math/rand"
"os"
"os/exec"
"reflect"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/store"
tidbdriver "github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/types"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// Command-line flags configuring the external TiDB/TiKV cluster and the
// server fleet these DDL integration tests spawn.
var (
	etcd              = flag.String("etcd", "127.0.0.1:2379", "etcd path")
	tidbIP            = flag.String("tidb_ip", "127.0.0.1", "tidb-server ip address")
	tikvPath          = flag.String("tikv_path", "", "tikv path")
	lease             = flag.Int("lease", 1, "DDL schema lease time, seconds")
	serverNum         = flag.Int("server_num", 3, "Maximum running tidb server")
	startPort         = flag.Int("start_port", 5000, "First tidb-server listening port")
	statusPort        = flag.Int("status_port", 8000, "First tidb-server status port")
	logLevel          = flag.String("L", "error", "log level")
	ddlServerLogLevel = flag.String("ddl_log_level", "fatal", "DDL server log level")
	dataNum           = flag.Int("n", 100, "minimal test dataset for a table")
	enableRestart     = flag.Bool("enable_restart", true, "whether random restart servers for tests")
)
// server tracks one spawned ddltest_tidb-server process together with its
// log file and an open SQL connection to it.
type server struct {
	*exec.Cmd          // the running tidb-server process
	logFP *os.File     // log file receiving the process's stdout/stderr
	db    *sql.DB      // MySQL-protocol connection to this server
	addr  string       // host:port the server listens on
}
// ddlSuite holds the shared state for the DDL integration tests: an
// embedded session against the TiKV store plus the fleet of external
// tidb-server processes that are randomly restarted during the run.
type ddlSuite struct {
	store kv.Storage
	dom   *domain.Domain
	s     session.Session
	ctx   sessionctx.Context

	m     sync.Mutex // guards procs
	procs []*server  // external tidb-server fleet; nil slots are stopped

	wg   sync.WaitGroup
	quit chan struct{} // closed by teardown to stop the restart goroutine

	retryCount int // connection attempts when (re)starting a server
}
// createDDLSuite bootstraps the test suite: it opens the TiKV-backed
// store, creates the test_ddl database, disables the local DDL worker
// (so an external server owns DDL), reopens a fresh store/session, and
// finally launches the tidb-server fleet plus the random-restart loop.
func createDDLSuite(t *testing.T) (s *ddlSuite) {
	var err error
	s = new(ddlSuite)
	s.quit = make(chan struct{})
	s.store, err = store.New(fmt.Sprintf("tikv://%s%s", *etcd, *tikvPath))
	require.NoError(t, err)
	// Make sure the schema lease of this session is equal to other TiDB servers'.
	session.SetSchemaLease(time.Duration(*lease) * time.Second)
	s.dom, err = session.BootstrapSession(s.store)
	require.NoError(t, err)
	s.s, err = session.CreateSession(s.store)
	require.NoError(t, err)
	s.ctx = s.s.(sessionctx.Context)
	goCtx := goctx.Background()
	_, err = s.s.Execute(goCtx, "create database if not exists test_ddl")
	require.NoError(t, err)
	s.Bootstrap(t)
	// Stop current DDL worker, so that we can't be the owner now.
	err = domain.GetDomain(s.ctx).DDL().Stop()
	require.NoError(t, err)
	config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false)
	session.ResetStoreForWithTiKVTest(s.store)
	// Tear down the bootstrap-phase handles and reopen clean ones with
	// DDL disabled.
	s.dom.Close()
	require.NoError(t, s.store.Close())
	s.store, err = store.New(fmt.Sprintf("tikv://%s%s", *etcd, *tikvPath))
	require.NoError(t, err)
	s.s, err = session.CreateSession(s.store)
	require.NoError(t, err)
	s.dom, err = session.BootstrapSession(s.store)
	require.NoError(t, err)
	s.ctx = s.s.(sessionctx.Context)
	_, err = s.s.Execute(goCtx, "use test_ddl")
	require.NoError(t, err)
	addEnvPath("..")
	// Start multi tidb servers
	s.procs = make([]*server, *serverNum)
	// Set server restart retry count.
	s.retryCount = 20
	createLogFiles(t, *serverNum)
	err = s.startServers()
	require.NoError(t, err)
	s.wg.Add(1)
	go s.restartServerRegularly()
	return
}
// restartServerRegularly restarts a tidb server regularly: every
// lease*(6..11) seconds it restarts one random server, until s.quit is
// closed. A restart failure is fatal to the whole test process.
func (s *ddlSuite) restartServerRegularly() {
	defer s.wg.Done()
	var err error
	// Interval is randomized once, in units of the schema lease.
	after := *lease * (6 + randomIntn(6))
	for {
		select {
		case <-time.After(time.Duration(after) * time.Second):
			if *enableRestart {
				err = s.restartServerRand()
				if err != nil {
					log.Fatal("restartServerRand failed", zap.Error(err))
				}
			}
		case <-s.quit:
			return
		}
	}
}
// teardown stops the restart goroutine, closes the domain/store, and
// kills all spawned servers. A watchdog goroutine logs a stack trace if
// closing the store hangs for 100 seconds.
func (s *ddlSuite) teardown(t *testing.T) {
	close(s.quit)
	s.wg.Wait()
	s.dom.Close()
	// TODO: Remove these logs after testing.
	quitCh := make(chan struct{})
	go func() {
		select {
		case <-time.After(100 * time.Second):
			log.Error("testing timeout", zap.Stack("stack"))
		case <-quitCh:
		}
	}()
	err := s.store.Close()
	require.NoError(t, err)
	close(quitCh)
	err = s.stopServers()
	require.NoError(t, err)
}
// startServers launches a tidb-server for every empty slot in s.procs,
// attaching each to its pre-created log file. Already-running slots are
// left untouched.
func (s *ddlSuite) startServers() (err error) {
	s.m.Lock()
	defer s.m.Unlock()
	for i := 0; i < len(s.procs); i++ {
		if s.procs[i] != nil {
			continue
		}
		// Open log file.
		logFP, err := os.OpenFile(fmt.Sprintf("%s%d", logFilePrefix, i), os.O_RDWR, 0766)
		if err != nil {
			return errors.Trace(err)
		}
		s.procs[i], err = s.startServer(i, logFP)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// killServer kills the given tidb-server process and reaps it, then
// sleeps one second so the freed port can be reused quickly by the next
// server started on the same slot.
func (s *ddlSuite) killServer(proc *os.Process) error {
	// Make sure this tidb is killed, and it makes the next tidb that has the same port as this one start quickly.
	err := proc.Kill()
	if err != nil {
		log.Error("kill server failed", zap.Error(err))
		return errors.Trace(err)
	}
	_, err = proc.Wait()
	if err != nil {
		log.Error("kill server, wait failed", zap.Error(err))
		return errors.Trace(err)
	}
	time.Sleep(1 * time.Second)
	return nil
}
// stopServers closes each running server's SQL connection, kills its
// process, and clears its slot in s.procs.
func (s *ddlSuite) stopServers() error {
	s.m.Lock()
	defer s.m.Unlock()
	for i := 0; i < len(s.procs); i++ {
		if proc := s.procs[i]; proc != nil {
			// Close the SQL connection before killing the process.
			if proc.db != nil {
				if err := proc.db.Close(); err != nil {
					return err
				}
			}
			err := s.killServer(proc.Process)
			if err != nil {
				return errors.Trace(err)
			}
			s.procs[i] = nil
		}
	}
	return nil
}
// logFilePrefix names the per-server log files: tidb_log_file_<index>.
var logFilePrefix = "tidb_log_file_"

// createLogFiles pre-creates (truncates) one log file per server so that
// startServers can open them with os.O_RDWR.
func createLogFiles(t *testing.T, length int) {
	for i := 0; i < length; i++ {
		fp, err := os.Create(fmt.Sprintf("%s%d", logFilePrefix, i))
		if err != nil {
			require.NoError(t, err)
		}
		require.NoError(t, fp.Close())
	}
}
// startServer launches the i-th ddltest_tidb-server process (ports are
// offset by i), waits for it to come up, and opens a SQL connection to it
// with exponential-backoff retries. The returned server owns the process,
// the connection, and the log file.
func (s *ddlSuite) startServer(i int, fp *os.File) (*server, error) {
	cmd := exec.Command("ddltest_tidb-server",
		"--store=tikv",
		fmt.Sprintf("-L=%s", *ddlServerLogLevel),
		fmt.Sprintf("--path=%s%s", *etcd, *tikvPath),
		fmt.Sprintf("-P=%d", *startPort+i),
		fmt.Sprintf("--status=%d", *statusPort+i),
		fmt.Sprintf("--lease=%d", *lease))
	cmd.Stderr = fp
	cmd.Stdout = fp
	err := cmd.Start()
	if err != nil {
		return nil, errors.Trace(err)
	}
	time.Sleep(500 * time.Millisecond)
	// Make sure tidb server process is started.
	ps := fmt.Sprintf("ps -aux|grep ddltest_tidb|grep %d", *startPort+i)
	output, _ := exec.Command("sh", "-c", ps).Output()
	if !strings.Contains(string(output), "ddltest_tidb-server") {
		time.Sleep(1 * time.Second)
	}
	// Open database.
	var db *sql.DB
	addr := fmt.Sprintf("%s:%d", *tidbIP, *startPort+i)
	sleepTime := time.Millisecond * 250
	startTime := time.Now()
	// NOTE: this inner i shadows the server index on purpose (retry count).
	for i := 0; i < s.retryCount; i++ {
		db, err = sql.Open("mysql", fmt.Sprintf("root@(%s)/test_ddl", addr))
		if err != nil {
			log.Warn("open addr failed", zap.String("addr", addr), zap.Int("retry count", i), zap.Error(err))
			continue
		}
		err = db.Ping()
		if err == nil {
			break
		}
		log.Warn("ping addr failed", zap.String("addr", addr), zap.Int("retry count", i), zap.Error(err))
		err = db.Close()
		if err != nil {
			log.Warn("close db failed", zap.Int("retry count", i), zap.Error(err))
			break
		}
		// Exponential backoff: 250ms, 500ms, 1s, ...
		time.Sleep(sleepTime)
		sleepTime += sleepTime
	}
	if err != nil {
		log.Error("restart server addr failed",
			zap.String("addr", addr),
			zap.Duration("take time", time.Since(startTime)),
			zap.Error(err),
		)
		return nil, errors.Trace(err)
	}
	db.SetMaxOpenConns(10)
	_, err = db.Exec("use test_ddl")
	if err != nil {
		return nil, errors.Trace(err)
	}
	log.Info("start server ok", zap.String("addr", addr), zap.Error(err))
	return &server{
		Cmd:   cmd,
		db:    db,
		addr:  addr,
		logFP: fp,
	}, nil
}
// restartServerRand picks a random slot, and if a server is running
// there, kills it and starts a replacement reusing the same log file.
// The slot is nil'ed while the server is down so getServer skips it.
func (s *ddlSuite) restartServerRand() error {
	i := rand.Intn(*serverNum)
	s.m.Lock()
	defer s.m.Unlock()
	if s.procs[i] == nil {
		return nil
	}
	server := s.procs[i]
	s.procs[i] = nil
	log.Warn("begin to restart", zap.String("addr", server.addr))
	err := s.killServer(server.Process)
	if err != nil {
		return errors.Trace(err)
	}
	s.procs[i], err = s.startServer(i, server.logFP)
	return errors.Trace(err)
}
// isRetryError reports whether err is a transient failure worth retrying
// against another server (bad/reset connections, retryable KV errors).
// Non-retryable errors are logged; nil is never retryable.
func isRetryError(err error) bool {
	if err == nil {
		return false
	}
	if terror.ErrorEqual(err, driver.ErrBadConn) {
		return true
	}
	msg := err.Error()
	for _, sub := range []string{
		"connection refused",
		"getsockopt: connection reset by peer",
		"KV error safe to retry",
		"try again later",
		"invalid connection",
	} {
		if strings.Contains(msg, sub) {
			return true
		}
	}
	// TODO: Check the specific columns number.
	if strings.Contains(msg, "Column count doesn't match value count at row") {
		log.Warn("err", zap.Error(err))
		return false
	}
	log.Error("can not retry", zap.Error(err))
	return false
}
// exec runs query on a randomly chosen live server, retrying forever on
// transient connection/KV errors (each retry may pick a different server).
func (s *ddlSuite) exec(query string, args ...interface{}) (sql.Result, error) {
	for {
		server := s.getServer()
		r, err := server.db.Exec(query, args...)
		if isRetryError(err) {
			log.Error("exec in server, retry",
				zap.String("query", query),
				zap.String("addr", server.addr),
				zap.Error(err),
			)
			continue
		}
		return r, err
	}
}
// mustExec is exec but any non-retryable error is fatal to the process.
func (s *ddlSuite) mustExec(query string, args ...interface{}) sql.Result {
	r, err := s.exec(query, args...)
	if err != nil {
		log.Fatal("[mustExec fail]query",
			zap.String("query", query),
			zap.Any("args", args),
			zap.Error(err),
		)
	}
	return r
}
// execInsert is exec specialized for inserts: when random restarts are
// enabled, duplicate-key errors are tolerated (the insert may have been
// applied just before a server was killed) and treated as success.
// Any other error is fatal.
func (s *ddlSuite) execInsert(query string, args ...interface{}) sql.Result {
	for {
		r, err := s.exec(query, args...)
		if err == nil {
			return r
		}
		if *enableRestart {
			// If you use enable random restart servers, we should ignore key exists error.
			if strings.Contains(err.Error(), "Duplicate entry") &&
				strings.Contains(err.Error(), "for key") {
				return r
			}
		}
		log.Fatal("[execInsert fail]query",
			zap.String("query", query),
			zap.Any("args", args),
			zap.Error(err),
		)
	}
}
// query runs a SELECT on a randomly chosen live server, retrying forever
// on transient connection/KV errors. The caller owns the returned rows.
func (s *ddlSuite) query(query string, args ...interface{}) (*sql.Rows, error) {
	for {
		server := s.getServer()
		r, err := server.db.Query(query, args...)
		if isRetryError(err) {
			log.Error("query in server, retry",
				zap.String("query", query),
				zap.String("addr", server.addr),
				zap.Error(err),
			)
			continue
		}
		return r, err
	}
}
// getServer returns a random live server, probing up to 20 random slots
// (slots are temporarily nil while a server is being restarted). Finding
// none is fatal.
func (s *ddlSuite) getServer() *server {
	s.m.Lock()
	defer s.m.Unlock()
	for i := 0; i < 20; i++ {
		// Inner i shadows the attempt counter with a random slot index.
		i := rand.Intn(*serverNum)
		if s.procs[i] != nil {
			return s.procs[i]
		}
	}
	log.Fatal("try to get server too many times")
	return nil
}
// runDDL executes the DDL query, returns a channel so that you can use it to wait DDL finished.
// On success it sleeps 2*lease before signalling, so every server has
// picked up the new schema by the time the caller proceeds.
func (s *ddlSuite) runDDL(sql string) chan error {
	done := make(chan error, 1)
	go func() {
		_, err := s.s.Execute(goctx.Background(), sql)
		// We must wait 2 * lease time to guarantee all servers update the schema.
		if err == nil {
			time.Sleep(time.Duration(*lease) * time.Second * 2)
		}
		done <- err
	}()
	return done
}
// getTable looks up table name in the test_ddl schema; failure is fatal
// to the test.
func (s *ddlSuite) getTable(t *testing.T, name string) table.Table {
	tbl, err := domain.GetDomain(s.ctx).InfoSchema().TableByName(model.NewCIStr("test_ddl"), model.NewCIStr(name))
	require.NoError(t, err)
	return tbl
}
// dumpRows drains rows into a [][]interface{} (one inner slice per row),
// closing the rows and failing the test on any scan/iteration error.
func dumpRows(t *testing.T, rows *sql.Rows) [][]interface{} {
	cols, err := rows.Columns()
	require.NoError(t, err)
	var ay [][]interface{}
	for rows.Next() {
		v := make([]interface{}, len(cols))
		// Scan requires pointers; allocate an *interface{} per column.
		for i := range v {
			v[i] = new(interface{})
		}
		err = rows.Scan(v...)
		require.NoError(t, err)
		// Unwrap the pointers so callers see plain values.
		for i := range v {
			v[i] = *(v[i].(*interface{}))
		}
		ay = append(ay, v)
	}
	require.NoError(t, rows.Close())
	require.NoErrorf(t, rows.Err(), "%v", ay)
	return ay
}
// matchRows asserts that rows contains exactly the expected values,
// row by row and column by column (compared via string rendering).
func matchRows(t *testing.T, rows *sql.Rows, expected [][]interface{}) {
	ay := dumpRows(t, rows)
	require.Equalf(t, len(expected), len(ay), "%v", expected)
	for i := range ay {
		match(t, ay[i], expected[i]...)
	}
}
// match asserts one result row equals the expected values. NULL columns
// must pair with nil expectations; other values are compared after both
// sides are rendered through types.ToString.
func match(t *testing.T, row []interface{}, expected ...interface{}) {
	require.Equal(t, len(expected), len(row))
	for i := range row {
		if row[i] == nil {
			require.Nil(t, expected[i])
			continue
		}
		got, err := types.ToString(row[i])
		require.NoError(t, err)
		need, err := types.ToString(expected[i])
		require.NoError(t, err)
		require.Equal(t, need, got)
	}
}
// Bootstrap (re)creates every fixture table used by the tests: one set
// with single-column int primary keys, and a parallel *_common set with
// composite keys created under clustered-index-on mode.
func (s *ddlSuite) Bootstrap(t *testing.T) {
	tk := testkit.NewTestKit(t, s.store)
	tk.MustExec("use test_ddl")
	tk.MustExec("drop table if exists test_index, test_column, test_insert, test_conflict_insert, " +
		"test_update, test_conflict_update, test_delete, test_conflict_delete, test_mixed, test_inc")
	tk.MustExec("create table test_index (c int, c1 bigint, c2 double, c3 varchar(256), primary key(c))")
	tk.MustExec("create table test_column (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_insert (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_conflict_insert (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_update (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_conflict_update (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_delete (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_conflict_delete (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_mixed (c1 int, c2 int, primary key(c1))")
	tk.MustExec("create table test_inc (c1 int, c2 int, primary key(c1))")
	// The *_common tables are created with clustered index forced ON so
	// their composite primary keys are clustered.
	tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
	tk.MustExec("drop table if exists test_insert_common, test_conflict_insert_common, " +
		"test_update_common, test_conflict_update_common, test_delete_common, test_conflict_delete_common, " +
		"test_mixed_common, test_inc_common")
	tk.MustExec("create table test_insert_common (c1 int, c2 int, primary key(c1, c2))")
	tk.MustExec("create table test_conflict_insert_common (c1 int, c2 int, primary key(c1, c2))")
	tk.MustExec("create table test_update_common (c1 int, c2 int, primary key(c1, c2))")
	tk.MustExec("create table test_conflict_update_common (c1 int, c2 int, primary key(c1, c2))")
	tk.MustExec("create table test_delete_common (c1 int, c2 int, primary key(c1, c2))")
	tk.MustExec("create table test_conflict_delete_common (c1 int, c2 int, primary key(c1, c2))")
	tk.MustExec("create table test_mixed_common (c1 int, c2 int, primary key(c1, c2))")
	tk.MustExec("create table test_inc_common (c1 int, c2 int, primary key(c1, c2))")
	// Restore the default int-only clustered index mode.
	tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
}
// TestSimple exercises basic DDL plus concurrent DML workloads:
// a create/insert/drop smoke test ("Basic"), a mixed concurrent
// insert/update/delete load ("Mixed"), and concurrent increments of a
// single row ("Inc"), each run against both an int-handle table and a
// common-handle table.
func TestSimple(t *testing.T) {
	s := createDDLSuite(t)
	defer s.teardown(t)
	t.Run("Basic", func(t *testing.T) {
		done := s.runDDL("create table if not exists test_simple (c1 int, c2 int, c3 int)")
		err := <-done
		require.NoError(t, err)
		_, err = s.exec("insert into test_simple values (1, 1, 1)")
		require.NoError(t, err)
		rows, err := s.query("select c1 from test_simple limit 1")
		require.NoError(t, err)
		matchRows(t, rows, [][]interface{}{{1}})
		done = s.runDDL("drop table if exists test_simple")
		err = <-done
		require.NoError(t, err)
	})
	t.Run("Mixed", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_mixed"},
			{"test_mixed_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				workerNum := 10
				rowCount := 10000
				batch := rowCount / workerNum
				// Phase 1: fill the table with rowCount rows split across workers.
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							k := batch*i + j
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
						}
					}(i)
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleMixed][Insert][Time Cost]%v\n", end.Sub(start))
				// Phase 2: concurrently insert fresh rows past rowID while
				// updating and deleting random rows from the initial range.
				start = time.Now()
				rowID := int64(rowCount)
				defaultValue := int64(-1)
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func() {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							key := atomic.AddInt64(&rowID, 1)
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, key, key))
							key = int64(randomNum(rowCount))
							s.mustExec(fmt.Sprintf("update %s set c2 = %d where c1 = %d", tblName, defaultValue, key))
							key = int64(randomNum(rowCount))
							s.mustExec(fmt.Sprintf("delete from %s where c1 = %d", tblName, key))
						}
					}()
				}
				wg.Wait()
				end = time.Now()
				fmt.Printf("[TestSimpleMixed][Mixed][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				tbl := s.getTable(t, tblName)
				updateCount := int64(0)
				insertCount := int64(0)
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					// Untouched or freshly inserted rows keep c1 == c2;
					// updated rows carry c2 == defaultValue within the initial range.
					if reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) {
						insertCount++
					} else if reflect.DeepEqual(data[1].GetValue(), defaultValue) && data[0].GetInt64() < int64(rowCount) {
						updateCount++
					} else {
						log.Fatal("[TestSimpleMixed fail]invalid row", zap.Any("row", data))
					}
					return true, nil
				})
				require.NoError(t, err)
				deleteCount := atomic.LoadInt64(&rowID) - insertCount - updateCount
				require.Greater(t, insertCount, int64(0))
				require.Greater(t, updateCount, int64(0))
				require.Greater(t, deleteCount, int64(0))
			})
		}
	})
	t.Run("Inc", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_inc"},
			{"test_inc_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				workerNum := 10
				rowCount := 1000
				batch := rowCount / workerNum
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							k := batch*i + j
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
						}
					}(i)
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleInc][Insert][Time Cost]%v\n", end.Sub(start))
				// Hammer a single row with concurrent increments; the final value
				// must equal the total number of updates applied.
				start = time.Now()
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func() {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							s.mustExec(fmt.Sprintf("update %s set c2 = c2 + 1 where c1 = 0", tblName))
						}
					}()
				}
				wg.Wait()
				end = time.Now()
				fmt.Printf("[TestSimpleInc][Update][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				// BUG FIX: previously this always inspected "test_inc", so the
				// "test_inc_common" subtest never verified its own table.
				tbl := s.getTable(t, tblName)
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					if reflect.DeepEqual(data[0].GetValue(), int64(0)) {
						if *enableRestart {
							// With restarts, some updates may be replayed, so the
							// counter can exceed rowCount but never fall short.
							require.GreaterOrEqual(t, data[1].GetValue(), int64(rowCount))
						} else {
							require.Equal(t, int64(rowCount), data[1].GetValue())
						}
					} else {
						require.Equal(t, data[1].GetValue(), data[0].GetValue())
					}
					return true, nil
				})
				require.NoError(t, err)
			})
		}
	})
}
// TestSimpleInsert runs concurrent insert workloads against both an
// int-handle table and a common-handle table: "Basic" inserts disjoint keys
// and expects every row to land, "Conflict" inserts random (possibly
// duplicate) keys and expects exactly the distinct key set to survive.
func TestSimpleInsert(t *testing.T) {
	s := createDDLSuite(t)
	defer s.teardown(t)
	t.Run("Basic", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_insert"},
			{"test_insert_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				workerNum := 10
				rowCount := 10000
				batch := rowCount / workerNum
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						// Each worker owns a disjoint key range [batch*i, batch*(i+1)).
						for j := 0; j < batch; j++ {
							k := batch*i + j
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
						}
					}(i)
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleInsert][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				// BUG FIX: previously this always inspected "test_insert", so the
				// "test_insert_common" subtest never verified its own table.
				tbl := s.getTable(t, tblName)
				handles := kv.NewHandleMap()
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					handles.Set(h, struct{}{})
					require.Equal(t, data[1].GetValue(), data[0].GetValue())
					return true, nil
				})
				require.NoError(t, err)
				require.Equal(t, rowCount, handles.Len())
			})
		}
	})
	t.Run("Conflict", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_conflict_insert"},
			{"test_conflict_insert_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				var mu sync.Mutex
				// keysMap records every key any worker attempted to insert;
				// duplicates are expected to fail, so only distinct keys survive.
				keysMap := make(map[int64]int64)
				workerNum := 10
				rowCount := 10000
				batch := rowCount / workerNum
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func() {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							k := randomNum(rowCount)
							// Insert errors (duplicate keys) are intentionally ignored.
							_, _ = s.exec(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
							mu.Lock()
							keysMap[int64(k)] = int64(k)
							mu.Unlock()
						}
					}()
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleConflictInsert][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				tbl := s.getTable(t, tblName)
				handles := kv.NewHandleMap()
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					handles.Set(h, struct{}{})
					require.Contains(t, keysMap, data[0].GetValue())
					require.Equal(t, data[1].GetValue(), data[0].GetValue())
					return true, nil
				})
				require.NoError(t, err)
				require.Len(t, keysMap, handles.Len())
			})
		}
	})
}
// TestSimpleUpdate runs concurrent update workloads against both an
// int-handle table and a common-handle table: "Basic" gives every key a
// random updated value and verifies each stored row against a recorded map;
// "Conflict" lets workers race updates of random keys to defaultValue and
// accepts either the original or the updated value for each row.
func TestSimpleUpdate(t *testing.T) {
	s := createDDLSuite(t)
	defer s.teardown(t)
	t.Run("Basic", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_update"},
			{"test_update_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				var mu sync.Mutex
				// keysMap maps each primary key to the value it was last updated to.
				keysMap := make(map[int64]int64)
				workerNum := 10
				rowCount := 10000
				batch := rowCount / workerNum
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						// Each worker owns a disjoint key range, so there is no
						// cross-worker conflict on the same row in this subtest.
						for j := 0; j < batch; j++ {
							k := batch*i + j
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
							v := randomNum(rowCount)
							s.mustExec(fmt.Sprintf("update %s set c2 = %d where c1 = %d", tblName, v, k))
							mu.Lock()
							keysMap[int64(k)] = int64(v)
							mu.Unlock()
						}
					}(i)
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleUpdate][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				tbl := s.getTable(t, tblName)
				handles := kv.NewHandleMap()
				// Every row's c2 must match the value recorded for its key.
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					handles.Set(h, struct{}{})
					key := data[0].GetInt64()
					require.Equal(t, keysMap[key], data[1].GetValue())
					return true, nil
				})
				require.NoError(t, err)
				require.Equal(t, rowCount, handles.Len())
			})
		}
	})
	t.Run("Conflict", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_conflict_update"},
			{"test_conflict_update_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				var mu sync.Mutex
				keysMap := make(map[int64]int64)
				workerNum := 10
				rowCount := 10000
				batch := rowCount / workerNum
				// Phase 1: populate rows with c1 == c2.
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							k := batch*i + j
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
							mu.Lock()
							keysMap[int64(k)] = int64(k)
							mu.Unlock()
						}
					}(i)
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleConflictUpdate][Insert][Time Cost]%v\n", end.Sub(start))
				// Phase 2: workers race to set random rows to defaultValue.
				start = time.Now()
				defaultValue := int64(-1)
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func() {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							k := randomNum(rowCount)
							s.mustExec(fmt.Sprintf("update %s set c2 = %d where c1 = %d", tblName, defaultValue, k))
							mu.Lock()
							keysMap[int64(k)] = defaultValue
							mu.Unlock()
						}
					}()
				}
				wg.Wait()
				end = time.Now()
				fmt.Printf("[TestSimpleConflictUpdate][Update][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				tbl := s.getTable(t, tblName)
				handles := kv.NewHandleMap()
				// Each row must hold either its original value or defaultValue;
				// anything else indicates a lost/corrupted update.
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					handles.Set(h, struct{}{})
					require.Contains(t, keysMap, data[0].GetValue())
					if !reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) && !reflect.DeepEqual(data[1].GetValue(), defaultValue) {
						log.Fatal("[TestSimpleConflictUpdate fail]Bad row", zap.Any("row", data))
					}
					return true, nil
				})
				require.NoError(t, err)
				require.Equal(t, rowCount, handles.Len())
			})
		}
	})
}
// TestSimpleDelete runs concurrent delete workloads against both an
// int-handle table and a common-handle table: "Basic" inserts then deletes
// every row and expects an empty table; "Conflict" deletes random keys and
// expects exactly the surviving key set to remain.
func TestSimpleDelete(t *testing.T) {
	s := createDDLSuite(t)
	defer s.teardown(t)
	t.Run("Basic", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_delete"},
			{"test_delete_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				workerNum := 10
				rowCount := 1000
				batch := rowCount / workerNum
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						// Each worker inserts and immediately deletes its own keys,
						// so the table must be empty once all workers finish.
						for j := 0; j < batch; j++ {
							k := batch*i + j
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
							s.mustExec(fmt.Sprintf("delete from %s where c1 = %d", tblName, k))
						}
					}(i)
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleDelete][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				tbl := s.getTable(t, tblName)
				handles := kv.NewHandleMap()
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					handles.Set(h, struct{}{})
					return true, nil
				})
				require.NoError(t, err)
				require.Equal(t, 0, handles.Len())
			})
		}
	})
	t.Run("Conflict", func(t *testing.T) {
		tests := []struct {
			name string
		}{
			{"test_conflict_delete"},
			{"test_conflict_delete_common"},
		}
		for _, test := range tests {
			tblName := test.name
			t.Run(test.name, func(t *testing.T) {
				var mu sync.Mutex
				// keysMap tracks which keys should still exist after the deletes.
				keysMap := make(map[int64]int64)
				workerNum := 10
				rowCount := 1000
				batch := rowCount / workerNum
				// Phase 1: populate rows and record every inserted key.
				start := time.Now()
				var wg sync.WaitGroup
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							k := batch*i + j
							s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
							mu.Lock()
							keysMap[int64(k)] = int64(k)
							mu.Unlock()
						}
					}(i)
				}
				wg.Wait()
				end := time.Now()
				fmt.Printf("[TestSimpleConflictDelete][Insert][Time Cost]%v\n", end.Sub(start))
				// Phase 2: workers race to delete random keys; deleting a
				// missing key is a no-op, so keysMap stays consistent.
				start = time.Now()
				wg.Add(workerNum)
				for i := 0; i < workerNum; i++ {
					go func(i int) {
						defer wg.Done()
						for j := 0; j < batch; j++ {
							k := randomNum(rowCount)
							s.mustExec(fmt.Sprintf("delete from %s where c1 = %d", tblName, k))
							mu.Lock()
							delete(keysMap, int64(k))
							mu.Unlock()
						}
					}(i)
				}
				wg.Wait()
				end = time.Now()
				fmt.Printf("[TestSimpleConflictDelete][Delete][Time Cost]%v\n", end.Sub(start))
				ctx := s.ctx
				err := sessiontxn.NewTxn(goctx.Background(), ctx)
				require.NoError(t, err)
				tbl := s.getTable(t, tblName)
				handles := kv.NewHandleMap()
				err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
					handles.Set(h, struct{}{})
					require.Contains(t, keysMap, data[0].GetValue())
					return true, nil
				})
				require.NoError(t, err)
				require.Len(t, keysMap, handles.Len())
			})
		}
	})
}
// addEnvPath appends newPath to $PATH.
func addEnvPath(newPath string) {
_ = os.Setenv("PATH", fmt.Sprintf("%s%c%s", os.Getenv("PATH"), os.PathListSeparator, newPath))
}
// init seeds the global math/rand source (used by randomNum) and registers
// the TiKV storage driver so the suite can open "tikv://" stores.
func init() {
	rand.Seed(time.Now().UnixNano())
	// Registration error is ignored: Register only fails when the name is
	// already taken, which is harmless here.
	_ = store.Register("tikv", tidbdriver.TiKVDriver{})
}
|
package main
import "fmt"
// hello writes "hello " (trailing space, no newline) to stdout.
func hello() {
	fmt.Print("hello ")
}
// world writes "world" followed by a newline to stdout.
func world() {
	fmt.Println("world")
}
// main prints "hello world": hello() runs first, and the deferred world()
// call fires when main returns, supplying the second word.
func main() {
	defer world() // deferred: runs right before main exits
	hello()
}
|
// Copyright 2016 Lennart Espe. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE.md file.
// Package lib generates and fetches hash patches.
package lib
import (
"bytes"
"errors"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/lnsp/go-filter"
)
const (
	// DefaultFileMode is the default mode for files created by this package.
	DefaultFileMode = 0644
	// DefaultDirMode is the default mode for directories created by this package.
	DefaultDirMode = 0755
	// PatchFile is the default patch file name.
	PatchFile = ".patch"
	// IgnoreFile is the default ignore file name.
	IgnoreFile = ".benchignore"
)
// Write creates a new patch file in dir and stores the given hash set in it,
// one entry per line, optionally prefixed with a source marker line.
// It returns an error if the patch file cannot be created or written.
func Write(dir, source string, data HashSet) error {
	target := filepath.Join(dir, PatchFile)
	joinBuffer, size := bytes.Buffer{}, len(data)
	// Record the origin of the patch, if one was supplied.
	if source != "" {
		joinBuffer.WriteString(SourceMarker + source + LineSeperator)
		log.Notice("generated patch with source", source)
	}
	// Serialize every hash entry on its own line.
	for i := 0; i < size; i++ {
		joinBuffer.WriteString(data[i].String())
		joinBuffer.WriteString(LineSeperator)
	}
	outputFile, err := os.Create(target)
	if err != nil {
		log.Critical("failed to open patch file:", err)
		return err
	}
	defer outputFile.Close()
	// BUG FIX: the WriteTo error was previously discarded, so a failed or
	// short write would still report success.
	if _, err := joinBuffer.WriteTo(outputFile); err != nil {
		log.Critical("failed to write patch file:", err)
		return err
	}
	return nil
}
// Generate creates a new patch file in the target folder storing version information.
// It may return an error if either the hashing or patch file creation fails.
func Generate(targetDir, targetSrc string, pool int, dynamic bool) error {
filterPath := filepath.Join(targetDir, IgnoreFile)
filter := filter.LoadFilter(filterPath)
var hashes HashSet
var err error
if pool < 2 {
hashes, err = HashDirectory(targetDir)
} else {
if dynamic {
pool *= runtime.NumCPU()
}
hashes, err = HashDirectoryAsync(targetDir, pool)
}
if err != nil {
log.Critical("failed to hash directory:", err)
return err
}
filtered := FilterHashes(hashes, filter)
return Write(targetDir, targetSrc, filtered)
}
// FetchWorker consumes hash items from elements, downloads each named file
// from origin, writes it below dir, and reports one result (nil on success)
// per item on results. It exits when elements is closed.
func FetchWorker(elements <-chan HashItem, results chan<- error, origin Origin, dir string) {
	for item := range elements {
		contents, err := origin.Get(item.Name)
		if err != nil {
			results <- errors.New("failed to fetch file: " + item.Name)
			continue
		}
		target := filepath.Join(dir, item.Name)
		if err := os.MkdirAll(filepath.Dir(target), DefaultDirMode); err != nil {
			results <- errors.New("failed to create folder: " + filepath.Dir(item.Name))
			continue
		}
		if err := ioutil.WriteFile(target, contents, DefaultFileMode); err != nil {
			results <- errors.New("failed to write file: " + item.Name)
			continue
		}
		results <- nil
	}
}
// FetchSpecificAsync downloads the files in the set from the origin
// asynchronously, using pool workers, and stores them in the target directory.
// Every failure is logged; the first failure encountered is returned so
// callers learn the fetch was incomplete.
func FetchSpecificAsync(target string, source Origin, set HashSet, pool int) error {
	workload := len(set)
	jobs := make(chan HashItem, workload/2+1)
	results := make(chan error, workload/2+1)
	log.Notice("using", pool, "workers")
	for i := 0; i < pool; i++ {
		go FetchWorker(jobs, results, source, target)
	}
	for i := 0; i < workload; i++ {
		jobs <- set[i]
	}
	// Closing jobs lets the workers exit after draining the channel.
	close(jobs)
	// BUG FIX: errors were previously logged but the function always returned
	// nil, contradicting its contract and the synchronous FetchSpecific.
	var firstErr error
	for i := 0; i < workload; i++ {
		if err := <-results; err != nil {
			log.Warning(err)
			if firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}
// FetchSpecific downloads every file in the set from the origin and stores it
// below dir, creating intermediate directories as needed.
// It stops and returns an error on the first fetch, mkdir, or write failure.
func FetchSpecific(dir string, source Origin, set HashSet) error {
	for _, item := range set {
		contents, err := source.Get(item.Name)
		if err != nil {
			return errors.New("failed to fetch file: " + item.Name)
		}
		target := filepath.Join(dir, item.Name)
		parent := filepath.Dir(target)
		if err := os.MkdirAll(parent, DefaultDirMode); err != nil {
			return errors.New("failed to create folder: " + parent)
		}
		if err := ioutil.WriteFile(target, contents, DefaultFileMode); err != nil {
			return errors.New("failed to write file: " + target)
		}
	}
	return nil
}
// Fetch compares the local patch with the one published by its origin and
// updates the local directory to match the global state. It only replaces or
// adds files, never deletes any, and writes the refreshed patch back on
// success. It returns an error if origin resolution, scanning, or fetching
// fails.
func Fetch(dir, target string, pool int, dynamic bool) error {
	local, err := GetOrigin(dir)
	if err != nil {
		log.Error("bad local origin:", err)
		return err
	}
	localHashes, localSource, err := local.Scan()
	if err != nil {
		// BUG FIX: this scan error was previously ignored.
		log.Error("failed to scan local origin:", err)
		return err
	}
	// An explicit target overrides the source recorded in the local patch.
	if target != "" {
		localSource = target
	}
	global, err := GetOrigin(localSource)
	if err != nil {
		log.Error("bad global origin:", err)
		return err
	}
	globalHashes, globalSource, err := global.Scan()
	if err != nil {
		// BUG FIX: this scan error was previously ignored.
		log.Error("failed to scan global origin:", err)
		return err
	}
	if globalSource != localSource {
		log.Warning("unverified origin:", globalSource)
	} else {
		log.Notice("verified origin:", globalSource)
	}
	missingHashes := Compare(localHashes, globalHashes)
	if pool < 2 {
		err = FetchSpecific(dir, global, missingHashes)
	} else {
		if dynamic {
			pool *= runtime.NumCPU()
		}
		err = FetchSpecificAsync(dir, global, missingHashes, pool)
	}
	if err != nil {
		log.Error("failed to fetch files:", err)
		return err
	}
	log.Notice("fetched", len(missingHashes), "files from origin")
	return Write(dir, globalSource, globalHashes)
}
// ListFiles list all files in a directory including subdirectories.
// It may return an error if the recursive walking fails.
func ListFiles(dir string) ([]string, error) {
var files []string
err := filepath.Walk(dir, func(active string, info os.FileInfo, err error) error {
// ignore directories
if info.IsDir() {
return nil
}
if err != nil {
return err
}
rel, err := filepath.Rel(dir, active)
files = append(files, rel)
return nil
})
return files, err
}
|
package usecase
import (
"github.com/taniwhy/mochi-match-rest/domain/models"
"github.com/taniwhy/mochi-match-rest/domain/repository"
)
// UserDetailUseCase defines the application-level operations available for
// user detail records.
type UserDetailUseCase interface {
	// FindUserDetailByID returns the user detail with the given ID.
	FindUserDetailByID(id int64) (*models.UserDetail, error)
	// CreateUserDetail persists a new user detail record.
	CreateUserDetail(userDetail *models.UserDetail) error
	// UpdateUserDetail updates an existing user detail record.
	UpdateUserDetail(userDetail *models.UserDetail) error
	// DeleteUserDetail removes a user detail record.
	DeleteUserDetail(userDetail *models.UserDetail) error
}
// userDetailUsecase is the default UserDetailUseCase implementation,
// delegating all persistence to a UserDetailRepository.
type userDetailUsecase struct {
	userDetailRepository repository.UserDetailRepository
}
// NewUserDetailUsecase returns a UserDetailUseCase backed by the given
// repository.
func NewUserDetailUsecase(uR repository.UserDetailRepository) UserDetailUseCase {
	usecase := &userDetailUsecase{userDetailRepository: uR}
	return usecase
}
// FindUserDetailByID looks up a user detail record by its ID, returning
// (nil, err) when the repository lookup fails.
func (uU userDetailUsecase) FindUserDetailByID(id int64) (*models.UserDetail, error) {
	detail, err := uU.userDetailRepository.FindUserDetailByID(id)
	if err != nil {
		return nil, err
	}
	return detail, nil
}
// CreateUserDetail persists a new user detail record through the repository
// and forwards any repository error unchanged.
func (uU userDetailUsecase) CreateUserDetail(userDetail *models.UserDetail) error {
	return uU.userDetailRepository.InsertUserDetail(userDetail)
}
// UpdateUserDetail updates an existing user detail record.
// BUG FIX: this previously delegated to InsertUserDetail (a copy-paste of
// CreateUserDetail), so updates inserted rows instead of modifying them.
// NOTE(review): assumes repository.UserDetailRepository exposes an
// UpdateUserDetail method — confirm against the repository interface.
func (uU userDetailUsecase) UpdateUserDetail(userDetail *models.UserDetail) error {
	err := uU.userDetailRepository.UpdateUserDetail(userDetail)
	if err != nil {
		return err
	}
	return nil
}
// DeleteUserDetail removes a user detail record.
// BUG FIX: this previously delegated to InsertUserDetail (a copy-paste of
// CreateUserDetail), so deletes inserted rows instead of removing them.
// NOTE(review): assumes repository.UserDetailRepository exposes a
// DeleteUserDetail method — confirm against the repository interface.
func (uU userDetailUsecase) DeleteUserDetail(userDetail *models.UserDetail) error {
	err := uU.userDetailRepository.DeleteUserDetail(userDetail)
	if err != nil {
		return err
	}
	return nil
}
|
package problem0079
// exist reports whether word can be traced in board by moving between
// horizontally or vertically adjacent cells, using each cell at most once.
func exist(board [][]byte, word string) bool {
	if word == "" {
		return true
	}
	// One visited flag per cell, shared by all starting positions; dfs
	// resets flags on backtrack so reuse between starts is safe.
	visited := make([][]bool, len(board))
	for r := range board {
		visited[r] = make([]bool, len(board[r]))
	}
	// Try every cell as the starting point of the word.
	for r := range board {
		for c := range board[r] {
			if dfs(board, word, r, c, 0, visited) {
				return true
			}
		}
	}
	return false
}
// dfs reports whether word[k:] can be matched starting at cell (i, j),
// backtracking through unvisited orthogonal neighbours.
func dfs(board [][]byte, word string, i, j, k int, visited [][]bool) bool {
	// Every character has been matched.
	if k >= len(word) {
		return true
	}
	// Walked off the board.
	if i < 0 || i >= len(board) || j < 0 || j >= len(board[i]) {
		return false
	}
	// Cell already used on this path or doesn't match the next character.
	if visited[i][j] || board[i][j] != word[k] {
		return false
	}
	visited[i][j] = true
	for _, d := range [4][2]int{{1, 0}, {-1, 0}, {0, 1}, {0, -1}} {
		if dfs(board, word, i+d[0], j+d[1], k+1, visited) {
			return true
		}
	}
	// Backtrack: free the cell for other paths.
	visited[i][j] = false
	return false
}
|
package worker
// InitWorker is the entry point of init the worker, during which multiple components of
// worker need to be initialized.
func InitWorker(filePath string) error {
if err := InitConfig(filePath); err != nil {
return err
}
if err := InitRegister(); err != nil {
return err
}
if err := InitLogSink(); err != nil {
return err
}
if err := InitExecutor(); err != nil {
return err
}
// todo init scheduler
if err := InitJobManager(); err != nil {
return err
}
return nil
}
|
/*
* Copyright 2019-present Open Networking Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mocks
import (
"context"
"errors"
"fmt"
"strings"
"github.com/gogo/protobuf/proto"
"github.com/opencord/voltha-lib-go/v4/pkg/adapters"
"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
com "github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
"github.com/opencord/voltha-lib-go/v4/pkg/log"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
of "github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/voltha"
)
const (
	// numONUPerOLT is the number of mock ONU children created per OLT device.
	numONUPerOLT = 4
	// startingUNIPortNo is the port number assigned to the first UNI port.
	startingUNIPortNo = 100
)

// static implementation check: OLTAdapter must satisfy adapters.IAdapter.
var _ adapters.IAdapter = &OLTAdapter{}

// OLTAdapter represents a mock OLT adapter built on the shared mock Adapter.
type OLTAdapter struct {
	*Adapter
}
// NewOLTAdapter creates a new mock OLT adapter instance.
// The context parameter is accepted for signature symmetry and is unused.
func NewOLTAdapter(ctx context.Context, cp adapterif.CoreProxy) *OLTAdapter {
	adapter := &OLTAdapter{Adapter: NewAdapter(cp)}
	return adapter
}
// Adopt_device asynchronously adopts a newly added OLT device: it clones and
// populates the device, pushes the update to the Core, registers the NNI and
// PON ports, marks the device ACTIVE, and finally announces numONUPerOLT mock
// child ONUs. The returned error is always nil; failures inside the goroutine
// are fatal.
func (oltA *OLTAdapter) Adopt_device(ctx context.Context, device *voltha.Device) error { // nolint
	go func() {
		// Work on a clone so the caller's device is never mutated.
		d := proto.Clone(device).(*voltha.Device)
		d.Root = true
		d.Vendor = "olt_adapter_mock"
		d.Model = "go-mock"
		d.SerialNumber = com.GetRandomSerialNumber()
		d.MacAddress = strings.ToUpper(com.GetRandomMacAddress())
		oltA.storeDevice(d)
		if res := oltA.coreProxy.DeviceUpdate(context.TODO(), d); res != nil {
			logger.Fatalf(ctx, "deviceUpdate-failed-%s", res)
		}
		capability := uint32(of.OfpPortFeatures_OFPPF_1GB_FD | of.OfpPortFeatures_OFPPF_FIBER)
		// Network-facing NNI port, advertised as a live 1G fiber OpenFlow port.
		nniPort := &voltha.Port{
			PortNo:     2,
			Label:      fmt.Sprintf("nni-%d", 2),
			Type:       voltha.Port_ETHERNET_NNI,
			OperStatus: voltha.OperStatus_ACTIVE,
			OfpPort: &of.OfpPort{
				HwAddr:     macAddressToUint32Array("11:22:33:44:55:66"),
				Config:     0,
				State:      uint32(of.OfpPortState_OFPPS_LIVE),
				Curr:       capability,
				Advertised: capability,
				Peer:       capability,
				CurrSpeed:  uint32(of.OfpPortFeatures_OFPPF_1GB_FD),
				MaxSpeed:   uint32(of.OfpPortFeatures_OFPPF_1GB_FD),
			},
		}
		var err error
		if err = oltA.coreProxy.PortCreated(context.TODO(), d.Id, nniPort); err != nil {
			logger.Fatalf(ctx, "PortCreated-failed-%s", err)
		}
		// Subscriber-facing PON port; no OpenFlow representation needed.
		ponPort := &voltha.Port{
			PortNo:     1,
			Label:      fmt.Sprintf("pon-%d", 1),
			Type:       voltha.Port_PON_OLT,
			OperStatus: voltha.OperStatus_ACTIVE,
		}
		if err = oltA.coreProxy.PortCreated(context.TODO(), d.Id, ponPort); err != nil {
			logger.Fatalf(ctx, "PortCreated-failed-%s", err)
		}
		d.ConnectStatus = voltha.ConnectStatus_REACHABLE
		d.OperStatus = voltha.OperStatus_ACTIVE
		if err = oltA.coreProxy.DeviceStateUpdate(context.TODO(), d.Id, d.ConnectStatus, d.OperStatus); err != nil {
			logger.Fatalf(ctx, "Device-state-update-failed-%s", err)
		}
		//Get the latest device data from the Core
		if d, err = oltA.coreProxy.GetDevice(context.TODO(), d.Id, d.Id); err != nil {
			logger.Fatalf(ctx, "getting-device-failed-%s", err)
		}
		oltA.updateDevice(d)
		// Register Child devices: announce one mock ONU per UNI port,
		// each on its own goroutine with the sequence number captured by value.
		initialUniPortNo := startingUNIPortNo
		for i := 0; i < numONUPerOLT; i++ {
			go func(seqNo int) {
				if _, err := oltA.coreProxy.ChildDeviceDetected(
					context.TODO(),
					d.Id,
					1,
					"onu_adapter_mock",
					initialUniPortNo+seqNo,
					"onu_adapter_mock",
					com.GetRandomSerialNumber(),
					int64(seqNo)); err != nil {
					logger.Fatalf(ctx, "failure-sending-child-device-%s", err)
				}
			}(i)
		}
	}()
	return nil
}
// Get_ofp_device_info returns the OpenFlow switch capabilities advertised by
// the mock OLT. It is fatal if the device is unknown to the adapter.
func (oltA *OLTAdapter) Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error) { // nolint
	if oltA.getDevice(device.Id) == nil {
		logger.Fatalf(ctx, "device-not-found-%s", device.Id)
	}
	capabilities := uint32(of.OfpCapabilities_OFPC_FLOW_STATS |
		of.OfpCapabilities_OFPC_TABLE_STATS |
		of.OfpCapabilities_OFPC_PORT_STATS |
		of.OfpCapabilities_OFPC_GROUP_STATS)
	return &ic.SwitchCapability{
		Desc: &of.OfpDesc{
			HwDesc:    "olt_adapter_mock",
			SwDesc:    "olt_adapter_mock",
			SerialNum: "12345678",
		},
		SwitchFeatures: &of.OfpSwitchFeatures{
			NBuffers:     256,
			NTables:      2,
			Capabilities: capabilities,
		},
	}, nil
}
// GetNumONUPerOLT returns the number of mock ONU children created per OLT.
func (oltA *OLTAdapter) GetNumONUPerOLT() int {
	return numONUPerOLT
}
// GetStartingUNIPortNo returns the port number assigned to the first UNI port.
func (oltA *OLTAdapter) GetStartingUNIPortNo() int {
	return startingUNIPortNo
}
// Disable_device asynchronously disables a device: all its ports are set to
// UNKNOWN, its operational state is updated in the Core (connection status is
// left untouched because a disabled device stays reachable), and the Core is
// told its child devices are lost. The returned error is always nil.
func (oltA *OLTAdapter) Disable_device(ctx context.Context, device *voltha.Device) error { // nolint
	go func() {
		if d := oltA.getDevice(device.Id); d == nil {
			logger.Fatalf(ctx, "device-not-found-%s", device.Id)
		}
		cloned := proto.Clone(device).(*voltha.Device)
		// Update the all ports state on that device to disable
		if err := oltA.coreProxy.PortsStateUpdate(context.TODO(), cloned.Id, 0, voltha.OperStatus_UNKNOWN); err != nil {
			// Warn only: the device may already have been deleted in the Core.
			logger.Warnw(ctx, "updating-ports-failed", log.Fields{"device-id": device.Id, "error": err})
		}
		//Update the device operational state
		cloned.OperStatus = voltha.OperStatus_UNKNOWN
		// The device is still reachable after it has been disabled, so the connection status should not be changed.
		if err := oltA.coreProxy.DeviceStateUpdate(context.TODO(), cloned.Id, cloned.ConnectStatus, cloned.OperStatus); err != nil {
			// Device may already have been deleted in the core
			logger.Warnw(ctx, "device-state-update-failed", log.Fields{"device-id": device.Id, "error": err})
			return
		}
		oltA.updateDevice(cloned)
		// Tell the Core that all child devices have been disabled (by default it's an action already taken by the Core
		if err := oltA.coreProxy.ChildDevicesLost(context.TODO(), cloned.Id); err != nil {
			// Device may already have been deleted in the core
			logger.Warnw(ctx, "lost-notif-of-child-devices-failed", log.Fields{"device-id": device.Id, "error": err})
		}
	}()
	return nil
}
// Reenable_device asynchronously re-enables a previously disabled device:
// all ports are set back to ACTIVE, the operational state is updated in the
// Core, and the Core is told to re-detect the child devices. The returned
// error is always nil; failures inside the goroutine are fatal.
func (oltA *OLTAdapter) Reenable_device(ctx context.Context, device *voltha.Device) error { // nolint
	go func() {
		if d := oltA.getDevice(device.Id); d == nil {
			logger.Fatalf(ctx, "device-not-found-%s", device.Id)
		}
		cloned := proto.Clone(device).(*voltha.Device)
		// Update the all ports state on that device to enable.
		// BUG FIX: these calls used logger.Fatalf with log.Fields arguments;
		// Fatalf expects printf-style args, so the structured Fatalw is used
		// instead (matching the Warnw usage in Disable_device).
		if err := oltA.coreProxy.PortsStateUpdate(context.TODO(), cloned.Id, 0, voltha.OperStatus_ACTIVE); err != nil {
			logger.Fatalw(ctx, "updating-ports-failed", log.Fields{"device-id": device.Id, "error": err})
		}
		// Update the device state.
		cloned.OperStatus = voltha.OperStatus_ACTIVE
		if err := oltA.coreProxy.DeviceStateUpdate(context.TODO(), cloned.Id, cloned.ConnectStatus, cloned.OperStatus); err != nil {
			logger.Fatalw(ctx, "device-state-update-failed", log.Fields{"device-id": device.Id, "error": err})
		}
		// Tell the Core that all child devices have been enabled.
		if err := oltA.coreProxy.ChildDevicesDetected(context.TODO(), cloned.Id); err != nil {
			logger.Fatalw(ctx, "detection-notif-of-child-devices-failed", log.Fields{"device-id": device.Id, "error": err})
		}
	}()
	return nil
}
// Enable_port asynchronously marks a PON port on the device as ACTIVE.
// Ports of other types are ignored. The returned error is always nil.
func (oltA *OLTAdapter) Enable_port(ctx context.Context, deviceId string, Port *voltha.Port) error { //nolint
	go func() {
		if Port.Type == voltha.Port_PON_OLT {
			if err := oltA.coreProxy.PortStateUpdate(context.TODO(), deviceId, voltha.Port_PON_OLT, Port.PortNo, voltha.OperStatus_ACTIVE); err != nil {
				// BUG FIX: was logger.Fatalf with log.Fields arguments; Fatalf
				// expects printf-style args, so the structured Fatalw is used.
				logger.Fatalw(ctx, "updating-ports-failed", log.Fields{"device-id": deviceId, "error": err})
			}
		}
	}()
	return nil
}
// Disable_port asynchronously marks a PON port on the device as DISCOVERED
// (i.e. disabled). Ports of other types are ignored. The returned error is
// always nil.
func (oltA *OLTAdapter) Disable_port(ctx context.Context, deviceId string, Port *voltha.Port) error { //nolint
	go func() {
		if Port.Type != voltha.Port_PON_OLT {
			return
		}
		err := oltA.coreProxy.PortStateUpdate(context.TODO(), deviceId, voltha.Port_PON_OLT, Port.PortNo, voltha.OperStatus_DISCOVERED)
		if err != nil {
			// Corresponding device may have been deleted
			logger.Warnw(ctx, "updating-ports-failed", log.Fields{"device-id": deviceId, "error": err})
		}
	}()
	return nil
}
// Child_device_lost deletes an ONU and its references.
// The mock implementation is a no-op and always succeeds.
func (oltA *OLTAdapter) Child_device_lost(ctx context.Context, deviceID string, pPortNo uint32, onuID uint32) error { // nolint
	return nil
}
// Reboot_device asynchronously simulates a device reboot: the device is
// marked UNREACHABLE/UNKNOWN in the Core and all its ports are set to
// UNKNOWN. The returned error is always nil.
func (oltA *OLTAdapter) Reboot_device(ctx context.Context, device *voltha.Device) error { // nolint
	logger.Infow(ctx, "reboot-device", log.Fields{"device-id": device.Id})
	go func() {
		if err := oltA.coreProxy.DeviceStateUpdate(context.TODO(), device.Id, voltha.ConnectStatus_UNREACHABLE, voltha.OperStatus_UNKNOWN); err != nil {
			// BUG FIX: was logger.Fatalf with log.Fields arguments; Fatalf
			// expects printf-style args, so the structured Fatalw is used.
			logger.Fatalw(ctx, "device-state-update-failed", log.Fields{"device-id": device.Id, "error": err})
		}
		if err := oltA.coreProxy.PortsStateUpdate(context.TODO(), device.Id, 0, voltha.OperStatus_UNKNOWN); err != nil {
			// Not an error as the previous command will start the process of clearing the OLT
			logger.Infow(ctx, "port-update-failed", log.Fields{"device-id": device.Id, "error": err})
		}
	}()
	return nil
}
// TODO: REMOVE Start_omci_test begins an omci self-test.
// The mock implementation is not supported and always returns an error.
func (oltA *OLTAdapter) Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*ic.TestResponse, error) { // nolint
	return nil, errors.New("start-omci-test-not-implemented")
}
// Get_ext_value fetches an extended value from the device.
// The mock implementation is not supported and always returns an error.
func (oltA *OLTAdapter) Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error) { // nolint
	return nil, errors.New("get-ext-value-not-implemented")
}
|
package json
import (
"testing"
"github.com/polydawn/refmt/tok/fixtures"
)
func testComposite(t *testing.T) {
t.Run("array nested in map as non-first and final entry", func(t *testing.T) {
seq := fixtures.SequenceMap["array nested in map as non-first and final entry"]
checkCanonical(t, seq, `{"k1":"v1","ke":["oh","whee","wow"]}`)
})
t.Run("array nested in map as first and non-final entry", func(t *testing.T) {
seq := fixtures.SequenceMap["array nested in map as first and non-final entry"]
checkCanonical(t, seq, `{"ke":["oh","whee","wow"],"k1":"v1"}`)
})
t.Run("maps nested in array", func(t *testing.T) {
seq := fixtures.SequenceMap["maps nested in array"]
checkCanonical(t, seq, `[{"k":"v"},"whee",{"k1":"v1"}]`)
})
t.Run("arrays in arrays in arrays", func(t *testing.T) {
seq := fixtures.SequenceMap["arrays in arrays in arrays"]
checkCanonical(t, seq, `[[[]]]`)
})
t.Run("maps nested in maps", func(t *testing.T) {
seq := fixtures.SequenceMap["maps nested in maps"]
checkCanonical(t, seq, `{"k":{"k2":"v2"}}`)
})
}
|
package util
import (
"github.com/mndrix/tap-go"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/cgroups"
)
// ValidateLinuxResourcesMemory validates linux.resources.memory.
//
// It reads the memory cgroup values for the container pid and compares each
// value against the corresponding field requested in the spec, reporting
// every comparison through the TAP harness. Failures to locate or read the
// cgroup are reported as diagnostics and do not return an error.
//
// NOTE(review): config.Linux.Resources.Memory and each compared field are
// dereferenced unconditionally — callers must only invoke this when those
// fields are populated in the spec.
func ValidateLinuxResourcesMemory(config *rspec.Spec, t *tap.T, state *rspec.State) error {
	cg, err := cgroups.FindCgroup()
	t.Ok((err == nil), "find memory cgroup")
	if err != nil {
		t.Diagnostic(err.Error())
		return nil
	}
	lm, err := cg.GetMemoryData(state.Pid, config.Linux.CgroupsPath)
	t.Ok((err == nil), "get memory cgroup data")
	if err != nil {
		t.Diagnostic(err.Error())
		return nil
	}
	t.Ok(*lm.Limit == *config.Linux.Resources.Memory.Limit, "memory limit is set correctly")
	t.Diagnosticf("expect: %d, actual: %d", *config.Linux.Resources.Memory.Limit, *lm.Limit)
	t.Ok(*lm.Reservation == *config.Linux.Resources.Memory.Reservation, "memory reservation is set correctly")
	t.Diagnosticf("expect: %d, actual: %d", *config.Linux.Resources.Memory.Reservation, *lm.Reservation)
	t.Ok(*lm.Swap == *config.Linux.Resources.Memory.Swap, "memory swap is set correctly")
	// Bug fix: this diagnostic previously printed lm.Reservation instead of lm.Swap.
	t.Diagnosticf("expect: %d, actual: %d", *config.Linux.Resources.Memory.Swap, *lm.Swap)
	t.Ok(*lm.Kernel == *config.Linux.Resources.Memory.Kernel, "memory kernel is set correctly")
	t.Diagnosticf("expect: %d, actual: %d", *config.Linux.Resources.Memory.Kernel, *lm.Kernel)
	t.Ok(*lm.KernelTCP == *config.Linux.Resources.Memory.KernelTCP, "memory kernelTCP is set correctly")
	// Bug fix: this diagnostic previously printed lm.Kernel instead of lm.KernelTCP.
	t.Diagnosticf("expect: %d, actual: %d", *config.Linux.Resources.Memory.KernelTCP, *lm.KernelTCP)
	t.Ok(*lm.Swappiness == *config.Linux.Resources.Memory.Swappiness, "memory swappiness is set correctly")
	t.Diagnosticf("expect: %d, actual: %d", *config.Linux.Resources.Memory.Swappiness, *lm.Swappiness)
	t.Ok(*lm.DisableOOMKiller == *config.Linux.Resources.Memory.DisableOOMKiller, "memory oom is set correctly")
	t.Diagnosticf("expect: %t, actual: %t", *config.Linux.Resources.Memory.DisableOOMKiller, *lm.DisableOOMKiller)
	return nil
}
|
package routers
import (
"github.com/astaxie/beego"
"goWebDemo/controllers"
)
// init registers every HTTP route with beego's router.
func init() {
	// Home page.
	beego.Router("/", &controllers.HomeController{}, "Get:Index")

	// Menu management pages, all handled by MenuController.
	menuRoutes := []struct{ path, mapping string }{
		{"/menu", "Get:Index"},
		{"/menu/list", "*:List"},
		{"/menu/edit", "*:Edit"},
		{"/menu/editdo", "*:EditDo"},
		{"/menu/add", "Get:Add"},
		{"/menu/adddo", "*:AddDo"},
	}
	for _, r := range menuRoutes {
		beego.Router(r.path, &controllers.MenuController{}, r.mapping)
	}

	// Login.
	beego.Router("/login", &controllers.LoginController{}, "*:Index")
}
|
package _56_Merge_Intervals
import (
"fmt"
"testing"
)
// TestMerge runs merge over a few interval sets (overlapping, touching,
// and contained) and prints the merged result for manual inspection.
func TestMerge(t *testing.T) {
	cases := [][][]int{
		{{1, 3}, {2, 6}, {8, 10}, {15, 18}},
		{{1, 4}, {4, 5}},
		{{1, 4}, {1, 5}},
	}
	for _, intervals := range cases {
		fmt.Println(merge(intervals))
	}
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// AllocationRecord type.
//
// NOTE(review): this is generated code (see the DO NOT EDIT header above).
// Node and Shards are pointer-typed while Host and Ip are plain strings —
// presumably to distinguish an absent column from an empty one; confirm
// against the generator before relying on that.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/cat/allocation/types.ts#L24-L75
type AllocationRecord struct {
	// DiskAvail Free disk space available to Elasticsearch.
	// Elasticsearch retrieves this metric from the node’s operating system.
	// Disk-based shard allocation uses this metric to assign shards to nodes based
	// on available disk space.
	DiskAvail ByteSize `json:"disk.avail,omitempty"`
	// DiskIndices Disk space used by the node’s shards. Does not include disk space for the
	// translog or unassigned shards.
	// IMPORTANT: This metric double-counts disk space for hard-linked files, such
	// as those created when shrinking, splitting, or cloning an index.
	DiskIndices ByteSize `json:"disk.indices,omitempty"`
	// DiskPercent Total percentage of disk space in use. Calculated as `disk.used /
	// disk.total`.
	DiskPercent Percentage `json:"disk.percent,omitempty"`
	// DiskTotal Total disk space for the node, including in-use and available space.
	DiskTotal ByteSize `json:"disk.total,omitempty"`
	// DiskUsed Total disk space in use.
	// Elasticsearch retrieves this metric from the node’s operating system (OS).
	// The metric includes disk space for: Elasticsearch, including the translog and
	// unassigned shards; the node’s operating system; any other applications or
	// files on the node.
	// Unlike `disk.indices`, this metric does not double-count disk space for
	// hard-linked files.
	DiskUsed ByteSize `json:"disk.used,omitempty"`
	// Host Network host for the node. Set using the `network.host` setting.
	Host string `json:"host,omitempty"`
	// Ip IP address and port for the node.
	Ip string `json:"ip,omitempty"`
	// Node Name for the node. Set using the `node.name` setting.
	Node *string `json:"node,omitempty"`
	// Shards Number of primary and replica shards assigned to the node.
	Shards *string `json:"shards,omitempty"`
}
// UnmarshalJSON decodes an AllocationRecord from a token stream, accepting
// both the long column names and the abbreviated aliases emitted by the
// cat allocation API. Unknown keys (and their values) are skipped.
func (s *AllocationRecord) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))

	// decodeString reads the next value raw and unquotes it, falling back
	// to the raw text when it is not a quoted JSON string.
	decodeString := func() (*string, error) {
		var raw json.RawMessage
		if err := dec.Decode(&raw); err != nil {
			return nil, err
		}
		v := string(raw)
		if unquoted, uerr := strconv.Unquote(v); uerr == nil {
			v = unquoted
		}
		return &v, nil
	}

	for {
		tok, err := dec.Token()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}

		switch tok {
		case "disk.avail", "da", "diskAvail":
			if err := dec.Decode(&s.DiskAvail); err != nil {
				return err
			}
		case "disk.indices", "di", "diskIndices":
			if err := dec.Decode(&s.DiskIndices); err != nil {
				return err
			}
		case "disk.percent", "dp", "diskPercent":
			if err := dec.Decode(&s.DiskPercent); err != nil {
				return err
			}
		case "disk.total", "dt", "diskTotal":
			if err := dec.Decode(&s.DiskTotal); err != nil {
				return err
			}
		case "disk.used", "du", "diskUsed":
			if err := dec.Decode(&s.DiskUsed); err != nil {
				return err
			}
		case "host", "h":
			if err := dec.Decode(&s.Host); err != nil {
				return err
			}
		case "ip":
			if err := dec.Decode(&s.Ip); err != nil {
				return err
			}
		case "node", "n":
			v, err := decodeString()
			if err != nil {
				return err
			}
			s.Node = v
		case "shards", "s":
			v, err := decodeString()
			if err != nil {
				return err
			}
			s.Shards = v
		}
	}
}
// NewAllocationRecord returns a zero-valued AllocationRecord ready for use.
func NewAllocationRecord() *AllocationRecord {
	return &AllocationRecord{}
}
|
package main
import (
"flag"
"github.com/fighterlyt/file2go/compress"
)
// Command-line configuration, registered as flags in init below.
var (
	file           = ""           // input data file path (-file)
	packageName    = ""           // package name for the generated source (-package)
	targetFileName = "file2go.go" // output file name (-target)
)
// init registers the converter's command-line flags.
// (Reformatted to be gofmt-clean: spaces after the comma-separated arguments.)
func init() {
	flag.StringVar(&file, "file", file, "数据文件名")
	flag.StringVar(&packageName, "package", packageName, "包名")
	flag.StringVar(&targetFileName, "target", targetFileName, "目标文件名")
}
// main parses the flags and converts the configured input file into a Go
// source file, aborting with a panic on failure.
func main() {
	flag.Parse()
	err := compress.NewModel(file, packageName, targetFileName)
	if err != nil {
		panic(err.Error())
	}
}
|
package export
import (
"encoding/json"
"io"
"os"
"github.com/Zenika/marcel/api/db"
)
// export opens the database read-only, fetches a data set via fetch, and
// JSON-encodes it to outputFile (or stdout when outputFile is empty).
//
// Improvement: the Close error of a created output file is now propagated,
// so write failures flushed at close time are no longer silently dropped.
func export(fetch func() (interface{}, error), outputFile string, pretty bool) (err error) {
	if err = db.OpenRO(); err != nil {
		return err
	}
	defer db.Close()

	var w io.Writer = os.Stdout // stdout is never closed
	if outputFile != "" {
		f, createErr := os.Create(outputFile)
		if createErr != nil {
			return createErr
		}
		// Propagate Close failures unless an earlier error already won.
		defer func() {
			if closeErr := f.Close(); err == nil {
				err = closeErr
			}
		}()
		w = f
	}

	data, fetchErr := fetch()
	if fetchErr != nil {
		return fetchErr
	}

	encoder := json.NewEncoder(w)
	if pretty {
		encoder.SetIndent("", "  ")
	}
	return encoder.Encode(data)
}
|
package main
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/clearsign"
"golang.org/x/crypto/openpgp/packet"
)
// createRelease scans for Packages files and builds a Release file summary, then signs it with a key.
// Both Packages and Packages.gz files are included and hashed.
//
// Fixes in this revision: a failure to open a Packages file previously was
// only logged and the nil *os.File was then passed to io.Copy (panic); and
// opened files were never closed (`f = nil` does not close), leaking one
// file descriptor per Packages file.
func createRelease(config conf, distro string) error {
	if *verbose {
		log.Printf("Creating release file for \"%s\"", distro)
	}
	workingDirectory := filepath.Join(config.RootRepoPath, "dists", distro)
	outfile, err := os.Create(filepath.Join(workingDirectory, "Release"))
	if err != nil {
		return fmt.Errorf("failed to create Release: %s", err)
	}
	defer outfile.Close()

	currentTime := Now().UTC()
	fmt.Fprintf(outfile, "Suite: %s\n", distro)
	fmt.Fprintf(outfile, "Codename: %s\n", distro)
	fmt.Fprintf(outfile, "Components: %s\n", strings.Join(config.Sections, " "))
	fmt.Fprintf(outfile, "Architectures: %s\n", strings.Join(config.SupportArch, " "))
	fmt.Fprintf(outfile, "Date: %s\n", currentTime.Format("Mon, 02 Jan 2006 15:04:05 UTC"))

	var md5Sums strings.Builder
	var sha1Sums strings.Builder
	var sha256Sums strings.Builder

	err = filepath.Walk(workingDirectory, func(path string, file os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !strings.HasSuffix(path, "Packages.gz") && !strings.HasSuffix(path, "Packages") {
			return nil
		}
		var (
			md5hash    = md5.New()
			sha1hash   = sha1.New()
			sha256hash = sha256.New()
		)
		relPath, _ := filepath.Rel(workingDirectory, path)
		spath := filepath.ToSlash(relPath)
		f, err := os.Open(path)
		if err != nil {
			// Previously only logged, which led to hashing a nil reader.
			return fmt.Errorf("Error opening the Packages file for reading: %s", err)
		}
		_, err = io.Copy(io.MultiWriter(md5hash, sha1hash, sha256hash), f)
		f.Close() // close promptly; the walk may visit many files
		if err != nil {
			return fmt.Errorf("Error hashing file for Release list: %s", err)
		}
		fmt.Fprintf(&md5Sums, " %s %d %s\n",
			hex.EncodeToString(md5hash.Sum(nil)),
			file.Size(), spath)
		fmt.Fprintf(&sha1Sums, " %s %d %s\n",
			hex.EncodeToString(sha1hash.Sum(nil)),
			file.Size(), spath)
		fmt.Fprintf(&sha256Sums, " %s %d %s\n",
			hex.EncodeToString(sha256hash.Sum(nil)),
			file.Size(), spath)
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error scanning for Packages files: %s", err)
	}

	outfile.WriteString("MD5Sum:\n")
	outfile.WriteString(md5Sums.String())
	outfile.WriteString("SHA1:\n")
	outfile.WriteString(sha1Sums.String())
	outfile.WriteString("SHA256:\n")
	outfile.WriteString(sha256Sums.String())

	if err = signRelease(config, outfile.Name()); err != nil {
		return fmt.Errorf("Error signing Release file: %s", err)
	}
	return nil
}
// signRelease takes the path to an existing Release file, and signs it with the configured private key.
// Both Release.gpg (detached signature) and InRelease (inline signature) will be generated, in order to
// ensure maximum compatibility.
//
// Fixes in this revision: the opened Release file was never closed (fd leak),
// and the errors from Seek, io.Copy and the clearsign writer's Close (which
// flushes the signature) were silently discarded.
func signRelease(config conf, filename string) error {
	if *verbose {
		log.Printf("Signing release file \"%s\"", filename)
	}
	entity := createEntityFromPrivateKey(config.PrivateKey)
	workingDirectory := filepath.Dir(filename)

	releaseFile, err := os.Open(filename)
	if err != nil {
		return fmt.Errorf("Error opening Release file (%s) for writing: %s", filename, err)
	}
	defer releaseFile.Close()

	// Detached signature: Release.gpg
	releaseGpg, err := os.Create(filepath.Join(workingDirectory, "Release.gpg"))
	if err != nil {
		return fmt.Errorf("Error creating Release.gpg file for writing: %s", err)
	}
	defer releaseGpg.Close()
	if err = openpgp.ArmoredDetachSign(releaseGpg, entity, releaseFile, nil); err != nil {
		return fmt.Errorf("Error writing signature to Release.gpg file: %s", err)
	}

	// Rewind so the inline signer re-reads the file from the start.
	if _, err = releaseFile.Seek(0, 0); err != nil {
		return fmt.Errorf("Error rewinding Release file: %s", err)
	}

	// Inline signature: InRelease
	inlineRelease, err := os.Create(filepath.Join(workingDirectory, "InRelease"))
	if err != nil {
		return fmt.Errorf("Error creating InRelease file for writing: %s", err)
	}
	defer inlineRelease.Close()
	writer, err := clearsign.Encode(inlineRelease, entity.PrivateKey, nil)
	if err != nil {
		return fmt.Errorf("Error signing InRelease file : %s", err)
	}
	if _, err = io.Copy(writer, releaseFile); err != nil {
		return fmt.Errorf("Error writing InRelease content: %s", err)
	}
	// Close flushes the clearsign trailer; its error must not be dropped.
	if err = writer.Close(); err != nil {
		return fmt.Errorf("Error finalizing InRelease signature: %s", err)
	}
	return nil
}
// createKeyPair generates a new OpenPGP Entity with the provided name, comment and email.
// The keys are returned as ASCII Armor encoded strings, ready to write to files.
//
// Fixes in this revision: the errors from SerializePrivate, Serialize and the
// armor writers' Close calls were ignored, which could silently yield
// truncated or empty key material.
func createKeyPair(name, comment, email string) (*openpgp.Entity, string, string) {
	entity, err := openpgp.NewEntity(name, comment, email, nil)
	if err != nil {
		log.Fatalf("Error creating openpgp entity: %s", err)
	}
	serializedPrivateEntity := bytes.NewBuffer(nil)
	if err := entity.SerializePrivate(serializedPrivateEntity, nil); err != nil {
		log.Fatalf("Error serializing private key: %s", err)
	}
	serializedEntity := bytes.NewBuffer(nil)
	if err := entity.Serialize(serializedEntity); err != nil {
		log.Fatalf("Error serializing public key: %s", err)
	}

	headers := map[string]string{"Version": "GnuPG v1"}

	// Armor-encode the public key.
	buf := bytes.NewBuffer(nil)
	w, err := armor.Encode(buf, openpgp.PublicKeyType, headers)
	if err != nil {
		log.Fatal(err)
	}
	_, err = w.Write(serializedEntity.Bytes())
	if err != nil {
		log.Fatalf("Error encoding public key: %s", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("Error closing public key armor: %s", err)
	}
	publicKey := buf.String()

	// Armor-encode the private key.
	buf = bytes.NewBuffer(nil)
	w, err = armor.Encode(buf, openpgp.PrivateKeyType, headers)
	if err != nil {
		log.Fatal(err)
	}
	_, err = w.Write(serializedPrivateEntity.Bytes())
	if err != nil {
		log.Fatalf("Error encoding private key: %s", err)
	}
	if err := w.Close(); err != nil {
		log.Fatalf("Error closing private key armor: %s", err)
	}
	privateKey := buf.String()

	return entity, publicKey, privateKey
}
// createEntityFromPrivateKey creates a new OpenPGP Entity objects from the provided private key path.
// The key should be in ASCII Armour format.
// The returned entity can be used to sign files - the public key / identity is not needed.
func createEntityFromPrivateKey(privateKeyPath string) *openpgp.Entity {
	keyFile, err := os.Open(privateKeyPath)
	if err != nil {
		log.Fatalf("Error opening private key file: %s", err)
	}
	defer keyFile.Close()

	block, err := armor.Decode(keyFile)
	if err != nil {
		log.Fatalf("Error decoding private key data: %s", err)
	}
	if block.Type != openpgp.PrivateKeyType {
		log.Fatalf("Invalid private key type %s", block.Type)
	}

	pkt, err := packet.NewReader(block.Body).Next()
	if err != nil {
		log.Fatalf("Error reading private key data: %s", err)
	}
	key, ok := pkt.(*packet.PrivateKey)
	if !ok {
		log.Fatalf("Error parsing private key")
	}
	return &openpgp.Entity{PrivateKey: key}
}
// createKeyHandler generates a new public and private key pair, and writes them out to workingDirectory.
//
// Fix in this revision: WriteString errors were previously ignored, so a
// partially written key file could go unnoticed; fail fast instead.
func createKeyHandler(workingDirectory, name, email string) {
	_, publicKey, privateKey := createKeyPair(name, "Generated by deb-simple", email)
	pubFile, err := os.Create(filepath.Join(workingDirectory, "public.key"))
	if err != nil {
		log.Fatalf("Could not open public key file for writing: %s", err)
	}
	defer pubFile.Close()
	if _, err := pubFile.WriteString(publicKey); err != nil {
		log.Fatalf("Could not write public key file: %s", err)
	}
	priFile, err := os.Create(filepath.Join(workingDirectory, "private.key"))
	if err != nil {
		log.Fatalf("Could not open private key file for writing: %s", err)
	}
	defer priFile.Close()
	if _, err := priFile.WriteString(privateKey); err != nil {
		log.Fatalf("Could not write private key file: %s", err)
	}
}
|
package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
"net/http"
)
// DB is the process-wide gorm handle, assigned by initMySQL and closed in main.
var (
	DB *gorm.DB
)
// initMySQL opens the MySQL connection described by the hard-coded DSN and
// verifies it with a ping. (Reformatted to be gofmt-clean.)
//
// NOTE(review): credentials and host are hard-coded — presumably a local
// development database; confirm before any wider use.
func initMySQL() (err error) {
	dsn := "root:123456@tcp(192.168.99.100:13306)/db1?charset=utf8mb4&parseTime=True&loc=Local"
	DB, err = gorm.Open("mysql", dsn)
	if err != nil {
		return
	}
	// Ensure the connection is actually reachable, not just configured.
	return DB.DB().Ping()
}
// Todo is the model for a single to-do item, serialized to/from JSON and
// migrated into the database by gorm.
type Todo struct {
	ID     int    `json:"id"`
	Title  string `json:"title"`
	Status bool   `json:"status"`
}
// main connects to the database, migrates the Todo model, registers the HTTP
// routes and starts the server.
//
// Fixes in this revision: gofmt-clean formatting, and the error returned by
// r.Run is no longer silently dropped.
func main() {
	// Create the database first: CREATE DATABASE bubble;
	// Then connect to it.
	err := initMySQL()
	if err != nil {
		panic(err)
	}
	fmt.Println("数据库连接成功..........")
	// Bind the model to the database schema.
	DB.AutoMigrate(&Todo{})
	defer DB.Close() // close the database connection when the program exits

	r := gin.Default()
	// Tell gin where to find static files.
	r.Static("/static", "static")
	// Load the template files.
	r.LoadHTMLGlob("templates/*")
	// Render the index page.
	r.GET("/", func(c *gin.Context) {
		c.HTML(http.StatusOK, "index.html", nil)
	})

	// v1 API group.
	v1Group := r.Group("v1")
	{
		// To-do items.
		// Create: the front-end submits a new item to this endpoint.
		v1Group.POST("/todo", func(c *gin.Context) {
			// 1. read the submitted data from the request
			// 2. store it in the database
		})
		// Read.
		// 1. list all to-do items
		v1Group.GET("/todo", func(c *gin.Context) {
		})
		// 2. fetch a single to-do item
		v1Group.GET("/todo/:id", func(c *gin.Context) {
		})
		// Update a single to-do item.
		v1Group.PUT("/todo/:id", func(c *gin.Context) {
		})
		// Delete a to-do item.
		v1Group.DELETE("/todo/:id", func(c *gin.Context) {
		})
	}

	// Run returns only on failure; previously its error was discarded.
	if err := r.Run(":8080"); err != nil {
		panic(err)
	}
}
|
package main
import (
"flag"
"github.com/a8uhnf/suich/cmd"
)
// main parses the command-line flags and runs the root command, panicking
// if execution fails.
func main() {
	flag.Parse()
	root := cmd.RootCmd()
	err := root.Execute()
	if err != nil {
		panic(err)
	}
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Copyright 2011 ThePiachu. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bitecdsa
import (
"crypto/rand"
"encoding/base64"
"math/big"
"testing"
"github.com/njones/bitcoin-crypto/bitelliptic"
)
// testKeyGeneration generates a key on curve c and checks that the resulting
// public key actually lies on the curve. tag labels failures per curve.
func testKeyGeneration(t *testing.T, c *bitelliptic.BitCurve, tag string) {
	priv, err := GenerateKey(c, rand.Reader)
	if err != nil {
		t.Errorf("%s: error: %s", tag, err)
		return
	}
	pub := priv.PublicKey
	if !c.IsOnCurve(pub.X, pub.Y) {
		t.Errorf("%s: public key invalid: %s", tag, err)
	}
}
// TestKeyGeneration checks key generation on S256 always, and on the smaller
// curves only when not running in -short mode.
func TestKeyGeneration(t *testing.T) {
	testKeyGeneration(t, bitelliptic.S256(), "S256")
	if testing.Short() {
		return
	}
	for _, tc := range []struct {
		curve *bitelliptic.BitCurve
		tag   string
	}{
		{bitelliptic.S160(), "S160"},
		{bitelliptic.S192(), "S192"},
		{bitelliptic.S224(), "S224"},
	} {
		testKeyGeneration(t, tc.curve, tc.tag)
	}
}
// testSignAndVerify signs a fixed message with a fresh key on curve c,
// checks that the signature verifies, and that it stops verifying once the
// message is tampered with.
//
// Fix in this revision: the error from GenerateKey was previously discarded
// with `_`, so a generation failure surfaced as a nil-pointer crash below.
func testSignAndVerify(t *testing.T, c *bitelliptic.BitCurve, tag string) {
	priv, err := GenerateKey(c, rand.Reader)
	if err != nil {
		t.Errorf("%s: error generating key: %s", tag, err)
		return
	}
	hashed := []byte("testing")
	r, s, err := Sign(rand.Reader, priv, hashed)
	if err != nil {
		t.Errorf("%s: error signing: %s", tag, err)
		return
	}
	if !Verify(&priv.PublicKey, hashed, r, s) {
		t.Errorf("%s: Verify failed", tag)
	}
	// Flip a bit so verification must now fail.
	hashed[0] ^= 0xff
	if Verify(&priv.PublicKey, hashed, r, s) {
		t.Errorf("%s: Verify always works!", tag)
	}
}
// TestSignAndVerify checks sign/verify on S256 always, and on the smaller
// curves only when not running in -short mode.
func TestSignAndVerify(t *testing.T) {
	testSignAndVerify(t, bitelliptic.S256(), "S256")
	if testing.Short() {
		return
	}
	for _, tc := range []struct {
		curve *bitelliptic.BitCurve
		tag   string
	}{
		{bitelliptic.S160(), "S160"},
		{bitelliptic.S192(), "S192"},
		{bitelliptic.S224(), "S224"},
	} {
		testSignAndVerify(t, tc.curve, tc.tag)
	}
}
// fromHex parses s as a base-16 integer, panicking on malformed input.
// It is intended for decoding the hard-coded test vectors below.
func fromHex(s string) *big.Int {
	v := new(big.Int)
	if _, ok := v.SetString(s, 16); !ok {
		panic("bad hex")
	}
	return v
}
// These test vectors were generated with OpenSSL using vectorgen.rb.
// Each entry pairs a digest and public key with a signature; ok records
// whether that signature is expected to verify.
var testVectors = []struct {
	hash   string // base64-encoded digest that was signed
	Qx, Qy string // public key coordinates, hex-encoded
	r, s   string // signature components, hex-encoded
	ok     bool   // expected Verify result
}{
	{
		"rWO/YB4Ur9u5yXRkOC0QrRmutzuGReCslws/wgt3uaE=",
		"75180709339c672ffb8db4fe8ca27a0603b2c85a4460371f7bd69618000935e6",
		"54e67cde188fcd0e3ade6b94422834505e17ba44c98b945f80596ed4c5d57ff1",
		"acbd37fb3876e5580556855f51b158b7462069283c15e337b47562d9246b7cf9",
		"117ff1e940b28f3add88d138f7c4b67145c508019c4fa0bf5b2fdbf622db6226",
		true,
	},
	{
		"MhMQsu+hkicpORVDX+liavMDRvH2IBidH9Z1UO77GWk=",
		"2a81f04c0ed31850f7701c72179b4f0cf5f438705c821c2c340775f31589694d",
		"9c7e176c645de82d00e80a6980eda165b6840ad4fb66f305f0994f299dee83cc",
		"3f154d5d2214958197fa777c7aeb114c29ce3c9d1978e08413b72a56476317c6",
		"b79b8da65ec1f31607de2d5697c4d8e4460c34bcf0b595979ee8886a7a22490f",
		true,
	},
	{
		"mPVLf1SunM/ffOxrFaC+NV5dHbaHDeFJy0ViI3bSddw=",
		"b3b5e83a406478b26ff5a051286b9295c7bb11e350a75806c7e21fd067dd3caa",
		"404da2f031a5e1697d7e3550db992b3f1093b1cfb26a43ac1136d1e34175520c",
		"7334611111a2df05bcc8955ea12eda1187be693d59977aef8a537e83a7cf0228",
		"227a8054c147a5b89c88cd524a4b81da8f07457add995e35fa5ff02a96b01dbb",
		true,
	},
	{
		"2MNMltlFPVVmpqOx524b7l0W+aRcHtokKZSKBA2/8z8=",
		"51bc15c20131a92f3161be567dcde6675b866d11e6d64f9094f93ec91536136d",
		"69f6d1b74743e1857d02cd720e4378822e0598485ce65422d27b0090f7276c64",
		"2225f7e1dfb852989e8ab085afcc3a4941e10e73fdd31b06b4cdf315ee982980",
		"d1bc806b74a85232b3cd843595a201d6910e6a370faf639657b45e5e7dd06d68",
		true,
	},
	{
		"05oUbaIeMArcXpe2Q4U6s5UfcmyoluHoRdZb0H0LOP8=",
		"7653996b68c20e98e32f1f062bf66c4d906f4573fba5ba317d614f886535d4fe",
		"bf9e3f48523b2d49962c5cc29f67eb4ddf6de6afebda0d777b10a1bc94b58cee",
		"8f42e09fc02c7caa7471d1fb010b99ab6d0fdc173dcb546d8014eace25c6b4e0",
		"3c951d468ba26c59d3c3008310b46c5986b1e6fc21c030e5fd5824d677e76ea3",
		true,
	},
	{
		"XQKZz3vOw9t76d102XJty/1gIeMr4jxoV0vakfB64hg=",
		"54e298ad4bbe26e1935eacb057e621b0cf496a11ca2ee485d56567c631989978",
		"a96039cee81be6fc95c4d4c5f6f0c69f48ce4be56d0156ee694f7bdaef3ee0cd",
		"6ce6c3cc8ead129d0da35d378da07fa86330993fd3f166b3fccf8c9ea9067af0",
		"c481fd7360ceef79357f5beefcedc21fa6b8aa3fb8eea6c7a13b18d8b679256c",
		true,
	},
	{
		"UZ/P1J1K+64bLnNc+K5JSxZX5ZupRSaxMLI0JedXdRs=",
		"1b994a0fdecff7cf3a4392743727a3a74be726f4d7c02224d0fad910f1714ee2",
		"8b3b37234126c6764fbebbb45adb9be0e13a08b3e30659232b54bd73d0dd9508",
		"8ca7216aaa8ff2ccc101b6a15710fe273a41a04d5a43360c7d6a889c9f71fb78",
		"44a74dd5c2b39257308fbc36f83291a8748de26e3cf42d0533e4348a58d4a899",
		true,
	},
	{
		"U16DczVy8AqDWnFvJGVPfLM8UZL6WHMfkIjeiRGCRSY=",
		"77f6e0d2a20c161b8788a18dfd77662dfb60e3cd408b74591f48acb6dac2d7d4",
		"5557af02ad7c227e71639f00c08ec34ab94d2877c9089701b619f23789334f0d",
		"3b75c6d5124b2e71c8bea53acce1bf7587b00a68705571f609c33547f696f469",
		"b6024e1ff27e2775690b37f18e74c8e3845226557bb3b40be0206a6c64fd928b",
		true,
	},
	{
		"7vXxpKXjgoeuGYrSSZsemM8pLehnB22kR8He88iyGEM=",
		"0d6b65ac0699fab1c22f9fdfbb63b540d692aa6a0e6127e495af55d11d49d60e",
		"214b168e985cd03acc0c7e0c79b8cefea0f57fd482b042bbea898ab9dadb69fe",
		"86af531e21cb21bb26a460d48f0859865e13d421018607983827d18e6dac9115",
		"11de053699e0ab61c09840e7f2f2b994bd3a642246852e6d781303fb3cde0f21",
		true,
	},
	{
		"Ys16vX2HP0xLQ/qs/B0ibIG74OklkW7HGHFBt+Jrars=",
		"1dcb5989e0ea5dafe23c20f16fcbd5304f0111d1b2d9787bc8e27e4b309df58b",
		"6c7fd68e517bc301e028b9b4b857a0c0c17dcab5091dda7cccebe41b023bab58",
		"b91bfb1e04f142359e204c547d5d9d0103b3242f22481266c0dd2d0f550afce2",
		"f27b01154367ada77ef0864159f67e5d8a8b9c147c109cc7dbb58edf4d31fb3d",
		true,
	},
	{
		"WQ6cVAnMSU1EY/TqxPlEf6NiukEqwg6I9SQawOafdfA=",
		"434709b03bc6a4f85e41b36cb363fcffacfa6004cd88319c4f198dfb55817c9d",
		"a328f6f34135070008db8627a2bcc059d2d22f94ab997f324efd8ed7b46eeca9",
		"0ec1168e5492a30c6d7416728fbff65e09d8a5604cccd300ce78a03f3bb6a819",
		"e67a8c628a62c9d8c290ad80d720c301099eac6980d70d460b83496fd3897880",
		false,
	},
	{
		"HKl7FWfgW6Ak94ul+d0h2EBoWMrmQ+UvObs02G253fI=",
		"6fe145fd43ce35776a7a100572dce5a56ed99e0d016e9a3492001f07e3b94bb4",
		"51f409be7a06f45a5b50b968a074475261d2c295a6361f9fba7938d84d98a07f",
		"6bf5928119b50383d5a0827246c54332e5195a16f67c7d0d0395785e8ec6b37a",
		"9893394f8b8d72d83e170a6fc75f6cab3014023ffb9dba78132d07fff303aac9",
		false,
	},
	{
		"HMiKpuZ77GXybslRjcXVGQeZtsXoO+43RXYOfZNqaco=",
		"df869c02298562f61daf77b14ec61d9846bee5b3a0ad889ba7579bcea654d930",
		"a90586b5f7edac6bfd36082f1caa75b9ad6c5b14608c37292f291018de2459aa",
		"3a63a928dd7ae2a893375a5ff59ceeb514af4f50cc59403ed1ce678fbb740678",
		"72dbfd3ed248ec227308d84c1c7b51ae3d45e1f56ff9da6a4469f5a75353a20e",
		false,
	},
	{
		"I9yE0WBaIWJi1b30cXLnCU6f9ZHpZYnqu/iexCSOAis=",
		"6b12278a10906d7a7bdea6f4acd27b8aab7aff741291e2c0936a7e9195a139ea",
		"2eb90f005a796cb9e4ac916eb9ada784b0a6e7f00b400a4d1112163d18ce642c",
		"78825efc06632ce1e8c18740e7c890e43874d409f4990ebbf574157724166b24",
		"8fbc10a9cffd6f51da9a1b3dca808575d7b5848ba5744fab85bfed9722617008",
		false,
	},
	{
		"zdwUrQPYBZBYiRTFHkpT7m5Y2g9dmIOEUOeg1UT8Q3E=",
		"14860c07172ee7bbfedefe36a88732a3b6b0c881b9267b06c1ddad3df0e74cc0",
		"c02612cdf33db35ed3a396c461f3fab0c0a02fda8377aed94927315fbf37f3fb",
		"51abd6dc6a5128640cfaece311444c81758317806ebcd5c7cd5e4be87939aa48",
		"ca9aeadc7ab478de83fa4ed7e7462340c7cb85292b077db0b553ebd6afa8e27d",
		false,
	},
	{
		"DRyVx9Nh8TzP0Dvf6mWg49PzVDu1lFzgvplIeOyCryI=",
		"e11380da9773efa620793e250e3aae5f35968e752f869b268b4a85a840051012",
		"8a22802d8bd396c442d82f8143c85c2a2f0f38fa9f4c7abd020b24c4e60d7592",
		"dc499e27300a12a0c64a682a921c7b493f025734f14404cb87b6b16afa73fcba",
		"334b81cae281a0d255ec32174a2c71960a45bf7dbc83aa063da476ea08e47ad0",
		false,
	},
	{
		"A4gPHKhpDW6TfM+iqTs9ERCMEH1D4JWDp/ikIXx+8DE=",
		"5d72e49d777228163a02ab7a44627ccfeed65a539db2d10b9b870244a101c56b",
		"15e4e579a75bc535c4597292b4c4e7f465681b8b5e1cdfb7e2832a3340984628",
		"a035ad65506baa9dd031fdeea7fbf8c2561cf8b078c488d1d2e5a953bc1dd9fc",
		"5c0f7f2b250d3ec79b200f2f8ee2e1d07b55a8d8f1ba7cf1f97634a6463c6676",
		false,
	},
	{
		"O9VfPlCHlELFnfyk3XyWe61sqrWFdIjhX/9YKuHcpc0=",
		"160a5c3e40c612755aa92c7bafa2ec0d30beaf3c5e8502357598ef1ff5a30d39",
		"ec0157612f8bc12fd3401a623d74cf995474652b02d9157bf6a516e28e581b2d",
		"753af6d5ef6bc9b619a65c935abc10ca9669fa36fc0f04eb0d770cfa9851bb53",
		"cde3845f37da3b48a5d73bc4da9721e0420ffdc90fcff3d4e6327741ce489d67",
		false,
	},
	{
		"mslv4d71bz33jMEVkhYCYZambPkF1AGl1XeKzAqUHdI=",
		"177fac6032f5e7943887c649f2d1d644e46fe9a4855deff3dbee7501658eebb9",
		"b936a5174434aa416190f3b934d33517560a1e9986ca2c6fdd30988425090e62",
		"e85f7bc9d1f497e70b24a950fa1247fa45b4abc125445de96499f511298d7f1b",
		"b86ad799a14a580074c3be6f04947d71fcf0fe65e325dc601a9ea4a5f05e722f",
		false,
	},
	{
		"jHOF5JG6NDz3qvXxc7BMFvQnuaDFu6w2WNOzrVS3c5s=",
		"1cab54e51f4f0a29bdb6469e5991db6808ec9feee87c390f23850f45dbb46cde",
		"4eb3a2ba5d5c3f6f5236e46f01d4c1f6e7a6bb1d28925d5a4ee4b4c39198b481",
		"efebf536c797cf0544f604590c9308785ea01b9ef9383f037dde205dcc19abed",
		"f7c8463a43b722b11bfff0043e4c4b4da3b4a7a3d532b1a0b2e7492869ecf877",
		false,
	},
}
// These test vectors were generated with OpenSSL using vectorgen.rb
var testVectors224 = []struct {
hash string
Qx, Qy string
r, s string
ok bool
}{
{
"UuKml8aOeeE2urmZDcpnKS3Jt+wU06WL1I95JTTLF8o=",
"d08c21db30c52e907c48498266c3bb1b266d3534886f4c9c88abefd6",
"712b2a602ab7df8aae81d8350bd02591d1aa9fc9e4ab1319c828cc80",
"de2eddb766aa9e58aa043f2213a71027413a638d55c7c5fb952d1dc6",
"67a107dc70d7a00581cc7b3dc459ba54d259a9703e1082e9ed0762ba",
true,
},
{
"h1uIWz5niypHBwA+exlAo630wMydGEv7wCVD3JeRpEM=",
"d64f191ba1e94ba1b3fa2bcc25035c97ff2a95409597883aa020451b",
"026099d210dcd2a28537054883753ce082484541a15d8810dff94c66",
"ee3bae46f6119c138df772b587d3c79b91886689ee1cce170508a8ba",
"dfd75542bbdbabab1ce145f6c6f4814e775edb6869e4b13bd8568595",
true,
},
{
"Oh2UE1NDfVXyyU6syFIoGfBNmGwZtIODaM3/4lMqbzA=",
"c429c52939d2b813b1393c633d321ad0f50a29aa966f51040d05acfb",
"76b53d4476bac4b2347cb29aeacfb9da35a6d9cdd8526eeb5f171a2a",
"a9bb6562076d10652e4acd61bc4b05ca437e03653ba046c056333101",
"24cacf46d60e0f024902113e9eaeecb832e6891af5b963ab40e26554",
true,
},
{
"Z/LUphX+3CnxhKC7WSiuRzpYw0vyF4WROqWI3jkqOeg=",
"1ec944787c2468014feceed664a2b7ffcd72e7e9d0598469342083f9",
"319b5b6be3bef5751f90377965e7641a660976b05ea000696ec16b9e",
"f9a8b817351c65c3ef4acb01bdff96caf18e5806f01004cf58f1a3e6",
"e55e168d9f7fc6879d7fdd6bfd92d81ca996f52ba383f4045402508c",
true,
},
{
"kra7yS1U5C642EV3SLISkmmynnNDgHAeJhHMXc8uZMc=",
"85bf8fed26c46a79f4616cb9ac130285d70396ab662ba8c585909e39",
"4f2771e2d05fa4c148355a47a30618c958b0f42189843bf076bf4d24",
"cecc2d251e433421b1cf98061ca6132002fa995cb8b564b003d8becf",
"5e23f12ce90d6167ca034f6cf162dde6a0294eaf5fa2b2e3fd4b6a1c",
true,
},
{
"IFYjqmzcKTdilUKTi8XCisLaZX64l64av4gh8exLIQA=",
"848b5189b3f418f7ba15e18e4a09d60147da3beb6df81e8c16245987",
"19841978aab89f934b41a783e863b35cd228e8fcde7a93916c66f103",
"f35edfe252100757ee47494904b4da82787d914338e9e42534467b28",
"79fd9664baf251906cae69f700c2f120431f46c7be519fbddfaa528a",
true,
},
{
"8xqjsKyNnLCYc7lGDkyyZ5cTeZCm0HC+/Zo5z/Hs9cA=",
"518e8319b6679645c17b23d800ddf26f9f57f6f9f9d8a76d49c472b2",
"e53596f2f7466ae04f8bc4cbbc7e498081f3af90a9ed560fdd769a1f",
"9e1487d13871f290fe0cb0b9b8a79f003a6000b164b839fb190f5765",
"5fded83ef435e846dec78aa30c80bf521d9003c877c2bd4ae433cc7e",
true,
},
{
"xY4Api3GxezZaGdrGV7UIR85Qx9ehA28PN6AWCVGPwU=",
"316ef682f18769426108a4ee80ca02dd419b438b89717b09acf6ea01",
"84fff5821610010cc0b9e3ec2744a9f60352bf07fe2c5b93522a5dce",
"a74122597cba74477ac4e32489b928da6b781e8cd33da5ed4ed8d8f9",
"69aca50641e89dd028da863c5d9e7f9b4cc38a87c28891c7c3198e15",
true,
},
{
"bNqZQVOWhDb6i5HZpIA/gF2zpEkB2/9R5NqgHKU+L6o=",
"a27d5a422b778a6d826a332c862685edf8d7cf5d526fcbd0b9f0f4ff",
"d191ec83857cbce055b499eba738a9f383bb01e76496f1a163ddbef2",
"6cae353c1e5b42f61ff79667e622002abe93eb2700446a9bba52e58d",
"ca5a4e8374aaa9fafc242286d08e5b119dd8f4aa20fcac63b698ea17",
true,
},
{
"wRtVGDwbunZDTo+gD4iJGJXazpPM4bDEcL+UmmS9Bz8=",
"57652110df7ee6e3594309796490424cb9df9f28bce9953cbc024982",
"6b5f0cc133b1e2b2d820dc4a6ec5380696841ef7bb9ed7a8dfc439ed",
"bd3c131acc84773d2a9b6da23b83191f66a17d7fcaffe556093e933e",
"968ac0ae52d01059baef154abf3f6868a60c8ac72c1814f424248513",
true,
},
{
"zHfLV4XA+xfAwIk5cyFZiavzN8G4Wjj9OCNo/kgRV/Q=",
"1e249fd0c9b986451d43b42f89021336f5a2728e72affbcc0c37f512",
"24d136c4d9c9303d13088a7b3db0febc5ebd02c4560ff08f1d1b5d5b",
"ebd9f93dcc2347e36d7886bf48700fa492ff13fe1d4f751429983d96",
"867488fc76e0d32bcfa5c8171585ec5571c423b3f28e70057ae6a9d1",
false,
},
{
"fvJ0Yg8q46LnKlSnzLson8g8TC3j+wlvhT3JoYpqfTo=",
"e8ee3171c9de606fec6f2a8adb2d3f5cb1c93acf0b5ce6a04f6cee30",
"8e83207db5b9bbd7f9ac651c7ee4cb9b6187c11a90abfbd5de75521e",
"ae0c7084360104d08ee6a408d09c3542f8c5d9bea5f634de24bb3df2",
"8fe2c249caa69a4653e5b754224e750e666e664c68126237e482e691",
false,
},
{
"62SB5tn+Vcv9u60b3em4Ao+reyBcKXewWOHstA9Bo9Q=",
"064b10c4880da85292a55a622c51be91b29190a55e7733d85d29a17c",
"3816a5df39c25e5f4396ba1cc21119d99184510d2cf08195073fd8ba",
"36a5c8e22accde047269f90e3ea0db20b44d980f327de733be4217f5",
"1a0fc8c5cc20f998f9351c1b7965e8d199e9ec36a92495791071bac2",
false,
},
{
"psm3iAG5lCDNd4wD0J+r01hYkbKT3jUgffYuBuJo668=",
"f862e75d65342ce82b3e5db791b2c538e106a42a2986e6dbb665e3d9",
"9002f61e5ea252cdf7e19741c220233eb4d6e99a6793cf0cf581f1e2",
"9385f393b9ea0d929e25ff0787f223fa718b68d5fdb3a02fac8c6eac",
"752e933122b41d659c12e4922bc354fd9126466544f836d0eefb7d7d",
false,
},
{
"J8SOBOD3ANxiJG6Sy94HgpUK4PS2HoMRVrclCIYOwh8=",
"7bc19896ea7a5e463ea26ac0fc639097698185556ef46f879ffd25ff",
"0c9b5e55747f2dfdf55a86c9916520de36fdefb1b3e5f4c80786c393",
"803770360232f0f3e252836bee4a2a8b7391a83d0f79e6e8a40967b2",
"3271aae7ff9db3030d24aa60c05194b584fa9f24fb2722572974bdb7",
false,
},
{
"pQc8DsZ5JZccHzq4Re6jMl0XhbSxtDlhWbVJuy7Pk8k=",
"f582e64838f4c34b3519607d926c80826c7b045d3c15bbc877689887",
"c36bf1c51c2960cb1cfb88f87bc7759569c2a7bf9ae2f5b84e736c56",
"0a2becb6f6c33b6fa34cd8eb1c00c1bc525c89de779274d7f5156995",
"36236cc0234a6076aa7a1dbbd4f8933a36d38aea231dc0e175ad29b9",
false,
},
{
"Mz4Lq2b0qvMgxG8d2mv512PM0c9M1aSCxoi4rVZ8Gs0=",
"d76592923ee51add3dd5131ce34d0f21a8da62bc83891c22f6e62d44",
"7b8df34922f32d7918df2c2b1a05a60e4024a8a3a03c32c5d1be32e3",
"69e7a2f7c9f890f013711792ebdd4783403062e19aca006185de2307",
"20379cbe3d7054df9721734be93003c76713bd836931e75ba6b34788",
false,
},
{
"NN7r4K+UiXAcbHvrOmyYlajr+D9yMZLBCDIqIwtJNXY=",
"a999aa5db63d7a045f3a6897201dbdc00a86308079060574263f484d",
"0fb60c3e518c662b2663ac5261464349936aa118b2655de9dc4b3176",
"bf8dc8e6fe8ff96ac6a817436ff5abc050161389cec54547f6207084",
"8f17e024580a117080ed4fc2b7e01f3d7f45710d5ad9a9dc8fef3c96",
false,
},
{
"Ns9u7Vtibnlk5Txz63F+yE51J2/5jkAQLpoxE//vRTs=",
"55385a4198dc2470cc3f9a2ddcc9e8f4bb9ed3de5907c5e01a255dbb",
"bbee8d47fb31d31808aee7c792b9c489b49e184faa0e89cda4f6ecbc",
"0fd7c4f8c1e51bc517e093a0513b65adb640498775850ea402bb2587",
"e70af5385e62b9fe50ae855b788d56f5312d18d55fd2ab91c73ede3e",
false,
},
{
"l5Rb4iq5lffTHK2T4fOumrdu5OFlO33pQVcxhwPuUo0=",
"bc1646fb050c1d463bfb01e3cba9921163eb17a6599d5650cad76528",
"ed82f5339909463540b96629e6ac20e863f72108cdbbcba2df8a8dd3",
"322692672ddcc36680478b080b1ebd67afb462f4293c81c1c5dd250b",
"d30471c3cbb18fe3fbd60c4cdec73c9675e8a2b42c753fd128240c4c",
false,
},
{ // From bouncycastle
"MA3Ya9lPN6+gKqIgKY88Ih0P5LqooQMncGjjuQ==",
"dc57c400279eff3debaa3b2069d7f1a52cae0e243d762365a53f988b",
"e57ad223fe9ff5d9a481ec57177aa213fd2f74ef443d70c06b0946d8",
"76ca6d809377918baa397f29e9c94bb480b49cb093944a0368f868e5",
"7aeff445dad7f5c72ebceed45788d130f0fe105ff1fcedf0fd1bd91b",
true,
},
{ // With SHA224
"iK6Cb+JNpwVtCDbpLpYWrJxmNLD/RwdeIx94Wg==",
"df1425299ab4c1708ca9919b6be979090e55ac4e9588c1aa687e006f",
"3310301eda28577a271eb6bdacd2c7c3285bcb36e266989110c216e3",
"25a774ac280a675febbf0a84f2f63a97f23623d5ca862c5becddc28d",
"1d3e0e7594d25c841761ef8bb5ecd4d471549b0eb3de0c8b02c06787",
true,
},
{
"CYfb3NMhBVOXAatM1+G3NevQV4byA2xjIskmFw==",
"b42d247a54ece53b3dfc2e09978f3a7579474c08d3d5e66f36544ef8",
"d47471fdb5f82048df98545062d55191ad436da0ab4c6e113c92cd75",
"4a3fa9e2dd5b1159c63d49e1e5fb8cd032a290a954a97463cd1c7bdd",
"929b324879fd1c27fecb00c13acdcfd31f883c35e33005d0b77d4876",
true,
},
{
"zPeDGtE2CKlJL3iWX3Up9SBQ5e4ssevsCXdooA==",
"480e0430ba7f560491d877deade67a1aef6d9e217f76de9854f0c472",
"58363df81d673502f9d711d5f9756f17894c6915f988cf74c582309e",
"82647625e44059f5070e98731708d03d3f4e724454d9a23a9f4e1d9c",
"56947439fafedab0cd70ac20a5816f07e3f0bd02a2a9688935491ea9",
true,
},
{
"mWtQV42buoxfhJ7qiuvvfBY0IJgc0IoJXwPGxg==",
"54727ca4098cbdf47a0720804d56572dcbd775be1242a7b9af032e88",
"80e9ff04242c9d48c9cf76ac061e5d7b58fc2a3f3c1b76fe40640e7a",
"e9e570ce5325109598be7e2b8f130d1cdbf53c47e7b4a27157010a3c",
"c51a4419db67f7f1089b3dceb683eec62fec2379320484c9aa51fb2f",
true,
},
{
"hq5QRfajGJY9hHwijEYMfEemzm+HHE2r5VPG+w==",
"499caa429e29600e4ff030436c75b3f293242d4ff3e8ccb7ec10e1b3",
"88a9257c06b8c53fbacfbbd2320a2c655a3723443187be5bfe542ac5",
"83a727aff472073a3721bfb8a02a2c9bd4611bd7039b8e273558b3dc",
"87ebbb5737521a08b1c8485972d3344a3ab586ee38319cd891d21262",
true,
},
{
"73Y8mTjcHFkh67p11Gdv8aRU0UB7jg3uEL6FNA==",
"6ec69cd380e48705109320145491c2bfa6df932edb26aa4f8ee10fca",
"11918788a4a6440025b2fce30fdfd240f32f87d1a6d24ed8beba30d3",
"5313cb9885e529a1960bb02b3bb9033b693c26e5e266a50d18ef03eb",
"70c1ffd185578d87f9f53f46fcd242406daf8c2066db74dde02e2989",
true,
},
{
"fR2a4aSPZpMNgzAhCwD/lmWSDl2uUi6vies/Dw==",
"234314bd2223f2612f7bb53054670b95b6f551107b59615e021f97f5",
"454e9a178f98c46c4e3940b9671ee8c087743ae7a6a846b64fd59ad6",
"93bacf7faa7781a85da85febe5b7c44b9a7d60f50e8b39c3873ba1b4",
"d11a23a798fbdc3fd2af9b1b7d097a09ddd6ee3a2ca345d3dfa34923",
true,
},
{
"WEx0ZrJ2kqHGInDbiS0W94cQtv709JG8a/qBuQ==",
"8ea93be28150a55aefe5fbed613ff94c260a0d0753145dd9e1e46751",
"578ab63fabe4dbfad4726151be599e434b229f1ab242821692d31fa6",
"2a77f32044c4def9a1d2bb3cb4efc641bd3c44451fa05541d80ce384",
"4545711bbc462c96f191e05ae2381f01b05eb27f041f98c8fc52bcf7",
true,
},
{
"LKEDiUeQSqocG2JDHPCztAO6RCeiSYzezUpuyQ==",
"c0820d847500215ecef7f9f423ccb04d5e884edb1d7867b15ec72cef",
"a525064e54cae8911e9fb211ca5c782368aa75b2dc670a106901d764",
"0bd268f34d7c9ebb699c1227f67363401f16399640fa888cd6637708",
"6e8316c4fbe0603b37f30694297137e7bfcdb7508dcbb04737803116",
true,
},
{
"MH+UBz8oWKCgga3JepkYQ3pJmc43riFn1neo9A==",
"a8f8ebca44d7e0997604282792eef7d4e7125a9d95a502e4292dbe56",
"0c652a88aa3e0b0c351853b14f38987daad1ced43e074afa4d7e0add",
"09f16c5e71dd89fb6dd65e510991163ff6f2eabc57363203d73bd6a3",
"552761b036550f835207c2ec4364d259df81ebc23e7060e171dbda1e",
true,
},
{
"jcpCclP/LGV/7M8+3KDp9tXBw4Pw8lrrU4U5yA==",
"27e964152d793e404366f0723ddf88bf0158598e2cbc653e834b7f14",
"054494e937b93c2fab8698f07221a6e375285166cbf08ff4e5176551",
"9066307646ca66b76cacc2a39ca82a9d5ba17d7519d2316782588fd9",
"1f5ed65e8ed433916d724f9151b71024a02f1eb4bcab7530cb018945",
false,
},
{
"k0JNsD/hqOdOc4nFV6/KEP9DFLQ8Js/U9Yg2fA==",
"1935a7a894122c5c81b2b17d9a3c56764cbb1dde32f5ccaaf051a8cd",
"006601a65a6254615d0ab56655f2bdffe7eb9b51c8ccb1777d68bd6a",
"031dcd89299c7ea4a180a1bd8ab37c54d632797df2c843bd99028d7e",
"41b1c59da0e208fab5064f64d817849e6db587f2a999a4effe867735",
false,
},
{
"X3akSPEWTAeZCxgvEksKofbXZeEEs9nOH+VJvA==",
"b369ea70c2c78688fbca324f49f7e842d9a7a48b259f8eae045683f7",
"fdbc8aaa5fefcf2d87917258639292be7b5cd758c73dcc080d89ffa7",
"f8ea023580d144897b318112718b87d4744a664fd8bff9bbe60a7b5c",
"3035e98dfc71ed858503365e36356bd30cd4dd55763b33f66fa48166",
false,
},
{
"Ngsi4+dskyzDr62s3dFGLqNJSLbkylpz7NZUvQ==",
"a98f58bb0177fc84b7f954fcc6107855b77781a214937f475bb122b5",
"a3a647d5a07d1eff710c66f83fe2b7e52df020c9baffece81f0d9efc",
"49074698370764d9fc16926c56f3a3c995f8236291f3f89b30aab2b1",
"44cef38ebd300231d9d55e353ac1ea1968db3329938d1898634d05db",
false,
},
{
"7O44g55hrjN2404WK/EKL0UipdAN9oaItw8xNw==",
"03d92a28c778ec4ae3ff512fbd3be3d9b3a8d0d358a56057af814465",
"5a6a392078d9fc4b80ed3852bd4a4da51bef4bf6b52a49567fed07aa",
"180667348f6f0da7e2f5969efc16de029a2e92287b683f4848f38247",
"b16972f1b1e64efd87d2f36baffd0d1d2cafa0d2777eb8829afe694d",
false,
},
{
"iZMWig9ybYia4aqmy1riQ3WxlVYVQGxMjwzFcw==",
"cb4e0d1997c5085e82ced1455c471ffc59b51d7cc31a9fe2cedf6570",
"2d1c50c8fe407f5f070767e03c06ea16d113f3d8de46711f285c62ec",
"f2da17dda55b5df219e792c354ec70d326dcb73ccd69dca44c363d6f",
"926afcd5e523b426bccc3e86ff30f1544802e338de93b45dcc54745f",
false,
},
{
"8cTgV4In+GNbDovnMZveif/FSPkcKNKvdNQoiA==",
"c04cf770e32f885a83fb438b0289abe661df24e4d6a57741560bd3b0",
"21579a2b53eaaa276bc97fdcaf11244f303bcf1dbb73ff9021b646b2",
"868269282eb994d0a925502400d26830b06c392847fb342446c81c03",
"a7ba7ad9a681fa7bc5c52b946ee5cbb53ccf64762aa026573ec927f4",
false,
},
{
"LeIj5rhwKoO8BIILBcHGhc7hUV3FNTK8jeHT9Q==",
"65003cb5de8b1bbde952d3f9fc9e0a3bd2946ae075ab673d5e32d9ef",
"fea89b37b0dd4319140370f667a0130547ffa7912c653143c6d24336",
"5100f34d400c9d177a1b57e8ad05deef2d6eb1bc1d90a1fbf62077fc",
"a5054f958872dcbd8360f7c6dd79b1a8da89910267a977a78117dc91",
false,
},
{
"gtCRMOIfhcdn+ftzNqSDQIvq/KSPyy6iK9C9iQ==",
"a9862ebdb846906875c482d3c9d0fe3197cec65f8e5544e2afec162c",
"7c02aca3a65a58f8b252e7a39e347d038a5e02106b9ef1b6ce7feb28",
"0c915f9df3a814497df4d0cc479f5b877a3428dfcd38fc38611ba0e8",
"1fee66fa37e933b6e45212244d05ed9e76d8894ebd756f27358bb5c6",
false,
},
{
"ZRKaKoSu1CFzS60oNu4ug59KaKllkVBCJp2y3A==",
"d9ee4f6232c129002176b4a9da2ca740fd52be22a65e15ad461945ba",
"4e1e5b1ee68e27f93f376b83f16cfecfa1fd44a412ebc4751a83e01a",
"53535f882c643f5937607a5e49f44082aa04b33a384e8b46bdedf395",
"6cfccc7304111baa22f48e3a97fe8f9760de72c91d017d525fc7eff5",
false,
},
}
// TestVectors256 verifies the S256 signatures recorded in testVectors,
// stopping after the first vector when -short is set.
func TestVectors256(t *testing.T) {
	for i, test := range testVectors {
		key := PublicKey{
			BitCurve: bitelliptic.S256(),
			X:        fromHex(test.Qx),
			Y:        fromHex(test.Qy),
		}
		digest, _ := base64.StdEncoding.DecodeString(test.hash)
		if Verify(&key, digest, fromHex(test.r), fromHex(test.s)) != test.ok {
			t.Errorf("%d: bad result", i)
		}
		if testing.Short() {
			break
		}
	}
}
// TestVectors224 verifies the S224 signatures recorded in testVectors224,
// stopping after the first vector when -short is set.
func TestVectors224(t *testing.T) {
	for i, test := range testVectors224 {
		key := PublicKey{
			BitCurve: bitelliptic.S224(),
			X:        fromHex(test.Qx),
			Y:        fromHex(test.Qy),
		}
		digest, _ := base64.StdEncoding.DecodeString(test.hash)
		if Verify(&key, digest, fromHex(test.r), fromHex(test.s)) != test.ok {
			t.Errorf("%d: bad result", i)
		}
		if testing.Short() {
			break
		}
	}
}
// BenchmarkVerify measures signature verification using the first S256
// test vector; setup is excluded from the timed region.
func BenchmarkVerify(b *testing.B) {
	b.StopTimer()
	vec := testVectors[0]
	key := &PublicKey{
		BitCurve: bitelliptic.S256(),
		X:        fromHex(vec.Qx),
		Y:        fromHex(vec.Qy),
	}
	digest, _ := base64.StdEncoding.DecodeString(vec.hash)
	sigR, sigS := fromHex(vec.r), fromHex(vec.s)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		Verify(key, digest, sigR, sigS)
	}
}
// BenchmarkSign measures signing a fixed message with a freshly generated
// S256 key; key generation is excluded from the timed region.
func BenchmarkSign(b *testing.B) {
	b.StopTimer()
	key, _ := GenerateKey(bitelliptic.S256(), rand.Reader)
	msg := []byte("testing")
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		Sign(rand.Reader, key, msg)
	}
}
|
package main
import (
"fmt"
"math/rand"
"os"
"os/exec"
"time"
)
const (
	// width is the number of cells per row (columns).
	width = 80
	// height is the number of rows in the universe.
	height = 15
)

// Universe is a type which holds a 2d field of cells, indexed as [row][column].
// Each cell will be either dead(false) or alive(true).
type Universe [][]bool
// NewUniverse allocates an all-dead Universe with height rows and
// width columns per row.
func NewUniverse() Universe {
	rows := make(Universe, height)
	for r := range rows {
		rows[r] = make([]bool, width)
	}
	return rows
}
// Show renders the universe to stdout: '*' for a live cell, a space for a
// dead one, one line per row.
func (u Universe) Show() {
	for _, row := range u {
		for _, alive := range row {
			cell := " "
			if alive {
				cell = "*"
			}
			fmt.Print(cell)
		}
		fmt.Println()
	}
}
// Seed brings roughly 25% of all cells to life at random; cells that are
// already alive stay alive.
func (u Universe) Seed() {
	for _, row := range u {
		for j := range row {
			if rand.Intn(100) < 25 {
				row[j] = true
			}
		}
	}
}
// Alive reports whether the cell at (x, y) is alive, wrapping coordinates
// toroidally so out-of-range values refer back into the grid.
func (u Universe) Alive(x, y int) bool {
	// ((v % n) + n) % n maps any integer into [0, n), matching the
	// original add-until-positive / mod-down normalization.
	x = ((x % height) + height) % height
	y = ((y % width) + width) % width
	return u[x][y]
}
// Neighbors counts the live cells among the eight neighbors of (x, y),
// returning a value from 0 to 8. Edges wrap via Alive.
func (u Universe) Neighbors(x, y int) int {
	count := 0
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue // skip the cell itself
			}
			if u.Alive(x+dx, y+dy) {
				count++
			}
		}
	}
	return count
}
// Next applies Conway's rules to decide whether the cell at (x, y) lives
// in the next generation: a live cell survives with 2 or 3 neighbors, a
// dead cell is born with exactly 3.
func (u Universe) Next(x, y int) bool {
	n := u.Neighbors(x, y)
	if u.Alive(x, y) {
		return n == 2 || n == 3
	}
	return n == 3
}
// Step computes the next generation of universe a and writes it into
// universe b; a itself is left unchanged.
func Step(a, b Universe) {
	for row := range a {
		for col := range a[row] {
			b[row][col] = a.Next(row, col)
		}
	}
}
// ClearScreen clears the terminal on Windows by shelling out to "cmd /c cls".
func ClearScreen() {
	cls := exec.Command("cmd", "/c", "cls")
	cls.Stdout = os.Stdout
	// Best effort: failing to clear the screen is not fatal.
	_ = cls.Run()
}
// RunSimulation runs Conway's Game of Life for n generations, rendering
// each generation for 100ms before advancing.
func RunSimulation(n int) {
	current, next := NewUniverse(), NewUniverse()
	current.Seed()
	for g := 0; g < n; g++ {
		Step(current, next)
		current.Show()
		time.Sleep(100 * time.Millisecond)
		ClearScreen()
		// Swap buffers: the freshly computed generation becomes current.
		current, next = next, current
	}
}
|
package models
import (
"strings"
)
// DatabaseModel represents the config for a single model that can be written to the DB,
// holding the dialect-resolved SQL scripts.
type DatabaseModel struct {
	configFile       string // NOTE(review): never assigned in the visible code — confirm whether it is still needed
	createScript     string // table-creation SQL after dialect substitution
	constraintScript string // constraint SQL after dialect substitution
	insertScript     string // single-row insert SQL after dialect substitution
}

// ModelConfig is the pre-dialect creation scripts for a given model.
type ModelConfig struct {
	Create      string // raw create script containing $-placeholders
	Constraints string // raw constraint script containing $-placeholders
	Insert      string // raw insert script containing $-placeholders
}

// SQLDialect has dialect replacements for create/insert/update scripts.
type SQLDialect struct {
	replaceInsertStatement string            // insert template containing a $VALUES placeholder
	replacements           map[string]string // token -> dialect-specific replacement (e.g. $INT -> INTEGER)
}
// InsertStatement substitutes the given value statement into the dialect's
// insert template, replacing the first "$VALUES" placeholder.
func (dialect *SQLDialect) InsertStatement(valueStatement string) string {
	return strings.Replace(dialect.replaceInsertStatement, "$VALUES", valueStatement, 1)
}
// NewDatabaseModel converts a database config to the passed dialect by
// applying the dialect's token replacements to each script.
func NewDatabaseModel(dialect *SQLDialect, config ModelConfig) *DatabaseModel {
	model := DatabaseModel{}
	model.createScript = processScript(config.Create, dialect.replacements)
	model.constraintScript = processScript(config.Constraints, dialect.replacements)
	model.insertScript = processScript(config.Insert, dialect.replacements)
	return &model
}
// CreateScript returns the final creation script for the model tables.
func (model *DatabaseModel) CreateScript() string {
	return model.createScript
}

// ConstraintScript returns the final constraint-creation script for the model tables.
func (model *DatabaseModel) ConstraintScript() string {
	return model.constraintScript
}

// InsertScript returns the final single insertion script.
func (model *DatabaseModel) InsertScript() string {
	return model.insertScript
}
// processScript applies every dialect token replacement to script and
// returns the substituted text. A nil map leaves the script untouched.
func processScript(script string, dialect map[string]string) string {
	out := script
	for token, replacement := range dialect {
		out = strings.ReplaceAll(out, token, replacement)
	}
	return out
}
// Sqlite3Dialect has the dialect definition for sqlite3, using
// INSERT OR REPLACE for upsert semantics.
func Sqlite3Dialect() *SQLDialect {
	tokens := map[string]string{
		"$TEXT": "TEXT",
		"$INT":  "INTEGER",
	}
	return &SQLDialect{
		replaceInsertStatement: "INSERT OR REPLACE INTO $VALUES;",
		replacements:           tokens,
	}
}
// Postgres12Dialect has the dialect definition for PostgreSQL 12, using
// ON CONFLICT DO NOTHING so duplicate inserts are silently skipped.
func Postgres12Dialect() *SQLDialect {
	return &SQLDialect{
		replaceInsertStatement: "INSERT INTO $VALUES ON CONFLICT DO NOTHING;",
		replacements: map[string]string{
			"$TEXT": "TEXT",
			"$INT":  "INTEGER",
		},
	}
}
|
package client
import (
"github.com/wish/ctl/pkg/client/types"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
// describeversioned "k8s.io/kubectl/pkg/describe/versioned"
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubectl/pkg/describe"
"strings"
)
// describeContextInfo formats a contextual-info header (context name plus
// its cluster labels) for prefixing describe output. It returns "" when the
// context has no ClusterExt entry. Labels whose key starts with "_" are
// treated as internal and skipped. Label order is map-iteration order and
// therefore not deterministic.
func (c *Client) describeContextInfo(context string) string {
	// A lookup on a nil map is safe and reports !ok, so this single check
	// replaces the original's separate nil test and repeated lookups.
	labels, ok := c.ClusterExt[context]
	if !ok {
		return ""
	}
	var sb strings.Builder
	fmt.Fprintf(&sb, "Context: \t%s\n", context)
	fmt.Fprintf(&sb, "Labels: \t")
	first := true
	for k, v := range labels {
		if strings.HasPrefix(k, "_") {
			continue
		}
		if first {
			fmt.Fprintf(&sb, "%s=%s\n", k, v)
			first = false
		} else {
			// Continuation lines align under the first label.
			fmt.Fprintf(&sb, " \t%s=%s\n", k, v)
		}
	}
	return sb.String()
}
// DescribePod returns a human readable format to describe the pod,
// prefixed with the cluster context info.
func (c *Client) DescribePod(pod types.PodDiscovery, options DescribeOptions) (string, error) {
	d, err := c.getDescriber(pod.Context, schema.GroupKind{Group: corev1.GroupName, Kind: "Pod"})
	if err != nil {
		return "", err
	}
	s, err := d.Describe(pod.Namespace, pod.Name, describe.DescriberSettings{ShowEvents: options.ShowEvents})
	// Prepend context info for consistency with every other Describe* helper
	// in this file, which all prefix their output with describeContextInfo.
	return c.describeContextInfo(pod.Context) + s, err
}
// DescribeCronJob returns a human readable format to describe the cronjob,
// prefixed with the cluster context info.
func (c *Client) DescribeCronJob(cronjob types.CronJobDiscovery, options DescribeOptions) (string, error) {
	describer, err := c.getDescriber(cronjob.Context, schema.GroupKind{Group: batchv1.GroupName, Kind: "CronJob"})
	if err != nil {
		return "", err
	}
	settings := describe.DescriberSettings{ShowEvents: options.ShowEvents}
	out, err := describer.Describe(cronjob.Namespace, cronjob.Name, settings)
	return c.describeContextInfo(cronjob.Context) + out, err
}
// DescribeJob returns a human readable format to describe the job,
// prefixed with the cluster context info.
func (c *Client) DescribeJob(job types.JobDiscovery, options DescribeOptions) (string, error) {
	describer, err := c.getDescriber(job.Context, schema.GroupKind{Group: batchv1.GroupName, Kind: "Job"})
	if err != nil {
		return "", err
	}
	settings := describe.DescriberSettings{ShowEvents: options.ShowEvents}
	out, err := describer.Describe(job.Namespace, job.Name, settings)
	return c.describeContextInfo(job.Context) + out, err
}
// DescribeConfigMap returns a human readable format to describe the configmap,
// prefixed with the cluster context info.
func (c *Client) DescribeConfigMap(configmap types.ConfigMapDiscovery, options DescribeOptions) (string, error) {
	describer, err := c.getDescriber(configmap.Context, schema.GroupKind{Group: corev1.GroupName, Kind: "ConfigMap"})
	if err != nil {
		return "", err
	}
	settings := describe.DescriberSettings{ShowEvents: options.ShowEvents}
	out, err := describer.Describe(configmap.Namespace, configmap.Name, settings)
	return c.describeContextInfo(configmap.Context) + out, err
}
// DescribeDeployment returns a human readable format to describe the deployment,
// prefixed with the cluster context info.
func (c *Client) DescribeDeployment(deployment types.DeploymentDiscovery, options DescribeOptions) (string, error) {
	describer, err := c.getDescriber(deployment.Context, schema.GroupKind{Group: appsv1.GroupName, Kind: "Deployment"})
	if err != nil {
		return "", err
	}
	settings := describe.DescriberSettings{ShowEvents: options.ShowEvents}
	out, err := describer.Describe(deployment.Namespace, deployment.Name, settings)
	return c.describeContextInfo(deployment.Context) + out, err
}
// DescribeReplicaSet returns a human readable format to describe the replicaset,
// prefixed with the cluster context info.
func (c *Client) DescribeReplicaSet(replicaset types.ReplicaSetDiscovery, options DescribeOptions) (string, error) {
	describer, err := c.getDescriber(replicaset.Context, schema.GroupKind{Group: appsv1.GroupName, Kind: "ReplicaSet"})
	if err != nil {
		return "", err
	}
	settings := describe.DescriberSettings{ShowEvents: options.ShowEvents}
	out, err := describer.Describe(replicaset.Namespace, replicaset.Name, settings)
	return c.describeContextInfo(replicaset.Context) + out, err
}
|
package flow
import (
"testing"
. "github.com/BaritoLog/go-boilerplate/testkit"
"github.com/BaritoLog/instru"
)
// ResetApplicationSecretCollection clears the recorded app secrets by writing
// a nil value into the "application_group" metric, isolating tests from each other.
func ResetApplicationSecretCollection() {
	instru.Metric("application_group").Put("app_secrets", nil)
}
func TestContains_NotMatch(t *testing.T) {
given := []string{"a", "b"}
exist := Contains(given, "e")
FatalIf(t, exist, "Should not contain")
}
func TestContains(t *testing.T) {
given := []string{"a", "b"}
exist := Contains(given, "b")
FatalIf(t, !exist, "Should contain")
}
// TestGetApplicationSecretCollection_Empty checks the collection is empty
// right after a reset.
func TestGetApplicationSecretCollection_Empty(t *testing.T) {
	ResetApplicationSecretCollection()
	collection := GetApplicationSecretCollection()
	FatalIf(t, len(collection) > 0, "Should be empty")
}
// TestGetApplicationSecretCollection_Exist checks the collection is returned
// once a secret has been stored in the metric.
func TestGetApplicationSecretCollection_Exist(t *testing.T) {
	ResetApplicationSecretCollection()
	stored := []string{"some-secret"}
	instru.Metric("application_group").Put("app_secrets", stored)
	collection := GetApplicationSecretCollection()
	FatalIf(t, len(collection) == 0, "Should not be empty")
}
// func TestInstruApplicationSecret(t *testing.T) {
// appSecret := "some-secret"
// InstruApplicationSecret(appSecret)
//
// collection := GetApplicationSecretCollection()
// FatalIf(t, !Contains(collection, "some-secret"), "Should contain app secret")
// }
// TestInstruApplicationSecret checks that secrets are recorded exactly once
// each: re-registering a duplicate must not grow the collection, while a
// distinct secret must.
func TestInstruApplicationSecret(t *testing.T) {
	appSecret := "some-secret"
	duplicateAppSecret := "some-secret"
	nextAppSecret := "other-secret"
	ResetApplicationSecretCollection()
	InstruApplicationSecret(appSecret)
	collection := GetApplicationSecretCollection()
	FatalIf(t, !Contains(collection, "some-secret"), "Should contain app secret")
	InstruApplicationSecret(duplicateAppSecret)
	collection = GetApplicationSecretCollection()
	// The original only rejected len == 2, which would accept e.g. 0 or 3;
	// pin the exact expected size instead.
	FatalIf(t, len(collection) != 1, "Should not be duplicate")
	InstruApplicationSecret(nextAppSecret)
	collection = GetApplicationSecretCollection()
	// Likewise require exactly 2 rather than "not more than 2".
	FatalIf(t, len(collection) != 2, "Should be contain 2 app secret")
	FatalIf(t, !Contains(collection, "other-secret"), "Should contain app secret")
}
|
package main
import (
"bufio"
"fmt"
"math/big"
"os"
)
// main reads one (possibly huge) integer from stdin and prints its value
// modulo 20000303.
func main() {
	reader := bufio.NewReader(os.Stdin)
	var n big.Int
	fmt.Fscan(reader, &n)
	// big.Int handles inputs far beyond the native integer range.
	fmt.Println(new(big.Int).Mod(&n, big.NewInt(20000303)))
}
|
package handler
import (
"context"
"path/filepath"
"testing"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/subscription/v1"
jinmuidpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
"github.com/micro/go-micro/v2/client"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// SubscriptionTestSuite tests the association between an account and a MAC.
type SubscriptionTestSuite struct {
	suite.Suite
	subscriptionService *SubscriptionService             // service under test
	account             *Account                         // test account loaded from the env file
	jinmuidSrv          jinmuidpb.UserManagerAPIService  // RPC client used to sign the test user in
}

// Account bundles the credentials and subscription parameters of a test account.
type Account struct {
	account                  string // login name
	password                 string // plaintext password
	userID                   int32  // user id the subscription queries run against
	seed                     string // seed used when hashing the password for sign-in
	hashPassword             string // pre-hashed password sent over RPC
	code                     string
	activationCodeEncryptKey string
	activationCode           string
	contractYear             int32
	maxUserLimits            int32
}
// SetupSuite initializes the suite: the service under test, its datastore,
// the test account, and the user-manager RPC client.
func (suite *SubscriptionTestSuite) SetupSuite() {
	suite.subscriptionService = new(SubscriptionService)
	envFilepath := filepath.Join("testdata", "local.svc-subscription.env")
	// NOTE(review): the datastore construction error is discarded — a bad env
	// file only surfaces later as nil-datastore failures; consider checking it.
	suite.subscriptionService.datastore, _ = newTestingDbClientFromEnvFile(envFilepath)
	suite.account = newTestingAccountFromEnvFile(envFilepath)
	suite.jinmuidSrv = jinmuidpb.NewUserManagerAPIService(rpcJinmuidServiceName, client.DefaultClient)
}
// TestGetUserSubscriptions verifies that the active subscription for the
// signed-in test account is returned with the expected user limit.
func (suite *SubscriptionTestSuite) TestGetUserSubscriptions() {
	t := suite.T()
	ctx, err := mockSignin(context.Background(), suite.jinmuidSrv, suite.account.account, suite.account.hashPassword, suite.account.seed)
	assert.NoError(t, err)
	request := &proto.GetUserSubscriptionsRequest{
		UserId: suite.account.userID,
	}
	response := new(proto.GetUserSubscriptionsResponse)
	err = suite.subscriptionService.GetUserSubscriptions(ctx, request, response)
	assert.NoError(t, err)
	assert.Equal(t, int32(300), response.Subscriptions[0].MaxUserLimits)
}
// TestSubscriptionTestSuite wires the suite into the standard go test runner.
func TestSubscriptionTestSuite(t *testing.T) {
	suite.Run(t, &SubscriptionTestSuite{})
}
// mockSignin signs the test user in over RPC and returns a context that
// carries the resulting access token.
func mockSignin(ctx context.Context, rpcUserManagerSrv jinmuidpb.UserManagerAPIService, username string, passwordHash, seed string) (context.Context, error) {
	req := &jinmuidpb.UserSignInByUsernamePasswordRequest{
		Username:       username,
		HashedPassword: passwordHash,
		Seed:           seed,
	}
	resp, err := rpcUserManagerSrv.UserSignInByUsernamePassword(ctx, req)
	if err != nil {
		return nil, err
	}
	return AddContextToken(ctx, resp.AccessToken), nil
}
|
package image
import (
"encoding/base64"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
"github.com/openshift/assisted-service/models"
hivev1 "github.com/openshift/hive/apis/hive/v1"
"github.com/openshift/installer/pkg/asset"
"github.com/openshift/installer/pkg/asset/agent/manifests"
"github.com/openshift/installer/pkg/asset/agent/mirror"
)
// TestUnconfiguredIgnition_Generate exercises UnconfiguredIgnition.Generate
// across three dependency scenarios (defaults, mirror configs, NMStateConfigs)
// and checks both the generated file set and systemd service enablement.
func TestUnconfiguredIgnition_Generate(t *testing.T) {
	skipTestIfnmstatectlIsMissing(t)
	nmStateConfig := getTestNMStateConfig()
	cases := []struct {
		name              string
		overrideDeps      []asset.Asset   // assets replacing the defaults for this case
		expectedError     string          // non-empty when Generate is expected to fail
		expectedFiles     []string        // exact ignition files expected in the output
		serviceEnabledMap map[string]bool // service name -> expected enabled flag
	}{
		{
			// No NMStateConfigs: the pre-network script is present but its
			// service must stay disabled.
			name:          "default-configs-and-no-nmstateconfigs",
			expectedFiles: generatedFilesUnconfiguredIgnition("/usr/local/bin/pre-network-manager-config.sh"),
			serviceEnabledMap: map[string]bool{
				"pre-network-manager-config.service": false,
				"agent-check-config-image.service":   true},
		},
		{
			// Registry mirror settings add registries.conf and the CA bundle.
			name: "with-mirror-configs",
			overrideDeps: []asset.Asset{
				&mirror.RegistriesConf{
					File: &asset.File{
						Filename: mirror.RegistriesConfFilename,
						Data:     []byte(""),
					},
					MirrorConfig: []mirror.RegistriesConfig{
						{
							Location: "some.registry.org/release",
							Mirror:   "some.mirror.org",
						},
					},
				},
				&mirror.CaBundle{
					File: &asset.File{
						Filename: "my.crt",
						Data:     []byte("my-certificate"),
					},
				},
			},
			expectedFiles: generatedFilesUnconfiguredIgnition(registriesConfPath,
				registryCABundlePath, "/usr/local/bin/pre-network-manager-config.sh"),
			serviceEnabledMap: map[string]bool{
				"pre-network-manager-config.service": false,
				"agent-check-config-image.service":   true},
		},
		{
			// NMStateConfigs produce per-host network files and enable the
			// pre-network service.
			name: "with-nmstateconfigs",
			overrideDeps: []asset.Asset{
				&nmStateConfig,
			},
			expectedFiles: generatedFilesUnconfiguredIgnition("/etc/assisted/network/host0/eth0.nmconnection",
				"/etc/assisted/network/host0/mac_interface.ini", "/usr/local/bin/pre-network-manager-config.sh"),
			serviceEnabledMap: map[string]bool{
				"pre-network-manager-config.service": true,
				"agent-check-config-image.service":   true},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			deps := buildUnconfiguredIgnitionAssetDefaultDependencies(t)
			overrideDeps(deps, tc.overrideDeps)
			parents := asset.Parents{}
			parents.Add(deps...)
			unconfiguredIgnitionAsset := &UnconfiguredIgnition{}
			err := unconfiguredIgnitionAsset.Generate(parents)
			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
				assertExpectedFiles(t, unconfiguredIgnitionAsset.Config, tc.expectedFiles, nil)
				assertServiceEnabled(t, unconfiguredIgnitionAsset.Config, tc.serviceEnabledMap)
			}
		})
	}
}
// buildUnconfiguredIgnitionAssetDefaultDependencies creates the minimum valid
// set of dependencies for the UnconfiguredIgnition asset; individual tests
// replace entries via overrideDeps.
func buildUnconfiguredIgnitionAssetDefaultDependencies(t *testing.T) []asset.Asset {
	t.Helper()
	infraEnv := getTestInfraEnv()
	pullSecret := getTestAgentPullSecret(t)
	imageSet := getTestClusterImageSet()
	deps := []asset.Asset{
		&infraEnv,
		&pullSecret,
		&imageSet,
	}
	return append(deps,
		&manifests.NMStateConfig{},
		&mirror.RegistriesConf{},
		&mirror.CaBundle{},
	)
}
// getTestInfraEnv returns a minimal InfraEnv fixture with an SSH key and a
// stub manifest file.
func getTestInfraEnv() manifests.InfraEnv {
	return manifests.InfraEnv{
		Config: &aiv1beta1.InfraEnv{
			Spec: aiv1beta1.InfraEnvSpec{
				SSHAuthorizedKey: "my-ssh-key",
			},
		},
		File: &asset.File{
			Filename: "infraenv.yaml",
			Data:     []byte("infraenv"),
		},
	}
}
// getTestAgentPullSecret returns a pull-secret fixture whose
// .dockerconfigjson payload is decoded from a fixed base64 string.
func getTestAgentPullSecret(t *testing.T) manifests.AgentPullSecret {
	t.Helper()
	secretDataBytes, err := base64.StdEncoding.DecodeString("c3VwZXItc2VjcmV0Cg==")
	assert.NoError(t, err)
	return manifests.AgentPullSecret{
		Config: &v1.Secret{
			Data: map[string][]byte{
				".dockerconfigjson": secretDataBytes,
			},
		},
		File: &asset.File{
			Filename: "pull-secret.yaml",
			Data:     []byte("pull-secret"),
		},
	}
}
// getTestClusterImageSet returns a ClusterImageSet fixture pinned to a fixed
// release image and a stub manifest file.
func getTestClusterImageSet() manifests.ClusterImageSet {
	return manifests.ClusterImageSet{
		Config: &hivev1.ClusterImageSet{
			Spec: hivev1.ClusterImageSetSpec{
				ReleaseImage: "registry.ci.openshift.org/origin/release:4.11",
			},
		},
		File: &asset.File{
			Filename: "cluster-image-set.yaml",
			Data:     []byte("cluster-image-set"),
		},
	}
}
// getTestNMStateConfig returns an NMStateConfig fixture describing one host
// with a single static-IP ethernet interface (eth0).
func getTestNMStateConfig() manifests.NMStateConfig {
	return manifests.NMStateConfig{
		Config: []*aiv1beta1.NMStateConfig{
			{
				Spec: aiv1beta1.NMStateConfigSpec{
					Interfaces: []*aiv1beta1.Interface{
						{
							Name:       "eth0",
							MacAddress: "00:01:02:03:04:05",
						},
					},
				},
			},
		},
		StaticNetworkConfig: []*models.HostStaticNetworkConfig{
			{
				MacInterfaceMap: models.MacInterfaceMap{
					{LogicalNicName: "eth0", MacAddress: "00:01:02:03:04:05"},
				},
				// Inline nmstate YAML assigning a static IPv4 address to eth0.
				NetworkYaml: "interfaces:\n- ipv4:\n    address:\n    - ip: 192.168.122.21\n      prefix-length: 24\n    enabled: true\n  mac-address: 00:01:02:03:04:05\n  name: eth0\n  state: up\n  type: ethernet\n",
			},
		},
		File: &asset.File{
			Filename: "nmstateconfig.yaml",
			Data:     []byte("nmstateconfig"),
		},
	}
}
// generatedFilesUnconfiguredIgnition returns the manifest files always
// produced by the unconfigured ignition, followed by the given extra files
// and the shared common files.
func generatedFilesUnconfiguredIgnition(otherFiles ...string) []string {
	files := []string{
		"/etc/assisted/manifests/pull-secret.yaml",
		"/etc/assisted/manifests/cluster-image-set.yaml",
		"/etc/assisted/manifests/infraenv.yaml",
	}
	files = append(files, otherFiles...)
	files = append(files, commonFiles()...)
	return files
}
|
package config
import (
"os"
"path/filepath"
"github.com/hashicorp/go-multierror"
"gopkg.in/yaml.v2"
)
const (
	// lockFileName is the fixed file name for the lock file inside a project directory.
	lockFileName = "mona.lock"
	// lockFilePerm is the permission mode used when opening the lock file.
	lockFilePerm = 0644
)

type (
	// The LockFile type represents the structure of a lock file, it stores the project name,
	// version and the last build hashes used for each app
	LockFile struct {
		Apps map[string]*AppVersion `yaml:"apps,omitempty"`
	}

	// The AppVersion type represents individual app information as stored
	// in the lock file.
	AppVersion struct {
		BuildHash string `yaml:"build"`
		TestHash  string `yaml:"test"`
		LintHash  string `yaml:"lint"`
	}
)
// NewLockFile creates a new "mona.lock" file in the current working directory using the
// provided name.
//
// NOTE(review): the name parameter is unused in this implementation — the
// lock file is written with an empty app map only. Confirm whether the name
// should be persisted or the parameter removed.
func NewLockFile(dir string, name string) error {
	location := filepath.Join(dir, lockFileName)
	// os.Create truncates any existing file (mode 0666 before umask).
	file, err := os.Create(location)
	if err != nil {
		return err
	}
	lock := LockFile{
		Apps: make(map[string]*AppVersion),
	}
	// Combine the encode error and the close error; nil when both succeed.
	return multierror.Append(
		yaml.NewEncoder(file).Encode(lock),
		file.Close()).
		ErrorOrNil()
}
// UpdateLockFile overwrites the current "mona.lock" file in the given working
// directory with the data provided.
func UpdateLockFile(wd string, lock *LockFile) error {
	// O_TRUNC is required: without it, writing a YAML document shorter than
	// the previous one would leave stale trailing bytes from the old content,
	// producing a corrupt lock file.
	file, err := os.OpenFile(
		filepath.Join(wd, lockFileName),
		os.O_CREATE|os.O_WRONLY|os.O_TRUNC,
		lockFilePerm)
	if err != nil {
		return err
	}
	// Combine the encode error and the close error; nil when both succeed.
	return multierror.Append(
		yaml.NewEncoder(file).Encode(lock),
		file.Close()).
		ErrorOrNil()
}
// LoadLockFile attempts to load a lock file into memory from the provided
// working directory. A missing file is not an error: an empty LockFile is
// returned instead.
func LoadLockFile(wd string) (*LockFile, error) {
	file, err := os.OpenFile(
		filepath.Join(wd, lockFileName),
		os.O_RDONLY,
		lockFilePerm)
	if os.IsNotExist(err) {
		return &LockFile{
			Apps: make(map[string]*AppVersion),
		}, nil
	}
	if err != nil {
		return nil, err
	}
	var out LockFile
	if err := yaml.NewDecoder(file).Decode(&out); err != nil {
		// Close the handle before returning; the original leaked it on a
		// decode failure. The decode error takes precedence.
		file.Close()
		return nil, err
	}
	// Guard against a YAML document with no apps key.
	if out.Apps == nil {
		out.Apps = make(map[string]*AppVersion)
	}
	return &out, file.Close()
}
// AddApp adds a new app entry to the lock file in the provided working directory.
// The entry starts with empty build/test/lint hashes and the file is
// rewritten immediately via UpdateLockFile.
func AddApp(l *LockFile, wd, name string) error {
	l.Apps[name] = &AppVersion{}
	return UpdateLockFile(wd, l)
}
|
package mock
import (
"fmt"
)
// MockTask holds the attributes needed to perform a unit of work in tests.
type MockTask struct {
	id       int // task identifier reported by Identity
	writerID int // id of the producer (writer) that created the task
}
// New creates a MockTask from the given task and producer (writer) ids.
func New(taskID, writerID int) *MockTask {
	task := MockTask{id: taskID, writerID: writerID}
	return &task
}
// Identity returns the id of the task.
func (t *MockTask) Identity() int {
	return t.id
}
// Exec executes the task. This mock only logs the task/writer ids to stdout
// and always succeeds (returns nil).
func (t *MockTask) Exec() error {
	fmt.Printf("task id = %d by writer id = %d done\n", t.id, t.writerID)
	return nil
}
|
package main
import (
"encoding/json"
"log"
"math/rand"
"net"
"os"
"github.com/nsf/termbox-go"
)
// AI_DISPLAY_ON toggles rendering of the AI's local terminal display.
// NOTE(review): Go naming convention would be aiDisplayOn; kept as-is since
// other code in this file references the current name.
var AI_DISPLAY_ON = true

// AIPlayer holds the AI's view of the game: the occupancy grid, the last
// move it took, and an optional terminal display.
type AIPlayer struct {
	arena   [][]bool // arena[x][y] == true when the cell is occupied (border wall or player trail)
	prev    Move     // last move taken; used to prefer continuing straight
	display Display  // terminal UI; only driven when AI_DISPLAY_ON is set
}
// NewAI returns an AIPlayer with a freshly built arena (walls marked occupied).
func NewAI() (AI AIPlayer) {
	return AIPlayer{arena: buildArena()}
}
// ReadState decodes one State message from the server connection. On a
// decode failure it reports the error, waits for a keypress, and exits.
func (ai *AIPlayer) ReadState(conn net.Conn) (state State) {
	if err := json.NewDecoder(conn).Decode(&state); err != nil {
		ai.display.Debug("Error: Lost connection with server." + err.Error())
		ai.WaitForInput()
		os.Exit(1)
	}
	return state
}
// SendMove encodes one Move to the server connection. On an encode failure
// it reports the error, waits for a keypress, and exits.
func (ai *AIPlayer) SendMove(conn net.Conn, m Move) {
	if err := json.NewEncoder(conn).Encode(m); err != nil {
		ai.display.Debug("Error: Lost connection with server." + err.Error())
		ai.WaitForInput()
		os.Exit(1)
	}
}
// WaitForInput blocks until the next terminal event (e.g. a keypress),
// giving the user a chance to read an error message before exit.
func (ai *AIPlayer) WaitForInput() {
	termbox.PollEvent()
}
// RunAI connects to the game server and plays forever: read state, update
// the occupancy grid, optionally render, then answer with the next move.
// It exits the process on connection failure.
func RunAI() {
	ai := NewAI()
	if AI_DISPLAY_ON {
		ai.display = NewDisplay()
		ai.display.DrawBoard()
	}
	conn, err := net.Dial("tcp", HOST+PORT)
	if err != nil {
		ai.display.Debug("Error: Failed to connect to server:" + HOST + PORT + "\nm" + err.Error())
		ai.WaitForInput()
		os.Exit(1)
	}
	for {
		state := ai.ReadState(conn)
		// Step 0 marks the start of a fresh game; drop all learned state.
		if state.Step == 0 {
			if AI_DISPLAY_ON {
				ai.display.Reset()
			}
			ai.Reset()
		}
		ai.UpdateArena(state)
		if AI_DISPLAY_ON {
			ai.display.UpdateState(state)
			ai.display.Sync()
		}
		if state.IsGameOver() {
			// The board will get reset on `if state.Step == 0`
			continue
		}
		// Only answer with a move while our player is still alive.
		if state.Players[state.PlayerIndex].Alive {
			m := ai.NextMove(state)
			ai.SendMove(conn, m)
		}
	}
}
// printMap logs the arena row by row, '1' for an occupied cell and '0' for
// a free one (debugging aid).
func printMap(arena [][]bool) {
	for y := 0; y < ARENA_HEIGHT; y++ {
		row := make([]byte, ARENA_WIDTH)
		for x := 0; x < ARENA_WIDTH; x++ {
			if arena[x][y] {
				row[x] = '1'
			} else {
				row[x] = '0'
			}
		}
		log.Println(string(row))
	}
}
// UpdateArena marks every player's current position as occupied in the grid.
func (ai *AIPlayer) UpdateArena(state State) {
	for i := range state.Players {
		p := state.Players[i]
		ai.arena[p.X][p.Y] = true
	}
}
// Reset discards the learned occupancy grid and starts from a fresh arena
// (only border walls marked), used when a new game begins.
func (ai *AIPlayer) Reset() {
	ai.arena = buildArena()
}
// isGoodDirection reports whether stepping in direction d from the previous
// move lands on an in-bounds, unoccupied cell.
func (ai *AIPlayer) isGoodDirection(d int) bool {
	next := UpdateMove(d, ai.prev)
	inBounds := next.X > 0 && next.Y > 0 && next.X < ARENA_WIDTH && next.Y < ARENA_HEIGHT
	return inBounds && !ai.arena[next.X][next.Y]
}
// NextMove picks the AI's next move: keep going straight when the current
// direction is safe; otherwise try the remaining directions in random order;
// as a last resort, step anyway (into a losing cell). ai.prev is updated to
// the chosen move as a side effect.
func (ai *AIPlayer) NextMove(state State) Move {
	ai.prev = state.Players[state.PlayerIndex].Move
	nextMove := ai.prev
	// Continue on path
	if ai.isGoodDirection(nextMove.D) {
		nextMove = UpdateMove(nextMove.D, nextMove)
		ai.prev = nextMove
		return nextMove
	}
	possibleDirections := []int{UP, DOWN, LEFT, RIGHT}
	for len(possibleDirections) != 0 {
		// Pick an untried direction at random; remove it from the pool
		// when it is blocked.
		i := rand.Intn(len(possibleDirections))
		direction := possibleDirections[i]
		if ai.isGoodDirection(direction) {
			nextMove = UpdateMove(direction, nextMove)
			ai.prev = nextMove
			return nextMove
		}
		possibleDirections = append(possibleDirections[0:i], possibleDirections[i+1:]...)
	}
	// there is no good move. :(
	nextMove = UpdateMove(nextMove.D, nextMove)
	ai.prev = nextMove
	return nextMove
}
// buildArena allocates an ARENA_WIDTH x ARENA_HEIGHT grid with all four
// border rows/columns marked occupied (walls).
func buildArena() [][]bool {
	arena := make([][]bool, ARENA_WIDTH)
	for x := range arena {
		arena[x] = make([]bool, ARENA_HEIGHT)
		// Top and bottom walls.
		arena[x][0] = true
		arena[x][ARENA_HEIGHT-1] = true
	}
	// Left and right walls.
	for y := 0; y < ARENA_HEIGHT; y++ {
		arena[0][y] = true
		arena[ARENA_WIDTH-1][y] = true
	}
	return arena
}
|
// Copyright 2021 PingCAP, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package txninfo
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/prometheus/client_golang/prometheus"
"github.com/tikv/client-go/v2/oracle"
"go.uber.org/zap"
)
// TxnRunningState is the current state of a transaction
type TxnRunningState = int32

const (
	// TxnIdle means the transaction is idle, i.e. waiting for the user's next statement
	TxnIdle TxnRunningState = iota
	// TxnRunning means the transaction is running, i.e. executing a statement
	TxnRunning
	// TxnLockAcquiring means the transaction is trying to acquire a lock
	TxnLockAcquiring
	// TxnCommitting means the transaction is (at least trying to) committing
	TxnCommitting
	// TxnRollingBack means the transaction is rolling back
	TxnRollingBack
	// TxnStateCounter is a marker of the number of states, ensuring we don't miss any of them
	TxnStateCounter
)
var (
	// txnDurationHistogramForState is indexed by [state][hasLock], where
	// hasLock is 0 (false) or 1 (true); populated by InitMetricsVars.
	txnDurationHistogramForState [][]prometheus.Observer
	// txnStatusEnteringCounterForState is indexed by state; populated by
	// InitMetricsVars.
	txnStatusEnteringCounterForState []prometheus.Counter
)

func init() {
	// Populate the per-state metric slices at package load time.
	InitMetricsVars()
}
// InitMetricsVars init transaction metrics vars.
func InitMetricsVars() {
txnDurationHistogramForState = [][]prometheus.Observer{
{
metrics.TxnDurationHistogram.WithLabelValues("idle", "false"),
metrics.TxnDurationHistogram.WithLabelValues("idle", "true"),
},
{
metrics.TxnDurationHistogram.WithLabelValues("executing_sql", "false"),
metrics.TxnDurationHistogram.WithLabelValues("executing_sql", "true"),
},
{
metrics.TxnDurationHistogram.WithLabelValues("acquiring_lock", "false"),
metrics.TxnDurationHistogram.WithLabelValues("acquiring_lock", "true"),
},
{
metrics.TxnDurationHistogram.WithLabelValues("committing", "false"),
metrics.TxnDurationHistogram.WithLabelValues("committing", "true"),
},
{
metrics.TxnDurationHistogram.WithLabelValues("rolling_back", "false"),
metrics.TxnDurationHistogram.WithLabelValues("rolling_back", "true"),
},
}
txnStatusEnteringCounterForState = []prometheus.Counter{
metrics.TxnStatusEnteringCounter.WithLabelValues("idle"),
metrics.TxnStatusEnteringCounter.WithLabelValues("executing_sql"),
metrics.TxnStatusEnteringCounter.WithLabelValues("acquiring_lock"),
metrics.TxnStatusEnteringCounter.WithLabelValues("committing"),
metrics.TxnStatusEnteringCounter.WithLabelValues("rolling_back"),
}
if len(txnDurationHistogramForState) != int(TxnStateCounter) {
panic("len(txnDurationHistogramForState) != TxnStateCounter")
}
if len(txnStatusEnteringCounterForState) != int(TxnStateCounter) {
panic("len(txnStatusEnteringCounterForState) != TxnStateCounter")
}
}
// TxnDurationHistogram returns the observer for the given state and hasLock type.
func TxnDurationHistogram(state TxnRunningState, hasLock bool) prometheus.Observer {
	// Index 0 is the "no lock" observer, index 1 the "has lock" one.
	idx := 0
	if hasLock {
		idx = 1
	}
	observers := txnDurationHistogramForState[state]
	return observers[idx]
}
// TxnStatusEnteringCounter returns the counter for the given state.
// The table is indexed directly by the TxnRunningState value.
func TxnStatusEnteringCounter(state TxnRunningState) prometheus.Counter {
	return txnStatusEnteringCounterForState[state]
}
// Column names of the TIDB_TRX table; used as keys into columnValueGetterMap.
const (
	// IDStr is the column name of the TIDB_TRX table's ID column.
	IDStr = "ID"
	// StartTimeStr is the column name of the TIDB_TRX table's StartTime column.
	StartTimeStr = "START_TIME"
	// CurrentSQLDigestStr is the column name of the TIDB_TRX table's CurrentSQLDigest column.
	CurrentSQLDigestStr = "CURRENT_SQL_DIGEST"
	// CurrentSQLDigestTextStr is the column name of the TIDB_TRX table's CurrentSQLDigestText column.
	CurrentSQLDigestTextStr = "CURRENT_SQL_DIGEST_TEXT"
	// StateStr is the column name of the TIDB_TRX table's State column.
	StateStr = "STATE"
	// WaitingStartTimeStr is the column name of the TIDB_TRX table's WaitingStartTime column.
	WaitingStartTimeStr = "WAITING_START_TIME"
	// MemBufferKeysStr is the column name of the TIDB_TRX table's MemBufferKeys column.
	MemBufferKeysStr = "MEM_BUFFER_KEYS"
	// MemBufferBytesStr is the column name of the TIDB_TRX table's MemBufferBytes column.
	MemBufferBytesStr = "MEM_BUFFER_BYTES"
	// SessionIDStr is the column name of the TIDB_TRX table's SessionID column.
	SessionIDStr = "SESSION_ID"
	// UserStr is the column name of the TIDB_TRX table's User column.
	UserStr = "USER"
	// DBStr is the column name of the TIDB_TRX table's DB column.
	DBStr = "DB"
	// AllSQLDigestsStr is the column name of the TIDB_TRX table's AllSQLDigests column.
	AllSQLDigestsStr = "ALL_SQL_DIGESTS"
	// RelatedTableIDsStr is the table id of the TIDB_TRX table's RelatedTableIDs column.
	RelatedTableIDsStr = "RELATED_TABLE_IDS"
	// WaitingTimeStr is the column name of the TIDB_TRX table's WaitingTime column.
	WaitingTimeStr = "WAITING_TIME"
)
// TxnRunningStateStrs is the names of the TxnRunningStates.
// The order must match the TxnRunningState constants: the STATE column
// getter builds an enum with value State+1, i.e. entry i names state i.
var TxnRunningStateStrs = []string{
	"Idle", "Running", "LockWaiting", "Committing", "RollingBack",
}
// TxnInfo is information about a running transaction
// This is supposed to be the datasource of `TIDB_TRX` in infoschema
type TxnInfo struct {
	// The following fields are immutable and can be safely read across threads.
	StartTS uint64
	// Digest of SQL currently running
	CurrentSQLDigest string
	// Digests of all SQLs executed in the transaction.
	AllSQLDigests []string
	// The following fields are mutable and needs to be read or written by atomic operations. But since only the
	// transaction's thread can modify its value, it's ok for the transaction's thread to read it without atomic
	// operations.
	// Current execution state of the transaction.
	State TxnRunningState
	// When last time `State` changes, for metrics
	LastStateChangeTime time.Time
	// Last trying to block start time. Invalid if State is not TxnLockAcquiring.
	// Valid reports whether the embedded Time holds a meaningful value.
	BlockStartTime struct {
		Valid bool
		time.Time
	}
	// How many entries are in MemDB
	EntriesCount uint64
	// The following fields will be filled in `session` instead of `LazyTxn`
	// Which session this transaction belongs to
	ConnectionID uint64
	// The user who opened this session
	Username string
	// The schema this transaction works on
	CurrentDB string
	// The related table IDs.
	RelatedTableIDs map[int64]struct{}
}
// columnValueGetterMap maps each TIDB_TRX column name to a function that
// extracts that column's value from a TxnInfo as a types.Datum.
// Columns without an entry here (e.g. CurrentSQLDigestTextStr,
// MemBufferBytesStr) are rendered as NULL by ToDatum.
var columnValueGetterMap = map[string]func(*TxnInfo) types.Datum{
	IDStr: func(info *TxnInfo) types.Datum {
		return types.NewDatum(info.StartTS)
	},
	StartTimeStr: func(info *TxnInfo) types.Datum {
		// The wall-clock start time is the physical component of the TSO.
		humanReadableStartTime := time.UnixMilli(oracle.ExtractPhysical(info.StartTS))
		return types.NewDatum(types.NewTime(types.FromGoTime(humanReadableStartTime), mysql.TypeTimestamp, types.MaxFsp))
	},
	CurrentSQLDigestStr: func(info *TxnInfo) types.Datum {
		if len(info.CurrentSQLDigest) != 0 {
			return types.NewDatum(info.CurrentSQLDigest)
		}
		// No statement currently running -> NULL.
		return types.NewDatum(nil)
	},
	StateStr: func(info *TxnInfo) types.Datum {
		// MySQL enum values are 1-based, hence State+1.
		e, err := types.ParseEnumValue(TxnRunningStateStrs, uint64(info.State+1))
		if err != nil {
			panic("this should never happen")
		}
		state := types.NewMysqlEnumDatum(e)
		return state
	},
	WaitingStartTimeStr: func(info *TxnInfo) types.Datum {
		if !info.BlockStartTime.Valid {
			return types.NewDatum(nil)
		}
		return types.NewDatum(types.NewTime(types.FromGoTime(info.BlockStartTime.Time), mysql.TypeTimestamp, types.MaxFsp))
	},
	MemBufferKeysStr: func(info *TxnInfo) types.Datum {
		return types.NewDatum(info.EntriesCount)
	},
	SessionIDStr: func(info *TxnInfo) types.Datum {
		return types.NewDatum(info.ConnectionID)
	},
	UserStr: func(info *TxnInfo) types.Datum {
		return types.NewDatum(info.Username)
	},
	DBStr: func(info *TxnInfo) types.Datum {
		return types.NewDatum(info.CurrentDB)
	},
	AllSQLDigestsStr: func(info *TxnInfo) types.Datum {
		allSQLDigests := info.AllSQLDigests
		// Replace nil with empty array
		if allSQLDigests == nil {
			allSQLDigests = []string{}
		}
		res, err := json.Marshal(allSQLDigests)
		if err != nil {
			logutil.BgLogger().Warn("Failed to marshal sql digests list as json", zap.Uint64("txnStartTS", info.StartTS))
			return types.NewDatum(nil)
		}
		return types.NewDatum(string(res))
	},
	RelatedTableIDsStr: func(info *TxnInfo) types.Datum {
		// Render the table IDs as a comma-separated list.
		// Note: map iteration order is random, so the output order is
		// unspecified.
		relatedTableIDs := info.RelatedTableIDs
		str := strings.Builder{}
		first := true
		for tblID := range relatedTableIDs {
			if !first {
				str.WriteString(",")
			} else {
				first = false
			}
			str.WriteString(fmt.Sprintf("%d", tblID))
		}
		return types.NewDatum(str.String())
	},
	WaitingTimeStr: func(info *TxnInfo) types.Datum {
		if !info.BlockStartTime.Valid {
			return types.NewDatum(nil)
		}
		return types.NewFloat64Datum(time.Since(info.BlockStartTime.Time).Seconds())
	},
}
// ToDatum Converts the `TxnInfo`'s specified column to `Datum` to show in the
// `TIDB_TRX` table. Unknown columns are rendered as NULL.
func (info *TxnInfo) ToDatum(column string) types.Datum {
	if getter, ok := columnValueGetterMap[column]; ok {
		return getter(info)
	}
	return types.NewDatum(nil)
}
|
package main
import (
"log"
"net"
)
// Controller is an interface representing a controller that can handle incoming events
// and add new clients
type Controller interface {
	// HandleEvent parses and processes one raw event string.
	HandleEvent(string) error
	// Reset clears buffered state so a new event stream can start.
	Reset()
	// AddUserClient registers a new client connection.
	AddUserClient(UserClient)
}
// ForwarderController connects the domain model and the notification logic.
// This is the class that receives the message, translates it into the domain,
// and retrieves the response from the domain and notifies the clients
type ForwarderController struct {
	// Users holds the known users and their state.
	Users UserCollection
	// Notifier pushes messages out to connected clients.
	Notifier Notifier
	// pendingMessages buffers out-of-order messages until their turn.
	pendingMessages MessageQueue
	// lastDeliveredID is the sequence number (msg.Timestamp) of the last
	// dispatched message; see isNext.
	lastDeliveredID int
}
// NewForwarderController creates a controller with the default collaborators:
// a fresh user collection, a TCP notifier, an empty queue and no delivered
// messages yet.
func NewForwarderController() *ForwarderController {
	fw := &ForwarderController{}
	fw.Users = NewUserCollection()
	fw.Notifier = NewTCPNotifier()
	fw.pendingMessages = MessageQueue{}
	fw.lastDeliveredID = 0
	return fw
}
// UserClient represents an incoming UserClient
type UserClient struct {
	// Conn is the client's network connection.
	Conn net.Conn
	// ID identifies the user this connection belongs to.
	ID string
}
// HandleEvent handles an incoming message from the pipe: it parses the raw
// event and, on success, feeds the resulting message into the controller.
func (fw *ForwarderController) HandleEvent(evt string) error {
	msg, err := Parse(evt)
	if err == nil {
		fw.ProcessMessage(msg)
	}
	return err
}
// Reset resets the queue and message id so a fresh event stream can begin.
func (fw *ForwarderController) Reset() {
	fw.lastDeliveredID = 0
	fw.pendingMessages = MessageQueue{}
}
// AddUserClient creates the user in our data store and passes the ID to the notifier
func (fw *ForwarderController) AddUserClient(uc UserClient) {
	id := uc.ID
	fw.Users.GetOrCreateUser(id)
	fw.Notifier.AddClient(uc)
}
// ProcessMessage handles a new message: it is queued first, then the queue is
// flushed so any in-order messages (including this one) get dispatched.
func (fw *ForwarderController) ProcessMessage(msg Message) {
	fw.pendingMessages.Enqueue(msg)
	fw.flushPendingQueue()
}
// isNext determines whether a message is the next message to be handled in
// order, i.e. its Timestamp is exactly one past the last delivered one.
func (fw *ForwarderController) isNext(msg Message) bool {
	return msg.Timestamp == fw.lastDeliveredID+1
}
// dispatchMessage handles an individual message. It passes it to the domain,
// then asks the notifier to notify all affected client connections.
func (fw *ForwarderController) dispatchMessage(msg Message) {
	fw.lastDeliveredID = msg.Timestamp
	users, err := fw.Users.UpdateAndGetNotifiees(msg)
	if err != nil {
		// The domain rejected the message; log and drop it.
		log.Println(err)
		return
	}
	for _, u := range users {
		fw.Notifier.Notify(u.ID, msg)
	}
}
// flushPendingQueue dispatches queued messages for as long as the head of the
// queue is the next message in sequence.
func (fw *ForwarderController) flushPendingQueue() {
	for fw.pendingMessages.Any() && fw.isNext(fw.pendingMessages.Peek()) {
		msg := fw.pendingMessages.Dequeue()
		fw.dispatchMessage(msg)
	}
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
)
// main connects to the local sakila database and prints the row count of the
// actor table.
//
// NOTE(review): credentials are hard-coded in the DSN; consider reading them
// from the environment.
func main() {
	db, err := sql.Open("mysql", "root:ResAdmin14@tcp(127.0.0.1:3306)/sakila")
	if err != nil {
		panic(err.Error())
	}
	// defer the close till after the main function has finished
	// executing
	defer db.Close()
	rows, err := db.Query("SELECT COUNT(*) as count_actor FROM actor")
	if err != nil {
		// A failed query was previously ignored silently; report it.
		panic(err.Error())
	}
	// Close the result set so its connection is returned to the pool.
	defer rows.Close()
	var countActor int64
	for rows.Next() {
		if err := rows.Scan(&countActor); err != nil {
			panic(err.Error())
		}
		fmt.Println(countActor)
	}
	// Surface any error encountered during iteration.
	if err := rows.Err(); err != nil {
		panic(err.Error())
	}
}
|
package app
import (
"github.com/martini-contrib/render"
"github.com/martini-contrib/sessions"
)
// homepage sends authenticated users to the dashboard and everyone else to
// the login page.
func homepage(r render.Render, s sessions.Session) {
	if s.Get("user") == nil {
		r.Redirect("/login")
		return
	}
	r.Redirect("/dashboard")
}
// dashboard renders the dashboard template with HTTP 200.
func dashboard(r render.Render) {
	r.HTML(200, "dashboard", nil)
}
// logout clears the session and sends the user back to the login page.
func logout(r render.Render, s sessions.Session) {
	s.Clear()
	r.Redirect("/login")
}
|
package basic
import (
"fmt"
)
// bitOperation demonstrates Go's bitwise operators (NOT, XOR, AND, AND NOT)
// on two example bytes.
func bitOperation() {
	var a uint8 = 0x82
	var b uint8 = 0x02
	fmt.Printf("%08b [A]\n", a)
	fmt.Printf("%08b [B]\n", b)
	fmt.Printf("%08b (NOT B)\n", ^b) // unary ^ is bitwise NOT in Go
	/*
		XOR: a result bit is 1 only when the two input bits differ
	*/
	fmt.Printf("%08b ^ %08b = %08b [B XOR 0xff]\n", b, 0xff, b^0xff)
	fmt.Printf("%08b ^ %08b = %08b [A XOR B]\n", a, b, a^b)
	fmt.Printf("%08b & %08b = %08b [A AND B]\n", a, b, a&b)
	fmt.Printf("%08b &^%08b = %08b [A 'AND NOT' B]\n", a, b, a&^b)
	// a &^ b is equivalent to a & (^b), as the next line shows.
	fmt.Printf("%08b&(^%08b)= %08b [A AND (NOT B)]\n", a, b, a&(^b))
}
// bitPriority demonstrates that Go's operator precedence for bitwise
// operators differs from C/C++: in Go, &, << etc. bind tighter than + and -.
func bitPriority() {
	fmt.Printf("0x2 & 0x2 + 0x4 -> %#x\n", 0x2&0x2+0x4)
	//prints: 0x2 & 0x2 + 0x4 -> 0x6
	//Go: (0x2 & 0x2) + 0x4 — in Go, & binds tighter than +
	//C++: 0x2 & (0x2 + 0x4) -> 0x2
	fmt.Printf("0x2 + 0x2 << 0x1 -> %#x\n", 0x2+0x2<<0x1)
	//prints: 0x2 + 0x2 << 0x1 -> 0x6
	//Go: 0x2 + (0x2 << 0x1) — in Go, << binds tighter than +
	//C++: (0x2 + 0x2) << 0x1 -> 0x8
	fmt.Printf("0xf | 0x2 ^ 0x2 -> %#x\n", 0xf|0x2^0x2)
	//prints: 0xf | 0x2 ^ 0x2 -> 0xd
	//Go: (0xf | 0x2) ^ 0x2 — | and ^ share a precedence level, left to right
	//C++: 0xf | (0x2 ^ 0x2) -> 0xf
}
// BitOperation runs all the bitwise-operator demos in this file.
func BitOperation() {
	bitOperation()
	bitPriority()
}
|
// +build !windows

package dht
import "syscall"
// curFileLimit returns the current (soft) limit on open file descriptors for
// this process, or 0 if the limit cannot be queried.
func curFileLimit() uint64 {
	var n syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &n); err != nil {
		// Previously the error was dropped and the zero value returned
		// implicitly; make that fallback explicit.
		return 0
	}
	// cast because some platforms use int64 (e.g., freebsd)
	return uint64(n.Cur)
}
|
package message
import (
"bytes"
"encoding/gob"
"github.com/hashicorp/memberlist"
"log"
)
// BroadcastMsg adapts a P2PMessage to memberlist's Broadcast interface.
type BroadcastMsg struct {
	Msg P2PMessage
}
// Finished is invoked by memberlist once the broadcast has been transmitted
// (or invalidated); there is nothing to clean up here.
func (bm BroadcastMsg) Finished() {
}
// Invalidates reports whether this broadcast supersedes b. Always false, so
// no queued broadcast is ever replaced.
func (bm BroadcastMsg) Invalidates(b memberlist.Broadcast) bool {
	// todo figure out what that does
	return false
}
// Message serializes the wrapped P2PMessage for broadcasting. On encoding
// failure it logs the error and returns an empty payload.
func (bm BroadcastMsg) Message() []byte {
	encoded, err := EncodeMessage(bm.Msg)
	if err != nil {
		log.Println(err)
		encoded = []byte{}
	}
	return encoded
}
// Broadcast message Payload types. P2PMessageType tags a P2PMessage so the
// receiver knows how to decode its Payload.
type P2PMessageType int

const (
	SeedAnnounceMsg P2PMessageType = iota
	MetadataRequestMsg
	MetadataResponseMsg
	AnnounceSegmentMsg
	ExchangeSegmentRequestMsg
	ExchangeSegmentResponseMsg
	ClusterChangeMsg
	NodeClusterStatusMsg
)
// P2PMessage is the envelope for every broadcast payload: a type tag plus the
// gob-encoded payload bytes.
type P2PMessage struct {
	MsgType P2PMessageType
	Payload []byte
}
// SeedFileAnnounce announces that a node started seeding a file.
type SeedFileAnnounce struct {
	Filename string
	NodeName string
	Timestamp int64
}
// AnnounceSegment advertises that a node holds the given segment number.
type AnnounceSegment struct {
	NodeName string
	SegmentNum int
}
// Segment carries one segment's raw bytes with its index and owning node.
type Segment struct {
	NodeName string
	Idx int
	Segment []byte
}
// ClusterChange records a node's cluster/segment assignment.
type ClusterChange struct {
	NodeName string
	Cluster int
}
// NodeClusterStatus is a snapshot of every known node's cluster assignment.
type NodeClusterStatus struct {
	NodeClusterList []ClusterChange
}
// EncodeMessage gob-encodes an arbitrary value and returns the resulting
// bytes along with any encoding error.
func EncodeMessage(msg interface{}) ([]byte, error) {
	var out bytes.Buffer
	err := gob.NewEncoder(&out).Encode(msg)
	return out.Bytes(), err
}
// DecodeP2PMessage gob-decodes b into a P2PMessage envelope.
func DecodeP2PMessage(b []byte) (P2PMessage, error) {
	var msg P2PMessage
	bufDecoder := bytes.NewBuffer(b)
	decoder := gob.NewDecoder(bufDecoder)
	err := decoder.Decode(&msg)
	if err != nil {
		return P2PMessage{}, err
	}
	return msg, nil
}
// DecodeSeedAnnounce gob-decodes b into a SeedFileAnnounce payload.
func DecodeSeedAnnounce(b []byte) (SeedFileAnnounce, error) {
	var msg SeedFileAnnounce
	bufDecoder := bytes.NewBuffer(b)
	decoder := gob.NewDecoder(bufDecoder)
	err := decoder.Decode(&msg)
	if err != nil {
		return SeedFileAnnounce{}, err
	}
	return msg, nil
}
// DecodeAnnounceSegment gob-decodes b into an AnnounceSegment payload.
func DecodeAnnounceSegment(b []byte) (AnnounceSegment, error) {
	var msg AnnounceSegment
	bufDecoder := bytes.NewBuffer(b)
	decoder := gob.NewDecoder(bufDecoder)
	err := decoder.Decode(&msg)
	if err != nil {
		return AnnounceSegment{}, err
	}
	return msg, nil
}
// DecodeTradeSegmentSegment gob-decodes b into a Segment payload.
// (The duplicated word in the name is kept for API compatibility.)
func DecodeTradeSegmentSegment(b []byte) (Segment, error) {
	var msg Segment
	bufDecoder := bytes.NewBuffer(b)
	decoder := gob.NewDecoder(bufDecoder)
	err := decoder.Decode(&msg)
	if err != nil {
		return Segment{}, err
	}
	return msg, nil
}
// DecodeClusterChange gob-decodes b into a ClusterChange payload.
func DecodeClusterChange(b []byte) (ClusterChange, error) {
	var msg ClusterChange
	bufDecoder := bytes.NewBuffer(b)
	decoder := gob.NewDecoder(bufDecoder)
	err := decoder.Decode(&msg)
	if err != nil {
		return ClusterChange{}, err
	}
	return msg, nil
}
// DecodeNodeClusterStatus gob-decodes b into a NodeClusterStatus payload.
func DecodeNodeClusterStatus(b []byte) (NodeClusterStatus, error) {
	var msg NodeClusterStatus
	bufDecoder := bytes.NewBuffer(b)
	decoder := gob.NewDecoder(bufDecoder)
	err := decoder.Decode(&msg)
	if err != nil {
		return NodeClusterStatus{}, err
	}
	return msg, nil
}
// CreateSegment builds a Segment value from its parts.
func CreateSegment(nodeName string, idx int, payload []byte) Segment {
	return Segment{
		NodeName: nodeName,
		Idx: idx,
		Segment: payload,
	}
}
// CreateP2PMsg gob-encodes msg into a P2PMessage envelope. MsgType is left at
// its zero value; callers are expected to set it afterwards (see
// CreateSegmentRequest / CreateSegmentResponse).
func CreateP2PMsg(msg interface{}) (P2PMessage, error) {
	byt, err := EncodeMessage(msg)
	if err != nil {
		return P2PMessage{}, err
	}
	return P2PMessage{
		Payload: byt,
	}, nil
}
// CreateNodeClusterStatusMsg builds a NodeClusterStatus snapshot from a map
// of node name to segment/cluster number.
func CreateNodeClusterStatusMsg(nodeToSegNum map[string]int) NodeClusterStatus {
	changes := make([]ClusterChange, 0, len(nodeToSegNum))
	for name, seg := range nodeToSegNum {
		changes = append(changes, ClusterChange{NodeName: name, Cluster: seg})
	}
	return NodeClusterStatus{NodeClusterList: changes}
}
// CreateSegmentRequest wraps a segment payload in a P2PMessage tagged as a
// segment-exchange request.
func CreateSegmentRequest(nodeName string, idx int, payload []byte) (P2PMessage, error) {
	segment := CreateSegment(nodeName, idx, payload)
	p2pMsg, err := CreateP2PMsg(segment)
	if err != nil {
		return P2PMessage{}, err
	}
	p2pMsg.MsgType = ExchangeSegmentRequestMsg
	// err is necessarily nil at this point.
	return p2pMsg, err
}
// CreateSegmentResponse wraps a segment payload in a P2PMessage tagged as a
// segment-exchange response.
func CreateSegmentResponse(nodeName string, idx int, payload []byte) (P2PMessage, error) {
	msg, err := CreateP2PMsg(CreateSegment(nodeName, idx, payload))
	if err != nil {
		return P2PMessage{}, err
	}
	msg.MsgType = ExchangeSegmentResponseMsg
	return msg, nil
}
|
package main
import (
"github.com/MathisBurger/yb-http/config"
"github.com/MathisBurger/yb-http/installation"
"github.com/MathisBurger/yb-http/routing"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/logger"
)
// main installs required files, loads the configuration, and serves the HTTP
// redirect handler on port 80.
func main() {
	installation.Install()
	config.LoadConfigurations()
	app := fiber.New()
	app.Use(logger.New())
	app.Get("/*", routing.Redirect)
	// Start Server. A listen failure (e.g. port 80 already bound or missing
	// privileges) was previously discarded silently; fail loudly instead.
	if err := app.Listen(":80"); err != nil {
		panic(err)
	}
}
|
// Author: Sankar <sankar.curiosity@gmail.com>
// Distributed under Creative Commons Zero License - Public Domain
// For more information see LICENSE file
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
"unicode"
)
// main reads a word list (path given as the sole CLI argument), filters out
// words unsuitable for word grids (Tamil numerals/symbols, grandham letters,
// non-Tamil words, and words ending in certain consonants), then writes the
// 3-letter words to three.txt and 4-letter words to four.txt.
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	const usage = `Usage: classifyWords <Path-to-the-words-file>
The program will generate two files in $PWD named three.txt and four.txt
`
	if len(os.Args) != 2 {
		// usage contains no format verbs: use Fprint (go vet flags Fprintf
		// called with a non-format string).
		fmt.Fprint(os.Stderr, usage)
		os.Exit(1)
	}
	if strings.HasPrefix(os.Args[1], "-") {
		fmt.Fprint(os.Stderr, usage)
		return
	}
	var three, four []string
	// Assuming os.Args[1] points to a
	// valid file having a list of words
	fd, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer fd.Close()
	scanner := bufio.NewScanner(fd)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		l := scanner.Text()
		if !keepWord(l) {
			continue
		}
		// For now, we will worry about only 3x3 and 4x4 grids
		switch strlen(l) {
		case 3:
			three = append(three, l)
		case 4:
			four = append(four, l)
		}
	}
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	log.Println("Three letter words found: ", len(three))
	writeWords("three.txt", three)
	log.Println("Four letter words found: ", len(four))
	writeWords("four.txt", four)
}

// keepWord reports whether a word qualifies for the grid word lists.
func keepWord(l string) bool {
	// Skip words containing Tamil digits/symbols or '+'.
	if strings.ContainsAny(l, "ஃ௦௪௫௰௯௩௬௨௲௵௴௷௱+") {
		return false
	}
	// Skip words with grandham letters.
	for _, g := range []rune{'ஜ', 'ஷ', 'ஸ', 'ஹ'} {
		if strings.ContainsRune(l, g) {
			return false
		}
	}
	// Skip non-tamil words (if any).
	for _, r := range l {
		if !unicode.Is(unicode.Tamil, r) {
			return false
		}
	}
	// Skip words ending in some letters.
	for _, suffix := range []string{"க்", "ங்", "ச்", "ஞ்", "ட்", "த்", "ந்", "ப்", "வ்", "ற்"} {
		if strings.HasSuffix(l, suffix) {
			return false
		}
	}
	return true
}

// writeWords writes one word per line to the named file, aborting the program
// on any I/O error. This consolidates the duplicated three.txt/four.txt
// logic; the original also leaked the first file handle (no Close) and
// ignored Flush errors.
func writeWords(name string, words []string) {
	f, err := os.Create(name)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	w := bufio.NewWriter(f)
	for _, s := range words {
		if _, err := w.WriteString(s + "\n"); err != nil {
			log.Fatal(err)
		}
	}
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
}
// strlen counts the letters in s, where a letter is any rune that is not a
// combining mark (Unicode categories Mn, Me, Mc). Diacritics therefore do not
// add to the count, which matters for Tamil words.
func strlen(s string) int {
	count := 0
	for _, r := range s {
		if unicode.Is(unicode.Mn, r) || unicode.Is(unicode.Me, r) || unicode.Is(unicode.Mc, r) {
			continue
		}
		count++
	}
	return count
}
|
// Copyright 2020, Jeff Alder
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nr_yml
import (
"github.com/newrelic/go-agent/v3/newrelic"
"github.com/stretchr/testify/assert"
"testing"
)
// TestIndividualLists verifies that the attribute include/exclude lists from
// the "production" section of a newrelic yml file are applied to the agent
// config exactly (both membership and length).
func TestIndividualLists(t *testing.T) {
	withContents(`
production:
  attributes:
    enabled: true
    include:
      - attrib-include-1
      - attrib-include-2
    exclude:
      - attrib-exclude-1
      - attrib-exclude-2
`, t, func(filename string, t *testing.T) {
		cfg := new(newrelic.Config)
		ConfigFromYamlFile(filename)(cfg)
		assert.NoError(t, cfg.Error)
		assert.True(t, cfg.Attributes.Enabled)
		assert.Contains(t, cfg.Attributes.Include, "attrib-include-1")
		assert.Contains(t, cfg.Attributes.Include, "attrib-include-2")
		assert.Equal(t, 2, len(cfg.Attributes.Include))
		assert.Contains(t, cfg.Attributes.Exclude, "attrib-exclude-1")
		assert.Contains(t, cfg.Attributes.Exclude, "attrib-exclude-2")
		assert.Equal(t, 2, len(cfg.Attributes.Exclude))
	})
}
|
package glc
import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"time"
"github.com/golang/glog"
)
// status reports whether the cleaner goroutine has exited: set to false when
// the cleaner starts, back to true when it returns.
// NOTE(review): package-level, so this is shared by all GLC instances.
var status = true
// exit asks the cleaner goroutine to stop; set by Stop/Wait.
// NOTE(review): also package-level and shared across instances.
var exit = false
// GLC define the glog cleaner options:
//
//	path     - directory scanned for log files
//	prefix   - log file name prefix to match
//	interval - scanning interval between clean passes
//	reserve  - how long log files are kept before removal
//
type GLC struct {
	path string
	prefix string
	interval time.Duration
	reserve time.Duration
}
// InitOption define the glog cleaner init options for glc:
//
//	Path     - directory scanned for log files
//	Prefix   - log file name prefix to match
//	Interval - scanning interval between clean passes
//	Reserve  - how long log files are kept before removal
//
type InitOption struct {
	Path string
	Prefix string
	Interval time.Duration
	Reserve time.Duration
}
// NewGLC create a cleaner in a goroutine and do instantiation GLC by given
// init options.
func NewGLC(option InitOption) *GLC {
	glc := &GLC{
		path:     option.Path,
		prefix:   option.Prefix,
		interval: option.Interval,
		reserve:  option.Reserve,
	}
	go glc.cleaner()
	return glc
}
// clean performs a single cleaning pass: if the configured path exists, its
// directory listing is handed to check for expiry processing. Errors are
// logged and the pass is skipped.
func (c *GLC) clean() {
	exists, err := c.exists(c.path)
	if err != nil {
		glog.Errorln(err)
		return
	}
	if !exists {
		return
	}
	files, err := ioutil.ReadDir(c.path)
	if err != nil {
		glog.Errorln(err)
		return
	}
	c.check(files)
}
// exists reports whether the given file or directory exists. A non-nil error
// is returned only for Stat failures other than "not exist"; in that case the
// boolean is true, matching the original contract.
func (c *GLC) exists(path string) (bool, error) {
	_, err := os.Stat(path)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return true, err
	}
}
// check selects files that look like glog output — the name carries the
// configured prefix and splits into exactly 7 dot-separated parts (8 on
// Windows, where the executable name adds an ".exe" part) with "log" at the
// expected position — and passes each candidate to drop.
// NOTE(review): the 7-part / index-3 layout presumably matches glog's
// program.host.user.log.SEVERITY.date.pid naming — confirm against the glog
// version in use.
func (c *GLC) check(files []os.FileInfo) {
	n := 0
	if runtime.GOOS == `windows` {
		n = 1
	}
	for _, f := range files {
		prefix := strings.HasPrefix(f.Name(), c.prefix)
		str := strings.Split(f.Name(), `.`)
		if prefix && len(str) == 7+n && str[3+n] == `log` {
			c.drop(f)
		}
	}
}
// drop removes f if its modification time is older than the configured
// reserve duration; removal errors are logged.
// NOTE(review): the target path is built by plain concatenation, so c.path is
// expected to end with a path separator — consider filepath.Join.
func (c *GLC) drop(f os.FileInfo) {
	if time.Since(f.ModTime()) > c.reserve {
		err := os.Remove(c.path + f.Name())
		if err != nil {
			glog.Errorln(err)
		}
	}
}
// cleaner is the background loop: it flips the package-level status flag to
// "running", performs a clean pass every interval — sleeping in one-second
// slices so a Stop/Wait request is noticed promptly — and restores status
// when asked to exit.
func (c *GLC) cleaner() {
	status = false
	fmt.Println("cleaner routine start.")
	for !exit {
		c.clean()
		for i, s := time.Duration(0), c.interval/time.Second; i < s && !exit; i++ {
			time.Sleep(time.Second)
		}
	}
	fmt.Println("cleaner routine stop.")
	status = true
}
// Stop asks the cleaner routine to stop after its current pass; it does not
// wait for the routine to exit (use Wait for that).
func (c *GLC) Stop() {
	exit = true
}
// Wait asks the cleaner routine to stop (same flag as Stop) and then blocks,
// polling once per second, until the routine has exited.
func (c *GLC) Wait() {
	exit = true
	for !status {
		time.Sleep(time.Second)
	}
}
|
package trycopy
import (
"fmt"
)
// init announces package initialization; useful for demonstrating Go's
// package init order.
func init() {
	fmt.Println("package trycopy init()")
}
// countries returns an independent copy of the country list minus its last
// two entries.
func countries() []string {
	all := []string{"USA", "Singapore", "Germany", "India", "Australia"}
	needed := all[:len(all)-2]
	// Copy so the returned slice does not alias the original backing array.
	result := append([]string(nil), needed...)
	return result
}
//TryCopy prints results of copy(dst, src) func by calling countries() func
func TryCopy() {
	fmt.Println(countries())
}
|
package common
// Abs returns the absolute value of an int.
func Abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// Clamp limits v to the inclusive range [min, max].
func Clamp(v, min, max int) int {
	switch {
	case v < min:
		return min
	case v > max:
		return max
	default:
		return v
	}
}
// Sign returns -1, 0 or 1 according to the sign of a.
func Sign(a int) int {
	switch {
	case a < 0:
		return -1
	case a > 0:
		return 1
	}
	return 0
}
|
package core
import (
"fmt"
"math"
"pmvs/featdetect"
"sort"
"gonum.org/v1/gonum/mat"
)
// StartMatching runs the initial patch-construction pass: for every photo it
// tries to build a 3D patch seeded by each detected feature, considering only
// the images selected by getRelevantImages, and prints per-image patch counts.
func StartMatching() {
	fmt.Println("Initial Matching...")
	for id, photo := range imgsManager.Photos {
		num := 0
		relevantImgs := getRelevantImages(id)
		for _, featPool := range photo.Feats {
			for _, feat := range featPool {
				num += constructPatch(id, relevantImgs, feat)
			}
		}
		fmt.Println("done img", id, " patches ", num)
	}
}
// constructPatch attempts to create one surface patch seeded by feature feat
// of photo photoID. Matching features from the relevant images are
// triangulated, filtered by the visual hull, sorted by |depth1 - depth2|
// (most depth-consistent candidate first), and tried in order until a patch
// survives optimization with at least 3 supporting photos.
// Returns 1 if a patch was registered, 0 otherwise.
func constructPatch(photoID int, relevantImgs []int, feat *featdetect.Feature) int {
	// A cell that already holds a patch is considered done.
	cell := getCell(photoID, feat.Y, feat.X)
	if len(cell.Patches) != 0 {
		return 0
	}
	// FeatSort pairs a candidate feature with its triangulated 3D position
	// and the depth-consistency score used for sorting.
	type FeatSort struct {
		feature *featdetect.Feature
		photoID int
		relDepth float64
		pos3d *mat.VecDense
	}
	photo := imgsManager.Photos[photoID]
	opticalCenter := photo.OpticalCenter()
	relevantFeats, ids := getRelevantFeatures(feat, photoID, relevantImgs)
	featDataFiltered := make([]FeatSort, 0, len(relevantFeats))
	depthVector1, depthVector2 := mat.NewVecDense(4, nil), mat.NewVecDense(4, nil)
	for feat2Id, feat2 := range relevantFeats {
		// Skip candidates whose cell already has a patch.
		cell = getCell(ids[feat2Id], feat2.Y, feat2.X)
		if len(cell.Patches) != 0 {
			continue
		}
		photo2 := imgsManager.Photos[ids[feat2Id]]
		center := triangulate(feat.X, feat.Y, feat2.X, feat2.Y, photoID, ids[feat2Id])
		// Normalize the homogeneous coordinate (w -> 1).
		center.ScaleVec(1/center.AtVec(3), center)
		if !visualHullCheck(center) {
			continue
		}
		// relDepth = | dist(center, camera1) - dist(center, camera2) |;
		// smaller means the two views agree better on depth.
		depthVector1.SubVec(opticalCenter, center)
		depthVector2.SubVec(photo2.OpticalCenter(), center)
		depth1 := math.Sqrt(mat.Dot(depthVector1, depthVector1))
		depth2 := math.Sqrt(mat.Dot(depthVector2, depthVector2))
		relDepth := math.Abs(depth1 - depth2)
		ff := FeatSort{
			feat2, ids[feat2Id], relDepth, center,
		}
		featDataFiltered = append(featDataFiltered, ff)
	}
	sort.Slice(featDataFiltered, func(i, j int) bool {
		return featDataFiltered[i].relDepth < featDataFiltered[j].relDepth
	})
	patch := new(Patch)
	patch.Center = mat.NewVecDense(4, nil)
	patch.Normal = mat.NewVecDense(4, nil)
	patch.RefPhoto = photoID
	for _, ff := range featDataFiltered {
		// Initialize the patch at the candidate's position, with its normal
		// pointing towards the reference camera (unit length).
		patch.Center = ff.pos3d
		patch.Normal.SubVec(opticalCenter, patch.Center)
		patch.Normal.ScaleVec(1/math.Sqrt(mat.Dot(patch.Normal, patch.Normal)),
			patch.Normal)
		// Loose visibility threshold before optimization, stricter after.
		patch.TPhotos = constraintPhotos(patch, 0.6, relevantImgs)
		if len(patch.TPhotos) <= 1 {
			continue
		}
		optimizePatch(patch)
		patch.TPhotos = constraintPhotos(patch, 0.7, relevantImgs)
		if len(patch.TPhotos) >= 3 {
			registerPatch(patch)
			return 1
		}
	}
	return 0
}
|
package common
import (
"encoding/json"
"os"
)
// configuration is the application configuration loaded from config.json.
type configuration struct {
	Lang string `json:"lang"`
	Debug bool `json:"debug"`
	Address string `json:"address"`
	Port int `json:"port"`
	Rpc string `json:"rpc"`
	// Timeouts, presumably in seconds — confirm against the server setup.
	ReadTimeout int64 `json:"readTimeout"`
	WriteTimeout int64 `json:"writeTimeout"`
	IdleTimeout int64 `json:"idleTimeout"`
	Static string `json:"static"`
	Db *dbConfig `json:"db"`
	Mail *mailConfig `json:"mail"`
	Redirects *[]redirect `json:"redirects,omitempty"`
}
// dbConfig holds the database connection settings.
//
// NOTE: the original tags were unquoted (`json:driver`), which is malformed —
// encoding/json silently ignores such tags (and `go vet` flags them), so the
// fields fell back to case-insensitive field-name matching. Quoted tags make
// the intended lowercase JSON keys explicit; decoding of existing config
// files is unaffected because "driver" already matched Driver.
type dbConfig struct {
	Driver   string `json:"driver"`
	User     string `json:"user"`
	Password string `json:"password"`
	Dbname   string `json:"dbname"`
	Sslmode  string `json:"sslmode"`
}
// mailConfig holds the outgoing-mail (SMTP) settings.
type mailConfig struct {
	From string `json:"from"`
	Host string `json:"host"`
	Username string `json:"username"`
	Password string `json:"password"`
	Port int `json:"port"`
	// GotoUrl is the link target embedded in outgoing mails.
	GotoUrl string `json:"gotourl"`
}
// redirect describes one request-forwarding rule from the configuration.
type redirect struct {
	Prefix string `json:"prefix"`
	Protocol string `json:"protocol"`
	ReqUri string `json:"req_uri"`
	Port int `json:"port"`
}
// config is the process-wide configuration, populated by loadConfig and
// exposed via Env.
var config *configuration

// loadConfig reads and decodes config.json into the package-level config.
// Errors are reported through Danger.
func loadConfig() {
	file, err := os.Open("config.json")
	if err != nil {
		Danger("Cannot open config file", err)
	}
	// Release the handle once decoding is done; the original leaked it.
	// Close on a nil *os.File is safe (it just returns an error).
	defer file.Close()
	decoder := json.NewDecoder(file)
	config = &configuration{}
	err = decoder.Decode(config)
	if err != nil {
		Danger("Cannot get configuration from file", err)
	}
}
// Env returns the loaded configuration; nil until loadConfig has run.
func Env() *configuration {
	return config
}
|
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vpplink
import (
"fmt"
"net"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/interface_types"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/ip6_nd"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/types"
)
// DisableIP6RouterAdvertisements suppresses IPv6 router advertisements on the
// given VPP interface.
func (v *VppLink) DisableIP6RouterAdvertisements(swIfIndex uint32) error {
	client := ip6_nd.NewServiceClient(v.GetConnection())
	_, err := client.SwInterfaceIP6ndRaConfig(v.GetContext(), &ip6_nd.SwInterfaceIP6ndRaConfig{
		SwIfIndex: interface_types.InterfaceIndex(swIfIndex),
		Suppress:  1,
	})
	if err != nil {
		return fmt.Errorf("failed to disable IP6 ND RA (swif %d): %w", swIfIndex, err)
	}
	return nil
}
// EnableIP6NdProxy registers address as an IPv6 ND proxy entry on the given
// VPP interface and then enables ND proxying on that interface.
func (v *VppLink) EnableIP6NdProxy(swIfIndex uint32, address net.IP) error {
	client := ip6_nd.NewServiceClient(v.GetConnection())
	_, err := client.IP6ndProxyAddDel(v.GetContext(), &ip6_nd.IP6ndProxyAddDel{
		IsAdd:     true,
		IP:        types.ToVppIP6Address(address),
		SwIfIndex: interface_types.InterfaceIndex(swIfIndex),
	})
	if err != nil {
		return fmt.Errorf("failed to add IP6 ND Proxy address %v (swif %d): %w", address, swIfIndex, err)
	}
	// now disable source / dest checks for nd proxy
	_, err = client.IP6ndProxyEnableDisable(v.GetContext(), &ip6_nd.IP6ndProxyEnableDisable{
		IsEnable:  true,
		SwIfIndex: interface_types.InterfaceIndex(swIfIndex),
	})
	if err != nil {
		return fmt.Errorf("failed to enable IP6 ND Proxy (swif %d): %w", swIfIndex, err)
	}
	return nil
}
|
package srapi
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
)
// GetGameByID fetches a single game from the speedrun.com API by its ID.
func GetGameByID(id string) (*Game, error) {
	// Escape the id so user-supplied values cannot alter the request path.
	resp, err := http.Get(fmt.Sprintf("https://www.speedrun.com/api/v1/games/%s", url.PathEscape(id)))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Previously a non-200 response body was parsed as a game payload,
	// silently yielding (nil, nil); fail explicitly instead.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("speedrun.com returned status %s", resp.Status)
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var respData struct {
		Data *Game
	}
	if err := json.Unmarshal(data, &respData); err != nil {
		return nil, err
	}
	return respData.Data, nil
}
// GetGameByName searches speedrun.com for games matching the given name.
func GetGameByName(query string) ([]*Game, error) {
	resp, err := http.Get(fmt.Sprintf("https://www.speedrun.com/api/v1/games?name=%s", url.QueryEscape(query)))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Previously a non-200 response body was parsed as a result payload,
	// silently yielding (nil, nil); fail explicitly instead.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("speedrun.com returned status %s", resp.Status)
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var respData struct {
		Data []*Game
	}
	if err := json.Unmarshal(data, &respData); err != nil {
		return nil, err
	}
	return respData.Data, nil
}
|
package bucket
import (
"container/list"
"fmt"
"sync"
"time"
)
// TokenBucket represents a token bucket
// (https://en.wikipedia.org/wiki/Token_bucket) which based on multi goroutines,
// and is safe to use under concurrency environments.
type TokenBucket struct {
	// interval between token refills.
	interval time.Duration
	// ticker drives the refill daemon.
	ticker *time.Ticker
	// tokenMutex guards avail.
	tokenMutex *sync.Mutex
	// waitingQuqueMutex guards waitingQuque. (Misspelling kept: renaming the
	// field would require touching every method.)
	waitingQuqueMutex *sync.Mutex
	// waitingQuque is the FIFO of parked callers — presumably *waitingJob
	// entries (addWaitingJob is not visible here; confirm).
	waitingQuque *list.List
	// cap is the bucket capacity; avail the tokens currently available.
	cap int64
	avail int64
}
// waitingJob is a parked Take/Wait call: ch is signalled when `need` tokens
// are available, `use` is how many will actually be consumed (0 for Wait),
// and abandoned presumably marks timed-out jobs set by the max-duration
// variants (their handling is not visible here — confirm).
type waitingJob struct {
	ch chan struct{}
	need int64
	use int64
	abandoned bool
}
// New returns a new token bucket with specified fill interval and
// capability. The bucket is initially full.
//
// Both interval and cap must be strictly positive, as the panic messages have
// always stated. The original checks used `< 0`, letting an invalid zero
// slip through: time.NewTicker itself panics on a non-positive interval (with
// a less helpful message), and a zero-capacity bucket could never satisfy any
// request.
func New(interval time.Duration, cap int64) *TokenBucket {
	if interval <= 0 {
		panic(fmt.Sprintf("ratelimit: interval %v should > 0", interval))
	}
	if cap <= 0 {
		panic(fmt.Sprintf("ratelimit: capability %v should > 0", cap))
	}
	tb := &TokenBucket{
		interval:          interval,
		tokenMutex:        &sync.Mutex{},
		waitingQuqueMutex: &sync.Mutex{},
		waitingQuque:      list.New(),
		cap:               cap,
		avail:             cap,
		ticker:            time.NewTicker(interval),
	}
	go tb.adjustDaemon()
	return tb
}
// Capability returns the capability of this token bucket.
// cap is never modified after New, so no locking is needed here.
func (tb *TokenBucket) Capability() int64 {
	return tb.cap
}
// Availible returns how many tokens are currently available in the bucket.
// (The misspelling is part of the exported API and must stay.)
func (tb *TokenBucket) Availible() int64 {
	tb.tokenMutex.Lock()
	defer tb.tokenMutex.Unlock()
	return tb.avail
}
// TryTake tries to take the specified count of tokens from the bucket.
// If there are not enough tokens in the bucket it returns false.
// It never blocks: it either takes count tokens immediately or takes nothing.
func (tb *TokenBucket) TryTake(count int64) bool {
	return tb.tryTake(count, count)
}
// Take takes the specified count of tokens from the bucket. If there are
// not enough tokens in the bucket it keeps waiting until count tokens are
// available and then takes them.
func (tb *TokenBucket) Take(count int64) {
	tb.waitAndTake(count, count)
}
// TakeMaxDuration takes the specified count of tokens from the bucket. If
// there are not enough tokens it keeps waiting until count tokens are
// available and then takes them, or returns false when the given max
// duration is reached first.
func (tb *TokenBucket) TakeMaxDuration(count int64, max time.Duration) bool {
	return tb.waitAndTakeMaxDuration(count, count, max)
}
// Wait keeps waiting until count tokens are available in the bucket.
// Unlike Take, no tokens are consumed (use is 0).
func (tb *TokenBucket) Wait(count int64) {
	tb.waitAndTake(count, 0)
}
// WaitMaxDuration keeps waiting until count tokens are available in the
// bucket, or returns false when the given max duration is reached first.
// Unlike TakeMaxDuration, no tokens are consumed (use is 0).
func (tb *TokenBucket) WaitMaxDuration(count int64, max time.Duration) bool {
	return tb.waitAndTakeMaxDuration(count, 0, max)
}
// tryTake atomically checks whether need tokens are available; if so it
// deducts use tokens and reports success, otherwise it leaves the bucket
// untouched and reports failure.
func (tb *TokenBucket) tryTake(need, use int64) bool {
	tb.checkCount(use)
	tb.tokenMutex.Lock()
	defer tb.tokenMutex.Unlock()
	if tb.avail < need {
		return false
	}
	tb.avail -= use
	return true
}
// waitAndTake first attempts a non-blocking take; if that fails it enqueues
// a waitingJob and blocks until the refill daemon signals that need tokens
// are available, deducts use tokens, and completes the handshake by sending
// back on the same channel so the daemon can proceed.
func (tb *TokenBucket) waitAndTake(need, use int64) {
	if ok := tb.tryTake(need, use); ok {
		return
	}
	w := &waitingJob{
		ch:   make(chan struct{}),
		use:  use,
		need: need,
	}
	tb.addWaitingJob(w)
	// Block until the daemon signals that need tokens have accrued.
	<-w.ch
	// NOTE(review): avail is decremented without holding tokenMutex; this
	// appears to rely on the daemon holding tokenMutex for the whole
	// send/receive handshake so no other goroutine can touch avail — confirm.
	tb.avail -= use
	w.ch <- struct{}{}
	close(w.ch)
}
// waitAndTakeMaxDuration behaves like waitAndTake but gives up and returns
// false once max elapses; on success it returns true.
//
// NOTE(review): on timeout the deferred close(w.ch) closes the channel while
// the job may still be reachable by the daemon. abandoned is written here
// without synchronization, so if the daemon reads it as false just before
// the timeout fires it may send on a closed channel and panic — this path
// likely needs a redesign (e.g. daemon-side removal); confirm.
func (tb *TokenBucket) waitAndTakeMaxDuration(need, use int64, max time.Duration) bool {
	if ok := tb.tryTake(need, use); ok {
		return true
	}
	w := &waitingJob{
		ch:   make(chan struct{}),
		use:  use,
		need: need,
	}
	defer close(w.ch)
	tb.addWaitingJob(w)
	select {
	case <-w.ch:
		// Daemon says need tokens are available: deduct and ack (see the
		// locking caveat on waitAndTake).
		tb.avail -= use
		w.ch <- struct{}{}
		return true
	case <-time.After(max):
		// Mark the job so the daemon skips it instead of signalling.
		w.abandoned = true
		return false
	}
}
// Destory destorys the token bucket and stop the inner channels.
// NOTE(review): "Destory" is a misspelling of "Destroy" kept for API
// compatibility. Only the ticker is stopped here; goroutines already parked
// in Take/Wait will never be signalled afterwards — confirm callers accept
// that.
func (tb *TokenBucket) Destory() {
	tb.ticker.Stop()
}
// adjustDaemon is the background refill loop: on every tick it adds one
// token (capped at cap) and, while holding tokenMutex, services the head of
// the waiting queue via the two-step channel handshake with
// waitAndTake/waitAndTakeMaxDuration.
func (tb *TokenBucket) adjustDaemon() {
	// The job currently being serviced; kept across ticks until satisfied
	// or abandoned.
	var waitingJobNow *waitingJob
	for range tb.ticker.C {
		tb.tokenMutex.Lock()
		if tb.avail < tb.cap {
			tb.avail++
		}
		element := tb.getFrontWaitingJob()
		if element != nil {
			// Adopt the next queued job once the previous one is done
			// (nil) or has timed out (abandoned).
			if waitingJobNow == nil || waitingJobNow.abandoned {
				waitingJobNow = element.Value.(*waitingJob)
				tb.removeWaitingJob(element)
			}
			if tb.avail >= waitingJobNow.need && !waitingJobNow.abandoned {
				// Wake the taker, then wait for its ack; tokenMutex is held
				// across the whole handshake. NOTE(review): see the race
				// caveat on waitAndTakeMaxDuration — an abandoned flag set
				// between this check and the send can panic on a closed
				// channel; confirm.
				waitingJobNow.ch <- struct{}{}
				<-waitingJobNow.ch
				waitingJobNow = nil
			}
		}
		tb.tokenMutex.Unlock()
	}
}
// addWaitingJob appends w to the tail of the waiting queue.
func (tb *TokenBucket) addWaitingJob(w *waitingJob) {
	tb.waitingQuqueMutex.Lock()
	defer tb.waitingQuqueMutex.Unlock()
	tb.waitingQuque.PushBack(w)
}
// getFrontWaitingJob returns the head element of the waiting queue, or nil
// when the queue is empty.
func (tb *TokenBucket) getFrontWaitingJob() *list.Element {
	tb.waitingQuqueMutex.Lock()
	defer tb.waitingQuqueMutex.Unlock()
	return tb.waitingQuque.Front()
}
// removeWaitingJob deletes e from the waiting queue.
func (tb *TokenBucket) removeWaitingJob(e *list.Element) {
	tb.waitingQuqueMutex.Lock()
	defer tb.waitingQuqueMutex.Unlock()
	tb.waitingQuque.Remove(e)
}
// checkCount panics when count is negative or exceeds the bucket's
// capability, since such a request could never be satisfied.
func (tb *TokenBucket) checkCount(count int64) {
	if count >= 0 && count <= tb.cap {
		return
	}
	panic(fmt.Sprintf("token-bucket: count %v should be less than bucket's"+
		" capablity %v", count, tb.cap))
}
|
package log_test
import (
"Edwardz43/tgbot/log"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestEmitThenReturnSuccess verifies that emitting a well-formed log
// content entry returns no error.
func TestEmitThenReturnSuccess(t *testing.T) {
	content := &log.Content{
		Level:   "Info",
		Message: "Test ES Log",
		Date:    time.Now(),
		Caller:  "zaplogger/zaplogger.go:104",
	}
	assert.Nil(t, log.Emit(content))
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"testing"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util"
"github.com/stretchr/testify/require"
)
// TestExplainFor exercises EXPLAIN FOR CONNECTION: root can inspect another
// connection's last plan, the output gains runtime columns once
// tidb_enable_collect_execution_info is on, a non-privileged user is denied,
// an unknown connection ID errors, and a connection with a nil plan does
// not fail the statement.
func TestExplainFor(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tkRoot := testkit.NewTestKit(t, store)
	tkUser := testkit.NewTestKit(t, store)
	tkRoot.MustExec("use test")
	tkRoot.MustExec("drop table if exists t1, t2;")
	tkRoot.MustExec("create table t1(c1 int, c2 int)")
	tkRoot.MustExec("create table t2(c1 int, c2 int)")
	tkRoot.MustExec("create user tu@'%'")
	tkRoot.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost", CurrentUser: true, AuthUsername: "root", AuthHostname: "%"}, nil, []byte("012345678901234567890"), nil)
	tkUser.Session().Auth(&auth.UserIdentity{Username: "tu", Hostname: "localhost", CurrentUser: true, AuthUsername: "tu", AuthHostname: "%"}, nil, []byte("012345678901234567890"), nil)
	// With execution-info collection disabled, the output is the plain
	// EXPLAIN shape.
	tkRoot.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tkRoot.MustQuery("select * from t1;")
	tkRootProcess := tkRoot.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkRootProcess}
	tkRoot.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tkUser.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tkRoot.MustQuery(fmt.Sprintf("explain for connection %d", tkRootProcess.ID)).Check(testkit.Rows(
		"TableReader_5 10000.00 root data:TableFullScan_4",
		"└─TableFullScan_4 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
	))
	tkRoot.MustExec("set @@tidb_enable_collect_execution_info=1;")
	// check re-reads the root session's process info and asserts the
	// 9-column runtime output of EXPLAIN FOR CONNECTION against a regexp.
	check := func() {
		tkRootProcess = tkRoot.Session().ShowProcess()
		ps = []*util.ProcessInfo{tkRootProcess}
		tkRoot.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
		tkUser.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
		rows := tkRoot.MustQuery(fmt.Sprintf("explain for connection %d", tkRootProcess.ID)).Rows()
		require.Len(t, rows, 2)
		require.Len(t, rows[0], 9)
		// Flatten the rows into one newline/space-separated string so a
		// single regexp can match the whole plan.
		buf := bytes.NewBuffer(nil)
		for i, row := range rows {
			if i > 0 {
				buf.WriteString("\n")
			}
			for j, v := range row {
				if j > 0 {
					buf.WriteString(" ")
				}
				buf.WriteString(fmt.Sprintf("%v", v))
			}
		}
		require.Regexp(t, "TableReader_5 10000.00 0 root time:.*, loops:1,( RU:.*,)? cop_task: {num:.*, max:.*, proc_keys:.* rpc_num: 1, rpc_time:.*} data:TableFullScan_4 N/A N/A\n"+
			"└─TableFullScan_4 10000.00 0 cop.* table:t1 tikv_task:{time:.*, loops:0} keep order:false, stats:pseudo N/A N/A",
			buf.String())
	}
	tkRoot.MustQuery("select * from t1;")
	check()
	tkRoot.MustQuery("explain analyze select * from t1;")
	check()
	// A non-privileged user is denied; an unknown connection ID errors.
	err := tkUser.ExecToErr(fmt.Sprintf("explain for connection %d", tkRootProcess.ID))
	require.True(t, core.ErrAccessDenied.Equal(err))
	err = tkUser.ExecToErr("explain for connection 42")
	require.True(t, core.ErrNoSuchThread.Equal(err))
	// A connection whose plan is nil must not make the statement fail.
	tkRootProcess.Plan = nil
	ps = []*util.ProcessInfo{tkRootProcess}
	tkRoot.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tkRoot.MustExec(fmt.Sprintf("explain for connection %d", tkRootProcess.ID))
}
// TestExplainForVerbose checks that EXPLAIN FORMAT='verbose' FOR CONNECTION
// matches a direct EXPLAIN FORMAT='verbose' row-by-row, and that enabling
// tidb_enable_collect_execution_info widens the FOR CONNECTION output from
// 6 to 10 columns while keeping the first three columns equal.
func TestExplainForVerbose(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1(id int);")
	tk.MustQuery("select * from t1;")
	tkRootProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkRootProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tk2.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	// With collection off, both forms must agree exactly.
	rs := tk.MustQuery("explain format = 'verbose' select * from t1").Rows()
	rs2 := tk2.MustQuery(fmt.Sprintf("explain format = 'verbose' for connection %d", tkRootProcess.ID)).Rows()
	require.Len(t, rs, len(rs2))
	for i := range rs {
		require.Equal(t, rs2[i], rs[i])
	}
	// With collection on, FOR CONNECTION gains runtime columns.
	tk.MustExec("set @@tidb_enable_collect_execution_info=1;")
	tk.MustExec("drop table if exists t2")
	tk.MustExec("create table t2(id int);")
	tk.MustQuery("select * from t2;")
	tkRootProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkRootProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tk2.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	rs = tk.MustQuery("explain format = 'verbose' select * from t2").Rows()
	rs2 = tk2.MustQuery(fmt.Sprintf("explain format = 'verbose' for connection %d", tkRootProcess.ID)).Rows()
	require.Len(t, rs, len(rs2))
	for i := range rs {
		// "id", "estRows", "estCost", "task", "access object", "operator info"
		require.Len(t, rs[i], 6)
		// "id", "estRows", "estCost", "actRows", "task", "access object", "execution info", "operator info", "memory", "disk"
		require.Len(t, rs2[i], 10)
		for j := 0; j < 3; j++ {
			require.Equal(t, rs2[i][j], rs[i][j])
		}
	}
	// The format name is accepted case-insensitively, with and without ANALYZE.
	tk.MustQuery("explain format = 'verbose' select * from t1").Rows()
	tk.MustQuery("explain format = 'VERBOSE' select * from t1").Rows()
	tk.MustQuery("explain analyze format = 'verbose' select * from t1").Rows()
	tk.MustQuery("explain analyze format = 'VERBOSE' select * from t1").Rows()
}
// TestIssue11124 verifies that EXPLAIN FOR CONNECTION reproduces the same
// plan rows as a direct EXPLAIN for the query from issue #11124 (a LEFT
// JOIN filtered through a CASE expression).
func TestIssue11124(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists kankan1")
	tk.MustExec("drop table if exists kankan2")
	tk.MustExec("create table kankan1(id int, name text);")
	tk.MustExec("create table kankan2(id int, h1 text);")
	tk.MustExec("insert into kankan1 values(1, 'a'), (2, 'a');")
	tk.MustExec("insert into kankan2 values(2, 'z');")
	// Run the query first so its plan becomes the connection's last plan.
	tk.MustQuery("select t1.id from kankan1 t1 left join kankan2 t2 on t1.id = t2.id where (case when t1.name='b' then 'case2' when t1.name='a' then 'case1' else NULL end) = 'case1'")
	tkRootProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkRootProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tk2.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	rs := tk.MustQuery("explain select t1.id from kankan1 t1 left join kankan2 t2 on t1.id = t2.id where (case when t1.name='b' then 'case2' when t1.name='a' then 'case1' else NULL end) = 'case1'").Rows()
	rs2 := tk2.MustQuery(fmt.Sprintf("explain for connection %d", tkRootProcess.ID)).Rows()
	for i := range rs {
		require.Equal(t, rs2[i], rs[i])
	}
}
// TestExplainMemTablePredicate checks that time ranges, log levels and node
// types in predicates over metrics-schema and information_schema memory
// tables are extracted into the MemTableScan operator info shown by EXPLAIN.
func TestExplainMemTablePredicate(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustQuery("desc select * from METRICS_SCHEMA.tidb_query_duration where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ").Check(testkit.Rows(
		"MemTableScan_5 10000.00 root table:tidb_query_duration PromQL:histogram_quantile(0.9, sum(rate(tidb_server_handle_query_duration_seconds_bucket{}[60s])) by (le,sql_type,instance)), start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, step:1m0s"))
	tk.MustQuery("desc select * from METRICS_SCHEMA.up where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ").Check(testkit.Rows(
		"MemTableScan_5 10000.00 root table:up PromQL:up{}, start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, step:1m0s"))
	tk.MustQuery("desc select * from information_schema.cluster_log where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows(
		"MemTableScan_5 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13"))
	tk.MustQuery("desc select * from information_schema.cluster_log where level in ('warn','error') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows(
		`MemTableScan_5 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, log_levels:["error","warn"]`))
	tk.MustQuery("desc select * from information_schema.cluster_log where type in ('high_cpu_1','high_memory_1') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows(
		`MemTableScan_5 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, node_types:["high_cpu_1","high_memory_1"]`))
	// Without a time predicate, slow_query only reads the current log file.
	tk.MustQuery("desc select * from information_schema.slow_query").Check(testkit.Rows(
		"MemTableScan_4 10000.00 root table:SLOW_QUERY only search in the current 'tidb-slow.log' file"))
	tk.MustQuery("desc select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows(
		"MemTableScan_5 10000.00 root table:SLOW_QUERY start_time:2019-12-23 16:10:13.000000, end_time:2019-12-23 16:30:13.000000"))
	// The rendered slow_query range is unchanged after switching the
	// session time zone to UTC.
	tk.MustExec("set @@time_zone = '+00:00';")
	tk.MustQuery("desc select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows(
		"MemTableScan_5 10000.00 root table:SLOW_QUERY start_time:2019-12-23 16:10:13.000000, end_time:2019-12-23 16:30:13.000000"))
}
// TestExplainClusterTable checks that node-type and instance predicates on
// information_schema.cluster_config are pushed down into the MemTableScan
// operator info.
func TestExplainClusterTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	cases := []struct {
		sql      string
		expected string
	}{
		{
			sql:      "desc select * from information_schema.cluster_config where type in ('tikv', 'tidb')",
			expected: `MemTableScan_5 10000.00 root table:CLUSTER_CONFIG node_types:["tidb","tikv"]`,
		},
		{
			sql:      "desc select * from information_schema.cluster_config where instance='192.168.1.7:2379'",
			expected: `MemTableScan_5 10000.00 root table:CLUSTER_CONFIG instances:["192.168.1.7:2379"]`,
		},
		{
			sql:      "desc select * from information_schema.cluster_config where type='tidb' and instance='192.168.1.7:2379'",
			expected: `MemTableScan_5 10000.00 root table:CLUSTER_CONFIG node_types:["tidb"], instances:["192.168.1.7:2379"]`,
		},
	}
	for _, c := range cases {
		tk.MustQuery(c.sql).Check(testkit.Rows(c.expected))
	}
}
// TestInspectionResultTable checks how rule/item predicates on
// information_schema.inspection_result appear in the MemTableScan operator
// info, including the skip_inspection case for contradictory equalities.
func TestInspectionResultTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	cases := []struct {
		sql      string
		expected string
	}{
		{
			sql:      "desc select * from information_schema.inspection_result where rule = 'ddl' and rule = 'config'",
			expected: `MemTableScan_5 10000.00 root table:INSPECTION_RESULT skip_inspection:true`,
		},
		{
			sql:      "desc select * from information_schema.inspection_result where rule in ('ddl', 'config')",
			expected: `MemTableScan_5 10000.00 root table:INSPECTION_RESULT rules:["config","ddl"], items:[]`,
		},
		{
			sql:      "desc select * from information_schema.inspection_result where item in ('ddl.lease', 'raftstore.threadpool')",
			expected: `MemTableScan_5 10000.00 root table:INSPECTION_RESULT rules:[], items:["ddl.lease","raftstore.threadpool"]`,
		},
		{
			sql:      "desc select * from information_schema.inspection_result where item in ('ddl.lease', 'raftstore.threadpool') and rule in ('ddl', 'config')",
			expected: `MemTableScan_5 10000.00 root table:INSPECTION_RESULT rules:["config","ddl"], items:["ddl.lease","raftstore.threadpool"]`,
		},
	}
	for _, c := range cases {
		tk.MustQuery(c.sql).Check(testkit.Rows(c.expected))
	}
}
// TestInspectionRuleTable checks how type predicates on
// information_schema.inspection_rules appear in the MemTableScan operator
// info, including skip_request for contradictory equalities.
func TestInspectionRuleTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	cases := []struct {
		sql      string
		expected string
	}{
		{
			sql:      "desc select * from information_schema.inspection_rules where type='inspection'",
			expected: `MemTableScan_5 10000.00 root table:INSPECTION_RULES node_types:["inspection"]`,
		},
		{
			sql:      "desc select * from information_schema.inspection_rules where type='inspection' or type='summary'",
			expected: `MemTableScan_5 10000.00 root table:INSPECTION_RULES node_types:["inspection","summary"]`,
		},
		{
			sql:      "desc select * from information_schema.inspection_rules where type='inspection' and type='summary'",
			expected: `MemTableScan_5 10000.00 root table:INSPECTION_RULES skip_request: true`,
		},
	}
	for _, c := range cases {
		tk.MustQuery(c.sql).Check(testkit.Rows(c.expected))
	}
}
// TestExplainForConnPlanCache runs EXPLAIN FOR CONNECTION against a session
// executing a cached prepared statement, both once and concurrently from a
// second session. Currently skipped as unstable.
func TestExplainForConnPlanCache(t *testing.T) {
	t.Skip("unstable")
	store := testkit.CreateMockStore(t)
	tk1 := testkit.NewTestKit(t, store)
	tk1.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use test")
	tk1.MustExec("use test")
	tk1.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk1.MustExec("drop table if exists t")
	tk1.MustExec("create table t(a int)")
	tk1.MustExec("prepare stmt from 'select * from t where a = ?'")
	tk1.MustExec("set @p0='1'")
	executeQuery := "execute stmt using @p0"
	explainQuery := "explain for connection " + strconv.FormatUint(tk1.Session().ShowProcess().ID, 10)
	explainResult := testkit.Rows(
		"TableReader_7 10.00 root data:Selection_6",
		"└─Selection_6 10.00 cop[tikv] eq(test.t.a, 1)",
		" └─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo",
	)
	// Now the ProcessInfo held by mockSessionManager1 will not be updated in real time.
	// So it needs to be reset every time before tk2 query.
	// TODO: replace mockSessionManager1 with another mockSessionManager.
	// single test
	tk1.MustExec(executeQuery)
	tk2.Session().SetSessionManager(&testkit.MockSessionManager{
		PS: []*util.ProcessInfo{tk1.Session().ShowProcess()},
	})
	tk2.MustQuery(explainQuery).Check(explainResult)
	tk1.MustExec(executeQuery)
	// The plan can not be cached because the string type parameter will be convert to int type for calculation.
	tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	// multiple test, '1000' is both effective and efficient.
	repeats := 1000
	var wg util.WaitGroupWrapper
	// One goroutine keeps executing the prepared statement while the other
	// repeatedly explains the same connection.
	wg.Run(func() {
		for i := 0; i < repeats; i++ {
			tk1.MustExec(executeQuery)
		}
	})
	wg.Run(func() {
		for i := 0; i < repeats; i++ {
			tk2.Session().SetSessionManager(&testkit.MockSessionManager{
				PS: []*util.ProcessInfo{tk1.Session().ShowProcess()},
			})
			tk2.MustQuery(explainQuery).Check(explainResult)
		}
	})
	wg.Wait()
}
// TestSavedPlanPanicPlanCache executes a cached prepared plan over a table
// with a stored generated column inside a transaction, then checks that
// inserting an explicit value for the generated column still raises planner
// error 3105 rather than being masked by the cached plan.
func TestSavedPlanPanicPlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, c int generated always as (a+b) stored)")
	tk.MustExec("insert into t(a,b) values(1,1)")
	tk.MustExec("begin")
	tk.MustExec("update t set b = 2 where a = 1")
	tk.MustExec("prepare stmt from 'select b from t where a > ?'")
	tk.MustExec("set @p = 0")
	// First execution sees the in-transaction update (b = 2).
	tk.MustQuery("execute stmt using @p").Check(testkit.Rows(
		"2",
	))
	tk.MustExec("set @p = 1")
	tk.MustQuery("execute stmt using @p").Check(testkit.Rows())
	err := tk.ExecToErr("insert into t(a,b,c) values(3,3,3)")
	require.EqualError(t, err, "[planner:3105]The value specified for generated column 'c' in table 't' is not allowed.")
}
// TestExplainDotForExplainPlan checks that EXPLAIN FORMAT="dot" FOR
// CONNECTION returns no rows when the connection's last statement was
// itself an EXPLAIN.
func TestExplainDotForExplainPlan(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	rows := tk.MustQuery("select connection_id()").Rows()
	require.Len(t, rows, 1)
	connID := rows[0][0].(string)
	tk.MustQuery("explain format = 'brief' select 1").Check(testkit.Rows(
		"Projection 1.00 root 1->Column#1",
		"└─TableDual 1.00 root rows:1",
	))
	tkProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	// The last plan is an EXPLAIN statement, so the dot output is empty.
	tk.MustQuery(fmt.Sprintf("explain format=\"dot\" for connection %s", connID)).Check(nil)
}
// TestExplainDotForQuery compares EXPLAIN FORMAT="dot" FOR CONNECTION output
// against a direct EXPLAIN FORMAT="dot" of the same query, row by row.
func TestExplainDotForQuery(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	rows := tk.MustQuery("select connection_id()").Rows()
	require.Len(t, rows, 1)
	connID := rows[0][0].(string)
	// Run the query so its plan becomes the connection's last plan.
	tk.MustQuery("select 1")
	tkProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	expected := tk2.MustQuery("explain format=\"dot\" select 1").Rows()
	got := tk.MustQuery(fmt.Sprintf("explain format=\"dot\" for connection %s", connID)).Rows()
	for i := range got {
		require.Equal(t, expected[i], got[i])
	}
}
// TestExplainTableStorage checks that schema and table predicates on
// information_schema.TABLE_STORAGE_STATS are pushed down into the
// MemTableScan operator info.
func TestExplainTableStorage(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	cases := []struct {
		sql      string
		expected string
	}{
		{
			sql:      "desc select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema'",
			expected: "MemTableScan_5 10000.00 root table:TABLE_STORAGE_STATS schema:[\"information_schema\"]",
		},
		{
			sql:      "desc select * from information_schema.TABLE_STORAGE_STATS where TABLE_NAME = 'schemata'",
			expected: "MemTableScan_5 10000.00 root table:TABLE_STORAGE_STATS table:[\"schemata\"]",
		},
		{
			sql:      "desc select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema' and TABLE_NAME = 'schemata'",
			expected: "MemTableScan_5 10000.00 root table:TABLE_STORAGE_STATS schema:[\"information_schema\"], table:[\"schemata\"]",
		},
	}
	for _, c := range cases {
		tk.MustQuery(c.sql).Check(testkit.Rows(c.expected))
	}
}
// TestInspectionSummaryTable checks how rule/metrics_name/quantile
// predicates on information_schema.inspection_summary are pushed into the
// MemTableScan operator info: equalities and IN lists are combined (union
// across OR, intersection across AND), and mutually exclusive conditions
// degrade to skip_inspection.
func TestInspectionSummaryTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustQuery("desc select * from information_schema.inspection_summary where rule='ddl'").Check(testkit.Rows(
		`Selection_5 8000.00 root eq(Column#1, "ddl")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where 'ddl'=rule or rule='config'").Check(testkit.Rows(
		`Selection_5 8000.00 root or(eq("ddl", Column#1), eq(Column#1, "config"))`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config","ddl"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where 'ddl'=rule or rule='config' or rule='slow_query'").Check(testkit.Rows(
		`Selection_5 8000.00 root or(eq("ddl", Column#1), or(eq(Column#1, "config"), eq(Column#1, "slow_query")))`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config","ddl","slow_query"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where (rule='config' or rule='slow_query') and (metrics_name='metric_name3' or metrics_name='metric_name1')").Check(testkit.Rows(
		`Selection_5 8000.00 root or(eq(Column#1, "config"), eq(Column#1, "slow_query")), or(eq(Column#3, "metric_name3"), eq(Column#3, "metric_name1"))`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config","slow_query"], metric_names:["metric_name1","metric_name3"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query')").Check(testkit.Rows(
		`Selection_5 8000.00 root in(Column#1, "ddl", "slow_query")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name='metric_name1'").Check(testkit.Rows(
		`Selection_5 8000.00 root eq(Column#3, "metric_name1"), in(Column#1, "ddl", "slow_query")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"], metric_names:["metric_name1"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name in ('metric_name1', 'metric_name2')").Check(testkit.Rows(
		`Selection_5 8000.00 root in(Column#1, "ddl", "slow_query"), in(Column#3, "metric_name1", "metric_name2")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"], metric_names:["metric_name1","metric_name2"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where rule='ddl' and metrics_name in ('metric_name1', 'metric_name2')").Check(testkit.Rows(
		`Selection_5 8000.00 root eq(Column#1, "ddl"), in(Column#3, "metric_name1", "metric_name2")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"], metric_names:["metric_name1","metric_name2"]`,
	))
	// The extracted metric name is lower-cased even though the Selection
	// keeps the original casing.
	tk.MustQuery("desc select * from information_schema.inspection_summary where rule='ddl' and metrics_name='metric_NAME3'").Check(testkit.Rows(
		`Selection_5 8000.00 root eq(Column#1, "ddl"), eq(Column#3, "metric_NAME3")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"], metric_names:["metric_name3"]`,
	))
	// Two IN lists on the same column intersect.
	tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'config') and rule in ('slow_query', 'config')").Check(testkit.Rows(
		`Selection_5 8000.00 root in(Column#1, "ddl", "config"), in(Column#1, "slow_query", "config")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config"]`,
	))
	tk.MustQuery("desc select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and rule in ('ddl', 'config') and rule in ('slow_query', 'config') and quantile in (0.80, 0.90)").Check(testkit.Rows(
		`Selection_5 8000.00 root in(Column#1, "ddl", "config"), in(Column#1, "slow_query", "config"), in(Column#3, "metric_name1", "metric_name4"), in(Column#3, "metric_name5", "metric_name4")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config"], metric_names:["metric_name4"], quantiles:[0.800000,0.900000]`,
	))
	// An empty intersection means nothing can match: skip_inspection.
	tk.MustQuery("desc select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name1') and metrics_name in ('metric_name1', 'metric_name3')").Check(testkit.Rows(
		`Selection_5 8000.00 root in(Column#3, "metric_name1", "metric_name3"), in(Column#3, "metric_name1", "metric_name4"), in(Column#3, "metric_name5", "metric_name1"), in(Column#3, "metric_name5", "metric_name4")`,
		`└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY skip_inspection: true`,
	))
}
// TestExplainTiFlashSystemTables checks that instance/database/table
// predicates on information_schema.TIFLASH_TABLES and TIFLASH_SEGMENTS are
// pushed down into the MemTableScan operator info. Each predicate is
// checked against both system tables, in the same order for each.
func TestExplainTiFlashSystemTables(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tiflashInstance := "192.168.1.7:3930"
	database := "test"
	table := "t"
	predicates := []struct {
		where string // WHERE clause of the desc'd query
		info  string // pushed-down part expected in the operator info
	}{
		{
			where: fmt.Sprintf("TIFLASH_INSTANCE = '%s'", tiflashInstance),
			info:  fmt.Sprintf("tiflash_instances:[\"%s\"]", tiflashInstance),
		},
		{
			where: fmt.Sprintf("TIDB_DATABASE = '%s'", database),
			info:  fmt.Sprintf("tidb_databases:[\"%s\"]", database),
		},
		{
			where: fmt.Sprintf("TIDB_TABLE = '%s'", table),
			info:  fmt.Sprintf("tidb_tables:[\"%s\"]", table),
		},
		{
			where: fmt.Sprintf("TIFLASH_INSTANCE = '%s' and TIDB_DATABASE = '%s' and TIDB_TABLE = '%s'", tiflashInstance, database, table),
			info:  fmt.Sprintf("tiflash_instances:[\"%s\"], tidb_databases:[\"%s\"], tidb_tables:[\"%s\"]", tiflashInstance, database, table),
		},
	}
	for _, p := range predicates {
		for _, sysTable := range []string{"TIFLASH_TABLES", "TIFLASH_SEGMENTS"} {
			tk.MustQuery(fmt.Sprintf("desc select * from information_schema.%s where %s", sysTable, p.where)).Check(testkit.Rows(
				fmt.Sprintf("MemTableScan_5 10000.00 root table:%s %s", sysTable, p.info)))
		}
	}
}
// TestPointGetUserVarPlanCache verifies via EXPLAIN FOR CONNECTION that a
// cached prepared plan joining t1 and t2 on a parameter keeps using the
// Point_Get on idx_a, and that the TableRangeScan range follows the new
// parameter value on re-execution.
func TestPointGetUserVarPlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tmp := testkit.NewTestKit(t, store)
	tmp.MustExec("set tidb_enable_prepared_plan_cache=ON")
	tk := testkit.NewTestKit(t, store)
	tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost", CurrentUser: true, AuthUsername: "root", AuthHostname: "%"}, nil, []byte("012345678901234567890"), nil)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
	tk.MustExec("drop table if exists t1")
	tk.MustExec("CREATE TABLE t1 (a BIGINT, b VARCHAR(40), PRIMARY KEY (a, b))")
	tk.MustExec("INSERT INTO t1 VALUES (1,'3'),(2,'4')")
	tk.MustExec("drop table if exists t2")
	tk.MustExec("CREATE TABLE t2 (a BIGINT, b VARCHAR(40), UNIQUE KEY idx_a (a))")
	tk.MustExec("INSERT INTO t2 VALUES (1,'1'),(2,'2')")
	tk.MustExec("prepare stmt from 'select * from t1, t2 where t1.a = t2.a and t2.a = ?'")
	tk.MustExec("set @a=1")
	tk.MustQuery("execute stmt using @a").Check(testkit.Rows(
		"1 3 1 1",
	))
	tkProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use idx_a
		`Projection_9 10.00 root test.t1.a, test.t1.b, test.t2.a, test.t2.b`,
		`└─HashJoin_11 10.00 root CARTESIAN inner join`,
		` ├─Point_Get_12(Build) 1.00 root table:t2, index:idx_a(a) `, // use idx_a
		` └─TableReader_14(Probe) 10.00 root data:TableRangeScan_13`,
		` └─TableRangeScan_13 10.00 cop[tikv] table:t1 range:[1,1], keep order:false, stats:pseudo`))
	// Re-execute with a different parameter: same shape, new range [2,2].
	tk.MustExec("set @a=2")
	tk.MustQuery("execute stmt using @a").Check(testkit.Rows(
		"2 4 2 2",
	))
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use idx_a
		`Projection_9 10.00 root test.t1.a, test.t1.b, test.t2.a, test.t2.b`,
		`└─HashJoin_11 10.00 root CARTESIAN inner join`,
		` ├─Point_Get_12(Build) 1.00 root table:t2, index:idx_a(a) `,
		` └─TableReader_14(Probe) 10.00 root data:TableRangeScan_13`,
		` └─TableRangeScan_13 10.00 cop[tikv] table:t1 range:[2,2], keep order:false, stats:pseudo`))
	tk.MustQuery("execute stmt using @a").Check(testkit.Rows(
		"2 4 2 2",
	))
}
// TestExpressionIndexPreparePlanCache checks via EXPLAIN FOR CONNECTION
// that a prepared statement filtering on a+b keeps using the expression
// index, and that the scanned range tracks the current parameter value.
func TestExpressionIndexPreparePlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, key ((a+b)));")
	tk.MustExec("prepare stmt from 'select * from t where a+b = ?'")
	tk.MustExec("set @a = 123")
	tk.MustExec("execute stmt using @a")
	tkProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*expression_index.*", res.Rows()[2][3])
	require.Regexp(t, ".*[123,123].*", res.Rows()[2][4])
	// Re-execute with another parameter: still the expression index, with
	// the range following the new value.
	tk.MustExec("set @a = 1234")
	tk.MustExec("execute stmt using @a")
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*expression_index.*", res.Rows()[2][3])
	require.Regexp(t, ".*[1234,1234].*", res.Rows()[2][4])
}
// TestIssue28259 exercises the prepared-plan cache with "between ? and ? or < ?"
// predicates across several access paths (IndexReader range scan, IndexLookUp,
// clustered-index TableReader). For each path it first executes with parameters
// that produce a non-degenerate range, then with parameters where the BETWEEN
// bounds are inverted (empty range); it asserts whether the cached plan is
// reused (@@last_plan_from_cache) and that the plan shape shown by
// "explain for connection" is the expected one for the new parameters.
func TestIssue28259(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	// test for indexRange
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists UK_GCOL_VIRTUAL_18588;")
	tk.MustExec("CREATE TABLE `UK_GCOL_VIRTUAL_18588` (`COL1` bigint(20), UNIQUE KEY `UK_COL1` (`COL1`)" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;")
	tk.MustExec("insert into UK_GCOL_VIRTUAL_18588 values('8502658334322817163');")
	tk.MustExec(`prepare stmt from 'select col1 from UK_GCOL_VIRTUAL_18588 where col1 between ? and ? or col1 < ?';`)
	tk.MustExec("set @a=5516958330762833919, @b=8551969118506051323, @c=2887622822023883594;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows("8502658334322817163"))
	// A session manager exposing this session's process info is needed for
	// "explain for connection".
	tkProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 2)
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[1][0])
	// Inverted BETWEEN bounds (@a > @b): the range degenerates, and the cached
	// plan is not reused for these parameters.
	tk.MustExec("set @a=-1696020282760139948, @b=-2619168038882941276, @c=-4004648990067362699;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 3)
	require.Regexp(t, ".*Selection.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexFullScan.*", res.Rows()[2][0])
	// Sanity check: a fresh (non-prepared) plan for the same constants has the
	// same shape as what the prepared path produced above.
	res = tk.MustQuery("explain format = 'brief' select col1 from UK_GCOL_VIRTUAL_18588 use index(UK_COL1) " +
		"where col1 between -1696020282760139948 and -2619168038882941276 or col1 < -4004648990067362699;")
	require.Len(t, res.Rows(), 3)
	require.Regexp(t, ".*Selection.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexFullScan.*", res.Rows()[2][0])
	res = tk.MustQuery("explain format = 'brief' select col1 from UK_GCOL_VIRTUAL_18588 use index(UK_COL1) " +
		"where col1 between 5516958330762833919 and 8551969118506051323 or col1 < 2887622822023883594;")
	require.Len(t, res.Rows(), 2)
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[1][0])
	// Same pattern on a composite index (a, b): the residual b < 1 condition
	// stays as a Selection above the IndexRangeScan.
	tk.MustExec("drop table if exists t;")
	tk.MustExec("CREATE TABLE t (a int, b int, index idx(a, b));")
	tk.MustExec("insert into t values(1, 0);")
	tk.MustExec(`prepare stmt from 'select a from t where (a between ? and ? or a < ?) and b < 1;'`)
	tk.MustExec("set @a=0, @b=2, @c=2;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows("1"))
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*IndexReader.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[3][0])
	// Inverted bounds again: no cache hit on the first execution with the new
	// parameters.
	tk.MustExec("set @a=2, @b=1, @c=1;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*Selection.*", res.Rows()[2][0])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[3][0])
	// Non-prepared plans for both parameter sets for comparison.
	res = tk.MustQuery("explain format = 'brief' select a from t use index(idx) " +
		"where (a between 0 and 2 or a < 2) and b < 1;")
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*IndexReader.*", res.Rows()[1][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[2][0])
	require.Equal(t, "lt(test.t.b, 1)", res.Rows()[2][4])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[3][0])
	res = tk.MustQuery("explain format = 'brief' select a from t use index(idx) " +
		"where (a between 2 and 1 or a < 1) and b < 1;")
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*IndexReader.*", res.Rows()[1][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[2][0])
	require.Equal(t, "lt(test.t.b, 1)", res.Rows()[2][4])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[3][0])
	// test for indexLookUp
	tk.MustExec("drop table if exists t;")
	tk.MustExec("CREATE TABLE t (a int, b int, index idx(a));")
	tk.MustExec("insert into t values(1, 0);")
	tk.MustExec(`prepare stmt from 'select /*+ USE_INDEX(t, idx) */ a from t where (a between ? and ? or a < ?) and b < 1;'`)
	tk.MustExec("set @a=0, @b=2, @c=2;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows("1"))
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	// Expected shape: IndexLookUp with the range scan on the index side and the
	// b < 1 Selection on the table side.
	require.Len(t, res.Rows(), 5)
	require.Regexp(t, ".*IndexLookUp.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[2][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[3][0])
	require.Regexp(t, ".*TableRowIDScan.*", res.Rows()[4][0])
	tk.MustExec("set @a=2, @b=1, @c=1;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 5)
	require.Regexp(t, ".*IndexLookUp.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[2][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[3][0])
	require.Regexp(t, ".*TableRowIDScan.*", res.Rows()[4][0])
	res = tk.MustQuery("explain format = 'brief' select /*+ USE_INDEX(t, idx) */ a from t use index(idx) " +
		"where (a between 0 and 2 or a < 2) and b < 1;")
	require.Len(t, res.Rows(), 5)
	require.Regexp(t, ".*IndexLookUp.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[2][0])
	res = tk.MustQuery("explain format = 'brief' select /*+ USE_INDEX(t, idx) */ a from t use index(idx) " +
		"where (a between 2 and 1 or a < 1) and b < 1;")
	require.Len(t, res.Rows(), 5)
	require.Regexp(t, ".*IndexLookUp.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[2][0])
	// test for tableReader
	tk.MustExec("drop table if exists t;")
	tk.MustExec("CREATE TABLE t (a int PRIMARY KEY CLUSTERED, b int);")
	tk.MustExec("insert into t values(1, 0);")
	tk.MustExec(`prepare stmt from 'select a from t where (a between ? and ? or a < ?) and b < 1;'`)
	tk.MustExec("set @a=0, @b=2, @c=2;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows("1"))
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*TableReader.*", res.Rows()[1][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[2][0])
	require.Regexp(t, ".*TableRangeScan.*", res.Rows()[3][0])
	tk.MustExec("set @a=2, @b=1, @c=1;")
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	// Unlike the index cases above, the clustered-index TableReader plan IS
	// reused for the inverted bounds (cache hit expected here).
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @a,@b,@c;").Check(testkit.Rows())
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*TableReader.*", res.Rows()[1][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[2][0])
	require.Regexp(t, ".*TableRangeScan.*", res.Rows()[3][0])
	res = tk.MustQuery("explain format = 'brief' select a from t " +
		"where (a between 0 and 2 or a < 2) and b < 1;")
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*TableReader.*", res.Rows()[1][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[2][0])
	require.Equal(t, "lt(test.t.b, 1)", res.Rows()[2][4])
	require.Regexp(t, ".*TableRangeScan.*", res.Rows()[3][0])
	res = tk.MustQuery("explain format = 'brief' select a from t " +
		"where (a between 2 and 1 or a < 1) and b < 1;")
	require.Len(t, res.Rows(), 4)
	require.Regexp(t, ".*TableReader.*", res.Rows()[1][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[2][0])
	require.Equal(t, "lt(test.t.b, 1)", res.Rows()[2][4])
	require.Regexp(t, ".*TableRangeScan.*", res.Rows()[3][0])
	// A more complex OR-of-ANDs predicate over a clustered primary key: still a
	// single TableReader + Selection + TableRangeScan.
	tk.MustExec("drop table if exists t;")
	tk.MustExec("CREATE TABLE t (a int primary key, b int, c int, d int);")
	tk.MustExec(`prepare stmt from 'select * from t where ((a > ? and a < 5 and b > 2) or (a > ? and a < 10 and c > 3)) and d = 5;';`)
	tk.MustExec("set @a=1, @b=8;")
	tk.MustQuery("execute stmt using @a,@b;").Check(testkit.Rows())
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 3)
	require.Regexp(t, ".*TableReader.*", res.Rows()[0][0])
	require.Regexp(t, ".*Selection.*", res.Rows()[1][0])
	require.Regexp(t, ".*TableRangeScan.*", res.Rows()[2][0])
}
// TestIssue28696 verifies that an equality predicate on a column with a prefix
// unique index (b(3)) is planned as an IndexLookUp: the index range scan on the
// prefix, then a Selection over the TableRowIDScan to re-check the full value.
// The same shape must appear for both the prepared and the plain statement.
func TestIssue28696(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(a int primary key, b varchar(255), c int);")
	tk.MustExec("create unique index b on t1(b(3));")
	tk.MustExec("insert into t1 values(1,'abcdfsafd',1),(2,'addfdsafd',2),(3,'ddcdsaf',3),(4,'bbcsa',4);")
	tk.MustExec(`prepare stmt from "select a from t1 where b = ?";`)
	tk.MustExec("set @a='bbcsa';")
	tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("4"))

	// Both explain outputs below must show the same 5-row prefix-index plan:
	// IndexLookUp -> (IndexRangeScan, Selection -> TableRowIDScan).
	assertPrefixIndexPlan := func(res *testkit.Result) {
		require.Len(t, res.Rows(), 5)
		require.Regexp(t, ".*IndexLookUp.*", res.Rows()[1][0])
		require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[2][0])
		require.Regexp(t, ".*Selection.*", res.Rows()[3][0])
		require.Regexp(t, ".*TableRowIDScan.*", res.Rows()[4][0])
	}

	// "explain for connection" needs a session manager exposing this session.
	proc := tk.Session().ShowProcess()
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: []*util.ProcessInfo{proc}})
	assertPrefixIndexPlan(tk.MustQuery(fmt.Sprintf("explain for connection %d", proc.ID)))
	assertPrefixIndexPlan(tk.MustQuery("explain format = 'brief' select a from t1 where b = 'bbcsa';"))
}
// TestIndexMerge4PlanCache exercises IndexMerge plans together with the
// prepared-plan cache: cache reuse across parameter sets, the ranges shown by
// "explain for connection", clustered-index and prefix-index variants, and
// cases where the cache must NOT be reused (degenerate ranges, generated
// columns).
func TestIndexMerge4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists IDT_MULTI15858STROBJSTROBJ;")
	tk.MustExec("CREATE TABLE `IDT_MULTI15858STROBJSTROBJ` (" +
		"`COL1` enum('aa','bb','cc','dd','ee','ff','gg','hh','ii','mm') DEFAULT NULL," +
		"`COL2` int(41) DEFAULT NULL," +
		"`COL3` datetime DEFAULT NULL," +
		"KEY `U_M_COL4` (`COL1`,`COL2`)," +
		"KEY `U_M_COL5` (`COL3`,`COL2`)" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;")
	tk.MustExec("insert into IDT_MULTI15858STROBJSTROBJ values('aa', 1333053589,'1037-12-26 01:38:52');")
	tk.MustExec("set tidb_enable_index_merge=on;")
	tk.MustExec("prepare stmt from 'select * from IDT_MULTI15858STROBJSTROBJ where col2 <> ? and col1 not in (?, ?, ?) or col3 = ? order by 2;';")
	tk.MustExec("set @a=2134549621, @b='aa', @c='aa', @d='aa', @e='9941-07-07 01:08:48';")
	tk.MustQuery("execute stmt using @a,@b,@c,@d,@e;").Check(testkit.Rows())
	// Second parameter set: expect a cache hit, and the explain output must
	// show the ranges rebuilt for the NEW parameters ('mm', 0198-09-29 ...).
	tk.MustExec("set @a=-2144294194, @b='mm', @c='mm', @d='mm', @e='0198-09-29 20:19:49';")
	tk.MustQuery("execute stmt using @a,@b,@c,@d,@e;").Check(testkit.Rows("aa 1333053589 1037-12-26 01:38:52"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @a,@b,@c,@d,@e;").Check(testkit.Rows("aa 1333053589 1037-12-26 01:38:52"))
	tkProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Len(t, res.Rows(), 7)
	require.Regexp(t, ".*IndexMerge.*", res.Rows()[1][0])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[3][0])
	require.Equal(t, "range:(NULL,\"mm\"), (\"mm\",+inf], keep order:false, stats:pseudo", res.Rows()[3][4])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[4][0])
	require.Equal(t, "range:[0198-09-29 20:19:49,0198-09-29 20:19:49], keep order:false, stats:pseudo", res.Rows()[4][4])
	// test for cluster index in indexMerge
	tk.MustExec("drop table if exists t;")
	tk.MustExec("set @@tidb_enable_clustered_index = 1;")
	tk.MustExec("create table t(a int, b int, c int, primary key(a), index idx_b(b));")
	tk.MustExec("prepare stmt from 'select * from t where ((a > ? and a < ?) or b > 1) and c > 1;';")
	tk.MustExec("set @a = 0, @b = 3;")
	tk.MustQuery("execute stmt using @a, @b;").Check(testkit.Rows())
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	// One branch scans the clustered PK (TableRangeScan), the other idx_b.
	require.Len(t, res.Rows(), 5)
	require.Regexp(t, ".*IndexMerge.*", res.Rows()[0][0])
	require.Regexp(t, ".*TableRangeScan.*", res.Rows()[1][0])
	require.Equal(t, "range:(0,3), keep order:false, stats:pseudo", res.Rows()[1][4])
	require.Regexp(t, ".*IndexRangeScan.*", res.Rows()[2][0])
	require.Equal(t, "range:(1,+inf], keep order:false, stats:pseudo", res.Rows()[2][4])
	// test for prefix index
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(a int primary key, b varchar(255), c int, index idx_c(c));")
	tk.MustExec("create unique index idx_b on t1(b(3));")
	tk.MustExec("insert into t1 values(1,'abcdfsafd',1),(2,'addfdsafd',2),(3,'ddcdsaf',3),(4,'bbcsa',4);")
	tk.MustExec("prepare stmt from 'select /*+ USE_INDEX_MERGE(t1, primary, idx_b, idx_c) */ * from t1 where b = ? or a > 10 or c > 10;';")
	tk.MustExec("set @a='bbcsa', @b='ddcdsaf';")
	tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("4 bbcsa 4"))
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Regexp(t, ".*IndexMerge.*", res.Rows()[0][0])
	// Prefix-index ranges are not safe to reuse across parameters, so this
	// execution must NOT come from the cache.
	tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("3 ddcdsaf 3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) // unsafe range
	tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("3 ddcdsaf 3"))
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Regexp(t, ".*IndexMerge.*", res.Rows()[0][0])
	// rewrite the origin indexMerge test
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t (a int, b int, c int, primary key(a), key(b))")
	tk.MustExec("prepare stmt from 'select /*+ inl_join(t2) */ * from t t1 join t t2 on t1.a = t2.a and t1.c = t2.c where t2.a = 1 or t2.b = 1;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1(a int primary key, b int, c int, key(b), key(c));")
	tk.MustExec("INSERT INTO t1 VALUES (10, 10, 10), (11, 11, 11)")
	// Point predicates on every branch: the plan is reusable, so the second
	// execution is a cache hit.
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=? or (b=? and a=?);';")
	tk.MustExec("set @a = 10, @b = 11;")
	tk.MustQuery("execute stmt using @a, @a, @a").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("execute stmt using @b, @b, @b").Check(testkit.Rows("11 11 11"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=? or (b=? and (a=? or a=?));';")
	tk.MustQuery("execute stmt using @a, @a, @a, @a").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("execute stmt using @b, @b, @b, @b").Check(testkit.Rows("11 11 11"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=? or (b=? and (a=? and c=?));';")
	tk.MustQuery("execute stmt using @a, @a, @a, @a").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("execute stmt using @b, @b, @b, @b").Check(testkit.Rows("11 11 11"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// A range predicate (a >= ? and a <= ?) can degenerate depending on the
	// parameters, so the plan is not reused here.
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=? or (b=? and (a >= ? and a <= ?));';")
	tk.MustQuery("execute stmt using @a, @a, @b, @a").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("execute stmt using @b, @b, @b, @b").Check(testkit.Rows("11 11 11"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	// The next three prepare/execute groups run the same statement with the
	// parameter orders permuted, checking which transitions hit the cache:
	// a>=9 and a<=9 collapses to a point (a=9), which is a different plan shape.
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=10 or (a >=? and a <= ?);';")
	tk.MustExec("set @a=9, @b=10, @c=11;")
	tk.MustQuery("execute stmt using @a, @a;").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("execute stmt using @a, @c;").Check(testkit.Rows("10 10 10", "11 11 11"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) // a>=9 and a<=9 --> a=9
	tk.MustQuery("execute stmt using @c, @a;").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=10 or (a >=? and a <= ?);';")
	tk.MustExec("set @a=9, @b=10, @c=11;")
	tk.MustQuery("execute stmt using @a, @c;").Check(testkit.Rows("10 10 10", "11 11 11"))
	tk.MustQuery("execute stmt using @a, @a;").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @c, @a;").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1) */ * from t1 where c=10 or (a >=? and a <= ?);';")
	tk.MustExec("set @a=9, @b=10, @c=11;")
	tk.MustQuery("execute stmt using @c, @a;").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("execute stmt using @a, @c;").Check(testkit.Rows("10 10 10", "11 11 11"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt using @a, @a;").Check(testkit.Rows("10 10 10"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("drop table if exists t0")
	tk.MustExec("CREATE TABLE t0(c0 INT AS (1), c1 INT PRIMARY KEY)")
	tk.MustExec("INSERT INTO t0(c1) VALUES (0)")
	tk.MustExec("CREATE INDEX i0 ON t0(c0)")
	tk.MustExec("prepare stmt from 'SELECT /*+ USE_INDEX_MERGE(t0, i0, PRIMARY)*/ t0.c0 FROM t0 WHERE t0.c1 OR t0.c0;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows("1"))
	// The plan contains the generated column, so it can not be cached.
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	// IndexMerge under a join + aggregation: still cacheable across parameters.
	tk.MustExec("drop table if exists t1, t2")
	tk.MustExec("create table t1(id int primary key, a int, b int, c int, d int)")
	tk.MustExec("create index t1a on t1(a)")
	tk.MustExec("create index t1b on t1(b)")
	tk.MustExec("create table t2(id int primary key, a int)")
	tk.MustExec("create index t2a on t2(a)")
	tk.MustExec("insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5)")
	tk.MustExec("insert into t2 values(1,1),(5,5)")
	tk.MustExec("prepare stmt from 'select /*+ use_index_merge(t1, t1a, t1b) */ sum(t1.a) from t1 join t2 on t1.id = t2.id where t1.a < ? or t1.b > ?';")
	tk.MustExec("set @a=2, @b=4, @c=5;")
	tk.MustQuery("execute stmt using @a, @b").Check(testkit.Rows("6"))
	tk.MustQuery("execute stmt using @a, @c").Check(testkit.Rows("1"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
}
// TestSetOperations4PlanCache checks that prepared statements using set
// operations (UNION, UNION ALL, EXCEPT, INTERSECT, and combinations with
// ORDER BY/LIMIT) produce correct results and are reused from the plan cache
// across different parameter values.
func TestSetOperations4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists t1, t2;")
	tk.MustExec("CREATE TABLE `t1` (a int);")
	tk.MustExec("CREATE TABLE `t2` (a int);")
	tk.MustExec("insert into t1 values(1), (2);")
	tk.MustExec("insert into t2 values(1), (3);")
	// test for UNION (distinct): duplicates across branches collapse.
	tk.MustExec("prepare stmt from 'select * from t1 where a > ? union select * from t2 where a > ?;';")
	tk.MustExec("set @a=0, @b=1;")
	tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// test for UNION ALL: duplicates are kept ("1" appears twice for @a,@a).
	tk.MustExec("prepare stmt from 'select * from t1 where a > ? union all select * from t2 where a > ?;';")
	tk.MustExec("set @a=0, @b=1;")
	tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("1", "1", "2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// test for EXCEPT
	tk.MustExec("prepare stmt from 'select * from t1 where a > ? except select * from t2 where a > ?;';")
	tk.MustExec("set @a=0, @b=1;")
	tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("2"))
	tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// test for INTERSECT -- NOTE(review): the statement below actually uses
	// UNION, and the expected rows match UNION semantics (INTERSECT of
	// t1={1,2}, t2={1,3} would be {1}). Likely a copy-paste slip from the
	// UNION case above; confirm the intent before changing the SQL.
	tk.MustExec("prepare stmt from 'select * from t1 where a > ? union select * from t2 where a > ?;';")
	tk.MustExec("set @a=0, @b=1;")
	tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2", "3"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// test for UNION + INTERSECT (INTERSECT binds tighter than UNION ALL).
	tk.MustExec("prepare stmt from 'select * from t1 union all select * from t1 intersect select * from t2;'")
	tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "1", "2"))
	tk.MustExec("prepare stmt from '(select * from t1 union all select * from t1) intersect select * from t2;'")
	tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1"))
	// test for order by and limit
	tk.MustExec("prepare stmt from '(select * from t1 union all select * from t1 intersect select * from t2) order by a limit 2;'")
	tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "1"))
}
// TestSPM4PlanCache checks the interaction between SQL plan management
// (bindings) and the prepared-plan cache: creating a global binding for a
// cached prepared statement invalidates the cached plan, the next execution
// uses the binding, and subsequent executions hit the cache again.
func TestSPM4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t(a int, index idx_a(a));")
	// Start from a clean binding state for the test database.
	tk.MustExec("delete from mysql.bind_info where default_db='test';")
	tk.MustExec("admin reload bindings;")
	// Without any binding, the optimizer already picks the index-only plan
	// (covering index idx_a) for select *.
	res := tk.MustQuery("explain format = 'brief' select * from t;")
	require.Regexp(t, ".*IndexReader.*", res.Rows()[0][0])
	require.Regexp(t, ".*IndexFullScan.*", res.Rows()[1][0])
	tk.MustExec("prepare stmt from 'select * from t;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tkProcess := tk.Session().ShowProcess()
	ps := []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	require.Regexp(t, ".*IndexReader.*", res.Rows()[0][0])
	require.Regexp(t, ".*IndexFullScan.*", res.Rows()[1][0])
	// Create a binding that forces use index(idx_a); the resulting plan shape
	// is the same, but the bindSQL attached to the statement changes.
	tk.MustExec("create global binding for select * from t using select * from t use index(idx_a);")
	res = tk.MustQuery("explain format = 'brief' select * from t;")
	require.Regexp(t, ".*IndexReader.*", res.Rows()[0][0])
	require.Regexp(t, ".*IndexFullScan.*", res.Rows()[1][0])
	tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	// The bindSQL has changed, the previous cache is invalid.
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tkProcess = tk.Session().ShowProcess()
	ps = []*util.ProcessInfo{tkProcess}
	tk.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10))
	// We can use the new binding.
	require.Regexp(t, ".*IndexReader.*", res.Rows()[0][0])
	require.Regexp(t, ".*IndexFullScan.*", res.Rows()[1][0])
	// After the binding-triggered recompilation, the plan is cached again.
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1"))
	// Clean up the binding so later tests are unaffected.
	tk.MustExec("delete from mysql.bind_info where default_db='test';")
	tk.MustExec("admin reload bindings;")
}
// TestHint4PlanCache verifies that the IGNORE_PLAN_CACHE() hint disables plan
// caching for a prepared statement, while the same statement without the hint
// is served from the cache on re-execution.
func TestHint4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	for _, setup := range []string{
		`set tidb_enable_prepared_plan_cache=1`,
		"use test",
		"set @@tidb_enable_collect_execution_info=0;",
		"drop table if exists t;",
		"create table t(a int, index idx_a(a));",
	} {
		tk.MustExec(setup)
	}

	// Each case prepares a statement, executes it twice, then checks whether
	// the second execution came from the plan cache.
	cases := []struct {
		prepareSQL string
		fromCache  string
	}{
		{"prepare stmt from 'select * from t;';", "1"},
		{"prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t;';", "0"},
	}
	for _, c := range cases {
		tk.MustExec(c.prepareSQL)
		tk.MustQuery("execute stmt;").Check(testkit.Rows())
		tk.MustQuery("execute stmt;").Check(testkit.Rows())
		tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows(c.fromCache))
	}
}
// TestIgnorePlanCacheWithPrepare checks that a binding containing the
// ignore_plan_cache() hint stops a prepared statement from being cached, and
// that replacing the binding with a non-ignoring one restores caching. Two
// binding hints are exercised: use_index on a single table and straight_join
// on a two-table join.
func TestIgnorePlanCacheWithPrepare(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t(a int, index idx_a(a));")
	tk.MustExec("drop table if exists r;")
	tk.MustExec("create table r(a int);")
	// test use_index
	tk.MustExec("prepare stmt from 'select * from t;';")
	tk.MustExec("create binding for select * from t using select /*+ use_index(t, idx_a) */ * from t;")
	// With a normal (use_index) binding: second execution hits the cache, and
	// the plan still comes from the binding.
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1"))
	// Swap in a binding carrying ignore_plan_cache(): caching stops, but the
	// binding itself is still applied.
	tk.MustExec("create binding for select * from t using select /*+ ignore_plan_cache() */ * from t;")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1"))
	// Swap back to the use_index binding: caching resumes.
	tk.MustExec("create binding for select * from t using select /*+ use_index(t, idx_a) */ * from t;")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1"))
	// test straight_join
	tk.MustExec("prepare stmt_join from 'select * from t, r where r.a = t.a;';")
	tk.MustExec("create binding for select * from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a;")
	tk.MustQuery("execute stmt_join;").Check(testkit.Rows())
	tk.MustQuery("execute stmt_join;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt_join;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1"))
	// Same ignore -> restore cycle for the join binding.
	tk.MustExec("create binding for select * from t, r where r.a = t.a using select /*+ ignore_plan_cache() */* from t, r where r.a = t.a;")
	tk.MustQuery("execute stmt_join;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt_join;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1"))
	tk.MustExec("create binding for select * from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a;")
	tk.MustQuery("execute stmt_join;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt_join;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1"))
}
// TestSelectView4PlanCache verifies prepared-plan-cache behavior for
// statements that select from views: repeated executions hit the cache,
// dropping/recreating or altering the underlying table invalidates the
// cached plan (first re-execution misses, the next one hits), and views
// over window functions are cacheable as well.
func TestSelectView4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	// Four flavors of view over the same base table: plain, renamed
	// columns over *, renamed columns over explicit columns, and nested
	// derived tables.
	tk.MustExec("drop table if exists view_t;")
	tk.MustExec("create table view_t (a int,b int)")
	tk.MustExec("insert into view_t values(1,2)")
	tk.MustExec("create definer='root'@'localhost' view view1 as select * from view_t")
	tk.MustExec("create definer='root'@'localhost' view view2(c,d) as select * from view_t")
	tk.MustExec("create definer='root'@'localhost' view view3(c,d) as select a,b from view_t")
	tk.MustExec("create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;")
	// Each prepared statement hits the cache on its second execution.
	tk.MustExec("prepare stmt1 from 'select * from view1;'")
	tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2"))
	tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("prepare stmt2 from 'select * from view2;'")
	tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2"))
	tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("prepare stmt3 from 'select * from view3;'")
	tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2"))
	tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("prepare stmt4 from 'select * from view4;'")
	tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2"))
	tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// Recreate the base table without columns a/b: the views become
	// invalid and execution must report it rather than reuse stale plans.
	tk.MustExec("drop table view_t;")
	tk.MustExec("create table view_t(c int,d int)")
	err := tk.ExecToErr("execute stmt1;")
	require.Equal(t, "[planner:1356]View 'test.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them", err.Error())
	err = tk.ExecToErr("execute stmt2")
	require.Equal(t, "[planner:1356]View 'test.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them", err.Error())
	err = tk.ExecToErr("execute stmt3")
	require.Equal(t, core.ErrViewInvalid.GenWithStackByArgs("test", "view3").Error(), err.Error())
	// Restore compatible columns: first execution after the schema
	// change is a cache miss, the second a hit, for every statement.
	tk.MustExec("drop table view_t;")
	tk.MustExec("create table view_t(a int,b int,c int)")
	tk.MustExec("insert into view_t values(1,2,3)")
	tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// Moving column a (drop + re-add in a new position) must also
	// invalidate the cached plans once.
	tk.MustExec("alter table view_t drop column a")
	tk.MustExec("alter table view_t add column a int after b")
	tk.MustExec("update view_t set a=1;")
	tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("drop table view_t;")
	tk.MustExec("drop view view1,view2,view3,view4;")
	// Views over window functions should be cacheable too.
	tk.MustExec("set @@tidb_enable_window_function = 1")
	defer func() {
		tk.MustExec("set @@tidb_enable_window_function = 0")
	}()
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t(a int, b int)")
	tk.MustExec("insert into t values (1,1),(1,2),(2,1),(2,2)")
	tk.MustExec("create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t")
	tk.MustExec("prepare stmt from 'select * from v;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2"))
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
	tk.MustExec("drop view v;")
}
// TestInvisibleIndex4PlanCache verifies that marking an index INVISIBLE
// invalidates cached plans that force that index: both a fresh query and
// a previously cached prepared statement must fail with "key doesn't
// exist" instead of reusing the stale plan.
func TestInvisibleIndex4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists t;")
	tk.MustExec("CREATE TABLE t(c1 INT, index idx_c(c1));")
	// Populate the plan cache while the index is still visible.
	tk.MustExec("prepare stmt from 'select * from t use index(idx_c) where c1 > 1;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("ALTER TABLE t ALTER INDEX idx_c INVISIBLE;")
	err := tk.ExecToErr("select * from t use index(idx_c) where c1 > 1;")
	require.Equal(t, "[planner:1176]Key 'idx_c' doesn't exist in table 't'", err.Error())
	// The cached plan must not mask the error either.
	err = tk.ExecToErr("execute stmt;")
	require.Equal(t, "[planner:1176]Key 'idx_c' doesn't exist in table 't'", err.Error())
}
// TestCTE4PlanCache exercises prepared statements containing recursive
// CTEs: plans with one or more seed/recursive parts can be cached and
// re-executed with different parameters, while a CTE that produces a
// PhysicalApply is reported as un-cacheable.
func TestCTE4PlanCache(t *testing.T) {
	// CTE can not be cached, because part of it will be treated as a subquery.
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	// Simple recursive CTE with parameterized seed and stop condition.
	tk.MustExec("prepare stmt from 'with recursive cte1 as (" +
		"select ? c1 " +
		"union all " +
		"select c1 + 1 c1 from cte1 where c1 < ?) " +
		"select * from cte1;';")
	tk.MustExec("set @a=5, @b=4, @c=2, @d=1;")
	tk.MustQuery("execute stmt using @d, @a").Check(testkit.Rows("1", "2", "3", "4", "5"))
	tk.MustQuery("execute stmt using @d, @b").Check(testkit.Rows("1", "2", "3", "4"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @c, @b").Check(testkit.Rows("2", "3", "4"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// Two seed parts.
	tk.MustExec("prepare stmt from 'with recursive cte1 as (" +
		"select 1 c1 " +
		"union all " +
		"select 2 c1 " +
		"union all " +
		"select c1 + 1 c1 from cte1 where c1 < ?) " +
		"select * from cte1 order by c1;';")
	tk.MustExec("set @a=10, @b=2;")
	tk.MustQuery("execute stmt using @a").Check(testkit.Rows("1", "2", "2", "3", "3", "4", "4", "5", "5", "6", "6", "7", "7", "8", "8", "9", "9", "10", "10"))
	tk.MustQuery("execute stmt using @b").Check(testkit.Rows("1", "2", "2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// Two recursive parts.
	tk.MustExec("prepare stmt from 'with recursive cte1 as (" +
		"select 1 c1 " +
		"union all " +
		"select 2 c1 " +
		"union all " +
		"select c1 + 1 c1 from cte1 where c1 < ? " +
		"union all " +
		"select c1 + ? c1 from cte1 where c1 < ?) " +
		"select * from cte1 order by c1;';")
	tk.MustExec("set @a=1, @b=2, @c=3, @d=4, @e=5;")
	tk.MustQuery("execute stmt using @c, @b, @e;").Check(testkit.Rows("1", "2", "2", "3", "3", "3", "4", "4", "5", "5", "5", "6", "6"))
	tk.MustQuery("execute stmt using @b, @a, @d;").Check(testkit.Rows("1", "2", "2", "2", "3", "3", "3", "4", "4", "4"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// A correlated CTE inside EXISTS yields a PhysicalApply, which is
	// explicitly un-cacheable; the warning must say so.
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1(a int);")
	tk.MustExec("insert into t1 values(1);")
	tk.MustExec("insert into t1 values(2);")
	tk.MustExec("prepare stmt from 'SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*? AS b UNION ALL SELECT b+? FROM qn WHERE b=?) SELECT * FROM qn WHERE b=a);';")
	tk.MustExec("set @a=1, @b=2, @c=3, @d=4, @e=5, @f=0;")
	tk.MustQuery("execute stmt using @f, @a, @f").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt using @a, @b, @a").Sort().Check(testkit.Rows("1", "2"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt using @a, @b, @a").Sort().Check(testkit.Rows("1", "2"))
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip prepared plan-cache: PhysicalApply plan is un-cacheable"))
	// Multiple CTE definitions in one WITH clause remain cacheable.
	tk.MustExec("prepare stmt from 'with recursive c(p) as (select ?), cte(a, b) as (select 1, 1 union select a+?, 1 from cte, c where a < ?) select * from cte order by 1, 2;';")
	tk.MustQuery("execute stmt using @a, @a, @e;").Check(testkit.Rows("1 1", "2 1", "3 1", "4 1", "5 1"))
	tk.MustQuery("execute stmt using @b, @b, @c;").Check(testkit.Rows("1 1", "3 1"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
}
// TestValidity4PlanCache checks that a prepared statement is bound to
// the table it resolved at prepare time: recreating a same-named table
// in another database invalidates the plan once, and switching the
// current database afterwards does not change which table is read.
func TestValidity4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t(a int);")
	tk.MustExec("prepare stmt from 'select * from t;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// A new database with its own `t`; the old statement still reads
	// test.t (empty result) and re-caches after one miss.
	tk.MustExec("drop database if exists plan_cache;")
	tk.MustExec("create database plan_cache;")
	tk.MustExec("use plan_cache;")
	tk.MustExec("create table t(a int);")
	tk.MustExec("insert into t values(1);")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	// Re-preparing in plan_cache binds to plan_cache.t (one row).
	tk.MustExec("prepare stmt from 'select * from t;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows("1"))
	tk.MustQuery("execute stmt;").Check(testkit.Rows("1"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
	tk.MustExec("use test") // still read plan_cache.t and can hit the cache
	tk.MustQuery("execute stmt;").Check(testkit.Rows("1"))
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1"))
}
// TestListPartition4PlanCache checks that plans over LIST-partitioned
// tables under static pruning are never added to the prepared plan
// cache (the second execution is still a miss).
func TestListPartition4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("use test")
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("set @@session.tidb_enable_list_partition=1;")
	tk.MustExec("drop table if exists t;")
	tk.MustExec("create table t(a int, b int) PARTITION BY LIST (a) ( PARTITION p0 VALUES IN (1, 2, 3), PARTITION p1 VALUES IN (4, 5, 6));")
	tk.MustExec("set @@tidb_partition_prune_mode='static';")
	tk.MustExec("prepare stmt from 'select * from t;';")
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	tk.MustQuery("execute stmt;").Check(testkit.Rows())
	// The list partition plan can not be cached.
	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
}
// TestMoreSessions4PlanCache verifies that prepared statements are
// session-local: a second session cannot execute the first session's
// statement, and each session maintains its own cache hits.
func TestMoreSessions4PlanCache(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk.MustExec("use test;")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int);")
	tk.MustExec("prepare stmt from 'select * from t;';")
	tk.MustQuery("execute stmt").Check(testkit.Rows())
	tk.MustQuery("execute stmt").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
	// The statement handle does not leak across sessions.
	tk2.MustExec(`set tidb_enable_prepared_plan_cache=1`)
	tk2.MustExec("use test;")
	require.EqualError(t, tk2.ExecToErr("execute stmt;"), "[planner:8111]Prepared statement not found")
	tk2.MustExec("prepare stmt from 'select * from t;';")
	tk2.MustQuery("execute stmt").Check(testkit.Rows())
	tk2.MustQuery("execute stmt").Check(testkit.Rows())
	tk2.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
	// The first session's cache entry is still valid.
	tk.MustQuery("execute stmt").Check(testkit.Rows())
	tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
}
// TestIssue28792 checks that `use index ()` (an empty index hint) on the
// inner table of a LEFT JOIN produces the same plan as no hint at all.
func TestIssue28792(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("CREATE TABLE t12(a INT, b INT)")
	tk.MustExec("CREATE TABLE t97(a INT, b INT UNIQUE NOT NULL);")
	r1 := tk.MustQuery("EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 on t12.b = t97.b;").Rows()
	r2 := tk.MustQuery("EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 use index () on t12.b = t97.b;").Rows()
	require.Equal(t, r2, r1)
}
// TestExplainForJSON checks `EXPLAIN FORMAT = 'tidb_json' FOR
// CONNECTION`: the flattened JSON plan rows must line up with the rows
// of the 'row' format, both with and without runtime execution info,
// and the tidb_json keyword must parse quoted/unquoted in any case.
func TestExplainForJSON(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk1 := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk1.MustExec("use test")
	tk1.MustExec("set @@tidb_enable_collect_execution_info=0;")
	tk1.MustExec("drop table if exists t1")
	tk1.MustExec("create table t1(id int);")
	tk1.MustQuery("select * from t1;")
	// Expose tk1's last statement to tk2 via a mock session manager so
	// EXPLAIN FOR CONNECTION can find it.
	tk1RootProcess := tk1.Session().ShowProcess()
	ps := []*util.ProcessInfo{tk1RootProcess}
	tk1.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tk2.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	resRow := tk2.MustQuery(fmt.Sprintf("explain format = 'row' for connection %d", tk1RootProcess.ID)).Rows()
	resJSON := tk2.MustQuery(fmt.Sprintf("explain format = 'tidb_json' for connection %d", tk1RootProcess.ID)).Rows()
	j := new([]*core.ExplainInfoForEncode)
	require.NoError(t, json.Unmarshal([]byte(resJSON[0][0].(string)), j))
	flatJSONRows := make([]*core.ExplainInfoForEncode, 0)
	for _, row := range *j {
		flatJSONRows = append(flatJSONRows, flatJSONPlan(row)...)
	}
	// Without execution info the row format has 5 columns:
	// id, estRows, task, access object, operator info.
	require.Equal(t, len(flatJSONRows), len(resRow))
	for i, row := range resRow {
		require.Contains(t, row[0], flatJSONRows[i].ID)
		require.Equal(t, flatJSONRows[i].EstRows, row[1])
		require.Equal(t, flatJSONRows[i].TaskType, row[2])
		require.Equal(t, flatJSONRows[i].AccessObject, row[3])
		require.Equal(t, flatJSONRows[i].OperatorInfo, row[4])
	}
	// Repeat with execution info collection enabled.
	tk1.MustExec("set @@tidb_enable_collect_execution_info=1;")
	tk1.MustExec("drop table if exists t2")
	tk1.MustExec("create table t2(id int);")
	tk1.MustQuery("select * from t2;")
	tk1RootProcess = tk1.Session().ShowProcess()
	ps = []*util.ProcessInfo{tk1RootProcess}
	tk1.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	tk2.Session().SetSessionManager(&testkit.MockSessionManager{PS: ps})
	resRow = tk2.MustQuery(fmt.Sprintf("explain format = 'row' for connection %d", tk1RootProcess.ID)).Rows()
	resJSON = tk2.MustQuery(fmt.Sprintf("explain format = 'tidb_json' for connection %d", tk1RootProcess.ID)).Rows()
	j = new([]*core.ExplainInfoForEncode)
	require.NoError(t, json.Unmarshal([]byte(resJSON[0][0].(string)), j))
	flatJSONRows = []*core.ExplainInfoForEncode{}
	for _, row := range *j {
		flatJSONRows = append(flatJSONRows, flatJSONPlan(row)...)
	}
	require.Equal(t, len(flatJSONRows), len(resRow))
	// With execution info the row format gains actRows/execution
	// info/memory/disk columns; row[5] (execution info) is skipped
	// because it varies per run.
	for i, row := range resRow {
		require.Contains(t, row[0], flatJSONRows[i].ID)
		require.Equal(t, flatJSONRows[i].EstRows, row[1])
		require.Equal(t, flatJSONRows[i].ActRows, row[2])
		require.Equal(t, flatJSONRows[i].TaskType, row[3])
		require.Equal(t, flatJSONRows[i].AccessObject, row[4])
		require.Equal(t, flatJSONRows[i].OperatorInfo, row[6])
		// executeInfo, memory, disk maybe vary in multi execution
		require.NotEqual(t, flatJSONRows[i].ExecuteInfo, "")
		require.NotEqual(t, flatJSONRows[i].MemoryInfo, "")
		require.NotEqual(t, flatJSONRows[i].DiskInfo, "")
	}
	// test syntax
	tk2.MustExec(fmt.Sprintf("explain format = 'tidb_json' for connection %d", tk1RootProcess.ID))
	tk2.MustExec(fmt.Sprintf("explain format = tidb_json for connection %d", tk1RootProcess.ID))
	tk2.MustExec(fmt.Sprintf("explain format = 'TIDB_JSON' for connection %d", tk1RootProcess.ID))
	tk2.MustExec(fmt.Sprintf("explain format = TIDB_JSON for connection %d", tk1RootProcess.ID))
}
|
package osbuild2
// OSTreePullStageOptions holds the options for the
// org.osbuild.ostree.pull stage.
type OSTreePullStageOptions struct {
	// Repo is the location of the ostree repo to pull into.
	Repo string `json:"repo"`
}

// isStageOptions marks OSTreePullStageOptions as valid stage options.
func (OSTreePullStageOptions) isStageOptions() {}
// OSTreePullStageInput describes one input of the
// org.osbuild.ostree.pull stage: the commit references to pull.
type OSTreePullStageInput struct {
	inputCommon
	References OSTreePullStageReferences `json:"references"`
}

// isStageInput marks OSTreePullStageInput as a valid stage input.
func (OSTreePullStageInput) isStageInput() {}
// OSTreePullStageInputs groups the inputs of the
// org.osbuild.ostree.pull stage.
type OSTreePullStageInputs struct {
	// Commits holds the OSTree commits to pull.
	Commits *OSTreePullStageInput `json:"commits"`
}

// isStageInputs marks OSTreePullStageInputs as valid stage inputs.
func (OSTreePullStageInputs) isStageInputs() {}
// OSTreePullStageReferences maps commit IDs to their reference options.
type OSTreePullStageReferences map[string]OSTreePullStageReference

// isReferences marks OSTreePullStageReferences as a valid references map.
func (OSTreePullStageReferences) isReferences() {}
// OSTreePullStageReference names the ref under which a pulled commit is
// stored in the repo.
type OSTreePullStageReference struct {
	Ref string `json:"ref"`
}
// NewOSTreePullStage builds an org.osbuild.ostree.pull stage that pulls
// the given OSTree commits (inputs) into an existing repo (options).
func NewOSTreePullStage(options *OSTreePullStageOptions, inputs *OSTreePullStageInputs) *Stage {
	stage := &Stage{Type: "org.osbuild.ostree.pull"}
	stage.Inputs = inputs
	stage.Options = options
	return stage
}
|
//go:build !boltdb
// +build !boltdb

package model
// getArtistCount reports how many artists are stored in db.
func getArtistCount() int {
	count := len(db)
	return count
}
// getArtistForID fetches the artist stored under ID, or nil when the ID
// is absent from db.
func getArtistForID(ID int) *Artist {
	artist := db[ID]
	return artist
}
// GetArtistForName scans db for an artist whose Name matches exactly;
// it returns nil when no artist matches.
func GetArtistForName(name string) *Artist {
	var found *Artist
	for _, candidate := range db {
		if candidate.Name != name {
			continue
		}
		found = candidate
		break
	}
	return found
}
// getAlbumCount totals the album counts across every artist in db.
func getAlbumCount() int {
	total := 0
	for _, artist := range db {
		total += len(artist.Albums)
	}
	return total
}
// GetAlbumCountForArtist returns the number of albums stored for the
// artist with the given name, or 0 when no such artist exists.
//
// The original dereferenced GetArtistForName's result unconditionally,
// panicking with a nil-pointer dereference for unknown names.
func GetAlbumCountForArtist(name string) int {
	artist := GetArtistForName(name)
	if artist == nil {
		return 0
	}
	return len(artist.Albums)
}
// DeleteAlbum removes every album whose ID matches the argument from
// all artists in db. (Deleting during a map range is safe in Go.)
func DeleteAlbum(ID int) {
	for _, artist := range db {
		albums := artist.Albums
		for _, album := range albums {
			if album.ID != ID {
				continue
			}
			delete(albums, album.ID)
		}
	}
}
func CreateNewArtist(artistId int, name string) {
db[artistId] = &Artist{artistId, name, make(map[int]*Album)}
}
// CreateNewAlbum attaches a new album to the artist stored under artistId.
//
// NOTE(review): this assumes the artist already exists in db; a missing
// artistId panics on the nil-pointer dereference — confirm callers
// always create the artist first.
func CreateNewAlbum(artistId int, albumId int, title string, year int) {
	db[artistId].Albums[albumId] = &Album{albumId, title, year}
}
// GetNextArtistID returns one more than the largest artist ID in db
// (1 when db is empty).
func GetNextArtistID() int {
	highest := 0
	for _, artist := range db {
		if artist.ID <= highest {
			continue
		}
		highest = artist.ID
	}
	return highest + 1
}
// GetNextAlbumID returns one more than the largest album ID found
// across all artists in db (1 when there are no albums).
func GetNextAlbumID() int {
	highest := 0
	for _, artist := range db {
		for _, album := range artist.Albums {
			if album.ID <= highest {
				continue
			}
			highest = album.ID
		}
	}
	return highest + 1
}
// getModelArray flattens db into a slice of artist/album pairs ordered
// by ascending album ID.
func getModelArray() []*dbArrayStruct {
	byAlbumID := make(map[int]*dbArrayStruct)
	for _, artist := range db {
		for _, album := range artist.Albums {
			byAlbumID[album.ID] = &dbArrayStruct{artist, album}
		}
	}
	ordered := make([]*dbArrayStruct, 0)
	for id := 0; id <= GetNextAlbumID(); id++ {
		pair, ok := byAlbumID[id]
		if !ok {
			continue
		}
		ordered = append(ordered, pair)
	}
	return ordered
}
|
package main
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"time"
_ "github.com/go-sql-driver/mysql"
)
var db *sql.DB
func dbConnect(dbUser, dbPass, dbName string) {
var err error
db, err = sql.Open("mysql", fmt.Sprintf("%s:%s@/%s", dbUser, dbPass, dbName))
checkErr(err)
db.SetConnMaxLifetime(time.Minute * 3)
db.SetMaxOpenConns(10)
db.SetMaxIdleConns(10)
}
// getUser looks up a single user record by email address.
//
// Matching the original behavior, a missing row yields the zero-value
// User with a nil error; callers should check User.Email to detect
// "not found".
func getUser(email string) (User, error) {
	user := User{}
	rows, err := db.Query("SELECT * FROM Users WHERE Email=?", email)
	if err != nil {
		return user, err
	}
	// The original never closed rows, leaking the underlying connection
	// back to the pool only when fully iterated without error.
	defer rows.Close()
	for rows.Next() {
		err = rows.Scan(&user.FirstName, &user.LastName, &user.Email, &user.Password)
		if err != nil {
			return user, err
		}
	}
	// Surface iteration errors (e.g. a connection dropped mid-scan) that
	// the original silently discarded.
	if err = rows.Err(); err != nil {
		return user, err
	}
	return user, nil
}
// createUser inserts a new user row and provisions the user's home
// directory under clientsBaseDir.
func createUser(user User) error {
	stmt, err := db.Prepare("INSERT Users SET FirstName=?,LastName=?,Email=?,Password=?")
	if err != nil {
		return err
	}
	// The original leaked the prepared statement; release its
	// server-side resources once done.
	defer stmt.Close()
	_, err = stmt.Exec(user.FirstName, user.LastName, user.Email, user.Password)
	if err != nil {
		return err
	}
	//Create a directory for the user
	return os.MkdirAll(filepath.Join(clientsBaseDir, user.Email, "home"), 0755)
}
// checkErr aborts the program with a panic when err is non-nil; it is a
// no-op for nil errors.
func checkErr(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package field
import "errors"
// Numeric identifiers for the fields of a serialized record. The values
// mirror the on-disk format and are therefore not contiguous; names of
// the form fieldNNID mark identifiers whose meaning has not been
// determined (NOTE(review): confirm against the format specification).
const (
	rowID = 1
	fullpathID = 2
	locationID = 3
	filenameID = 4
	titleID = 6
	artistID = 7
	albumID = 8
	genreID = 9
	lengthID = 10
	bitrateID = 13
	bpmID = 15
	commentID = 17
	groupingID = 19
	remixerID = 20
	labelID = 21
	composerID = 22
	yearID = 23
	starttimeID = 28
	endtimeID = 29
	deckID = 31
	field39ID = 39 // meaning unknown
	playtimeID = 45
	sessionID = 48
	playedID = 50
	keyID = 51
	addedID = 52
	updatedAtID = 53
	field68ID = 68 // meaning unknown
	field69ID = 69 // meaning unknown
	field70ID = 70 // meaning unknown
	field72ID = 72 // meaning unknown
)

// ErrUnexpectedIdentifier is an error returned when a field constructor is
// given the wrong field identifier for the field type being created.
var ErrUnexpectedIdentifier = errors.New("unexpected field identifier")
|
/*
Copyright 2016 Manav Bhatia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mgraph
import (
"fmt"
)
// An Edge represents an edge in the directed multi graph. Every edge is
// registered in both its source vertex's Out list and its destination
// vertex's In list (see AddEdge / RemoveEdge).
type Edge struct {
	source *Vertex // vertex the edge leaves
	data interface{} // arbitrary payload attached to the edge
	dest *Vertex // vertex the edge enters
	edgeName string // printable name derived from the endpoint names
}
// String renders the edge as "source --> dest (edgeName)".
func (e Edge) String() string {
	return fmt.Sprintf("%s --> %s (%s)", e.source, e.dest, e.edgeName)
}
// AddEdge creates an edge carrying data from source to destination,
// wires it into both vertices' adjacency lists, and returns it.
func AddEdge(source *Vertex, data interface{}, destination *Vertex) *Edge {
	name := fmt.Sprintf("<%s -> %s>", source.name, destination.name)
	e := &Edge{
		source:   source,
		data:     data,
		dest:     destination,
		edgeName: name,
	}
	source.Out = append(source.Out, e)
	destination.In = append(destination.In, e)
	return e
}
// ShowEdgesEntering prints every edge whose destination is vertex.
func ShowEdgesEntering(vertex *Vertex) {
	fmt.Println("Finding all edges entering ", vertex)
	for _, edge := range vertex.In {
		fmt.Println(edge)
	}
}
// ShowEdgesLeaving prints every edge whose source is vertex.
func ShowEdgesLeaving(vertex *Vertex) {
	fmt.Println("Finding all edges leaving ", vertex)
	for _, edge := range vertex.Out {
		fmt.Println(edge)
	}
}
// RemoveEdge detaches edge from the graph: it is removed from its
// source's Out list first, then from its destination's In list.
func RemoveEdge(edge *Edge) {
	removeOutEdge(edge)
	removeInEdge(edge)
}
// removeOutEdge deletes edge from its source vertex's Out slice using
// swap-with-last removal (order is not preserved). It panics when the
// edge is not present, which would indicate a corrupted graph.
func removeOutEdge(edge *Edge) {
	out := edge.source.Out
	for i, candidate := range out {
		if candidate != edge {
			continue
		}
		last := len(out) - 1
		out[i] = out[last]
		out[last] = nil // drop the reference so the GC can reclaim it
		edge.source.Out = out[:last]
		return
	}
	panic("edge not found on source: " + edge.String())
}
// removeInEdge deletes edge from its destination vertex's In slice
// using swap-with-last removal (order is not preserved). It panics when
// the edge is not present, which would indicate a corrupted graph.
func removeInEdge(edge *Edge) {
	in := edge.dest.In
	for i, candidate := range in {
		if candidate != edge {
			continue
		}
		last := len(in) - 1
		in[i] = in[last]
		in[last] = nil // drop the reference so the GC can reclaim it
		edge.dest.In = in[:last]
		return
	}
	panic("edge not found on destination: " + edge.String())
}
|
package raffle
import (
"github.com/avecost/promov/db"
)
// NBRaffle is one non-baccarat raffle jackpot row as selected by
// GetAllPendingNonBaccaratResultsByDate. Field meanings follow the
// column aliases of that query.
type NBRaffle struct {
	Id int // raffles.id
	Cardno string // player card number
	Terminal string // terminal account
	Provider string // provider name (joined from providers)
	Outlet string // outlet name
	Game string // game name (joined from games)
	JackpotAt string // jackpot date formatted as mm-dd-yyyy
	Cashier string // cashier who recorded the hit
	JackpotAmt float32 // jackpot amount
}
// GetAllPendingNonBaccaratResultsByDate returns all not-yet-validated
// raffle jackpots hit on the given date (dateTo), excluding a fixed set
// of game IDs (355-360 and 365-369 — presumably the baccarat games;
// NOTE(review): confirm against the games table). Provider and game
// names are joined in; a date with no hits yields a nil slice.
func GetAllPendingNonBaccaratResultsByDate(db *db.DB, dateTo string) ([]NBRaffle, error) {
	rows, err := db.Query("SELECT r.id AS pid, r.cardno AS pcard, r.terminal_acct AS terminal, "+
		" TRIM(p.name) AS pname, TRIM(r.outlet) AS outlet, TRIM(g.name) AS game, DATE_FORMAT(r.jackpot_at, '%m-%d-%Y') AS hiton, "+
		" r.cashier_name AS cashier, r.jackpot_amt AS hitamt "+
		"FROM raffles as r "+
		" LEFT JOIN providers as p ON r.provider_id = p.id "+
		" LEFT JOIN games as g ON r.game_id = g.id "+
		"WHERE r.validated = ? "+
		" AND r.game_id != 355 "+
		" AND r.game_id != 356 "+
		" AND r.game_id != 357 "+
		" AND r.game_id != 358 "+
		" AND r.game_id != 359 "+
		" AND r.game_id != 360 "+
		" AND r.game_id != 365 "+
		" AND r.game_id != 366 "+
		" AND r.game_id != 367 "+
		" AND r.game_id != 368 "+
		" AND r.game_id != 369 "+
		" AND r.jackpot_at = ?", 0, dateTo)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var nbrs []NBRaffle
	for rows.Next() {
		n := NBRaffle{}
		err = rows.Scan(&n.Id, &n.Cardno, &n.Terminal, &n.Provider, &n.Outlet, &n.Game, &n.JackpotAt, &n.Cashier, &n.JackpotAmt)
		if err != nil {
			return nil, err
		}
		nbrs = append(nbrs, n)
	}
	// Propagate any error encountered while iterating the result set.
	err = rows.Err()
	if err != nil {
		return nil, err
	}
	return nbrs, nil
}
// UpdateRaffleStatus marks a raffle as validated with the given status
// and propagates the status to the raffle's entries.
//
// status:
// 0 invalid / 1 pending / 2 valid
func UpdateRaffleStatus(db *db.DB, raffleId, status int) error {
	_, err := db.Exec("UPDATE raffles SET valid = ?, validated = ? WHERE id = ?", status, 1, raffleId)
	if err != nil {
		return err
	}
	// The original discarded this error, silently leaving the entries
	// out of sync with the raffle row; surface it to the caller instead.
	return updateEntriesStatus(db, raffleId, status)
}
// updateEntriesStatus pushes a raffle's validity status down to its
// entries.
//
// status:
// 0 invalid / 1 pending / 2 valid
//
// NOTE(review): the WHERE clause filters entries on player_id using the
// raffle ID — confirm entries.player_id really stores the raffle ID.
func updateEntriesStatus(db *db.DB, raffleId, status int) error {
	_, err := db.Exec("UPDATE entries SET valid = ? WHERE player_id = ?", status, raffleId)
	return err
}
|
package base
import (
"container/heap"
"gonum.org/v1/gonum/floats"
"sort"
)
// Indexer manages the map between sparse IDs and dense indices. A sparse ID is
// a user ID or item ID. The dense index is the internal user index or item index
// optimized for faster parameter access and less memory usage. Indices
// and IDs are inverse mappings of each other, kept in sync by Add.
type Indexer struct {
	Indices map[int]int // sparse ID -> dense index
	IDs []int // dense index -> sparse ID
}
// NotId is the sentinel index returned by ToIndex when an ID or name
// was never added to the indexer.
const NotId = -1
// NewIndexer creates an empty Indexer ready to accept IDs.
func NewIndexer() *Indexer {
	return &Indexer{
		Indices: make(map[int]int),
		IDs:     make([]int, 0),
	}
}
// Len returns the number of distinct IDs indexed so far.
func (set *Indexer) Len() int {
	return len(set.IDs)
}
// Add indexes ID if it has not been seen before; the new dense index is
// the current number of IDs. Re-adding an existing ID is a no-op.
func (set *Indexer) Add(ID int) {
	if _, seen := set.Indices[ID]; seen {
		return
	}
	set.Indices[ID] = len(set.IDs)
	set.IDs = append(set.IDs, ID)
}
// ToIndex converts a sparse ID to its dense index, returning NotId for
// IDs that were never added.
func (set *Indexer) ToIndex(ID int) int {
	index, exist := set.Indices[ID]
	if !exist {
		return NotId
	}
	return index
}
// ToID converts a dense index back to its sparse ID. It panics when
// index is outside [0, Len()).
func (set *Indexer) ToID(index int) int {
	return set.IDs[index]
}
// StringIndexer manages the map between names and indices. The index is the internal index
// optimized for faster parameter access and less memory usage. Indices
// and Names are inverse mappings of each other, kept in sync by Add.
type StringIndexer struct {
	Indices map[string]int // name -> index
	Names []string // index -> name
}
// NewStringIndexer creates an empty StringIndexer ready to accept names.
func NewStringIndexer() *StringIndexer {
	return &StringIndexer{
		Indices: make(map[string]int),
		Names:   make([]string, 0),
	}
}
// Len returns the number of distinct names indexed so far.
func (set *StringIndexer) Len() int {
	return len(set.Names)
}
// Add indexes name if it has not been seen before; the new index is the
// current number of names. Re-adding an existing name is a no-op.
func (set *StringIndexer) Add(name string) {
	if _, seen := set.Indices[name]; seen {
		return
	}
	set.Indices[name] = len(set.Names)
	set.Names = append(set.Names, name)
}
// ToIndex converts a name to its index, returning NotId for names that
// were never added.
func (set *StringIndexer) ToIndex(name string) int {
	index, exist := set.Indices[name]
	if !exist {
		return NotId
	}
	return index
}
// ToName converts an index back to its name. It panics when index is
// outside [0, Len()).
func (set *StringIndexer) ToName(index int) string {
	return set.Names[index]
}
// MarginalSubSet constructs a subset over a list of IDs, indices and values.
// SubSet holds positions into Indices/Values; after NewMarginalSubSet
// it is kept sorted by sparse ID, which Contain and ForIntersection
// rely on.
type MarginalSubSet struct {
	Indexer *Indexer // the indexer
	Indices []int // the full list of indices
	Values []float64 // the full list of values
	SubSet []int // indices of the subset
}
// NewMarginalSubSet creates a MarginalSubSet over the given indexer,
// index list and value list, then sorts the subset by sparse ID so the
// ID-ordered operations (Contain, ForIntersection) work correctly.
func NewMarginalSubSet(indexer *Indexer, indices []int, values []float64, subset []int) *MarginalSubSet {
	set := &MarginalSubSet{
		Indexer: indexer,
		Indices: indices,
		Values:  values,
		SubSet:  subset,
	}
	sort.Sort(set)
	return set
}
// Len returns the number of items in the subset (sort.Interface).
func (set *MarginalSubSet) Len() int {
	return len(set.SubSet)
}
// Swap exchanges the i-th and j-th subset entries (sort.Interface).
func (set *MarginalSubSet) Swap(i, j int) {
	set.SubSet[i], set.SubSet[j] = set.SubSet[j], set.SubSet[i]
}
// Less orders subset entries by their sparse IDs (sort.Interface).
func (set *MarginalSubSet) Less(i, j int) bool {
	return set.GetID(i) < set.GetID(j)
}
// Count gets the size of the marginal subset; it is equivalent to Len.
func (set *MarginalSubSet) Count() int {
	return len(set.SubSet)
}
// GetIndex returns the dense index of the i-th item of the subset.
func (set *MarginalSubSet) GetIndex(i int) int {
	return set.Indices[set.SubSet[i]]
}
// GetID returns the sparse ID of the i-th item of the subset, resolved
// through the indexer.
func (set *MarginalSubSet) GetID(i int) int {
	return set.Indexer.ToID(set.GetIndex(i))
}
// Mean returns the average of the values in the subset. An empty subset
// yields 0; the original computed 0/0 and returned NaN, which poisons
// any downstream arithmetic.
func (set *MarginalSubSet) Mean() float64 {
	if set.Len() == 0 {
		return 0
	}
	sum := 0.0
	for _, i := range set.SubSet {
		sum += set.Values[i]
	}
	return sum / float64(set.Len())
}
// Contain reports whether an ID exists in the subset. The subset is
// kept sorted by sparse ID (see NewMarginalSubSet), so a standard
// binary search is used.
func (set *MarginalSubSet) Contain(id int) bool {
	low, high := 0, set.Len()-1
	for low <= high {
		mid := (low + high) / 2
		current := set.GetID(mid)
		switch {
		case current == id:
			return true
		case id < current:
			high = mid - 1
		default:
			low = mid + 1
		}
	}
	return false
}
// ForIntersection iterates items in the intersection of two subsets.
// The method find items with common indices in linear time. Both
// subsets are already sorted by ID (see NewMarginalSubSet), so a
// two-pointer merge walk suffices; f receives the common ID and the
// value from each subset.
func (set *MarginalSubSet) ForIntersection(other *MarginalSubSet, f func(id int, a, b float64)) {
	// Iterate
	i, j := 0, 0
	for i < set.Len() && j < other.Len() {
		if set.GetID(i) == other.GetID(j) {
			f(set.GetID(i), set.Values[set.SubSet[i]], other.Values[other.SubSet[j]])
			i++
			j++
		} else if set.GetID(i) < other.GetID(j) {
			// Advance whichever side has the smaller ID.
			i++
		} else {
			j++
		}
	}
}
// ForEach iterates items in the subset, passing f the subset position
// i, the item's sparse ID, and its value.
func (set *MarginalSubSet) ForEach(f func(i, id int, value float64)) {
	for i, offset := range set.SubSet {
		f(i, set.GetID(i), set.Values[offset])
	}
}
// ForEachIndex iterates items in the subset, passing f the subset
// position i, the item's dense index, and its value.
func (set *MarginalSubSet) ForEachIndex(f func(i, index int, value float64)) {
	for i, offset := range set.SubSet {
		f(i, set.Indices[offset], set.Values[offset])
	}
}
// SparseVector is the data structure for the sparse vector. Indices and
// Values are parallel slices; Sorted records whether they are currently
// ordered by index (maintained by Add and SortIndex).
type SparseVector struct {
	Indices []int
	Values []float64
	Sorted bool
}
// NewSparseVector creates an empty SparseVector.
func NewSparseVector() *SparseVector {
	vec := new(SparseVector)
	vec.Indices = make([]int, 0)
	vec.Values = make([]float64, 0)
	return vec
}
// NewDenseSparseMatrix creates a slice of `row` empty SparseVectors.
func NewDenseSparseMatrix(row int) []*SparseVector {
	mat := make([]*SparseVector, row)
	for i := 0; i < row; i++ {
		mat[i] = NewSparseVector()
	}
	return mat
}
// Add appends an (index, value) pair and marks the vector unsorted so
// the next SortIndex re-sorts it.
func (vec *SparseVector) Add(index int, value float64) {
	vec.Indices = append(vec.Indices, index)
	vec.Values = append(vec.Values, value)
	vec.Sorted = false
}
// Len returns the number of items. A nil receiver is treated as an
// empty vector.
func (vec *SparseVector) Len() int {
	if vec == nil {
		return 0
	}
	return len(vec.Values)
}
// Less returns true if the index of the i-th item is less than the
// index of the j-th item (sort.Interface).
func (vec *SparseVector) Less(i, j int) bool {
	return vec.Indices[i] < vec.Indices[j]
}
// Swap exchanges the i-th and j-th items, keeping Indices and Values in
// step (sort.Interface).
func (vec *SparseVector) Swap(i, j int) {
	vec.Indices[i], vec.Indices[j] = vec.Indices[j], vec.Indices[i]
	vec.Values[i], vec.Values[j] = vec.Values[j], vec.Values[i]
}
// ForEach iterates items in the sparse vector. A nil receiver is a no-op.
func (vec *SparseVector) ForEach(f func(i, index int, value float64)) {
	if vec == nil {
		return
	}
	for pos, idx := range vec.Indices {
		f(pos, idx, vec.Values[pos])
	}
}
// SortIndex sorts items by indices (ascending). It is a no-op when the
// vector is already flagged as sorted, making repeated calls cheap.
func (vec *SparseVector) SortIndex() {
	if !vec.Sorted {
		sort.Sort(vec)
		vec.Sorted = true
	}
}
// ForIntersection iterates items in the intersection of two vectors. The method sorts two vectors
// by indices first, then find common indices in linear time.
func (vec *SparseVector) ForIntersection(other *SparseVector, f func(index int, a, b float64)) {
	// Sort indices of the left vec
	vec.SortIndex()
	// Sort indices of the right vec
	other.SortIndex()
	// Iterate with a two-pointer merge: advance the side with the smaller
	// index; on a match, report the shared index with both values.
	i, j := 0, 0
	for i < vec.Len() && j < other.Len() {
		if vec.Indices[i] == other.Indices[j] {
			f(vec.Indices[i], vec.Values[i], other.Values[j])
			i++
			j++
		} else if vec.Indices[i] < other.Indices[j] {
			i++
		} else {
			j++
		}
	}
}
// MaxHeap is designed for storing the K maximal elements. Internally it is
// ordered as a min-heap (smallest score at the root, see Less) so the
// weakest element can be evicted in O(log K); this reduces time complexity
// and memory complexity in top-K searching.
type MaxHeap struct {
	Elem []interface{} // store elements
	Score []float64 // store scores
	K int // the size of heap
}
// NewMaxHeap creates a MaxHeap that retains at most k elements.
func NewMaxHeap(k int) *MaxHeap {
	return &MaxHeap{
		Elem:  make([]interface{}, 0),
		Score: make([]float64, 0),
		K:     k,
	}
}
// Less returns true if the score of the i-th item is less than the score of
// the j-th item. This puts the minimal score at the heap root so it can be
// evicted first. It is a method of heap.Interface.
func (maxHeap *MaxHeap) Less(i, j int) bool {
	return maxHeap.Score[i] < maxHeap.Score[j]
}
// Swap the i-th item with the j-th item, keeping Elem and Score in lockstep.
// It is a method of heap.Interface.
func (maxHeap *MaxHeap) Swap(i, j int) {
	maxHeap.Elem[i], maxHeap.Elem[j] = maxHeap.Elem[j], maxHeap.Elem[i]
	maxHeap.Score[i], maxHeap.Score[j] = maxHeap.Score[j], maxHeap.Score[i]
}
// Len returns the current number of stored elements (not the capacity K).
// It is a method of heap.Interface.
func (maxHeap *MaxHeap) Len() int {
	return len(maxHeap.Elem)
}
// _HeapItem bundles an element with its score so both can travel through
// heap.Push/heap.Pop as a single value.
type _HeapItem struct {
	Elem interface{}
	Score float64
}
// Push a new item into the MaxHeap. It is a method of heap.Interface and is
// called by heap.Push; external callers should use Add instead.
func (maxHeap *MaxHeap) Push(x interface{}) {
	item := x.(_HeapItem)
	maxHeap.Elem = append(maxHeap.Elem, item.Elem)
	maxHeap.Score = append(maxHeap.Score, item.Score)
}
// Pop the last item (the element with minimal score, which heap.Pop has
// already moved to the end) from the MaxHeap.
// It is a method of heap.Interface.
func (maxHeap *MaxHeap) Pop() interface{} {
	// Extract the minimum
	n := maxHeap.Len()
	item := _HeapItem{
		Elem: maxHeap.Elem[n-1],
		Score: maxHeap.Score[n-1],
	}
	// Remove last element
	maxHeap.Elem = maxHeap.Elem[0 : n-1]
	maxHeap.Score = maxHeap.Score[0 : n-1]
	// We never use returned item
	return item
}
// Add a new element to the MaxHeap. When the heap grows beyond K elements,
// the element with the minimal score is evicted, so the heap always retains
// the K highest-scoring elements seen so far.
func (maxHeap *MaxHeap) Add(elem interface{}, score float64) {
	// Insert item
	heap.Push(maxHeap, _HeapItem{elem, score})
	// Remove minimum
	if maxHeap.Len() > maxHeap.K {
		heap.Pop(maxHeap)
	}
}
// ToSorted returns a sorted slice of elements in the heap, ordered by
// descending score, together with the matching scores.
func (maxHeap *MaxHeap) ToSorted() ([]interface{}, []float64) {
	// sort indices: Argsort orders the copied scores ascending and records
	// each element's original position in indices.
	scores := make([]float64, maxHeap.Len())
	indices := make([]int, maxHeap.Len())
	copy(scores, maxHeap.Score)
	floats.Argsort(scores, indices)
	// make output: walk indices backwards to produce descending order.
	// Reusing scores as the output buffer is safe because values are read
	// from maxHeap.Score, not from the sorted copy.
	sorted := make([]interface{}, maxHeap.Len())
	for i := range indices {
		sorted[i] = maxHeap.Elem[indices[maxHeap.Len()-1-i]]
		scores[i] = maxHeap.Score[indices[maxHeap.Len()-1-i]]
	}
	return sorted, scores
}
|
package Services
import (
"github.com/kylesliu/gin-demo/App/Extensions/Crypto"
"github.com/kylesliu/gin-demo/Bootstrap/config"
)
// 加密
func Encryption(str string) string {
key := config.AppConfig.EncryptKey
res := Crypto.EncryptDES_ECB(str, key)
return res
}
// 解密
func Decryption(str string) string {
key := config.AppConfig.EncryptKey
res := Crypto.DecryptDES_ECB(str, key)
return res
}
|
package ibmcloud
import (
"context"
"github.com/openshift/installer/pkg/asset/installconfig/ibmcloud"
)
// AvailabilityZones returns a list of supported zones for the specified region.
func AvailabilityZones(region string) ([]string, error) {
	// NOTE(review): context.TODO is used because no caller context is
	// threaded through here; consider accepting a ctx parameter so callers
	// can cancel the lookup.
	ctx := context.TODO()
	client, err := ibmcloud.NewClient()
	if err != nil {
		return nil, err
	}
	return client.GetVPCZonesForRegion(ctx, region)
}
|
// Copyright 2021 The image-cloner Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docker
import (
"context"
"encoding/base64"
"encoding/json"
"io/ioutil"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"k8s.io/klog/v2"
)
// Client defines the operations that can be performed with a Docker client:
// pulling and pushing an image, and tagging an image src with a new
// reference dst.
type Client interface {
	ImagePull(ctx context.Context, image string) error
	ImagePush(ctx context.Context, image string) error
	ImageTag(ctx context.Context, src, dst string) error
}
// docker implements Client on top of the Docker SDK client, carrying a
// pre-computed base64-encoded registry auth string (built in CreateClient).
type docker struct {
	client *client.Client
	registryAuth string
}
// Volume paths inside the mounted auth volume holding the container
// registry username and password.
const (
	usrPath = "/auth/username"
	pswdPath = "/auth/password"
)
// CreateClient returns a Client backed by a Docker SDK client configured
// from the environment, with registry credentials read from the auth volume
// and pre-encoded (base64 JSON) for later use.
func CreateClient() (Client, error) {
	c, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return nil, err
	}
	auth := types.AuthConfig{
		Username: readData(usrPath),
		Password: readData(pswdPath),
	}
	encodedJSON, err := json.Marshal(auth)
	if err != nil {
		// Return the error instead of panicking: this constructor already
		// has an error result, and callers are expected to handle failures.
		return nil, err
	}
	authStr := base64.StdEncoding.EncodeToString(encodedJSON)
	return &docker{client: c, registryAuth: authStr}, nil
}
// RegistryUser returns the username for the provided container registry.
func RegistryUser() string {
	// Use the shared usrPath constant instead of repeating the literal so
	// the username location is defined in exactly one place.
	return readData(usrPath)
}
// readData reads the file at src and returns its contents as a string. On
// failure it logs the error and returns the empty string.
func readData(src string) string {
	content, err := ioutil.ReadFile(src)
	if err == nil {
		return string(content)
	}
	klog.Errorf("[error]: error reading data from source: %s\n%v", src, err)
	return ""
}
|
package entity
import "github.com/jinzhu/gorm"
// PostImage is the join table between Post and Image.
type PostImage struct {
	gorm.Model //automatically defines ID, CreatedAt, UpdatedAt, DeletedAt
	PostId uint `gorm:"type:int; not null"`
	Position int `gorm:"type:int; default:0 not null"`
	// FIXME: partially written; the DB handling for this field has not been
	// added yet. To be fixed later.
	ImageId uint `gorm:"type:int;"`
	Images []Image `gorm:"foreignKey:ImageId"`
}
|
package p07
// selfDividingNumbers returns every self-dividing number in [left, right].
func selfDividingNumbers(left int, right int) []int {
	result := []int{}
	for candidate := left; candidate <= right; candidate++ {
		if isSelfDividing(candidate) {
			result = append(result, candidate)
		}
	}
	return result
}

// isSelfDividing reports whether every decimal digit of n is nonzero and
// divides n evenly.
func isSelfDividing(n int) bool {
	for rest := n; rest != 0; rest /= 10 {
		digit := rest % 10
		if digit == 0 || n%digit != 0 {
			return false
		}
	}
	return true
}
|
package models
import (
"github.com/astaxie/beego/orm"
"strconv"
"time"
"tokensky_bg_admin/common"
"tokensky_bg_admin/conf"
"tokensky_bg_admin/utils"
)
// HashrateSendBalanceRecordParam carries the query/paging parameters for
// listing hashrate payout records.
type HashrateSendBalanceRecordParam struct {
	BaseQueryParam
	StartTime int64 `json:"startTime"` //start time (unix seconds)
	EndTime int64 `json:"endTime"` //end time (unix seconds)
	Treaty string `json:"treaty"` //order/contract number
	Status string `json:"status"` //status
	CoinType string `json:"coinType"` //coin type
}
// TableName tells the beego ORM which database table backs
// HashrateSendBalanceRecord.
func (a *HashrateSendBalanceRecord) TableName() string {
	return HashrateSendBalanceRecordTBName()
}
// HashrateSendBalanceRecord is the payout record table for hashrate-resource
// assets; it exists to prevent double payout. (Original note kept:
// Unknown column 'T0.-' in 'field list')
type HashrateSendBalanceRecord struct {
	KeyId int `orm:"pk;column(id)"json:"id"form:"id"`
	//coin type
	CoinType string `orm:"column(coin_type)"json:"coinType"form:"coinType"`
	//total quantity
	TotalQuantity float64 `orm:"column(total_quantity)"json:"totalQuantity"form:"totalQuantity"`
	//total hashrate
	TotalHashrate int64 `orm:"column(total_hashrate)"json:"totalHashrate"form:"totalHashrate"`
	//quantity already paid out
	SendQuantity float64 `orm:"column(send_quantity)"json:"sendQuantity"form:"sendQuantity"`
	//electricity fee
	Electric float64 `orm:"column(electric)"json:"electric"form:"electric"`
	//update time
	UpdateTime time.Time `orm:"auto_now;type(datetime);column(update_time)"json:"updateTime"form:"updateTime"`
	//creation time
	CreateTime time.Time `orm:"auto_now_add;type(datetime);column(create_time)"json:"createTime"form:"createTime"`
	//hashrate-resource date
	Isdate time.Time `orm:"type(date);column(isdate)"json:"isdate"form:"isdate"`
	//status: whether finished; 0 unfinished, 1 finished
	Status int
	//profit per T
	UnitOutput float64 `orm:"column(unit_output)"json:"unitOutput"form:"unitOutput"`
	/*other (non-persisted) fields*/
	//profit per share
	Profit float64 `orm:"-"json:"-"form:"-"`
	CTName string `orm:"-"json:"-"form:"-"`
	//profit per T
	ProfitTOne float64 `orm:"-"json:"profitTOne"form:"-"`
	TotalHashrateP float64 `orm:"-"json:"totalHashrateP"form:"-"`
}
// HashrateSendBalanceRecordPageList returns one page of payout records
// matching the given filters, plus the total (unpaged) count. Per-T profit
// and total hashrate in T are derived on each returned row.
func HashrateSendBalanceRecordPageList(params *HashrateSendBalanceRecordParam) ([]*HashrateSendBalanceRecord, int64) {
	o := orm.NewOrm()
	query := o.QueryTable(HashrateSendBalanceRecordTBName())
	data := make([]*HashrateSendBalanceRecord, 0)
	//default sort order
	sortorder := "id"
	switch params.Sort {
	case "id":
		sortorder = "id"
	}
	if params.Order == "desc" {
		sortorder = "-" + sortorder
	}
	//status filter ("" and "-1" mean "all")
	if params.Status != "" && params.Status != "-1" {
		query = query.Filter("status__iexact", params.Status)
	}
	//time window on the payout date
	if params.StartTime > 0 {
		query = query.Filter("isdate__gte", time.Unix(params.StartTime, 0))
	}
	if params.EndTime > 0 {
		query = query.Filter("isdate__lte", time.Unix(params.EndTime, 0))
	}
	//order number
	// NOTE(review): the record struct declares no treaty column — confirm
	// the "treaty" filter matches an actual column in this table.
	if params.Treaty != "" {
		query = query.Filter("treaty__iexact", params.Treaty)
	}
	if params.CoinType != "" {
		query = query.Filter("coin_type__iexact", params.CoinType)
	}
	// NOTE(review): errors from Count and All are discarded; a DB failure
	// is indistinguishable from an empty result here.
	total, _ := query.Count()
	query.OrderBy(sortorder).Limit(params.Limit, (params.Offset-1)*params.Limit).RelatedSel().All(&data)
	for _, obj := range data {
		//profit per T
		obj.ProfitTOne = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_T))
		//total hashrate expressed in T
		obj.TotalHashrateP = utils.Float64Quo(float64(obj.TotalHashrate), float64(conf.HASHRATE_UNIT_T))
	}
	return data, total
}
// HashrateSendBalanceRecordOneByTreatyAndDay fetches the payout record for
// the given treaty updated within the calendar day of now (or today when
// now <= 0). It returns nil when no record matches.
func HashrateSendBalanceRecordOneByTreatyAndDay(treaty int, now int64) *HashrateSendBalanceRecord {
	base := time.Now()
	if now > 0 {
		base = time.Unix(now, 0)
	}
	dayStart := time.Date(base.Year(), base.Month(), base.Day(), 0, 0, 0, 0, base.Location())
	dayEnd := dayStart.AddDate(0, 0, 1)
	record := &HashrateSendBalanceRecord{}
	query := orm.NewOrm().QueryTable(HashrateSendBalanceRecordTBName())
	err := query.Filter("treaty__exact", treaty).
		Filter("update_time__gte", dayStart).
		Filter("update_time__lt", dayEnd).
		One(record)
	if err != nil {
		return nil
	}
	return record
}
// HashrateSendBalanceRecordOneByCoinTypeAndDay fetches the payout record for
// the given coin type updated within the calendar day of now (or today when
// now <= 0). It returns nil when no record matches.
func HashrateSendBalanceRecordOneByCoinTypeAndDay(coinType string, now int64) *HashrateSendBalanceRecord {
	base := time.Now()
	if now > 0 {
		base = time.Unix(now, 0)
	}
	dayStart := time.Date(base.Year(), base.Month(), base.Day(), 0, 0, 0, 0, base.Location())
	dayEnd := dayStart.AddDate(0, 0, 1)
	record := &HashrateSendBalanceRecord{}
	query := orm.NewOrm().QueryTable(HashrateSendBalanceRecordTBName())
	err := query.Filter("coin_type__exact", coinType).
		Filter("update_time__gte", dayStart).
		Filter("update_time__lt", dayEnd).
		One(record)
	if err != nil {
		return nil
	}
	return record
}
// HashrateSendBalanceRecordByDateAndCions returns the payout records for the
// given coin types whose payout date equals the calendar day of tm.
func HashrateSendBalanceRecordByDateAndCions(tm time.Time, coin []string) ([]*HashrateSendBalanceRecord, error) {
	//normalize to midnight of tm's day
	dayStart := time.Date(tm.Year(), tm.Month(), tm.Day(), 0, 0, 0, 0, tm.Location())
	records := make([]*HashrateSendBalanceRecord, 0)
	query := orm.NewOrm().
		QueryTable(HashrateSendBalanceRecordTBName()).
		Filter("coin_type__in", coin).
		Filter("isdate__exact", dayStart)
	if _, err := query.All(&records); err != nil {
		return records, err
	}
	return records, nil
}
/*asset payout*/
// HashrateOrderSendBalanceGetProfitRecord loads (or lazily creates) the
// payout record for each requested coin on the day of tm, and returns the
// unfinished (Status == 0) records keyed by coin type.
func HashrateOrderSendBalanceGetProfitRecord(tm int64, cions []string) (map[string]*HashrateSendBalanceRecord,error) {
	if len(cions) == 0 {
		return make(map[string]*HashrateSendBalanceRecord),nil
	}
	now := time.Unix(tm, 0)
	data, err := HashrateSendBalanceRecordByDateAndCions(now, cions)
	mapp := make(map[string]*HashrateSendBalanceRecord)
	if err != nil {
		return mapp,err
	}
	for _, obj := range data {
		mapp[obj.CoinType] = obj
	}
	//records to be created for coins that have none yet
	newList := make([]*HashrateSendBalanceRecord, 0)
	for _, coin := range cions {
		if _, found := mapp[coin]; !found {
			//fetch historical profit
			profit, found := utils.GetViabtcProfitHistoryData(tm, coin)
			if !found {
				continue
			}
			//fetch historical hashrate
			hashrate, found := utils.GetViabtcHashrateHistoryData(tm, coin)
			if !found {
				continue
			}
			obj := &HashrateSendBalanceRecord{
				CoinType: coin,
				TotalQuantity: profit,
				TotalHashrate: hashrate,
				Isdate: now,
			}
			market := SpiderCoinMarketOne(coin)
			//profit per T (parsed from the market data)
			num, err := strconv.ParseFloat(market.UnitOutput, 64)
			if err != nil{
				return mapp,err
			}
			obj.UnitOutput = num
			newList = append(newList, obj)
		}
	}
	if len(newList) == 0 {
		//nothing new: return only the unfinished existing records
		mapp2 := make(map[string]*HashrateSendBalanceRecord)
		for _, obj := range mapp {
			if obj.Status == 0 {
				mapp2[obj.CoinType] = obj
			}
		}
		return mapp2,nil
	}
	//insert the newly built records
	o := orm.NewOrm()
	if _, err := o.InsertMulti(len(newList), newList); err != nil {
		//save failed
		return make(map[string]*HashrateSendBalanceRecord),err
	}
	//re-read so the returned records carry their DB-assigned ids
	data, err = HashrateSendBalanceRecordByDateAndCions(now, cions)
	mapp = make(map[string]*HashrateSendBalanceRecord)
	if err != nil {
		return mapp,err
	}
	for _, obj := range data {
		if obj.Status == 0 {
			mapp[obj.CoinType] = obj
		}
	}
	return mapp,nil
}
// HashrateOrderSendBalanceCreateProfitTb creates the per-order profit rows
// for one payout day: it iterates all hashrate orders in batches, computes
// each order's profit (minus management fee) and electricity cost, inserts
// the profit rows transactionally, and finally marks each payout record as
// finished (or rolls its Profit back by the failed amount).
func HashrateOrderSendBalanceCreateProfitTb(t1 int64, sendRecordMap map[string]*HashrateSendBalanceRecord) {
	// t1 is the time the profit is paid out for
	//fetch the USDT exchange rate
	exchangeRateUsdt,found := common.UpdateUsdtExchangeRate()
	if !found{
		utils.EmailNotify("criticalToAddress","算力收益创建","获取汇率失败","")
		return
	}
	//midnight timestamp of today
	now := time.Now()
	smallHours := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.Local).Unix()
	//resolve the operation time
	var opTime time.Time
	//an explicit time is required
	if t1 > 0 {
		opTime = time.Unix(t1, 0)
	} else {
		return
	}
	//batched iterator over all orders
	OrderIteration := HashrateOrderIteration(20, opTime)
	//payout-record lookup per treaty
	hsByTid := HashrateOrderSendBalanceByTreaty(sendRecordMap)
	//payout records whose rows failed to persist (record id -> failed profit)
	errHs := make(map[int]float64)
	isIds := make(map[string]struct{})
	for {
		orders := OrderIteration()
		if len(orders) <= 0 {
			break
		}
		//per-order profit rows for this batch
		hashrateOrderProfits := make([]*HashrateOrderProfit, 0)
		//collect ids to guard against duplicate profit
		ids := make([]string, 0, len(orders))
		for _, order := range orders {
			ids = append(ids, order.OrderId)
		}
		//orders that already have a profit row for this period
		mapp := HashrateOrderProfitMapByIds(ids, opTime)
		o := orm.NewOrm()
		//begin transaction
		err := o.Begin()
		if err != nil {
			for _, v := range sendRecordMap {
				errHs[v.KeyId] += 0
			}
			break
		}
		for _, order := range orders {
			//skip orders already seen in a previous batch
			if _,found := isIds[order.OrderId];found{
				continue
			}else {
				isIds[order.OrderId] = struct{}{}
			}
			//check whether a profit row was already created
			if _, found := mapp[order.OrderId]; found {
				//this order already received profit for this period
				continue
			}
			//payout record for this order's treaty
			hobj := hsByTid(order.HashrateTreaty.KeyId)
			if hobj == nil {
				continue
			}
			//if hobj.Profit <= 0 {
			//	continue
			//}
			if order.BuyQuantity < 1 {
				continue
			}
			//number of units bought
			buyQuantity := float64(order.BuyQuantity)
			//gross profit
			profitNum := utils.Float64Mul(hobj.UnitOutput, buyQuantity)
			//management fee
			managementProfit := utils.Float64Mul(profitNum,order.HashrateTreaty.Management)
			//deduct the management fee
			profitNum = utils.Float64Sub(profitNum,managementProfit)
			//electricity cost, converted through the USDT rate
			electricity := utils.Float64Mul(buyQuantity, order.HashrateTreaty.ElectricBill/exchangeRateUsdt)
			//accumulate the paid-out amounts on the payout record
			hobj.SendQuantity = utils.Float64Add(hobj.SendQuantity, profitNum)
			hobj.Electric = utils.Float64Add(hobj.Electric, electricity)
			hashrateOrderProfits = append(hashrateOrderProfits, &HashrateOrderProfit{
				Order: order,
				OrderId: TokenskyOrderIdsInsertOne(conf.ORDER_BUSINESS_HASHRATE_SEND_PRICEP_CODE),
				CategoryName: hobj.CTName,
				User: order.User,
				Profit: profitNum,
				CoinType: hobj.CoinType,
				Electricity: electricity,
				Status: 0,
				Isdate: opTime,
				RecordId: hobj.KeyId,
			})
		}
		size := len(hashrateOrderProfits)
		if size == 0 {
			o.Rollback()
			continue
		}
		_, err = o.InsertMulti(len(hashrateOrderProfits), &hashrateOrderProfits)
		if err != nil {
			for _, v := range hashrateOrderProfits {
				errHs[v.RecordId] = utils.Float64Add(errHs[v.RecordId], v.Profit)
			}
			//roll back
			o.Rollback()
			break
		}
		//commit; on failure record every row of this batch as failed
		if err := o.Commit(); err != nil {
			for _, v := range hashrateOrderProfits {
				errHs[v.RecordId] = utils.Float64Add(errHs[v.RecordId], v.Profit)
			}
		}
		//notify users whose order pays out for the last time today
		for _, order := range orders {
			if order.EndTime.Unix() == smallHours {
				TokenskyJiguangRegistrationidSendByOne(order.User.UserId, "云算力已到期", order.OrderId, "云算力已到期", order.OrderId)
			}
		}
	}
	//done: update the payout record table
	o := orm.NewOrm()
	for _, obj := range sendRecordMap {
		if profit, found := errHs[obj.KeyId]; !found {
			obj.Status = 1
		} else {
			obj.Profit -= profit
		}
		o.Update(obj)
	}
}
// HashrateOrderSendBalanceProfit pays out the pending per-order profit rows:
// for each row it deducts the electricity fee from the user's electricity
// balance, credits the profit through a balance-change batch, writes a
// transaction-detail record, and commits — all inside one ORM transaction
// per row. Users whose electricity balance is insufficient are marked
// (Status 2) and notified at the end.
func HashrateOrderSendBalanceProfit() {
	/*profit payout*/
	hashrateOrderProfitIteration := HashrateOrderProfitIteration(200)
	now := time.Now()
	//users whose electricity balance is insufficient
	electricityIds := make([]int, 0)
	isIds := make(map[int]struct{})
	for {
		profitObjs := hashrateOrderProfitIteration()
		if len(profitObjs) <= 0 {
			break
		}
		for _, obj := range profitObjs {
			//skip unsupported coin types
			if _, found := conf.TOKENSKY_ACCEPT_BALANCE_COIN_TYPES[obj.CoinType]; !found {
				continue
			}
			//skip rows already handled in a previous batch
			if _,found := isIds[obj.Id];found{
				continue
			}else {
				isIds[obj.Id] = struct{}{}
			}
			o := orm.NewOrm()
			//begin transaction
			err := o.Begin()
			if err != nil {
				break
			}
			electricityBalance := TokenskyUserElectricityBalanceByUid(o, obj.User.UserId)
			obj.Status = 0
			balanceChange := common.NewTokenskyUserBalanceChange(3,"hashrateOrderProfit","算力奖励发放")
			if electricityBalance == nil {
				//no electricity-balance row for this user
				obj.Status = 2
				//o.Rollback()
				//continuex
			} else {
				//deduct the electricity fee
				electricityBalance.Balance = utils.Float64Sub(electricityBalance.Balance, obj.Electricity)
				if electricityBalance.Balance < 0 {
					//insufficient electricity balance
					obj.Status = 2
					//o.Rollback()
					//continue
				}
			}
			//electricity balance is sufficient: perform the payout
			if obj.Status != 2 {
				balanceChange.Add(obj.User.UserId,obj.CoinType,obj.Order.OrderId,
					conf.CHANGE_ADD,obj.Profit,"",0)
				//user balance table (superseded by the balance-change batch)
				//balance := GetTokenskyUserBalanceByUidCoinType2(o, obj.User.UserId, obj.CoinType)
				//if balance != nil {
				//	//存在 更新操作
				//	balance.Balance = utils.Float64Add(balance.Balance, obj.Profit)
				//	if _, err := o.Update(balance); err != nil {
				//		o.Rollback()
				//		continue
				//	}
				//} else {
				//	//不存在 新增操作
				//	balance = &TokenskyUserBalance{
				//		UserId:   obj.User.UserId,
				//		CoinType: obj.CoinType,
				//		Balance:  obj.Profit,
				//	}
				//	if _, err := o.Insert(balance); err != nil {
				//		o.Rollback()
				//		continue
				//	}
				//}
				//persist the reduced electricity balance
				if _, err := o.Update(electricityBalance); err != nil {
					o.Rollback()
					continue
				}
				//mark the profit row as paid
				obj.Status = 1
				//insert a transaction-detail record
				record := &TokenskyTransactionRecord{
					CoinType: obj.CoinType,
					TranType: "算力合约收益",
					PushTime: now,
					Category: 1, //income
					Money: obj.Profit,
					Status: 1,
					//TranNum:strconv.Itoa(obj.Id), transaction number
					User: &TokenskyUser{UserId: obj.User.UserId},
					RelevanceId: obj.OrderId,
					RelevanceCategory: "hashrateOrderProfit",
				}
				if _, err := o.Insert(record); err != nil {
					o.Rollback()
					continue
				}
			} else {
				//insufficient electricity: remember the user for notification
				electricityIds = append(electricityIds, obj.User.UserId)
			}
			if _, err := o.Update(obj); err != nil {
				o.Rollback()
				continue
			}
			//apply the queued balance changes
			if balanceChange.Count() >0{
				ok,_,tx := balanceChange.Send()
				if !ok{
					o.Rollback()
					continue
				}
				ok = TokenskyUserBalanceHashSetStatus(o,tx)
				if !ok{
					o.Rollback()
					continue
				}
			}
			if err := o.Commit(); err != nil {
				o.Rollback()
				continue
			}
		}
	}
	//notify users whose electricity balance was insufficient
	TokenskyJiguangRegistrationidSendByIds(electricityIds, "算力电费不足", "无法获取收益", "算力电费不足", "无法获取收益")
}
// HashrateOrderSendBalanceGetProfitByCoinType builds a memoized lookup from
// treaty id to per-unit profit; a negative return value means the profit
// does not exist.
// NOTE(review): obj.TotalHashrate/conf.HASHRATE_UNIT_X is integer division
// before the float conversion — confirm TotalHashrate is always a multiple
// of the unit, otherwise precision is lost here.
func HashrateOrderSendBalanceGetProfitByCoinType(objs map[string]*HashrateSendBalanceRecord) func(tid int) float64 {
	//coin type -> hashrate unit -> per-unit profit
	mapp := make(map[string]map[string]float64)
	for _, obj := range objs {
		if obj.TotalHashrate <= 0 {
			continue
		}
		mapp[obj.CoinType] = make(map[string]float64)
		for _, st := range []string{"H", "K","M", "G", "T", "P", "E"} {
			switch st {
			case "H":
				mapp[obj.CoinType]["H"] = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_H))
			case "K":
				mapp[obj.CoinType]["K"] = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_K))
			case "M":
				mapp[obj.CoinType]["M"] = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_M))
			case "G":
				mapp[obj.CoinType]["G"] = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_G))
			case "T":
				mapp[obj.CoinType]["T"] = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_T))
			case "P":
				mapp[obj.CoinType]["P"] = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_P))
			case "E":
				mapp[obj.CoinType]["E"] = utils.Float64Quo(obj.TotalQuantity, float64(obj.TotalHashrate/conf.HASHRATE_UNIT_E))
			}
		}
	}
	//per-treaty memoization cache
	orders := make(map[int]float64)
	return func(tid int) float64 {
		if v, found := orders[tid]; found {
			return v
		} else {
			if obj := HashrateTreatyOneById(tid); obj != nil {
				if vs, found := mapp[obj.HashrateCategoryObj.Name]; found {
					if v, found := vs[obj.HashrateCategoryObj.Unit]; found {
						orders[obj.KeyId] = v
						return v
					}
				}
			}
		}
		//cache the miss so the treaty is not re-resolved next time
		orders[tid] = -1
		return -1
	}
}
// HashrateOrderSendBalanceByTreaty builds a memoized lookup from treaty id
// to its payout record. The returned record has Profit set to the per-unit
// profit for the treaty's hashrate unit and CTName set to the category
// name; nil is returned (and cached) when no matching record exists.
func HashrateOrderSendBalanceByTreaty(objs map[string]*HashrateSendBalanceRecord) func(tid int) *HashrateSendBalanceRecord {
	//coin type -> hashrate unit -> per-unit profit
	mapp := make(map[string]map[string]float64)
	hs := make(map[int]*HashrateSendBalanceRecord)
	for _, obj := range objs {
		if obj.TotalHashrate <= 0 {
			continue
		}
		mapp[obj.CoinType] = make(map[string]float64)
		for _, st := range []string{"H", "K","M", "G", "T", "P", "E"} {
			var num float64
			switch st {
			case "H":
				num = obj.TotalQuantity * float64(conf.HASHRATE_UNIT_H) / float64(obj.TotalHashrate)
			case "K":
				num = obj.TotalQuantity * float64(conf.HASHRATE_UNIT_K) / float64(obj.TotalHashrate)
			case "M":
				num = obj.TotalQuantity * float64(conf.HASHRATE_UNIT_M) / float64(obj.TotalHashrate)
			case "G":
				num = obj.TotalQuantity * float64(conf.HASHRATE_UNIT_G) / float64(obj.TotalHashrate)
			case "T":
				num = obj.TotalQuantity * float64(conf.HASHRATE_UNIT_T) / float64(obj.TotalHashrate)
			case "P":
				num = obj.TotalQuantity * float64(conf.HASHRATE_UNIT_P) / float64(obj.TotalHashrate)
			case "E":
				num = obj.TotalQuantity * float64(conf.HASHRATE_UNIT_E) / float64(obj.TotalHashrate)
			}
			//drop values below the configured precision threshold
			if num >= utils.FloatGetPrec(conf.FLOAT_PRECISE_NUM_8) {
				mapp[obj.CoinType][st] = num
			}
		}
	}
	//per-treaty memoized resolver
	return func(tid int) *HashrateSendBalanceRecord {
		if h, found := hs[tid]; found {
			return h
		} else {
			if obj := HashrateTreatyOneById(tid); obj != nil {
				if h, found := objs[obj.HashrateCategoryObj.Name]; found {
					if vs, found := mapp[obj.HashrateCategoryObj.Name]; found {
						if v, found := vs[obj.HashrateCategoryObj.Unit]; found {
							h.Profit = v
							h.CTName = obj.HashrateCategoryObj.Name
							hs[tid] = h
							return h
						}
					}
				}
			}
		}
		//cache the miss
		hs[tid] = nil
		return nil
	}
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"math"
"reflect"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/mathutil"
)
// onCreateSequence handles the DDL job that creates a sequence: it decodes
// the table info from the job arguments, checks for name collisions, bumps
// the schema version, persists the sequence meta with its initial value,
// and finishes the job.
func onCreateSequence(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	schemaID := job.SchemaID
	tbInfo := &model.TableInfo{}
	if err := job.DecodeArgs(tbInfo); err != nil {
		// Invalid arguments, cancel this job.
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	tbInfo.State = model.StateNone
	err := checkTableNotExists(d, t, schemaID, tbInfo.Name.L)
	if err != nil {
		if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableExists.Equal(err) {
			job.State = model.JobStateCancelled
		}
		return ver, errors.Trace(err)
	}
	ver, err = updateSchemaVersion(d, t, job)
	if err != nil {
		return ver, errors.Trace(err)
	}
	switch tbInfo.State {
	case model.StateNone:
		// none -> public
		tbInfo.State = model.StatePublic
		tbInfo.UpdateTS = t.StartTS
		err = createSequenceWithCheck(t, job, schemaID, tbInfo)
		if err != nil {
			return ver, errors.Trace(err)
		}
		// Finish this job.
		job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbInfo)
		asyncNotifyEvent(d, &util.Event{Tp: model.ActionCreateSequence, TableInfo: tbInfo})
		return ver, nil
	default:
		return ver, dbterror.ErrInvalidDDLState.GenWithStackByArgs("sequence", tbInfo.State)
	}
}
// createSequenceWithCheck validates the table info and persists the
// sequence meta together with its initial base value. The base is stored
// one step before Start in the direction of Increment (Start-1 when
// ascending, Start+1 when descending).
func createSequenceWithCheck(t *meta.Meta, job *model.Job, schemaID int64, tbInfo *model.TableInfo) error {
	err := checkTableInfoValid(tbInfo)
	if err != nil {
		job.State = model.JobStateCancelled
		return errors.Trace(err)
	}
	var sequenceBase int64
	if tbInfo.Sequence.Increment >= 0 {
		sequenceBase = tbInfo.Sequence.Start - 1
	} else {
		sequenceBase = tbInfo.Sequence.Start + 1
	}
	return t.CreateSequenceAndSetSeqValue(schemaID, tbInfo, sequenceBase)
}
// handleSequenceOptions applies the parsed CREATE SEQUENCE options onto
// sequenceInfo, then fills in defaults for any of min/max/start that were
// not explicitly set, choosing the positive or negative default set by the
// sign of Increment.
func handleSequenceOptions(seqOptions []*ast.SequenceOption, sequenceInfo *model.SequenceInfo) {
	var (
		minSetFlag bool
		maxSetFlag bool
		startSetFlag bool
	)
	for _, op := range seqOptions {
		switch op.Tp {
		case ast.SequenceOptionIncrementBy:
			sequenceInfo.Increment = op.IntValue
		case ast.SequenceStartWith:
			sequenceInfo.Start = op.IntValue
			startSetFlag = true
		case ast.SequenceMinValue:
			sequenceInfo.MinValue = op.IntValue
			minSetFlag = true
		case ast.SequenceMaxValue:
			sequenceInfo.MaxValue = op.IntValue
			maxSetFlag = true
		case ast.SequenceCache:
			sequenceInfo.CacheValue = op.IntValue
			sequenceInfo.Cache = true
		case ast.SequenceNoCache:
			sequenceInfo.CacheValue = 0
			sequenceInfo.Cache = false
		case ast.SequenceCycle:
			sequenceInfo.Cycle = true
		case ast.SequenceNoCycle:
			sequenceInfo.Cycle = false
		}
	}
	// Fill the default value, min/max/start should be adjusted with the sign of sequenceInfo.Increment.
	if !(minSetFlag && maxSetFlag && startSetFlag) {
		if sequenceInfo.Increment >= 0 {
			if !minSetFlag {
				sequenceInfo.MinValue = model.DefaultPositiveSequenceMinValue
			}
			if !startSetFlag {
				sequenceInfo.Start = mathutil.Max(sequenceInfo.MinValue, model.DefaultPositiveSequenceStartValue)
			}
			if !maxSetFlag {
				sequenceInfo.MaxValue = model.DefaultPositiveSequenceMaxValue
			}
		} else {
			if !maxSetFlag {
				sequenceInfo.MaxValue = model.DefaultNegativeSequenceMaxValue
			}
			if !startSetFlag {
				sequenceInfo.Start = mathutil.Min(sequenceInfo.MaxValue, model.DefaultNegativeSequenceStartValue)
			}
			if !minSetFlag {
				sequenceInfo.MinValue = model.DefaultNegativeSequenceMinValue
			}
		}
	}
}
// validateSequenceOptions reports whether the sequence definition is
// internally consistent: nonzero increment, positive cache value when
// caching is enabled, start within [min, max], bounds away from the int64
// extremes, and a cache small enough that cache*increment cannot overflow.
func validateSequenceOptions(seqInfo *model.SequenceInfo) bool {
	// To ensure that cache * increment will never overflow.
	var maxIncrement int64
	if seqInfo.Increment == 0 {
		// Increment shouldn't be set as 0.
		return false
	}
	if seqInfo.Cache && seqInfo.CacheValue <= 0 {
		// Cache value should be bigger than 0.
		return false
	}
	maxIncrement = mathutil.Abs(seqInfo.Increment)
	return seqInfo.MaxValue >= seqInfo.Start &&
		seqInfo.MaxValue > seqInfo.MinValue &&
		seqInfo.Start >= seqInfo.MinValue &&
		seqInfo.MaxValue != math.MaxInt64 &&
		seqInfo.MinValue != math.MinInt64 &&
		seqInfo.CacheValue < (math.MaxInt64-maxIncrement)/maxIncrement
}
// buildSequenceInfo constructs a SequenceInfo from a CREATE SEQUENCE
// statement: it starts from the model defaults, applies table options
// (only COMMENT and ENGINE are accepted), applies the sequence options,
// and validates the result.
func buildSequenceInfo(stmt *ast.CreateSequenceStmt, ident ast.Ident) (*model.SequenceInfo, error) {
	sequenceInfo := &model.SequenceInfo{
		Cache: model.DefaultSequenceCacheBool,
		Cycle: model.DefaultSequenceCycleBool,
		CacheValue: model.DefaultSequenceCacheValue,
		Increment: model.DefaultSequenceIncrementValue,
	}
	// Handle table comment options.
	for _, op := range stmt.TblOptions {
		switch op.Tp {
		case ast.TableOptionComment:
			sequenceInfo.Comment = op.StrValue
		case ast.TableOptionEngine:
			// TableOptionEngine will always be 'InnoDB', thus we do nothing in this branch to avoid error happening.
		default:
			return nil, dbterror.ErrSequenceUnsupportedTableOption.GenWithStackByArgs(op.StrValue)
		}
	}
	handleSequenceOptions(stmt.SeqOptions, sequenceInfo)
	if !validateSequenceOptions(sequenceInfo) {
		return nil, dbterror.ErrSequenceInvalidData.GenWithStackByArgs(ident.Schema.L, ident.Name.L)
	}
	return sequenceInfo, nil
}
// alterSequenceOptions overwrites oldSequence in place with the given ALTER
// SEQUENCE options and validates the result. It returns whether the
// sequence value must be restarted and, if so, the value to restart from
// (the RESTART WITH value, or Start for a bare RESTART).
func alterSequenceOptions(sequenceOptions []*ast.SequenceOption, ident ast.Ident, oldSequence *model.SequenceInfo) (bool, int64, error) {
	var (
		restartFlag bool
		restartWithFlag bool
		restartValue int64
	)
	// Override the old sequence value with new option.
	for _, op := range sequenceOptions {
		switch op.Tp {
		case ast.SequenceOptionIncrementBy:
			oldSequence.Increment = op.IntValue
		case ast.SequenceStartWith:
			oldSequence.Start = op.IntValue
		case ast.SequenceMinValue:
			oldSequence.MinValue = op.IntValue
		case ast.SequenceMaxValue:
			oldSequence.MaxValue = op.IntValue
		case ast.SequenceCache:
			oldSequence.CacheValue = op.IntValue
			oldSequence.Cache = true
		case ast.SequenceNoCache:
			oldSequence.CacheValue = 0
			oldSequence.Cache = false
		case ast.SequenceCycle:
			oldSequence.Cycle = true
		case ast.SequenceNoCycle:
			oldSequence.Cycle = false
		case ast.SequenceRestart:
			restartFlag = true
		case ast.SequenceRestartWith:
			restartWithFlag = true
			restartValue = op.IntValue
		}
	}
	if !validateSequenceOptions(oldSequence) {
		return false, 0, dbterror.ErrSequenceInvalidData.GenWithStackByArgs(ident.Schema.L, ident.Name.L)
	}
	// RESTART WITH takes precedence over a bare RESTART.
	if restartWithFlag {
		return true, restartValue, nil
	}
	if restartFlag {
		return true, oldSequence.Start, nil
	}
	return false, 0, nil
}
// onAlterSequence handles the DDL job that alters a sequence: it decodes
// the options, applies them to a copy of the current sequence info,
// restarts the stored sequence value when requested, and persists the new
// info with a bumped schema version.
func onAlterSequence(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	schemaID := job.SchemaID
	var (
		sequenceOpts []*ast.SequenceOption
		ident ast.Ident
	)
	if err := job.DecodeArgs(&ident, &sequenceOpts); err != nil {
		// Invalid arguments, cancel this job.
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	// Get the old tableInfo.
	tblInfo, err := checkTableExistAndCancelNonExistJob(t, job, schemaID)
	if err != nil {
		return ver, errors.Trace(err)
	}
	// Substitute the sequence info.
	copySequenceInfo := *tblInfo.Sequence
	restart, restartValue, err := alterSequenceOptions(sequenceOpts, ident, &copySequenceInfo)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	// No change and no restart requested: finish the job without touching kv.
	same := reflect.DeepEqual(*tblInfo.Sequence, copySequenceInfo)
	if same && !restart {
		job.State = model.JobStateDone
		return ver, errors.Trace(err)
	}
	tblInfo.Sequence = &copySequenceInfo
	// Restart the sequence value.
	// Notice: during the alter sequence process, if there is some dml continually consumes sequence (nextval/setval),
	// the below cases will occur:
	// Since the table schema haven't been refreshed in local/other node, dml will still use old definition of sequence
	// to allocate sequence ids. Once the restart value is updated to kv here, the allocated ids in the upper layer won't
	// guarantee to be consecutive and monotonous.
	if restart {
		err := restartSequenceValue(t, schemaID, tblInfo, restartValue)
		if err != nil {
			return ver, errors.Trace(err)
		}
	}
	// Store the sequence info into kv.
	// Set shouldUpdateVer always to be true even altering doesn't take effect, since some tools like drainer won't take
	// care of SchemaVersion=0.
	ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true)
	if err != nil {
		return ver, errors.Trace(err)
	}
	// Finish this job.
	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
	return ver, nil
}
// Like setval does, restart sequence value won't affect the current step
// frequency. It will look backward for the first valid sequence value
// rather than return the restart value directly: the stored base is one
// step before seqValue in the direction of Increment.
func restartSequenceValue(t *meta.Meta, dbID int64, tblInfo *model.TableInfo, seqValue int64) error {
	var sequenceBase int64
	if tblInfo.Sequence.Increment >= 0 {
		sequenceBase = seqValue - 1
	} else {
		sequenceBase = seqValue + 1
	}
	return t.RestartSequenceValue(dbID, tblInfo, sequenceBase)
}
|
package puzzle
import (
"testing"
)
// TestSetPossibilities checks Set.Possibilities for the first set of the
// easy test puzzle after possibilities have been calculated.
func TestSetPossibilities(t *testing.T) {
	puzzle := CreateTestPuzzleEasy()
	puzzle = puzzle.CalculatePossibilities()
	var set Set = puzzle[0]
	// Expected possibility per candidate index; all nine start as possible.
	tables := []struct {
		index int
		possible bool
	}{
		{0, true},
		{1, true},
		{2, true},
		{3, true},
		{4, true},
		{5, true},
		{6, true},
		{7, true},
		{8, true},
	}
	poss := set.Possibilities(9)
	for _, table := range tables {
		if poss[table.index] != table.possible {
			t.Errorf("Possibilities at index %d wrong, expected: %t, got %t", table.index, table.possible, poss[table.index])
		}
	}
	// Flip the expectation for index 5, then query again with argument 0.
	// NOTE(review): the semantics of the Possibilities argument (9 vs 0) are
	// not visible in this file — confirm against the Set implementation.
	tables[5].possible = false
	poss = set.Possibilities(0)
	for _, table := range tables {
		if poss[table.index] != table.possible {
			t.Errorf("Possibilities at index %d wrong, expected: %t, got %t", table.index, table.possible, poss[table.index])
		}
	}
}
|
package main
import (
"flag"
"fmt"
"time"
)
// period is the sleep duration, configurable via the -period flag.
var period = flag.Duration("period", 1*time.Second, "sleep period")
// main parses flags, announces the configured sleep period, sleeps for
// it, and then terminates the output line.
func main() {
	flag.Parse()
	d := *period
	fmt.Print("Sleeping for ", d, " ... ")
	time.Sleep(d)
	fmt.Println()
}
/* 注意flag的用法
package flag
// Value is the interface to the value stored in a flag
type Value interface {
String() string
Set(string) error
}
*/
|
package Core
import "com/pdool/DataStruct"
// PropertyManager owns the named properties belonging to one entity,
// stored in a dictionary keyed by property name.
type PropertyManager struct {
	guid GUID
	props DataStruct.Dictionary
}
// NewPropertyManager creates a property manager for the entity identified
// by guid, starting with an empty property dictionary.
func NewPropertyManager(guid GUID) *PropertyManager {
	pMgr := new(PropertyManager)
	pMgr.guid = guid
	pMgr.props = DataStruct.Dictionary{}
	return pMgr
}
// AddProperty registers a new property under propName with the given
// type, default value and persistence flag. It returns false when a
// property with that name already exists.
func (p *PropertyManager) AddProperty(guid GUID, propName string, propType int, defaultValue interface{}, isSave bool) bool {
	if p.HasProperty(propName) {
		return false
	}
	prop := NewProperty(guid, propName, propType, defaultValue, isSave)
	p.props.Set(propName, prop)
	return true
}
// SetProperty assigns value to the named property and returns whether it
// succeeded. Previously the lookup result was type-asserted without the
// comma-ok form, so a missing property panicked instead of returning
// false; the comma-ok assertion makes the nil check effective.
func (p *PropertyManager) SetProperty(propName string, value interface{}) bool {
	prop, ok := p.props.Get(propName).(*Property)
	if !ok || prop == nil {
		return false
	}
	prop.SetValue(value)
	return true
}
// HasProperty reports whether a property named propName exists.
func (p *PropertyManager) HasProperty(propName string) bool {
	return p.props.Has(propName)
}
// GetPropertyInt returns the named property's value as an int.
// NOTE(review): panics when the property is missing or not an int —
// confirm callers always guard with HasProperty.
func (p *PropertyManager) GetPropertyInt(propName string) int {
	return p.props.Get(propName).(*Property).GetValue().(int)
}
// GetPropertyFloat returns the named property's value as a float32.
// NOTE(review): panics when the property is missing or not a float32.
func (p *PropertyManager) GetPropertyFloat(propName string) float32 {
	return p.props.Get(propName).(*Property).GetValue().(float32)
}
// GetPropertyString returns the named property's value as a string.
// NOTE(review): panics when the property is missing or not a string.
func (p *PropertyManager) GetPropertyString(propName string) string {
	return p.props.Get(propName).(*Property).GetValue().(string)
}
// GetPropertyObj returns the named property's value. NOTE(review):
// despite the interface{} return type and the "Obj" name, the value is
// asserted to GUID, so non-GUID values panic — confirm this is intended.
func (p *PropertyManager) GetPropertyObj(propName string) interface{} {
	return p.props.Get(propName).(*Property).GetValue().(GUID)
}
|
package main
import (
"context"
api "github.com/micro/go-api/proto"
"github.com/micro/go-micro/errors"
"encoding/json"
"strings"
)
// Handler implements the go-micro API handler for this service.
type Handler struct {
}
// Hello handles an API request. It requires a non-empty "name" query
// parameter and echoes the joined values back in a JSON message body.
func (h *Handler) Hello(ctx context.Context, req *api.Request, resp *api.Response) error {
	name, ok := req.Get["name"]
	if !ok || len(name.Values) == 0 {
		return errors.BadRequest("go.micro.api.user", "no content")
	}
	payload := map[string]string{
		"message": "got you request" + strings.Join(name.Values, " "),
	}
	body, _ := json.Marshal(payload)
	resp.StatusCode = 200
	resp.Body = string(body)
	return nil
}
|
/*
* Copyright (c) 2014-2015, Yawning Angel <yawning at torproject dot org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
// Go language Tor Pluggable Transport suite. Works only as a managed
// client/server.
package stun_udp
import (
"fmt"
"io"
golog "log"
"net"
"net/url"
"github.com/OperatorFoundation/shapeshifter-dispatcher/modes"
common "github.com/willscott/goturn/common"
"github.com/willscott/goturn"
"github.com/OperatorFoundation/shapeshifter-dispatcher/common/log"
"github.com/OperatorFoundation/shapeshifter-dispatcher/common/pt_extras"
)
// ClientSetup starts the UDP client side of the pluggable transport,
// delegating listener setup to modes.ClientSetupUDP with our handler.
func ClientSetup(socksAddr string, ptClientProxy *url.URL, names []string, options string) bool {
	return modes.ClientSetupUDP(socksAddr, ptClientProxy, names, options, clientHandler)
}
// clientHandler reads UDP packets from conn forever and forwards each one
// over a per-peer transport connection tracked in a ConnTracker. A packet
// from an unknown peer triggers a connection attempt and is dropped;
// packets arriving while the attempt is in flight are also dropped.
func clientHandler(name string, options string, conn *net.UDPConn, proxyURI *url.URL) {
	//defers are never called due to infinite loop
	fmt.Println("@@@ handling...")
	tracker := make(modes.ConnTracker)
	fmt.Println("Transport is", name)
	buf := make([]byte, 1024)
	// Receive UDP packets and forward them over transport connections forever
	for {
		numBytes, addr, err := conn.ReadFromUDP(buf)
		if err != nil {
			// addr may be nil after a failed read; report and retry instead
			// of dereferencing it below (the old code checked err too late).
			fmt.Println("Error: ", err)
			continue
		}
		fmt.Println("Received ", string(buf[0:numBytes]), " from ", addr)
		goodBytes := buf[:numBytes]
		fmt.Println(tracker)
		if state, ok := tracker[addr.String()]; ok {
			// There is an open transport connection, or a connection attempt is in progress.
			if state.Waiting {
				// The connection attempt is in progress.
				// Drop the packet.
				fmt.Println("recv: waiting")
			} else {
				// There is an open transport connection.
				// Send the packet through the transport.
				fmt.Println("recv: write")
				//ignoring failed writes because packets can be dropped
				_, _ = state.Conn.Write(goodBytes)
			}
		} else {
			// There is not an open transport connection and a connection attempt is not in progress.
			// Open a transport connection.
			fmt.Println("Opening connection to ")
			modes.OpenConnection(&tracker, addr.String(), name, options, proxyURI, false, "")
			// Drop the packet.
			fmt.Println("recv: Open")
		}
	}
}
// ServerSetup starts the UDP server side of the pluggable transport,
// delegating listener setup to modes.ServerSetupUDP with our handler.
func ServerSetup(ptServerInfo pt_extras.ServerInfo, stateDir string, options string) (launched bool) {
	return modes.ServerSetupUDP(ptServerInfo, stateDir, options, serverHandler)
}
// serverHandler relays STUN-framed messages from a transport connection to
// the ORPort over a fresh local UDP socket: it repeatedly reads a 20-byte
// STUN header, then the payload length the header declares, and forwards
// header+payload as one datagram. Any read/parse failure ends the loop and
// closes both ends.
func serverHandler(name string, remote net.Conn, info *pt_extras.ServerInfo) {
	var header *common.Message
	addrStr := log.ElideAddr(remote.RemoteAddr().String())
	fmt.Println("### handling", name)
	log.Infof("%s(%s) - new connection", name, addrStr)
	serverAddr, err := net.ResolveUDPAddr("udp", info.OrAddr.String())
	if err != nil {
		_ = remote.Close()
		golog.Fatal(err)
	}
	// Bind an ephemeral local port for the outgoing UDP leg.
	localAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		_ = remote.Close()
		golog.Fatal(err)
	}
	dest, err := net.DialUDP("udp", localAddr, serverAddr)
	if err != nil {
		_ = remote.Close()
		golog.Fatal(err)
	}
	fmt.Println("pumping")
	// STUN message headers are a fixed 20 bytes.
	headerBuffer := make([]byte, 20)
	for {
		fmt.Println("reading...")
		// Read the incoming connection into the buffer.
		_, err := io.ReadFull(remote, headerBuffer)
		if err != nil {
			fmt.Println("read error")
			break
		}
		header, err = goturn.ParseStun(headerBuffer)
		if err != nil {
			fmt.Println("parse error")
			break
		}
		fmt.Println(header.Length)
		fmt.Println("reading data")
		// header.Length gives the size of the payload that follows the header.
		readBuffer := make([]byte, header.Length)
		_, err = io.ReadFull(remote, readBuffer)
		if err != nil {
			fmt.Println("read error")
			break
		}
		writeBuffer := append(headerBuffer, readBuffer...)
		_, _ = dest.Write(writeBuffer)
	}
	_ = dest.Close()
	_ = remote.Close()
}
|
package main
import (
"fmt"
"io"
"log"
"net/http"
)
func hello(w http.ResponseWriter, r *http.Request) {
fmt.Printf("%+v", r)
_, err := io.WriteString(w, "Hello world!")
if err != nil {
panic(err)
}
}
// main serves static files from ./static and a greeting at /hello on
// port 3000, panicking if the listener cannot start.
func main() {
	http.HandleFunc("/hello", hello)
	staticFiles := http.FileServer(http.Dir("static"))
	http.Handle("/", staticFiles)
	log.Println("Listening...")
	if err := http.ListenAndServe(":3000", nil); err != nil {
		panic(err)
	}
}
|
package wphash
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCheckWordPressPasswordHash verifies one matching and one
// non-matching phpass hash for the password "123456".
func TestCheckWordPressPasswordHash(t *testing.T) {
	// matching hash for 123456
	matching := CheckWordPressPasswordHash("123456", "$P$BmIaPlVaAl6kEsffVZGdASCVH.i1cZ0")
	assert.Equal(t, true, matching)
	// hash of a different password must not match 123456
	nonMatching := CheckWordPressPasswordHash("123456", "$P$B4VXOnAaJ9nC10J0bJ8jnBxcP2L6Iv0")
	assert.Equal(t, false, nonMatching)
}
// TestHashPassword hashes a password and verifies the generated hash
// validates against the original password.
func TestHashPassword(t *testing.T) {
	hashed := HashPassword("123456")
	ok := CheckWordPressPasswordHash("123456", hashed)
	assert.Equal(t, true, ok)
}
|
package ksqlparser
import (
"fmt"
"strings"
)
// peek returns the next token without advancing the cursor. When
// reservedWords are supplied, only those words can match.
func (p *parser) peek(reservedWords ...string) string {
	word, _ := p.peekWithLength(reservedWords...)
	return word
}
// pop returns the next token and advances the cursor past it and any
// trailing whitespace.
func (p *parser) pop(reservedWords ...string) string {
	word, length := p.peekWithLength(reservedWords...)
	p.i += length
	p.popWhitespace()
	return word
}
// popOrError behaves like pop, but reports an error when none of the
// expected reserved words matches at the cursor.
func (p *parser) popOrError(reservedWords ...string) (string, error) {
	word, length, err := p.peekWithLengthOrError(reservedWords...)
	if err != nil {
		return "", err
	}
	p.i += length
	p.popWhitespace()
	return word, nil
}
// popLength advances the cursor by n characters and then skips any
// following whitespace/comments. The parameter was renamed from "len",
// which shadowed the builtin of the same name.
func (p *parser) popLength(n int) {
	p.i += n
	p.popWhitespace()
}
// popWhitespace advances the cursor past whitespace and SQL comments
// (both /* ... */ block comments and -- line comments), keeping the
// line/column counters up to date. It recurses after each comment so
// interleaved whitespace and comments are fully consumed.
func (p *parser) popWhitespace() {
	for ; p.i < len(p.sql) && isWhitespaceRune(rune(p.sql[p.i])); p.i++ {
		if p.sql[p.i] == '\n' {
			p.line++
			// -1 so the unconditional p.col++ below lands on column 0.
			p.col = -1
		}
		p.col++
	}
	// check for multiline comment start
	if "/*" == p.sql[p.i:min(len(p.sql), p.i+2)] {
		p.col += 2
		for p.i += 2; p.i < len(p.sql); p.i++ {
			if "*/" == strings.ToUpper(p.sql[p.i:min(len(p.sql), p.i+2)]) {
				p.col += 2
				p.i += 2
				break
			}
			if p.sql[p.i] == '\n' {
				p.line++
				p.col = -1
			}
			p.col++
		}
		//ensure that were out of whitespace
		p.popWhitespace()
	}
	// check for singleline comment
	if "--" == p.sql[p.i:min(len(p.sql), p.i+2)] {
		p.col += 2
		for p.i += 2; p.i < len(p.sql); p.i++ {
			if p.sql[p.i] == '\n' {
				p.line++
				p.col = 0
				break
			}
			p.col++
		}
		p.popWhitespace()
	}
}
// peekWithLengthOrError matches one of the given reserved words at the
// cursor (case-insensitively) and returns the uppercased token plus its
// length, or an error listing the words that were expected.
func (p *parser) peekWithLengthOrError(reservedWords ...string) (string, int, error) {
	if p.i >= len(p.sql) {
		return "", 0, p.Error(fmt.Sprintf("[%s]", strings.Join(reservedWords, ", ")))
	}
	for _, candidate := range reservedWords {
		end := min(len(p.sql), p.i+len(candidate))
		token := strings.ToUpper(p.sql[p.i:end])
		if token == candidate {
			return token, len(token), nil
		}
	}
	return "", 0, p.Error(fmt.Sprintf("[%s]", strings.Join(reservedWords, ", ")))
}
// peekWithLength returns the next token and its length without consuming
// it. Reserved words are tried first (case-insensitively, in the given
// order); otherwise a quoted string or an identifier is peeked.
func (p *parser) peekWithLength(reservedWords ...string) (string, int) {
	if p.i >= len(p.sql) {
		return "", 0
	}
	for _, rWord := range reservedWords {
		token := strings.ToUpper(p.sql[p.i:min(len(p.sql), p.i+len(rWord))])
		if token == rWord {
			return token, len(token)
		}
	}
	if p.sql[p.i] == '\'' { // Quoted string
		return p.peekQuotedStringWithLength()
	}
	return p.peekIdentifierWithLength()
}
// peekQuotedStringWithLength returns the single-quoted string starting at
// the cursor (quotes included) and its length, or ("", 0) when the cursor
// is out of range, not on a quote, or the string is unterminated.
func (p *parser) peekQuotedStringWithLength() (string, int) {
	// Guard with >=: the previous "len(p.sql) < p.i" test let p.i ==
	// len(p.sql) through, and p.sql[p.i] then panicked with an index
	// out of range.
	if p.i >= len(p.sql) || p.sql[p.i] != '\'' {
		return "", 0
	}
	for i := p.i + 1; i < len(p.sql); i++ {
		// A closing quote ends the string unless escaped by a backslash.
		if p.sql[i] == '\'' && p.sql[i-1] != '\\' {
			return p.sql[p.i : i+1], (i + 1) - p.i
		}
	}
	return "", 0
}
// peekIdentifierWithLength returns the identifier starting at the cursor
// and its length. The "->" accessor is treated as part of the identifier
// so paths like col->field peek as a single token.
func (p *parser) peekIdentifierWithLength() (string, int) {
	for i := p.i; i < len(p.sql); i++ {
		snip := p.sql[i:min(len(p.sql), i+2)]
		if "->" == snip {
			i++ //progress past the -
			continue //continue will progress the >
		}
		if !isIdentifierRune(rune(p.sql[i])) {
			return p.sql[p.i:i], len(p.sql[p.i:i])
		}
	}
	// The identifier runs to the end of the input.
	return p.sql[p.i:], len(p.sql[p.i:])
}
|
package main
/*
事件驱动调度
*/
import (
"fmt"
"time"
"sync"
)
// eventMap is the scheduler's state: a bitmap of pending event indices,
// a countdown index, and the ticker driving the simulated interrupt.
// The mutex guards bitmap for the concurrent set/clear/has accessors.
type eventMap struct {
	index  int
	length int
	bitmap []int
	ticker *time.Ticker
	mux    sync.Mutex
}

// set marks event index as pending.
// The previous lock() helper acquired the mutex and deferred the unlock
// inside itself, releasing it before the caller touched the bitmap (and
// it locked the global scheduler instead of the receiver), so the bitmap
// was effectively unprotected. Locking inline around the update fixes that.
func (s *eventMap) set(index int) {
	word, bit := index/32, uint(index%32)
	s.mux.Lock()
	defer s.mux.Unlock()
	s.bitmap[word] |= 1 << bit
}

// clear marks event index as no longer pending.
func (s *eventMap) clear(index int) {
	word, bit := index/32, uint(index%32)
	s.mux.Lock()
	defer s.mux.Unlock()
	s.bitmap[word] &= ^(1 << bit)
}

// has reports whether event index is pending. Out-of-range indices
// report false rather than panicking.
func (s *eventMap) has(index int) bool {
	word, bit := index/32, uint(index%32)
	s.mux.Lock()
	defer s.mux.Unlock()
	return word < len(s.bitmap) && s.bitmap[word]&(1<<bit) != 0
}
var scheduler eventMap
// interrupt simulates a periodic interrupt source: on every ticker tick
// it counts the scheduler index down and raises the corresponding event
// bit; when the index reaches zero it signals completion and returns.
func interrupt(signal chan bool) {
	for {
		<-scheduler.ticker.C
		scheduler.index--
		if scheduler.index <= 0 {
			break
		}
		scheduler.set(scheduler.index)
	}
	// interrupt exit
	signal <- true
}
// init prepares the global scheduler: 7 event slots, a bitmap large
// enough to hold them, and a 1-second tick source.
func init() {
	scheduler = eventMap{index: 7, length: 7}
	scheduler.bitmap = make([]int, scheduler.index/32+1)
	scheduler.ticker = time.NewTicker(time.Second)
}
// eventScheduler drains pending events until the interrupt goroutine has
// exited AND a full sweep finds no event set. Each pending event i is
// "run" by printing i progress lines at 600ms intervals, then cleared.
func eventScheduler(mainWait chan bool, interruptExit chan bool) {
	// flag: an event fired during the last sweep; exitFlag: producer done.
	flag := true
	exitFlag := false
	for !exitFlag || flag {
		select {
		case <-interruptExit:
			exitFlag = true
		// if not default and no channel in cases can read or write, select will block.
		default:
			break
		}
		flag = false
		for i := 1; i < scheduler.length; i++ {
			if scheduler.has(i) {
				flag = true
				ticker := time.NewTicker(time.Millisecond * 600)
				for t := 0; t < i; t++ {
					<-ticker.C
					fmt.Printf("task_%d is running\n", i)
				}
				// clear this event
				scheduler.clear(i)
				// Restart the sweep so lower-numbered events are re-checked.
				i = 0
			}
		}
	}
	// event scheduler exit
	mainWait <- true
}
// main wires the interrupt source to the event scheduler and blocks until
// the scheduler reports that all events have been drained.
func main() {
	// create a ticker to do something after 1s
	waitChan := make(chan bool)
	go interrupt(waitChan)
	mainWait := make(chan bool)
	go eventScheduler(mainWait, waitChan)
	<-mainWait
}
|
package account
import (
"core/positions"
"errors"
"qutils/coder"
)
// logInAccount returns the Account matching the given email/password
// credentials, or nil when the query fails or no row matches.
func logInAccount(credential LoginRequest) *Account {
	chekInitialization()
	keys := map[string]interface{}{
		"email":    credential.Email,
		"password": coder.EncodeSha1(credential.Password),
	}
	res, err := connection.SelectBy(TABLENAME, keys, "id", "password", "home_position")
	if err != nil {
		return nil
	}
	// Close only after the query is known to have succeeded; the previous
	// code deferred Close before the error check, so a failed query could
	// invoke Close on a nil result set.
	defer res.Close()
	var user *Account
	for res.Next() {
		user = &Account{
			Email:       credential.Email,
			rawPassword: credential.Password,
		}
		res.Scan(&user.Id, &user.Password, &user.HomePositionId)
	}
	return user
}
// registerAccount validates the request, creates the user's home position
// and account row inside a transaction, then logs the new account in.
// It returns an error naming the offending field ("position"/"email")
// on validation failure.
func registerAccount(credential RegisterRequest) (*Account, error) {
	chekInitialization()
	if !positions.CanBuild(credential.Position) {
		return nil, errors.New("position")
	}
	if isFieldExist("email", credential.Email) {
		return nil, errors.New("email")
	}
	connection.BeginTransaction()
	// The position row is created first so the account can reference its id.
	userPos := positions.SavePosition(credential.Position)
	args := map[string]interface{}{
		"email": credential.Email,
		"password": coder.EncodeSha1(credential.Password),
		"home_position": userPos.Id,
	}
	insertError := connection.Insert(TABLENAME, args)
	if insertError != nil {
		connection.RollbackTransaction()
		return nil, insertError
	}
	connection.CommitTransaction()
	return logInAccount(LoginRequest{credential.Email, credential.Password}), nil
}
|
package pgsql
import (
"database/sql"
"database/sql/driver"
"encoding/json"
)
// JSON returns a value that implements both the driver.Valuer and sql.Scanner
// interfaces. The driver.Valuer produces a PostgreSQL json(b) from the given val
// and the sql.Scanner unmarshals a PostgreSQL json(b) into the given val.
func JSON(val interface{}) interface {
	driver.Valuer
	sql.Scanner
} {
	return jsontype{val: val}
}

// jsontype adapts an arbitrary Go value to the database/sql json(b) protocol.
type jsontype struct {
	val interface{}
}

// Value marshals the wrapped value to JSON; a nil value maps to SQL NULL.
func (j jsontype) Value() (driver.Value, error) {
	if j.val == nil {
		return nil, nil
	}
	return json.Marshal(j.val)
}

// Scan unmarshals a json(b) column into the wrapped value. Both []byte
// and string sources are accepted, since drivers may deliver either
// (the previous version silently ignored string values). Other source
// types — including NULL — are ignored, leaving val untouched.
func (j jsontype) Scan(src interface{}) error {
	switch s := src.(type) {
	case []byte:
		return json.Unmarshal(s, j.val)
	case string:
		return json.Unmarshal([]byte(s), j.val)
	}
	return nil
}
|
package routers
import (
"encoding/json"
"net/http"
"github.com/rodzy/flash/db"
"github.com/rodzy/flash/models"
)
// ModifyUserInfo decodes the updated user record from the request body and
// applies it to the account identified by the logged-in user's global
// UserID, replying 400 on bad input or update failure and 201 on success.
func ModifyUserInfo(w http.ResponseWriter, r *http.Request) {
	var user models.User
	err := json.NewDecoder(r.Body).Decode(&user)
	if err != nil {
		http.Error(w, "Incorrect data "+err.Error(), http.StatusBadRequest)
		return
	}
	//Setting a status w/ the global var from the logged user
	var status bool
	status, err = db.ModifyUser(user, UserID)
	if err != nil {
		http.Error(w, "Error trying to insert the data "+err.Error(), http.StatusBadRequest)
		return
	}
	// status false means no matching row was updated in the database.
	if !status {
		http.Error(w, "Register not bound in the database ", http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusCreated)
}
|
// Copyright 2020
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/beego/beego/v2/core/logs"
"github.com/beego/beego/v2/server/web"
)
// main registers the upload/save routes on one controller instance and
// starts the beego web server.
func main() {
	controller := &MainController{}
	// upload file by path /upload
	web.Router("/upload", controller, "post:Upload")
	web.Router("/upload", controller, "get:UploadPage")
	web.Router("/save", controller, "post:Save")
	web.Run()
}
// MainController handles the upload demo routes.
// A controller must implement ControllerInterface; we embed
// web.Controller to get that implementation for free.
type MainController struct {
	web.Controller
}
// UploadPage renders the upload form template.
// GET http://localhost:8080/upload
// and you will see the upload page
func (ctrl *MainController) UploadPage() {
	ctrl.TplName = "upload.html"
}
// Upload reads the multipart file posted under the form key "upload.txt"
// and replies "success" (or the error text on failure).
// POST http://localhost:8080/upload
// you will see "success"
// and the file name (actual file name, not the key you use in GetFile)
func (ctrl *MainController) Upload() {
	// key is the file name
	file, fileHeader, err := ctrl.GetFile("upload.txt")
	if err != nil {
		logs.Error("save file failed, ", err)
		ctrl.Ctx.Output.Body([]byte(err.Error()))
	} else {
		// don't forget to close
		defer file.Close()
		logs.Info(fileHeader.Filename)
		ctrl.Ctx.Output.Body([]byte("success"))
	}
}
// Save stores the uploaded file posted under key "save.txt" to
// /tmp/upload.txt and replies "success" (or the error text on failure).
// POST http://localhost:8080/save
// you will see the file /tmp/upload.txt and "success"
// and if you run this on Windows platform, don't forget to change the target file path
func (ctrl *MainController) Save() {
	err := ctrl.SaveToFile("save.txt", "/tmp/upload.txt")
	if err != nil {
		logs.Error("save file failed, ", err)
		ctrl.Ctx.Output.Body([]byte(err.Error()))
	} else {
		ctrl.Ctx.Output.Body([]byte("success"))
	}
}
|
package crypto
import (
"testing"
)
// TestPKCS5Padding round-trips a short payload through Padding/UnPadding
// with a 15-byte block size and checks the original bytes come back.
func TestPKCS5Padding(t *testing.T) {
	p := &PKCS5Padding{
		BlockSize: 15,
	}
	original := []byte("0123456789")
	padded := p.Padding(original)
	t.Log(original, padded)
	unpadded := p.UnPadding(padded)
	t.Log(unpadded)
	if string(original) != string(unpadded) {
		t.Error(unpadded)
	}
}
|
package weather
import (
forecast "github.com/mlbright/forecast/v2"
"log"
"os"
"strconv"
"time"
)
// apiKey is the forecast.io API key read from the environment at startup.
var apiKey string
// init loads the API key and aborts the program when it is missing,
// since every forecast request requires it.
func init() {
	envVar := "FORECAST_API_KEY"
	apiKey = os.Getenv(envVar)
	if apiKey == "" {
		log.Fatalf("Missing value for environment variable %s. See https://developer.forecast.io.\n", envVar)
	}
}
// Point is a geographic coordinate in decimal degrees.
type Point struct {
	Latitude float64
	Longitude float64
}
// WeatherData is a snapshot of current conditions extracted from a
// forecast response. Timestamp is in milliseconds (see
// NewWeatherDataFromForecast).
type WeatherData struct {
	Timestamp float64
	Summary string
	Temperature float64
	CloudCover float64
	Humidity float64
	Pressure float64
	Visibility float64
}
// NewWeatherDataFromForecast extracts the "currently" section of a
// forecast response into a WeatherData value, converting the timestamp
// from seconds to milliseconds. The parameter was renamed from
// "forecast", which shadowed the forecast package import, and the
// unused named result was dropped.
func NewWeatherDataFromForecast(f *forecast.Forecast) *WeatherData {
	current := f.Currently
	return &WeatherData{Timestamp: current.Time * 1000,
		Summary:     current.Summary,
		Temperature: current.Temperature,
		CloudCover:  current.CloudCover,
		Humidity:    current.Humidity,
		Pressure:    current.Pressure,
		Visibility:  current.Visibility}
}
// GetPeriodicWeatherData fetches the current weather for point every two
// minutes forever, sending each successful result on weatherCh and
// logging failures.
func GetPeriodicWeatherData(point *Point, weatherCh chan *WeatherData) {
	// We're supposed to get up to 1000 api calls per day for free so it should be safe to fetch weather data
	// every 2 minutes.
	ticker := time.NewTicker(time.Duration(2) * time.Minute)
	for {
		f, err := forecast.Get(apiKey,
			strconv.FormatFloat(point.Latitude, 'f', 4, 64),
			strconv.FormatFloat(point.Longitude, 'f', 4, 64),
			"now", forecast.SI)
		if err == nil {
			weatherCh <- NewWeatherDataFromForecast(f)
		} else {
			log.Printf("weather data fetch failure: %v\n", err)
		}
		// block until we get the timer tick
		<-ticker.C
	}
}
|
package handler
import (
"backend/mux"
"backend/nullable"
"encoding/json"
"fmt"
"net/http"
"strconv"
)
// GetUser returns the user identified by the "uid" path value as JSON,
// with the password field blanked out before encoding.
func (h *Handler) GetUser(writer http.ResponseWriter, request *http.Request) {
	pathVals := mux.GetPathVals(request)
	uid, convErr := strconv.Atoi(pathVals["uid"])
	if convErr != nil {
		writer.WriteHeader(http.StatusBadRequest)
		writer.Write([]byte("TODO: return json ---- User id is not valid: " + pathVals["uid"]))
		return
	}
	user, dbErr := h.DB.GetUser(uid)
	if dbErr != nil {
		fmt.Print(dbErr)
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Never expose the stored password to the client.
	user.Password = ""
	writer.WriteHeader(http.StatusOK)
	if encErr := json.NewEncoder(writer).Encode(user); encErr != nil {
		panic(encErr) // FIXME
	}
}
// GetUsers returns every user as JSON, with password fields blanked out.
func (h *Handler) GetUsers(writer http.ResponseWriter, request *http.Request) {
	users, err := h.DB.GetAllUsers()
	if err != nil {
		fmt.Print(err)
		writer.WriteHeader(http.StatusInternalServerError)
		// Stop here: previously the handler fell through on error, calling
		// WriteHeader a second time and emitting a JSON body anyway.
		return
	}
	// Never expose stored passwords to the client.
	for i := range users {
		users[i].Password = ""
	}
	writer.WriteHeader(http.StatusOK)
	if err = json.NewEncoder(writer).Encode(users); err != nil {
		panic(err)
	}
}
// Enroll creates an enrollment linking the user from the "uid" path value
// to the school from the "sid" path value, with the optional start/end
// dates taken from the JSON request body.
func (h *Handler) Enroll(writer http.ResponseWriter, request *http.Request) {
	// Inline request shape: both dates are nullable and may be omitted.
	type requestStruct struct {
		StartDate nullable.Time `json:"startDate"`
		EndDate nullable.Time `json:"endDate"`
	}
	var req requestStruct
	err := json.NewDecoder(request.Body).Decode(&req)
	if err != nil {
		fmt.Println(err)
		writer.WriteHeader(http.StatusBadRequest)
		return
	}
	// TODO make sure authed user is admin, or matches uid in query
	user := mux.GetAuthedUser(request)
	vals := mux.GetPathVals(request)
	userID, err := strconv.Atoi(vals["uid"])
	if err != nil {
		writer.WriteHeader(http.StatusBadRequest)
		writer.Write([]byte("TODO: return json ---- User id is not valid: " + vals["uid"]))
		return
	}
	schoolID, err := strconv.Atoi(vals["sid"])
	if err != nil {
		writer.WriteHeader(http.StatusBadRequest)
		writer.Write([]byte("TODO: return json ---- School id is not valid: " + vals["sid"]))
		return
	}
	fmt.Printf("%+v%+v\n", req, user)
	err = h.DB.AddEnrollment(userID, schoolID, req.StartDate, req.EndDate)
	if err != nil {
		fmt.Println(err)
		writer.WriteHeader(http.StatusInternalServerError)
		return
	}
	writer.WriteHeader(http.StatusOK)
}
|
package main
import (
"container/ring"
"context"
"crypto/md5"
"fmt"
"os"
"os/signal"
"strconv"
"sync"
randomdata "github.com/Pallinder/go-randomdata"
)
// inserisci ("insert") produces num random English adjectives on the
// global channel in, closing it once all have been sent.
func inserisci(num int, wg *sync.WaitGroup) {
	defer wg.Done()
	defer close(in)
	for n := 1; n <= num; n++ {
		aggettivo := randomdata.Adjective()
		//fmt.Println(aggettivo) //debug
		in <- aggettivo
		//fmt.Println("Inserito")
	}
}
// elabora ("process") consumes words from the global channel in, hashes
// each with MD5 and stores the digest into the shared ring buffer under
// the ring mutex. It returns once in is closed and drained.
func elabora(wg *sync.WaitGroup) {
	defer wg.Done()
	//defer close(out)
	for i := range in {
		h := md5.Sum([]byte(i))
		ringlock.Lock()
		r.Value = h
		r = r.Next()
		ringlock.Unlock()
		//fmt.Println("Elaborato")
	}
}
// num is the requested item count. NOTE(review): main shadows this with a
// local variable, so the package-level value stays 0 — confirm intended.
var num int
// ringlock guards the shared ring buffer r.
var ringlock sync.Mutex
// r holds the most recent MD5 digests, one per ring slot.
var r *ring.Ring
// in carries adjectives from the producer to the workers. num is 0 at
// package initialization, so this channel is effectively unbuffered.
var in = make(chan string, num)
//var out = make(chan [16]byte, 1)
// main reads the item count from the first CLI argument, starts one
// producer and num hashing workers, waits for them all, then prints the
// ring contents. Ctrl+C triggers a clean exit via the signal goroutine.
func main() {
	ctx, cancel := context.WithCancel(context.Background()) // create a global context
	defer cancel()
	go func() {
		// If ctrl+c is pressed it saves the situation and exit cleanly
		c := make(chan os.Signal, 1) // channel with a one-element buffer
		signal.Notify(c, os.Interrupt)
		s := <-c
		fmt.Println("Got signal:", s)
		// NOTE(review): ctx.Done() only returns the channel and has no side
		// effect here; cancel() below is what actually ends the context.
		ctx.Done()
		cancel() // terminate the background context
		// clean exit
		fmt.Println("Uscita pulita")
		os.Exit(0)
	}()
	num, err := strconv.Atoi(os.Args[1]) // NOTE(review): shadows the package-level num
	if err != nil {
		panic(err.Error())
	}
	r = ring.New(num)
	var wg sync.WaitGroup
	wg.Add(1)
	go inserisci(num, &wg)
	for i := 1; i <= num; i++ {
		wg.Add(1)
		go elabora(&wg)
	}
	wg.Wait()
	r.Do(
		func(p interface{}) {
			fmt.Printf("%x\n", p)
		})
	return
}
|
package ast
import (
"log"
"strings"
"github.com/emptyland/akino/sql/token"
)
// Node is the base of every AST node, exposing its source byte span.
type Node interface {
	Pos() int
	End() int
}
// Command is a top-level SQL statement node.
type Command interface {
	Node
}
// Expr is an expression node.
type Expr interface {
	Node
}
// NameRef is a possibly-qualified name: either "name" (First only) or
// "database.name" (First is the database, Second the table).
type NameRef struct {
	First string
	Second string
}
// Table returns the table component of the name.
func (self *NameRef) Table() string {
	if self.Second != "" {
		return self.Second
	}
	return self.First
}
// Database returns the database component, or "" when unqualified.
func (self *NameRef) Database() string {
	if self.Second != "" {
		return self.First
	}
	return ""
}
// Full returns the dotted full name.
func (self *NameRef) Full() string {
	if self.Second != "" {
		return self.First + "." + self.Second
	}
	return self.First
}
//------------------------------------------------------------------------------
// Select represents a SELECT statement. Prior/Op chain compound selects
// (e.g. UNION) back to the preceding statement.
type Select struct {
	SelectPos int
	SelectEnd int
	Op token.Token
	Prior *Select
	Distinct bool
	Limit Expr
	Offset Expr
	SelColList []SelectColumn
	From []Source
	Where Expr
	Having Expr
	GroupBy []Expr
	OrderBy []OrderByItem
}
// Pos returns the byte offset where the statement starts.
func (self *Select) Pos() int {
	return self.SelectPos
}
// End returns the byte offset where the statement ends.
func (self *Select) End() int {
	return self.SelectEnd
}
// SelectColumn is one projected column with an optional alias.
type SelectColumn struct {
	SelectExpr Expr
	Alias string
}
// OrderByItem is one ORDER BY expression plus its direction.
type OrderByItem struct {
	Item Expr
	Desc bool
}
//------------------------------------------------------------------------------
// Source is one entry of a FROM clause: either a table reference or a
// subquery, with optional alias, index hint and join condition.
type Source struct {
	SourcePos int
	SourceEnd int
	JoinType int
	Table *NameRef
	Subquery *Select
	Alias string
	Indexed string
	On Expr
	Using []Identifier
}
// Pos returns the byte offset where the source starts.
func (self *Source) Pos() int {
	return self.SourcePos
}
// End returns the byte offset where the source ends.
func (self *Source) End() int {
	return self.SourceEnd
}
// IsSubquery reports whether this source is a subquery, returning it too.
func (self *Source) IsSubquery() (bool, *Select) {
	return self.Subquery != nil, self.Subquery
}
// IsTable reports whether this source is a plain table reference.
func (self *Source) IsTable() (bool, *NameRef) {
	return self.Table != nil, self.Table
}
// Join-type bit flags for Source.JoinType; several may be combined
// (e.g. NATURAL LEFT OUTER).
const (
	JT_INNER = (1 << iota)
	JT_CROSS
	JT_NATURAL
	JT_LEFT
	JT_RIGHT
	JT_OUTER
)
//------------------------------------------------------------------------------
// Transaction represents a transaction-control statement.
type Transaction struct {
	TransactionPos int
	Op token.Token // token.BEGIN | START | COMMIT | ROLLBACK
	Type token.Token // token.DEFERREF | IMMEDIATE | EXCLUSIVE
}
// Pos returns the byte offset of the statement keyword.
func (self *Transaction) Pos() int {
	return self.TransactionPos
}
// End returns the same offset; the node records no end position.
func (self *Transaction) End() int {
	return self.Pos()
}
//------------------------------------------------------------------------------
// Show represents a SHOW statement.
type Show struct {
	ShowPos int
	Dest token.Token // token.TABLES | DATABASES
}
// Pos returns the byte offset of the statement keyword.
func (self *Show) Pos() int {
	return self.ShowPos
}
// End returns the same offset; the node records no end position.
func (self *Show) End() int {
	return self.Pos()
}
//------------------------------------------------------------------------------
// Comment is a SQL comment node carrying its raw text including markers.
type Comment struct {
	CommentPos int
	Text string
}
// Pos returns the byte offset where the comment starts.
func (self *Comment) Pos() int {
	return self.CommentPos
}
// End returns the offset just past the comment text.
func (self *Comment) End() int {
	return self.Pos() + len(self.Text)
}
// Block classifies the comment by its markers, aborting on anything else.
// NOTE(review): it returns true for "--...--" and false for "/*...*/",
// which looks inverted relative to the usual block-comment terminology —
// confirm against callers before relying on the name.
func (self *Comment) Block() bool {
	switch {
	case strings.HasPrefix(self.Text, "--") && strings.HasSuffix(self.Text, "--"):
		return true
	case strings.HasPrefix(self.Text, "/*") && strings.HasSuffix(self.Text, "*/"):
		return false
	default:
		log.Fatal("Bad comment prefix and suffix!")
		panic("fatal")
	}
}
// Content returns the comment body with "--" markers trimmed when
// Block() is true; the other flavor currently yields "".
func (self *Comment) Content() string {
	if self.Block() {
		return strings.Trim(self.Text, "--")
	} else {
		return "" // FIXME
	}
}
//------------------------------------------------------------------------------
// CreateTable represents CREATE TABLE, either with an explicit column
// scheme or as CREATE TABLE ... AS SELECT (Template).
type CreateTable struct {
	CreatePos int
	CreateEnd int
	Temp bool
	IfNotExists bool
	Table NameRef
	Scheme []ColumnDefine
	Template *Select
	CheckConstraint []Expr
}
// Pos returns the byte offset where the statement starts.
func (self *CreateTable) Pos() int {
	return self.CreatePos
}
// End returns the byte offset where the statement ends.
func (self *CreateTable) End() int {
	return self.CreateEnd
}
/*
* On Conf:
* token.IGNORE
* token.DEFAULT
* token.REPLACE
* token.ROLLBACK
* token.ABORT
* token.FAIL
*/
// ColumnDefine is one column of a CREATE TABLE scheme. The *On fields
// record the conflict-resolution token for the matching constraint
// (see the "On Conf" comment above).
type ColumnDefine struct {
	Name string
	ColumnType Type
	Default Expr
	NotNull bool
	NotNullOn token.Token
	PrimaryKey bool
	PrimaryKeyOn token.Token
	PrimaryKeyDesc bool
	Unique bool
	UniqueOn token.Token
	AutoIncr bool
	Collate string
}
//------------------------------------------------------------------------------
// CreateIndex represents CREATE [UNIQUE] INDEX on a table.
type CreateIndex struct {
	CreatePos int
	CreateEnd int
	Unique bool
	IfNotExists bool
	Name NameRef // Index name
	Table string // For table name
	Index []IndexDefine
}
// Pos returns the byte offset where the statement starts.
func (self *CreateIndex) Pos() int {
	return self.CreatePos
}
// End returns the byte offset where the statement ends.
func (self *CreateIndex) End() int {
	return self.CreateEnd
}
// IndexDefine is one indexed column with its collation and direction.
type IndexDefine struct {
	Name string
	Collate string
	Desc bool
}
//------------------------------------------------------------------------------
// Insert represents an INSERT/REPLACE statement; values come from the
// Item literals or a SELECT (From). Neither present means DEFAULT VALUES.
type Insert struct {
	InsertPos int
	InsertEnd int
	Op token.Token
	Dest NameRef
	Column []Identifier
	Item []Expr
	From *Select
}
// Pos returns the byte offset where the statement starts.
func (self *Insert) Pos() int {
	return self.InsertPos
}
// End returns the byte offset where the statement ends.
func (self *Insert) End() int {
	return self.InsertEnd
}
// DefaultValues reports whether the statement supplies no explicit values.
func (self *Insert) DefaultValues() bool {
	return len(self.Item) == 0 && self.From == nil
}
//------------------------------------------------------------------------------
// Update represents an UPDATE statement.
type Update struct {
	UpdatePos int
	UpdateEnd int
	Op token.Token
	Dest NameRef
	Indexed string
	Set []SetDefine
	Where Expr
	OrderBy []OrderByItem
	Limit Expr
	Offset Expr
}
// Pos returns the byte offset where the statement starts.
func (self *Update) Pos() int {
	return self.UpdatePos
}
// End returns the byte offset where the statement ends.
func (self *Update) End() int {
	return self.UpdateEnd
}
// SetDefine is one column = value assignment of a SET clause.
type SetDefine struct {
	Column string
	Value Expr
}
//------------------------------------------------------------------------------
// Delete represents a DELETE statement.
type Delete struct {
	DeletePos int
	DeleteEnd int
	Dest NameRef
	Indexed string
	Where Expr
	OrderBy []OrderByItem
	Limit Expr
	Offset Expr
}
// Pos returns the byte offset where the statement starts.
func (self *Delete) Pos() int {
	return self.DeletePos
}
// End returns the byte offset where the statement ends.
func (self *Delete) End() int {
	return self.DeleteEnd
}
//------------------------------------------------------------------------------
// Type describes a column type: its kind token plus optional width and
// decimal literals and signedness.
type Type struct {
	TokenPos int
	Kind token.Token
	Width *Literal
	Decimal *Literal
	Unsigned bool
}
// Pos returns the byte offset of the type token.
func (self *Type) Pos() int {
	return self.TokenPos
}
// End returns the same offset; the node records no end position.
func (self *Type) End() int {
	return self.Pos()
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"fmt"
"hash"
"hash/fnv"
"sync/atomic"
"time"
"unsafe"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/bitmap"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/memory"
)
// hashContext keeps the needed hash context of a db table in hash join.
type hashContext struct {
	// allTypes one-to-one correspondence with keyColIdx
	allTypes []*types.FieldType
	keyColIdx []int
	// naKeyColIdx lists the null-aware (NAAJ) join key column indices.
	naKeyColIdx []int
	buf []byte
	// hashVals holds one FNV-64 hasher per row being hashed.
	hashVals []hash.Hash64
	hasNull []bool
	naHasNull []bool
	// naColNullBitMap records, per row, which NA key columns are null.
	naColNullBitMap []*bitmap.ConcurrentBitmap
}
// initHash (re)sizes and resets the per-row hashing state so at least
// rows rows can be hashed; existing hashers and bitmaps are reused via
// Reset when capacity already suffices.
func (hc *hashContext) initHash(rows int) {
	if hc.buf == nil {
		hc.buf = make([]byte, 1)
	}
	if len(hc.hashVals) < rows {
		hc.hasNull = make([]bool, rows)
		hc.hashVals = make([]hash.Hash64, rows)
		for i := 0; i < rows; i++ {
			hc.hashVals[i] = fnv.New64()
		}
	} else {
		for i := 0; i < rows; i++ {
			hc.hasNull[i] = false
			hc.hashVals[i].Reset()
		}
	}
	if len(hc.naKeyColIdx) > 0 {
		// isNAAJ
		if len(hc.naColNullBitMap) < rows {
			hc.naHasNull = make([]bool, rows)
			hc.naColNullBitMap = make([]*bitmap.ConcurrentBitmap, rows)
			for i := 0; i < rows; i++ {
				hc.naColNullBitMap[i] = bitmap.NewConcurrentBitmap(len(hc.naKeyColIdx))
			}
		} else {
			for i := 0; i < rows; i++ {
				hc.naHasNull[i] = false
				hc.naColNullBitMap[i].Reset(len(hc.naKeyColIdx))
			}
		}
	}
}
// hashStatistic collects runtime statistics of the hash join build/probe.
type hashStatistic struct {
	// NOTE: probeCollision may be accessed from multiple goroutines concurrently.
	probeCollision int64
	buildTableElapse time.Duration
}
// String renders the statistics for runtime-stats style output.
func (s *hashStatistic) String() string {
	return fmt.Sprintf("probe_collision:%v, build:%v", s.probeCollision, execdetails.FormatDuration(s.buildTableElapse))
}
// hashNANullBucket holds build-side rows whose NAAJ join keys contain null.
type hashNANullBucket struct {
	entries []*naEntry
}
// hashRowContainer handles the rows and the hash map of a table.
// NOTE: a hashRowContainer may be shallow copied by the invoker, define all the
// member attributes as pointer type to avoid unexpected problems.
type hashRowContainer struct {
	sc *stmtctx.StatementContext
	hCtx *hashContext
	stat *hashStatistic
	// hashTable stores the map of hashKey and RowPtr
	hashTable baseHashTable
	// hashNANullBucket stores the rows with any null value in NAAJ join key columns.
	// After build process, NANUllBucket is read only here for multi probe worker.
	hashNANullBucket *hashNANullBucket
	rowContainer *chunk.RowContainer
	memTracker *memory.Tracker
	// chkBuf buffer the data reads from the disk if rowContainer is spilled.
	chkBuf *chunk.Chunk
	chkBufSizeForOneProbe int64
}
// newHashRowContainer builds a hashRowContainer backed by a fresh
// RowContainer and a concurrent-map hash table; the NA-null bucket is
// only allocated for null-aware anti joins.
func newHashRowContainer(sCtx sessionctx.Context, hCtx *hashContext, allTypes []*types.FieldType) *hashRowContainer {
	maxChunkSize := sCtx.GetSessionVars().MaxChunkSize
	rc := chunk.NewRowContainer(allTypes, maxChunkSize)
	c := &hashRowContainer{
		sc: sCtx.GetSessionVars().StmtCtx,
		hCtx: hCtx,
		stat: new(hashStatistic),
		hashTable: newConcurrentMapHashTable(),
		rowContainer: rc,
		memTracker: memory.NewTracker(memory.LabelForRowContainer, -1),
	}
	if isNAAJ := len(hCtx.naKeyColIdx) > 0; isNAAJ {
		c.hashNANullBucket = &hashNANullBucket{}
	}
	// Account the row container's memory under this container's tracker.
	rc.GetMemTracker().AttachTo(c.GetMemTracker())
	return c
}
// ShallowCopy returns a shallow copy whose rowContainer has its own mutex so
// multiple probe workers can read concurrently.
func (c *hashRowContainer) ShallowCopy() *hashRowContainer {
	clone := *c
	clone.rowContainer = c.rowContainer.ShallowCopyWithNewMutex()
	// All copies intentionally share the single NA-NULL bucket slice:
	// clone.hashNANullBucket = c.hashNANullBucket (implicit in the struct copy).
	return &clone
}
// GetMatchedRows get matched rows from probeRow. It can be called
// in multiple goroutines while each goroutine should keep its own
// h and buf.
func (c *hashRowContainer) GetMatchedRows(probeKey uint64, probeRow chunk.Row, hCtx *hashContext, matched []chunk.Row) ([]chunk.Row, error) {
	// Delegates to GetMatchedRowsAndPtrs with needPtr=false, discarding the row pointers.
	matchedRows, _, err := c.GetMatchedRowsAndPtrs(probeKey, probeRow, hCtx, matched, nil, false)
	return matchedRows, err
}
// GetAllMatchedRows fetches every build-side row for a NAAJ probe row whose
// join key contains a null: such a probe row must be compared against all
// build rows in the hash table. The needCheck* slices are caller-owned
// scratch buffers that are reset and reused on every call.
func (c *hashRowContainer) GetAllMatchedRows(probeHCtx *hashContext, probeSideRow chunk.Row,
	probeKeyNullBits *bitmap.ConcurrentBitmap, matched []chunk.Row, needCheckBuildColPos, needCheckProbeColPos []int, needCheckBuildTypes, needCheckProbeTypes []*types.FieldType) ([]chunk.Row, error) {
	// for NAAJ probe row with null, we should match them with all build rows.
	var (
		ok        bool
		err       error
		innerPtrs []chunk.RowPtr
	)
	// Collect the row pointer of every entry chain in the whole hash table.
	c.hashTable.Iter(
		func(_ uint64, e *entry) {
			entryAddr := e
			for entryAddr != nil {
				innerPtrs = append(innerPtrs, entryAddr.ptr)
				entryAddr = entryAddr.next
			}
		})
	matched = matched[:0]
	if len(innerPtrs) == 0 {
		return matched, nil
	}
	// all built bucket rows come from hash table, their bitmap are all nil (doesn't contain any null). so
	// we could only use the probe null bits to filter valid rows.
	if probeKeyNullBits != nil && len(probeHCtx.naKeyColIdx) > 1 {
		// if len(probeHCtx.naKeyColIdx)=1, that means the NA-Join probe key is
		// directly a (null) <-> (fetch all buckets): nothing to do.
		// else, e.g. (null, 1, 2): use the not-null probe bits to filter rows,
		// fetching only rows like (?, 1, 2) with exactly 1 and 2 in the second
		// and third join key column.
		needCheckProbeColPos = needCheckProbeColPos[:0]
		needCheckBuildColPos = needCheckBuildColPos[:0]
		needCheckBuildTypes = needCheckBuildTypes[:0]
		needCheckProbeTypes = needCheckProbeTypes[:0]
		keyColLen := len(c.hCtx.naKeyColIdx)
		for i := 0; i < keyColLen; i++ {
			// since all bucket is from hash table (Not Null), so the buildSideNullBits check is eliminated.
			if probeKeyNullBits.UnsafeIsSet(i) {
				continue
			}
			needCheckBuildColPos = append(needCheckBuildColPos, c.hCtx.naKeyColIdx[i])
			needCheckBuildTypes = append(needCheckBuildTypes, c.hCtx.allTypes[i])
			needCheckProbeColPos = append(needCheckProbeColPos, probeHCtx.naKeyColIdx[i])
			needCheckProbeTypes = append(needCheckProbeTypes, probeHCtx.allTypes[i])
		}
	}
	var mayMatchedRow chunk.Row
	for _, ptr := range innerPtrs {
		mayMatchedRow, c.chkBuf, err = c.rowContainer.GetRowAndAppendToChunk(ptr, c.chkBuf)
		if err != nil {
			return nil, err
		}
		if probeKeyNullBits != nil && len(probeHCtx.naKeyColIdx) > 1 {
			// check the idxs-th value of the join columns.
			ok, err = codec.EqualChunkRow(c.sc, mayMatchedRow, needCheckBuildTypes, needCheckBuildColPos, probeSideRow, needCheckProbeTypes, needCheckProbeColPos)
			if err != nil {
				return nil, err
			}
			if !ok {
				continue
			}
			// once ok. just append the (maybe) valid build row for latter other conditions check if any.
		}
		matched = append(matched, mayMatchedRow)
	}
	return matched, nil
}
// signalCheckpointForJoinMask indicates the times of row probe that a signal detection will be triggered.
// It is a power of two minus one so it can be used as a cheap modulo mask (i & mask).
const signalCheckpointForJoinMask int = 1<<17 - 1

// rowSize is the size of Row.
const rowSize = int64(unsafe.Sizeof(chunk.Row{}))

// rowPtrSize is the size of RowPtr.
const rowPtrSize = int64(unsafe.Sizeof(chunk.RowPtr{}))
// GetMatchedRowsAndPtrs get matched rows and Ptrs from probeRow. It can be called
// in multiple goroutines while each goroutine should keep its own
// h and buf.
func (c *hashRowContainer) GetMatchedRowsAndPtrs(probeKey uint64, probeRow chunk.Row, hCtx *hashContext, matched []chunk.Row, matchedPtrs []chunk.RowPtr, needPtr bool) ([]chunk.Row, []chunk.RowPtr, error) {
	var err error
	innerPtrs := c.hashTable.Get(probeKey)
	if len(innerPtrs) == 0 {
		return nil, nil, err
	}
	matched = matched[:0]
	var matchedRow chunk.Row
	matchedPtrs = matchedPtrs[:0]
	// Some variables used for memTracker.
	var (
		// matchedDataSize is the size of the result slices already accounted for.
		matchedDataSize = int64(cap(matched))*rowSize + int64(cap(matchedPtrs))*rowPtrSize
		// Only track memory for unusually long chains; tracking every probe would be too costly.
		needTrackMemUsage   = cap(innerPtrs) > signalCheckpointForJoinMask
		lastChunkBufPointer *chunk.Chunk
		memDelta            int64
	)
	// Drop the previous probe's buffer and release the bytes tracked for it.
	c.chkBuf = nil
	c.memTracker.Consume(-c.chkBufSizeForOneProbe)
	if needTrackMemUsage {
		c.memTracker.Consume(int64(cap(innerPtrs)) * rowPtrSize)
		// NOTE(review): deferred call arguments are evaluated here, so memDelta
		// is captured as its current value (0), not its value at return —
		// confirm this is intended and the remainder is settled elsewhere.
		defer c.memTracker.Consume(-int64(cap(innerPtrs))*rowPtrSize + memDelta)
	}
	c.chkBufSizeForOneProbe = 0
	for i, ptr := range innerPtrs {
		matchedRow, c.chkBuf, err = c.rowContainer.GetRowAndAppendToChunk(ptr, c.chkBuf)
		if err != nil {
			return nil, nil, err
		}
		var ok bool
		ok, err = c.matchJoinKey(matchedRow, probeRow, hCtx)
		if err != nil {
			return nil, nil, err
		}
		// When GetRowAndAppendToChunk switched to a fresh buffer chunk, account
		// for the size of the finished one.
		if needTrackMemUsage && c.chkBuf != lastChunkBufPointer && lastChunkBufPointer != nil {
			lastChunkSize := lastChunkBufPointer.MemoryUsage()
			c.chkBufSizeForOneProbe += lastChunkSize
			memDelta += lastChunkSize
		}
		lastChunkBufPointer = c.chkBuf
		if needTrackMemUsage && (i&signalCheckpointForJoinMask == signalCheckpointForJoinMask) {
			// Trigger Consume for checking the OOM Action signal
			memDelta += int64(cap(matched))*rowSize + int64(cap(matchedPtrs))*rowPtrSize - matchedDataSize
			matchedDataSize = int64(cap(matched))*rowSize + int64(cap(matchedPtrs))*rowPtrSize
			c.memTracker.Consume(memDelta + 1)
			memDelta = 0
		}
		if !ok {
			// Same hash value but different join key: a hash collision.
			atomic.AddInt64(&c.stat.probeCollision, 1)
			continue
		}
		matched = append(matched, matchedRow)
		if needPtr {
			matchedPtrs = append(matchedPtrs, ptr)
		}
	}
	return matched, matchedPtrs, err
}
// GetNullBucketRows fetches the build rows from the NA null bucket that may
// match probeSideRow. A null on either side acts as a wildcard, so only the
// positions that are non-null on both sides are compared explicitly. The
// needCheck* slices are caller-owned scratch buffers reused per entry.
func (c *hashRowContainer) GetNullBucketRows(probeHCtx *hashContext, probeSideRow chunk.Row,
	probeKeyNullBits *bitmap.ConcurrentBitmap, matched []chunk.Row, needCheckBuildColPos, needCheckProbeColPos []int, needCheckBuildTypes, needCheckProbeTypes []*types.FieldType) ([]chunk.Row, error) {
	var (
		ok            bool
		err           error
		mayMatchedRow chunk.Row
	)
	matched = matched[:0]
	for _, nullEntry := range c.hashNANullBucket.entries {
		mayMatchedRow, c.chkBuf, err = c.rowContainer.GetRowAndAppendToChunk(nullEntry.ptr, c.chkBuf)
		if err != nil {
			return nil, err
		}
		// since null bucket is a unified bucket. cases like below:
		// case1: left side (probe side) has null
		//    left side key <1,null>, actually we can fetch all bucket <1, ?> and filter 1 at the first join key, once
		//    got a valid right row after other condition, then we can just return.
		// case2: left side (probe side) don't have null
		//    left side key <1, 2>, actually we should fetch <1,null>, <null, 2>, <null, null> from the null bucket because
		//    case like <3,null> is obviously not matched with the probe key.
		needCheckProbeColPos = needCheckProbeColPos[:0]
		needCheckBuildColPos = needCheckBuildColPos[:0]
		needCheckBuildTypes = needCheckBuildTypes[:0]
		needCheckProbeTypes = needCheckProbeTypes[:0]
		keyColLen := len(c.hCtx.naKeyColIdx)
		if probeKeyNullBits != nil {
			// when the probeKeyNullBits is not nil, it means the probe key has null values, where we should distinguish
			// whether is empty set or not. In other words, we should fetch at least a valid from the null bucket here.
			// for values at the same index of the join key in which they are both not null, the values should be exactly the same.
			//
			// step: probeKeyNullBits & buildKeyNullBits, for those bits with 0, we should check if both values are the same.
			// we can just use the UnsafeIsSet here, because insert action of the build side has all finished.
			//
			//  1 0 1 0  means left join key : null ? null ?
			//  1 0 0 0  means right join key: null ? ?    ?
			// ---------------------------------------------
			// left & right: 1 0 1 0: just do the explicit column value check for whose bit is 0. (means no null from both side)
			for i := 0; i < keyColLen; i++ {
				if probeKeyNullBits.UnsafeIsSet(i) || nullEntry.nullBitMap.UnsafeIsSet(i) {
					// either side is null at column i: treat it as a wildcard match.
					continue
				}
				needCheckBuildColPos = append(needCheckBuildColPos, c.hCtx.naKeyColIdx[i])
				needCheckBuildTypes = append(needCheckBuildTypes, c.hCtx.allTypes[i])
				needCheckProbeColPos = append(needCheckProbeColPos, probeHCtx.naKeyColIdx[i])
				needCheckProbeTypes = append(needCheckProbeTypes, probeHCtx.allTypes[i])
			}
			// check the idxs-th value of the join columns.
			ok, err = codec.EqualChunkRow(c.sc, mayMatchedRow, needCheckBuildTypes, needCheckBuildColPos, probeSideRow, needCheckProbeTypes, needCheckProbeColPos)
			if err != nil {
				return nil, err
			}
			if !ok {
				continue
			}
		} else {
			// when the probeKeyNullBits is nil, it means the probe key is not null. But in the process of matching the null bucket,
			// we still need to do the non-null (explicit) value check.
			//
			// eg: the probe key is <1,2>, we only get <2, null> in the null bucket, even we can take the null as a wildcard symbol,
			// the first value of this two tuple is obviously not a match. So we need filter it here.
			for i := 0; i < keyColLen; i++ {
				if nullEntry.nullBitMap.UnsafeIsSet(i) {
					continue
				}
				needCheckBuildColPos = append(needCheckBuildColPos, c.hCtx.naKeyColIdx[i])
				needCheckBuildTypes = append(needCheckBuildTypes, c.hCtx.allTypes[i])
				needCheckProbeColPos = append(needCheckProbeColPos, probeHCtx.naKeyColIdx[i])
				needCheckProbeTypes = append(needCheckProbeTypes, probeHCtx.allTypes[i])
			}
			// check the idxs-th value of the join columns.
			ok, err = codec.EqualChunkRow(c.sc, mayMatchedRow, needCheckBuildTypes, needCheckBuildColPos, probeSideRow, needCheckProbeTypes, needCheckProbeColPos)
			if err != nil {
				return nil, err
			}
			if !ok {
				continue
			}
		}
		// once ok. just append the (maybe) valid build row for latter other conditions check if any.
		matched = append(matched, mayMatchedRow)
	}
	return matched, err
}
// matchJoinKey checks if join keys of buildRow and probeRow are logically equal.
// When NA (null-aware) join key columns are configured they take precedence
// over the normal EQ key columns.
func (c *hashRowContainer) matchJoinKey(buildRow, probeRow chunk.Row, probeHCtx *hashContext) (ok bool, err error) {
	buildIdx, probeIdx := c.hCtx.keyColIdx, probeHCtx.keyColIdx
	if len(c.hCtx.naKeyColIdx) > 0 {
		buildIdx, probeIdx = c.hCtx.naKeyColIdx, probeHCtx.naKeyColIdx
	}
	return codec.EqualChunkRow(c.sc,
		buildRow, c.hCtx.allTypes, buildIdx,
		probeRow, probeHCtx.allTypes, probeIdx)
}
// alreadySpilledSafeForTest indicates that records have spilled out into disk. It's thread-safe.
// Presumably only referenced from tests (hence the nolint directive) — verify before removal.
// nolint: unused
func (c *hashRowContainer) alreadySpilledSafeForTest() bool {
	return c.rowContainer.AlreadySpilledSafeForTest()
}
// PutChunk puts a chunk into hashRowContainer and build hash map. It's not thread-safe.
// key of hash table: hash value of key columns
// value of hash table: RowPtr of the corresponded row
func (c *hashRowContainer) PutChunk(chk *chunk.Chunk, ignoreNulls []bool) error {
	// A nil selected vector means every row in the chunk participates.
	return c.PutChunkSelected(chk, nil, ignoreNulls)
}
// PutChunkSelected selectively puts a chunk into hashRowContainer and build hash map. It's not thread-safe.
// key of hash table: hash value of key columns
// value of hash table: RowPtr of the corresponded row
func (c *hashRowContainer) PutChunkSelected(chk *chunk.Chunk, selected, ignoreNulls []bool) error {
	start := time.Now()
	defer func() { c.stat.buildTableElapse += time.Since(start) }()
	// Remember the index this chunk will get before adding it, so RowPtrs below
	// can reference it.
	chkIdx := uint32(c.rowContainer.NumChunks())
	err := c.rowContainer.Add(chk)
	if err != nil {
		return err
	}
	numRows := chk.NumRows()
	c.hCtx.initHash(numRows)
	hCtx := c.hCtx
	// By now, the combination of 1 and 2 can't take a run at same time.
	// 1: write the row data of join key to hashVals. (normal EQ key should ignore the null values.) null-EQ for Except statement is an exception.
	for keyIdx, colIdx := range c.hCtx.keyColIdx {
		ignoreNull := len(ignoreNulls) > keyIdx && ignoreNulls[keyIdx]
		err := codec.HashChunkSelected(c.sc, hCtx.hashVals, chk, hCtx.allTypes[keyIdx], colIdx, hCtx.buf, hCtx.hasNull, selected, ignoreNull)
		if err != nil {
			return errors.Trace(err)
		}
	}
	// 2: write the row data of NA join key to hashVals. (NA EQ key should collect all rows including null value as one bucket.)
	isNAAJ := len(c.hCtx.naKeyColIdx) > 0
	hasNullMark := make([]bool, len(hCtx.hasNull))
	for keyIdx, colIdx := range c.hCtx.naKeyColIdx {
		// NAAJ won't ignore any null values, but collect them as one hash bucket.
		err := codec.HashChunkSelected(c.sc, hCtx.hashVals, chk, hCtx.allTypes[keyIdx], colIdx, hCtx.buf, hCtx.hasNull, selected, false)
		if err != nil {
			return errors.Trace(err)
		}
		// todo: we can collect the bitmap in codec.HashChunkSelected to avoid loop here, but the params modification is quite big.
		// after fetch one NA column, collect the null value to null bitmap for every row. (use hasNull flag to accelerate)
		// eg: if a NA Join cols is (a, b, c), for every build row here we maintained a 3-bit map to mark which column are null for them.
		for rowIdx := 0; rowIdx < numRows; rowIdx++ {
			if hCtx.hasNull[rowIdx] {
				hCtx.naColNullBitMap[rowIdx].UnsafeSet(keyIdx)
				// clean and try fetch next NA join col.
				hCtx.hasNull[rowIdx] = false
				// just a mark variable for whether there is a null in at least one NA join column.
				hasNullMark[rowIdx] = true
			}
		}
	}
	// Insert every selected row: for NAAJ, rows with any null NA-key value go to
	// the null bucket, others to the hash table; for normal joins, null-keyed
	// rows are skipped entirely.
	for i := 0; i < numRows; i++ {
		if isNAAJ {
			if selected != nil && !selected[i] {
				continue
			}
			if hasNullMark[i] {
				// collect the null rows to slice.
				rowPtr := chunk.RowPtr{ChkIdx: chkIdx, RowIdx: uint32(i)}
				// do not directly ref the null bits map here, because the bit map will be reset and reused in next batch of chunk data.
				c.hashNANullBucket.entries = append(c.hashNANullBucket.entries, &naEntry{rowPtr, c.hCtx.naColNullBitMap[i].Clone()})
			} else {
				// insert the not-null rows to hash table.
				key := c.hCtx.hashVals[i].Sum64()
				rowPtr := chunk.RowPtr{ChkIdx: chkIdx, RowIdx: uint32(i)}
				c.hashTable.Put(key, rowPtr)
			}
		} else {
			if (selected != nil && !selected[i]) || c.hCtx.hasNull[i] {
				continue
			}
			key := c.hCtx.hashVals[i].Sum64()
			rowPtr := chunk.RowPtr{ChkIdx: chkIdx, RowIdx: uint32(i)}
			c.hashTable.Put(key, rowPtr)
		}
	}
	c.GetMemTracker().Consume(c.hashTable.GetAndCleanMemoryDelta())
	return nil
}
// NumChunks returns the number of chunks in the rowContainer
func (c *hashRowContainer) NumChunks() int {
	return c.rowContainer.NumChunks()
}

// NumRowsOfChunk returns the number of rows of a chunk
func (c *hashRowContainer) NumRowsOfChunk(chkID int) int {
	return c.rowContainer.NumRowsOfChunk(chkID)
}

// GetChunk returns chkIdx th chunk of in memory records, only works if rowContainer is not spilled
func (c *hashRowContainer) GetChunk(chkIdx int) (*chunk.Chunk, error) {
	return c.rowContainer.GetChunk(chkIdx)
}

// GetRow returns the row the ptr pointed to in the rowContainer
func (c *hashRowContainer) GetRow(ptr chunk.RowPtr) (chunk.Row, error) {
	return c.rowContainer.GetRow(ptr)
}

// Len returns number of records in the hash table.
// Note this counts hash-table entries, not rows stored in the rowContainer.
func (c *hashRowContainer) Len() uint64 {
	return c.hashTable.Len()
}
// Close releases the underlying row container and detaches the memory tracker.
func (c *hashRowContainer) Close() error {
	// defer guarantees Detach runs even if rowContainer.Close returns an error.
	defer c.memTracker.Detach()
	// Drop the probe buffer so its memory can be reclaimed.
	c.chkBuf = nil
	return c.rowContainer.Close()
}
// GetMemTracker returns the underlying memory usage tracker in hashRowContainer.
func (c *hashRowContainer) GetMemTracker() *memory.Tracker { return c.memTracker }

// GetDiskTracker returns the underlying disk usage tracker in hashRowContainer.
func (c *hashRowContainer) GetDiskTracker() *disk.Tracker { return c.rowContainer.GetDiskTracker() }

// ActionSpill returns a memory.ActionOnExceed for spilling over to disk.
func (c *hashRowContainer) ActionSpill() memory.ActionOnExceed {
	return c.rowContainer.ActionSpill()
}
// Slab sizing for entryStore: slabs start at initialEntrySliceLen entries and
// grow geometrically up to maxEntrySliceLen (see entryStore.GetStore).
const (
	initialEntrySliceLen = 64
	maxEntrySliceLen     = 8192
)
// entry is a node in the per-hash-key linked list of row pointers.
type entry struct {
	ptr  chunk.RowPtr
	next *entry
}

// naEntry is a null-bucket element: a row pointer plus a bitmap recording
// which NA join key columns of that row are null.
type naEntry struct {
	ptr        chunk.RowPtr
	nullBitMap *bitmap.ConcurrentBitmap
}

// entryStore hands out entries from pre-allocated slabs so that pointers to
// previously returned entries stay valid while amortizing allocation cost.
type entryStore struct {
	slices [][]entry
	cursor int // index of the next free slot in the last slab
}
// newEntryStore builds an entryStore seeded with one slab of
// initialEntrySliceLen entries and the cursor at its start.
func newEntryStore() *entryStore {
	return &entryStore{
		slices: [][]entry{make([]entry, initialEntrySliceLen)},
		cursor: 0,
	}
}
// GetStore returns a pointer to the next free entry slot. New slabs are
// allocated with doubled capacity (capped at maxEntrySliceLen) when the
// current one is full; existing slabs are never moved, so previously
// returned pointers remain valid. memDelta reports the bytes newly
// allocated by this call (0 when no new slab was needed).
func (es *entryStore) GetStore() (e *entry, memDelta int64) {
	sliceIdx := uint32(len(es.slices) - 1)
	slice := es.slices[sliceIdx]
	if es.cursor >= cap(slice) {
		// Current slab exhausted: grow geometrically up to the cap.
		size := cap(slice) * 2
		if size >= maxEntrySliceLen {
			size = maxEntrySliceLen
		}
		slice = make([]entry, size)
		es.slices = append(es.slices, slice)
		sliceIdx++
		es.cursor = 0
		memDelta = int64(unsafe.Sizeof(entry{})) * int64(size)
	}
	e = &es.slices[sliceIdx][es.cursor]
	es.cursor++
	return
}
// baseHashTable abstracts the key -> row-pointer hash table used by the join
// build/probe phases; implemented by unsafeHashTable (single goroutine) and
// concurrentMapHashTable (concurrent).
type baseHashTable interface {
	Put(hashKey uint64, rowPtr chunk.RowPtr)
	Get(hashKey uint64) (rowPtrs []chunk.RowPtr)
	Len() uint64
	// GetAndCleanMemoryDelta gets and cleans the memDelta of the baseHashTable. Memory delta will be cleared after each fetch.
	// It indicates the memory delta of the baseHashTable since the last calling GetAndCleanMemoryDelta().
	GetAndCleanMemoryDelta() int64
	// Iter invokes the callback with every key and the head of its entry chain.
	Iter(func(uint64, *entry))
}
// TODO (fangzhuhe) remove unsafeHashTable later if it not used anymore
// unsafeHashTable stores multiple rowPtr of rows for a given key with minimum GC overhead.
// A given key can store multiple values.
// It is not thread-safe, should only be used in one goroutine.
type unsafeHashTable struct {
	hashMap    map[uint64]*entry
	entryStore *entryStore
	length     uint64 // total number of rowPtrs Put so far

	bInMap   int64 // indicate there are 2^bInMap buckets in hashMap
	memDelta int64 // the memory delta of the unsafeHashTable since the last calling GetAndCleanMemoryDelta()
}
// newUnsafeHashTable creates a new unsafeHashTable. estCount means the estimated size of the hashMap.
// If unknown, set it to 0.
func newUnsafeHashTable(estCount int) *unsafeHashTable {
	return &unsafeHashTable{
		hashMap:    make(map[uint64]*entry, estCount),
		entryStore: newEntryStore(),
	}
}
// Put puts the key/rowPtr pairs to the unsafeHashTable, multiple rowPtrs are stored in a list.
func (ht *unsafeHashTable) Put(hashKey uint64, rowPtr chunk.RowPtr) {
	oldEntry := ht.hashMap[hashKey]
	newEntry, memDelta := ht.entryStore.GetStore()
	newEntry.ptr = rowPtr
	// Prepend the new entry to the per-key chain.
	newEntry.next = oldEntry
	ht.hashMap[hashKey] = newEntry
	// Estimate map bucket growth: once the load factor threshold is crossed,
	// charge the cost of a doubled bucket array to the memory delta.
	if len(ht.hashMap) > (1<<ht.bInMap)*hack.LoadFactorNum/hack.LoadFactorDen {
		memDelta += hack.DefBucketMemoryUsageForMapIntToPtr * (1 << ht.bInMap)
		ht.bInMap++
	}
	ht.length++
	ht.memDelta += memDelta
}
// Get gets the values of the "key" and appends them to "values".
func (ht *unsafeHashTable) Get(hashKey uint64) (rowPtrs []chunk.RowPtr) {
	// Walk the per-key entry chain, collecting each stored row pointer.
	for node := ht.hashMap[hashKey]; node != nil; node = node.next {
		rowPtrs = append(rowPtrs, node.ptr)
	}
	return
}
// Len returns the number of rowPtrs in the unsafeHashTable, the number of keys may be less than Len
// if the same key is put more than once.
func (ht *unsafeHashTable) Len() uint64 { return ht.length }

// GetAndCleanMemoryDelta gets and cleans the memDelta of the unsafeHashTable.
// Plain (non-atomic) access is fine: this table is single-goroutine by design.
func (ht *unsafeHashTable) GetAndCleanMemoryDelta() int64 {
	memDelta := ht.memDelta
	ht.memDelta = 0
	return memDelta
}
// Iter invokes traverse with every key and the head of its entry chain.
func (ht *unsafeHashTable) Iter(traverse func(key uint64, e *entry)) {
	// Range over key/value pairs directly; the original performed a redundant
	// second map lookup (ht.hashMap[k]) for every key.
	for k, entryAddr := range ht.hashMap {
		traverse(k, entryAddr)
	}
}
// concurrentMapHashTable is a concurrent hash table built on concurrentMap
type concurrentMapHashTable struct {
	hashMap    concurrentMap
	entryStore *entryStore
	length     uint64 // updated with atomic.AddUint64 in Put; read accordingly
	memDelta   int64  // the memory delta of the concurrentMapHashTable since the last calling GetAndCleanMemoryDelta()
}
// newConcurrentMapHashTable creates a concurrentMapHashTable
func newConcurrentMapHashTable() *concurrentMapHashTable {
	return &concurrentMapHashTable{
		hashMap:    newConcurrentMap(),
		entryStore: newEntryStore(),
		length:     0,
		// Pre-charge the initial map bucket plus the first entry slab.
		memDelta: hack.DefBucketMemoryUsageForMapIntToPtr + int64(unsafe.Sizeof(entry{}))*initialEntrySliceLen,
	}
}
// Len return the number of rowPtrs in the concurrentMapHashTable
func (ht *concurrentMapHashTable) Len() uint64 {
	// Put increments length via atomic.AddUint64 from multiple goroutines, so
	// the read must also be atomic to avoid a data race (flagged by -race).
	return atomic.LoadUint64(&ht.length)
}
// Put puts the key/rowPtr pairs to the concurrentMapHashTable, multiple rowPtrs are stored in a list.
func (ht *concurrentMapHashTable) Put(hashKey uint64, rowPtr chunk.RowPtr) {
	newEntry, memDelta := ht.entryStore.GetStore()
	newEntry.ptr = rowPtr
	// Insert links the entry into the bucket's chain under the map's lock.
	newEntry.next = nil
	memDelta += ht.hashMap.Insert(hashKey, newEntry)
	if memDelta != 0 {
		atomic.AddInt64(&ht.memDelta, memDelta)
	}
	atomic.AddUint64(&ht.length, 1)
}
// Get gets the values of the "key" and appends them to "values".
func (ht *concurrentMapHashTable) Get(hashKey uint64) (rowPtrs []chunk.RowPtr) {
	head, _ := ht.hashMap.Get(hashKey)
	// Walk the entry chain, collecting each stored row pointer.
	for node := head; node != nil; node = node.next {
		rowPtrs = append(rowPtrs, node.ptr)
	}
	return
}
// Iter gets the every value of the hash table.
func (ht *concurrentMapHashTable) Iter(traverse func(key uint64, e *entry)) {
	// Delegate to the concurrent map's callback-style iteration.
	ht.hashMap.IterCb(traverse)
}
// GetAndCleanMemoryDelta gets and cleans the memDelta of the concurrentMapHashTable. Memory delta will be cleared after each fetch.
func (ht *concurrentMapHashTable) GetAndCleanMemoryDelta() int64 {
	// atomic.SwapInt64 performs the load-and-clear in one atomic step,
	// replacing the original manual CAS retry loop with an equivalent,
	// simpler primitive.
	return atomic.SwapInt64(&ht.memDelta, 0)
}
|
package main
import (
"encoding/json"
"fmt"
"sync"
"github.com/couchbaselabs/go-couchbase"
)
// Event is a likeable event document stored in Couchbase.
type Event struct {
	Type  string `json:"type"`  // document type discriminator; NewEvent sets "event"
	Name  string `json:"name"`  // human-readable event name
	Likes int    `json:"likes"` // accumulated like count
}
// NewEvent returns a fresh Event with the given name and zero likes.
func NewEvent(name string) *Event {
	return &Event{Type: "event", Name: name, Likes: 0}
}
// NewEventJSON decodes an Event from its JSON encoding, panicking (via
// handleError) on malformed input.
func NewEventJSON(jsonbytes []byte) (event *Event) {
	handleError(json.Unmarshal(jsonbytes, &event))
	return event
}
// String implements fmt.Stringer for Event.
func (e *Event) String() string {
	name, likes := e.Name, e.Likes
	return fmt.Sprintf("Event '%s', Likes: %d", name, likes)
}
// handleError panics on any non-nil error; a nil error is a no-op.
func handleError(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// LIKE OMIT
// likeEvent atomically increments the like counter of the document with the
// given id using Couchbase's read-modify-write Update primitive.
func likeEvent(bucket *couchbase.Bucket, id string) {
	increment := func(current []byte) ([]byte, error) {
		event := NewEventJSON(current)
		event.Likes++
		return json.Marshal(event)
	}
	bucket.Update(id, 0, increment) // HL
}
// LIKE OMIT
// main spawns 100 concurrent "like" updates against the same document and
// prints the resulting event once every writer has finished.
func main() {
	bucket, err := couchbase.GetBucket("http://localhost:8091/", "default", "demo") // HL
	handleError(err)
	// START OMIT
	const writers = 100
	var wg sync.WaitGroup
	wg.Add(writers)
	for n := 0; n < writers; n++ {
		go func() {
			defer wg.Done()
			likeEvent(bucket, "cc2014")
		}()
	}
	wg.Wait()
	var event Event
	err = bucket.Get("cc2014", &event)
	handleError(err)
	fmt.Println(&event)
	// END OMIT
}
|
package cache
import (
"github.com/go-redis/redis"
)
// cache caches the frames using redis
// specs: 64MB cache size
var client *redis.Client
// init connects the package-level client to the local redis instance and
// performs a best-effort PING as a startup check.
func init() {
	opts := &redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       0,  // use default DB
	}
	client = redis.NewClient(opts)
	pong, err := client.Ping().Result()
	println(pong, err) // best-effort connectivity report; errors are not fatal here
}
// Set a key/value pair in the redis store; reports (true, nil) on success.
func Set(key string, value string) (bool, error) {
	if err := client.Set(key, value, 0).Err(); err != nil {
		return false, err
	}
	return true, nil
}
// Get a value from the store associated with a key; returns "" on error.
func Get(key string) (string, error) {
	res := client.Get(key)
	if err := res.Err(); err != nil {
		return "", err
	}
	return res.Val(), nil
}
// Info returns the INFO output of the redis server for the given section key.
func Info(key string) string {
	return client.Info(key).Val()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.