text stringlengths 11 4.05M |
|---|
package main
import (
"net/http"
)
// Route describes one HTTP endpoint: the method and URL pattern to match,
// the handler that serves it, and a symbolic name for lookups.
type Route struct {
	Method  string
	Pattern string
	Handler http.HandlerFunc
	Name    string
}

// Routes is the table of all registered routes.
type Routes []Route

// routes wires URL patterns to their handlers. Index, TaskIndex and
// TaskShow are defined elsewhere in this package.
var routes = Routes{
	Route{"GET", "/", Index, "index"},
	Route{"GET", "/tasks", TaskIndex, "task.index"},
	Route{"GET", "/tasks/{id}", TaskShow, "task.show"},
}
|
package assets
import (
"os"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
// testBindataFileInfo is a stub os.FileInfo describing an in-memory asset
// for use in tests.
type testBindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// Name returns the asset's base name.
func (fi testBindataFileInfo) Name() string {
	return fi.name
}

// Size returns the asset's length in bytes.
func (fi testBindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the asset's file mode bits.
func (fi testBindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the asset's modification time.
func (fi testBindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir always reports false: every asset is a file.
func (fi testBindataFileInfo) IsDir() bool {
	return false
}

// Sys returns nil; there is no underlying data source.
func (fi testBindataFileInfo) Sys() interface{} {
	return nil
}
// TestMain verifies the package-level ETag table: it must cover every
// bundled asset, return ErrETagNotFound for unknown paths, and pick up
// newly registered assets after updateETags() runs.
func TestMain(t *testing.T) {
	Convey("ETags generated on startup", t, func() {
		// One ETag entry per embedded asset.
		So(etags, ShouldHaveLength, len(_bindata))
		tag, err := GetAssetETag("path/doesnt/exist")
		So(tag, ShouldBeEmpty)
		So(err, ShouldEqual, ErrETagNotFound)
		// Register a synthetic 4-byte asset, then recompute the table.
		_bindata["path/does/exist"] = func() (*asset, error) {
			return &asset{
				bytes: []byte("test"),
				info:  testBindataFileInfo{"test", 4, 0600, time.Now()},
			}, nil
		}
		updateETags()
		tag, err = GetAssetETag("path/does/exist")
		// Expected weak-ETag literal for the payload "test".
		So(tag, ShouldEqual, `W/"4-D87F7E0C"`)
		So(err, ShouldBeNil)
	})
}
|
package outbound
import (
"database/sql"
"fmt"
"github.com/canmor/go_ms_clean_arch/pkg/domain/blog"
"github.com/canmor/go_ms_clean_arch/pkg/util"
)
// BlogRepositoryImpl is a database/sql-backed implementation of
// blog.BlogRepository.
type BlogRepositoryImpl struct {
	db *sql.DB
}

// NewBlogRepository wraps db in a BlogRepositoryImpl.
func NewBlogRepository(db *sql.DB) blog.BlogRepository {
	return BlogRepositoryImpl{db}
}
// Save inserts the blog into the blogs table and returns the generated row
// id. On failure it logs and returns a wrapped error together with id -1.
func (b BlogRepositoryImpl) Save(blog blog.Blog) (int64, error) {
	res, err := b.db.Exec("insert into blogs(title, body, created_at) values(?, ?, ?)", blog.Title, blog.Body, blog.CreatedAt)
	if err != nil {
		// %w (was %s) keeps the driver error matchable via errors.Is/As.
		err = fmt.Errorf("BlogRepositoryImpl.Save failed, err: %w", err)
		util.Log().Error(err)
		return -1, err
	}
	id, err := res.LastInsertId()
	return id, err
}
// Find loads a blog by id. It returns (nil, nil) when no row matches, so
// callers must check for a nil result as well as the error.
func (b BlogRepositoryImpl) Find(id int) (*blog.Blog, error) {
	// FIXME: add created_at
	r := b.db.QueryRow("select id, title, body FROM blogs WHERE id=?", id)
	found := blog.Blog{}
	err := r.Scan(&found.Id, &found.Title, &found.Body)
	// sql.ErrNoRows is the "not found" signal, not a failure.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &found, nil
}
|
package main
import (
"flag"
"log"
"github.com/emcfarlane/starlarkrepl"
"go.starlark.net/repl"
"go.starlark.net/starlark"
)
// run starts an interactive Starlark REPL with load() support and
// auto-completion enabled, returning when the session ends.
func run() error {
	flag.Parse()
	// repl.MakeLoad gives the thread a load() implementation for modules.
	thread := &starlark.Thread{Load: repl.MakeLoad()}
	globals := make(starlark.StringDict)
	options := starlarkrepl.Options{AutoComplete: true}
	return starlarkrepl.Run(thread, globals, options)
}
// main runs the REPL and exits non-zero on error.
func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/jinzhu/gorm/dialects/mysql"
"log"
)
// Record mirrors one row of the `record` table.
type Record struct {
	Id     int64
	UserId int64
	Good   int64
	Type   int64
	Amount int64
	Price  float64
	ToFrom sql.NullString // nullable column
	Time   []uint8        // raw bytes from the driver — presumably a time column; confirm against the schema
}
func FindeRecords( id uint64,step uint64)[]*Record {
db,err:=sql.Open("mysql","root:Yad@121413@tcp(117.50.97.181:3306)/repo?charset=utf8")
if err!=nil {
log.Panic(err)
}
sql :="SELECT * FROM record WHERE id > ? AND id <= ? "
stmt,err:=db.Prepare(sql) //sql
if err!=nil {
log.Panic(err)
}
rows ,err :=stmt.Query(id,id+step)
var records []*Record
for rows.Next(){
record := Record{}
err = rows.Scan(&record.Id,&record.Time,&record.UserId,&record.Good,&record.Type,&record.Amount,&record.Price,&record.ToFrom)
if err!=nil {
log.Panic(err)
}
records = append(records, &record)
fmt.Printf("%v \n",record.Good)
}
//关闭连接
defer db.Close()
defer stmt.Close()
defer rows.Close()
return records
} |
package storage
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/naelyn/go-docker-registry/Godeps/_workspace/src/github.com/golang/glog"
"github.com/naelyn/go-docker-registry/types"
)
// ErrNotFound is returned when a repo, tag, image or resource does not exist.
var ErrNotFound = errors.New("not found")

// ErrExist is returned when a resource that must be created fresh already exists.
var ErrExist = errors.New("already exists")

// checkSlash rejects image IDs containing a "/" after the first character
// (namespaced names such as "foo/bar"). A leading slash is not flagged,
// matching the original behavior.
func checkSlash(id string) error {
	// strings.Index returns -1 when absent and at most len(id)-1 otherwise,
	// so the old "idx < len(id)" clause was always true and has been dropped.
	if idx := strings.Index(id, "/"); idx > 0 {
		return fmt.Errorf("illegally namespaced image name: %s", id)
	}
	return nil
}
// BoundedReadCloser is an io.ReadCloser whose total size is known up front.
type BoundedReadCloser interface {
	io.ReadCloser
	Size() int
}

// simpleBoundedReadCloser pairs a ReadCloser with a fixed byte count.
type simpleBoundedReadCloser struct {
	io.ReadCloser
	size int
}

// Size returns the total number of bytes the reader will produce.
func (rc *simpleBoundedReadCloser) Size() int { return rc.size }
// Driver is the storage backend contract for the registry: repository
// bookkeeping (image lists, tags, search) plus image json/layer persistence.
type Driver interface {
	// repository
	ListImageData(repo types.Repo) ([]types.ImgData, error)
	UpdateImages(repo types.Repo, imgData []types.ImgData) error
	ListTags(repo types.Repo) (map[string]string, error)
	GetTag(repo types.Repo, tag string) (string, error)
	PutTag(repo types.Repo, tag string, imageID string) error
	RemoveRepository(repo types.Repo) error
	RemoveTag(repo types.Repo, tag string) error
	Search(q string) ([]string, error)
	// image
	ImageAncestry(imageID string) ([]string, error)
	StreamImageJson(imageID string) (*ImageJsonStat, error)
	PutImageJson(imageID string, attrJson []byte) error
	PutImageChecksum(imageID string, checksum string) (ok bool, err error)
	PutImageLayer(imageID string, r io.Reader) error
	StreamImageLayer(imageID string) (BoundedReadCloser, error)
}
// aka Repository + Image
// LocalDriver implements Driver on top of a local filesystem rooted at Dir.
type LocalDriver struct {
	Dir string
}

// NewLocalDriver returns a LocalDriver storing everything under dir.
func NewLocalDriver(dir string) *LocalDriver {
	return &LocalDriver{Dir: dir}
}
// ------------------------- repository functions ---------------------------

// ListImageData dumps a list of all known images in a repo. Returns
// ErrNotFound if the repo does not exist.
func (r *LocalDriver) ListImageData(repo types.Repo) ([]types.ImgData, error) {
	return r.loadImages(repo)
}
// loadImages reads and decodes the repo's _index_images file. A missing
// file is mapped to ErrNotFound.
func (r *LocalDriver) loadImages(repo types.Repo) ([]types.ImgData, error) {
	path := r.imagesPath(repo)
	b, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		return nil, ErrNotFound
	} else if err != nil {
		return nil, err
	}
	var v []types.ImgData
	if err := json.Unmarshal(b, &v); err != nil {
		return nil, err
	}
	return v, nil
}
// UpdateImages appends to the repo's memory of images and tags
// contained within.
func (r *LocalDriver) UpdateImages(repo types.Repo, imgData []types.ImgData) error {
	oldJson, err := r.loadImages(repo)
	if err == ErrNotFound {
		err = nil // no existing to merge with
	} else if err != nil {
		return err
	}
	newJson := imgData
	// merge: new entries go first so they win the dedupe below
	var rawall []types.ImgData
	rawall = append(rawall, newJson...)
	rawall = append(rawall, oldJson...)
	// dedupe by image ID, keeping the first occurrence
	set := map[string]struct{}{}
	var all []types.ImgData
	for _, d := range rawall {
		if _, ok := set[d.ID]; ok {
			continue
		}
		set[d.ID] = struct{}{}
		all = append(all, d)
	}
	b, err := json.Marshal(all)
	if err != nil {
		return err
	}
	path := r.imagesPath(repo)
	// Only touches the file if the merged content actually differs.
	return writeFileIfChanged(path, b)
}
// RemoveRepository erases the entire repository and all history. If the
// repo does not exist, it simply returns nil.
func (r *LocalDriver) RemoveRepository(repo types.Repo) error {
	// os.RemoveAll already treats a missing directory as success.
	repoDir := r.repoPath(repo)
	return os.RemoveAll(repoDir)
}
// RemoveTag erases a single tag in a repo. You cannot erase the
// 'latest' tag with this method. For that you must call
// RemoveRepository instead. If the repo does not exist, it simply
// returns nil.
func (r *LocalDriver) RemoveTag(repo types.Repo, tag string) error {
	// An empty tag is also refused — it would address the tags directory itself.
	if tag == "latest" || tag == "" {
		return errors.New("cannot erase latest tag")
	}
	repoDir := r.repoPath(repo)
	path := filepath.Join(repoDir, "tags", tag)
	err := os.Remove(path)
	if os.IsNotExist(err) {
		return nil // don't care
	}
	return err
}
// PutTag records a new tag for an image in a repo. If the image
// referenced does not exist it returns ErrNotFound.
func (r *LocalDriver) PutTag(repo types.Repo, tag string, imageID string) error {
	// assert image exists with that id
	jsonExists, err := r.imageJsonExists(imageID)
	if err != nil {
		return fmt.Errorf("failed to assert existence of image: %v", err)
	} else if !jsonExists {
		return ErrNotFound
	}
	td := types.TagData{ImageID: imageID}
	repoDir := r.repoPath(repo)
	path := filepath.Join(repoDir, "tags", tag)
	b, err := json.Marshal(&td)
	if err != nil {
		return err
	}
	// Skips the write when the tag already points at this image.
	return writeFileIfChanged(path, b)
}
// GetTag returns the imageID that the tag points to, or ErrNotFound if
// no such tag or repo exists.
func (r *LocalDriver) GetTag(repo types.Repo, tag string) (string, error) {
	repoDir := r.repoPath(repo)
	path := filepath.Join(repoDir, "tags", tag)
	data, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		return "", ErrNotFound
	} else if err != nil {
		return "", err
	}
	// Tag files hold a JSON-encoded types.TagData.
	var td types.TagData
	if err := json.Unmarshal(data, &td); err != nil {
		return "", err
	}
	return td.ImageID, nil
}
// ListTags lists all tags for a given repo. If the repo does not exist
// it returns ErrNotFound. All other errors are opaque.
func (r *LocalDriver) ListTags(repo types.Repo) (map[string]string, error) {
	repoDir := r.repoPath(repo)
	files, err := filepath.Glob(filepath.Join(repoDir, "tags", "*"))
	if err != nil {
		return nil, err
	}
	if len(files) == 0 {
		// maybe the repo doesn't exist? Distinguish "no tags yet" (nil map,
		// nil error) from "no such repo" (ErrNotFound).
		path := filepath.Join(repoDir, "tags")
		exist, err := fileExists(path)
		if err != nil {
			return nil, err
		}
		if !exist {
			return nil, ErrNotFound
		}
		return nil, nil
	}
	m := map[string]string{}
	for _, path := range files {
		name := filepath.Base(path)
		data, err := ioutil.ReadFile(path)
		if os.IsNotExist(err) {
			continue // don't care; vanished since glob
		} else if err != nil {
			return nil, err
		}
		var td types.TagData
		if err := json.Unmarshal(data, &td); err != nil {
			return nil, err
		}
		m[name] = td.ImageID
	}
	return m, nil
}
// Search searches for the query string and returns a list of 'ns/repo'.
// If the query is blank a list of all repos is returned.
func (f *LocalDriver) Search(q string) ([]string, error) {
	ownerList, err := filepath.Glob(filepath.Join(f.Dir, "repositories", "*"))
	if err != nil {
		// Fix: previously the glob result was logged (and used) before this
		// error check.
		return nil, err
	}
	glog.Info("ns", ownerList)
	if len(ownerList) == 0 {
		return nil, nil
	}
	matches := func(s string) bool {
		return strings.Contains(s, q)
	}
	var res []string
	for _, ownerFull := range ownerList {
		repoList, err := filepath.Glob(filepath.Join(ownerFull, "*"))
		if err != nil {
			return nil, err
		}
		// ns is invariant over the inner loop; hoisted out of it.
		ns := filepath.Base(ownerFull)
		for _, repoFull := range repoList {
			repo := filepath.Base(repoFull)
			// todo: check that this is a real repo
			fullname := ns + "/" + repo
			if q == "" || matches(ns) || matches(repo) {
				res = append(res, fullname)
			}
		}
	}
	return res, nil
}
// repoPath returns the directory holding a repo's tags and image index.
func (i *LocalDriver) repoPath(repo types.Repo) string {
	return filepath.Join(i.Dir, "repositories", repo.Owner(), repo.Name())
}

// imagesPath returns the path of the repo's _index_images file.
func (r *LocalDriver) imagesPath(repo types.Repo) string {
	repoDir := r.repoPath(repo)
	return filepath.Join(repoDir, "_index_images")
}
// ------------------------- image functions ---------------------------

// readImageChecksum loads and decodes the JSON-encoded checksum list stored
// alongside an image. Returns ErrNotFound when no checksum file exists.
func (i *LocalDriver) readImageChecksum(imageID string) ([]string, error) {
	// Consistent with loadImages/GetTag: read the whole file in one call
	// instead of Open + ReadAll + manual Close.
	csb, err := ioutil.ReadFile(i.checksumPath(imageID))
	if os.IsNotExist(err) {
		return nil, ErrNotFound
	} else if err != nil {
		return nil, err
	}
	var csArr []string
	if err := json.Unmarshal(csb, &csArr); err != nil {
		return nil, err
	}
	return csArr, nil
}
// openAndStatImageChecksum opens the image's checksum file and stats it.
// On success the caller owns the returned *os.File and must close it.
func (i *LocalDriver) openAndStatImageChecksum(imageID string) (*os.File, os.FileInfo, error) {
	f, err := os.Open(i.checksumPath(imageID))
	if err != nil {
		return nil, nil, err
	}
	stat, err := f.Stat()
	if err != nil {
		// Don't leak the handle when Stat fails.
		_ = f.Close()
		return nil, nil, err
	}
	return f, stat, nil
}
// imageJsonExists reports whether the image's "json" resource exists.
func (i *LocalDriver) imageJsonExists(imageID string) (bool, error) {
	f, _, jsonExist, err := i.openAndStatImageJson(imageID)
	if err != nil {
		// openAndStatImageJson already maps os.IsNotExist to (false, nil),
		// so any error here is a real failure (the old IsNotExist branch
		// was dead code).
		return false, err
	}
	// f is nil when the file does not exist; only close a real handle.
	if f != nil {
		defer f.Close()
	}
	return jsonExist, nil
}
// TODO: move to types?
// ImageJsonStat bundles an image's json stream with its sizes and optional
// checksum list.
type ImageJsonStat struct {
	Data io.ReadCloser // stream of data representing the json structure
	Size int // size of Data
	LayerSize int // size of the layer itself (not the json for it)
	Checksum []string // optional
}
// PutImageChecksum poorly named function to "verify that a checksum for
// a layer already exists". Returns true if the checksum is found, or
// ErrNotFound if the layer doesn't exist.
func (i *LocalDriver) PutImageChecksum(imageID string, checksum string) (ok bool, err error) {
	// preflight check that the /json file exists first and 404 if not ("image not found")
	jsonExist, err := i.imageJsonExists(imageID)
	if err != nil {
		return false, err
	} else if !jsonExist {
		return false, ErrNotFound // Image not found
	}
	// TODO: check mark path and abort with 409 if exists
	csArr, err := i.readImageChecksum(imageID)
	if err != nil {
		return false, err
	}
	// Linear scan over the stored checksums; the provided value must match
	// one of them exactly.
	for _, cs := range csArr {
		if cs == checksum {
			return true, nil
		}
	}
	glog.Warningf("put_image_checksum: Wrong checksum. Provided: %s; Expected: %s", checksum, csArr)
	return false, nil
}
// PutImageLayer saves layer data assuming that the json data has
// already been saved. Returns ErrExist if the layer alrady exists.
func (i *LocalDriver) PutImageLayer(imageID string, r io.Reader) error {
	// preflight check that the /json file exists first and 404 if not ("image not found")
	f, _, jsonExist, err := i.openAndStatImageJson(imageID)
	if err != nil {
		return err
	}
	// NOTE(review): when the json file is missing, f is nil here; closing a
	// nil *os.File returns an error but does not panic — confirm intended.
	defer f.Close()
	if !jsonExist {
		return ErrNotFound // Image not found
	}
	jsonData, err := ioutil.ReadAll(f)
	if err != nil {
		return err
	}
	// Checksum is sha256 over (json bytes + '\n' + layer bytes),
	// rendered as "sha256:FOO_HEX".
	csh := sha256.New()
	_, _ = csh.Write(jsonData)
	_, _ = csh.Write([]byte{'\n'})
	// Tee the layer stream through the hash while writing it to disk.
	tr := io.TeeReader(r, csh)
	exists, err := i.saveImageResourceExclusive(imageID, "layer", tr)
	if err != nil {
		return err
	} else if exists {
		return ErrExist // Image already exists
	}
	cs := csh.Sum(nil)
	csStr := "sha256:" + hex.EncodeToString(cs)
	csArr := []string{csStr}
	checksumOut, err := json.Marshal(csArr)
	if err != nil {
		return err
	}
	// Persist the computed checksum next to the layer.
	if err := i.saveImageResource(imageID, "_checksum", bytes.NewReader(checksumOut)); err != nil {
		return err
	}
	return nil
}
// PutImageJson persists the json provided. Because of how checksums are done, we must always keep the original JSON bytes verbatim as provided by the docker client!
func (i *LocalDriver) PutImageJson(imageID string, attrJson []byte) error {
	// Decode only to validate; the raw bytes are what get written.
	var attr types.ImageAttributes
	if err := json.Unmarshal(attrJson, &attr); err != nil {
		// invalid input
		return fmt.Errorf("cannot read json request body: %v", err)
	}
	if attr.Id != imageID {
		// invalid input
		return errors.New("JSON data contains invalid id")
	}
	if attr.Parent != "" {
		// assert image exists with that id
		parentExists, err := i.imageJsonExists(attr.Parent)
		if err != nil {
			return fmt.Errorf("failed to assert existence of parent image: %v", err)
		} else if !parentExists {
			// invalid input
			return errors.New("Image depends on a non existing parent")
		}
	}
	// don't regenerate the json bytes, write exactly what we were given
	return i.saveImageResourceIfChanged(imageID, "json", attrJson)
}
// StreamImageJson opens the image's json for streaming and returns it
// together with the json size, layer size and (when present) checksums.
// Returns ErrNotFound when either the json or the layer is missing.
// On success the caller owns imageStat.Data and must close it.
func (i *LocalDriver) StreamImageJson(imageID string) (*ImageJsonStat, error) {
	f, stat, jsonExist, err := i.openAndStatImageJson(imageID)
	if err != nil {
		return nil, err
	} else if !jsonExist {
		return nil, ErrNotFound
	}
	layerSize, layerExist, err := i.imageLayerSize(imageID)
	if err != nil {
		f.Close()
		return nil, err
	} else if !layerExist {
		f.Close()
		return nil, ErrNotFound
	}
	imageStat := &ImageJsonStat{
		Data: f,
		Size: int(stat.Size()),
		LayerSize: layerSize,
	}
	// Checksums are optional; a missing checksum file is not an error.
	csArr, err := i.readImageChecksum(imageID)
	if err == ErrNotFound {
		// skip
	} else if err != nil {
		f.Close()
		return nil, err
	} else {
		imageStat.Checksum = csArr
	}
	return imageStat, nil
}
// openAndStatImageJson opens the image's json file and stats it. A missing
// file yields (nil, nil, false, nil) — not an error. On success the caller
// owns the returned *os.File and must close it.
func (i *LocalDriver) openAndStatImageJson(imageID string) (*os.File, os.FileInfo, bool, error) {
	f, err := os.Open(i.jsonPath(imageID))
	if os.IsNotExist(err) {
		return nil, nil, false, nil
	} else if err != nil {
		return nil, nil, false, err
	}
	stat, err := f.Stat()
	if err != nil {
		// File exists (hence true) but could not be statted.
		_ = f.Close()
		return nil, nil, true, err
	}
	return f, stat, true, nil
}
// imageLayerSize returns the layer file's size in bytes and whether the
// layer exists on disk.
func (i *LocalDriver) imageLayerSize(imageID string) (int, bool, error) {
	info, err := os.Stat(i.layerPath(imageID))
	switch {
	case os.IsNotExist(err):
		return 0, false, nil
	case err != nil:
		return 0, false, err
	}
	return int(info.Size()), true, nil
}
// StreamImageLayer opens the image's layer for streaming, wrapped so the
// total size is known. The caller must close the returned reader.
func (i *LocalDriver) StreamImageLayer(imageID string) (BoundedReadCloser, error) {
	f, stat, err := i.openAndStatImageLayer(imageID)
	if err != nil {
		return nil, err
	}
	return &simpleBoundedReadCloser{
		ReadCloser: f,
		size: int(stat.Size()),
	}, nil
}
// openAndStatImageLayer opens an image's layer file and stats it.
// Returns ErrNotFound when the layer does not exist. On success the caller
// owns the returned *os.File and must close it.
func (i *LocalDriver) openAndStatImageLayer(imageID string) (*os.File, os.FileInfo, error) {
	f, err := os.Open(i.layerPath(imageID))
	if os.IsNotExist(err) {
		return nil, nil, ErrNotFound
	} else if err != nil {
		return nil, nil, err
	}
	stat, err := f.Stat()
	if err != nil {
		// Fix: the old code leaked f on any Stat failure, including the
		// vanished-since-open race mapped to ErrNotFound below.
		_ = f.Close()
		if os.IsNotExist(err) {
			return nil, nil, ErrNotFound
		}
		return nil, nil, err
	}
	return f, stat, nil
}
// ImageAncestry walks the parent chain starting at imageID and returns the
// IDs from the image itself up to the root, in order.
func (i *LocalDriver) ImageAncestry(imageID string) ([]string, error) {
	a := []string{imageID}
	// Track visited IDs so corrupt metadata forming a parent cycle cannot
	// loop forever (previously an infinite loop).
	seen := map[string]struct{}{imageID: {}}
	current := imageID
	for {
		atts, err := i.imageAttributes(current)
		if err != nil {
			return nil, err
		}
		if atts.Parent == "" {
			break
		}
		if _, ok := seen[atts.Parent]; ok {
			return nil, fmt.Errorf("image ancestry cycle at %s", atts.Parent)
		}
		seen[atts.Parent] = struct{}{}
		a = append(a, atts.Parent)
		current = atts.Parent
	}
	return a, nil
}
// imageAttributes reads and decodes the image's json file. A missing file
// maps to ErrNotFound.
func (i *LocalDriver) imageAttributes(imageID string) (*types.ImageAttributes, error) {
	path := i.jsonPath(imageID)
	//glog.Info("reading attributes from path", path)
	data, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		return nil, ErrNotFound
	} else if err != nil {
		return nil, err
	}
	var a types.ImageAttributes
	if err = json.Unmarshal(data, &a); err != nil {
		return nil, err
	}
	return &a, nil
}
// saveImageResource writes the stream r to the named resource file under
// the image's directory, unconditionally.
func (i *LocalDriver) saveImageResource(imageID, name string, r io.Reader) error {
	dirpath := i.imagePath(imageID)
	path := filepath.Join(dirpath, name)
	return writeFile(path, r)
}

// saveImageResourceIfChanged writes b to the named resource file only when
// the content differs from what is already on disk.
func (i *LocalDriver) saveImageResourceIfChanged(imageID, name string, b []byte) error {
	dirpath := i.imagePath(imageID)
	path := filepath.Join(dirpath, name)
	return writeFileIfChanged(path, b)
}

// saveImageResourceExclusive returns (true,nil) if the resource already existed
func (i *LocalDriver) saveImageResourceExclusive(imageID, name string, r io.Reader) (bool, error) {
	dirpath := i.imagePath(imageID)
	path := filepath.Join(dirpath, name)
	return writeFileExclusive(path, r)
}
// imagePath returns the directory holding all of an image's resources.
func (i *LocalDriver) imagePath(imageID string) string {
	return filepath.Join(i.Dir, "images", imageID)
}

// jsonPath returns the path of the image's json resource.
func (i *LocalDriver) jsonPath(imageID string) string {
	dirpath := i.imagePath(imageID)
	return filepath.Join(dirpath, "json")
}

// layerPath returns the path of the image's layer blob.
func (i *LocalDriver) layerPath(imageID string) string {
	dirpath := i.imagePath(imageID)
	return filepath.Join(dirpath, "layer")
}

// checksumPath returns the path of the image's checksum file.
func (i *LocalDriver) checksumPath(imageID string) string {
	dirpath := i.imagePath(imageID)
	return filepath.Join(dirpath, "_checksum")
}
|
package _058_最后一个单词的长度
// lengthOfLastWord returns the length of the last space-separated word in
// s, or 0 when s contains no word.
//
// Fix: the old condition `s[i] == ' ' && started` sent a space seen before
// the word started into the counting branch, so trailing spaces were
// counted as letters (e.g. "a " returned 2); the inner else/continue was
// dead code.
func lengthOfLastWord(s string) int {
	started := false
	var count int
	// Scan from the end: skip trailing spaces, count until the next space.
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == ' ' {
			if started {
				// The word has ended.
				return count
			}
			// Still in the trailing-space run; ignore it.
			continue
		}
		started = true
		count++
	}
	return count
}
|
package jex
import (
"bytes"
"io/ioutil"
"reflect"
"testing"
"time"
)
// TestA exercises every value category the jex codec must round-trip:
// signed/unsigned ints, a nested map, a fixed array and a slice.
type TestA struct {
	Int8 int8
	UInt8 uint8
	Int int
	UInt uint
	Map map[string]TestA
	Array [2]float32
	Slice []float64
}
// TestMarshalStruct round-trips a nested TestA through Marshal/Unmarshal
// and requires deep equality. It also dumps the encoding to a.jex on disk
// (side effect; the write error is intentionally ignored).
func TestMarshalStruct(t *testing.T) {
	m := map[string]TestA{
		"ASDF": TestA{1, 2, 3, 4, nil, [2]float32{}, nil},
	}
	a := TestA{-123, 234, -456780, 567890, m, [2]float32{3.14, 1.414}, []float64{0.123, -0.000000001}}
	buf := bytes.NewBuffer(nil)
	err := Marshal(buf, &a, false)
	if err != nil {
		t.Fatal(err)
	}
	ioutil.WriteFile("a.jex", buf.Bytes(), 0644)
	var b TestA
	err = Unmarshal(buf, &b)
	if err != nil {
		t.Fatal(err)
	}
	// t.Logf("%#v", a)
	// t.Logf("%#v", b)
	if !reflect.DeepEqual(a, b) {
		t.Error("a != b")
	}
}
// TestMarshalSlice verifies that a float64 slice marshals without error.
// Fix: the function was named "TesMarshalSlice" (missing 't'), so the Go
// test runner silently never executed it.
func TestMarshalSlice(t *testing.T) {
	var slice = []float64{1, 3, 4}
	buf := bytes.NewBuffer(nil)
	err := Marshal(buf, slice, true)
	if err != nil {
		t.Fatal(err)
	}
	//ioutil.WriteFile("b.jex", buf.Bytes(), 0644)
}
// TestSimpleUnmarshal round-trips a single bool through Marshal/Unmarshal.
func TestSimpleUnmarshal(t *testing.T) {
	buf := bytes.NewBuffer(nil)
	var x = true
	err := Marshal(buf, x, false)
	if err != nil {
		t.Fatal(err)
	}
	//buf.Reset()
	var y bool
	err = Unmarshal(buf, &y)
	if err != nil {
		t.Fatal(err)
	}
	if x != y {
		t.Error("x != y")
	}
}
// TestJexTime converts a time.Time to the jex representation and back,
// allowing up to one microsecond of round-trip drift.
func TestJexTime(t *testing.T) {
	var t0 = time.Now()
	var j = jexTime(t0)
	var t1 = golangTime(j)
	var d = t1.Sub(t0)
	if d < -time.Microsecond || d > time.Microsecond {
		t.Error("t != t1", t0, t1)
	}
}
|
package effects
import "github.com/faiface/beep"
// Gain amplifies the wrapped Streamer. The output of the wrapped Streamer gets multiplied by
// 1+Gain.
//
// Note that gain is not equivalent to the human perception of volume. Human perception of volume is
// roughly exponential, while gain only amplifies linearly.
type Gain struct {
	Streamer beep.Streamer // source of samples to amplify
	Gain     float64       // linear gain; 0 leaves the signal unchanged
}
// Stream streams the wrapped Streamer amplified by Gain.
func (g *Gain) Stream(samples [][2]float64) (n int, ok bool) {
	n, ok = g.Streamer.Stream(samples)
	// Hoist the loop-invariant multiplier out of the per-sample loop.
	gain := 1 + g.Gain
	for i := range samples[:n] {
		samples[i][0] *= gain
		samples[i][1] *= gain
	}
	return n, ok
}
// Err propagates the wrapped Streamer's errors.
func (g *Gain) Err() error {
	return g.Streamer.Err()
}
|
// Copyright (c) 2018 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
package siv
import (
"bytes"
"testing"
"golang.org/x/sys/cpu"
)
// TestAESCMAC runs the SIV-CMAC vectors against the assembler path (when
// AES hardware is available) and then against the generic path.
func TestAESCMAC(t *testing.T) {
	hasAES := cpu.X86.HasAES
	// Restore the CPU feature flag however the test exits.
	defer func(hasAES bool) { cpu.X86.HasAES = hasAES }(hasAES)
	if hasAES {
		t.Run("Asm", testAESCMAC)
		cpu.X86.HasAES = false // force the generic implementation below
	}
	t.Run("Generic", testAESCMAC)
}
// testAESCMAC checks Seal and Open against each aesSivTests vector using
// whichever implementation the current CPU flags select.
func testAESCMAC(t *testing.T) {
	for i, v := range aesSivTests {
		c, err := NewCMAC(v.Key())
		if err != nil {
			t.Errorf("Test %d: Failed to create AES_SIV: %v", i, err)
			continue
		}
		ciphertext := c.Seal(nil, v.Nonce(), v.Plaintext(), v.AdditionalData())
		if !bytes.Equal(ciphertext, v.Ciphertext()) {
			t.Errorf("Test %d: Seal - ciphertext mismatch", i)
		}
		// dst is a zero-length slice aliasing ciphertext past the SIV tag,
		// so Open decrypts in place without allocating.
		plaintext, err := c.Open(ciphertext[c.Overhead():c.Overhead()], v.Nonce(), ciphertext, v.AdditionalData())
		if err != nil {
			t.Errorf("Test %d: Open - %v", i, err)
		}
		if !bytes.Equal(plaintext, v.Plaintext()) {
			t.Errorf("Test %d: Open - plaintext mismatch", i)
		}
	}
}
// TestAESCMACAssembler cross-checks the assembler and generic AES-SIV-CMAC
// implementations over all key sizes and plaintext lengths up to 1 KiB.
func TestAESCMACAssembler(t *testing.T) {
	if !cpu.X86.HasAES {
		t.Skip("No assembler implementation / AES hardware support")
	}
	keys := [][]byte{make([]byte, 32), make([]byte, 48), make([]byte, 64)}
	for i := range keys {
		for j := range keys[i] {
			keys[i][j] = byte(i*j + len(keys))
		}
	}
	nonce := make([]byte, 16)
	for i := range nonce {
		nonce[i] = byte(i)
	}
	plaintext := make([]byte, 1024)
	ciphertext := make([]byte, len(plaintext)+16)
	for i := range keys {
		for j := range plaintext {
			// Fix: fill position j (the inner loop index), not i — the old
			// code only ever rewrote plaintext[i], leaving the buffer
			// otherwise all zeros.
			plaintext[j] = byte(j + i)
			testAESCMACAssmebler(i, ciphertext[:16+j], nonce, plaintext[:j], plaintext[j:], keys[i], t)
		}
	}
}
// testAESCMACAssmebler (sic — name typo kept to avoid touching callers)
// seals/opens once with the assembler implementation, then disables AES
// hardware and verifies the generic implementation produces identical
// ciphertext and recovers the same plaintext.
func testAESCMACAssmebler(i int, ciphertext, nonce, plaintext, additionalData, key []byte, t *testing.T) {
	hasAES := cpu.X86.HasAES
	// Restore the feature flag on exit.
	defer func(hasAES bool) { cpu.X86.HasAES = hasAES }(hasAES)
	c, err := NewCMAC(key)
	if err != nil {
		t.Fatalf("Test %d: failed to create AES-SIV-CMAC: %v", i, err)
	}
	ciphertext = c.Seal(ciphertext[:0], nonce, plaintext, additionalData)
	asmPlaintext, err := c.Open(nil, nonce, ciphertext, additionalData)
	if err != nil {
		t.Fatalf("Test %d: Open failed: %v", i, err)
	}
	if !bytes.Equal(plaintext, asmPlaintext) {
		t.Fatalf("Test %d: plaintext mismatch", i)
	}
	cpu.X86.HasAES = false // Disable AES assembler implementations
	c, err = NewCMAC(key)
	if err != nil {
		t.Fatalf("Test %d: failed to create AES-SIV-CMAC: %v", i, err)
	}
	refCiphertext := c.Seal(nil, nonce, plaintext, additionalData)
	if !bytes.Equal(refCiphertext, ciphertext) {
		t.Fatalf("Test %d: ciphertext mismatch", i)
	}
	// dst aliases ciphertext past the 16-byte tag: in-place decryption.
	refPlaintext, err := c.Open(ciphertext[16:16], nonce, ciphertext, additionalData)
	if err != nil {
		t.Fatalf("Test %d: Open failed: %v", i, err)
	}
	if !bytes.Equal(plaintext, refPlaintext) {
		t.Fatalf("Test %d: plaintext mismatch", i)
	}
}
// Seal/Open throughput benchmarks for AES-128/192/256 SIV-CMAC at payload
// sizes of 64 B, 1 KiB and 8 KiB (the []byte argument is the SIV key).
func BenchmarkAES128CMACSeal64(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 32), 64, b) }
func BenchmarkAES128CMACSeal1K(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 32), 1024, b) }
func BenchmarkAES128CMACSeal8K(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 32), 8*1024, b) }
func BenchmarkAES128CMACOpen64(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 32), 64, b) }
func BenchmarkAES128CMACOpen1K(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 32), 1024, b) }
func BenchmarkAES128CMACOpen8K(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 32), 8*1024, b) }
func BenchmarkAES192CMACSeal64(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 48), 64, b) }
func BenchmarkAES192CMACSeal1K(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 48), 1024, b) }
func BenchmarkAES192CMACSeal8K(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 48), 8*1024, b) }
func BenchmarkAES192CMACOpen64(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 48), 64, b) }
func BenchmarkAES192CMACOpen1K(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 48), 1024, b) }
func BenchmarkAES192CMACOpen8K(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 48), 8*1024, b) }
func BenchmarkAES256CMACSeal64(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 64), 64, b) }
func BenchmarkAES256CMACSeal1K(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 64), 1024, b) }
func BenchmarkAES256CMACSeal8K(b *testing.B) { benchmarkAESCMACSeal(make([]byte, 64), 8*1024, b) }
func BenchmarkAES256CMACOpen64(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 64), 64, b) }
func BenchmarkAES256CMACOpen1K(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 64), 1024, b) }
func BenchmarkAES256CMACOpen8K(b *testing.B) { benchmarkAESCMACOpen(make([]byte, 64), 8*1024, b) }
// benchmarkAESCMACSeal measures Seal throughput for the given key over a
// zero plaintext of `size` bytes, reusing one output buffer.
func benchmarkAESCMACSeal(key []byte, size int64, b *testing.B) {
	c, err := NewCMAC(key)
	if err != nil {
		b.Fatal(err)
	}
	plaintext := make([]byte, size)
	ciphertext := make([]byte, len(plaintext)+16)
	b.ResetTimer()
	b.SetBytes(size)
	for i := 0; i < b.N; i++ {
		c.Seal(ciphertext[:0], nil, plaintext, nil)
	}
}
// benchmarkAESCMACOpen measures Open throughput for the given key: a valid
// ciphertext is produced once, then repeatedly decrypted in place.
func benchmarkAESCMACOpen(key []byte, size int64, b *testing.B) {
	c, err := NewCMAC(key)
	if err != nil {
		b.Fatal(err)
	}
	plaintext := make([]byte, size)
	ciphertext := c.Seal(nil, nil, plaintext, nil)
	b.ResetTimer()
	b.SetBytes(size)
	for i := 0; i < b.N; i++ {
		// An authentication failure here is a bug, not a benchmark result.
		if _, err := c.Open(plaintext[:0], nil, ciphertext, nil); err != nil {
			panic(err)
		}
	}
}
|
package main
import (
"fmt"
"github.com/ksclouds/PowerNLP/Seg"
BaseTrie "github.com/ksclouds/PowerNLP/Seg/Collections"
)
// main exercises the MapTrie: inserts a mixed set of ASCII and Chinese
// words, segments a sample string against the trie, and prints each token.
func main() {
	tree := BaseTrie.NewMapTrie()
	tree.Insert("word.py")
	tree.Insert("wor")
	tree.Insert("wx")
	tree.Insert("abastract")
	tree.Insert("中国人")
	tree.Insert("国足")
	//tree.Insert("中国")
	//Trie.PreTraverse(tree.Root)
	//fmt.Println(tree.CountPrefix("wordf"))
	//fmt.Println(tree.Has("word.py"))
	//fmt.Println(tree.CountPrefix("wor"))
	//fmt.Println(tree.CountPrefix("wo"))
	//fmt.Println(tree.CountPrefix("w"))
	//fmt.Println(tree.CountPrefix("ab"))
	//fmt.Println(tree.CountPrefix("中"))
	//fmt.Println(tree.Has("ab"))
	//fmt.Println(tree.Has("中国人"))
	//fmt.Println(tree.Has("中国"))
	r := tree.Segment("大中国word中国人wxabwo大中国足abastract")
	for _, v := range r {
		fmt.Println(v)
	}
	// Also touches the default segmenter (result unused; presumably a
	// smoke test of its initialization — confirm intent).
	Seg.DeafaultSegment().Segment("")
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package engine
import (
"github.com/Azure/azure-sdk-for-go/services/preview/msi/mgmt/2015-08-31-preview/msi"
"github.com/Azure/go-autorest/autorest/to"
)
// createUserAssignedIdentities builds the ARM resource for the cluster's
// user-assigned managed identity; all values are ARM template variable
// references resolved at deployment time.
func createUserAssignedIdentities() UserAssignedIdentitiesARM {
	return UserAssignedIdentitiesARM{
		ARMResource: ARMResource{
			APIVersion: "[variables('apiVersionManagedIdentity')]",
		},
		Identity: msi.Identity{
			Type: to.StringPtr("Microsoft.ManagedIdentity/userAssignedIdentities"),
			Name: to.StringPtr("[variables('userAssignedID')]"),
			Location: to.StringPtr("[variables('location')]"),
		},
	}
}
// createAppGwUserAssignedIdentities builds the ARM resource for the
// Application Gateway ingress controller's user-assigned identity; it
// differs from createUserAssignedIdentities only in the name variable.
func createAppGwUserAssignedIdentities() UserAssignedIdentitiesARM {
	return UserAssignedIdentitiesARM{
		ARMResource: ARMResource{
			APIVersion: "[variables('apiVersionManagedIdentity')]",
		},
		Identity: msi.Identity{
			Type: to.StringPtr("Microsoft.ManagedIdentity/userAssignedIdentities"),
			Name: to.StringPtr("[variables('appGwICIdentityName')]"),
			Location: to.StringPtr("[variables('location')]"),
		},
	}
}
|
package leetcode
// minCostToMoveChips returns the minimum cost to gather all chips onto one
// position, where moving by 2 is free and moving by 1 costs 1. The answer
// is the smaller of the odd-position and even-position chip counts.
func minCostToMoveChips(chips []int) int {
	var odd int
	for _, pos := range chips {
		odd += pos & 1 // 1 for odd positions, 0 for even
	}
	even := len(chips) - odd
	if odd < even {
		return odd
	}
	return even
}
|
package channels
import "time"
//Sender 함수는 done 채널에 데이터가 기록될 때까지
//ch 채널에 "tick"을 보내고, done 채널에 데이터가
//기록되면 "sender done"을 보내고 종료한다.
func Sender(ch chan string, done chan bool) {
t := time.Tick(100 * time.Millisecond)
for {
select {
case <-done:
ch <- "sender done."
return
case <-t:
ch <- "tick"
}
}
}
|
package ecr
import (
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/ecr/ecriface"
)
// response is the canned GetAuthorizationToken reply shared by the tests;
// it is populated in init below.
var response ecr.GetAuthorizationTokenOutput

// Mocks ECR API calls for GetAuthorizationToken
type mockGetAuthorizationToken struct {
	ecriface.ECRAPI
	Resp ecr.GetAuthorizationTokenOutput
}
// init populates the shared mock response with a single authorization
// entry whose token decodes to "AWS:password".
func init() {
	// Setup mock response
	response = ecr.GetAuthorizationTokenOutput{
		AuthorizationData: []*ecr.AuthorizationData{
			{
				AuthorizationToken: aws.String("QVdTOnBhc3N3b3JkCg=="), // Base64 encoded "AWS:password"
				ProxyEndpoint: aws.String("https://default.dkr.ecr.us-east-1.amazonaws.com"),
			},
		},
	}
}
// GetAuthorizationToken returns the canned response and never errors.
func (m mockGetAuthorizationToken) GetAuthorizationToken(*ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) {
	return &m.Resp, nil
}
// Test_GetCredential_UpdatesReceiverWithCredential verifies that
// GetCredential decodes the mocked authorization token into the
// registry's username, password and proxy endpoint.
func Test_GetCredential_UpdatesReceiverWithCredential(t *testing.T) {
	// Create new registry
	registry := Registry{
		Client: mockGetAuthorizationToken{Resp: response},
	}
	// Test that Credentials is nil
	if registry.Credential != nil {
		t.Errorf("Expected credential to be nil. Got: %v", registry.Credential)
	}
	// Get credential
	err := registry.GetCredential()
	if err != nil {
		t.Fatalf("registry.GetCredential() call failed. Caused by: %v", err.Error())
	}
	// Test that registry credential was updated
	if registry.Credential.Username != "AWS" {
		t.Errorf("Expected username to be \"AWS\". Got: \"%v\"", registry.Credential.Username)
	}
	if registry.Credential.Password != "password" {
		t.Errorf("Expected password to be \"password\". Got: \"%v\"", registry.Credential.Password)
	}
	if registry.Credential.ProxyEndpoint != "https://default.dkr.ecr.us-east-1.amazonaws.com" {
		t.Errorf("Expected proxyendpoint to be \"https://default.dkr.ecr.us-east-1.amazonaws.com\". Got: \"%v\"", registry.Credential.ProxyEndpoint)
	}
}
|
package main
import (
"fmt"
"math/rand"
)
// biasedCoin returns 0 with probability 0.6 and 1 with probability 0.4,
// using the package-level math/rand source.
func biasedCoin() int {
	n := rand.Intn(100)
	if n < 60 {
		return 0
	}
	return 1
}
// fairCoin derives an unbiased bit from the biased coin via the von
// Neumann trick: flip twice and keep the first flip only when the two
// flips differ (the 0,1 and 1,0 outcomes are equally likely).
func fairCoin() int {
	for {
		coin1 := biasedCoin()
		coin2 := biasedCoin()
		if coin1 != coin2 {
			return coin1
		}
	}
}
// CoinFn is any zero-argument coin flip returning 0 or 1.
type CoinFn func() int

// flip performs 100 flips of the selected coin and prints the 0/1 tally.
func flip() {
	zeros := 0
	ones := 0
	var flipCoin CoinFn
	// Swap in biasedCoin here to visualize the bias.
	// flipCoin = biasedCoin
	flipCoin = fairCoin
	for i := 0; i < 100; i++ {
		flipResult := flipCoin()
		if flipResult == 0 {
			zeros++
		} else {
			ones++
		}
	}
	fmt.Printf("zeros: %d; ones: %d\n", zeros, ones)
}
// main runs the coin-flip demonstration once.
func main() {
	flip()
}
|
package site
// Menu is a site navigation entry: a display name plus its URL alias.
type Menu struct {
	Name string `json:"name"`
	Alias string `json:"alias"`
}
|
package hooks
import (
"testing"
)
// implementing the hook
// execute appends the hook's name to the thing's text and returns the
// same thing for chaining.
func (h *Hook) execute(th *thing) *thing {
	th.SetText(th.text + h.name)
	return th
}
// TestHoox chains three hooks (foo -> bar -> baz) and verifies that
// Process applies them to the thing's text in registration order.
func TestHoox(t *testing.T) {
	a := Hook{name: "foo"}
	b := Hook{name: "bar"}
	c := Hook{name: "baz"}
	a.Sethook(b)
	a.Sethook(c)
	th := thing{text: "Hello"}
	a.Process(&th)
	result := th.text
	expect := "Hellofoobarbaz"
	if result != expect {
		t.Errorf("Expect result to be equal %s, but %s\n", expect, result)
	}
}
|
package pypwsh
import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/rfalias/gopypwsh"
"os"
"time"
"math/rand"
)
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
// waitForLock blocks until client.lockfile no longer exists, then sleeps an
// extra second as a settling delay. Always returns true.
//
// NOTE(review): this file-based lock is inherently racy — another process
// can create the lockfile between this check and the caller's os.Create.
func waitForLock(client *Powershell) bool {
	// Random initial jitter so concurrent resources don't poll in lockstep.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	time.Sleep(time.Duration(r.Intn(100)) * time.Millisecond)
	// Poll every 100ms until the lockfile disappears (was `for locked == true`).
	for fileExists(client.lockfile) {
		time.Sleep(100 * time.Millisecond)
	}
	time.Sleep(1000 * time.Millisecond)
	return true
}
// resourcePyPwsh describes the pypwsh Terraform resource: a single
// required, force-new "cmd" string that is executed on create.
func resourcePyPwsh() *schema.Resource {
	res := &schema.Resource{
		Create: resourcePyPwshRecordCreate,
		Read:   resourcePyPwshRecordRead,
		Delete: resourcePyPwshRecordDelete,
	}
	res.Schema = map[string]*schema.Schema{
		"cmd": {
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
	}
	return res
}
// resourcePyPwshRecordCreate acquires the client lockfile, runs the
// configured command remotely via gopypwsh, and records the command
// string itself as the resource ID.
func resourcePyPwshRecordCreate(d *schema.ResourceData, m interface{}) error {
	// Convert the interface so we can use the connection fields
	// (username, server, ...).
	client := m.(*Powershell)
	cmd := d.Get("cmd").(string)
	waitForLock(client)
	file, err := os.Create(client.lockfile)
	if err != nil {
		return err
	}
	// Release the lock on every exit path. Previously an error from
	// RunPyCommandCreate returned without removing the lockfile, which
	// made every later waitForLock call spin forever.
	defer func() {
		file.Close()
		os.Remove(client.lockfile)
	}()
	if _, err := gopypwsh.RunPyCommandCreate(client.username, client.password, client.server, cmd, client.py); err != nil {
		return err
	}
	d.SetId(cmd)
	return nil
}
// resourcePyPwshRecordRead is a no-op: the remote command's effect is
// not queryable, so state is never refreshed.
func resourcePyPwshRecordRead(d *schema.ResourceData, m interface{}) error {
	return nil
}

// resourcePyPwshRecordDelete is a no-op; nothing is torn down remotely.
func resourcePyPwshRecordDelete(d *schema.ResourceData, m interface{}) error {
	return nil
}
|
package main
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"os"
"runtime"
"time"
nats "github.com/nats-io/nats.go"
)
// JobOrder identifies a job by ID and name.
type JobOrder struct {
	ID   string `json:"ID"`
	Name string `json:"name"`
}

// SalesItem is one line item of a sales order.
type SalesItem struct {
	ItemID    string  `json:"ID"`
	Name      string  `json:"name"`
	Qty       float32 `json:"qty"`
	UnitPrice float32 `json:"unitprice"`
}

// SalesOrder is an order with its line items.
// NOTE(review): Amount has no json tag and therefore marshals as
// "Amount" — confirm that is the intended wire format.
type SalesOrder struct {
	SalesOrderID string `json:"ID"`
	Amount       float32
	Items        []SalesItem `json:"items"`
}

// server holds the shared NATS connection used by the HTTP handlers.
type server struct {
	nc *nats.Conn
}

// natsServer is assigned once in main, before the HTTP server starts
// serving requests.
var natsServer server
// salesOrderHandler serves the sales-order API:
//
//	GET  — requests all orders from NATS subject "All.SalesOrder.List"
//	       and relays them as JSON.
//	POST — validates the payload, forwards it to subject
//	       "Process.New.SalesOrder", and acknowledges with the order ID.
//
// Any other method falls through the switch with an empty 200 response.
func salesOrderHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		resp, err := natsServer.nc.Request("All.SalesOrder.List", nil, 500*time.Millisecond)
		if err != nil {
			responseError(w, http.StatusBadRequest, "Error on request 'All.SalesOrder.List'")
			return
		}
		if resp == nil {
			responseError(w, http.StatusBadRequest, "Problem, has response but no message.")
			return
		}
		var salesOrders []SalesOrder
		// A decode failure is logged but still answered with whatever
		// was decoded, preserving the original best-effort behavior.
		err = json.Unmarshal(resp.Data, &salesOrders)
		if err != nil {
			log.Println("Error on unmarshal salesOrders", err)
		}
		log.Println(salesOrders)
		responseOk(w, salesOrders)
	case http.MethodPost:
		// The read error was previously discarded with `_`; reject the
		// request instead of unmarshalling a possibly-partial body.
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			responseError(w, http.StatusBadRequest, "Invalid data")
			return
		}
		var salesOrder SalesOrder
		if err := json.Unmarshal(body, &salesOrder); err != nil {
			responseError(w, http.StatusBadRequest, "Invalid data")
			return
		}
		resp, err := natsServer.nc.Request("Process.New.SalesOrder", body, 500*time.Millisecond)
		if err != nil {
			responseError(w, http.StatusBadRequest, "Error on request 'Process.New.SalesOrder'")
			return
		}
		log.Println("Response to request Process.New.SalesOrder'", string(resp.Data))
		type response struct {
			ID      string `json:"id"`
			Message string `json:"message"`
		}
		responseOk(w, response{ID: salesOrder.SalesOrderID, Message: "Sales order queued for processing."})
	}
}
// main reads its configuration from the environment, connects to NATS,
// registers the sales-order endpoints, and serves HTTP until the
// listener fails.
func main() {
	var (
		serverPort  = os.Getenv("SERVER_PORT")
		clientID    = os.Getenv("CLIENT_ID")
		natsServers = os.Getenv("NATS_SERVER_ADDR")
	)
	connector := NewConnector(clientID)
	log.Printf("serverPort: %s, clientID: %s, natsServers: %s\n", serverPort, clientID, natsServers)
	if err := connector.SetupConnectionToNATS(natsServers, nats.MaxReconnects(-1)); err != nil {
		log.Printf("Problem setting up connection to NATS servers, %v", err)
		runtime.Goexit()
	}
	defer connector.Shutdown()
	natsServer = server{nc: connector.NATS()}
	for _, route := range []string{"/salesorder", "/salesorders"} {
		http.HandleFunc(route, salesOrderHandler)
	}
	log.Printf("====== Generator server listening on port %s...", serverPort)
	if err := http.ListenAndServe(":"+serverPort, nil); err != nil {
		log.Fatal(err)
	}
}
func responseError(w http.ResponseWriter, code int, message string) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
body := map[string]string{
"error": message,
}
json.NewEncoder(w).Encode(body)
}
func responseOk(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(body)
}
|
// Copyright © 2018 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
package processors
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/vmware/kube-fluentd-operator/config-reloader/fluentd"
)
// TestMakeRewriteTagFragment checks the generated rewrite_tag_filter
// fragment verbatim: kube.<src>.** traffic is retagged to
// kube.<dest>.<tag_parts[2]>.<tag_parts[3]>.
func TestMakeRewriteTagFragment(t *testing.T) {
	frag, err := makeRewriteTagFragment("src", "dest")
	assert.Nil(t, err)
	// The expectation is compared byte-for-byte, whitespace included.
	str := `<match kube.src.**>
 @type rewrite_tag_filter
 <rule>
 invert true
 key _dummy_
 pattern /ZZ/
 tag kube.dest.${tag_parts[2]}.${tag_parts[3]}
 </rule>
</match>
`
	assert.Equal(t, str, frag.String())
}
// TestExtractSourceNsFromMacro table-tests macro parsing: only
// well-formed "@$from(ns)" expressions yield a namespace; every
// malformed variant parses to "".
func TestExtractSourceNsFromMacro(t *testing.T) {
	cases := []struct {
		expr string
		want string
	}{
		{"asfag", ""},
		{"$from(a)", ""},
		{"@$from ( a ", ""},
		{"@$from ()", ""},
		{"@$from ( )", ""},
		{"@$from a)", ""},
		{"@$from (a", ""},
		{"@$from(a)", "a"},
		{"@$from ( a ) ", "a"},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.want, extractSourceNsFromMacro(tc.expr), "%s should parse into %s", tc.expr, tc.want)
	}
}
// TestMakeBridgeName checks the "@bridge-<from>__<to>" naming scheme.
func TestMakeBridgeName(t *testing.T) {
	assert.Equal(t, "@bridge-from__to", makeBridgeName("from", "to"))
}
// TestProcessShareDirectiveFromReceivingNs exercises the consumer side
// of log sharing: a <label @$from(source-ns)> block in dest-ns whose
// bridge is already listed in ReferencedBridges must be rewritten to
// the concrete @bridge label, gaining a match for the source
// namespace's kube tag prefix ahead of the original catch-all match.
func TestProcessShareDirectiveFromReceivingNs(t *testing.T) {
	// The producing namespace's config that would feed this bridge,
	// kept for reference only:
	// sourceNsConf := `
	// <match $labels(msg=stdout)>
	// @type copy
	// <store>
	// @type share
	// with_namespace kfo-consumer
	// </store>
	// <store>
	// @type share
	// with_namespace no-such-namespace
	// </store>
	// </match>
	// <match **>
	// @type logzio_buffered
	// endpoint_url https://listener.logz.io:8071?token=$LOGZ_TOKEN
	// </match>
	//`
	destNsConf := `
 <label @$from(source-ns)>
 <match **>
 @type elasticsearch
 num_threads 8
 </match>
 </label>
 `
	gen := &GenerationContext{
		ReferencedBridges: map[string]bool{"@bridge-source-ns__dest-ns": true},
	}
	ctx := &ProcessorContext{
		Namespace:         "dest-ns",
		GenerationContext: gen,
	}
	input, err := fluentd.ParseString(destNsConf)
	assert.Nil(t, err)
	state := &shareLogsState{}
	state.SetContext(ctx)
	// The receiving side contributes nothing to the validation trailer.
	vt := state.GetValidationTrailer(input)
	assert.True(t, len(vt) == 0)
	processed, err := state.Process(input)
	assert.Nil(t, err)
	// The label is renamed to the bridge, and the source namespace's
	// tag-scoped match is inserted before the original catch-all.
	assert.Equal(t, "label", processed[0].Name)
	assert.Equal(t, "@bridge-source-ns__dest-ns", processed[0].Tag)
	assert.Equal(t, "match", processed[0].Nested[0].Name)
	assert.Equal(t, "kube.source-ns.**", processed[0].Nested[0].Tag)
	assert.Equal(t, "match", processed[0].Nested[1].Name)
	assert.Equal(t, "**", processed[0].Nested[1].Tag)
}
// TestProcessShareDirectiveFromPublishigNs exercises the producer side
// of log sharing: "@type share" stores are rewritten to relabel into
// the bridge label when the destination bridge is referenced, dropped
// otherwise (no-such-namespace), and the bridge label itself appears in
// the validation trailer.
func TestProcessShareDirectiveFromPublishigNs(t *testing.T) {
	sourceNsConf := `
 <match $labels(msg=stdout)>
 @type copy
 <store>
 @type share
 with_namespace dest-ns
 </store>
 <store>
 @type share
 with_namespace no-such-namespace
 </store>
 </match>
 `
	gen := &GenerationContext{
		ReferencedBridges: map[string]bool{"@bridge-source-ns__dest-ns": true},
	}
	ctx := &ProcessorContext{
		Namespace:         "source-ns",
		GenerationContext: gen,
	}
	input, err := fluentd.ParseString(sourceNsConf)
	assert.Nil(t, err)
	state := &shareLogsState{}
	state.SetContext(ctx)
	// Exactly one bridge label is emitted for validation — the
	// unreferenced no-such-namespace share is ignored.
	vt := state.GetValidationTrailer(input)
	assert.True(t, len(vt) == 1)
	assert.Equal(t, "label", vt[0].Name)
	assert.Equal(t, "@bridge-source-ns__dest-ns", vt[0].Tag)
	assert.Equal(t, "match", vt[0].Nested[0].Name)
	processed, err := state.Process(input)
	assert.Nil(t, err)
	// The copy keeps a single store: a relabel into the bridge label.
	assert.Equal(t, "copy", processed[0].Type())
	assert.Equal(t, 1, len(processed[0].Nested))
	assert.Equal(t, "relabel", processed[0].Nested[0].Type())
	assert.Equal(t, "@bridge-source-ns__dest-ns", processed[0].Nested[0].Param("@label"))
}
// TestProcessShareDirectiveCollectBridges verifies the Prepare pass:
// encountering a <label @$from(ns)> block registers the corresponding
// bridge name in the shared GenerationContext.
func TestProcessShareDirectiveCollectBridges(t *testing.T) {
	destNsConf := `<label @$from(source-ns)>
 <match **>
 @type elasticsearch
 num_threads 8
 </match>
 </label>
 `
	gen := &GenerationContext{
		ReferencedBridges: map[string]bool{},
	}
	ctx := &ProcessorContext{
		Namespace:         "dest-ns",
		GenerationContext: gen,
	}
	input, err := fluentd.ParseString(destNsConf)
	assert.Nil(t, err)
	state := &shareLogsState{}
	state.SetContext(ctx)
	_, err = state.Prepare(input)
	assert.Nil(t, err)
	// Prepare collects the bridge derived from the macro's namespace
	// and the processor's own namespace.
	assert.Equal(t, 1, len(gen.ReferencedBridges))
	assert.Equal(t, true, gen.ReferencedBridges["@bridge-source-ns__dest-ns"])
}
|
package main
import (
"fmt"
)
// main is a deliberate deadlock demonstration (see the challenge note
// below): the send on an unbuffered channel blocks until a receiver is
// ready, but the only receive comes later in this same goroutine, so
// it is never reached. Possible fixes: make the channel buffered
// (make(chan int, 1)) or perform the send from a separate goroutine.
func main() {
	c := make(chan int)
	c <- 1
	fmt.Println(<-c)
}
/*
This results in a deadlock.
Can you determine why?
And what would you do to fix it?
*/
// go run main.go
// fatal error: all goroutines are asleep - deadlock!
// goroutine 1 [chan send]:
// main.main()
// /home/ajoncode/goworkspace/src/github.com/AJONCODE/Golang-Fundamentals/22_go-routines/10_deadlock-challenges/01_deadlock-challenge/main.go:9 +0x59
// exit status 2
|
package main
import (
"context"
"flag"
"log"
"net/http"
"os"
"os/signal"
"time"
"github.com/gorilla/mux"
"github.com/matscus/Hamster/Mock/info_service/datapool"
"github.com/matscus/Hamster/Mock/info_service/handlers"
)
var (
pemPath string
keyPath string
proto string
listenport string
wait time.Duration
writeTimeout time.Duration
readTimeout time.Duration
idleTimeout time.Duration
)
// init starts loading the CSV datapool in the background as soon as
// the package initializes, so it can warm up while flags are parsed.
// NOTE(review): the callee is spelled IntitDataPool in the datapool
// package — looks like a typo there; confirm before renaming.
func init() {
	go datapool.IntitDataPool()
}
// main parses configuration flags, registers the mock endpoints on a
// gorilla/mux router, and serves HTTP or HTTPS until interrupted, then
// shuts down gracefully within the -graceful-timeout window.
func main() {
	// NOTE(review): the pem path falls back to env var SERVERREM —
	// possibly a typo for SERVERPEM; confirm before changing.
	flag.StringVar(&pemPath, "pempath", os.Getenv("SERVERREM"), "path to pem file")
	flag.StringVar(&keyPath, "keypath", os.Getenv("SERVERKEY"), "path to key file")
	flag.StringVar(&listenport, "port", "10000", "port to Listen")
	flag.StringVar(&datapool.FilePath, "filepath", "datapool.csv", "path from csv data file")
	flag.StringVar(&proto, "proto", "http", "http or https")
	// Help text fixed: it previously read "idle server timeout",
	// copy-pasted from the -idle-timeout flag below.
	flag.BoolVar(&handlers.Requestlog, "request-log", false, "enable request logging")
	flag.DurationVar(&wait, "graceful-timeout", time.Second*15, "the duration for which the server gracefully waits for existing connections to finish")
	flag.DurationVar(&readTimeout, "read-timeout", time.Second*15, "read server timeout")
	flag.DurationVar(&writeTimeout, "write-timeout", time.Second*15, "write server timeout")
	flag.DurationVar(&idleTimeout, "idle-timeout", time.Second*60, "idle server timeout")
	// "responce" typos fixed in the two help strings below.
	flag.Float64Var(&handlers.Mean, "mean-timeout", 0.0, "mean response timeout")
	flag.Float64Var(&handlers.Deviation, "deviation-timeout", 0, "deviation response timeout")
	flag.Parse()
	r := mux.NewRouter()
	srv := &http.Server{
		Addr:         "0.0.0.0:" + listenport,
		WriteTimeout: writeTimeout,
		ReadTimeout:  readTimeout,
		IdleTimeout:  idleTimeout,
	}
	r.HandleFunc("/omni-information/api/v1/client/search", handlers.Middleware(handlers.ClientSearch)).Methods(http.MethodPost)
	r.HandleFunc("/omni-information/api/v2/client/product/deposit/list", handlers.Middleware(handlers.DepositList)).Methods(http.MethodPost)
	r.HandleFunc("/omni-information/api/v2/client/product/account/list", handlers.Middleware(handlers.AccountList)).Methods(http.MethodPost)
	// srv.Handler is nil, so the server falls back to
	// http.DefaultServeMux, which delegates everything to the router.
	http.Handle("/", r)
	r.Use(mux.CORSMethodMiddleware(r))
	go func() {
		switch proto {
		case "https":
			log.Printf("Server is run, proto: https, address: %s ", srv.Addr)
			if err := srv.ListenAndServeTLS(pemPath, keyPath); err != nil {
				log.Println(err)
			}
		case "http":
			log.Printf("Server is run, proto: http, address: %s ", srv.Addr)
			if err := srv.ListenAndServe(); err != nil {
				log.Println(err)
			}
		}
	}()
	// Block until SIGINT, then give in-flight requests `wait` to drain.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c
	ctx, cancel := context.WithTimeout(context.Background(), wait)
	defer cancel()
	srv.Shutdown(ctx)
	log.Println("server shutting down")
	os.Exit(0)
}
|
package client
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"github.com/luno/moonbeam/models"
)
var debugRPC = flag.Bool("debug_rpc", true, "Debug RPC")
type Client struct {
endpoint string
c *http.Client
}
func NewClient(c *http.Client, endpoint string) (*Client, error) {
if strings.HasSuffix(endpoint, "/") {
return nil, errors.New("endpoint must not have a trailing slash")
}
return &Client{
endpoint: endpoint,
c: c,
}, nil
}
// do performs one JSON round-trip: req is marshalled as the request
// body, a non-empty authToken becomes a Bearer Authorization header,
// and a 200 response body is unmarshalled into resp. Non-200 responses
// become errors carrying up to 256 bytes of the body. When debugRPC is
// set, both directions are logged in full.
func (c *Client) do(method, path string, authToken string, req, resp interface{}) error {
	url := c.endpoint + path
	buf, err := json.Marshal(req)
	if err != nil {
		return err
	}
	if *debugRPC {
		log.Printf("moonchan/client: %s %s\n%s\n", method, url, string(buf))
	}
	hreq, err := http.NewRequest(method, url, bytes.NewReader(buf))
	if err != nil {
		return err
	}
	if authToken != "" {
		hreq.Header.Add("Authorization", "Bearer "+authToken)
	}
	hresp, err := c.c.Do(hreq)
	if err != nil {
		return err
	}
	defer hresp.Body.Close()
	respBuf, err := ioutil.ReadAll(hresp.Body)
	if err != nil {
		return err
	}
	if *debugRPC {
		log.Printf("moonchan/client: %s %s\n%s\n%s\n",
			method, url, hresp.Status, string(respBuf))
	}
	if hresp.StatusCode != http.StatusOK {
		// Truncate so a huge error page cannot bloat the error string.
		if len(respBuf) > 256 {
			respBuf = respBuf[:256]
		}
		return fmt.Errorf("moonchan/client: http error code %d: %s",
			hresp.StatusCode, string(respBuf))
	}
	return json.Unmarshal(respBuf, resp)
}
// Create POSTs req to /create (unauthenticated) and returns the
// decoded response.
func (c *Client) Create(req models.CreateRequest) (*models.CreateResponse, error) {
	resp := &models.CreateResponse{}
	if err := c.do(http.MethodPost, "/create", "", req, resp); err != nil {
		return nil, err
	}
	return resp, nil
}
// getChannelID builds the canonical channel identifier "<txid>-<vout>".
func getChannelID(txid string, vout uint32) string {
	return fmt.Sprint(txid, "-", vout)
}
// Open PUTs req to /open/<txid>-<vout> (unauthenticated).
func (c *Client) Open(req models.OpenRequest) (*models.OpenResponse, error) {
	resp := &models.OpenResponse{}
	if err := c.do(http.MethodPut, "/open/"+getChannelID(req.TxID, req.Vout), "", req, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Validate PUTs req to /validate/<txid>-<vout> with the auth token.
func (c *Client) Validate(req models.ValidateRequest, authToken string) (*models.ValidateResponse, error) {
	resp := &models.ValidateResponse{}
	if err := c.do(http.MethodPut, "/validate/"+getChannelID(req.TxID, req.Vout), authToken, req, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Send POSTs req to /send/<txid>-<vout> with the auth token.
func (c *Client) Send(req models.SendRequest, authToken string) (*models.SendResponse, error) {
	resp := &models.SendResponse{}
	if err := c.do(http.MethodPost, "/send/"+getChannelID(req.TxID, req.Vout), authToken, req, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Close DELETEs /close/<txid>-<vout> with the auth token.
func (c *Client) Close(req models.CloseRequest, authToken string) (*models.CloseResponse, error) {
	resp := &models.CloseResponse{}
	if err := c.do(http.MethodDelete, "/close/"+getChannelID(req.TxID, req.Vout), authToken, req, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Status GETs /status/<txid>-<vout> with the auth token. The request
// struct is still marshalled as the body, mirroring the other calls.
func (c *Client) Status(req models.StatusRequest, authToken string) (*models.StatusResponse, error) {
	resp := &models.StatusResponse{}
	if err := c.do(http.MethodGet, "/status/"+getChannelID(req.TxID, req.Vout), authToken, req, resp); err != nil {
		return nil, err
	}
	return resp, nil
}
|
package g2util
import (
"time"
)
// TimeoutExecFunc ...
func TimeoutExecFunc(fn func(), timeout time.Duration) {
ch1 := make(chan struct{}, 1)
go func() {
fn()
ch1 <- struct{}{}
}()
select {
case <-time.After(timeout):
return
case <-ch1:
return
}
}
|
package model
import (
"time"
)
// PlayerLicense is the API model for a player license resource; all
// fields are optional on the wire (omitempty).
type PlayerLicense struct {
	// Id of the resource
	Id string `json:"id,omitempty"`
	// Name of the resource
	Name string `json:"name,omitempty"`
	// Creation timestamp formatted in UTC: YYYY-MM-DDThh:mm:ssZ
	CreatedAt *time.Time `json:"createdAt,omitempty"`
	// License Key
	LicenseKey string `json:"licenseKey,omitempty"`
	// Number of impressions recorded
	Impressions *int32 `json:"impressions,omitempty"`
	// Maximum number of impressions
	MaxImpressions *int32 `json:"maxImpressions,omitempty"`
	// Flag if third party licensing is enabled
	ThirdPartyLicensingEnabled *bool `json:"thirdPartyLicensingEnabled,omitempty"`
	// Whitelisted domains
	Domains []Domain `json:"domains,omitempty"`
	// Analytics License Key
	AnalyticsKey string `json:"analyticsKey,omitempty"`
}
|
package file
import (
"fmt"
"log"
"net/http"
"sync"
)
// FileUploadAPI is an http.Handler accepting POST file uploads.
type FileUploadAPI struct{}

// lock is package-level shared state; it is not referenced in the code
// visible here — presumably taken by doPost to serialize uploads.
// TODO(review): confirm.
var lock sync.Mutex
// ServeHTTP reflects the request Origin for CORS and dispatches POST
// requests to doPost; every other method is rejected.
func (f FileUploadAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", r.Header.Get("Origin"))
	switch r.Method {
	case http.MethodPost:
		doPost(w, r)
	default:
		// 405 is the correct status for an unsupported method (was 400),
		// and the "methos" typo is fixed in both the response and log.
		w.WriteHeader(http.StatusMethodNotAllowed)
		fmt.Fprintf(w, "Unsupported method '%v' to %v\n", r.Method, r.URL)
		log.Printf("Unsupported method '%v' to %v\n", r.Method, r.URL)
	}
}
|
package main
import (
"code.google.com/p/go-tour/pic"
)
// Pic returns a dx-by-dy grid of grayscale values computed as
// 100*x*y*y/(x+7) truncated to uint8, for display by the Go tour's
// pic package.
func Pic(dx, dy int) [][]uint8 {
	grid := make([][]uint8, dx)
	for x := range grid {
		row := make([]uint8, dy)
		for y := range row {
			row[y] = uint8(100 * x * (y * y) / (x + 7))
		}
		grid[x] = row
	}
	return grid
}
// main displays the generated image using the Go tour's pic helper.
func main() {
	pic.Show(Pic)
}
package helper
import (
"testing"
)
// TestColorForStatus verifies the status-code class → color mapping.
func TestColorForStatus(t *testing.T) {
	cases := []struct {
		name   string
		status int
		expect string
	}{
		{"Testcase #1: Return Green", 200, Green},
		{"Testcase #2: Return White", 300, White},
		{"Testcase #3: Return Yellow", 400, Yellow},
		{"Testcase #4: Return Red", 500, Red},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := ColorForStatus(tc.status); got != tc.expect {
				t.Errorf("ColorForStatus() = %v, want %v", got, tc.expect)
			}
		})
	}
}
// TestColorForMethod verifies the HTTP-method → color mapping,
// including the fallback Reset color for unknown methods.
func TestColorForMethod(t *testing.T) {
	cases := []struct {
		name   string
		method string
		expect string
	}{
		{"Testcase #1: Return Blue", "GET", Blue},
		{"Testcase #2: Return Cyan", "POST", Cyan},
		{"Testcase #3: Return Yellow", "PUT", Yellow},
		{"Testcase #4: Return Red", "DELETE", Red},
		{"Testcase #5: Return Green", "PATCH", Green},
		{"Testcase #6: Return Magenta", "HEAD", Magenta},
		{"Testcase #7: Return White", "OPTIONS", White},
		{"Testcase #8: Return RESET", "RESET", Reset},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := ColorForMethod(tc.method); got != tc.expect {
				t.Errorf("ColorForMethod() = %v, want %v", got, tc.expect)
			}
		})
	}
}
// TestColorString checks that the String* helpers wrap their input in
// the expected ANSI color escape sequences.
func TestColorString(t *testing.T) {
	t.Run("Test String Red", func(t *testing.T) {
		got := StringRed("String Red")
		if got != "\x1b[31;1mString Red\x1b[0m" {
			t.Errorf("StringRed() = %v, want %v", got, "String Red")
		}
	})
	t.Run("Test String Green", func(t *testing.T) {
		got := StringGreen("String Green")
		if got != "\x1b[32;1mString Green\x1b[0m" {
			t.Errorf("StringGreen() = %v, want %v", got, "String Green")
		}
	})
	t.Run("Test String Yellow", func(t *testing.T) {
		got := StringYellow("String Yellow")
		if got != "\x1b[33;1mString Yellow\x1b[0m" {
			t.Errorf("StringYellow() = %v, want %v", got, "String Yellow")
		}
	})
}
|
/*
* @lc app=leetcode.cn id=1370 lang=golang
*
* [1370] 上升下降字符串
*/
// @lc code=start
package main
// sortString rearranges s (lowercase letters only) by repeatedly
// emitting one of each remaining letter in ascending order, then one of
// each in descending order, until every character is consumed
// (LeetCode 1370).
func sortString(s string) string {
	var freq [26]int
	for i := 0; i < len(s); i++ {
		freq[s[i]-'a']++
	}
	out := make([]byte, 0, len(s))
	for len(out) < len(s) {
		// Ascending pass: smallest remaining letter of each kind.
		for c := 0; c < 26; c++ {
			if freq[c] > 0 {
				out = append(out, byte('a'+c))
				freq[c]--
			}
		}
		// Descending pass: largest remaining letter of each kind.
		for c := 25; c >= 0; c-- {
			if freq[c] > 0 {
				out = append(out, byte('a'+c))
				freq[c]--
			}
		}
	}
	return string(out)
}
// @lc code=end
|
package main
import (
"net/http"
_ "net/http/pprof"
"testing"
)
// Test_extractLang table-tests language-slug extraction: unknown or
// empty languages collapse to "", valid ones are normalized to
// lowercase, hyphen-separated slugs.
func Test_extractLang(t *testing.T) {
	cases := []struct {
		name string
		lang string
		want string
	}{
		{"Handles default value lang parameter", "", ""},
		{"Handle language that is not real", "fakelanguage", ""},
		{"Handle correct language", "go", "go"},
		{"Handle correct language that requires hyphens", "apollo guidance computer", "apollo-guidance-computer"},
		{"Handle language with strange capitalization", "JavaScript", "javascript"},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := extractLang(http.DefaultClient, tc.lang); got != tc.want {
				t.Errorf("extractLang() = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_minInt checks minInt for equal values and both argument orders.
func Test_minInt(t *testing.T) {
	cases := []struct {
		name string
		x, y int
		want int
	}{
		{"Test same number doesn't fail", 5, 5, 5},
		{"Test first order", 5, 4, 4},
		{"Test second order", 4, 5, 4},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := minInt(tc.x, tc.y); got != tc.want {
				t.Errorf("minInt() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package lib
// VersionNumber of the app, as a semantic-version string.
const VersionNumber = "1.1.2"
|
/*
Create a function which concatenates the number 7 to the end of every chord in an array. Ignore all chords which already end with 7.
Examples
jazzify(["G", "F", "C"]) ➞ ["G7", "F7", "C7"]
jazzify(["Dm", "G", "E", "A"]) ➞ ["Dm7", "G7", "E7", "A7"]
jazzify(["F7", "E7", "A7", "Ab7", "Gm7", "C7"]) ➞ ["F7", "E7", "A7", "Ab7", "Gm7", "C7"]
jazzify([]) ➞ []
Notes
Return an empty array if the given array is empty.
You can expect all the tests to have valid chords.
*/
package main
import (
"strings"
)
// main sanity-checks jazzify against the examples from the prompt;
// eq/assert panic on any mismatch.
func main() {
	eq(jazzify([]string{"G", "F", "C"}), []string{"G7", "F7", "C7"})
	eq(jazzify([]string{"Dm", "G", "E", "A"}), []string{"Dm7", "G7", "E7", "A7"})
	eq(jazzify([]string{"F7", "E7", "A7", "Ab7", "Gm7", "C7"}), []string{"F7", "E7", "A7", "Ab7", "Gm7", "C7"})
	eq(jazzify([]string{}), []string{})
}
// jazzify returns a copy of the chord list with "7" appended to every
// chord that does not already end in "7". The input is not modified.
func jazzify(s []string) []string {
	out := make([]string, 0, len(s))
	for _, chord := range s {
		if strings.HasSuffix(chord, "7") {
			out = append(out, chord)
		} else {
			out = append(out, chord+"7")
		}
	}
	return out
}
// assert panics when x is false; minimal test helper for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// eq asserts that a and b have the same length and identical elements
// in order, panicking (via assert) otherwise.
func eq(a, b []string) {
	assert(len(a) == len(b))
	for i, v := range a {
		assert(v == b[i])
	}
}
|
package functions
import (
"go.mongodb.org/mongo-driver/bson"
"context"
"go.mongodb.org/mongo-driver/mongo"
// "fmt"
"rank-server-pikachu/app/models"
)
// Leaderboard is one leaderboard row: a player name with score values.
type Leaderboard struct {
	Name      string `json:"name"`
	Score     int64  `json:"score"`
	HighScore int64  `json:"high_score"`
}
// func UpdateScoreUser(levelModel *models.LevelModel, fbId string, name string, idLv int, time int64, highScore int64, combo int, bestCombo int) {
// for index, value := range rankUser.Data {
// if value.IDLevel == idLv {
// value.Time = time
// value.HighScore = highScore
// value.Combo = combo
// value.BestCombo = bestCombo
// rankUser.Data[index] = value
// return
// }
// }
// newLevel := models.LevelModel {
// Time : time,
// HighScore : highScore,
// Combo : combo,
// BestCombo : bestCombo,
// IDLevel : idLv,
// }
// rankUser.Data = append(rankUser.Data, newLevel)
// }
// ChkUserExist reports whether a document with the given Facebook ID
// exists in the "users" collection.
func ChkUserExist(db *mongo.Database, fbID string) bool {
	res := db.Collection("users").FindOne(context.TODO(), bson.M{"fb_id": fbID})
	// FindOne surfaces "not found" (and any connection error) via Err,
	// so the previous if/else over a boolean collapses to this.
	return res.Err() == nil
}
// ChkLevelUserExist reports whether a "levels" document exists for the
// given user ID and level ID combination.
func ChkLevelUserExist(db *mongo.Database, userID string, idLv int) bool {
	res := db.Collection("levels").FindOne(context.TODO(), bson.M{"$and": []interface{}{
		bson.M{"user_id": userID},
		bson.M{"id_level": idLv},
	}})
	// Err is non-nil for both "no document" and transport errors.
	return res.Err() == nil
}
// FindUserByFBId looks up the user with the given Facebook ID and
// returns (true, user) on success, or (false, zero model) when the
// user is missing or the document cannot be decoded.
func FindUserByFBId(db *mongo.Database, fbID string) (bool, models.UserModel) {
	res := db.Collection("users").FindOne(context.TODO(), bson.M{"fb_id": fbID})
	if res.Err() != nil {
		return false, models.UserModel{}
	}
	var user models.UserModel
	// The Decode error was previously discarded, which reported
	// success with a zero-valued model on a corrupt document.
	if err := res.Decode(&user); err != nil {
		return false, models.UserModel{}
	}
	return true, user
}
// InitChallenge inserts data into the "challenges" collection and
// reports whether the insert succeeded.
func InitChallenge(db *mongo.Database, data models.ChallengeModel) bool {
	_, err := db.Collection("challenges").InsertOne(context.TODO(), data)
	return err == nil
}
// func GetLeaderboard(data models.LevelModel, idLevel int) Leaderboard {
// var tmp Leaderboard
// v := findLevel(data.Data, idLevel)
// if v != nil {
// tmp.Name = data.Name
// tmp.Score = v.HighScore
// }
// return tmp
// }
// func findLevel(data []models.LevelModel, idLevel int) *models.LevelModel {
// for _, value := range data {
// if value.IDLevel == idLevel {
// return &value
// }
// }
// return nil
// } |
package utils
import (
"fmt"
"os"
"strconv"
)
// load environment variable or return default value
func Getenv(key, defaultt string) string {
if value, ok := os.LookupEnv(key); ok {
return value
}
return defaultt
}
// load environment variable or fail
func GetenvOrFail(envname string) string {
value := os.Getenv(envname)
if value == "" {
panic(fmt.Sprintf("Set %s env variable!", envname))
}
return value
}
// load int environment variable or fail
func GetIntEnvOrFail(envname string) int {
valueStr := os.Getenv(envname)
if valueStr == "" {
panic(fmt.Sprintf("Set %s env variable!", envname))
}
value, err:= strconv.Atoi(valueStr)
if err != nil {
panic(fmt.Sprintf("Unable convert '%s' env var '%s' to int!", envname, valueStr))
}
return value
}
// set environment variable or fail
func SetenvOrFail(envname, value string) string {
err := os.Setenv(envname, value)
if err != nil {
panic(err)
}
return value
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//38. Count and Say
//The count-and-say sequence is the sequence of integers with the first five terms as following:
//1. 1
//2. 11
//3. 21
//4. 1211
//5. 111221
//1 is read off as "one 1" or 11.
//11 is read off as "two 1s" or 21.
//21 is read off as "one 2, then one 1" or 1211.
//Given an integer n, generate the nth term of the count-and-say sequence.
//Note: Each term of the sequence of integers will be represented as a string.
//Example 1:
//Input: 1
//Output: "1"
//Example 2:
//Input: 4
//Output: "1211"
//func countAndSay(n int) string {
//}
// Time Is Money |
package main
import (
"bufio"
"compress/gzip"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"github.com/dgraph-io/dgraph/x"
)
var (
	output = flag.String("output", "out.rdf.gz", "Output rdf.gz file")
	genre  = flag.String("genre", "ml-100k/u.genre", "")
	// NOTE(review): the "rating" flag defaults to u.user and the "user"
	// flag to u.data — the names appear swapped relative to their
	// default files; confirm before renaming.
	users = flag.String("rating", "ml-100k/u.user", "")
	data  = flag.String("user", "ml-100k/u.data", "")
	movie = flag.String("movie", "ml-100k/u.item", "")
	// GC and MC offset genre and movie IDs into disjoint UID ranges so
	// they cannot collide with raw user IDs.
	GC = 100000
	MC = 200000
)
// main converts the MovieLens ml-100k dataset (genre, user, rating and
// movie files) into a gzipped RDF triple file for loading into Dgraph.
// Genre and movie IDs are offset by GC and MC so the three ID spaces
// stay disjoint.
func main() {
	// Previously missing: without Parse the -output/-genre/... flags
	// were registered but never took effect.
	flag.Parse()
	o, err := os.OpenFile(*output, os.O_WRONLY|os.O_CREATE, 0755)
	x.Check(err)
	w := gzip.NewWriter(o)
	gf, err := os.Open(*genre)
	x.Check(err)
	defer gf.Close()
	uf, err := os.Open(*users)
	x.Check(err)
	defer uf.Close()
	df, err := os.Open(*data)
	x.Check(err)
	defer df.Close()
	mf, err := os.Open(*movie)
	x.Check(err)
	defer mf.Close()
	var str string
	// Genres: "<name>|<id>" per line -> name triple at UID GC+id.
	br := bufio.NewReader(gf)
	log.Println("Reading genre file")
	for {
		line, err := br.ReadString('\n')
		if err != nil {
			// Was `err == io.EOF` only, which looped forever on any
			// other read error. Note: like the original, a final line
			// without a trailing newline is dropped.
			break
		}
		line = strings.Trim(line, "\n")
		csv := strings.Split(line, "|")
		if len(csv) != 2 {
			continue
		}
		g, err := strconv.ParseInt(csv[1], 10, 32)
		x.Check(err)
		gi := int(g)
		str = fmt.Sprintf("<%v> <name> \"%v\" .\n", GC+gi, csv[0])
		w.Write([]byte(str))
	}
	// Users: "<id>|<age>|<gender>|<occupation>|<zip>" -> four triples.
	br = bufio.NewReader(uf)
	log.Println("Reading user file")
	for {
		line, err := br.ReadString('\n')
		if err != nil {
			break
		}
		line = strings.Trim(line, "\n")
		csv := strings.Split(line, "|")
		if len(csv) != 5 {
			continue
		}
		str = fmt.Sprintf("<%v> <age> \"%v\"^^<xs:int> .\n", csv[0], csv[1])
		w.Write([]byte(str))
		str = fmt.Sprintf("<%v> <gender> \"%v\" .\n", csv[0], csv[2])
		w.Write([]byte(str))
		str = fmt.Sprintf("<%v> <occupation> \"%v\" .\n", csv[0], csv[3])
		w.Write([]byte(str))
		str = fmt.Sprintf("<%v> <zipcode> \"%v\" .\n", csv[0], csv[4])
		w.Write([]byte(str))
	}
	// Ratings: tab-separated "<user> <movie> <rating> <ts>" -> one
	// <rated> edge per line with the rating as a facet.
	br = bufio.NewReader(df)
	log.Println("Reading rating file")
	for {
		line, err := br.ReadString('\n')
		if err != nil {
			break
		}
		line = strings.Trim(line, "\n")
		csv := strings.Split(line, "\t")
		if len(csv) != 4 {
			continue
		}
		g, err := strconv.ParseInt(csv[1], 10, 32)
		x.Check(err)
		gi := int(g)
		str = fmt.Sprintf("<%v> <rated> <%v> (rating=%v) .\n", csv[0], MC+gi, csv[2])
		w.Write([]byte(str))
		// TODO: can add timestamp in facets.
	}
	// Movies: pipe-separated; column 1 is the title, columns 5-23 are
	// 0/1 genre flags mapped back to the GC-offset genre UIDs.
	br = bufio.NewReader(mf)
	log.Println("Reading movies file")
	for {
		line, err := br.ReadString('\n')
		if err != nil {
			break
		}
		line = strings.Trim(line, "\n")
		csv := strings.Split(line, "|")
		if len(csv) != 24 {
			continue
		}
		g, err := strconv.ParseInt(csv[0], 10, 32)
		x.Check(err)
		gi := int(g)
		str = fmt.Sprintf("<%v> <name> \"%v\" .\n", MC+gi, csv[1])
		w.Write([]byte(str))
		for i := 5; i < 24; i++ {
			if csv[i] == "0" {
				continue
			}
			str = fmt.Sprintf("<%v> <genre> <%v> .\n", MC+gi, GC+i-5)
			w.Write([]byte(str))
		}
	}
	log.Println("Finished.") // typo fix (was "Finised.")
	x.Check(w.Flush())
	x.Check(w.Close())
	x.Check(o.Close())
}
|
package provider
import (
"context"
"errors"
"fmt"
"github.com/alexzimmer96/eventing"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// MongoEventGenerator decodes the cursor's current document into a
// concrete eventing.Event.
type MongoEventGenerator func(cursor *mongo.Cursor) (eventing.Event, error)

// MongoProjectionGenerator decodes a single query result into a
// concrete eventing.Projection.
type MongoProjectionGenerator func(result *mongo.SingleResult) (eventing.Projection, error)

// MongoStorageProvider persists events and projections in MongoDB.
// Events are written to collectionName; each projection goes to the
// collection it names via GetCollectionName. eventRegistry maps
// event_type strings to decoders registered through WithEvent.
type MongoStorageProvider struct {
	db                  *mongo.Database
	collectionName      string
	eventRegistry       map[string]MongoEventGenerator
	projectionGenerator MongoProjectionGenerator
}
// NewMongoStorageProvider builds a provider that stores events in the
// named collection of db and rebuilds projections with generator.
// Register event decoders on the result via WithEvent.
func NewMongoStorageProvider(db *mongo.Database, collection string, generator MongoProjectionGenerator) *MongoStorageProvider {
	provider := &MongoStorageProvider{}
	provider.db = db
	provider.collectionName = collection
	provider.eventRegistry = make(map[string]MongoEventGenerator)
	provider.projectionGenerator = generator
	return provider
}
// WithEvent registers generator as the decoder for events whose
// event_type equals name, returning the provider for chaining.
func (provider *MongoStorageProvider) WithEvent(name string, generator MongoEventGenerator) *MongoStorageProvider {
	provider.eventRegistry[name] = generator
	return provider
}

// SaveEvent appends event to the provider's event collection.
func (provider *MongoStorageProvider) SaveEvent(ctx context.Context, event eventing.Event) error {
	_, err := provider.db.Collection(provider.collectionName).InsertOne(ctx, event)
	return err
}

// SaveProjection upserts projection into its own collection, keyed by
// entity_id.
func (provider *MongoStorageProvider) SaveProjection(ctx context.Context, projection eventing.Projection) error {
	_, err := provider.db.Collection(projection.GetCollectionName()).ReplaceOne(
		ctx,
		bson.M{"entity_id": projection.GetEntityID()},
		projection,
		options.Replace().SetUpsert(true),
	)
	return err
}
// GetProjection loads the most recent projection document for entityID
// from projection's collection and decodes it with the provider's
// projectionGenerator. Returns (nil, nil) when no document exists.
func (provider *MongoStorageProvider) GetProjection(ctx context.Context, entityID string, projection eventing.Projection) (eventing.Projection, error) {
	result := provider.db.Collection(projection.GetCollectionName()).FindOne(
		ctx,
		bson.D{{"entity_id", entityID}},
		options.FindOne().SetSort(bson.D{{"last_event_time", -1}}), // ordering by -1 means newest first
	)
	if result.Err() == mongo.ErrNoDocuments {
		return nil, nil
	}
	if result.Err() != nil {
		return nil, result.Err()
	}
	return provider.projectionGenerator(result)
}
// GetLatestEventIDForEntityID returns the event_id of the newest event
// stored for entityID, or "" when that document carries no event_id.
// A missing entity surfaces as the FindOne error (mongo.ErrNoDocuments).
func (provider *MongoStorageProvider) GetLatestEventIDForEntityID(ctx context.Context, entityID string) (string, error) {
	result := provider.db.Collection(provider.collectionName).FindOne(
		ctx,
		bson.D{{"entity_id", entityID}},
		options.FindOne().SetSort(bson.D{{"created_at", -1}}), // ordering by -1 means newest first
	)
	if result.Err() != nil {
		return "", result.Err()
	}
	m := make(map[string]interface{})
	if err := result.Decode(&m); err != nil {
		return "", err
	}
	// Check presence explicitly: fmt.Sprintf("%v", nil) yields the
	// non-empty string "<nil>", which the old length check mistakenly
	// returned as a valid event ID.
	raw, ok := m["event_id"]
	if !ok || raw == nil {
		return "", nil
	}
	return fmt.Sprintf("%v", raw), nil
}
// GetSortedEventsForEntityID returns all events stored for entityID in
// chronological order (oldest first), decoded through the registered
// event generators.
func (provider *MongoStorageProvider) GetSortedEventsForEntityID(ctx context.Context, entityID string) ([]eventing.Event, error) {
	cursor, err := provider.db.Collection(provider.collectionName).Find(
		ctx, // was context.TODO(), which ignored caller cancellation
		bson.D{{"entity_id", entityID}},
		options.Find().SetSort(bson.D{{"created_at", 1}}), // ordering by 1 means oldest first
	)
	if err != nil {
		return nil, err
	}
	defer cursor.Close(ctx) // was never closed, leaking the server-side cursor
	var events []eventing.Event
	for cursor.Next(ctx) {
		event, err := provider.getEventFromRaw(cursor)
		if err != nil {
			// Propagate the real decode error instead of masking it
			// with a generic "no fetchedEvent found" message.
			return nil, err
		}
		events = append(events, event)
	}
	// Surface iteration errors, which the loop condition swallows.
	if err := cursor.Err(); err != nil {
		return nil, err
	}
	return events, nil
}
// getEventFromRaw decodes the cursor's current document by looking up
// its event_type and dispatching to the generator registered for it.
func (provider *MongoStorageProvider) getEventFromRaw(raw *mongo.Cursor) (eventing.Event, error) {
	rawEventType, err := raw.Current.LookupErr("event_type")
	if err != nil {
		return nil, errors.New("could not process event")
	}
	eventType := rawEventType.StringValue()
	generator, ok := provider.eventRegistry[eventType]
	if !ok { // was `ok == false`
		// fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) anti-pattern.
		return nil, fmt.Errorf("event is not registered in mongo storage provider: %s", eventType)
	}
	return generator(raw)
}
|
package btree
// BTree is a binary tree addressed through its root node, Top.
type BTree struct {
	Top *Node
}

// NewBTree returns an empty tree.
func NewBTree() *BTree {
	return &BTree{}
}

// Insert adds v to the tree, creating the root when the tree is empty.
func (b *BTree) Insert(v int) {
	if b.Top != nil {
		b.Top.Insert(v)
		return
	}
	b.Top = NewNode(v)
}

// InsertMany inserts each element of values in order.
func (b *BTree) InsertMany(values []int) {
	for i := range values {
		b.Insert(values[i])
	}
}

// FindDeepest returns the deepest node and its depth, counting the
// root as depth 0.
// NOTE(review): panics when the tree is empty (nil Top) — confirm
// callers guard against that.
func (b *BTree) FindDeepest() (*Node, int) {
	return b.Top.FindDeepest(0)
}
|
package main
import (
"testing"
)
// TestStudentLearn checks that a message spread by a teacher reaches
// an enrolled student.
func TestStudentLearn(t *testing.T) {
	const message = "Message sent to everyone"
	teacher := Teacher{}
	pupil := NewClassMate("Mario")
	teacher.TeachesTo(pupil)
	teacher.Spread(message)
	if pupil.Learned() != message {
		t.Error("Student should learn")
	}
}
|
package css
// PseudoClass names a CSS pseudo-class (or pseudo-element) suffix that
// is appended to a selector after a colon.
type PseudoClass string

// The pseudo-classes this package knows about.
const (
	FirstChild   PseudoClass = "first-child"
	LastChild    PseudoClass = "last-child"
	After        PseudoClass = "after"
	Before       PseudoClass = "before"
	Hover        PseudoClass = "hover"
	Visited      PseudoClass = "visited"
	Active       PseudoClass = "active"
	Link         PseudoClass = "link"
	MozFocusRing PseudoClass = "-moz-focusring"
)
// SelectorWithPseudoClass pairs a base selector with a pseudo-class,
// rendering as "<element>:<pseudo-class>".
type SelectorWithPseudoClass struct {
	Element     Selector    // the base selector being qualified
	PseudoClass PseudoClass // the pseudo-class appended after ":"
}
// Selector renders the combined "<element>:<pseudo-class>" selector text.
func (selector SelectorWithPseudoClass) Selector() string {
	base := selector.Element.Selector()
	pseudo := string(selector.PseudoClass)
	return base + ":" + pseudo
}
// Style builds a rule set applying the given properties to this selector.
func (selector SelectorWithPseudoClass) Style(properties ...Property) RuleSet {
	rule := For(selector)
	return rule.Set(properties...)
}
|
package schemas
// Scope identifies which logical table group a record belongs to.
type Scope int

// Table enumerator. ScopeUndefined is the zero-value sentinel.
const (
	ScopeUndefined Scope = iota
	Application
	Execution
	Metric
	Planning
	Prediction
	Recommendation
	Resource
)
// MetricType identifies the kind of measurement a metric record carries.
type MetricType int

// Metric type enumerator. MetricTypeUndefined is the zero-value sentinel.
const (
	MetricTypeUndefined MetricType = iota
	CPUUsageSecondsPercentage
	MemoryUsageBytes
	PowerUsageWatts
	TemperatureCelsius
	DutyCycle
	CurrentOffset
	Lag
	Latency
	Number
	CPUCores
)
// ResourceBoundary distinguishes raw values from upper/lower bound values.
type ResourceBoundary int

// Resource boundary enumerator. ResourceBoundaryUndefined is the zero-value sentinel.
const (
	ResourceBoundaryUndefined ResourceBoundary = iota
	ResourceRaw
	ResourceUpperBound
	ResourceLowerBound
)
// ResourceQuota distinguishes limit/request style resource quotas.
type ResourceQuota int

// Resource quota enumerator. ResourceQuotaUndefined is the zero-value sentinel.
const (
	ResourceQuotaUndefined ResourceQuota = iota
	ResourceLimit
	ResourceRequest
	ResourceInitialLimit
	ResourceInitialRequest
)
// ColumnType distinguishes InfluxDB tag columns from field columns.
type ColumnType int

// Influxdb column type enumerator. ColumnTypeUndefined is the zero-value sentinel.
const (
	ColumnTypeUndefined ColumnType = iota
	Tag
	Field
)
|
/*
In a letter to Lord Bowden in 1837, Charles Babbage asked, "What is the smallest positive integer whose square ends in 269,696?". He thought the answer was 99,736 whose square is 9,947,269,696. Was he right?
Write a function that takes a positive integer n and returns the smallest number whose square ends with n.
One small twist, if n == 269696 return "Babbage was correct!" if the smallest number whose square ends with 269,696 is 99,736, otherwise return "Babbage was incorrect!".
Examples
babbage(25) ➞ 5
babbage(161) ➞ 119
// 119 * 119 == 14,161
// Ends with 161
babbage(269696) ➞ "Babbage was {?}!"
// Replace {?} with the word "correct" or "incorrect".
Notes
n will always be a positive integer n > 0.
Make sure your solution is efficient enough to pass the tests within a 12 second time limit.
*/
package main
import (
"fmt"
"strings"
)
// main runs the fixed test cases against babbage, panicking on the first
// mismatch (via assert), exactly as the original inline assertion list did.
func main() {
	cases := []struct {
		n    int
		want interface{}
	}{
		{25, 5},
		{161, 119},
		{481, 59},
		{7009, 497},
		{990025, 995},
		{327369, 57213},
		{269696, "Babbage was incorrect!"},
		{33765625, 28875},
		{314062500, 36250},
	}
	for _, c := range cases {
		assert(babbage(c.n) == c.want)
	}
}
// assert panics when x is false; it is the test harness for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// babbage returns the smallest non-negative integer whose square ends with
// the decimal digits of n. For the special input 269696 it instead reports
// whether Babbage's guess (99736) is that smallest integer.
//
// The last len(s) digits of i*i depend only on i mod 10^len(s), so the
// search over [0, 10^len(s)) is exhaustive; the original bound `i < n`
// was only correct by coincidence for n > 10^(len(s)-1) solutions.
func babbage(n int) interface{} {
	s := fmt.Sprint(n)
	limit := 1
	for range s {
		limit *= 10
	}
	for i := 0; i < limit; i++ {
		if !strings.HasSuffix(fmt.Sprint(i*i), s) {
			continue
		}
		if n == 269696 {
			if i == 99736 {
				return "Babbage was correct!"
			}
			return "Babbage was incorrect!"
		}
		return i
	}
	// No square ends with n (e.g. n ending in 2, 3, 7 or 8); keep the
	// original fallback of returning n itself.
	return n
}
|
package service
import (
"github.com/godcong/role-manager-server/model"
"github.com/sirupsen/logrus"
)
// Seed inserts the seed menu records into storage. Permission seeding is
// currently disabled (kept below for reference). A failed insert aborts
// seeding and is logged instead of being silently swallowed.
func Seed() {
	//for _, v := range Permissions() {
	//	e := model.InsertOne(v)
	//	if e != nil {
	//		return
	//	}
	//}
	for _, v := range Menus() {
		if e := model.InsertOne(v); e != nil {
			// Previously the error was discarded, making failed seeding
			// indistinguishable from success.
			logrus.Errorf("seed menu %s: %s", v.Slug, e)
			return
		}
	}
}
// Menus returns the seed menu definitions (currently the merchant panel).
func Menus() []*model.Menu {
	menu := model.NewMenu()
	menu.Slug = "org.media.list"
	menu.Name = "商户面板"
	menu.Description = "商户面板"
	menu.Active = "slideOrgHome"
	return []*model.Menu{menu}
}
// Permissions returns the full set of seed permission definitions.
// Description always mirrors Name, so the table below lists each pair once.
func Permissions() []*model.Permission {
	defs := []struct {
		slug string
		name string
	}{
		{"dashboard.log.list", "日志信息"},
		{"dashboard.permission.list", "权限列表"},
		{"dashboard.permission.add", "添加权限"},
		{"dashboard.permission.update", "更新权限"},
		{"dashboard.permission.delete", "删除权限"},
		{"dashboard.user.list", "管理用户列表"},
		{"dashboard.user.add", "添加管理用户"},
		{"dashboard.user.update", "更新管理用户"},
		{"dashboard.user.delete", "删除管理用户"},
		{"dashboard.role.list", "角色列表"},
		{"dashboard.role.add", "添加角色权限"},
		{"dashboard.role.delete", "删除权限"},
		{"dashboard.role.update", "更新权限"},
		{"admin.organization.list", "组织列表"},
		{"admin.organization.add", "添加组织管理用户"},
		{"admin.organization.update", "更新组织"},
		{"admin.organization.delete", "删除组织"},
		{"org.media.list", "视频列表"},
		{"org.media.add", "视频添加"},
		// NOTE(review): the original code appended "org.media.update" twice
		// (copy-paste); the duplicate is dropped here. If an
		// "org.media.delete" entry was intended, add it explicitly.
		{"org.media.update", "视频更新"},
		{"user.media.list", "用户视频列表"},
		{"user.permission.list", "用户权限列表"},
		{"user.role.list", "用户角色列表"},
		{"user.report.list", "用户举报列表"},
		{"exorcist.user.list", "用户列表"},
		{"exorcist.user.update", "更新用户"},
	}
	permissions := make([]*model.Permission, 0, len(defs))
	for _, d := range defs {
		p := model.NewPermission()
		p.Slug = d.slug
		p.Name = d.name
		p.Description = d.name
		permissions = append(permissions, p)
	}
	//p = model.NewPermission()
	//p.Slug = "dashboard.user.delete"
	//p.Name = "管理用户信息"
	//p.Description = "管理用户信息"
	//permissions = append(permissions, p)
	logrus.Info(permissions)
	return permissions
}
|
package main
import (
"fmt"
"html"
)
//START OMIT
// main demonstrates why untrusted input must be HTML-escaped before being
// echoed into a page: the first Printf shows the raw (dangerous) markup,
// the second shows the neutralized form.
func main() {
	// value received from query args (fixed "recieved" typo)
	queryMsg := "<b>Evil Hacker Script!</b>"
	// Print as substitute for echoing message on Web Page
	fmt.Printf("Dangerous: \n- %s\n\n", queryMsg)
	// Label fixed from "Save" to "Safe" — the point of this line is that
	// html.EscapeString makes the output safe to embed.
	fmt.Printf("Safe: \n- %s\n", html.EscapeString(queryMsg)) // HL
}
//END OMIT
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main
import "fmt"
// ---------------------------------------------------------
// EXERCISE: Convert and Fix #5
//
// Fix the code.
//
// HINTS
// maximum of int8 can be 127
// maximum of int16 can be 32767
//
// EXPECTED OUTPUT
// 1127
// ---------------------------------------------------------
// main prints min + max as the exercise's expected output (1127).
func main() {
	// DO NOT TOUCH THIS VARIABLES
	min := int8(127)
	max := int16(1000)
	// FIX: convert the smaller-typed value UP to int16. The original
	// `int8(max)` truncated 1000 (out of int8 range, wraps to -24) and
	// printed 103; int16 holds both operands and their sum 1127.
	fmt.Println(max + int16(min))
}
|
package bccsp
// Identifiers for the algorithms, key types and hash families supported by
// the BCCSP option types below.
const (
	// ECDSA family: default, explicit NIST P-256/P-384, and key re-randomization.
	ECDSA       = "ECDSA"
	ECDSAP256   = "ECDSAP256"
	ECDSAP384   = "ECDSAP384"
	ECDSAReRand = "ECDSA_RERAND"
	// RSA family at default and explicit modulus lengths.
	RSA     = "RSA"
	RSA1024 = "RSA1024"
	RSA2048 = "RSA2048"
	RSA3072 = "RSA3072"
	RSA4096 = "RSA4096"
	// AES family at default and explicit key lengths.
	AES    = "AES"
	AES128 = "AES128"
	AES192 = "AES192"
	AES256 = "AES256"
	// HMAC keys, including the 256-bit-truncated variant.
	HMAC             = "HMAC"
	HMACTruncated256 = "HMAC_TRUNCATED_256"
	// Hash families and explicit digest sizes.
	SHA      = "SHA"
	SHA2     = "SHA2"
	SHA3     = "SHA3"
	SHA256   = "SHA256"
	SHA384   = "SHA384"
	SHA3_256 = "SHA3_256"
	SHA3_384 = "SHA3_384"
	// X509Certificate identifies an X.509 certificate used for key import.
	X509Certificate = "X509Certificate"
)
// ECDSAKeyGenOpts contains options for ECDSA key generation.
type ECDSAKeyGenOpts struct {
	Temporary bool // true if the generated key must not be persisted
}

// Algorithm returns the key generation algorithm identifier.
func (opts *ECDSAKeyGenOpts) Algorithm() string {
	return ECDSA
}

// Ephemeral returns true if the key to generate has to be ephemeral.
func (opts *ECDSAKeyGenOpts) Ephemeral() bool {
	return opts.Temporary
}

// ECDSAPKIXPublicKeyImportOpts contains options for importing an ECDSA
// public key in PKIX format.
type ECDSAPKIXPublicKeyImportOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *ECDSAPKIXPublicKeyImportOpts) Algorithm() string {
	return ECDSA
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *ECDSAPKIXPublicKeyImportOpts) Ephemeral() bool {
	return opts.Temporary
}

// ECDSAPrivateKeyImportOpts contains options for importing an ECDSA
// private key.
type ECDSAPrivateKeyImportOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *ECDSAPrivateKeyImportOpts) Algorithm() string {
	return ECDSA
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *ECDSAPrivateKeyImportOpts) Ephemeral() bool {
	return opts.Temporary
}

// ECDSAPrivateKey256K1ImportOpts contains options for importing an ECDSA
// private key on the secp256k1 curve (per the type name; confirm with the
// consuming implementation).
type ECDSAPrivateKey256K1ImportOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *ECDSAPrivateKey256K1ImportOpts) Algorithm() string {
	return ECDSA
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *ECDSAPrivateKey256K1ImportOpts) Ephemeral() bool {
	return opts.Temporary
}

// ECDSAGoPublicKeyImportOpts contains options for importing an ECDSA
// public key given as a Go ecdsa.PublicKey.
type ECDSAGoPublicKeyImportOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *ECDSAGoPublicKeyImportOpts) Algorithm() string {
	return ECDSA
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *ECDSAGoPublicKeyImportOpts) Ephemeral() bool {
	return opts.Temporary
}

// ECDSAReRandKeyOpts contains options for ECDSA key re-randomization.
type ECDSAReRandKeyOpts struct {
	Temporary bool   // true if the derived key must not be persisted
	Expansion []byte // re-randomization factor
}

// Algorithm returns the key derivation algorithm identifier.
func (opts *ECDSAReRandKeyOpts) Algorithm() string {
	return ECDSAReRand
}

// Ephemeral returns true if the derived key has to be ephemeral.
func (opts *ECDSAReRandKeyOpts) Ephemeral() bool {
	return opts.Temporary
}

// ExpansionValue returns the re-randomization factor.
func (opts *ECDSAReRandKeyOpts) ExpansionValue() []byte {
	return opts.Expansion
}
// AESKeyGenOpts contains options for AES key generation at the default
// security level.
type AESKeyGenOpts struct {
	Temporary bool // true if the generated key must not be persisted
}

// Algorithm returns the key generation algorithm identifier.
func (opts *AESKeyGenOpts) Algorithm() string {
	return AES
}

// Ephemeral returns true if the key to generate has to be ephemeral.
func (opts *AESKeyGenOpts) Ephemeral() bool {
	return opts.Temporary
}

// AESCBCPKCS7ModeOpts contains options for AES in CBC mode with PKCS7 padding.
type AESCBCPKCS7ModeOpts struct{}

// HMACTruncated256AESDeriveKeyOpts contains options for deriving an AES key
// via HMAC truncated to 256 bits.
type HMACTruncated256AESDeriveKeyOpts struct {
	Temporary bool   // true if the derived key must not be persisted
	Arg       []byte // argument passed to the HMAC
}

// Algorithm returns the key derivation algorithm identifier.
func (opts *HMACTruncated256AESDeriveKeyOpts) Algorithm() string {
	return HMACTruncated256
}

// Ephemeral returns true if the derived key has to be ephemeral.
func (opts *HMACTruncated256AESDeriveKeyOpts) Ephemeral() bool {
	return opts.Temporary
}

// Argument returns the argument to be passed to the HMAC.
func (opts *HMACTruncated256AESDeriveKeyOpts) Argument() []byte {
	return opts.Arg
}

// HMACDeriveKeyOpts contains options for HMAC key derivation.
type HMACDeriveKeyOpts struct {
	Temporary bool   // true if the derived key must not be persisted
	Arg       []byte // argument passed to the HMAC
}

// Algorithm returns the key derivation algorithm identifier.
func (opts *HMACDeriveKeyOpts) Algorithm() string {
	return HMAC
}

// Ephemeral returns true if the derived key has to be ephemeral.
func (opts *HMACDeriveKeyOpts) Ephemeral() bool {
	return opts.Temporary
}

// Argument returns the argument to be passed to the HMAC.
func (opts *HMACDeriveKeyOpts) Argument() []byte {
	return opts.Arg
}

// AES256ImportKeyOpts contains options for importing AES 256-bit keys.
type AES256ImportKeyOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *AES256ImportKeyOpts) Algorithm() string {
	return AES
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *AES256ImportKeyOpts) Ephemeral() bool {
	return opts.Temporary
}

// HMACImportKeyOpts contains options for importing HMAC keys.
type HMACImportKeyOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *HMACImportKeyOpts) Algorithm() string {
	return HMAC
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *HMACImportKeyOpts) Ephemeral() bool {
	return opts.Temporary
}
// SHAOpts contains options for computing SHA digests.
type SHAOpts struct {
}

// Algorithm returns the hash algorithm identifier.
func (opts *SHAOpts) Algorithm() string {
	return SHA
}

// RSAKeyGenOpts contains options for RSA key generation at the default
// security level.
type RSAKeyGenOpts struct {
	Temporary bool // true if the generated key must not be persisted
}

// Algorithm returns the key generation algorithm identifier.
func (opts *RSAKeyGenOpts) Algorithm() string {
	return RSA
}

// Ephemeral returns true if the key to generate has to be ephemeral.
func (opts *RSAKeyGenOpts) Ephemeral() bool {
	return opts.Temporary
}

// RSAGoPublicKeyImportOpts contains options for importing an RSA public
// key given as a Go rsa.PublicKey.
type RSAGoPublicKeyImportOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *RSAGoPublicKeyImportOpts) Algorithm() string {
	return RSA
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *RSAGoPublicKeyImportOpts) Ephemeral() bool {
	return opts.Temporary
}

// X509PublicKeyImportOpts contains options for importing the public key
// carried by an X.509 certificate.
type X509PublicKeyImportOpts struct {
	Temporary bool // true if the imported key must not be persisted
}

// Algorithm returns the key importation algorithm identifier.
func (opts *X509PublicKeyImportOpts) Algorithm() string {
	return X509Certificate
}

// Ephemeral returns true if the key to import has to be ephemeral.
func (opts *X509PublicKeyImportOpts) Ephemeral() bool {
	return opts.Temporary
}
|
package jsoniter
import (
"unsafe"
"reflect"
)
// mapDecoder decodes a JSON object into a Go map whose element type is
// known at construction time.
type mapDecoder struct {
	mapType      reflect.Type   // concrete map type being decoded into
	elemType     reflect.Type   // map value type
	elemDecoder  Decoder        // decoder for individual values
	mapInterface emptyInterface // template interface header carrying mapType
}

// decode reads one JSON object from iter into the map at ptr, allocating
// the map first when the destination is nil.
func (decoder *mapDecoder) decode(ptr unsafe.Pointer, iter *Iterator) {
	// dark magic to cast unsafe.Pointer back to interface{} using reflect.Type:
	// copy the template interface header and point its data word at ptr.
	mapInterface := decoder.mapInterface
	mapInterface.word = ptr
	realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
	realVal := reflect.ValueOf(*realInterface).Elem()
	if realVal.IsNil() {
		// Lazily allocate so decoding into a nil map still works.
		realVal.Set(reflect.MakeMap(realVal.Type()))
	}
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		elem := reflect.New(decoder.elemType)
		decoder.elemDecoder.decode(unsafe.Pointer(elem.Pointer()), iter)
		// to put into map, we have to use reflection.
		// string([]byte(field)) forces a fresh copy of the key — presumably
		// to detach it from the iterator's reusable buffer; confirm against
		// Iterator.ReadObject's ownership rules.
		realVal.SetMapIndex(reflect.ValueOf(string([]byte(field))), elem.Elem())
	}
}
// mapEncoder encodes a Go map as a JSON object, delegating value encoding
// to elemEncoder.
type mapEncoder struct {
	mapType      reflect.Type   // concrete map type being encoded
	elemType     reflect.Type   // map value type
	elemEncoder  Encoder        // encoder for individual values
	mapInterface emptyInterface // template interface header carrying mapType
}

// encode writes the map at ptr to stream as a JSON object.
// NOTE(review): iteration follows reflect's map key order, which is not
// deterministic across runs.
func (encoder *mapEncoder) encode(ptr unsafe.Pointer, stream *Stream) {
	// Rebuild an interface{} for the map from the template header + ptr.
	mapInterface := encoder.mapInterface
	mapInterface.word = ptr
	realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
	realVal := reflect.ValueOf(*realInterface)
	stream.WriteObjectStart()
	for i, key := range realVal.MapKeys() {
		if i != 0 {
			stream.WriteMore() // comma between members
		}
		stream.WriteObjectField(key.String())
		val := realVal.MapIndex(key).Interface()
		encoder.elemEncoder.encodeInterface(val, stream)
	}
	stream.WriteObjectEnd()
}

// encodeInterface encodes a map received as interface{} via the shared
// writeToStream helper.
func (encoder *mapEncoder) encodeInterface(val interface{}, stream *Stream) {
	writeToStream(val, stream, encoder)
}

// isEmpty reports whether the map at ptr has no entries (used for omitempty).
func (encoder *mapEncoder) isEmpty(ptr unsafe.Pointer) bool {
	mapInterface := encoder.mapInterface
	mapInterface.word = ptr
	realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
	realVal := reflect.ValueOf(*realInterface)
	return realVal.Len() == 0
}
// mapInterfaceEncoder is a variant of mapEncoder whose value encoder is fed
// a pointer to an interface{} holding each value, rather than the value's
// own interface (see encode below for the difference from mapEncoder).
type mapInterfaceEncoder struct {
	mapType      reflect.Type   // concrete map type being encoded
	elemType     reflect.Type   // map value type
	elemEncoder  Encoder        // encoder for individual values
	mapInterface emptyInterface // template interface header carrying mapType
}

// encode writes the map at ptr to stream as a JSON object, passing each
// value to elemEncoder as &val (pointer to a local interface{} copy).
func (encoder *mapInterfaceEncoder) encode(ptr unsafe.Pointer, stream *Stream) {
	// Rebuild an interface{} for the map from the template header + ptr.
	mapInterface := encoder.mapInterface
	mapInterface.word = ptr
	realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
	realVal := reflect.ValueOf(*realInterface)
	stream.WriteObjectStart()
	for i, key := range realVal.MapKeys() {
		if i != 0 {
			stream.WriteMore() // comma between members
		}
		stream.WriteObjectField(key.String())
		val := realVal.MapIndex(key).Interface()
		encoder.elemEncoder.encode(unsafe.Pointer(&val), stream)
	}
	stream.WriteObjectEnd()
}

// encodeInterface encodes a map received as interface{} via the shared
// writeToStream helper.
func (encoder *mapInterfaceEncoder) encodeInterface(val interface{}, stream *Stream) {
	writeToStream(val, stream, encoder)
}

// isEmpty reports whether the map at ptr has no entries (used for omitempty).
func (encoder *mapInterfaceEncoder) isEmpty(ptr unsafe.Pointer) bool {
	mapInterface := encoder.mapInterface
	mapInterface.word = ptr
	realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
	realVal := reflect.ValueOf(*realInterface)
	return realVal.Len() == 0
}
// Copyright 2017 Vlad Didenko. All rights reserved.
// See the included LICENSE.md file for licensing information
package slops // import "go.didenko.com/slops"
// Merge returns a slice with a union of strings in slices.
// For duplicate entries, the resulting slice contains the
// maximum numbers of duplicate strings between the original
// slices. Both left and right slices are expected to be sorted.
// It delegates to CollectVariety with GetAll for every run
// (left-only, common, right-only — presumably; confirm against
// CollectVariety's selector semantics).
func Merge(left, right []string) []string {
	return CollectVariety(left, right, GetAll, GetAll, GetAll)
}
// MergeUnique returns a slice with a union of strings in slices.
// The resulting slice contains one entry for each set of
// duplicate strings in the original slices. Both left
// and right slices are expected to be sorted.
// It delegates to CollectVariety with GetUnique for every run,
// which collapses duplicates within each run.
func MergeUnique(left, right []string) []string {
	return CollectVariety(left, right, GetUnique, GetUnique, GetUnique)
}
|
package handlers
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"dream01/internal/intlog"
"github.com/gorilla/mux"
)
// RadioStation describes one radio station entry as exchanged with the UI.
type RadioStation struct {
	ID      int    `json:"id"`      // station identifier
	Name    string `json:"name"`    // display name
	URL     string `json:"url"`     // stream URL
	Logo    string `json:"logo"`    // logo image URL
	InfoURL string `json:"infoUrl"` // station info page URL
}
// homeHandler serves the built UI entry page from disk.
func homeHandler(w http.ResponseWriter, r *http.Request) {
	b, err := ioutil.ReadFile("../../ui/dist/index.html")
	if err != nil {
		intlog.Error(err.Error())
		// Previously the handler fell through and wrote an empty 200;
		// report the failure instead.
		http.Error(w, "index page unavailable", http.StatusInternalServerError)
		return
	}
	w.Write(b)
}
// getRecordsHandler is not implemented yet; it currently responds 200 with
// an empty body.
func getRecordsHandler(w http.ResponseWriter, r *http.Request) {
	// TODO: implement records listing.
}
// uploadHandler is not implemented yet; it currently responds 200 with an
// empty body.
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	// TODO: implement upload handling.
}
func getFavourites(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadFile("./data/stations.json")
if err != nil {
fmt.Println(err.Error())
}
w.Write(b)
}
// getTop500 returns the classic "rock" station list from the upstream API
// as JSON.
func getTop500(w http.ResponseWriter, r *http.Request) {
	sl, err := apiClient.GetClassic("rock")
	if err != nil {
		// Previously both errors were discarded and "null"/garbage could be
		// written with a 200 status.
		http.Error(w, "upstream lookup failed", http.StatusBadGateway)
		return
	}
	b, err := json.Marshal(&sl)
	if err != nil {
		http.Error(w, "encoding failed", http.StatusInternalServerError)
		return
	}
	w.Write(b)
}
// getStreamSourceByID resolves a shoutcast station id (path variable "id")
// to its playable stream URLs by downloading and parsing the station's PLS
// playlist, and writes them back as a JSON array of strings.
func getStreamSourceByID(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	base := "/sbin/tunein-station.pls"
	url := fmt.Sprintf("http://yp.shoutcast.com%s?id=%v", base, vars["id"])
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println(err)
		// Previously execution continued and dereferenced a nil resp.
		w.WriteHeader(http.StatusBadGateway)
		return
	}
	defer resp.Body.Close() // was leaked
	if resp.StatusCode != http.StatusOK {
		w.WriteHeader(resp.StatusCode)
		return
	}
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadGateway)
		return
	}
	// A PLS playlist carries stream URLs on lines like "File1=http://...".
	var sources []string
	for _, l := range strings.Split(string(b), "\n") {
		l = strings.TrimSpace(l)
		if !strings.Contains(l, "File") {
			continue
		}
		// Only take the value after "=": the original appended the whole
		// line when "=" was absent.
		if i := strings.Index(l, "="); i >= 0 {
			sources = append(sources, l[i+1:])
		}
	}
	if len(sources) == 0 {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	b, err = json.Marshal(sources)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Write(b)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"bytes"
"context"
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
testddlutil "github.com/pingcap/tidb/ddl/testutil"
"github.com/pingcap/tidb/ddl/util/callback"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/external"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/stretchr/testify/require"
)
// TestTableForeignKey exercises foreign-key DDL error paths: FK referencing
// a missing column at CREATE and ALTER time, mismatched key arity, and
// dropping/changing/modifying a column that an FK depends on.
func TestTableForeignKey(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t1 (a int, b int, index(a), index(b));")
	// test create table with foreign key.
	failSQL := "create table t2 (c int, foreign key (a) references t1(a));"
	tk.MustGetErrCode(failSQL, errno.ErrKeyColumnDoesNotExits)
	// test add foreign key.
	tk.MustExec("create table t3 (a int, b int);")
	failSQL = "alter table t1 add foreign key (c) REFERENCES t3(a);"
	tk.MustGetErrCode(failSQL, errno.ErrKeyColumnDoesNotExits)
	// test origin key not match error
	failSQL = "alter table t1 add foreign key (a) REFERENCES t3(a, b);"
	tk.MustGetErrCode(failSQL, errno.ErrWrongFkDef)
	// Test drop column with foreign key.
	tk.MustExec("create table t4 (c int,d int,foreign key (d) references t1 (b));")
	failSQL = "alter table t4 drop column d"
	tk.MustGetErrCode(failSQL, errno.ErrFkColumnCannotDrop)
	// Test change column with foreign key.
	failSQL = "alter table t4 change column d e bigint;"
	tk.MustGetErrCode(failSQL, errno.ErrFKIncompatibleColumns)
	// Test modify column with foreign key.
	failSQL = "alter table t4 modify column d bigint;"
	tk.MustGetErrCode(failSQL, errno.ErrFKIncompatibleColumns)
	tk.MustQuery("select count(*) from information_schema.KEY_COLUMN_USAGE;")
	// After the FK is dropped, the previously rejected modify succeeds.
	tk.MustExec("alter table t4 drop foreign key fk_1")
	tk.MustExec("alter table t4 modify column d bigint;")
	tk.MustExec("drop table if exists t1,t2,t3,t4;")
}
// TestAddNotNullColumn runs "add column ... not null default 3" in a
// background session while the foreground session keeps updating a row,
// then checks the new column was backfilled with the default.
func TestAddNotNullColumn(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// for different databases
	tk.MustExec("create table tnn (c1 int primary key auto_increment, c2 int)")
	tk.MustExec("insert tnn (c2) values (0)" + strings.Repeat(",(0)", 99))
	done := make(chan error, 1)
	testddlutil.SessionExecInGoroutine(store, "test", "alter table tnn add column c3 int not null default 3", done)
	updateCnt := 0
out:
	for {
		select {
		case err := <-done:
			require.NoError(t, err)
			break out
		default:
			// Close issue #14636
			// Because add column action is not amendable now, it causes an error when the schema is changed
			// in the process of an insert statement.
			_, err := tk.Exec("update tnn set c2 = c2 + 1 where c1 = 99")
			if err == nil {
				updateCnt++
			}
		}
	}
	// c2 reflects however many updates succeeded; c3 must be the default 3.
	expected := fmt.Sprintf("%d %d", updateCnt, 3)
	tk.MustQuery("select c2, c3 from tnn where c1 = 99").Check(testkit.Rows(expected))
	tk.MustExec("drop table tnn")
}
// TestCharacterSetInColumns checks that string columns default to utf8mb4
// and that table-level charset clauses are accepted in both upper and
// lower case.
func TestCharacterSetInColumns(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database varchar_test;")
	defer tk.MustExec("drop database varchar_test;")
	tk.MustExec("use varchar_test")
	tk.MustExec("create table t (c1 int, s1 varchar(10), s2 text)")
	// Only the two string columns have a character set, and it is utf8mb4.
	tk.MustQuery("select count(*) from information_schema.columns where table_schema = 'varchar_test' and character_set_name != 'utf8mb4'").Check(testkit.Rows("0"))
	tk.MustQuery("select count(*) from information_schema.columns where table_schema = 'varchar_test' and character_set_name = 'utf8mb4'").Check(testkit.Rows("2"))
	tk.MustExec("create table t1(id int) charset=UTF8;")
	tk.MustExec("create table t2(id int) charset=BINARY;")
	tk.MustExec("create table t3(id int) charset=LATIN1;")
	tk.MustExec("create table t4(id int) charset=ASCII;")
	tk.MustExec("create table t5(id int) charset=UTF8MB4;")
	tk.MustExec("create table t11(id int) charset=utf8;")
	tk.MustExec("create table t12(id int) charset=binary;")
	tk.MustExec("create table t13(id int) charset=latin1;")
	tk.MustExec("create table t14(id int) charset=ascii;")
	tk.MustExec("create table t15(id int) charset=utf8mb4;")
}
// TestAddNotNullColumnWhileInsertOnDupUpdate adds a NOT NULL column while a
// second session hammers the table with INSERT ... ON DUPLICATE KEY UPDATE,
// then verifies the final row contains the new column's default.
func TestAddNotNullColumnWhileInsertOnDupUpdate(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk1 := testkit.NewTestKit(t, store)
	tk1.MustExec("use test")
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use test")
	closeCh := make(chan bool)
	var wg util.WaitGroupWrapper
	tk1.MustExec("create table nn (a int primary key, b int)")
	tk1.MustExec("insert nn values (1, 1)")
	wg.Run(func() {
		// Keep bumping b until the DDL below finishes and closeCh closes.
		for {
			select {
			case <-closeCh:
				return
			default:
			}
			tk2.MustExec("insert nn (a, b) values (1, 1) on duplicate key update a = 1, b = values(b) + 1")
		}
	})
	tk1.MustExec("alter table nn add column c int not null default 3 after a")
	close(closeCh)
	wg.Wait()
	tk1.MustQuery("select * from nn").Check(testkit.Rows("1 3 2"))
}
// TestTransactionOnAddDropColumn runs insert/update transactions from a DDL
// hook at every intermediate schema state of an add-column and a
// drop-column job, and checks the committed rows stay consistent.
func TestTransactionOnAddDropColumn(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("set @@global.tidb_max_delta_schema_count= 4096")
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1 (a int, b int);")
	tk.MustExec("create table t2 (a int, b int);")
	tk.MustExec("insert into t2 values (2,0)")
	transactions := [][]string{
		{
			"begin",
			"insert into t1 set a=1",
			"update t1 set b=1 where a=1",
			"commit",
		},
		{
			"begin",
			"insert into t1 select a,b from t2",
			"update t1 set b=2 where a=2",
			"commit",
		},
	}
	originHook := dom.DDL().GetHook()
	defer dom.DDL().SetHook(originHook)
	hook := &callback.TestDDLCallback{Do: dom}
	var checkErr error
	hook.OnJobRunBeforeExported = func(job *model.Job) {
		if checkErr != nil {
			return
		}
		// Only interfere at the intermediate schema-change states.
		switch job.SchemaState {
		case model.StateWriteOnly, model.StateWriteReorganization, model.StateDeleteOnly, model.StateDeleteReorganization:
		default:
			return
		}
		// do transaction.
		for _, transaction := range transactions {
			for _, sql := range transaction {
				if _, checkErr = tk.Exec(sql); checkErr != nil {
					checkErr = errors.Errorf("err: %s, sql: %s, job schema state: %s", checkErr.Error(), sql, job.SchemaState)
					return
				}
			}
		}
	}
	dom.DDL().SetHook(hook)
	done := make(chan error, 1)
	// test transaction on add column.
	go backgroundExec(store, "test", "alter table t1 add column c int not null after a", done)
	err := <-done
	require.NoError(t, err)
	require.Nil(t, checkErr)
	tk.MustQuery("select a,b from t1 order by a").Check(testkit.Rows("1 1", "1 1", "1 1", "2 2", "2 2", "2 2"))
	tk.MustExec("delete from t1")
	// test transaction on drop column.
	go backgroundExec(store, "test", "alter table t1 drop column c", done)
	err = <-done
	require.NoError(t, err)
	require.Nil(t, checkErr)
	tk.MustQuery("select a,b from t1 order by a").Check(testkit.Rows("1 1", "1 1", "1 1", "2 2", "2 2", "2 2"))
}
// TestCreateTableWithSetCol checks defaults for SET columns: string
// defaults are normalized/deduplicated, invalid members are rejected, and
// integer defaults are interpreted as SET bitmasks.
func TestCreateTableWithSetCol(t *testing.T) {
	store := testkit.CreateMockStore(t, mockstore.WithDDLChecker())
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t_set (a int, b set('e') default '');")
	tk.MustQuery("show create table t_set").Check(testkit.Rows("t_set CREATE TABLE `t_set` (\n" +
		"  `a` int(11) DEFAULT NULL,\n" +
		"  `b` set('e') DEFAULT ''\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	tk.MustExec("drop table t_set")
	// Duplicate member 'c' in the default collapses to a single occurrence.
	tk.MustExec("create table t_set (a set('a', 'b', 'c', 'd') default 'a,c,c');")
	tk.MustQuery("show create table t_set").Check(testkit.Rows("t_set CREATE TABLE `t_set` (\n" +
		"  `a` set('a','b','c','d') DEFAULT 'a,c'\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	// It's for failure cases.
	// The type of default value is string.
	tk.MustExec("drop table t_set")
	failedSQL := "create table t_set (a set('1', '4', '10') default '3');"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	failedSQL = "create table t_set (a set('1', '4', '10') default '1,4,11');"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	// Success when the new collation is enabled.
	tk.MustExec("create table t_set (a set('1', '4', '10') default '1 ,4');")
	// The type of default value is int.
	failedSQL = "create table t_set (a set('1', '4', '10') default 0);"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	failedSQL = "create table t_set (a set('1', '4', '10') default 8);"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	// The type of default value is int.
	// It's for successful cases
	// Integer defaults select members by bit: 1 -> first, 2 -> second,
	// 3 -> first two, 15 -> all four.
	tk.MustExec("drop table if exists t_set")
	tk.MustExec("create table t_set (a set('1', '4', '10', '21') default 1);")
	tk.MustQuery("show create table t_set").Check(testkit.Rows("t_set CREATE TABLE `t_set` (\n" +
		"  `a` set('1','4','10','21') DEFAULT '1'\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	tk.MustExec("drop table t_set")
	tk.MustExec("create table t_set (a set('1', '4', '10', '21') default 2);")
	tk.MustQuery("show create table t_set").Check(testkit.Rows("t_set CREATE TABLE `t_set` (\n" +
		"  `a` set('1','4','10','21') DEFAULT '4'\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	tk.MustExec("drop table t_set")
	tk.MustExec("create table t_set (a set('1', '4', '10', '21') default 3);")
	tk.MustQuery("show create table t_set").Check(testkit.Rows("t_set CREATE TABLE `t_set` (\n" +
		"  `a` set('1','4','10','21') DEFAULT '1,4'\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	tk.MustExec("drop table t_set")
	tk.MustExec("create table t_set (a set('1', '4', '10', '21') default 15);")
	tk.MustQuery("show create table t_set").Check(testkit.Rows("t_set CREATE TABLE `t_set` (\n" +
		"  `a` set('1','4','10','21') DEFAULT '1,4,10,21'\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	tk.MustExec("insert into t_set value()")
	tk.MustQuery("select * from t_set").Check(testkit.Rows("1,4,10,21"))
}
// TestCreateTableWithEnumCol checks defaults for ENUM columns: invalid
// string/int defaults are rejected, and an integer default selects the
// member at that 1-based ordinal.
func TestCreateTableWithEnumCol(t *testing.T) {
	store := testkit.CreateMockStore(t, mockstore.WithDDLChecker())
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// It's for failure cases.
	// The type of default value is string.
	tk.MustExec("drop table if exists t_enum")
	failedSQL := "create table t_enum (a enum('1', '4', '10') default '3');"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	failedSQL = "create table t_enum (a enum('1', '4', '10') default '');"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	// The type of default value is int.
	failedSQL = "create table t_enum (a enum('1', '4', '10') default 0);"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	failedSQL = "create table t_enum (a enum('1', '4', '10') default 8);"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	// The type of default value is int.
	// It's for successful cases
	tk.MustExec("drop table if exists t_enum")
	// Default 2 selects the second enum member.
	tk.MustExec("create table t_enum (a enum('2', '3', '4') default 2);")
	ret := tk.MustQuery("show create table t_enum").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` enum('2','3','4') DEFAULT '3'"))
	tk.MustExec("drop table t_enum")
	tk.MustExec("create table t_enum (a enum('a', 'c', 'd') default 2);")
	ret = tk.MustQuery("show create table t_enum").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` enum('a','c','d') DEFAULT 'c'"))
	tk.MustExec("insert into t_enum value()")
	tk.MustQuery("select * from t_enum").Check(testkit.Rows("c"))
}
// TestCreateTableWithIntegerColWithDefault checks that fractional/string
// defaults on integer and float columns are rounded or normalized, and that
// out-of-range or sign-invalid defaults are rejected.
func TestCreateTableWithIntegerColWithDefault(t *testing.T) {
	store := testkit.CreateMockStore(t, mockstore.WithDDLChecker())
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// It's for failure cases.
	tk.MustExec("drop table if exists t1")
	failedSQL := "create table t1 (a tinyint unsigned default -1.25);"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	failedSQL = "create table t1 (a tinyint default 999999999);"
	tk.MustGetErrCode(failedSQL, errno.ErrInvalidDefault)
	// It's for successful cases
	// Fractional defaults round to the nearest integer for integer columns.
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1 (a tinyint unsigned default 1.25);")
	ret := tk.MustQuery("show create table t1").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` tinyint(3) unsigned DEFAULT '1'"))
	tk.MustExec("drop table t1")
	tk.MustExec("create table t1 (a smallint default -1.25);")
	ret = tk.MustQuery("show create table t1").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` smallint(6) DEFAULT '-1'"))
	tk.MustExec("drop table t1")
	tk.MustExec("create table t1 (a mediumint default 2.8);")
	ret = tk.MustQuery("show create table t1").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` mediumint(9) DEFAULT '3'"))
	tk.MustExec("drop table t1")
	tk.MustExec("create table t1 (a int default -2.8);")
	ret = tk.MustQuery("show create table t1").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` int(11) DEFAULT '-3'"))
	tk.MustExec("drop table t1")
	tk.MustExec("create table t1 (a bigint unsigned default 0.0);")
	ret = tk.MustQuery("show create table t1").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` bigint(20) unsigned DEFAULT '0'"))
	tk.MustExec("drop table t1")
	// String defaults on float/double are parsed and normalized.
	tk.MustExec("create table t1 (a float default '0012.43');")
	ret = tk.MustQuery("show create table t1").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` float DEFAULT '12.43'"))
	tk.MustExec("drop table t1")
	tk.MustExec("create table t1 (a double default '12.4300');")
	ret = tk.MustQuery("show create table t1").Rows()[0][1]
	require.True(t, strings.Contains(ret.(string), "`a` double DEFAULT '12.43'"))
}
// TestAlterTableWithValidation checks that ALTER TABLE ... WITH VALIDATION
// and ... WITHOUT VALIDATION are parsed and accepted, but each produces a
// single warning 8200 stating the clause is currently unsupported.
func TestAlterTableWithValidation(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1")
	defer tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1 (c1 int, c2 int as (c1 + 1));")
	// Test for alter table with validation.
	tk.MustExec("alter table t1 with validation")
	require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount())
	tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|8200|ALTER TABLE WITH VALIDATION is currently unsupported"))
	// Test for alter table without validation.
	tk.MustExec("alter table t1 without validation")
	require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount())
	tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|8200|ALTER TABLE WITHOUT VALIDATION is currently unsupported"))
}
// TestCreateTableWithInfo exercises the AllocTableIDIf hook of
// BatchCreateTableWithInfo: when the callback returns false the caller's
// table ID (42) is kept as-is; when it returns true a fresh global ID is
// allocated, which must be greater than an ID generated beforehand.
func TestCreateTableWithInfo(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.Session().SetValue(sessionctx.QueryString, "skip")
	d := dom.DDL()
	require.NotNil(t, d)
	info := []*model.TableInfo{{
		ID: 42,
		Name: model.NewCIStr("t"),
	}}
	// Callback returns false -> the provided ID 42 must be used verbatim.
	require.NoError(t, d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), info, ddl.OnExistError, ddl.AllocTableIDIf(func(ti *model.TableInfo) bool {
		return false
	})))
	tk.MustQuery("select tidb_table_id from information_schema.tables where table_name = 't'").Check(testkit.Rows("42"))
	// Reserve a global ID now so we can later assert the allocated table ID
	// is strictly newer (greater) than it.
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers)
	var id int64
	err := kv.RunInNewTxn(ctx, store, true, func(_ context.Context, txn kv.Transaction) error {
		m := meta.NewMeta(txn)
		var err error
		id, err = m.GenGlobalID()
		return err
	})
	require.NoError(t, err)
	info = []*model.TableInfo{{
		ID: 42,
		Name: model.NewCIStr("tt"),
	}}
	tk.Session().SetValue(sessionctx.QueryString, "skip")
	// Callback returns true -> ID 42 is ignored and a fresh ID is allocated.
	require.NoError(t, d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), info, ddl.OnExistError, ddl.AllocTableIDIf(func(ti *model.TableInfo) bool {
		return true
	})))
	idGen, ok := tk.MustQuery("select tidb_table_id from information_schema.tables where table_name = 'tt'").Rows()[0][0].(string)
	require.True(t, ok)
	idGenNum, err := strconv.ParseInt(idGen, 10, 64)
	require.NoError(t, err)
	require.Greater(t, idGenNum, id)
}
// TestBatchCreateTable exercises DDL.BatchCreateTableWithInfo: creating
// several tables in a single DDL job, rejecting a batch that contains a
// duplicated table name, and creating a view through the batch API.
func TestBatchCreateTable(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists tables_1")
	tk.MustExec("drop table if exists tables_2")
	tk.MustExec("drop table if exists tables_3")
	d := dom.DDL()
	infos := []*model.TableInfo{}
	infos = append(infos, &model.TableInfo{
		Name: model.NewCIStr("tables_1"),
	})
	infos = append(infos, &model.TableInfo{
		Name: model.NewCIStr("tables_2"),
	})
	infos = append(infos, &model.TableInfo{
		Name: model.NewCIStr("tables_3"),
	})
	// correct name
	tk.Session().SetValue(sessionctx.QueryString, "skip")
	err := d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), infos, ddl.OnExistError)
	require.NoError(t, err)
	tk.MustQuery("show tables like '%tables_%'").Check(testkit.Rows("tables_1", "tables_2", "tables_3"))
	// All three tables must come from one "create tables" job.
	job := tk.MustQuery("admin show ddl jobs").Rows()[0]
	require.Equal(t, "test", job[1])
	require.Equal(t, "tables_1,tables_2,tables_3", job[2])
	require.Equal(t, "create tables", job[3])
	require.Equal(t, "public", job[4])
	// FIXME: we must change column type to give multiple id
	// c.Assert(job[6], Matches, "[^,]+,[^,]+,[^,]+")
	// duplicated name
	infos[1].Name = model.NewCIStr("tables_1")
	tk.Session().SetValue(sessionctx.QueryString, "skip")
	err = d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), infos, ddl.OnExistError)
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableExists))
	// Build a view table-info ("tables_4") selecting two columns from t.
	newinfo := &model.TableInfo{
		Name: model.NewCIStr("tables_4"),
	}
	{
		colNum := 2
		cols := make([]*model.ColumnInfo, colNum)
		viewCols := make([]model.CIStr, colNum)
		var stmtBuffer bytes.Buffer
		stmtBuffer.WriteString("SELECT ")
		for i := range cols {
			col := &model.ColumnInfo{
				Name: model.NewCIStr(fmt.Sprintf("c%d", i+1)),
				Offset: i,
				State: model.StatePublic,
			}
			cols[i] = col
			viewCols[i] = col.Name
			stmtBuffer.WriteString(cols[i].Name.L + ",")
		}
		stmtBuffer.WriteString("1 FROM t")
		newinfo.Columns = cols
		newinfo.View = &model.ViewInfo{Cols: viewCols, Security: model.SecurityDefiner, Algorithm: model.AlgorithmMerge, SelectStmt: stmtBuffer.String(), CheckOption: model.CheckOptionCascaded, Definer: &auth.UserIdentity{CurrentUser: true}}
	}
	// The query string only needs to be set once per DDL call; the original
	// code set it twice in a row here.
	tk.Session().SetValue(sessionctx.QueryString, "skip")
	err = d.BatchCreateTableWithInfo(tk.Session(), model.NewCIStr("test"), []*model.TableInfo{newinfo}, ddl.OnExistError)
	require.NoError(t, err)
}
// port from mysql
// https://github.com/mysql/mysql-server/blob/124c7ab1d6f914637521fd4463a993aa73403513/mysql-test/t/lock.test
//
// TestLock checks basic LOCK TABLE semantics within one session: a WRITE
// lock permits reads and writes, downgrading to a READ lock makes writes
// fail with ErrTableNotLockedForWrite, and UNLOCK TABLES restores normal
// write access.
func TestLock(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	/* Testing of table locking */
	tk.MustExec("DROP TABLE IF EXISTS t1")
	tk.MustExec("CREATE TABLE t1 ( `id` int(11) NOT NULL default '0', `id2` int(11) NOT NULL default '0', `id3` int(11) NOT NULL default '0', `dummy1` char(30) default NULL, PRIMARY KEY (`id`,`id2`), KEY `index_id3` (`id3`))")
	tk.MustExec("insert into t1 (id,id2) values (1,1),(1,2),(1,3)")
	tk.MustExec("LOCK TABLE t1 WRITE")
	tk.MustExec("select dummy1,count(distinct id) from t1 group by dummy1")
	tk.MustExec("update t1 set id=-1 where id=1")
	// After re-locking READ, writes from the same session must be rejected.
	tk.MustExec("LOCK TABLE t1 READ")
	_, err := tk.Exec("update t1 set id=1 where id=1")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableNotLockedForWrite))
	tk.MustExec("unlock tables")
	tk.MustExec("update t1 set id=1 where id=-1")
	tk.MustExec("drop table t1")
}
// port from mysql
// https://github.com/mysql/mysql-server/blob/4f1d7cf5fcb11a3f84cff27e37100d7295e7d5ca/mysql-test/t/tablelock.test
//
// TestTableLock verifies that UNLOCK TABLES fully releases a write lock
// (checked via checkTableLock), and that locked tables can be dropped in
// either order while the lock is held.
func TestTableLock(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1,t2")
	/* Test of lock tables */
	tk.MustExec("create table t1 ( n int auto_increment primary key)")
	tk.MustExec("lock tables t1 write")
	tk.MustExec("insert into t1 values(NULL)")
	tk.MustExec("unlock tables")
	checkTableLock(t, tk, "test", "t1", model.TableLockNone)
	tk.MustExec("lock tables t1 write")
	tk.MustExec("insert into t1 values(NULL)")
	tk.MustExec("unlock tables")
	checkTableLock(t, tk, "test", "t1", model.TableLockNone)
	tk.MustExec("drop table if exists t1")
	/* Test of locking and delete of files */
	tk.MustExec("drop table if exists t1,t2")
	tk.MustExec("CREATE TABLE t1 (a int)")
	tk.MustExec("CREATE TABLE t2 (a int)")
	tk.MustExec("lock tables t1 write, t2 write")
	tk.MustExec("drop table t1,t2")
	tk.MustExec("CREATE TABLE t1 (a int)")
	tk.MustExec("CREATE TABLE t2 (a int)")
	tk.MustExec("lock tables t1 write, t2 write")
	// Dropping in reverse order must work just the same.
	tk.MustExec("drop table t2,t1")
}
// port from mysql
// https://github.com/mysql/mysql-server/blob/4f1d7cf5fcb11a3f84cff27e37100d7295e7d5ca/mysql-test/t/lock_tables_lost_commit.test
//
// TestTableLocksLostCommit checks that a table lock held by a session is
// released when that session closes: while tk holds the WRITE lock, tk2's
// SELECT fails with ErrTableLocked; after tk's session is closed, tk2 can
// read and even drop the table.
func TestTableLocksLostCommit(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk2.MustExec("use test")
	tk.MustExec("DROP TABLE IF EXISTS t1")
	tk.MustExec("CREATE TABLE t1(a INT)")
	tk.MustExec("LOCK TABLES t1 WRITE")
	tk.MustExec("INSERT INTO t1 VALUES(10)")
	err := tk2.ExecToErr("SELECT * FROM t1")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableLocked))
	// Closing the lock-holder's session must release the lock.
	tk.Session().Close()
	tk2.MustExec("SELECT * FROM t1")
	tk2.MustExec("DROP TABLE t1")
	tk.MustExec("unlock tables")
}
// checkTableLock asserts the lock state of dbName.tableName after reloading
// the schema: for TableLockNone the table must carry no lock at all;
// otherwise it must hold exactly one public lock of the given type owned by
// tk's session on the current DDL owner.
func checkTableLock(t *testing.T, tk *testkit.TestKit, dbName, tableName string, lockTp model.TableLockType) {
	tb := external.GetTableByName(t, tk, dbName, tableName)
	dom := domain.GetDomain(tk.Session())
	require.NoError(t, dom.Reload())
	tbMeta := tb.Meta()
	if lockTp == model.TableLockNone {
		require.Nil(t, tbMeta.Lock)
		return
	}
	require.NotNil(t, tbMeta.Lock)
	require.Equal(t, lockTp, tbMeta.Lock.Tp)
	require.Equal(t, model.TableLockStatePublic, tbMeta.Lock.State)
	require.True(t, len(tbMeta.Lock.Sessions) == 1)
	require.Equal(t, dom.DDL().GetID(), tbMeta.Lock.Sessions[0].ServerID)
	require.Equal(t, tk.Session().GetSessionVars().ConnectionID, tbMeta.Lock.Sessions[0].SessionID)
}
// test write local lock
//
// TestWriteLocal checks LOCK TABLES ... WRITE LOCAL semantics: other
// sessions may still read the table but may not write it, and a WRITE LOCAL
// lock is mutually exclusive with any other lock (write local, write, read)
// taken by another session, in either acquisition order.
func TestWriteLocal(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk2.MustExec("use test")
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1 ( n int auto_increment primary key)")
	// Test: allow read
	tk.MustExec("lock tables t1 write local")
	tk.MustExec("insert into t1 values(NULL)")
	tk2.MustQuery("select count(*) from t1")
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
	// Test: forbid write
	tk.MustExec("lock tables t1 write local")
	err := tk2.ExecToErr("insert into t1 values(NULL)")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableLocked))
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
	// Test mutex: lock write local first
	tk.MustExec("lock tables t1 write local")
	err = tk2.ExecToErr("lock tables t1 write local")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableLocked))
	err = tk2.ExecToErr("lock tables t1 write")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableLocked))
	err = tk2.ExecToErr("lock tables t1 read")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableLocked))
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
	// Test mutex: lock write first
	tk.MustExec("lock tables t1 write")
	err = tk2.ExecToErr("lock tables t1 write local")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableLocked))
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
	// Test mutex: lock read first
	tk.MustExec("lock tables t1 read")
	err = tk2.ExecToErr("lock tables t1 write local")
	require.True(t, terror.ErrorEqual(err, infoschema.ErrTableLocked))
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
}
// TestLockTables is a broad LOCK/UNLOCK TABLES test across two sessions:
// single- and multi-table locks, read/write/write-local interactions,
// lock-vs-transaction commit behavior (with and without txn auto retry),
// DDL (drop/truncate/create) under locks, locking of unsupported schemas,
// views and sequences, database-level DDL while holding locks, and
// ADMIN CLEANUP TABLE LOCK. Statement order is significant throughout.
// Note: metadata lock and txn mode are disabled globally up front so the
// legacy lock semantics under test apply.
func TestLockTables(t *testing.T) {
	store := testkit.CreateMockStore(t)
	setTxnTk := testkit.NewTestKit(t, store)
	setTxnTk.MustExec("set global tidb_txn_mode=''")
	setTxnTk.MustExec("set global tidb_enable_metadata_lock=0")
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1,t2")
	defer tk.MustExec("drop table if exists t1,t2")
	tk.MustExec("create table t1 (a int)")
	tk.MustExec("create table t2 (a int)")
	// Test lock 1 table.
	tk.MustExec("lock tables t1 write")
	checkTableLock(t, tk, "test", "t1", model.TableLockWrite)
	tk.MustExec("lock tables t1 read")
	checkTableLock(t, tk, "test", "t1", model.TableLockRead)
	tk.MustExec("lock tables t1 write")
	checkTableLock(t, tk, "test", "t1", model.TableLockWrite)
	// Test lock multi tables.
	tk.MustExec("lock tables t1 write, t2 read")
	checkTableLock(t, tk, "test", "t1", model.TableLockWrite)
	checkTableLock(t, tk, "test", "t2", model.TableLockRead)
	tk.MustExec("lock tables t1 read, t2 write")
	checkTableLock(t, tk, "test", "t1", model.TableLockRead)
	checkTableLock(t, tk, "test", "t2", model.TableLockWrite)
	// A new LOCK TABLES statement implicitly releases locks it doesn't list.
	tk.MustExec("lock tables t2 write")
	checkTableLock(t, tk, "test", "t2", model.TableLockWrite)
	checkTableLock(t, tk, "test", "t1", model.TableLockNone)
	tk.MustExec("lock tables t1 write")
	checkTableLock(t, tk, "test", "t1", model.TableLockWrite)
	checkTableLock(t, tk, "test", "t2", model.TableLockNone)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use test")
	// Test read lock.
	tk.MustExec("lock tables t1 read")
	tk.MustQuery("select * from t1")
	tk2.MustQuery("select * from t1")
	tk.MustGetDBError("insert into t1 set a=1", infoschema.ErrTableNotLockedForWrite)
	tk.MustGetDBError("update t1 set a=1", infoschema.ErrTableNotLockedForWrite)
	tk.MustGetDBError("delete from t1", infoschema.ErrTableNotLockedForWrite)
	tk2.MustGetDBError("insert into t1 set a=1", infoschema.ErrTableLocked)
	tk2.MustGetDBError("update t1 set a=1", infoschema.ErrTableLocked)
	tk2.MustGetDBError("delete from t1", infoschema.ErrTableLocked)
	tk2.MustExec("lock tables t1 read")
	tk2.MustGetDBError("insert into t1 set a=1", infoschema.ErrTableNotLockedForWrite)
	// Test write lock.
	tk.MustGetDBError("lock tables t1 write", infoschema.ErrTableLocked)
	tk2.MustExec("unlock tables")
	tk.MustExec("lock tables t1 write")
	tk.MustQuery("select * from t1")
	tk.MustExec("delete from t1")
	tk.MustExec("insert into t1 set a=1")
	tk2.MustGetDBError("select * from t1", infoschema.ErrTableLocked)
	tk2.MustGetDBError("insert into t1 set a=1", infoschema.ErrTableLocked)
	tk2.MustGetDBError("lock tables t1 write", infoschema.ErrTableLocked)
	// Test write local lock.
	tk.MustExec("lock tables t1 write local")
	tk.MustQuery("select * from t1")
	tk.MustExec("delete from t1")
	tk.MustExec("insert into t1 set a=1")
	tk2.MustQuery("select * from t1")
	tk2.MustGetDBError("delete from t1", infoschema.ErrTableLocked)
	tk2.MustGetDBError("insert into t1 set a=1", infoschema.ErrTableLocked)
	tk2.MustGetDBError("lock tables t1 write", infoschema.ErrTableLocked)
	tk2.MustGetDBError("lock tables t1 read", infoschema.ErrTableLocked)
	// Test none unique table.
	tk.MustGetDBError("lock tables t1 read, t1 write", infoschema.ErrNonuniqTable)
	// Test lock table by other session in transaction and commit without retry.
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
	tk.MustExec("set @@session.tidb_disable_txn_auto_retry=1")
	tk.MustExec("begin")
	tk.MustExec("insert into t1 set a=1")
	tk2.MustExec("lock tables t1 write")
	tk.MustGetErrMsg("commit",
		"previous statement: insert into t1 set a=1: [domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]")
	// Test lock table by other session in transaction and commit with retry.
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
	tk.MustExec("set @@session.tidb_disable_txn_auto_retry=0")
	tk.MustExec("begin")
	tk.MustExec("insert into t1 set a=1")
	tk2.MustExec("lock tables t1 write")
	tk.MustGetDBError("commit", infoschema.ErrTableLocked)
	// Test for lock the same table multiple times.
	tk2.MustExec("lock tables t1 write")
	tk2.MustExec("lock tables t1 write, t2 read")
	// Test lock tables and drop tables
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
	tk.MustExec("lock tables t1 write, t2 write")
	tk.MustExec("drop table t1")
	tk2.MustExec("create table t1 (a int)")
	tk.MustExec("lock tables t1 write, t2 read")
	// Test lock tables and drop database.
	tk.MustExec("unlock tables")
	tk.MustExec("create database test_lock")
	tk.MustExec("create table test_lock.t3 (a int)")
	tk.MustExec("lock tables t1 write, test_lock.t3 write")
	tk2.MustExec("create table t3 (a int)")
	tk.MustExec("lock tables t1 write, t3 write")
	tk.MustExec("drop table t3")
	// Test lock tables and truncate tables.
	tk.MustExec("unlock tables")
	tk.MustExec("lock tables t1 write, t2 read")
	tk.MustExec("truncate table t1")
	tk.MustExec("insert into t1 set a=1")
	tk2.MustGetDBError("insert into t1 set a=1", infoschema.ErrTableLocked)
	// Test for lock unsupported schema tables.
	tk2.MustGetDBError("lock tables performance_schema.global_status write", infoschema.ErrAccessDenied)
	tk2.MustGetDBError("lock tables information_schema.tables write", infoschema.ErrAccessDenied)
	tk2.MustGetDBError("lock tables mysql.db write", infoschema.ErrAccessDenied)
	// Test create table/view when session is holding the table locks.
	tk.MustExec("unlock tables")
	tk.MustExec("lock tables t1 write, t2 read")
	tk.MustGetDBError("create table t3 (a int)", infoschema.ErrTableNotLocked)
	tk.MustGetDBError("create view v1 as select * from t1;", infoschema.ErrTableNotLocked)
	// Test for locking view was not supported.
	tk.MustExec("unlock tables")
	tk.MustExec("create view v1 as select * from t1;")
	tk.MustGetDBError("lock tables v1 read", table.ErrUnsupportedOp)
	// Test for locking sequence was not supported.
	tk.MustExec("unlock tables")
	tk.MustExec("create sequence seq")
	tk.MustGetDBError("lock tables seq read", table.ErrUnsupportedOp)
	tk.MustExec("drop sequence seq")
	// Test for create/drop/alter database when session is holding the table locks.
	tk.MustExec("unlock tables")
	tk.MustExec("lock table t1 write")
	tk.MustGetDBError("drop database test", table.ErrLockOrActiveTransaction)
	tk.MustGetDBError("create database test_lock", table.ErrLockOrActiveTransaction)
	tk.MustGetDBError("alter database test charset='utf8mb4'", table.ErrLockOrActiveTransaction)
	// Test alter/drop database when other session is holding the table locks of the database.
	tk2.MustExec("create database test_lock2")
	tk2.MustGetDBError("drop database test", infoschema.ErrTableLocked)
	tk2.MustGetDBError("alter database test charset='utf8mb4'", infoschema.ErrTableLocked)
	// Test for admin cleanup table locks.
	tk.MustExec("unlock tables")
	tk.MustExec("lock table t1 write, t2 write")
	tk2.MustGetDBError("lock tables t1 write, t2 read", infoschema.ErrTableLocked)
	tk2.MustExec("admin cleanup table lock t1,t2")
	checkTableLock(t, tk, "test", "t1", model.TableLockNone)
	checkTableLock(t, tk, "test", "t2", model.TableLockNone)
	// cleanup unlocked table.
	tk2.MustExec("admin cleanup table lock t1,t2")
	checkTableLock(t, tk, "test", "t1", model.TableLockNone)
	checkTableLock(t, tk, "test", "t2", model.TableLockNone)
	tk2.MustExec("lock tables t1 write, t2 read")
	checkTableLock(t, tk2, "test", "t1", model.TableLockWrite)
	checkTableLock(t, tk2, "test", "t2", model.TableLockRead)
	tk.MustExec("unlock tables")
	tk2.MustExec("unlock tables")
}
// TestTablesLockDelayClean verifies the DelayCleanTableLock config: when a
// lock-holding session closes, its table locks are released only after the
// configured delay (100ms here). Halfway through the delay the lock must
// still be held; after Close returns (which takes at least the delay), the
// lock must be gone. Timing-sensitive by design.
func TestTablesLockDelayClean(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use test")
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1,t2")
	defer tk.MustExec("drop table if exists t1,t2")
	tk.MustExec("create table t1 (a int)")
	tk.MustExec("create table t2 (a int)")
	tk.MustExec("lock tables t1 write")
	checkTableLock(t, tk, "test", "t1", model.TableLockWrite)
	config.UpdateGlobal(func(conf *config.Config) {
		conf.DelayCleanTableLock = 100
	})
	var wg util.WaitGroupWrapper
	var startTime time.Time
	wg.Run(func() {
		startTime = time.Now()
		tk.Session().Close()
	})
	// 50ms < 100ms delay: the lock must still be visible.
	time.Sleep(50 * time.Millisecond)
	checkTableLock(t, tk, "test", "t1", model.TableLockWrite)
	wg.Wait()
	require.True(t, time.Since(startTime).Seconds() > 0.1)
	checkTableLock(t, tk, "test", "t1", model.TableLockNone)
	// Restore the default so later tests are unaffected.
	config.UpdateGlobal(func(conf *config.Config) {
		conf.DelayCleanTableLock = 0
	})
}
// TestDDLWithInvalidTableInfo checks error reporting for malformed DDL: a
// generated-column expression with a deliberate syntax error ("0when" — a
// missing space) must fail with parser error 1064 in CREATE TABLE, ALTER
// MODIFY and ALTER ADD COLUMN, and dropping a partitioning column must be
// rejected. NOTE(review): the exact whitespace inside the raw SQL string
// below is significant — the expected error message pins line/column.
func TestDDLWithInvalidTableInfo(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	defer tk.MustExec("drop table if exists t")
	// Test create with invalid expression.
	_, err := tk.Exec(`CREATE TABLE t (
		c0 int(11) ,
  		c1 int(11),
    		c2 decimal(16,4) GENERATED ALWAYS AS ((case when (c0 = 0) then 0when (c0 > 0) then (c1 / c0) end))
	);`)
	require.Error(t, err)
	require.Equal(t, "[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 4 column 88 near \"then (c1 / c0) end))\n\t);\" ", err.Error())
	tk.MustExec("create table t (a bigint, b int, c int generated always as (b+1)) partition by hash(a) partitions 4;")
	// Test drop partition column.
	tk.MustGetErrMsg("alter table t drop column a;", "[ddl:3855]Column 'a' has a partitioning function dependency and cannot be dropped or renamed")
	// Test modify column with invalid expression.
	tk.MustGetErrMsg("alter table t modify column c int GENERATED ALWAYS AS ((case when (a = 0) then 0when (a > 0) then (b / a) end));", "[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 97 near \"then (b / a) end));\" ")
	// Test add column with invalid expression.
	tk.MustGetErrMsg("alter table t add column d int GENERATED ALWAYS AS ((case when (a = 0) then 0when (a > 0) then (b / a) end));", "[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 94 near \"then (b / a) end));\" ")
}
// TestAddColumn2 tests ADD COLUMN corner cases using a DDL callback hook:
// (1) a stale write through the table snapshot captured in the write-only
// state must still produce a consistent row once the DDL finishes; (2) a
// row written with an explicit _tidb_rowid during the write-only state must
// keep that rowid and receive the new column's default after the DDL.
func TestAddColumn2(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1 (a int key, b int);")
	defer tk.MustExec("drop table if exists t1, t2")
	originHook := dom.DDL().GetHook()
	defer dom.DDL().SetHook(originHook)
	hook := &callback.TestDDLCallback{Do: dom}
	// Capture the table as seen in the write-only schema state.
	var writeOnlyTable table.Table
	hook.OnJobRunBeforeExported = func(job *model.Job) {
		if job.SchemaState == model.StateWriteOnly {
			writeOnlyTable, _ = dom.InfoSchema().TableByID(job.TableID)
		}
	}
	dom.DDL().SetHook(hook)
	done := make(chan error, 1)
	// test transaction on add column.
	go backgroundExec(store, "test", "alter table t1 add column c int not null", done)
	err := <-done
	require.NoError(t, err)
	tk.MustExec("insert into t1 values (1,1,1)")
	tk.MustQuery("select a,b,c from t1").Check(testkit.Rows("1 1 1"))
	// mock for outdated tidb update record.
	require.NotNil(t, writeOnlyTable)
	ctx := context.Background()
	err = sessiontxn.NewTxn(ctx, tk.Session())
	require.NoError(t, err)
	oldRow, err := tables.RowWithCols(writeOnlyTable, tk.Session(), kv.IntHandle(1), writeOnlyTable.WritableCols())
	require.NoError(t, err)
	require.Equal(t, 3, len(oldRow))
	// Re-write the row through the stale (write-only) table: delete then
	// re-add with b changed to 2.
	err = writeOnlyTable.RemoveRecord(tk.Session(), kv.IntHandle(1), oldRow)
	require.NoError(t, err)
	_, err = writeOnlyTable.AddRecord(tk.Session(), types.MakeDatums(oldRow[0].GetInt64(), 2, oldRow[2].GetInt64()), table.IsUpdate)
	require.NoError(t, err)
	tk.Session().StmtCommit(ctx)
	err = tk.Session().CommitTxn(ctx)
	require.NoError(t, err)
	tk.MustQuery("select a,b,c from t1").Check(testkit.Rows("1 2 1"))
	// Test for _tidb_rowid
	var re *testkit.Result
	tk.MustExec("create table t2 (a int);")
	hook.OnJobRunBeforeExported = func(job *model.Job) {
		if job.SchemaState != model.StateWriteOnly {
			return
		}
		// allow write _tidb_rowid first
		tk.MustExec("set @@tidb_opt_write_row_id=1")
		tk.MustExec("begin")
		tk.MustExec("insert into t2 (a,_tidb_rowid) values (1,2);")
		re = tk.MustQuery(" select a,_tidb_rowid from t2;")
		tk.MustExec("commit")
	}
	dom.DDL().SetHook(hook)
	go backgroundExec(store, "test", "alter table t2 add column b int not null default 3", done)
	err = <-done
	require.NoError(t, err)
	re.Check(testkit.Rows("1 2"))
	tk.MustQuery("select a,b,_tidb_rowid from t2").Check(testkit.Rows("1 3 2"))
}
// TestDropTables checks DROP TABLE behavior: dropping a nonexistent table
// (or one in a nonexistent schema) fails with ErrBadTable; IF EXISTS makes
// those cases succeed; and a multi-table DROP without IF EXISTS drops the
// tables that do exist while still returning ErrBadTable for the rest.
func TestDropTables(t *testing.T) {
	store := testkit.CreateMockStore(t, mockstore.WithDDLChecker())
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1;")
	failedSQL := "drop table t1;"
	tk.MustGetErrCode(failedSQL, errno.ErrBadTable)
	failedSQL = "drop table test2.t1;"
	tk.MustGetErrCode(failedSQL, errno.ErrBadTable)
	tk.MustExec("create table t1 (a int);")
	tk.MustExec("drop table if exists t1, t2;")
	tk.MustExec("create table t1 (a int);")
	tk.MustExec("drop table if exists t2, t1;")
	// Without IF EXISTS, the statement drops all named tables that do exist, and returns an error indicating which
	// nonexisting tables it was unable to drop.
	// https://dev.mysql.com/doc/refman/5.7/en/drop-table.html
	tk.MustExec("create table t1 (a int);")
	failedSQL = "drop table t1, t2;"
	tk.MustGetErrCode(failedSQL, errno.ErrBadTable)
	tk.MustExec("create table t1 (a int);")
	failedSQL = "drop table t2, t1;"
	tk.MustGetErrCode(failedSQL, errno.ErrBadTable)
	// t1 was dropped despite the error above, so SHOW CREATE must now fail.
	failedSQL = "show create table t1;"
	tk.MustGetErrCode(failedSQL, errno.ErrNoSuchTable)
}
|
// Copyright (c) 2017-2021 Uber Technologies Inc.
// Portions of the Software are attributed to Copyright (c) 2020 Temporal Technologies Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package internal
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
)
// TestContext_RaceRegression is a regression test for concurrent context
// cancellation during workflow shutdown; it is only meaningful when run
// with the -race detector and passes when no race is reported and the
// workflow completes without error.
func TestContext_RaceRegression(t *testing.T) {
	/*
		A race condition existed due to concurrently ending goroutines on shutdown (i.e. closing their chan without waiting
		on them to finish shutdown), which executed... quite a lot of non-concurrency-safe code in a concurrent way. All
		decision-sensitive code is assumed to be run strictly sequentially.

		Context cancellation was one identified by a customer, and it's fairly easy to test.
		In principle this must be safe to do - contexts are supposed to be concurrency-safe. Even if ours are not actually
		safe (for valid reasons), our execution model needs to ensure they *act* like it's safe.
	*/
	s := WorkflowTestSuite{}
	s.SetLogger(zaptest.NewLogger(t))
	env := s.NewTestWorkflowEnvironment()
	wf := func(ctx Context) error {
		ctx, cancel := WithCancel(ctx)
		racyCancel := func(ctx Context) {
			defer cancel() // defer is necessary as Sleep will never return due to Goexit
			_ = Sleep(ctx, time.Hour)
		}
		// start a handful to increase odds of a race being detected
		for i := 0; i < 10; i++ {
			Go(ctx, racyCancel)
		}
		_ = Sleep(ctx, time.Minute) // die early
		return nil
	}
	env.RegisterWorkflow(wf)
	env.ExecuteWorkflow(wf)
	assert.NoError(t, env.GetWorkflowError())
}
|
package main
import (
"fmt"
"time"
)
// go channel concept is blocking
// <- c read from channel
// waiting for write channel
// if read only none data on channel program we call dead lock mode
// thread all sleep
// c <- 1 write to channel
// main runs the channel-blocking demo. Other demos in this file
// (channel1, channelDeadLockCase, channelCreatelimitSlot) can be tried by
// calling them here instead.
func main() {
	fmt.Println("with channel")
	// channel1()
	channelCaseBlocking()
}
// channel1 wires add and multiply together over an unbuffered channel:
// add sends 10+5, multiply receives the sum, doubles it and prints the
// result. The one-second sleep keeps the caller alive long enough for
// both goroutines to finish.
func channel1() {
	ch := make(chan int)
	go add(ch, 10, 5)
	go multiply(ch)
	time.Sleep(1 * time.Second)
}
// add computes a+b and delivers the sum on c (blocking until a receiver
// is ready when c is unbuffered).
func add(c chan int, a int, b int) {
	sum := a + b
	c <- sum // publish the result
}
// multiply receives one value from c, doubles it, and prints the result.
func multiply(c chan int) {
	v := <-c // blocks until a sender delivers a value
	fmt.Printf("Result is %d\n", v*2)
}
// channelCaseBlocking starts two goroutines that coordinate through an
// unbuffered channel: routine1 blocks on a receive until routine2 sends.
// The three-second sleep keeps the caller alive so both can finish.
func channelCaseBlocking() {
	c := make(chan bool)
	go routine1(c)
	go routine2(c)
	time.Sleep(3 * time.Second)
}
// routine1 blocks on c until routine2 (or any sender) delivers a value,
// then reports that it was notified.
func routine1(c chan bool) {
	fmt.Println("#1 has started, waiting for #2 to start")
	<-c // receive blocks until a matching send happens
	fmt.Println("#1 received a notification from #2")
}
// routine2 simulates two seconds of work, then notifies routine1 by
// sending on c.
func routine2(c chan bool) {
	fmt.Println("#2 has started, do some work and notify #1")
	time.Sleep(2 * time.Second) // simulate working
	c <- true // send unblocks the receiver in routine1
	fmt.Println("#2 has finished")
}
// channelDeadLockCase demonstrates a guaranteed deadlock: the receive
// below can never complete because nothing ever sends on c, so the Go
// runtime will abort with "all goroutines are asleep - deadlock!".
func channelDeadLockCase() {
	c := make(chan bool)
	// deadlock: the channel is never written to
	<-c // read channel
	fmt.Println("DONE") // never reach this print
}
// channelCreatelimitSlot demonstrates using a buffered channel as a
// semaphore to bound goroutine concurrency: the send on c blocks once all
// 3 buffer slots are taken, and each goroutine frees a slot by receiving
// when it finishes. Without such a bound, spawning a goroutine per item
// can overwhelm the CPU.
//
// NOTE: this function loops forever by design and never returns.
func channelCreatelimitSlot() {
	c := make(chan bool, 3)
	// At most 3 goroutines run at once; the 4th send blocks until one
	// of them drains a slot with <-c.
	for {
		go func() {
			fmt.Println(time.Now().Second())
			time.Sleep(2 * time.Second)
			<-c
		}()
		c <- true
	}
}
|
package waktu_test
import (
"fmt"
"testing"
. "github.com/gomodul/waktu"
)
// TestTime_StartOfDay only prints the value for manual inspection; it
// asserts nothing. NOTE(review): `now` is presumably a package-level value
// from the dot-imported waktu package — consider asserting that the
// hour/minute/second components are zero.
func TestTime_StartOfDay(t *testing.T) {
	fmt.Println(now.StartOfDay())
}
// TestTime_StartOfWeek verifies that the computed start of the week falls
// on Minggu (Sunday).
func TestTime_StartOfWeek(t *testing.T) {
	got := int(now.StartOfWeek().Weekday())
	if got != int(Minggu) {
		t.Fatal("invalid value")
	}
}
// TestTime_StartOfMonth only prints the value for manual inspection; it
// asserts nothing — consider asserting Day() == 1.
func TestTime_StartOfMonth(t *testing.T) {
	fmt.Println(now.StartOfMonth())
}
// TestTime_StartOfYear only prints the value for manual inspection; it
// asserts nothing — consider asserting Month() == January and Day() == 1.
func TestTime_StartOfYear(t *testing.T) {
	fmt.Println(now.StartOfYear())
}
|
package bind
import (
. "bytes"
. "github.com/rainmyy/easyDB/library/common"
. "github.com/rainmyy/easyDB/library/strategy"
)
// String serializes a tree structure into a textual (JSON-like)
// representation held in an in-memory buffer.
type String struct {
	value *Buffer // accumulated output; populated by Bind, nil before then
}
// Bind serializes treeList into s.value via BindString, wrapping the
// output in brackets when the list holds more than one tree.
func (s *String) Bind(treeList []*TreeStruct) {
	buf := NewBuffer([]byte{})
	wrapped := len(treeList) > 1
	if wrapped {
		buf.WriteRune(LeftBracket)
	}
	BindString(treeList, buf)
	if wrapped {
		buf.WriteRune(RightBracket)
	}
	s.value = buf
}
// GetValue returns the buffer built by Bind (nil until Bind is called).
func (s *String) GetValue() interface{} {
	return s.value
}
// StringInstance returns a new, empty String binder.
func StringInstance() *String {
	return new(String)
}

// StrigInstance returns a new, empty String binder.
//
// Deprecated: the name contains a typo; use StringInstance instead. This
// wrapper is kept for backward compatibility with existing callers.
func StrigInstance() *String {
	return StringInstance()
}
/**
 * BindString recursively serializes treeList into buffer, producing e.g.:
 * [{"test":[{"params":[{"name":"name1"},{"key":"value"},{"count":{"value":"www"}}]},{"params":[{"name":"name2"},{"key":"value"}]}]}]
 * It returns the child count of the last non-leaf node serialized, or 0
 * when treeList is empty or contains only leaves.
 *
 * Note: a loop that counted leaf nodes into a never-read variable was
 * removed — it was dead work with no effect on the output.
 */
func BindString(treeList []*TreeStruct, buffer *Buffer) int {
	if len(treeList) == 0 {
		return 0
	}
	childrenNum := 0
	for key, val := range treeList {
		nodeList := val.GetNode()
		if len(nodeList) <= 0 {
			continue
		}
		node := nodeList[0]
		buffer.WriteRune(LeftRance)
		if val.IsLeaf() {
			// Leaf: emit "name":"data".
			buffer.WriteString(formatBytes(node.GetName()))
			buffer.WriteRune(Colon)
			buffer.WriteString(formatBytes(node.GetData()))
		} else {
			// Internal node: emit "name": followed by the serialized
			// children, bracketed when there is more than one child.
			childrenNum = len(val.GetChildren())
			buffer.WriteString(formatBytes(node.GetName()))
			buffer.WriteRune(Colon)
			if childrenNum > 1 {
				buffer.WriteRune(LeftBracket)
			}
			BindString(val.GetChildren(), buffer)
			if childrenNum > 1 {
				buffer.WriteRune(RightBracket)
			}
		}
		buffer.WriteRune(RightRance)
		// Separate sibling entries with a comma.
		if key != len(treeList)-1 {
			buffer.WriteRune(Comma)
		}
	}
	return childrenNum
}
/**
 * UnBind parses the serialized string back into tree data.
 * Not implemented yet: it currently always returns nil.
 */
func (s *String) UnBind() []*TreeStruct {
	return nil
}
|
package main
import (
"fmt"
"github.com/mitsuhide1992/language/structUtil"
)
// main prints ten values produced by structUtil.Fibonacci: a consumer
// goroutine receives ten numbers from c and then signals completion on
// quit. NOTE(review): this assumes Fibonacci selects between sending on c
// and receiving on quit — confirm against structUtil.
func main() {
	c := make(chan int)
	quit := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			fmt.Println(<-c)
		}
		quit <- 0
	}()
	structUtil.Fibonacci(c, quit)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"crypto/tls"
"fmt"
"net"
"strings"
"time"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/session/txninfo"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/memory"
"github.com/tikv/client-go/v2/oracle"
)
// OOMAlarmVariablesInfo is a struct for OOM alarm variables.
// It carries the session-level settings reported when an OOM alarm fires.
type OOMAlarmVariablesInfo struct {
	SessionAnalyzeVersion int // session's analyze-version setting
	SessionEnabledRateLimitAction bool // whether the rate-limit action is enabled for the session
	SessionMemQuotaQuery int64 // session memory quota for a single query
}
// ProcessInfo is a struct used for show processlist statement.
type ProcessInfo struct {
	Time time.Time // when the process started; basis for the "Time" column (see ToRowForShow)
	ExpensiveLogTime time.Time
	ExpensiveTxnLogTime time.Time
	CurTxnCreateTime time.Time
	Plan interface{}
	StmtCtx *stmtctx.StatementContext
	RefCountOfStmtCtx *stmtctx.ReferenceCount
	MemTracker *memory.Tracker // memory consumed by the statement; reported in ToRow
	DiskTracker *disk.Tracker // disk consumed by the statement; reported in ToRow
	StatsInfo func(interface{}) map[string]uint64
	RuntimeStatsColl *execdetails.RuntimeStatsColl
	DB string // current database; shown as NULL when empty
	Digest string
	Host string // client host; joined with Port for display when Port is non-empty
	User string
	Info string // current statement text; truncated to 100 chars by non-full SHOW PROCESSLIST
	Port string
	ResourceGroupName string
	PlanExplainRows [][]string
	TableIDs []int64
	IndexNames []string
	OOMAlarmVariablesInfo OOMAlarmVariablesInfo
	ID uint64 // connection/process ID
	CurTxnStartTS uint64 // start TS of the current transaction; 0 when none (see txnStartTs)
	// MaxExecutionTime is the timeout for select statement, in milliseconds.
	// If the query takes too long, kill it.
	MaxExecutionTime uint64
	State uint16 // server status bits; rendered via serverStatus2Str
	Command byte // MySQL command byte; rendered via mysql.Command2Str
	RedactSQL bool
}
// ToRowForShow returns []interface{} for the row data of "SHOW [FULL] PROCESSLIST".
// Empty Info/DB render as NULL (nil); without FULL the statement text is
// truncated to 100 characters.
func (pi *ProcessInfo) ToRowForShow(full bool) []interface{} {
	var info interface{}
	if pi.Info != "" {
		if full {
			info = pi.Info
		} else {
			// Truncate to at most 100 characters for the non-FULL variant.
			info = fmt.Sprintf("%.100v", pi.Info)
		}
	}
	var db interface{}
	if pi.DB != "" {
		db = pi.DB
	}
	host := pi.Host
	if pi.Port != "" {
		host = net.JoinHostPort(pi.Host, pi.Port)
	}
	elapsedSecs := uint64(time.Since(pi.Time) / time.Second)
	return []interface{}{
		pi.ID,
		pi.User,
		host,
		db,
		mysql.Command2Str[pi.Command],
		elapsedSecs,
		serverStatus2Str(pi.State),
		info,
	}
}
// String renders the process info as a single-line, brace-wrapped summary
// using the non-FULL processlist columns.
func (pi *ProcessInfo) String() string {
	fields := pi.ToRowForShow(false)
	return fmt.Sprintf("{id:%v, user:%v, host:%v, db:%v, command:%v, time:%v, state:%v, info:%v}", fields...)
}
// txnStartTs formats the current transaction's start timestamp as
// "<physical time in tz>(<ts>)", or returns "" when there is no active
// transaction (CurTxnStartTS == 0).
func (pi *ProcessInfo) txnStartTs(tz *time.Location) (txnStart string) {
	if pi.CurTxnStartTS > 0 {
		physicalTime := oracle.GetTimeFromTS(pi.CurTxnStartTS)
		txnStart = fmt.Sprintf("%s(%d)", physicalTime.In(tz).Format("01-02 15:04:05.000"), pi.CurTxnStartTS)
	}
	return
}
// ToRow returns []interface{} for the row data of
// "SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST".
func (pi *ProcessInfo) ToRow(tz *time.Location) []interface{} {
	bytesConsumed := int64(0)
	diskConsumed := int64(0)
	// NOTE(review): the trackers are only consulted while StmtCtx is
	// non-nil — presumably they belong to the current statement and are
	// meaningless for an idle session; confirm before changing the guard.
	if pi.StmtCtx != nil {
		if pi.MemTracker != nil {
			bytesConsumed = pi.MemTracker.BytesConsumed()
		}
		if pi.DiskTracker != nil {
			diskConsumed = pi.DiskTracker.BytesConsumed()
		}
	}
	return append(pi.ToRowForShow(true), pi.Digest, bytesConsumed, diskConsumed, pi.txnStartTs(tz), pi.ResourceGroupName)
}
// ascServerStatus is a slice of all defined server status in ascending order.
// ascServerStatus lists every defined server-status bit in ascending
// order; serverStatus2Str iterates it so the rendered string has a
// deterministic order. Keep it in sync with mapServerStatus2Str.
var ascServerStatus = []uint16{
	mysql.ServerStatusInTrans,
	mysql.ServerStatusAutocommit,
	mysql.ServerMoreResultsExists,
	mysql.ServerStatusNoGoodIndexUsed,
	mysql.ServerStatusNoIndexUsed,
	mysql.ServerStatusCursorExists,
	mysql.ServerStatusLastRowSend,
	mysql.ServerStatusDBDropped,
	mysql.ServerStatusNoBackslashEscaped,
	mysql.ServerStatusMetadataChanged,
	mysql.ServerStatusWasSlow,
	mysql.ServerPSOutParams,
}
// mapServerStatus2Str is the map for server status to string.
// mapServerStatus2Str is the map for server status to string.
// Keep its key set in sync with ascServerStatus.
var mapServerStatus2Str = map[uint16]string{
	mysql.ServerStatusInTrans:            "in transaction",
	mysql.ServerStatusAutocommit:         "autocommit",
	mysql.ServerMoreResultsExists:        "more results exists",
	mysql.ServerStatusNoGoodIndexUsed:    "no good index used",
	mysql.ServerStatusNoIndexUsed:        "no index used",
	mysql.ServerStatusCursorExists:       "cursor exists",
	mysql.ServerStatusLastRowSend:        "last row send",
	mysql.ServerStatusDBDropped:          "db dropped",
	mysql.ServerStatusNoBackslashEscaped: "no backslash escaped",
	mysql.ServerStatusMetadataChanged:    "metadata changed",
	mysql.ServerStatusWasSlow:            "was slow",
	mysql.ServerPSOutParams:              "ps out params",
}
// serverStatus2Str convert server status to string.
// Param state is a bit-field. (e.g. 0x0003 = "in transaction; autocommit").
// serverStatus2Str convert server status to string.
// Param state is a bit-field. (e.g. 0x0003 = "in transaction; autocommit").
func serverStatus2Str(state uint16) string {
	// parts collects the human-readable names of the set bits, in the
	// order defined by ascServerStatus.
	//nolint: prealloc
	var parts []string
	for _, flag := range ascServerStatus {
		if state&flag != 0 {
			parts = append(parts, mapServerStatus2Str[flag])
		}
	}
	return strings.Join(parts, "; ")
}
// SessionManager is an interface for session manage. Show processlist and
// kill statement rely on this interface.
type SessionManager interface {
	// ShowProcessList returns the info of every client connection, keyed by connection ID.
	ShowProcessList() map[uint64]*ProcessInfo
	// ShowTxnList returns info about running transactions.
	ShowTxnList() []*txninfo.TxnInfo
	// GetProcessInfo returns the process info for a connection ID, and whether it exists.
	GetProcessInfo(id uint64) (*ProcessInfo, bool)
	// Kill terminates a connection or its current query.
	Kill(connectionID uint64, query bool, maxExecutionTime bool)
	// KillAllConnections terminates every client connection.
	KillAllConnections()
	// UpdateTLSConfig replaces the TLS config used for new connections.
	UpdateTLSConfig(cfg *tls.Config)
	// ServerID returns the server's ID.
	ServerID() uint64
	// GetAutoAnalyzeProcID returns processID for auto analyze
	GetAutoAnalyzeProcID() uint64
	// StoreInternalSession puts the internal session pointer to the map in the SessionManager.
	StoreInternalSession(se interface{})
	// DeleteInternalSession deletes the internal session pointer from the map in the SessionManager.
	DeleteInternalSession(se interface{})
	// GetInternalSessionStartTSList gets all startTS of every transactions running in the current internal sessions.
	GetInternalSessionStartTSList() []uint64
	// CheckOldRunningTxn checks if there is an old transaction running in the current sessions
	CheckOldRunningTxn(job2ver map[int64]int64, job2ids map[int64]string)
	// KillNonFlashbackClusterConn kill all non flashback cluster connections.
	KillNonFlashbackClusterConn()
	// GetConAttrs gets the connection attributes
	GetConAttrs() map[uint64]map[string]string
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package tasks
import (
"context"
"errors"
"fmt"
"time"
"github.com/Tencent/bk-bcs/bcs-common/common/blog"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/cloudprovider"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/cloudprovider/qcloud/api"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/remote/loop"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/utils"
"github.com/avast/retry-go"
)
// CleanNodeGroupNodesTask clean node group nodes task
// CleanNodeGroupNodesTask removes the given nodes from the node group's
// autoscaling group. It reads clusterID/nodeGroupID/cloudID from the step
// params and the node ID list from the task common params; any validation
// or cloud failure marks the step failed and is returned to the caller.
func CleanNodeGroupNodesTask(taskID string, stepName string) error {
	start := time.Now()
	// get task and task current step
	state, step, err := cloudprovider.GetTaskStateAndCurrentStep(taskID, stepName)
	if err != nil {
		return err
	}
	// previous step successful when retry task
	if step == nil {
		return nil
	}
	// extract parameter && check validate
	clusterID := step.Params[cloudprovider.ClusterIDKey.String()]
	nodeGroupID := step.Params[cloudprovider.NodeGroupIDKey.String()]
	cloudID := step.Params[cloudprovider.CloudIDKey.String()]
	nodeIDs := cloudprovider.ParseNodeIpOrIdFromCommonMap(state.Task.CommonParams,
		cloudprovider.NodeIDsKey.String(), ",")
	if len(clusterID) == 0 || len(nodeGroupID) == 0 || len(cloudID) == 0 || len(nodeIDs) == 0 {
		blog.Errorf("CleanNodeGroupNodesTask[%s]: check parameter validate failed", taskID)
		retErr := fmt.Errorf("CleanNodeGroupNodesTask check parameters failed")
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	dependInfo, err := cloudprovider.GetClusterDependBasicInfo(cloudprovider.GetBasicInfoReq{
		ClusterID:   clusterID,
		CloudID:     cloudID,
		NodeGroupID: nodeGroupID,
	})
	if err != nil {
		blog.Errorf("CleanNodeGroupNodesTask[%s]: GetClusterDependBasicInfo failed: %s", taskID, err.Error())
		retErr := fmt.Errorf("CleanNodeGroupNodesTask GetClusterDependBasicInfo failed")
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	if dependInfo.NodeGroup.AutoScaling == nil || dependInfo.NodeGroup.AutoScaling.AutoScalingID == "" {
		blog.Errorf("CleanNodeGroupNodesTask[%s]: nodegroup %s in task %s step %s has no autoscaling group",
			taskID, nodeGroupID, taskID, stepName)
		// fix: err is always nil here (the previous call succeeded), so the
		// old message rendered "get autoScalingID err, <nil>". Describe the
		// actual condition instead.
		retErr := fmt.Errorf("nodegroup %s has no autoscaling group", nodeGroupID)
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	// inject taskID
	ctx := cloudprovider.WithTaskIDForContext(context.Background(), taskID)
	err = removeAsgInstances(ctx, dependInfo, nodeIDs)
	if err != nil {
		blog.Errorf("CleanNodeGroupNodesTask[%s] nodegroup %s removeAsgInstances failed: %v",
			taskID, nodeGroupID, err)
		retErr := fmt.Errorf("removeAsgInstances err, %v", err)
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	// update step
	if err := state.UpdateStepSucc(start, stepName); err != nil {
		blog.Errorf("CleanNodeGroupNodesTask[%s] task %s %s update to storage fatal", taskID, taskID, stepName)
		return err
	}
	return nil
}
// removeAsgInstances detaches the given node IDs from the node group's
// autoscaling group, skipping IDs that are no longer present in the ASG.
// The remove call is retried up to 3 times.
func removeAsgInstances(ctx context.Context, info *cloudprovider.CloudDependBasicInfo, nodeIDs []string) error {
	taskID := cloudprovider.GetTaskIDFromContext(ctx)
	asgID, err := getAsgIDByNodePool(ctx, info)
	if err != nil {
		return fmt.Errorf("removeAsgInstances[%s] getAsgIDByNodePool failed: %v", taskID, err)
	}
	// build the autoscaling client for this cloud account
	asCli, err := api.NewASClient(info.CmOption)
	if err != nil {
		blog.Errorf("removeAsgInstances[%s] get as client failed: %v", taskID, err.Error())
		return err
	}
	// keep only the requested nodes that still exist in the ASG
	asgInstances, err := asCli.DescribeAutoScalingInstances(asgID)
	if err != nil {
		blog.Errorf("removeAsgInstances[%s] DescribeAutoScalingInstances[%s] failed: %v", taskID, asgID, err.Error())
		return err
	}
	instanceIDList := make([]string, 0, len(asgInstances))
	for _, ins := range asgInstances {
		instanceIDList = append(instanceIDList, *ins.InstanceID)
	}
	validateInstances := make([]string, 0, len(nodeIDs))
	for _, id := range nodeIDs {
		if utils.StringInSlice(id, instanceIDList) {
			validateInstances = append(validateInstances, id)
		}
	}
	if len(validateInstances) == 0 {
		blog.Infof("removeAsgInstances[%s] validateInstances is empty", taskID)
		return nil
	}
	blog.Infof("removeAsgInstances[%s] validateInstances[%v]", taskID, validateInstances)
	// return retry.Do directly instead of the redundant
	// `if err != nil { return err }; return nil` tail
	return retry.Do(func() error {
		activityID, err := asCli.RemoveInstances(asgID, validateInstances)
		if err != nil {
			blog.Errorf("removeAsgInstances[%s] RemoveInstances failed: %v", taskID, err)
			return err
		}
		blog.Infof("removeAsgInstances[%s] RemoveInstances[%v] successful[%s]", taskID, nodeIDs, activityID)
		return nil
	}, retry.Attempts(3))
}
// CheckClusterCleanNodsTask check cluster clean nodes task
// CheckClusterCleanNodsTask check cluster clean nodes task.
// It polls (every 30s, up to 5 minutes) until none of the given node IDs
// remain in the cluster. A poll timeout is logged but does NOT fail the
// step — the step is marked successful either way.
func CheckClusterCleanNodsTask(taskID string, stepName string) error {
	start := time.Now()
	// get task and task current step
	state, step, err := cloudprovider.GetTaskStateAndCurrentStep(taskID, stepName)
	if err != nil {
		return err
	}
	// previous step successful when retry task
	if step == nil {
		return nil
	}
	// extract parameter && check validate
	clusterID := step.Params[cloudprovider.ClusterIDKey.String()]
	nodeGroupID := step.Params[cloudprovider.NodeGroupIDKey.String()]
	cloudID := step.Params[cloudprovider.CloudIDKey.String()]
	nodeIDs := cloudprovider.ParseNodeIpOrIdFromCommonMap(state.Task.CommonParams,
		cloudprovider.NodeIDsKey.String(), ",")
	if len(clusterID) == 0 || len(nodeGroupID) == 0 || len(cloudID) == 0 || len(nodeIDs) == 0 {
		blog.Errorf("CheckClusterCleanNodsTask[%s]: check parameter validate failed", taskID)
		retErr := fmt.Errorf("CheckClusterCleanNodsTask check parameters failed")
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	dependInfo, err := cloudprovider.GetClusterDependBasicInfo(cloudprovider.GetBasicInfoReq{
		ClusterID:   clusterID,
		CloudID:     cloudID,
		NodeGroupID: nodeGroupID,
	})
	if err != nil {
		blog.Errorf("CheckClusterCleanNodsTask[%s]: GetClusterDependBasicInfo failed: %s", taskID, err.Error())
		retErr := fmt.Errorf("CheckClusterCleanNodsTask GetClusterDependBasicInfo failed")
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	// inject taskID
	ctx := cloudprovider.WithTaskIDForContext(context.Background(), taskID)
	// wait check delete component status
	timeContext, cancel := context.WithTimeout(ctx, time.Minute*5)
	defer cancel()
	err = loop.LoopDoFunc(timeContext, func() error {
		exist, notExist, err := FilterClusterInstanceFromNodesIDs(timeContext, dependInfo, nodeIDs)
		if err != nil {
			// transient listing errors are tolerated; keep polling
			blog.Errorf("CheckClusterCleanNodsTask[%s] FilterClusterInstanceFromNodesIDs failed: %v", taskID, err)
			return nil
		}
		blog.Infof("CheckClusterCleanNodsTask[%s] nodeIDs[%v] exist[%v] notExist[%v]",
			taskID, nodeIDs, exist, notExist)
		if len(exist) == 0 {
			return loop.EndLoop
		}
		return nil
	}, loop.LoopInterval(30*time.Second))
	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
		blog.Errorf("CheckClusterCleanNodsTask[%s] cluster[%s] failed: %v", taskID, clusterID, err)
	}
	// timeout error
	if errors.Is(err, context.DeadlineExceeded) {
		blog.Infof("CheckClusterCleanNodsTask[%s] cluster[%s] timeout failed: %v", taskID, clusterID, err)
	}
	// update step
	if err := state.UpdateStepSucc(start, stepName); err != nil {
		blog.Errorf("CheckClusterCleanNodsTask[%s] task %s %s update to storage fatal", taskID, taskID, stepName)
		return err
	}
	return nil
}
// CheckCleanNodeGroupNodesStatusTask check clean node group nodes status task
func CheckCleanNodeGroupNodesStatusTask(taskID string, stepName string) error {
start := time.Now()
//get task information and validate
state, step, err := cloudprovider.GetTaskStateAndCurrentStep(taskID, stepName)
if err != nil {
return err
}
if step == nil {
return nil
}
// step login started here
clusterID := step.Params[cloudprovider.ClusterIDKey.String()]
nodeGroupID := step.Params[cloudprovider.NodeGroupIDKey.String()]
cloudID := step.Params[cloudprovider.CloudIDKey.String()]
dependInfo, err := cloudprovider.GetClusterDependBasicInfo(cloudprovider.GetBasicInfoReq{
ClusterID: clusterID,
CloudID: cloudID,
NodeGroupID: nodeGroupID,
})
if err != nil {
blog.Errorf("CheckCleanNodeGroupNodesStatusTask[%s]: GetClusterDependBasicInfo for nodegroup %s in task %s step %s failed, %s",
taskID, nodeGroupID, taskID, stepName, err.Error())
retErr := fmt.Errorf("get cloud/cluster information failed, %s", err.Error())
_ = state.UpdateStepFailure(start, stepName, retErr)
return retErr
}
// get qcloud client
cli, err := api.NewTkeClient(dependInfo.CmOption)
if err != nil {
blog.Errorf("CheckCleanNodeGroupNodesStatusTask[%s]: get tke client for nodegroup[%s] in task %s step %s failed, %s",
taskID, nodeGroupID, taskID, stepName, err.Error())
retErr := fmt.Errorf("get cloud tke client err, %s", err.Error())
_ = state.UpdateStepFailure(start, stepName, retErr)
return retErr
}
// wait node group state to normal
ctx, cancel := context.WithTimeout(context.TODO(), 20*time.Minute)
defer cancel()
// wait all nodes to be ready
err = loop.LoopDoFunc(ctx, func() error {
np, errPool := cli.DescribeClusterNodePoolDetail(dependInfo.Cluster.SystemID, dependInfo.NodeGroup.CloudNodeGroupID)
if errPool != nil {
blog.Errorf("taskID[%s] CheckCleanNodeGroupNodesStatusTask[%s/%s] failed: %v", taskID, dependInfo.NodeGroup.ClusterID,
dependInfo.NodeGroup.CloudNodeGroupID, errPool)
return nil
}
if np == nil || np.NodeCountSummary == nil {
return nil
}
if np.NodeCountSummary.ManuallyAdded == nil || np.NodeCountSummary.AutoscalingAdded == nil {
return nil
}
allNormalNodesCount := *np.NodeCountSummary.ManuallyAdded.Normal + *np.NodeCountSummary.AutoscalingAdded.Normal
switch {
case *np.DesiredNodesNum == allNormalNodesCount:
return loop.EndLoop
default:
return nil
}
}, loop.LoopInterval(10*time.Second))
if err != nil {
blog.Errorf("taskID[%s] DescribeClusterNodePoolDetail failed: %v", taskID, err)
return err
}
return nil
}
// UpdateCleanNodeGroupNodesDBInfoTask update clean node group nodes db info task
// UpdateCleanNodeGroupNodesDBInfoTask refreshes the node group's desired
// size in the local database from the cloud node pool's current value,
// then marks the step successful.
func UpdateCleanNodeGroupNodesDBInfoTask(taskID string, stepName string) error {
	start := time.Now()
	//get task information and validate
	state, step, err := cloudprovider.GetTaskStateAndCurrentStep(taskID, stepName)
	if err != nil {
		return err
	}
	if step == nil {
		return nil
	}
	// step login started here
	clusterID := step.Params[cloudprovider.ClusterIDKey.String()]
	nodeGroupID := step.Params[cloudprovider.NodeGroupIDKey.String()]
	cloudID := step.Params[cloudprovider.CloudIDKey.String()]
	dependInfo, err := cloudprovider.GetClusterDependBasicInfo(cloudprovider.GetBasicInfoReq{
		ClusterID:   clusterID,
		CloudID:     cloudID,
		NodeGroupID: nodeGroupID,
	})
	if err != nil {
		// fix: log prefix previously said CheckCleanNodeGroupNodesStatusTask
		// (copy-paste), which misattributed this task's failures.
		blog.Errorf("UpdateCleanNodeGroupNodesDBInfoTask[%s]: GetClusterDependBasicInfo for nodegroup %s in task %s step %s failed, %s",
			taskID, nodeGroupID, taskID, stepName, err.Error())
		retErr := fmt.Errorf("get cloud/cluster information failed, %s", err.Error())
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	// get qcloud client
	cli, err := api.NewTkeClient(dependInfo.CmOption)
	if err != nil {
		blog.Errorf("UpdateCleanNodeGroupNodesDBInfoTask[%s]: get tke client for nodegroup[%s] in task %s step %s failed, %s",
			taskID, nodeGroupID, taskID, stepName, err.Error())
		retErr := fmt.Errorf("get cloud tke client err, %s", err.Error())
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return retErr
	}
	np, err := cli.DescribeClusterNodePoolDetail(dependInfo.Cluster.SystemID, dependInfo.NodeGroup.CloudNodeGroupID)
	if err != nil {
		blog.Errorf("taskID[%s] DescribeClusterNodePoolDetail[%s/%s] failed: %v", taskID, dependInfo.NodeGroup.ClusterID,
			dependInfo.NodeGroup.CloudNodeGroupID, err)
		retErr := fmt.Errorf("DescribeClusterNodePoolDetail err, %s", err.Error())
		_ = state.UpdateStepFailure(start, stepName, retErr)
		// NOTE(review): returns nil after marking the step failed (the task
		// continues) — confirm this is intentional; siblings return retErr.
		return nil
	}
	// will do update nodes info
	err = updateNodeGroupDesiredSize(nodeGroupID, uint32(*np.DesiredNodesNum))
	if err != nil {
		blog.Errorf("taskID[%s] updateNodeGroupDesiredSize[%s/%d] failed: %v", taskID, nodeGroupID,
			*np.DesiredNodesNum, err)
		retErr := fmt.Errorf("updateNodeGroupDesiredSize err, %s", err.Error())
		_ = state.UpdateStepFailure(start, stepName, retErr)
		return nil
	}
	// update step
	if err := state.UpdateStepSucc(start, stepName); err != nil {
		blog.Errorf("UpdateCleanNodeGroupNodesDBInfoTask[%s] task %s %s update to storage fatal", taskID, taskID, stepName)
		return err
	}
	return nil
}
|
package main
import "fmt"
// Rectangle is an axis-aligned rectangle with side lengths x and y.
type Rectangle struct {
	x uint64
	y uint64
}

// RectanglePerimeter returns the perimeter of r (twice the sum of its sides).
func RectanglePerimeter(r *Rectangle) uint64 {
	return 2 * (r.x + r.y)
}

// RectangleArea returns the area of r (product of its sides).
func RectangleArea(r *Rectangle) uint64 {
	return r.x * r.y
}
// main prints the perimeter and area of a sample 10x20 rectangle.
func main() {
	rect := Rectangle{x: 10, y: 20}
	fmt.Println("사각형의 둘레 : ", RectanglePerimeter(&rect))
	fmt.Println("사각형의 넓이 : ", RectangleArea(&rect))
}
|
package gogrep_test
import (
"context"
"errors"
"fmt"
"io"
"sort"
"strings"
"testing"
"time"
"github.com/berquerant/gogrep"
"github.com/stretchr/testify/assert"
)
// dupStrings returns the seeds repeated cyclically so that the result
// contains n copies of each seed (len(seeds)*n elements in total).
func dupStrings(n int, seeds ...string) []string {
	out := make([]string, n*len(seeds))
	for i := range out {
		out[i] = seeds[i%len(seeds)]
	}
	return out
}
// toResultSlice drains resultC until it is closed and returns every
// received result in arrival order.
func toResultSlice(resultC <-chan gogrep.Result) []gogrep.Result {
	results := []gogrep.Result{}
	for r := range resultC {
		results = append(results, r)
	}
	return results
}
// errReader is an io.Reader stub whose Read always fails with err,
// used to exercise the grepper's source-error path.
type errReader struct {
	err error
}

func (s *errReader) Read(_ []byte) (int, error) { return 0, s.err }
// delayReader wraps an io.Reader and sleeps before every Read,
// used to exercise context-cancellation during a slow read.
type delayReader struct {
	delay  time.Duration
	reader io.Reader
}

func (s *delayReader) Read(p []byte) (int, error) {
	time.Sleep(s.delay)
	return s.reader.Read(p)
}
// TestGrepper covers the grepper's error paths (pre-canceled context,
// invalid regex, failing source, mid-read cancellation) and then a table
// of matching scenarios of varying sizes.
func TestGrepper(t *testing.T) {
	t.Run("already canceled", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.TODO())
		cancel()
		_, err := gogrep.New().Grep(ctx, "ra", nil)
		assert.ErrorIs(t, err, context.Canceled)
	})
	t.Run("invalid regex", func(t *testing.T) {
		// "?" has nothing to repeat, so regexp compilation fails.
		_, err := gogrep.New().Grep(context.TODO(), "?", nil)
		assert.Contains(t, err.Error(), "Grepper cannot compile regex")
	})
	t.Run("scan error", func(t *testing.T) {
		// a failing reader should surface as exactly one error result.
		readErr := errors.New("reader")
		resultC, err := gogrep.New().Grep(context.TODO(), ".", &errReader{
			err: readErr,
		})
		assert.Nil(t, err)
		results := toResultSlice(resultC)
		assert.Equal(t, 1, len(results))
		gotErr := results[0].Err()
		assert.NotNil(t, gotErr)
		assert.ErrorIs(t, gotErr, readErr)
		assert.Contains(t, gotErr.Error(), "Grepper got error from source")
	})
	t.Run("canceled", func(t *testing.T) {
		// the 200ms deadline expires during the 500ms-delayed read,
		// so the single result carries DeadlineExceeded.
		grepper := gogrep.New(gogrep.WithResultBufferSize(1))
		source := &delayReader{
			reader: strings.NewReader("delayed"),
			delay:  500 * time.Millisecond,
		}
		ctx, cancel := context.WithTimeout(context.TODO(), 200*time.Millisecond)
		defer cancel()
		resultC, err := grepper.Grep(ctx, `.+`, source)
		assert.Nil(t, err)
		results := toResultSlice(resultC)
		assert.Equal(t, 1, len(results))
		assert.ErrorIs(t, results[0].Err(), context.DeadlineExceeded)
	})
	for _, tc := range []*struct {
		title string
		regex string
		input []string
		want  []string
	}{
		{
			title: "no input",
			regex: "vanity",
		},
		{
			title: "not matched",
			regex: "vanity",
			input: []string{"empty"},
		},
		{
			title: "matched",
			regex: "vanity",
			input: []string{"vanity"},
			want:  []string{"vanity"},
		},
		{
			title: "long input not matched",
			regex: "vanity",
			input: dupStrings(300, "empty"),
		},
		{
			title: "long input matched",
			regex: "vanity",
			input: dupStrings(300, "vanity"),
			want:  dupStrings(300, "vanity"),
		},
		{
			title: "long input matched partially",
			regex: "afford|deny",
			input: dupStrings(300, "empty", "afford", "vanity", "deny"),
			want:  dupStrings(300, "afford", "deny"),
		},
		{
			title: "long input matched partially lines",
			regex: "afford|prove|those$",
			input: dupStrings(300, "one of those days", "affordance", "vanitas", "prove all things"),
			want:  dupStrings(300, "affordance", "prove all things"),
		},
	} {
		tc := tc
		t.Run(tc.title, func(t *testing.T) {
			source := strings.NewReader(strings.Join(tc.input, "\n"))
			resultC, err := gogrep.New().Grep(context.TODO(), tc.regex, source)
			if err != nil {
				t.Fatal(err)
			}
			got := []string{}
			for matched := range resultC {
				assert.Nil(t, matched.Err())
				got = append(got, matched.Text())
			}
			// results may arrive in any order; compare as sorted multisets.
			assert.Equal(t, len(tc.want), len(got))
			sort.Strings(tc.want)
			sort.Strings(got)
			for i, w := range tc.want {
				g := got[i]
				assert.Equal(t, w, g)
			}
		})
	}
}
// BenchmarkGrepper measures grep throughput at 1, 2, 4, 8, 16 and 32
// worker threads over b.N repetitions of a fixed line set.
func BenchmarkGrepper(b *testing.B) {
	for i := 0; i <= 5; i++ {
		threads := 1 << i
		b.Run(fmt.Sprintf("with %d threads", threads), func(b *testing.B) {
			data := strings.NewReader(strings.Join(
				dupStrings(b.N, "allocation", "freeable", "cached", "dirty", "flush memory", "NAND", "ready to write"), "\n"))
			// exclude input construction from the timing
			b.ResetTimer()
			resultC, err := gogrep.New(gogrep.WithThreads(threads)).Grep(context.TODO(), "[cf].+sh", data)
			if err != nil {
				b.Fatal(err)
			}
			// drain all results so the full pipeline is measured
			for range resultC {
			}
		})
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/mattnappo/yearbook/api"
"github.com/mattnappo/yearbook/common"
"github.com/mattnappo/yearbook/database"
)
// Command-line flags controlling one-off database setup actions,
// notifications, and the API server port.
var (
	createSchemaFlag = flag.Bool("create-schema", false, "create the database schema")
	addSeniorsFlag   = flag.Bool("add-seniors", false, "add the seniors to the database")
	notifsFlag       = flag.Bool("with-notifs", false, "enable email notifications")
	apiPort          = flag.Int64("start-api", common.APIPort, "start the API server on a given port")
)
// main parses the flags, runs the requested one-off database actions,
// optionally enables notifications, and finally starts the API server.
func main() {
	flag.Parse()
	// Open a single database connection when any DB action is requested,
	// instead of one connection per flag as before.
	if *createSchemaFlag || *addSeniorsFlag {
		db := database.Connect(false)
		defer db.Disconnect()
		if *createSchemaFlag {
			if err := db.CreateSchema(); err != nil {
				panic(err)
			}
			fmt.Println("created schema")
		}
		if *addSeniorsFlag {
			if err := db.AddSeniors(); err != nil {
				panic(err)
			}
			fmt.Println("added the seniors to the database")
		}
	}
	if *notifsFlag {
		common.NotifsEnabled = true
	}
	if *apiPort > 0 {
		if err := api.StartAPIServer(*apiPort); err != nil {
			panic(err)
		}
	}
}
|
package translator_test
import (
"testing"
"github.com/goropikari/psqlittle/core"
trans "github.com/goropikari/psqlittle/translator"
"github.com/stretchr/testify/assert"
)
// TestTranslateSelect checks that SELECT queries translate into the
// expected relational-algebra trees, including the wildcard form.
func TestTranslateSelect(t *testing.T) {
	var tests = []struct {
		name     string
		expected trans.Statement
		query    string
	}{
		{
			name: "test translator",
			expected: &trans.QueryStatement{
				RANode: &trans.ProjectionNode{
					TargetColNames: core.ColumnNames{
						{TableName: "foo", Name: "id"},
						{TableName: "foo", Name: "name"},
					},
					ResTargets: []trans.ExpressionNode{
						trans.ColRefNode{core.ColumnName{TableName: "foo", Name: "id"}},
						trans.ColRefNode{core.ColumnName{TableName: "foo", Name: "name"}},
					},
					RANode: &trans.WhereNode{
						Condition: nil,
						Table: &trans.CrossJoinNode{
							RANodes: []trans.RelationalAlgebraNode{
								&trans.TableNode{
									TableName: "foo",
								},
							},
						},
					},
				},
			},
			query: "SELECT foo.id, foo.name FROM foo",
		},
		{
			name: "test wildcard",
			expected: &trans.QueryStatement{
				RANode: &trans.ProjectionNode{
					TargetColNames: core.ColumnNames{
						core.ColumnName{Name: "*"},
					},
					ResTargets: []trans.ExpressionNode{
						trans.ColWildcardNode{},
					},
					RANode: &trans.WhereNode{
						Condition: nil,
						Table: &trans.CrossJoinNode{
							RANodes: []trans.RelationalAlgebraNode{
								&trans.TableNode{
									TableName: "foo",
								},
							},
						},
					},
				},
			},
			query: "SELECT * FROM foo",
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			transl := trans.NewPGTranslator(tt.query)
			// fix: translation errors were previously discarded, which could
			// mask a failure as a nil-vs-expected mismatch.
			actual, err := transl.Translate()
			assert.NoError(t, err)
			assert.Equal(t, tt.expected, actual)
		})
	}
}
// TestTranslateCreate checks that CREATE TABLE translates into the
// expected CreateTableNode with typed column definitions.
func TestTranslateCreate(t *testing.T) {
	var tests = []struct {
		name      string
		tableName string
		expected  trans.Statement
		query     string
	}{
		{
			name:      "test translator",
			tableName: "foo",
			expected: &trans.QueryStatement{
				RANode: &trans.CreateTableNode{
					TableName: "foo",
					ColumnDefs: core.Cols{
						core.Col{
							ColName: core.ColumnName{TableName: "foo", Name: "id"},
							ColType: core.Integer,
						},
						core.Col{
							ColName: core.ColumnName{TableName: "foo", Name: "name"},
							ColType: core.VarChar,
						},
					},
				},
			},
			query: "CREATE TABLE foo (id int, name varchar(255))",
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			transl := trans.NewPGTranslator(tt.query)
			// fix: translation errors were previously discarded.
			actual, err := transl.Translate()
			assert.NoError(t, err)
			assert.Equal(t, tt.expected, actual)
		})
	}
}
// TestTranslateInsert checks INSERT translation with and without an
// explicit column list and with multiple value tuples.
func TestTranslateInsert(t *testing.T) {
	var tests = []struct {
		name      string
		tableName string
		expected  trans.Statement
		query     string
	}{
		{
			name:      "test insert",
			tableName: "foo",
			expected: &trans.QueryStatement{
				RANode: &trans.InsertNode{
					TableName:   "foo",
					ColumnNames: core.ColumnNames{},
					ValuesList: core.ValuesList{
						core.Values{1, "mike"},
					},
				},
			},
			query: "INSERT INTO foo values (1, 'mike')",
		},
		{
			name:      "test insert multi values",
			tableName: "foo",
			expected: &trans.QueryStatement{
				RANode: &trans.InsertNode{
					TableName: "foo",
					ColumnNames: core.ColumnNames{
						{
							TableName: "foo",
							Name:      "id",
						},
						{
							TableName: "foo",
							Name:      "name",
						},
					},
					ValuesList: core.ValuesList{
						core.Values{1, "mike"},
						core.Values{100, "taro"},
					},
				},
			},
			query: "INSERT INTO foo (id, name) values (1, 'mike'), (100, 'taro')",
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			transl := trans.NewPGTranslator(tt.query)
			// fix: translation errors were previously discarded.
			actual, err := transl.Translate()
			assert.NoError(t, err)
			assert.Equal(t, tt.expected, actual)
		})
	}
}
|
package main
import (
	"io/ioutil"
	"log"
	"net/http"
	"strconv"

	"Moodometer/Server/moodometer"

	"github.com/golang/protobuf/proto"
)
// howDay holds the day count parsed from the most recent request path.
var howDay = 0
// moods accumulates every Mood received via the "add" action.
var moods = []moodometer.Mood{}
// main registers the catch-all handler and serves HTTP on :8008.
func main() {
	http.HandleFunc("/", action)
	// fix: ListenAndServe's error was silently discarded; it only returns
	// on failure, so surface that failure and exit.
	log.Fatal(http.ListenAndServe(":8008", nil))
}
func action(w http.ResponseWriter, r *http.Request) {
how := r.URL.Path[1:]
if how == nil {
log.Print("No day number")
}
what := r.URL.Path[2:]
if what == nil {
log.Print("No action")
}
if how != nil && what != nil {
howDay = how
switch what {
case "add":
add(w,r)
case "list":
list(w,r)
case "debug":
debug(w,r)
}
}
}
// debug logs the currently stored moods server-side.
func debug(w http.ResponseWriter, r *http.Request) {
	// fix: the builtin print writes unformatted output to stderr; use the
	// standard logger instead.
	log.Printf("%v", moods)
}
func add(w http.ResponseWriter, r *http.Request) {
mood := new(moodometer.Mood)
byteProto, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Fatal("Read Problem: "+err.Error())
}
err = proto.Unmarshal(byteProto, mood)
if err != nil {
log.Fatal("Unmarshal Problem: "+err.Error())
}
moods = append(moods, mood)
moodsXday := []moodometer.Mood{}
// Aujourd'hui en time - le nombre de jours en time
for _,x := range moods {
if x.Date >
}
meter := new(moodometer.Meter)
meter.Avg = 0
for _,m := range moods {
meter.Avg += m.Score
}
meter.Avg /= len(moods)
byteProto, err = proto.Marshal(meter)
if err != nil {
log.Fatal("Marshal Problem: "+err.Error())
}
w.Write(byteProto)
}
// list is a placeholder handler for the "list" action; it currently
// returns an empty 200 response. TODO: implement.
func list(w http.ResponseWriter, r *http.Request) {
}
|
// This file was generated for SObject CaseTeamTemplate, API Version v43.0 at 2018-07-30 03:47:39.961302704 -0400 EDT m=+26.304896172
package sobjects
import (
"fmt"
"strings"
)
// CaseTeamTemplate mirrors the Salesforce CaseTeamTemplate SObject
// (generated code; see the header comment for the API version).
type CaseTeamTemplate struct {
	BaseSObject
	CreatedById      string `force:",omitempty"`
	CreatedDate      string `force:",omitempty"`
	Description      string `force:",omitempty"`
	Id               string `force:",omitempty"`
	LastModifiedById string `force:",omitempty"`
	LastModifiedDate string `force:",omitempty"`
	Name             string `force:",omitempty"`
	SystemModstamp   string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *CaseTeamTemplate) ApiName() string {
	return "CaseTeamTemplate"
}
// String renders the template as a human-readable, line-per-field block,
// headed by "CaseTeamTemplate #<Id> - <Name>".
func (t *CaseTeamTemplate) String() string {
	var b strings.Builder
	b.WriteString(fmt.Sprintf("CaseTeamTemplate #%s - %s\n", t.Id, t.Name))
	for _, f := range []struct {
		label string
		value interface{}
	}{
		{"CreatedById", t.CreatedById},
		{"CreatedDate", t.CreatedDate},
		{"Description", t.Description},
		{"Id", t.Id},
		{"LastModifiedById", t.LastModifiedById},
		{"LastModifiedDate", t.LastModifiedDate},
		{"Name", t.Name},
		{"SystemModstamp", t.SystemModstamp},
	} {
		fmt.Fprintf(&b, "\t%s: %v\n", f.label, f.value)
	}
	return b.String()
}
// CaseTeamTemplateQueryResponse is the SOQL query envelope whose records
// are CaseTeamTemplate rows.
type CaseTeamTemplateQueryResponse struct {
	BaseQuery
	Records []CaseTeamTemplate `json:"Records" force:"records"`
}
|
package main
import (
"fmt"
"math"
)
// main prints the square root of 64 using math.Sqrt.
func main() {
	value := 64.0
	root := math.Sqrt(value)
	fmt.Printf("Sqrt(%v) : %v\n", value, root)
}
|
package connection
import (
"errors"
"net"
"bufio"
"io"
"strconv"
)
// MaxMessageSize is the largest payload size (in bytes) readMessage accepts.
const MaxMessageSize = 0x1FFFFFFF
// TCPConnection is a factory for TCP connections to a fixed address.
type TCPConnection struct {
	url string
}
// TCPConnectionInstance is one live TCP connection with a buffered reader
// for frame parsing.
type TCPConnectionInstance struct {
	conn net.Conn
	read *bufio.Reader
}
// NewTCPConnection returns a connection factory for the given address.
func NewTCPConnection(url string) *TCPConnection {
	return &TCPConnection{url}
}
func writeMessage(w io.Writer, message []byte) error {
h := []byte("$" + strconv.FormatUint(uint64(len(message)), 10) + " ")
b := make([]byte, len(h) + len(message) + 2)
copy(b, h)
copy(b[len(h):], message)
b[len(b) - 2] = '\r'
b[len(b) - 1] = '\n'
_, err := w.Write(b)
return err
}
// readMessage reads one "$<len> <payload>\r\n" frame from r and returns
// the payload without the trailing CRLF. It rejects frames without the
// '$' prefix, with an unparsable size, larger than MaxMessageSize, or
// missing the CRLF suffix.
func readMessage(r *bufio.Reader) ([]byte, error) {
	header, err := r.ReadBytes(' ')
	if err != nil {
		return nil, err
	}
	if len(header) > 0 && header[0] != '$' {
		return nil, errors.New("Not a valid message")
	}
	size, err := strconv.ParseUint(string(header[1:len(header)-1]), 10, 32)
	if err != nil {
		return nil, errors.New("Invalid/Unparsable size")
	}
	if size > MaxMessageSize {
		return nil, errors.New("Message is too long")
	}
	// read payload plus the two CRLF bytes in one go
	payload := make([]byte, int(size)+2)
	if _, err = io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	if payload[len(payload)-2] != '\r' || payload[len(payload)-1] != '\n' {
		return nil, errors.New("Expecting crlf suffix")
	}
	return payload[:len(payload)-2], nil
}
// Dial opens a TCP connection to the factory's address and wraps it in a
// TCPConnectionInstance; on failure it returns an empty instance and the
// dial error.
func (c *TCPConnection) Dial() (ConnectorReadWriter, error) {
	conn, err := net.Dial("tcp", c.url)
	if err != nil {
		return &TCPConnectionInstance{}, err
	}
	return &TCPConnectionInstance{conn, bufio.NewReader(conn)}, nil
}
// Close closes the underlying TCP connection.
func (c *TCPConnectionInstance) Close() error {
	return c.conn.Close()
}
// Write frames the command with writeMessage and sends it.
func (c *TCPConnectionInstance) Write(command string) error {
	return writeMessage(c.conn, []byte(command))
}
// Read reads and unframes the next message from the connection.
func (c *TCPConnectionInstance) Read() ([]byte, error) {
	return readMessage(c.read)
}
|
package main
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/sqs"
"github.com/aws/aws-sdk-go-v2/service/sqs/types"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/valyala/fastjson"
"github.com/falcosecurity/plugin-sdk-go/pkg/sdk"
)
func dirExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
// openLocal configures oCtx for file-mode operation: it validates the
// input directory and collects every .json / .json.gz file beneath it.
func openLocal(pCtx *pluginContext, oCtx *openContext, params string) error {
	oCtx.openMode = fileMode
	oCtx.cloudTrailFilesDir = params
	if len(oCtx.cloudTrailFilesDir) == 0 {
		// fix: use a constant format string so `go vet` does not flag a
		// non-constant format (and a literal '%' in PluginName cannot
		// corrupt the message).
		return fmt.Errorf("%s plugin error: missing input directory argument", PluginName)
	}
	if !dirExists(oCtx.cloudTrailFilesDir) {
		return fmt.Errorf(PluginName+" plugin error: cannot open %s", oCtx.cloudTrailFilesDir)
	}
	log.Printf("[%s] scanning directory %s\n", PluginName, oCtx.cloudTrailFilesDir)
	err := filepath.Walk(oCtx.cloudTrailFilesDir, func(path string, info os.FileInfo, err error) error {
		// fix: propagate walk errors instead of silently skipping them
		if err != nil {
			return err
		}
		if info != nil && info.IsDir() {
			return nil
		}
		isCompressed := strings.HasSuffix(path, ".json.gz")
		if filepath.Ext(path) != ".json" && !isCompressed {
			return nil
		}
		var fi fileInfo = fileInfo{name: path, isCompressed: isCompressed}
		oCtx.files = append(oCtx.files, fi)
		return nil
	})
	if err != nil {
		return err
	}
	if len(oCtx.files) == 0 {
		return fmt.Errorf("%s plugin error: no json files found in %s", PluginName, oCtx.cloudTrailFilesDir)
	}
	log.Printf("[%s] found %d json files\n", PluginName, len(oCtx.files))
	return nil
}
// initS3 creates the shared AWS session, S3 client and parallel downloader
// used by the s3 and sqs open modes. Credentials/region come from the
// standard shared-config environment.
func initS3(oCtx *openContext) {
	oCtx.s3.awsSess = session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	oCtx.s3.awsSvc = s3.New(oCtx.s3.awsSess)
	oCtx.s3.downloader = s3manager.NewDownloader(oCtx.s3.awsSess)
}
// openS3 opens capture on an "s3://bucket[/prefix]" URL and collects the
// keys of every .json / .json.gz object under the prefix into oCtx.files.
func openS3(pCtx *pluginContext, oCtx *openContext, input string) error {
	oCtx.openMode = s3Mode
	// remove the initial "s3://"
	input = input[5:]
	slashindex := strings.Index(input, "/")
	// Extract the URL components
	var prefix string
	if slashindex == -1 {
		oCtx.s3.bucket = input
		prefix = ""
	} else {
		oCtx.s3.bucket = input[:slashindex]
		prefix = input[slashindex+1:]
	}
	initS3(oCtx)
	// Fetch the list of keys
	err := oCtx.s3.awsSvc.ListObjectsPages(&s3.ListObjectsInput{
		Bucket: &oCtx.s3.bucket,
		Prefix: &prefix,
	}, func(p *s3.ListObjectsOutput, last bool) (shouldContinue bool) {
		for _, obj := range p.Contents {
			path := obj.Key
			isCompressed := strings.HasSuffix(*path, ".json.gz")
			if filepath.Ext(*path) != ".json" && !isCompressed {
				continue
			}
			// BUGFIX: record the actual compression state. The previous
			// hard-coded `isCompressed: true` fed plain .json objects to
			// the gzip reader, which silently skipped them.
			oCtx.files = append(oCtx.files, fileInfo{name: *path, isCompressed: isCompressed})
		}
		return true
	})
	if err != nil {
		err = fmt.Errorf(PluginName+" plugin error: failed to list objects: %v", err)
	}
	return err
}
func getMoreSQSFiles(pCtx *pluginContext, oCtx *openContext) error {
ctx := context.Background()
input := &sqs.ReceiveMessageInput{
MessageAttributeNames: []string{
string(types.QueueAttributeNameAll),
},
QueueUrl: &oCtx.queueURL,
MaxNumberOfMessages: 1,
}
msgResult, err := oCtx.sqsClient.ReceiveMessage(ctx, input)
if err != nil {
return err
}
if len(msgResult.Messages) == 0 {
return nil
}
if pCtx.sqsDelete {
// Delete the message from the queue so it won't be read again
delInput := &sqs.DeleteMessageInput{
QueueUrl: &oCtx.queueURL,
ReceiptHandle: msgResult.Messages[0].ReceiptHandle,
}
_, err = oCtx.sqsClient.DeleteMessage(ctx, delInput)
if err != nil {
return err
}
}
// The SQS message is just a SNS notification noting that new
// cloudtrail file(s) are available in the s3 bucket. Download
// those files.
var sqsMsg map[string]interface{}
err = json.Unmarshal([]byte(*msgResult.Messages[0].Body), &sqsMsg)
if err != nil {
return err
}
messageType, ok := sqsMsg["Type"]
if !ok {
return fmt.Errorf("received SQS message that did not have a Type property")
}
if messageType.(string) != "Notification" {
return fmt.Errorf("received SQS message that was not a SNS Notification")
}
var notification snsMessage
err = json.Unmarshal([]byte(sqsMsg["Message"].(string)), ¬ification)
if err != nil {
return err
}
// The notification contains a bucket and a list of keys that
// contain new cloudtrail files.
oCtx.s3.bucket = notification.Bucket
initS3(oCtx)
for _, key := range notification.Keys {
isCompressed := strings.HasSuffix(key, ".json.gz")
oCtx.files = append(oCtx.files, fileInfo{name: key, isCompressed: isCompressed})
}
return nil
}
func openSQS(pCtx *pluginContext, oCtx *openContext, input string) error {
ctx := context.Background()
cfg, err := config.LoadDefaultConfig(ctx)
if err != nil {
return err
}
oCtx.openMode = sqsMode
oCtx.sqsClient = sqs.NewFromConfig(cfg)
queueName := input[6:]
urlResult, err := oCtx.sqsClient.GetQueueUrl(ctx, &sqs.GetQueueUrlInput{QueueName: &queueName})
if err != nil {
return err
}
oCtx.queueURL = *urlResult.QueueUrl
return getMoreSQSFiles(pCtx, oCtx)
}
var dlErrChan chan error
// s3Download fetches one object from the open bucket into download slot
// dloadSlotNum. Errors are reported on dlErrChan; completion (success or
// failure) is signaled through DownloadWg.
func s3Download(oCtx *openContext, downloader *s3manager.Downloader, name string, dloadSlotNum int) {
	defer oCtx.s3.DownloadWg.Done()
	buff := &aws.WriteAtBuffer{}
	_, err := downloader.Download(buff,
		&s3.GetObjectInput{
			Bucket: &oCtx.s3.bucket,
			Key:    &name,
		})
	if err != nil {
		dlErrChan <- err
		return
	}
	oCtx.s3.DownloadBufs[dloadSlotNum] = buff.Bytes()
}
// readNextFileS3 returns the content of the next S3 file. It prefetches up
// to pCtx.s3DownloadConcurrency files in parallel and serves subsequent
// calls from the already-filled buffers.
func readNextFileS3(pCtx *pluginContext, oCtx *openContext) ([]byte, error) {
	// Serve from a prefetched buffer when one is still pending.
	if oCtx.s3.curBuf < oCtx.s3.nFilledBufs {
		curBuf := oCtx.s3.curBuf
		oCtx.s3.curBuf++
		return oCtx.s3.DownloadBufs[curBuf], nil
	}
	dlErrChan = make(chan error, pCtx.s3DownloadConcurrency)
	k := oCtx.s3.lastDownloadedFileNum
	// Don't launch more downloads than there are files left.
	oCtx.s3.nFilledBufs = min(pCtx.s3DownloadConcurrency, len(oCtx.files)-k)
	for j, f := range oCtx.files[k : k+oCtx.s3.nFilledBufs] {
		oCtx.s3.DownloadWg.Add(1)
		go s3Download(oCtx, oCtx.s3.downloader, f.name, j)
	}
	oCtx.s3.DownloadWg.Wait()
	// Report the first download error, if any goroutine posted one.
	select {
	case e := <-dlErrChan:
		return nil, e
	default:
	}
	oCtx.s3.lastDownloadedFileNum += oCtx.s3.nFilledBufs
	// Buffer 0 is returned now; the next call starts at buffer 1.
	oCtx.s3.curBuf = 1
	return oCtx.s3.DownloadBufs[0], nil
}
// readFileLocal reads the whole file into memory.
// os.ReadFile replaces the deprecated ioutil.ReadFile (identical behavior,
// Go 1.16+); "os" is already imported by this file.
func readFileLocal(fileName string) ([]byte, error) {
	return os.ReadFile(fileName)
}
// extractRecordStrings scans jsonStr — expected to look like
// {"Records":[{...},{...}]} — and appends the raw bytes of each top-level
// record object to res, without unmarshaling them.
func extractRecordStrings(jsonStr []byte, res *[][]byte) {
	depth := 0
	start := 0
	for pos := 0; pos < len(jsonStr); pos++ {
		switch jsonStr[pos] {
		case '{':
			if depth == 1 {
				start = pos
			}
			depth++
		case '}':
			depth--
			// A record closed at nesting level 1. The outermost '}' is
			// skipped when it is the very last byte of the input.
			if depth == 1 && pos < len(jsonStr)-1 {
				*res = append(*res, jsonStr[start:pos+1])
			}
		}
	}
}
// nextEvent is the core event production function: it pulls the next
// cloudtrail record out of the currently loaded file (reading/downloading
// and splitting the next file when the current one is exhausted) and writes
// its raw JSON into evt. Skippable problems are reported as sdk.ErrTimeout
// so the framework just moves on.
func nextEvent(pCtx *pluginContext, oCtx *openContext, evt sdk.EventWriter) error {
	var evtData []byte
	var tmpStr []byte
	var err error
	// Only open the next file once we're sure that the content of the previous one has been fully consumed
	if oCtx.evtJSONListPos == len(oCtx.evtJSONStrings) {
		// Open the next file and bring its content into memory
		if oCtx.curFileNum >= uint32(len(oCtx.files)) {
			// If reading file names from a queue, try to
			// get more files first. Otherwise, return EOF.
			if oCtx.openMode == sqsMode {
				err = getMoreSQSFiles(pCtx, oCtx)
				if err != nil {
					return err
				}
				// If after trying, there are no
				// additional files, return timeout.
				if oCtx.curFileNum >= uint32(len(oCtx.files)) {
					return sdk.ErrTimeout
				}
			} else {
				return sdk.ErrEOF
			}
		}
		file := oCtx.files[oCtx.curFileNum]
		oCtx.curFileNum++
		switch oCtx.openMode {
		case s3Mode, sqsMode:
			tmpStr, err = readNextFileS3(pCtx, oCtx)
		case fileMode:
			tmpStr, err = readFileLocal(file.name)
		}
		if err != nil {
			return err
		}
		// The file can be gzipped. If it is, we unzip it.
		// Decompression failures are treated as skippable (ErrTimeout).
		if file.isCompressed {
			gr, err := gzip.NewReader(bytes.NewBuffer(tmpStr))
			if err != nil {
				return sdk.ErrTimeout
			}
			defer gr.Close()
			zdata, err := ioutil.ReadAll(gr)
			if err != nil {
				return sdk.ErrTimeout
			}
			tmpStr = zdata
		}
		// Cloudtrail files have the following format:
		// {"Records":[
		//	{<evt1>},
		//	{<evt2>},
		//	...
		// ]}
		// Here, we split the file content into substrings, one per event.
		// We do this instead of unmarshaling the whole file because this allows
		// us to pass the original json of each event to the engine without an
		// additional marshaling, making things much faster.
		oCtx.evtJSONStrings = nil
		extractRecordStrings(tmpStr, &(oCtx.evtJSONStrings))
		oCtx.evtJSONListPos = 0
	}
	// Extract the next record
	var cr *fastjson.Value
	if len(oCtx.evtJSONStrings) != 0 {
		evtData = oCtx.evtJSONStrings[oCtx.evtJSONListPos]
		cr, err = oCtx.nextJParser.Parse(string(evtData))
		if err != nil {
			// Not json? Just skip this event.
			oCtx.evtJSONListPos++
			return sdk.ErrTimeout
		}
		oCtx.evtJSONListPos++
	} else {
		// Json not in the expected format. Just skip this event.
		oCtx.evtJSONListPos++
		return sdk.ErrTimeout
	}
	// All cloudtrail events should have a time. If it's missing
	// skip the event.
	timeVal := cr.GetStringBytes("eventTime")
	if timeVal == nil {
		return sdk.ErrTimeout
	}
	// Extract the timestamp
	t1, err := time.Parse(
		time.RFC3339,
		string(timeVal))
	if err != nil {
		//
		// We assume this is just some spurious data and we continue
		//
		return sdk.ErrTimeout
	}
	evt.SetTimestamp(uint64(t1.UnixNano()))
	// All cloudtrail events should have a type. If it's missing
	// skip the event.
	typeVal := cr.GetStringBytes("eventType")
	if typeVal == nil {
		return sdk.ErrTimeout
	}
	ets := string(typeVal)
	if ets == "AwsCloudTrailInsight" {
		return sdk.ErrTimeout
	}
	// Write the event data
	// NOTE(review): the error text says "cloudwatch" although this plugin
	// reads cloudtrail files — probably a copy/paste leftover; confirm.
	n, err := evt.Writer().Write(evtData)
	if err != nil {
		return err
	} else if n < len(evtData) {
		return fmt.Errorf("cloudwatch message too long: %d, but %d were written", len(evtData), n)
	}
	return nil
}
|
package mhfpacket
import (
"github.com/Andoryuuta/Erupe/network"
"github.com/Andoryuuta/Erupe/network/clientctx"
"github.com/Andoryuuta/byteframe"
)
// MsgSysAck represents the MSG_SYS_ACK packet.
type MsgSysAck struct {
	AckHandle        uint32 // request handle being acknowledged
	IsBufferResponse bool   // true: AckData is a sized payload; false: fixed 4 bytes
	ErrorCode        uint8
	AckData          []byte
}
// Opcode returns the ID associated with this packet type.
func (m *MsgSysAck) Opcode() network.PacketID {
	return network.MSG_SYS_ACK
}
// Parse parses the packet from binary.
func (m *MsgSysAck) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	m.AckHandle = bf.ReadUint32()
	m.IsBufferResponse = bf.ReadBool()
	m.ErrorCode = bf.ReadUint8()
	payloadSize := uint(bf.ReadUint16())
	// Extended data size field: 0xFFFF is a sentinel meaning the real
	// payload size follows as a uint32 (mirrored by Build).
	if payloadSize == 0xFFFF {
		payloadSize = uint(bf.ReadUint32())
	}
	if m.IsBufferResponse {
		m.AckData = bf.ReadBytes(payloadSize)
	} else {
		// endian-swapped 4 bytes, could be any type. Unknown purpose.
		// Probably a fixed type like (int32 or uint32), but unknown.
		m.AckData = bf.ReadBytes(4)
	}
	return nil
}
// Build builds a binary packet from the current data (inverse of Parse).
func (m *MsgSysAck) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	bf.WriteUint32(m.AckHandle)
	bf.WriteBool(m.IsBufferResponse)
	bf.WriteUint8(m.ErrorCode)
	if m.IsBufferResponse {
		if len(m.AckData) < 0xFFFF {
			bf.WriteUint16(uint16(len(m.AckData)))
		} else {
			// Size doesn't fit in 16 bits: write the 0xFFFF sentinel
			// followed by the real size as uint32 (see Parse).
			bf.WriteUint16(0xFFFF)
			bf.WriteUint32(uint32(len(m.AckData)))
		}
	} else {
		bf.WriteUint16(0x00)
	}
	if m.IsBufferResponse {
		bf.WriteBytes(m.AckData)
	} else if len(m.AckData) >= 4 {
		// Non-buffer responses always carry exactly 4 bytes.
		bf.WriteBytes(m.AckData[:4])
	} else {
		bf.WriteBytes([]byte{0x00, 0x00, 0x00, 0x00})
	}
	return nil
}
|
package main
import (
"bytes"
"fmt"
"strings"
"strconv"
)
// main reads a single whitespace-delimited token from stdin and prints it
// formatted as a currency string.
func main() {
	var str string
	// Scanf's error is deliberately ignored; an empty read formats as "0.00".
	fmt.Scanf("%s", &str)
	fmt.Print(formatToCurrenct(str))
}
// formatToCurrenct rounds the decimal string str to two decimal places and
// inserts thousands separators, e.g. "1234567.891" -> "1,234,567.89".
// (The "Currenct" typo is kept: the name is the function's public interface.)
func formatToCurrenct(str string) string {
	strTof, _ := strconv.ParseFloat(str, 64)
	str = fmt.Sprintf("%.2f", strTof)
	// BUGFIX: split off a leading minus sign so it is never grouped with
	// the digits — the previous version produced "-,123.00" for "-123".
	sign := ""
	if strings.HasPrefix(str, "-") {
		sign = "-"
		str = str[1:]
	}
	strs := strings.Split(str, ".")
	num := strs[0]
	dec := strs[1]
	var reStr bytes.Buffer
	reStr.WriteString(sign)
	// The first group takes the leftover 1..3 digits; the rest are 3 wide.
	first := len(num) % 3
	if first == 0 {
		first = 3
	}
	reStr.WriteString(num[:first])
	for i := first; i < len(num); i += 3 {
		reStr.WriteString(",")
		reStr.WriteString(num[i : i+3])
	}
	reStr.WriteString(".")
	reStr.WriteString(dec)
	return reStr.String()
}
|
package orm
import (
"database/sql"
"time"
"video_server/api/defs"
"video_server/api/utils"
)
// AddVideo inserts a new video_info row with a fresh UUID id and the current
// time as display_ctime, and returns the created record.
func AddVideo(authorId int, name string) (video *defs.Video, errs error) {
	videoId, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}
	// "Jan 02 2006, 15:04:05" is Go's reference-time layout.
	ctime := time.Now().Format("Jan 02 2006, 15:04:05")
	stmt, err := conn.Prepare("INSERT INTO video_info (id,author_id,name,display_ctime) values (?,?,?,?)")
	if err != nil {
		return nil, err
	}
	// BUGFIX: defer placed immediately after Prepare so the statement is
	// also closed when Exec fails (previously it leaked on that path).
	defer stmt.Close()
	if _, err = stmt.Exec(videoId, authorId, name, ctime); err != nil {
		return nil, err
	}
	return &defs.Video{videoId, authorId, name, ctime}, nil
}
// GetVideoInfo loads one video by id. It returns (nil, nil) when the id does
// not exist, keeping the original contract.
func GetVideoInfo(videoId string) (video *defs.Video, errs error) {
	stmt, err := conn.Prepare("SELECT author_id,name,display_ctime FROM video_info WHERE id = ?; ")
	if err != nil {
		return nil, err
	}
	// BUGFIX: close the statement on every return path, including the
	// error returns below (previously leaked on Scan errors).
	defer stmt.Close()
	var aid int
	var dct string
	var name string
	err = stmt.QueryRow(videoId).Scan(&aid, &name, &dct)
	if err == sql.ErrNoRows {
		// Not found is not an error for callers.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &defs.Video{videoId, aid, name, dct}, nil
}
// DeleteVideo removes the video_info row with the given id. Deleting a
// nonexistent id is not an error.
func DeleteVideo(videoId string) (errs error) {
	stmt, err := conn.Prepare("DELETE FROM video_info WHERE id = ?; ")
	if err != nil {
		return err
	}
	// BUGFIX: defer right after Prepare so Exec failures don't leak stmt.
	defer stmt.Close()
	if _, err = stmt.Exec(videoId); err != nil {
		return err
	}
	return nil
}
|
package main
import "fmt"
// main demonstrates that iterating a nil slice is a no-op, then prints a
// marker line.
func main() {
	var s []int
	for idx := 0; idx < len(s); idx++ {
		fmt.Println(idx)
	}
	fmt.Println("vim-go")
}
|
package authenticate
import (
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/internal/identity"
"github.com/pomerium/pomerium/internal/identity/oauth"
"github.com/pomerium/pomerium/internal/urlutil"
)
// defaultGetIdentityProvider builds an identity.Authenticator for the
// identity provider with id idpID, using the authenticate service's URL
// (with the callback path appended) as the OAuth redirect URL.
func defaultGetIdentityProvider(options *config.Options, idpID string) (identity.Authenticator, error) {
	authenticateURL, err := options.GetAuthenticateURL()
	if err != nil {
		return nil, err
	}
	// Copy before mutating Path so the shared options URL is untouched.
	redirectURL, err := urlutil.DeepCopy(authenticateURL)
	if err != nil {
		return nil, err
	}
	redirectURL.Path = options.AuthenticateCallbackPath
	idp, err := options.GetIdentityProviderForID(idpID)
	if err != nil {
		return nil, err
	}
	return identity.NewAuthenticator(oauth.Options{
		RedirectURL:     redirectURL,
		ProviderName:    idp.GetType(),
		ProviderURL:     idp.GetUrl(),
		ClientID:        idp.GetClientId(),
		ClientSecret:    idp.GetClientSecret(),
		Scopes:          idp.GetScopes(),
		AuthCodeOptions: idp.GetRequestParams(),
	})
}
|
package cfmysql
import "os"
//go:generate counterfeiter . OsWrapper

// OsWrapper abstracts the os-level operations used by this package so they
// can be faked in tests (see the counterfeiter directive above).
type OsWrapper interface {
	LookupEnv(key string) (string, bool)
	Name(file *os.File) string
	Remove(name string) error
	WriteString(file *os.File, s string) (n int, err error)
}
// NewOsWrapper returns the default OsWrapper backed by the real os package.
func NewOsWrapper() OsWrapper {
	return new(osWrapper)
}
type osWrapper struct{}
// LookupEnv delegates to os.LookupEnv.
// Receiver renamed from "self" to "o" per Go convention.
func (o *osWrapper) LookupEnv(key string) (string, bool) {
	return os.LookupEnv(key)
}
// Name returns the file's name as reported by *os.File.Name.
// Receiver renamed from "self" to "o" per Go convention.
func (o *osWrapper) Name(file *os.File) string {
	return file.Name()
}
// Remove delegates to os.Remove.
// Receiver renamed from "self" to "o" per Go convention.
func (o *osWrapper) Remove(name string) error {
	return os.Remove(name)
}
// WriteString delegates to *os.File.WriteString.
// Receiver renamed from "self" to "o" per Go convention.
func (o *osWrapper) WriteString(file *os.File, s string) (n int, err error) {
	return file.WriteString(s)
}
|
package main
// BinarySearch returns an index i with arr[i] == t, or -1 when t is absent.
// arr must be sorted ascending; if t occurs more than once, any matching
// index may be returned.
func BinarySearch(arr []int, t int) int {
	l := 0
	r := len(arr) // search window is the half-open interval [l, r)
	for l < r {
		mid := l + (r-l)/2 // avoids overflow of l+r on huge slices
		switch {
		case arr[mid] == t:
			return mid
		case arr[mid] > t:
			r = mid
		default:
			l = mid + 1
		}
	}
	return -1
}
|
package Collections
import "fmt"
// DATrie is a double-array trie.
type DATrie struct {
	Base         []int        // base array; negative entries point into Tail
	Check        []int        // check array: parent index of each occupied slot
	Tail         [][]rune     // compressed word suffixes ("tail" strings)
	tailPosition int          // current tail position (unused in the visible code)
	RuneCodeMap  map[rune]int // maps a rune to its transition code
}
// EndRune marks the end of a word inside the trie.
const EndRune = '#'
// NewDATrie creates an empty double-array trie pre-sized for 1024 nodes and
// pre-registers the end marker plus 'a'..'z' in the rune-code table.
func NewDATrie() *DATrie {
	t := &DATrie{
		Base:  make([]int, 1024),
		Check: make([]int, 1024),
		// Tail slot 0 must stay empty: Base stores negative tail indices,
		// so index 0 would be ambiguous.
		Tail:         make([][]rune, 1),
		tailPosition: 0,
		RuneCodeMap:  make(map[rune]int),
	}
	// Base index 0 is unused; index 1 is the root node.
	t.Base[1] = 1
	t.RuneCodeMap[EndRune] = 1
	// Codes start at 1, hence len(map)+1 for each new rune.
	for c := 'a'; c <= 'z'; c++ {
		t.RuneCodeMap[c] = len(t.RuneCodeMap) + 1
	}
	return t
}
// extendBaseCheck grows both Base and Check by addSize zeroed slots.
func (this *DATrie) extendBaseCheck(addSize int) {
	// Renamed from "new", which shadowed the builtin of the same name.
	ext := make([]int, addSize)
	this.Base = append(this.Base, ext...)
	this.Check = append(this.Check, ext...)
}
// GetRuneCode returns the transition code for _rune, registering a fresh
// code (len(map)+1) on first sight.
func (this *DATrie) GetRuneCode(_rune rune) int {
	code, ok := this.RuneCodeMap[_rune]
	if ok {
		return code
	}
	code = len(this.RuneCodeMap) + 1
	this.RuneCodeMap[_rune] = code
	return code
}
// x_check finds a new base value such that every child position reached via
// the transition codes in checklist is unoccupied in both Base and Check.
func (this *DATrie) x_check(checklist []int) int {
	// try candidate base values starting from 1
	for i := 1; ; i++ {
		stopFlag := true
		// walk every child transition code
		for _, inputCode := range checklist {
			newSonNodeIndex := i + inputCode
			// BUGFIX: grow the arrays *before* probing occupancy (the
			// original probed first, which could index out of range), and
			// use >= since index == len is already out of bounds.
			if newSonNodeIndex >= len(this.Base) {
				this.extendBaseCheck(newSonNodeIndex - len(this.Base) + 1)
			}
			// slot already taken: this candidate fails
			if this.Base[newSonNodeIndex] != 0 || this.Check[newSonNodeIndex] != 0 {
				stopFlag = false
				break
			}
		}
		// all children fit at this base value
		if stopFlag {
			return i
		}
	}
}
// getChildList returns the transition codes of every child of the node at
// fatherNodeBaseIndex (the code of the edge that reaches each child).
func (this *DATrie) getChildList(fatherNodeBaseIndex int) []int {
	childList := make([]int, 0)
	// BUGFIX: codes are assigned 1..len(RuneCodeMap) inclusive, so the loop
	// must include len(RuneCodeMap) (the original `<` missed the last code).
	for i := 1; i <= len(this.RuneCodeMap); i++ {
		maybeSonIndex := this.Base[fatherNodeBaseIndex] + i
		// BUGFIX: >= guards the Check access below; index == len panics.
		if maybeSonIndex >= len(this.Base) {
			break
		}
		if this.Check[maybeSonIndex] == fatherNodeBaseIndex {
			childList = append(childList, i)
		}
	}
	return childList
}
// AppendToTailArray stores the suffix runes[position:] in the Tail array and
// returns its index.
func (this *DATrie) AppendToTailArray(runes []rune, position int) int {
	this.Tail = append(this.Tail, runes[position:])
	return len(this.Tail) - 1
}
// Insert adds a word to the trie. This is the core of the double-array trie.
// NOTE(review): the "conflict 2" branch at the bottom is unfinished (see TODO);
// inserting words that trigger it does not actually relocate the node.
func (this *DATrie) Insert(word string) {
	wordRunes := []rune(word)
	wordRunes = append(wordRunes, EndRune)
	prePosition := 1        // previous node's position
	var currentPosition int // index in Base of the current character while walking the word's path
	// index is used to slice off the remaining suffix
	for index, char := range wordRunes {
		// position of the child node reached through this character
		currentPosition = this.Base[prePosition] + this.GetRuneCode(char)
		// grow the arrays if needed
		if currentPosition > len(this.Base)-1 {
			this.extendBaseCheck(currentPosition - len(this.Base) + 1)
		}
		// The child slot is unoccupied.
		// Base[currentPosition] == 0 alone does not mean "free": the end
		// marker can also transfer into a node whose Base is 0.
		if this.Base[currentPosition] == 0 && this.Check[currentPosition] == 0 {
			// e.g. after inserting ba#, bc#, now inserting b# (the new word is a prefix of an old one)
			if char == EndRune {
				this.Base[currentPosition] = 0
				this.Check[currentPosition] = prePosition
				return // done
			}
			this.AppendToTailArray(wordRunes, index+1) // TODO confirm whether the +1 is correct here
			// the suffix was appended to Tail at index len(this.Tail)-1
			this.Base[currentPosition] = -(len(this.Tail) - 1)
			this.Check[currentPosition] = prePosition
			return // done
		}
		// The slot is occupied.
		// Normal transfer, no conflict.
		if this.Base[currentPosition] > 0 && this.Check[currentPosition] == prePosition {
			prePosition = currentPosition
			continue
		}
		// Conflict.
		// Conflict 1: Base[cur_p] < 0, i.e. we hit a string compressed into Tail.
		if this.Base[currentPosition] < 0 && this.Check[currentPosition] == prePosition {
			tailIndex := -this.Base[currentPosition]
			// the conflicting path has exactly the same suffix: nothing to do
			// NOTE(review): this compares the whole stored suffix against a
			// single rune; wordRunes[index+1:] was probably intended — confirm.
			if string(this.Tail[tailIndex]) == string(wordRunes[index+1]) {
				return
			}
			// Suffixes differ: pull the shared prefix into Base and keep the
			// distinct remainders in Tail.
			// Case: both continue with the same character.
			if this.Tail[tailIndex][0] == wordRunes[index+1] {
				fmt.Println("相同的前缀")
				tailHeadCode := this.GetRuneCode(wordRunes[index+1])
				newBaseValue := this.x_check([]int{tailHeadCode})
				// install the new base value, flipping negative -> positive (one child)
				this.Base[currentPosition] = newBaseValue
				// drop the first rune of the stored Tail entry
				this.Tail[tailIndex] = this.Tail[tailIndex][1:]
				// the child reached via this edge lives at newBaseValue+tailHeadCode
				this.Base[newBaseValue+tailHeadCode] = -tailIndex
				this.Check[newBaseValue+tailHeadCode] = currentPosition
				prePosition = currentPosition
				continue
			} else { // different characters; one of them may be the end marker
				fmt.Println("开始不同的字符")
				fmt.Println("string(this.Tail[tailIndex][0])", string(this.Tail[tailIndex][0]))
				fmt.Println("string(wordRunes[index+1]) ", string(wordRunes[index+1]))
				tailHeadCode := this.GetRuneCode(this.Tail[tailIndex][0])
				nextCharCode := this.GetRuneCode(wordRunes[index+1])
				newBaseValue := this.x_check([]int{tailHeadCode, nextCharCode})
				// install the new base value, flipping negative -> positive (two children)
				this.Base[currentPosition] = newBaseValue
				fmt.Println("newBaseValue ", newBaseValue)
				fmt.Println("newBaseValue+tailHeadCode ", newBaseValue+tailHeadCode)
				fmt.Println("newBaseValue+nextCharCode ", newBaseValue+nextCharCode)
				this.Check[newBaseValue+tailHeadCode] = currentPosition
				this.Check[newBaseValue+nextCharCode] = currentPosition
				// case: the Tail entry starts with the end marker
				if this.Tail[tailIndex][0] == EndRune {
					fmt.Println("this.Tail[tailIndex][0] == EndRune")
					this.Base[newBaseValue+tailHeadCode] = 0
				} else {
					// drop the first rune of the stored Tail entry
					this.Tail[tailIndex] = this.Tail[tailIndex][1:]
					// the child reached via this edge lives at newBaseValue+tailHeadCode
					this.Base[newBaseValue+tailHeadCode] = -tailIndex
				}
				// store the rest of the word in Tail;
				// index+2 because rune index+1 is already encoded as the edge to the child
				if wordRunes[index+1] == EndRune {
					fmt.Println("wordRunes[index+1] == EndRune")
					this.Base[newBaseValue+nextCharCode] = 0
				} else {
					newTailIndex := this.AppendToTailArray(wordRunes, index+2)
					this.Base[newBaseValue+nextCharCode] = -newTailIndex
				}
				return
			}
		}
		//TODO
		// Conflict 2: the slot belongs to a child of a different parent, so
		// prePosition's base must be relocated. This is the most complex part
		// of the DATrie and is NOT finished: it only picks the cheaper node
		// to migrate and computes a new base value, never performing the move.
		if this.Check[currentPosition] != prePosition {
			fmt.Println("冲突2")
			fmt.Println("string(char) ", string(char))
			fmt.Println("currentPosition ", currentPosition)
			fmt.Println("prePosition ", prePosition)
			fmt.Println("this.Base[prePosition] ", this.Base[prePosition])
			fmt.Println("this.Check[currentPosition] ", this.Check[currentPosition])
			preNodeSons := this.getChildList(prePosition) // all children of the previous node
			// all children of the node we collided with
			anotherNodeSons := this.getChildList(this.Check[currentPosition])
			newBaseValue := 0
			// pick the node that is cheaper to migrate
			if len(preNodeSons)+1 > len(anotherNodeSons) {
				newBaseValue = this.x_check(anotherNodeSons)
			} else {
				newBaseValue = this.x_check(append(preNodeSons, this.GetRuneCode(char)))
			}
			fmt.Println(newBaseValue)
			//return
		}
	}
}
// Contains reports whether word was previously inserted into the trie.
func (this *DATrie) Contains(word string) bool {
	exist := false
	chars := []rune(word)
	chars = append(chars, EndRune)
	prePosition := 1
	currentPosition := 0
	for index, char := range chars {
		currentPosition = this.Base[prePosition] + this.GetRuneCode(char)
		// == 0: either nothing is here, or the end marker transferred into this node
		if this.Base[currentPosition] == 0 {
			fmt.Println("this.Base[currentPosition] == 0")
			fmt.Println("currentPosition ", currentPosition)
			fmt.Println("prePosition ", prePosition)
			fmt.Println("this.Check[currentPosition] ", this.Check[currentPosition])
			fmt.Println("this.Check[currentPosition] == prePosition ", this.Check[currentPosition] == prePosition)
			// a match only if this node belongs to us and we consumed the whole word
			if this.Check[currentPosition] == prePosition && index == len(chars)-1 {
				fmt.Println("this.Check[currentPosition] == prePosition && index==len(chars)-1")
				return true
			}
			return false
		} else if this.Base[currentPosition] > 0 {
			fmt.Println("this.Base[currentPosition] > 0")
			// > 0: keep walking the double array
			if this.Check[currentPosition] != prePosition {
				return false
			}
			prePosition = currentPosition
		} else {
			fmt.Println("this.Base[currentPosition] < 0")
			// < 0: compare the remaining input with the suffix stored in Tail
			fmt.Println("this.Base[currentPosition]", this.Base[currentPosition])
			fmt.Println("string(char) ", string(char))
			fmt.Println("string(this.Tail[-this.Base[currentPosition]])", string(this.Tail[-this.Base[currentPosition]]))
			fmt.Println("string(chars[index+1:])", string(chars[index+1:]))
			if string(this.Tail[-this.Base[currentPosition]]) == string(chars[index+1:]) {
				return true
			} else {
				return false
			}
		}
	}
	return exist
}
|
package app
import (
"fmt"
"github.com/blang/semver"
"github.com/rhysd/go-github-selfupdate/selfupdate"
"strings"
)
// DoSelfUpdate checks GitHub for a newer release of the binary and, when one
// exists, replaces the running executable in place.
func DoSelfUpdate(currentVersion string) {
	ver := semver.MustParse(strings.TrimPrefix(currentVersion, "v"))
	slug := "Brialius/jira2trello"
	latest, found, err := selfupdate.DetectLatest(slug)
	if err != nil {
		fmt.Println("Failed to check updates:", err)
		return
	}
	// BUGFIX: check found before touching latest — DetectLatest returns a
	// nil release with found=false, so the previous order could nil-deref.
	if !found {
		fmt.Println("No release found for", slug)
		return
	}
	if latest.Version.Equals(ver) {
		// latest version is the same as current version. It means current binary is up-to-date.
		fmt.Println("Current binary is the latest version:", currentVersion)
		return
	}
	if latest.Version.GT(ver) {
		fmt.Printf("New version found: %s\n", latest.Version)
		fmt.Println("Updating...")
		if _, err := selfupdate.UpdateSelf(ver, slug); err != nil {
			fmt.Println("Binary update failed:", err)
			return
		}
		fmt.Println("Successfully updated to version", latest.Version)
		fmt.Println("Release notes:")
		fmt.Println(latest.ReleaseNotes)
	}
}
|
package stdout
import (
"github.com/k0kubun/pp"
"github.com/cloudfly/ecenter/pkg/sender"
)
// init registers this sender in the global sender registry under its Name so
// it can be selected by configuration.
func init() {
	sender.Register(Name, New)
}
// Sender name, used as the registry key.
const (
	Name = "stdout"
)
// Sender writes messages to standard output (useful for debugging).
type Sender struct{}
// New creates a new stdout sender. The setting map is accepted for interface
// compatibility but ignored.
func New(setting map[string]string) (sender.Sender, error) {
	return Sender{}, nil
}
// Send pretty-prints the message to stdout and reports every recipient as
// delivered (returns len(to), nil).
func (sender Sender) Send(title, content, from string, to []string) (int, error) {
	pp.Println(map[string]interface{}{
		"title":   title,
		"content": content,
		"from":    from,
		"to":      to,
	})
	return len(to), nil
}
|
package authproxy
import (
"net/http"
"time"
)
// createAccessTokenCookie wraps accessToken in a site-wide session cookie.
// HttpOnly keeps it away from client-side scripts; Secure/Expires/MaxAge are
// placeholders pending TODOs below.
func createAccessTokenCookie(accessToken string) *http.Cookie {
	return &http.Cookie{
		Name:     accessTokenCookieName,
		Value:    accessToken,
		Path:     "/",
		HttpOnly: true,
		Secure:   false,       // TODO: enable once served over HTTPS
		Expires:  time.Time{}, // TODO: set an expiry (session cookie for now)
		MaxAge:   0,           // TODO: align with Expires
	}
}
|
package suite
import (
"benchmark/connection"
"fmt"
"errors"
"benchmark/helpers"
"math/rand"
)
// testPrefixSet is the object-id prefix for keys written by this suite.
const testPrefixSet = "set"

// SetCommand benchmarks Tile38 SET commands for a configured geometry type.
type SetCommand struct {
	T GeoType
}
// Fire writes one randomized SET command for the configured geometry type.
// Object ids are <testPrefixSet>_0..999 under the shared BenchmarkPrefix key.
func (c *SetCommand) Fire(conn connection.ConnectorReadWriter) error {
	var command string
	switch c.T {
	case Point:
		lat, lon := helpers.RandomPointCoordinates()
		command = fmt.Sprintf("SET %s %s_%d POINT %f %f", BenchmarkPrefix, testPrefixSet, rand.Intn(1000), lat, lon)
	case Geohash:
		hash, _ := helpers.RandomHashCoordinates()
		// NOTE(review): Tile38's "SET ... HASH" takes only the geohash; the
		// trailing "10" looks spurious — confirm against the server docs.
		command = fmt.Sprintf("SET %s %s_%d HASH %s %d", BenchmarkPrefix, testPrefixSet, rand.Intn(1000), hash, 10)
	case Object:
		json := helpers.RandomJsonCoordinates()
		command = fmt.Sprintf("SET %s %s_%d OBJECT %s", BenchmarkPrefix, testPrefixSet, rand.Intn(1000), json)
	case Bounds:
		lat1, lon1, lat2, lon2 := helpers.RandomBoundsCoordinates()
		// BUGFIX: four coordinates describe a bounding box; Tile38 expects
		// the BOUNDS keyword here, not POINT (which takes two coordinates).
		command = fmt.Sprintf("SET %s %s_%d BOUNDS %f %f %f %f", BenchmarkPrefix, testPrefixSet, rand.Intn(1000), lat1, lon1, lat2, lon2)
	}
	return conn.Write(command)
}
// Match reads the server reply for a previously fired SET command and checks
// that a non-empty response arrived.
func (c *SetCommand) Match(conn connection.ConnectorReadWriter) error {
	data, err := conn.Read()
	if err != nil {
		return err
	}
	if len(data) == 0 {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return errors.New("empty response from Tile38 server")
	}
	return nil
}
// Up performs per-suite setup; the SET benchmark needs none.
func (c *SetCommand) Up(conn connection.ConnectorReadWriter) error {
	return nil
}
// Down performs per-suite teardown; the SET benchmark needs none.
func (c *SetCommand) Down(conn connection.ConnectorReadWriter) error {
	return nil
}
package terraform_kintone
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/naruta/terraform-provider-kintone/kintone"
"github.com/naruta/terraform-provider-kintone/kintone/raw_client"
)
// resourceKintoneRecord declares the kintone record Terraform resource:
// CRUD callbacks plus its schema (app_id required, revision computed,
// values an optional JSON object string).
func resourceKintoneRecord() *schema.Resource {
	return &schema.Resource{
		Create: resourceKintoneRecordCreate,
		Read:   resourceKintoneRecordRead,
		Update: resourceKintoneRecordUpdate,
		Delete: resourceKintoneRecordDelete,
		Schema: map[string]*schema.Schema{
			"app_id": {
				Type:     schema.TypeString,
				Required: true,
			},
			"revision": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"values": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateKintoneRecordValues,
			},
		},
	}
}
// validateKintoneRecordValues is a schema.ValidateFunc ensuring the "values"
// attribute is a JSON object decodable into map[string]string.
func validateKintoneRecordValues(v interface{}, k string) (ws []string, errors []error) {
	var values map[string]string
	rawValues := v.(string)
	err := raw_client.DecodeJson([]byte(rawValues), &values)
	if err != nil {
		errors = append(errors, fmt.Errorf("invalid format of %s: %s", k, err))
	}
	return
}
// convertRecordValues decodes a JSON object string into a field-code keyed
// value map for the kintone API.
func convertRecordValues(rawValues string) (map[kintone.FieldCode]string, error) {
	var decoded map[string]string
	if err := raw_client.DecodeJson([]byte(rawValues), &decoded); err != nil {
		return nil, err
	}
	result := make(map[kintone.FieldCode]string, len(decoded))
	for fieldCode, fieldValue := range decoded {
		result[kintone.FieldCode(fieldCode)] = fieldValue
	}
	return result, nil
}
// resourceKintoneRecordCreate creates a kintone record from the resource
// data and stores the new record id and revision back into state.
func resourceKintoneRecordCreate(d *schema.ResourceData, m interface{}) error {
	config := m.(*Config)
	apiClient := newClient(*config)
	useCase := kintone.NewCreateRecordUseCase(apiClient)
	ctx := context.Background()
	cmd := kintone.CreateRecordUseCaseCommand{
		AppId:  kintone.AppId(d.Get("app_id").(string)),
		Record: kintone.Record{},
	}
	if v, ok := d.GetOk("values"); ok {
		values, err := convertRecordValues(v.(string))
		if err != nil {
			return err
		}
		cmd.Record.Values = values
	}
	id, revision, err := useCase.Execute(ctx, cmd)
	if err != nil {
		return err
	}
	d.SetId(id.String())
	// NOTE(review): d.Set's error is ignored, as elsewhere in this file.
	d.Set("revision", revision.String())
	return nil
}
// resourceKintoneRecordRead refreshes state from the remote record.
// Currently a stub: state is never reconciled with kintone.
func resourceKintoneRecordRead(d *schema.ResourceData, m interface{}) error {
	// TODO: impl
	return nil
}
// resourceKintoneRecordUpdate pushes changed values to the existing record
// (identified by the state id and current revision) and records the new
// revision in state.
func resourceKintoneRecordUpdate(d *schema.ResourceData, m interface{}) error {
	config := m.(*Config)
	apiClient := newClient(*config)
	useCase := kintone.NewUpdateRecordUseCase(apiClient)
	ctx := context.Background()
	cmd := kintone.UpdateRecordUseCaseCommand{
		AppId: kintone.AppId(d.Get("app_id").(string)),
		Record: kintone.Record{
			Id:       kintone.RecordId(d.Id()),
			Revision: kintone.RecordRevision(d.Get("revision").(string)),
		},
	}
	if v, ok := d.GetOk("values"); ok {
		values, err := convertRecordValues(v.(string))
		if err != nil {
			return err
		}
		cmd.Record.Values = values
	}
	revision, err := useCase.Execute(ctx, cmd)
	if err != nil {
		return err
	}
	// NOTE(review): d.Set's error is ignored, as elsewhere in this file.
	d.Set("revision", revision.String())
	return nil
}
// resourceKintoneRecordDelete deletes the record identified by the state id.
func resourceKintoneRecordDelete(d *schema.ResourceData, m interface{}) error {
	config := m.(*Config)
	apiClient := newClient(*config)
	useCase := kintone.NewDeleteRecordUseCase(apiClient)
	ctx := context.Background()
	cmd := kintone.DeleteRecordUseCaseCommand{
		AppId: kintone.AppId(d.Get("app_id").(string)),
		Id:    kintone.RecordId(d.Id()),
	}
	return useCase.Execute(ctx, cmd)
}
|
package main
import (
"flag"
"fmt"
"github.com/gotk3/gotk3/cairo"
"github.com/gotk3/gotk3/gdk"
"github.com/gotk3/gotk3/gtk"
"os"
"runtime/pprof"
)
// initialConfig selects the seed pattern placed by Playground.Init
// ("line", "kaka", "" for an empty board, anything else for the default).
var initialConfig = ""

// Cell packing: the board is stored 4 bits per cell, 16 cells per uint64.
const lowBits64 uint64 = 0x5555555555555555
const bitsPerCell = 4
const cellsPerInt = 64 / bitsPerCell
const cellMask uint64 = (1 << bitsPerCell) - 1

// ShiftType enumerates shift modes.
// NOTE(review): not referenced anywhere in the visible part of the file.
type ShiftType int

const (
	SHIFT_NONE ShiftType = iota
	SHIFT_FIRST
	SHIFT_TWO
	SHIFT_ALL
)
// fail reports err on stderr and terminates the process with exit status 1.
func fail(err error) {
	os.Stderr.WriteString(fmt.Sprintf("Failure: %v", err))
	os.Exit(1)
}
// cellType holds per-cell-value presentation data (the draw color).
type cellType struct {
	color *gdk.RGBA
}

// cellValue carries packed per-cell neighbour counts: the number of "young"
// neighbours and the total number of live neighbours, 4 bits per cell.
type cellValue struct {
	young uint64
	total uint64
}
// Playground holds the simulation state plus the view parameters used to
// render it into a GTK drawing area.
type Playground struct {
	da             *gtk.DrawingArea
	cellSize       uint
	area           [][]uint64 // board rows, 4 bits per cell packed into uint64s
	cellTypes      []*cellType
	cellsPerRow    int
	lastIntMask    uint64 // mask of valid bits in the last uint64 of a row
	lastCellOffset uint   // bit offset of the last cell within the last uint64
	repeats        int    // how many times to repeat
	iterations     uint64 // the number of steps passed
	viewX0         int    // the index of the top-left cell
	viewY0         int
	viewXSize      int // the width of the view
	viewYSize      int
}
// NewPlayground builds a playground whose view starts at the origin with the
// given dimensions; the board itself is allocated later by Init.
func NewPlayground(cellSize uint, xsize, ysize int) *Playground {
	return &Playground{
		cellSize:  cellSize,
		viewX0:    0,
		viewY0:    0,
		viewXSize: xsize,
		viewYSize: ysize,
	}
}
// 01 01 01 01 prev
// >> 01 01 01 01 prev+ -> 11 11 11 11
// 01 01 01 01 << prev-
//
// 01 01 01 01 this - ignored
// >> 01 01 01 01 this+ -> 10 10 10 10
// 01 01 01 01 << this-
//
// 01 01 01 01 next -> 11 11 11 11
// >> 01 01 01 01 next+
// 01 01 01 01 << next-
// cellSplit separates a packed running sum into per-cell (young, total)
// counts: the low 2 bits of each nibble are the young count, and adding the
// shifted high pair yields the total.
func cellSplit(x uint64) cellValue {
	const lowMask uint64 = 0x3333333333333333
	young := x & lowMask
	high := (x >> 2) & lowMask
	return cellValue{young, high + young}
}
// tripleRow makes a running horizontal sum of the row: each cell plus its
// left and right neighbours, wrapping around at both row ends.
// Result is the array of (young,total) per cell.
// lco is the bit offset of the last cell in the final uint64; lim is the
// mask of valid bits in that uint64.
func tripleRow(orig []uint64, lco uint, lim uint64) []cellValue {
	nint := len(orig)
	result := make([]cellValue, nint)
	mask := cellMask
	ls := uint(bitsPerCell)
	rs := uint(64 - bitsPerCell)
	// Interior ints: neighbours come from adjacent ints via shifts.
	for i := 1; i < nint-1; i++ {
		o := orig[i]
		a := orig[i-1]
		b := orig[i+1]
		x := o + (o >> ls) + (b << rs) + (o << ls) + (a >> rs)
		result[i] = cellSplit(x)
	}
	if nint > 1 {
		// First and last ints wrap around the row edges; the last-int
		// contribution uses lco/lim because it may be only partially filled.
		o := orig[0]
		a := orig[nint-1]
		b := orig[1]
		x := o + (o >> ls) + (b << rs) + (o << ls) + ((a >> lco) & mask)
		result[0] = cellSplit(x)
		o = orig[nint-1]
		a = orig[nint-2]
		b = orig[0]
		x = o + (o >> ls) + ((b & mask) << lco) + (o << ls) + (a >> rs)
		x &= lim
		result[nint-1] = cellSplit(x)
	} else {
		// Single-int row: it wraps onto itself.
		o := orig[0]
		x := o + (o >> ls) + ((o & mask) << lco) + (o << ls) + ((o >> lco) & mask)
		x &= lim
		result[0] = cellSplit(x)
	}
	return result
}
// Sumup 8 adjacent cells together.
// Simple trick is to sumup all 9 cells, then subtract the central one.
// Thus we can reuse the running sums of the rows: arg holds the tripleRow
// sums of the row above, the row itself, and the row below; orig is the raw
// packed row whose own value is subtracted back out.
func sumup8(arg [][]cellValue, orig []uint64) []cellValue {
	nint := len(orig)
	res := make([]cellValue, nint)
	a := arg[0]
	b := arg[1]
	c := arg[2]
	for i := 0; i < nint; i++ {
		v := cellSplit(orig[i])
		res[i].young = a[i].young + b[i].young + c[i].young - v.young
		res[i].total = a[i].total + b[i].total + c[i].total - v.total
	}
	return res
}
// Step advances the playground by one generation, applying the life rules to
// every cell via 4-bit-per-cell bitwise arithmetic. The board wraps around
// both vertically and horizontally (torus topology).
func (pg *Playground) Step() {
	// fmt.Printf("step %p\n", pg)
	nrows := len(pg.area)
	next := make([][]uint64, nrows) // the next state of the area
	roll := make([][]cellValue, 3)  // working area: row sums above/at/below
	first := tripleRow(pg.area[0], pg.lastCellOffset, pg.lastIntMask)
	last := tripleRow(pg.area[nrows-1], pg.lastCellOffset, pg.lastIntMask)
	// Seed the rolling window so row 0 sees the last row as its neighbour.
	roll[1] = last
	roll[2] = first
	for iy := 0; iy < nrows; iy++ {
		// shift all rows
		roll[0] = roll[1]
		roll[1] = roll[2]
		// fill the next row
		idx := iy + 1
		if idx < nrows {
			roll[2] = tripleRow(pg.area[idx], pg.lastCellOffset, pg.lastIntMask)
		} else {
			roll[2] = first
		}
		// now sumup all young and total number of adjacent cells.
		// counts is an array of number of Y (young) and T(total) cells around.
		counts := sumup8(roll, pg.area[iy])
		// rules are:
		// 1. each young cell converts to old.
		// 2. an empty cell converts to young cell if Y<2 and T=3, otherwise is empty
		// 3. an old cell remains live if Y<2 and T=[2..3], otherwise is empty
		nint := len(pg.area[iy])
		next[iy] = make([]uint64, nint)
		const ones uint64 = 0x1111111111111111
		for ix := 0; ix < nint; ix++ {
			orig := pg.area[iy][ix]
			noto := ^orig
			notyoung := ^counts[ix].young
			total := counts[ix].total
			// condition if young less than 2
			yless2 := (notyoung >> 1) & (notyoung >> 2) & (notyoung >> 3)
			// condition if total is 2 or 3
			nott := ^total
			total23 := (total >> 1) & (nott >> 2) & (nott >> 3)
			// extract all young cells and convert them into old
			new1 := (orig & ones) << 2
			// extract all empty cells
			empt := noto & (noto >> 2)
			// convert them into youngs
			new2 := empt & yless2 & total & total23 & ones
			// extract all old cells
			olds := orig >> 2
			// convert them into old
			new3 := (olds & yless2 & total23 & ones) << 2
			// now combine all three outcomes
			next[iy][ix] = new1 | new2 | new3
		}
		// clear the unused bits of the partially filled last int
		next[iy][nint-1] &= pg.lastIntMask
	}
	pg.area = next
	pg.iterations++
	// fmt.Println("step done\n")
}
// makeCellType builds a cellType whose colour is parsed from colorName.
// It panics when GDK does not recognize the colour name.
func makeCellType(colorName string) *cellType {
	ct := &cellType{color: gdk.NewRGBA()}
	if !ct.color.Parse(colorName) {
		panic("failed to parse color name")
	}
	return ct
}
// Init allocates an nx-by-ny playground, defines the cell colours, computes
// the per-row bit-packing parameters, and seeds the initial configuration
// selected by the initialConfig flag. Panics when a dimension is not
// positive.
func (pg *Playground) Init(nx, ny int) {
	fmt.Println("configure-event")
	if nx <= 0 {
		panic("Too narrow area")
	}
	if ny <= 0 {
		panic("Too short area")
	}
	// define cell types: 0x0 empty (white), 0x1 young, 0x4 old
	pg.cellTypes = make([]*cellType, cellMask+1)
	pg.cellTypes[0x0] = makeCellType("white")
	pg.cellTypes[0x1] = makeCellType("lightgreen")
	pg.cellTypes[0x4] = makeCellType("blue")
	// number of uint64 words needed to hold nx cells, rounding up
	rowLen := (nx + cellsPerInt - 1) / cellsPerInt
	pg.cellsPerRow = nx
	// how many cells actually live in the final word of each row
	lastIntCells := nx - cellsPerInt*(rowLen-1)
	if lastIntCells <= 0 {
		panic("Invalid lastIntCells")
	}
	// the mask of the last int in the row
	pg.lastIntMask = ^(^uint64(0) << uint(lastIntCells*bitsPerCell))
	// the offset of the last cell in the last int
	pg.lastCellOffset = uint((lastIntCells - 1) * bitsPerCell)
	for i := 0; i < ny; i++ {
		row := make([]uint64, rowLen)
		pg.area = append(pg.area, row)
	}
	pg.repeats = 0
	// seed the chosen starting pattern around the middle of the field
	switch initialConfig {
	case "line":
		pg.setDots(ny/2, nx/2-3, "1222221")
	case "kaka":
		pg.setDots(ny/2+0, nx/2, "000000012")
		pg.setDots(ny/2+1, nx/2, "2100010021")
		pg.setDots(ny/2+2, nx/2, "0020210021")
		pg.setDots(ny/2+3, nx/2, "222002122")
		pg.setDots(ny/2+4, nx/2, "0110101")
	case "":
		// do nothing
	default:
		pg.setDots(ny/2, nx/2, "221")
		pg.setDots(ny/2+1, nx/2, "002")
		pg.setDots(ny/2+2, nx/2, "2")
	}
}
// setDots writes a run of cells encoded in dots into row y starting at
// column x. '0' clears a cell, '1' makes it young (0x1), '2' makes it old
// (0x4); any other character clears the cell (same as '0').
// Both coordinates wrap around the torus — including, unlike the previous
// version, when the run itself crosses the right edge of the row (the old
// code only normalized x before the loop and could index past the end of
// pg.area[y] for long dot strings).
func (pg *Playground) setDots(y, x int, dots string) {
	ny := len(pg.area)
	nx := pg.cellsPerRow
	// normalize the starting coordinates onto the torus
	y = ((y % ny) + ny) % ny
	x = ((x % nx) + nx) % nx
	// TODO(bukind): optimize the loop
	for i := 0; i < len(dots); i++ {
		// wrap the column on every iteration so a run that reaches the
		// right edge continues on the left edge instead of panicking
		if x >= nx {
			x = 0
		}
		ix := x / cellsPerInt
		shift := uint((x - ix*cellsPerInt) * bitsPerCell)
		var v uint64
		switch dots[i] {
		case '1':
			v = 0x1 // young cell
		case '2':
			v = 0x4 // old cell
		}
		// clear the cell, then install the new value
		pg.area[y][ix] = (pg.area[y][ix] &^ (cellMask << shift)) | (v << shift)
		x++
	}
}
// Clean erases every cell, leaving an empty playground.
func (pg *Playground) Clean() {
	for _, row := range pg.area {
		for ix := range row {
			row[ix] = 0
		}
	}
}
// StepAndDraw performs one automatic step when stepping is active:
// a positive repeats counter is consumed one step at a time, while a
// repeats value of -1 means "step forever". Any other value does nothing.
func (pg *Playground) StepAndDraw() {
	switch {
	case pg.repeats > 0:
		pg.repeats--
	case pg.repeats != -1:
		return
	}
	pg.Step()
	pg.da.QueueDraw()
}
// ShowAll dumps the whole playground to stdout, one text line per row.
// The previous version discarded showbin's return value (showbin only
// builds and returns a string), so it produced no output at all.
func (pg *Playground) ShowAll() {
	for iy := 0; iy < len(pg.area); iy++ {
		var row []byte
		for ix := 0; ix < len(pg.area[iy]); ix++ {
			row = append(row, showbin(pg.area[iy][ix])...)
		}
		fmt.Println(string(row))
	}
}
// areaDrawEvent redraws the visible portion of the playground. Cells are
// batched per colour (one cairo fill per cell type), a one-line status
// string is drawn in the top-left corner, and when automatic stepping is
// active (repeats != 0) the next step is scheduled after drawing.
func areaDrawEvent(da *gtk.DrawingArea, cr *cairo.Context, pg *Playground) {
	_ = cr
	var gapSize uint = 0
	// leave a visual gap between cells once they are big enough
	if pg.cellSize > 3 {
		gapSize = pg.cellSize / 4
	}
	dx := float64(pg.cellSize)
	cs := float64(pg.cellSize - gapSize)
	olds := 0
	news := 0
	// calculate the viewport parameters
	cellsX := da.GetAllocatedWidth() / int(pg.cellSize)
	cellsY := da.GetAllocatedHeight() / int(pg.cellSize)
	startY := pg.viewY0
	startX := pg.viewX0
	endY := startY + cellsY
	endX := startX + cellsX
	// clamp the viewport so it never runs past the field edges
	if endY > len(pg.area) {
		if cellsY > len(pg.area) {
			startY = 0
		} else {
			startY = len(pg.area) - cellsY
		}
		endY = len(pg.area)
	}
	if startX+cellsX > pg.cellsPerRow {
		if cellsX > pg.cellsPerRow {
			startX = 0
		} else {
			startX = pg.cellsPerRow - cellsX
		}
		endX = pg.cellsPerRow
	}
	// convert X cells into ints
	cellX0 := startX
	cellY0 := startY
	startX = startX / cellsPerInt
	endX = (endX + cellsPerInt - 1) / cellsPerInt
	for iy := startY; iy < endY; iy++ {
		row := pg.area[iy]
		y := float64(iy-cellY0) * dx
		for mask, cellType := range pg.cellTypes {
			if mask == 0 || cellType == nil {
				// optimization - skip empty cells
				continue
			}
			// tally young cells (mask 1) and all others separately so the
			// status line below can report both counts
			cnt := &olds
			if mask == 1 {
				cnt = &news
			}
			rgba := cellType.color.Floats()
			cr.SetSourceRGBA(rgba[0], rgba[1], rgba[2], rgba[3])
			for ix := startX; ix < endX; ix++ {
				value := row[ix]
				idx0 := ix * cellsPerInt
				maxIdx := idx0 + cellsPerInt
				if maxIdx > pg.cellsPerRow {
					maxIdx = pg.cellsPerRow
				}
				// walk the cells packed into this word, queueing a
				// rectangle for each cell of the current colour
				for idx := idx0; idx < maxIdx; idx++ {
					if int(value&cellMask) == mask {
						cr.Rectangle(dx*float64(idx-cellX0), y, cs, cs)
						(*cnt)++
					}
					value >>= bitsPerCell
				}
			}
			cr.Fill()
		}
	}
	cr.MoveTo(1., 14.)
	cr.SetSourceRGB(0., 0., 0.)
	cr.SetFontSize(12.)
	total := float64(pg.cellsPerRow * len(pg.area))
	cr.ShowText(fmt.Sprintf("steps:%d cells:%d/%.1f%% old:%d/%.1f%%",
		pg.iterations, olds+news, float64(olds+news)*100/total,
		olds, float64(olds)*100/total))
	cr.Stroke()
	if pg.repeats != 0 {
		pg.StepAndDraw()
	}
}
// keyPressEvent dispatches the global key bindings:
//	Escape   quit the application
//	space    advance one generation and redraw
//	C        clear the whole field
//	S        clear only the lower half of the field
//	t        queue 10 more automatic steps
//	x        stop automatic stepping
//	s        step forever (repeats == -1)
func keyPressEvent(win *gtk.Window, evt *gdk.Event, pg *Playground) {
	_ = win
	ev := gdk.EventKey{evt}
	fmt.Printf("key: val:%d state:%d type:%v\n", ev.KeyVal(), ev.State(), ev.Type())
	switch ev.KeyVal() {
	case gdk.KEY_Escape:
		gtk.MainQuit()
	case gdk.KEY_space:
		pg.Step()
		pg.da.QueueDraw()
	case gdk.KEY_C:
		pg.Clean()
		pg.da.QueueDraw()
	case gdk.KEY_S:
		{
			// clean the lower half of the field
			// NOTE(review): unlike 'C', no redraw is queued here — confirm
			// whether that is intentional.
			nint := len(pg.area)
			for iy := nint / 2; iy < nint; iy++ {
				for ix := 0; ix < len(pg.area[iy]); ix++ {
					pg.area[iy][ix] = 0
				}
			}
		}
	case gdk.KEY_t:
		pg.repeats += 10
		pg.StepAndDraw()
	case gdk.KEY_x:
		pg.repeats = 0
	case gdk.KEY_s:
		// -1 means "run until stopped"
		pg.repeats = -1
		pg.StepAndDraw()
	}
}
// mouseScrollEvent zooms the view in (scroll up) or out (scroll down),
// trying to keep the cell under the mouse pointer stationary. The zoom step
// grows with the current cell size, and the new view origin is clamped to
// the field. Returns true to stop further event propagation.
func mouseScrollEvent(win *gtk.Window, evt *gdk.Event, pg *Playground) bool {
	ev := gdk.EventScroll{evt}
	dy := ev.DeltaY()
	var newcs = pg.cellSize
	if dy < 0 {
		// zoom in
		if newcs < 4 {
			newcs += 1
		} else if newcs < 10 {
			newcs += 2
		} else if newcs < 30 {
			newcs = uint(1.4 * float64(newcs))
		} else {
			// too large cell - not zooming
		}
	} else if dy > 0 {
		// zoom out
		if newcs > 10 {
			newcs = uint(float64(newcs) / 1.4)
			if newcs > 10 {
				// cap the first zoom-out step from a large size at 10
				newcs = 10
			}
		} else if newcs > 4 {
			newcs -= 2
		} else if newcs > 1 {
			newcs -= 1
		} else {
			// too small cell - not zooming
		}
	}
	if newcs == pg.cellSize {
		return true
	}
	// old cell index under the cursor
	oldX := float64(pg.viewX0) + ev.X()/float64(pg.cellSize)
	oldY := float64(pg.viewY0) + ev.Y()/float64(pg.cellSize)
	// find the 0 position so that the same cell is under the cursor
	newX0 := int(oldX - (ev.X() / float64(newcs)))
	newY0 := int(oldY - (ev.Y() / float64(newcs)))
	if newX0 < 0 {
		newX0 = 0
	} else if newX0 >= pg.cellsPerRow {
		newX0 = pg.cellsPerRow - 1
	}
	if newY0 < 0 {
		newY0 = 0
	} else if newY0 >= len(pg.area) {
		newY0 = len(pg.area) - 1
	}
	fmt.Printf("scroll: dy:%.1f, (x,y):%.1f,%.1f v0:%d,%d -> %d,%d\n",
		dy, ev.X(), ev.Y(), pg.viewX0, pg.viewY0, newX0, newY0)
	pg.viewX0 = newX0
	pg.viewY0 = newY0
	pg.cellSize = newcs
	pg.da.QueueDraw()
	return true
}
// showbin renders one packed word of cells as a fixed-width string:
// '.' for 0, 'o' for 1, 'X' for 2, '?' for 3 and '$' for anything else.
func showbin(v uint64) string {
	out := make([]byte, cellsPerInt)
	for i := range out {
		c := byte('$')
		switch v & cellMask {
		case 0:
			c = '.'
		case 1:
			c = 'o'
		case 2:
			c = 'X'
		case 3:
			c = '?'
		}
		out[i] = c
		v >>= bitsPerCell
	}
	return string(out)
}
// mouseClickedEvent cycles the clicked cell through
// empty (0x0) -> young (0x1) -> old (0x4) -> empty and redraws.
// NOTE(review): the click coordinates are divided by the cell size only —
// the viewport origin (viewX0/viewY0) is not added and iy/ix are not
// clamped against the area bounds; confirm whether clicks outside the grid
// can index out of range here.
func mouseClickedEvent(win *gtk.Window, evt *gdk.Event, pg *Playground) bool {
	ev := gdk.EventButton{evt}
	dx := float64(pg.cellSize)
	ix := int(ev.X() / dx)
	iy := int(ev.Y() / dx)
	idx := ix / cellsPerInt
	v := pg.area[iy][idx]
	// bit offset of the clicked cell inside its word
	shift := uint(bitsPerCell * (ix % cellsPerInt))
	var nv uint64
	switch (v >> shift) & cellMask {
	case 0x0:
		nv = 0x1
	case 0x1:
		nv = 0x4
	case 0x4:
		nv = 0x0
	default:
		nv = 0x0
	}
	nv <<= shift
	// clear the old cell value and merge in the new one
	nv += v & ^(cellMask << shift)
	fmt.Printf("mouse: btn:%d bnt-val:%d state:%d type:%v ix,iy,idx,i:%d,%d,%d,%d\n",
		ev.Button(), ev.ButtonVal(),
		ev.State(), ev.Type(),
		ix, iy, idx, ix%cellsPerInt)
	fmt.Printf("old: %s\n", showbin(v))
	fmt.Printf("new: %s\n", showbin(nv))
	pg.area[iy][idx] = nv
	pg.da.QueueDraw()
	return true
}
// setupWindow creates the main GTK window, wires the drawing area and all
// input handlers to the playground, and shows everything. Returns the first
// GTK error encountered, or nil on success.
func setupWindow(playground *Playground) error {
	win, err := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
	if err != nil {
		return err
	}
	win.SetTitle("dots")
	win.Connect("destroy", gtk.MainQuit)
	if playground.viewXSize > 0 && playground.viewYSize > 0 {
		win.SetSizeRequest(playground.viewXSize, playground.viewYSize)
	} else {
		// a non-positive requested size means "use the whole screen"
		win.Fullscreen()
	}
	win.SetResizable(false)
	da, err := gtk.DrawingAreaNew()
	if err != nil {
		return err
	}
	// link playground and drawing area
	playground.da = da
	da.AddEvents(int(gdk.SCROLL_MASK))
	win.Add(da)
	win.ShowAll()
	if _, err = da.Connect("draw", areaDrawEvent, playground); err != nil {
		return err
	}
	if _, err = win.Connect("key-press-event", keyPressEvent, playground); err != nil {
		return err
	}
	if _, err = win.Connect("button-press-event", mouseClickedEvent, playground); err != nil {
		return err
	}
	_, err = win.Connect("scroll-event", mouseScrollEvent, playground)
	return err
}
// main parses flags, builds the playground and GTK window, optionally
// starts CPU profiling, and runs the GTK main loop.
func main() {
	var cellSize uint = 12
	var prof string
	var xsize int
	var ysize int
	var nx int
	var ny int
	flag.IntVar(&xsize, "xsize", 400, "Set the X viewport size, or -1")
	flag.IntVar(&ysize, "ysize", 400, "Set the Y viewport size, or -1")
	flag.IntVar(&nx, "nx", 40, "Set the number of cells per X")
	flag.IntVar(&ny, "ny", 40, "Set the number of cells per Y")
	flag.UintVar(&cellSize, "cellsize", cellSize, "The size of the cell")
	flag.StringVar(&initialConfig, "init", initialConfig, "The name of the initial configuration")
	flag.StringVar(&prof, "prof", "", "The name of the cpu profile output")
	flag.Parse()
	gtk.Init(nil)
	playground := NewPlayground(cellSize, xsize, ysize)
	// TODO: should be merged into constructor
	playground.Init(nx, ny)
	if err := setupWindow(playground); err != nil {
		fail(err)
	}
	if prof != "" {
		f, err := os.Create(prof)
		if err != nil {
			fail(err)
		}
		// close the profile file after StopCPUProfile has flushed it
		// (deferred calls run LIFO)
		defer f.Close()
		// the error from StartCPUProfile was previously discarded
		if err := pprof.StartCPUProfile(f); err != nil {
			fail(err)
		}
		defer pprof.StopCPUProfile()
	}
	gtk.Main()
}
|
package nodenormal
import (
"fmt"
"time"
"github.com/fananchong/go-xserver/common"
nodecommon "github.com/fananchong/go-xserver/internal/components/node/common"
"github.com/fananchong/go-xserver/internal/protocol"
"github.com/fananchong/go-xserver/internal/utility"
)
// IntranetSession : network session class (the client-side session towards
// a Gateway server).
type IntranetSession struct {
	*nodecommon.SessionBase
}

// NewIntranetSession : constructs an IntranetSession and links it with a
// fresh SessionBase (the session passes itself as the derived object).
func NewIntranetSession(ctx *common.Context) *IntranetSession {
	sess := &IntranetSession{}
	sess.SessionBase = nodecommon.NewSessionBase(ctx, sess)
	return sess
}
// Start launches a background loop that connects this session to its
// gateway server, retrying once per second until the connection and token
// handshake succeed, or until the target node disappears.
func (sess *IntranetSession) Start() {
	go func() {
		for {
			node := nodecommon.GetSessionMgr().GetByID(utility.ServerID2NodeID(sess.Info.GetId()))
			if node == nil {
				// the target node is gone; stop trying to connect
				break
			}
			address := fmt.Sprintf("%s:%d", sess.Info.GetAddrs()[common.IPINNER], sess.Info.GetPorts()[common.PORTFORINTRANET])
			sess.Ctx.Log.Infoln("Try to connect to the gateway server, address:", address, "node:", utility.ServerID2UUID(sess.Info.GetId()).String())
			// idiomatic negation instead of comparing against false
			if !sess.Connect(address, sess) {
				time.Sleep(1 * time.Second)
				continue
			}
			sess.Verify()
			// send the TOKEN for verification
			msg := &protocol.MSG_GW_VERIFY_TOKEN{}
			msg.Id = sess.Info.GetId()
			msg.Token = sess.Ctx.Config.Common.IntranetToken
			sess.SendMsg(uint64(protocol.CMD_GW_VERIFY_TOKEN), msg)
			sess.Ctx.Log.Infoln("Successfully connected to the gateway server, address:", address, "node:", utility.ServerID2UUID(sess.Info.GetId()).String())
			break
		}
	}()
}
// DoRegister : handles a node registering; no-op for intranet sessions.
func (sess *IntranetSession) DoRegister(msg *protocol.MSG_MGR_REGISTER_SERVER, data []byte, flag byte) {
}

// DoVerify : records our own registration on verification; no-op here.
func (sess *IntranetSession) DoVerify(msg *protocol.MSG_MGR_REGISTER_SERVER, data []byte, flag byte) {
}

// DoLose : handles a node being lost; no-op here.
func (sess *IntranetSession) DoLose(msg *protocol.MSG_MGR_LOSE_SERVER, data []byte, flag byte) {
}

// DoClose : handles a node closing; no-op here.
func (sess *IntranetSession) DoClose(sessbase *nodecommon.SessionBase) {
}
|
package prettyprint
import (
"fmt"
"testing"
)
// TestCanvas draws a handful of labelled and unlabelled lines on a small
// canvas and prints the result so the rendering can be inspected by eye.
func TestCanvas(t *testing.T) {
	canvas := NewCanvas(4, 4)
	lines := []struct {
		x, y, w, h int
		label      string
	}{
		{0, 0, 1, 1, "foo"},
		{2, 2, 1, 1, "t"},
		{1, 1, 0, 2, "bar"},
		{2, 0, 1, 1, ""},
	}
	for _, l := range lines {
		canvas.DrawLine(l.x, l.y, l.w, l.h, l.label)
	}
	fmt.Print(canvas)
}
|
package alerting
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/square/p2/pkg/util"
)
// Urgency selects which PagerDuty service key (and thus which escalation
// policy) an alert is routed to.
type Urgency string

const (
	// pagerdutyURI is the generic-events API endpoint used to create events.
	pagerdutyURI = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
	// eventType is always "trigger": this package only opens incidents.
	eventType = "trigger"
	HighUrgency Urgency = "high_urgency"
	LowUrgency Urgency = "low_urgency"
)

// Subset of *http.Client functionality, useful for testing
type Poster interface {
	Post(uri string, contentType string, body io.Reader) (resp *http.Response, err error)
}

// pagerdutyAlerter sends incidents to PagerDuty via the Poster client,
// choosing the service key by urgency.
type pagerdutyAlerter struct {
	Client Poster
	// HighUrgencyServiceKey should be the service key for a pagerduty
	// service that uses high-urgency escalation rules. Incidents sent by
	// P2 using this service indicate serious problems that should be
	// addressed as quickly as possible
	HighUrgencyServiceKey string
	// LowUrgencyServiceKey should be the pagerduty service key for a
	// service that uses low-urgency rules. Incidents sent here should be
	// addressed at some point but do not represent immediate threats
	LowUrgencyServiceKey string
}

// compile-time check that pagerdutyAlerter satisfies the Alerter interface
var _ Alerter = &pagerdutyAlerter{}

// Duplicates the information from AlertInfo but has the appropriate JSON tags
// as well as ServiceKey
type pagerdutyBody struct {
	// required, provided in AlertInfo
	Description string `json:"description"`
	IncidentKey string `json:"incident_key"`
	// optional, provided in AlertInfo
	Details interface{} `json:"details,omitempty"`
	// provided by pagerdutyAlerter
	ServiceKey string `json:"service_key"`
	EventType string `json:"event_type"`
}
// Alert triggers a PagerDuty incident described by alertInfo, routed to the
// high- or low-urgency service key according to urgency. Returns an error
// when the input is incomplete, the request fails, or PagerDuty rejects it.
func (p *pagerdutyAlerter) Alert(alertInfo AlertInfo, urgency Urgency) error {
	// IncidentKey is not actually required by the PD API, but it's good
	// practice to set it and is useful in error messages
	if alertInfo.IncidentKey == "" {
		return util.Errorf("An incident key was not provided for the alert")
	}
	if alertInfo.Description == "" {
		// fixed: the message previously ended with an unterminated quote
		// ("...alert '%s")
		return util.Errorf("A description was not provided for alert '%s'", alertInfo.IncidentKey)
	}
	// default to the high-urgency service; only explicit LowUrgency picks
	// the low-urgency key
	serviceKey := p.HighUrgencyServiceKey
	if urgency == LowUrgency {
		serviceKey = p.LowUrgencyServiceKey
	}
	body := pagerdutyBody{
		ServiceKey:  serviceKey,
		Description: alertInfo.Description,
		IncidentKey: alertInfo.IncidentKey,
		Details:     alertInfo.Details,
		EventType:   eventType,
	}
	bodyBytes, err := json.Marshal(body)
	if err != nil {
		return util.Errorf("Unable to marshal alert as JSON: %s", err)
	}
	resp, err := p.Client.Post(pagerdutyURI, "application/json", bytes.NewReader(bodyBytes))
	if err != nil {
		return util.Errorf("Unable to trigger incident: %s", err)
	}
	defer resp.Body.Close()
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return util.Errorf("Unable to read response from pagerduty when triggering incident: %s", err)
	}
	if resp.StatusCode == http.StatusOK {
		return nil
	}
	if resp.StatusCode == http.StatusForbidden {
		// TODO: retry these with backoff?
		return util.Errorf("Unable to trigger incident %s due to PagerDuty rate limiting", alertInfo.IncidentKey)
	}
	return p.handleError(resp.StatusCode, respBytes)
}
// handleError converts a non-OK PagerDuty response into an error. If the
// body is valid JSON it is compacted before being included in the message;
// otherwise the raw bytes are included verbatim.
// (The previous implementation called json.Marshal on the byte slice,
// which base64-encodes it and never fails, so the fallback branch was
// unreachable and the message was base64 garbage.)
func (p *pagerdutyAlerter) handleError(code int, respBytes []byte) error {
	var buf bytes.Buffer
	if err := json.Compact(&buf, respBytes); err != nil {
		// The response wasn't JSON
		return util.Errorf("%d response from PagerDuty: %s", code, string(respBytes))
	}
	return util.Errorf("%d response from PagerDuty: %s", code, buf.String())
}
|
package gomvc
import (
"fmt"
"strings"
)
// type HttpMethod int8
// const (
// ALL_METHOD HttpMethod = 0
// GET HttpMethod = 1
// POST HttpMethod = 2
// PUT HttpMethod = 4
// DELETE HttpMethod = 8
// HEAD HttpMethod = 16
// )
// ActionInfo is the entity describing a single routable action.
type ActionInfo struct {
	Name string //action name (stored lowercase)
	Controller *ControllerInfo //owning controller, for reverse lookup
	Handler func(ctx *HttpContext) ActionResulter //handler: produces an ActionResulter from the HttpContext
	Filters []Filter //filters applied to this action
}
// AddFilters appends the given filters to the action, skipping nil entries.
func (action_info *ActionInfo) AddFilters(filters ...Filter) {
	for _, f := range filters {
		if f == nil {
			continue
		}
		action_info.Filters = append(action_info.Filters, f)
	}
}
// ControllerInfo is the entity describing a controller.
type ControllerInfo struct {
	Name string //controller name
	Actions map[string]*ActionInfo //all actions owned by this controller
	Filters []Filter //filters applied to the whole controller
}
// Init initializes the controller instance — mainly allocating the action
// map — and returns the receiver so the call can be chained.
func (controller_Info *ControllerInfo) Init() *ControllerInfo {
	controller_Info.Actions = make(map[string]*ActionInfo)
	return controller_Info
}
// GetAction resolves an action by HTTP method and action name.
// The lookup first tries the method-specific key "<method>_<name>"
// (e.g. a GET request for "index" looks up "get_index"); when that misses
// it falls back to the method-agnostic key "_<name>" used by actions
// registered for all HTTP methods. Returns nil when neither key exists.
func (controller_info *ControllerInfo) GetAction(method string, name string) *ActionInfo {
	lowered := strings.ToLower(name)
	if info, ok := controller_info.Actions[strings.ToLower(method)+"_"+lowered]; ok {
		return info
	}
	return controller_info.Actions["_"+lowered]
}
// RegAction registers an action handler under the given HTTP method and
// action name. Passing "all" as the method stores the action under the
// method-agnostic key so it matches every HTTP method. Registering the same
// method/name pair twice panics.
func (controller_info *ControllerInfo) RegAction(httpMethod string, actionName string, handler func(ctx *HttpContext) ActionResulter) *ActionInfo {
	method := strings.ToLower(httpMethod)
	if method == "all" {
		method = ""
	}
	name := strings.ToLower(actionName)
	key := fmt.Sprintf("%s_%s", method, name)
	// reject duplicate registrations
	if _, exists := controller_info.Actions[key]; exists {
		panic(fmt.Sprintf("%s %s.%s has registered.",
			strings.ToUpper(method), controller_info.Name, actionName))
	}
	info := &ActionInfo{
		Name:       name,
		Controller: controller_info,
		Handler:    handler,
	}
	controller_info.Actions[key] = info
	return info
}
// AddFilters appends the given filters to the controller, skipping nil
// entries.
func (controller_info *ControllerInfo) AddFilters(filters ...Filter) {
	for _, f := range filters {
		if f == nil {
			continue
		}
		controller_info.Filters = append(controller_info.Filters, f)
	}
}
// AddActionFilters attaches filters to one specific action of the
// controller, panicking when that action has not been registered.
func (controller_info *ControllerInfo) AddActionFilters(httpMethod string, actionName string, filters ...Filter) {
	target := controller_info.GetAction(httpMethod, actionName)
	if target == nil {
		panic(fmt.Sprintf("ControllerInfo.AddActionFilters: controller \"%s\" no action for \"%s %s\".",
			controller_info.Name, strings.ToUpper(httpMethod), actionName))
	}
	target.AddFilters(filters...)
}
// ControllerFactory manages all registered controllers, keyed by lowercase
// controller name.
type ControllerFactory struct {
	Controllers map[string]*ControllerInfo
}
// GetAction resolves an ActionInfo from an HTTP method, controller name and
// action name. Returns nil when the controller itself is unknown.
func (factory *ControllerFactory) GetAction(httpMethod string, controller_name string, action string) *ActionInfo {
	if info, ok := factory.Controllers[strings.ToLower(controller_name)]; ok {
		return info.GetAction(httpMethod, action)
	}
	return nil
}
// defaultControllerFactory is the process-wide controller factory used by
// the package-level Controller function.
var defaultControllerFactory *ControllerFactory = &ControllerFactory{
	Controllers: make(map[string]*ControllerInfo),
}
// ControllerBuilder builds a controller. It pairs the controller's
// description with the action most recently registered through it, so that
// Filters can target either one.
type ControllerBuilder struct {
	controller *ControllerInfo
	currentAction *ActionInfo
}
// Action registers an action on the controller. Passing "all" as httpMethod
// registers the action for every HTTP method, but with the lowest priority.
// The builder is returned so Action calls can be chained.
func (builder *ControllerBuilder) Action(httpMethod string, actionName string,
	handler func(ctx *HttpContext) ActionResulter) *ControllerBuilder {
	builder.currentAction = builder.controller.RegAction(httpMethod, actionName, handler)
	return builder
}
// Get registers an action for the HTTP GET method.
func (builder *ControllerBuilder) Get(actionName string,
	handler func(ctx *HttpContext) ActionResulter) *ControllerBuilder {
	return builder.Action("get", actionName, handler)
}

// Post registers an action for the HTTP POST method.
func (builder *ControllerBuilder) Post(actionName string,
	handler func(ctx *HttpContext) ActionResulter) *ControllerBuilder {
	return builder.Action("post", actionName, handler)
}

// Put registers an action for the HTTP PUT method.
func (builder *ControllerBuilder) Put(actionName string,
	handler func(ctx *HttpContext) ActionResulter) *ControllerBuilder {
	return builder.Action("put", actionName, handler)
}

// Delete registers an action for the HTTP DELETE method.
func (builder *ControllerBuilder) Delete(actionName string,
	handler func(ctx *HttpContext) ActionResulter) *ControllerBuilder {
	return builder.Action("delete", actionName, handler)
}
// Filters attaches filters either to the most recently registered action
// or, when no action has been registered through this builder yet, to the
// whole controller. Returns the builder for chaining.
func (builder *ControllerBuilder) Filters(filters ...Filter) *ControllerBuilder {
	target := builder.controller.AddFilters
	if builder.currentAction != nil {
		target = builder.currentAction.AddFilters
	}
	target(filters...)
	return builder
}
// Controller is the main entry point: it returns a builder for the named
// controller, creating and caching the ControllerInfo in the default
// factory on first use so each controller exists exactly once.
func Controller(name string) *ControllerBuilder {
	key := strings.ToLower(name)
	info, ok := defaultControllerFactory.Controllers[key]
	if !ok {
		info = (&ControllerInfo{Name: key}).Init()
		defaultControllerFactory.Controllers[key] = info
	}
	return &ControllerBuilder{controller: info}
}
|
package leetcode
// heightChecker returns how many students stand at a position whose height
// differs from the non-decreasing "expected" order. Heights are assumed to
// lie in [1, 100], so a counting sort is used instead of a comparison sort.
func heightChecker(heights []int) int {
	var freq [101]int
	for _, h := range heights {
		freq[h]++
	}
	mismatches := 0
	pos := 0
	for height := 1; height <= 100; height++ {
		for count := freq[height]; count > 0; count-- {
			if heights[pos] != height {
				mismatches++
			}
			pos++
		}
	}
	return mismatches
}
|
package repository
import (
"context"
"database/sql"
"fmt"
"github.com/lib/pq"
"github.com/rs/zerolog/log"
"github.com/go-sink/sink/internal/app/datastruct"
)
// LinkRepository data structure. It wraps the SQL connection pool used by
// all link queries.
type LinkRepository struct {
	database *sql.DB
}

// NewLinkRepository creates new LinkRepository instance.
func NewLinkRepository(db *sql.DB) LinkRepository {
	return LinkRepository{database: db}
}
// GetAllLinks gets all links from database.
func (r LinkRepository) GetAllLinks(ctx context.Context) ([]datastruct.Link, error) {
	var links []datastruct.Link
	rows, err := r.database.QueryContext(ctx, "SELECT id, original, shortened, follow_qtty from links")
	if err != nil {
		return nil, fmt.Errorf("could not execute a query %w", err)
	}
	defer func(rows *sql.Rows) {
		closeErr := rows.Close()
		if closeErr != nil {
			log.Error().Err(closeErr).Msg("could not close cursor after getting link")
		}
	}(rows)
	for rows.Next() {
		link := datastruct.Link{}
		err = rows.Scan(&link.ID, &link.Original, &link.Shortened, &link.FollowQuantity)
		if err != nil {
			return nil, fmt.Errorf("could not scan a row: %w", err)
		}
		links = append(links, link)
	}
	// rows.Next returning false does not distinguish EOF from a mid-stream
	// failure (e.g. a dropped connection); rows.Err must be checked.
	if err = rows.Err(); err != nil {
		return nil, fmt.Errorf("could not iterate rows: %w", err)
	}
	return links, nil
}
// GetLink fetches the link whose shortened form equals short from database.
func (r LinkRepository) GetLink(ctx context.Context, short string) (datastruct.Link, error) {
	var link datastruct.Link
	rows, err := r.database.QueryContext(ctx, "SELECT id, original, shortened, follow_qtty from links where shortened = $1", short)
	if err != nil {
		return link, fmt.Errorf("could not execute a query %w", err)
	}
	defer func(rows *sql.Rows) {
		closeErr := rows.Close()
		if closeErr != nil {
			log.Error().Err(closeErr).Msg("could not close cursor after getting link")
		}
	}(rows)
	if !rows.Next() {
		// distinguish a genuine miss from an iteration error
		if err = rows.Err(); err != nil {
			return link, fmt.Errorf("could not iterate rows: %w", err)
		}
		// fixed typo: "coud" -> "could"
		return link, fmt.Errorf("could not find original link for %v", short)
	}
	err = rows.Scan(&link.ID, &link.Original, &link.Shortened, &link.FollowQuantity)
	if err != nil {
		return link, fmt.Errorf("could not scan a row: %w", err)
	}
	return link, nil
}
// SetLink inserts a new link row into database.
func (r LinkRepository) SetLink(ctx context.Context, link datastruct.Link) error {
	// INSERT produces no result set: ExecContext is the correct API and
	// removes the open-cursor risk of the previous QueryContext call,
	// which held the connection until the rows were closed.
	_, err := r.database.ExecContext(ctx, "INSERT INTO links(original, shortened, follow_qtty) VALUES ($1, $2, $3)", link.Original, link.Shortened, link.FollowQuantity)
	return err
}
// UpdateLink rewrites an existing link row in database.
func (r LinkRepository) UpdateLink(ctx context.Context, link datastruct.Link) error {
	// UPDATE produces no result set: ExecContext is the correct API and
	// removes the open-cursor risk of the previous QueryContext call.
	_, err := r.database.ExecContext(ctx, "UPDATE links SET original = $1, shortened = $2, follow_qtty = $3 WHERE id = $4", link.Original, link.Shortened, link.FollowQuantity, link.ID)
	return err
}
// UpdateLinks updates the last_status of several links, batching ids by
// status code so a single statement is issued per distinct status.
func (r LinkRepository) UpdateLinks(ctx context.Context, links []datastruct.Link) error {
	groupedLinks := make(map[int][]int64)
	for _, link := range links {
		groupedLinks[link.LastStatus] = append(groupedLinks[link.LastStatus], link.ID)
	}
	for statusCode, ids := range groupedLinks {
		// UPDATE returns no rows; ExecContext avoids the cursor that the
		// previous QueryContext-based version had to close manually.
		_, err := r.database.ExecContext(ctx, "UPDATE links SET last_status = $1 WHERE id = ANY($2)", statusCode, pq.Int64Array(ids))
		if err != nil {
			return fmt.Errorf("could not update links: %w", err)
		}
	}
	return nil
}
// DeleteLink removes a link row from database.
func (r LinkRepository) DeleteLink(ctx context.Context, link datastruct.Link) error {
	// DELETE produces no result set: ExecContext is the correct API and
	// removes the open-cursor risk of the previous QueryContext call.
	_, err := r.database.ExecContext(ctx, "DELETE FROM links WHERE id = $1", link.ID)
	return err
}
|
package api
import (
"github.com/PhongVX/taskmanagement/internal/app/user"
)
// newUserHandler wires a user HTTP handler to the default MongoDB session,
// building the repository and service layers along the way.
func newUserHandler() (*user.Handler, error) {
	session, err := dialDefaultMongoDB()
	if err != nil {
		return nil, err
	}
	service := user.NewService(user.NewMongoDBRepository(session))
	return user.NewHTTPHandler(*service), nil
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package test
import (
"github.com/iotaledger/wasp/contracts/common"
"github.com/iotaledger/wasp/packages/solo"
"github.com/stretchr/testify/require"
"testing"
)
// setupTest deploys the contract under test onto a chain and returns it.
func setupTest(t *testing.T) *solo.Chain {
	return common.DeployContract(t, ScName)
}
func TestDeploy(t *testing.T) {
chain := common.DeployContract(t, ScName)
_, err := chain.FindContract(ScName)
require.NoError(t, err)
}
|
package main
import (
_"github.com/go-sql-driver/mysql"
"fmt"
"github.com/jmoiron/sqlx"
)
// Person maps a row of the person table; the `db` tags name the columns
// sqlx scans into each field.
type Person struct {
	UserId int `db:"userid"`
	UserName string `db:"username"`
	Sex string `db:"sex"`
	Email string `db:"email"`
}
// Place maps a row of the place table.
// Fixed: the tag on Counttry previously read `db: "country"` — the space
// after the colon makes the tag unparseable by reflect.StructTag, so the
// column could never be mapped. The field keeps its historical (misspelled)
// name so existing callers are unaffected.
type Place struct {
	Counttry string `db:"country"`
	City     string `db:"city"`
	TelCode  int    `db:"telcode"`
}
// Db is the shared database handle, set up by init below; it stays nil
// when opening the connection fails.
var Db *sqlx.DB

// init opens the MySQL connection pool used by the rest of the program.
func init() {
	// fixed: the previous version named the error variable `error`,
	// shadowing the predeclared identifier, and never reported the cause
	database, err := sqlx.Open("mysql", "fuck:123456@tcp(localhost:3306)/Golang")
	if err != nil {
		fmt.Println("connect mysql failed:", err)
		return
	}
	Db = database
}
// main inserts a demo row into the person table and reports the id that
// MySQL assigned to it.
func main() {
	// init leaves Db nil when the connection could not be opened; guard
	// against the nil-pointer panic that Db.Exec would otherwise cause.
	if Db == nil {
		fmt.Println("database is not initialized")
		return
	}
	// insert a row
	input, err1 := Db.Exec("insert into person(username, sex, email)values(?, ?, ?)", "stu002", "female", "stu02@qq.com")
	if err1 != nil {
		fmt.Printf("Insert data failed %v\n", err1)
		return
	}
	// a returned id confirms the insert succeeded
	id, err2 := input.LastInsertId()
	if err2 != nil {
		fmt.Println("exec failed", err2)
		return
	}
	fmt.Println("insert success", id)
}
|
// There has gotta be away around the nasty copypasta hacks
// TODO: FIX THAT SHIT
package libTransmission
import (
"net/http"
"encoding/json"
"log"
"bytes"
"io"
"errors"
"github.com/germ/geoip"
)
// Connection parameters for the Transmission RPC endpoint.
// NOTE(review): credentials are hard-coded in source; consider moving them
// to configuration or environment variables.
var (
	ServerURL = "http://:9090/transmission/rpc"
	ServerUser = "germ"
	ServerPass = "hackersgonnahack"
)
// Add queues a torrent (by URL or magnet link) for download into dir.
func Add(url, dir string) (err error) {
	req := Request{
		Method: "torrent-add",
		Arguments: map[string]interface{}{
			"filename":     url,
			"download-dir": dir,
		},
	}
	reader, err := req.getJson()
	if err != nil {
		// fixed: Close used to be deferred before this check — getJson
		// returns a nil reader on error and the deferred Close panicked
		log.Println(err)
		return
	}
	defer reader.Close()
	var res AddResponse
	err = json.NewDecoder(reader).Decode(&res)
	if err != nil {
		log.Println(err)
		return
	}
	log.Println(res)
	if res.Result != "success" {
		return errors.New("Torrent failed to add")
	}
	return
}
// GetTorrents returns every torrent known to the server (peers, ids and
// names, per the requested fields).
func GetTorrents() (peers []Torrent, err error) {
	// Craft Request
	req := Request{
		Method: "torrent-get",
		Arguments: map[string]interface{}{
			"fields": []string{"peers", "id", "name"},
		},
	}
	// Make request
	jsonReader, err := req.getJson()
	if err != nil {
		// fixed: Close used to be deferred before this check; on error
		// the reader is nil and the deferred Close panicked
		log.Println("Error making request: ", err, jsonReader)
		return
	}
	defer jsonReader.Close()
	var resp TorrentResponse
	err = json.NewDecoder(jsonReader).Decode(&resp)
	if err != nil {
		log.Println(err)
		return
	}
	if resp.Result != "success" {
		return
	}
	for _, v := range resp.Arguments["torrents"] {
		peers = append(peers, v)
	}
	return
}
// LimitSpeed toggles the server's alternative ("alt") speed limits.
func LimitSpeed(enable bool) (err error) {
	req := Request{
		Method: "session-set",
		Arguments: map[string]interface{}{
			"alt-speed-enabled": enable,
		},
	}
	read, err := req.getJson()
	if err != nil {
		// fixed: Close used to be deferred before this check — a nil
		// reader on error made the deferred Close panic
		log.Println(err)
		return
	}
	defer read.Close()
	var s Response
	err = json.NewDecoder(read).Decode(&s)
	if err != nil {
		log.Println(err)
		return
	}
	if s.Result != "success" {
		err = errors.New("Error: Could not enable alts")
		return
	}
	return
}
// GetStats issues a session-get request and returns its arguments.
// (See GetSession: the Stats/Session type names are historically swapped.)
func GetStats() (s Stats, err error) {
	req := Request{
		Method: "session-get",
	}
	read, err := req.getJson()
	if err != nil {
		// fixed: defer moved after the error check to avoid calling
		// Close on the nil reader that getJson returns on failure
		log.Println(err)
		return
	}
	defer read.Close()
	var resp StatsResponse
	err = json.NewDecoder(read).Decode(&resp)
	if err != nil {
		log.Println(err)
		return
	}
	if resp.Result != "success" {
		err = errors.New("Error: Printer on Fire")
		return
	}
	return resp.Arguments, err
}
// GetSession issues a session-stats request and returns its arguments.
// So here's something funny. Turns out I named my data structures
// wrong. Stats relate to Session vars and vice versa.
// TODO: Learn how to use gofmt rewrite
func GetSession() (s Session, err error) {
	req := Request{
		Method: "session-stats",
	}
	read, err := req.getJson()
	if err != nil {
		// fixed: defer moved after the error check to avoid calling
		// Close on the nil reader that getJson returns on failure
		log.Println(err)
		return
	}
	defer read.Close()
	var resp SessionResponse
	err = json.NewDecoder(read).Decode(&resp)
	if err != nil {
		log.Println(err)
		return
	}
	if resp.Result != "success" {
		err = errors.New("Error: Printer on Fire")
		return
	}
	return resp.Arguments, err
}
// init configures the shared logger with file/line information and a
// recognizable prefix.
func init() {
	log.SetFlags(log.Lshortfile | log.LstdFlags)
	// removed a stray trailing semicolon (not gofmt-clean)
	log.SetPrefix("[ ScanServer ] ")
}
// getJson POSTs the request to the Transmission RPC endpoint and returns
// the raw response body. On a 409 it adopts the server-provided session id
// and retries itself recursively.
func (r *Request) getJson() (jsonReader io.ReadCloser, err error) {
	// Can the request into JSON
	msg, err := json.Marshal(r)
	if err != nil {
		// fixed: log.Println does not interpret format verbs; Printf does
		log.Printf("%v : %v", err, msg)
		return
	}
	buf := bytes.NewBuffer(msg)
	log.Println("Message: ", buf)
	// Craft the request
	var client http.Client
	req, err := http.NewRequest("POST", ServerURL, buf)
	if err != nil {
		log.Printf("%v : %v", err, req)
		return
	}
	req.Header.Add("X-Transmission-Session-Id", r.sessionID)
	req.SetBasicAuth(ServerUser, ServerPass)
	// Send the request
	resp, err := client.Do(req)
	if err != nil {
		log.Printf("%v : %v", err, resp)
		return
	}
	// Set Auth header if needed
	if resp.StatusCode == 409 {
		resp.Body.Close()
		id := resp.Header.Get("X-Transmission-Session-Id")
		log.Println("Changing SessionID to: ", id)
		r.sessionID = id
		return r.getJson()
	}
	jsonReader = resp.Body
	return
}
// Locate fills in the peer's geo information from its IP address and tags
// the description with the address and client name.
func (p *TorrentPeer) Locate() (err error) {
	location, lookupErr := geoip.LookupString(p.Address)
	if lookupErr != nil {
		log.Println(lookupErr)
		return lookupErr
	}
	p.Geo = location
	p.Geo.Description = p.Address + " " + p.ClientName
	return nil
}
|
package evaluator
import (
"testing"
"github.com/stretchr/testify/assert"
e "github.com/optimizely/go-sdk/pkg/entities"
)
// Reusable "exact match" custom-attribute conditions shared by the
// condition-tree tests below.
var stringFooCondition = e.Condition{
	Type: "custom_attribute",
	Match: "exact",
	Name: "string_foo",
	Value: "foo",
}
var boolTrueCondition = e.Condition{
	Type: "custom_attribute",
	Match: "exact",
	Name: "bool_true",
	Value: true,
}
var int42Condition = e.Condition{
	Type: "custom_attribute",
	Match: "exact",
	Name: "int_42",
	Value: 42,
}
// TestConditionTreeEvaluateSimpleCondition checks a single-condition "or"
// tree: it evaluates true when the user's attribute matches exactly and
// false otherwise.
func TestConditionTreeEvaluateSimpleCondition(t *testing.T) {
	conditionTreeEvaluator := NewMixedTreeEvaluator()
	conditionTree := &e.TreeNode{
		Operator: "or",
		Nodes: []*e.TreeNode{
			&e.TreeNode{
				Item: stringFooCondition,
			},
		},
	}
	// Test match
	user := e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
		},
	}
	condTreeParams := e.NewTreeParameters(&user, map[string]e.Audience{})
	result, _ := conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test no match
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "not foo",
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.False(t, result)
}
// TestConditionTreeEvaluateMultipleOrConditions checks a two-condition "or"
// tree: it evaluates true when either or both conditions match and false
// only when neither does.
func TestConditionTreeEvaluateMultipleOrConditions(t *testing.T) {
	conditionTreeEvaluator := NewMixedTreeEvaluator()
	conditionTree := &e.TreeNode{
		Operator: "or",
		Nodes: []*e.TreeNode{
			&e.TreeNode{
				Item: stringFooCondition,
			},
			&e.TreeNode{
				Item: boolTrueCondition,
			},
		},
	}
	// Test match string
	user := e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
		},
	}
	condTreeParams := e.NewTreeParameters(&user, map[string]e.Audience{})
	result, _ := conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test match bool
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"bool_true": true,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test match both
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
			"bool_true": true,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test no match
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "not foo",
			"bool_true": false,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.False(t, result)
}
// TestConditionTreeEvaluateMultipleAndConditions checks an "and" tree with
// two leaf conditions: both attributes must match; a missing attribute
// bubbles up as NULL and the tree does not match.
func TestConditionTreeEvaluateMultipleAndConditions(t *testing.T) {
	conditionTreeEvaluator := NewMixedTreeEvaluator()
	conditionTree := &e.TreeNode{
		Operator: "and",
		Nodes: []*e.TreeNode{
			&e.TreeNode{
				Item: stringFooCondition,
			},
			&e.TreeNode{
				Item: boolTrueCondition,
			},
		},
	}
	// Test only string match with NULL bubbling
	user := e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
		},
	}
	// NOTE: condTreeParams keeps a pointer to `user`; the reassignments of
	// `user` below are therefore visible on each re-evaluation.
	condTreeParams := e.NewTreeParameters(&user, map[string]e.Audience{})
	result, _ := conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.False(t, result)
	// Test only bool match with NULL bubbling
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"bool_true": true,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.False(t, result)
	// Test match both
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
			"bool_true":  true,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test no match
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "not foo",
			"bool_true":  false,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.False(t, result)
}
// TestConditionTreeEvaluateNotCondition checks "not" subtrees: the tree
// matches when at least one negated leaf condition does NOT hold.
func TestConditionTreeEvaluateNotCondition(t *testing.T) {
	conditionTreeEvaluator := NewMixedTreeEvaluator()
	// [or, [not, stringFooCondition], [not, boolTrueCondition]]
	conditionTree := &e.TreeNode{
		Operator: "or",
		Nodes: []*e.TreeNode{
			&e.TreeNode{
				Operator: "not",
				Nodes: []*e.TreeNode{
					&e.TreeNode{
						Item: stringFooCondition,
					},
				},
			},
			&e.TreeNode{
				Operator: "not",
				Nodes: []*e.TreeNode{
					&e.TreeNode{
						Item: boolTrueCondition,
					},
				},
			},
		},
	}
	// Test match string
	user := e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "not foo",
		},
	}
	// NOTE: condTreeParams keeps a pointer to `user`; the reassignments of
	// `user` below are therefore visible on each re-evaluation.
	condTreeParams := e.NewTreeParameters(&user, map[string]e.Audience{})
	result, _ := conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test match bool
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"bool_true": false,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test match both
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "not foo",
			"bool_true":  false,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test no match
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
			"bool_true":  true,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.False(t, result)
}
// TestConditionTreeEvaluateMultipleMixedConditions exercises a tree mixing
// "or", "and" and "not" operators; each scenario satisfies exactly one (or
// none) of the subtrees.
func TestConditionTreeEvaluateMultipleMixedConditions(t *testing.T) {
	conditionTreeEvaluator := NewMixedTreeEvaluator()
	// [or, [and, stringFooCondition, boolTrueCondition], [or, [not, stringFooCondition], int42Condition]]
	conditionTree := &e.TreeNode{
		Operator: "or",
		Nodes: []*e.TreeNode{
			&e.TreeNode{
				Operator: "and",
				Nodes: []*e.TreeNode{
					&e.TreeNode{
						Item: stringFooCondition,
					},
					&e.TreeNode{
						Item: boolTrueCondition,
					},
				},
			},
			&e.TreeNode{
				Operator: "or",
				Nodes: []*e.TreeNode{
					&e.TreeNode{
						Operator: "not",
						Nodes: []*e.TreeNode{
							&e.TreeNode{
								Item: stringFooCondition,
							},
						},
					},
					&e.TreeNode{
						Item: int42Condition,
					},
				},
			},
		},
	}
	// Test only match AND condition
	user := e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
			"bool_true":  true,
			"int_42":     43,
		},
	}
	// NOTE: condTreeParams keeps a pointer to `user`; the reassignments of
	// `user` below are therefore visible on each re-evaluation.
	condTreeParams := e.NewTreeParameters(&user, map[string]e.Audience{})
	result, _ := conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test only match the NOT condition
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "not foo",
			"bool_true":  true,
			"int_42":     43,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test only match the int condition
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
			"bool_true":  false,
			"int_42":     42,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.True(t, result)
	// Test no match
	user = e.UserContext{
		Attributes: map[string]interface{}{
			"string_foo": "foo",
			"bool_true":  false,
			"int_42":     43,
		},
	}
	result, _ = conditionTreeEvaluator.Evaluate(conditionTree, condTreeParams)
	assert.False(t, result)
}
// audienceMap resolves audience IDs referenced by audience trees in the
// tests below.
var audienceMap = map[string]e.Audience{
	"11111": audience11111,
	"11112": audience11112,
}

// audience11111 is satisfied by stringFooCondition alone.
var audience11111 = e.Audience{
	ID: "11111",
	ConditionTree: &e.TreeNode{
		Operator: "or",
		Nodes: []*e.TreeNode{
			&e.TreeNode{
				Operator: "or",
				Nodes: []*e.TreeNode{
					&e.TreeNode{
						Item: stringFooCondition,
					},
				},
			},
		},
	},
}

// audience11112 requires both boolTrueCondition and int42Condition.
var audience11112 = e.Audience{
	ID: "11112",
	ConditionTree: &e.TreeNode{
		Operator: "or",
		Nodes: []*e.TreeNode{
			&e.TreeNode{
				Operator: "and",
				Nodes: []*e.TreeNode{
					&e.TreeNode{
						Item: boolTrueCondition,
					},
					&e.TreeNode{
						Item: int42Condition,
					},
				},
			},
		},
	},
}
// TestConditionTreeEvaluateAnAudienceTreeSingleAudience verifies that an
// audience tree holding a single audience ID matches a user whose attributes
// satisfy that audience's condition tree.
func TestConditionTreeEvaluateAnAudienceTreeSingleAudience(t *testing.T) {
	audienceTree := &e.TreeNode{
		Operator: "or",
		Nodes:    []*e.TreeNode{{Item: audience11111.ID}},
	}
	evaluator := NewMixedTreeEvaluator()

	// The user satisfies audience 11111 via its string_foo condition.
	params := &e.TreeParameters{
		User: &e.UserContext{
			ID: "test_user_1",
			Attributes: map[string]interface{}{
				"string_foo": "foo",
			},
		},
		AudienceMap: audienceMap,
	}
	matched, _ := evaluator.Evaluate(audienceTree, params)
	assert.True(t, matched)
}
// TestConditionTreeEvaluateAnAudienceTreeMultipleAudiences verifies that an
// "or" audience tree matches when the user satisfies either referenced
// audience's conditions.
func TestConditionTreeEvaluateAnAudienceTreeMultipleAudiences(t *testing.T) {
	audienceTree := &e.TreeNode{
		Operator: "or",
		Nodes: []*e.TreeNode{
			{Item: audience11111.ID},
			{Item: audience11112.ID},
		},
	}
	evaluator := NewMixedTreeEvaluator()

	// Attributes satisfying only audience 11111 (string_foo == "foo").
	params := &e.TreeParameters{
		User: &e.UserContext{
			ID: "test_user_1",
			Attributes: map[string]interface{}{
				"string_foo": "foo",
			},
		},
		AudienceMap: audienceMap,
	}
	matched, _ := evaluator.Evaluate(audienceTree, params)
	assert.True(t, matched)

	// Attributes satisfying only audience 11112 (bool_true and int_42).
	params = &e.TreeParameters{
		User: &e.UserContext{
			ID: "test_user_1",
			Attributes: map[string]interface{}{
				"bool_true": true,
				"int_42":    42,
			},
		},
		AudienceMap: audienceMap,
	}
	matched, _ = evaluator.Evaluate(audienceTree, params)
	assert.True(t, matched)
}
|
package raw_client
import (
"context"
)
// GetAppSettingsRequest identifies the app whose settings are requested.
type GetAppSettingsRequest struct {
	App string `json:"app"`
}

// GetAppSettingsResponse carries the general settings returned by the
// /k/v1/app/settings.json endpoint.
type GetAppSettingsResponse struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	// NOTE(review): this tag is capitalized "Theme" unlike the other
	// lowercase tags — confirm it matches the actual API field name.
	Theme string `json:"Theme"`
}
// GetAppSettings fetches the general settings of the app identified by
// req.App via the /k/v1/app/settings.json endpoint and decodes the JSON
// body into a GetAppSettingsResponse.
func GetAppSettings(ctx context.Context, apiClient *ApiClient, req GetAppSettingsRequest) (*GetAppSettingsResponse, error) {
	apiRequest := ApiRequest{
		Method: "GET",
		Scheme: "https",
		Path:   "/k/v1/app/settings.json",
		Json:   req,
	}
	// Fix: the original local variable was named GetAppSettingsResponse,
	// shadowing the type of the same name inside this function.
	var resp GetAppSettingsResponse
	if err := apiClient.Call(ctx, apiRequest, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
|
package cli
import (
"fmt"
"os"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/usedepi/depi/pkg/config"
"github.com/usedepi/depi/pkg/datastore"
)
// App bundles the urfave/cli application with the parsed configuration and
// the datastore handle shared by all commands.
type App struct {
	*cli.App
	database datastore.Datastore
	config   *config.Config
}

// app is the process-wide CLI application, set up by Run and used by the
// before/after hooks.
var app *App
// Run parses the CLI arguments and runs the selected application command.
// version, commit and buildDate are embedded into the --version output.
func Run(version string, commit string, buildDate string) error {
	// force all times in UTC, regardless of server timezone
	time.Local = time.UTC
	// setup CLI app
	app = &App{cli.NewApp(), nil, nil}
	app.Name = "Depi"
	app.Usage = "simple & transparent website analytics"
	app.Version = fmt.Sprintf("%v, commit %v, built at %v", strings.TrimPrefix(version, "v"), commit, buildDate)
	app.HelpName = "depi"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "config, c",
			Value: ".env",
			Usage: "Load configuration from `FILE`",
		},
	}
	app.Before = before
	app.After = after
	app.Commands = []cli.Command{
		serverCmd,
		userCmd,
		statsCmd,
	}
	// Skip the banner when the user only asked for the version.
	if len(os.Args) < 2 || os.Args[1] != "--version" {
		log.Printf("%s version %s", app.Name, app.Version)
	}
	// Idiom fix: return the result directly instead of err-check-then-return-nil.
	return app.Run(os.Args)
}
// before runs prior to the selected command: it loads the env file given by
// --config, parses the configuration and opens the datastore connection.
// NOTE(review): config.LoadEnv's outcome is not checked here — confirm a
// missing config file is handled inside LoadEnv.
func before(c *cli.Context) error {
	configFile := c.String("config")
	config.LoadEnv(configFile)
	app.config = config.Parse()
	app.database = datastore.New(app.config.Database)
	return nil
}
// after runs once the command finishes and closes the datastore connection,
// propagating any close error to the CLI framework.
func after(c *cli.Context) error {
	// Idiom fix: return the Close result directly instead of via a temporary.
	return app.database.Close()
}
|
package leetcode
import "fmt"
// moveZeroes moves every zero in nums to the end of the slice, in place,
// while preserving the relative order of the non-zero elements.
// (Clearest and most concise variant.)
func moveZeroes(nums []int) {
	next := 0
	// Compact all non-zero values to the front, keeping their order.
	for _, v := range nums {
		if v != 0 {
			nums[next] = v
			next++
		}
	}
	// Zero-fill the remainder.
	for next < len(nums) {
		nums[next] = 0
		next++
	}
}
// moveZeroes4 moves every zero in nums to the end, in place, preserving the
// order of non-zero elements. (College-era variant, clearer and concise.)
//
// zi tracks the position of the earliest remaining zero, nzi the first
// non-zero found after it; once both are known, every subsequent non-zero is
// swapped with the zero at zi.
//
// Fix: removed a leftover debug fmt.Println inside the loop that printed
// internal state on every iteration.
func moveZeroes4(nums []int) {
	zi, nzi := -1, -1
	for i := 0; i < len(nums); i++ {
		if zi == -1 && nums[i] == 0 {
			zi = i
		}
		if zi != -1 && nzi == -1 && nums[i] != 0 {
			nzi = i
		}
		if zi != -1 && nzi != -1 && nums[i] != 0 {
			nums[i], nums[zi] = nums[zi], nums[i]
			zi++
		}
	}
}
// moveZeroes3 moves every zero in nums to the end, in place, preserving the
// order of non-zero elements. (College-era variant, clearer.)
// It first locates the first zero (zi) and the first non-zero after it
// (nzi); if either is missing the slice is already in final form.
func moveZeroes3(nums []int) {
	zi, nzi := -1, -1
	// Find the first zero.
	for i := 0; i < len(nums); i++ {
		if nums[i] == 0 {
			zi = i
			break
		}
	}
	if zi == -1 {
		return
	}
	// Find the first non-zero after that zero.
	for i := zi + 1; i < len(nums); i++ {
		if nums[i] != 0 {
			nzi = i
			break
		}
	}
	if nzi == -1 {
		return
	}
	// Swap each remaining non-zero with the earliest zero.
	for i := nzi; i < len(nums); i++ {
		// TODO(review): debug print left in — remove once verified unused.
		fmt.Println(zi, i, nums)
		if nums[i] != 0 {
			nums[i], nums[zi] = nums[zi], nums[i]
			zi++
		}
	}
}
// moveZeroes2 moves every zero in nums to the end, in place, preserving the
// order of non-zero elements, using a single swap-based pass.
// (College-era variant, slightly improved — the strongest of the bunch.)
func moveZeroes2(nums []int) {
	writeIdx := 0
	for readIdx, v := range nums {
		if v != 0 {
			// Swap the non-zero value into the write position; when the two
			// indices coincide this is a harmless self-swap.
			nums[readIdx], nums[writeIdx] = nums[writeIdx], nums[readIdx]
			writeIdx++
		}
	}
}
|
package aria2
import (
"github.com/jlb0906/micro-movie/basic"
"github.com/jlb0906/micro-movie/basic/config"
"github.com/micro/go-micro/v2/logger"
"sync"
)
var (
	c      *Conf        // loaded aria2 configuration, set by initAria2
	m      sync.RWMutex // guards the one-time initialization below
	inited bool         // true once initAria2 has completed
)

// Conf holds the aria2 connection settings read from the "aria2" section of
// the application configuration.
type Conf struct {
	Uri         string `json:"uri"`
	Token       string `json:"token"`
	Timeout     int    `json:"timeout"`
	Prefix      string `json:"prefix"`
	WorkerCount int    `json:"workerCount"`
}
// init registers the aria2 initializer with the application bootstrap
// sequence; basic.Register decides when initAria2 actually runs.
func init() {
	basic.Register(initAria2)
}
// initAria2 loads the "aria2" configuration section into the package-level
// Conf. It is idempotent: repeated calls are no-ops guarded by the mutex and
// the inited flag.
func initAria2() {
	m.Lock()
	defer m.Unlock()
	if inited {
		logger.Warn("[initAria2] 已经初始化过Aria2...")
		return
	}
	logger.Infof("[initAria2] 初始化Aria2...")
	src := config.C()
	c = new(Conf)
	err := src.Path("aria2", c)
	if err != nil {
		// Bug fix: logger.Fatal does not interpret format verbs, so the
		// original printed the literal "%s"; Fatalf formats as intended.
		logger.Fatalf("[initAria2] %s", err)
	}
	inited = true
	logger.Infof("[initAria2] 成功")
}
// Get returns the loaded aria2 configuration.
// NOTE(review): reads c without taking m — safe only if callers run after
// initialization completes; confirm no concurrent writer besides initAria2.
func Get() *Conf {
	return c
}
|
package main
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"sync"
	"time"

	simplejson "github.com/bitly/go-simplejson"
	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)
/**
 * Validate a proxy against maimai.cn: the proxy is considered working when
 * the JSON endpoint answers with {"result": "ok"}. Any transport error,
 * non-JSON body or missing/unexpected "result" field counts as failure.
 **/
func validMaiMai(proxyURL string) bool {
	proxy := func(_ *http.Request) (*url.URL, error) {
		return url.Parse(proxyURL)
	}
	transport := &http.Transport{Proxy: proxy}
	client := &http.Client{
		Transport: transport,
		Timeout:   8 * time.Second, // removed redundant time.Duration conversion
	}
	resp, err := client.Get("https://maimai.cn/contact/comment_list/38253207?jsononly=1")
	if err != nil {
		return false
	}
	// Bug fix: the response body was never closed, leaking connections.
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	js, err := simplejson.NewJson(data) // reject non-JSON bodies (e.g. proxy error pages)
	if err != nil {
		return false
	}
	str, err := js.Get("result").String()
	if err != nil {
		return false
	}
	return str == "ok"
}
// Data is one validation result queued for persistence: the proxy record,
// the flag name being updated ("success" or "maimai") and its new value.
type Data struct {
	proxy Proxy
	key   string
	value bool
}
/**
 * Re-validate stored proxies (run periodically). Every proxy whose "maimai"
 * flag equals `success` is checked against httpbin first and, only when that
 * passes, against maimai.cn; both results are persisted.
 **/
func validCrawler(session *mgo.Session, success bool) {
	c := session.DB("go-proxytool").C("proxy")
	proxies := []Proxy{}
	err := c.Find(bson.M{"maimai": success}).All(&proxies)
	if err != nil {
		panic(err)
	}
	dataChan := make(chan Data, ConcurNum)
	occupyChan := make(chan bool, ConcurNum) // semaphore bounding concurrent validations
	writerDone := make(chan bool, 1)
	go func() { // persist validation results sequentially
		for data := range dataChan {
			data.proxy.Update(session, data.key, data.value)
		}
		writerDone <- true
	}()
	var wg sync.WaitGroup
	for index, proxy := range proxies {
		occupyChan <- true // acquire a worker slot
		wg.Add(1)
		go func(proxy Proxy, count int) {
			defer wg.Done()
			proxyURL := "http://" + proxy.IP + ":" + proxy.Port
			success, msg := validHTTPBin(proxyURL)
			fmt.Println("\033[1;36mvalid \033[0mhttp-bin", count, proxy, success)
			fmt.Println(msg)
			dataChan <- Data{
				proxy: proxy,
				key:   "success",
				value: success,
			}
			if success {
				maimai := validMaiMai(proxyURL)
				dataChan <- Data{
					proxy: proxy,
					key:   "maimai",
					value: maimai,
				}
			}
			<-occupyChan // release the worker slot
		}(proxy, index+1)
	}
	// Bug fix: the original returned while workers were still running and the
	// writer goroutine leaked forever (its exit channel was never signaled).
	// Wait for all workers, then close dataChan so the writer drains the
	// remaining results and exits; finally wait for it to finish.
	wg.Wait()
	close(dataChan)
	<-writerDone
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"fmt"
"io"
"math"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"sync"
"time"
"github.com/docker/go-units"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/backend/encode"
"github.com/pingcap/tidb/br/pkg/lightning/backend/kv"
"github.com/pingcap/tidb/br/pkg/lightning/backend/local"
"github.com/pingcap/tidb/br/pkg/lightning/checkpoints"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/br/pkg/lightning/log"
"github.com/pingcap/tidb/br/pkg/lightning/mydump"
"github.com/pingcap/tidb/br/pkg/storage"
tidb "github.com/pingcap/tidb/config"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/syncutil"
pd "github.com/tikv/pd/client"
"go.uber.org/multierr"
"go.uber.org/zap"
)
// NewTiKVModeSwitcher make it a var, so we can mock it in tests.
var NewTiKVModeSwitcher = local.NewTiKVModeSwitcher

var (
	// CheckDiskQuotaInterval is the default time interval to check disk quota.
	// TODO: make it dynamically adjusting according to the speed of import and the disk size.
	CheckDiskQuotaInterval = 10 * time.Second

	// defaultMaxEngineSize is the default max engine size in bytes.
	// we make it 5 times larger than lightning default engine size to reduce range overlap, especially for index,
	// since we have an index engine per distributed subtask.
	// for 1TiB data, we can divide it into 2 engines that runs on 2 TiDB. it can have a good balance between
	// range overlap and sort speed in one of our test of:
	// - 10 columns, PK + 6 secondary index 2 of which is mv index
	// - 1.05 KiB per row, 527 MiB per file, 1024000000 rows, 1 TiB total
	//
	// it might not be the optimal value for other cases.
	defaultMaxEngineSize = int64(5 * config.DefaultBatchSize)
)
// prepareSortDir creates a new directory for import, remove previous sort directory if exists.
// Layout: <TempDir>/import-<port>/<taskID>; the per-instance import dir is
// created once, the per-task sort dir is wiped if left over from a prior run.
func prepareSortDir(e *LoadDataController, taskID int64, tidbCfg *tidb.Config) (string, error) {
	sortPathSuffix := "import-" + strconv.Itoa(int(tidbCfg.Port))
	importDir := filepath.Join(tidbCfg.TempDir, sortPathSuffix)
	sortDir := filepath.Join(importDir, strconv.FormatInt(taskID, 10))
	// Enter this branch when importDir is missing, is a plain file, or Stat failed.
	if info, err := os.Stat(importDir); err != nil || !info.IsDir() {
		// A real stat error (anything other than "does not exist") aborts.
		if err != nil && !os.IsNotExist(err) {
			e.logger.Error("stat import dir failed", zap.String("import_dir", importDir), zap.Error(err))
			return "", errors.Trace(err)
		}
		// The path exists but is a regular file: remove it so MkdirAll can succeed.
		if info != nil && !info.IsDir() {
			e.logger.Warn("import dir is not a dir, remove it", zap.String("import_dir", importDir))
			if err := os.RemoveAll(importDir); err != nil {
				return "", errors.Trace(err)
			}
		}
		e.logger.Info("import dir not exists, create it", zap.String("import_dir", importDir))
		if err := os.MkdirAll(importDir, 0o700); err != nil {
			e.logger.Error("failed to make dir", zap.String("import_dir", importDir), zap.Error(err))
			return "", errors.Trace(err)
		}
	}
	// todo: remove this after we support checkpoint
	// A sort dir left over from a previous run of the same task is stale: drop it.
	if _, err := os.Stat(sortDir); err != nil {
		if !os.IsNotExist(err) {
			e.logger.Error("stat sort dir failed", zap.String("sort_dir", sortDir), zap.Error(err))
			return "", errors.Trace(err)
		}
	} else {
		e.logger.Warn("sort dir already exists, remove it", zap.String("sort_dir", sortDir))
		if err := os.RemoveAll(sortDir); err != nil {
			return "", errors.Trace(err)
		}
	}
	return sortDir, nil
}
// GetTiKVModeSwitcherWithPDClient creates a new TiKV mode switcher with its pd Client.
// TLS is built from the cluster certificates in the global TiDB config.
// NOTE(review): the returned pd.Client is not closed here — presumably the
// caller owns and closes it; confirm at call sites.
func GetTiKVModeSwitcherWithPDClient(ctx context.Context, logger *zap.Logger) (pd.Client, local.TiKVModeSwitcher, error) {
	tidbCfg := tidb.GetGlobalConfig()
	hostPort := net.JoinHostPort("127.0.0.1", strconv.Itoa(int(tidbCfg.Status.StatusPort)))
	tls, err := common.NewTLS(
		tidbCfg.Security.ClusterSSLCA,
		tidbCfg.Security.ClusterSSLCert,
		tidbCfg.Security.ClusterSSLKey,
		hostPort,
		nil, nil, nil,
	)
	if err != nil {
		return nil, nil, err
	}
	tlsOpt := tls.ToPDSecurityOption()
	pdCli, err := pd.NewClientWithContext(ctx, []string{tidbCfg.Path}, tlsOpt)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	return pdCli, NewTiKVModeSwitcher(tls, pdCli, logger), nil
}
// getCachedKVStoreFrom returns the (process-cached) TiKV-backed kv.Storage
// for the given PD address. GC is disabled in the DSN because TiDB itself
// already runs GC.
func getCachedKVStoreFrom(pdAddr string, tls *common.TLS) (tidbkv.Storage, error) {
	// Disable GC because TiDB enables GC already.
	keySpaceName := tidb.GetGlobalKeyspaceName()
	// the kv store we get is a cached store, so we can't close it.
	kvStore, err := GetKVStore(fmt.Sprintf("tikv://%s?disableGC=true&keyspaceName=%s", pdAddr, keySpaceName), tls.ToTiKVSecurityConfig())
	if err != nil {
		return nil, errors.Trace(err)
	}
	return kvStore, nil
}
// NewTableImporter creates a new table importer.
// It prepares the local sort directory, builds TLS/PD/kv-store handles from
// the global TiDB config, and wires up a lightning local backend configured
// for this load job.
func NewTableImporter(param *JobImportParam, e *LoadDataController, taskID int64) (ti *TableImporter, err error) {
	idAlloc := kv.NewPanickingAllocators(0)
	tbl, err := tables.TableFromMeta(idAlloc, e.Table.Meta())
	if err != nil {
		return nil, errors.Annotatef(err, "failed to tables.TableFromMeta %s", e.Table.Meta().Name)
	}
	tidbCfg := tidb.GetGlobalConfig()
	// todo: we only need to prepare this once on each node(we might call it 3 times in distribution framework)
	dir, err := prepareSortDir(e, taskID, tidbCfg)
	if err != nil {
		return nil, err
	}
	// TLS bound to the local status port, using the cluster certificates.
	hostPort := net.JoinHostPort("127.0.0.1", strconv.Itoa(int(tidbCfg.Status.StatusPort)))
	tls, err := common.NewTLS(
		tidbCfg.Security.ClusterSSLCA,
		tidbCfg.Security.ClusterSSLCert,
		tidbCfg.Security.ClusterSSLKey,
		hostPort,
		nil, nil, nil,
	)
	if err != nil {
		return nil, err
	}
	// no need to close kvStore, since it's a cached store.
	kvStore, err := getCachedKVStoreFrom(tidbCfg.Path, tls)
	if err != nil {
		return nil, errors.Trace(err)
	}
	backendConfig := local.BackendConfig{
		PDAddr:                 tidbCfg.Path,
		LocalStoreDir:          dir,
		MaxConnPerStore:        config.DefaultRangeConcurrency,
		ConnCompressType:       config.CompressionNone,
		WorkerConcurrency:      config.DefaultRangeConcurrency * 2,
		KVWriteBatchSize:       config.KVWriteBatchSize,
		RegionSplitBatchSize:   config.DefaultRegionSplitBatchSize,
		RegionSplitConcurrency: runtime.GOMAXPROCS(0),
		// enable after we support checkpoint
		CheckpointEnabled:       false,
		MemTableSize:            config.DefaultEngineMemCacheSize,
		LocalWriterMemCacheSize: int64(config.DefaultLocalWriterMemCacheSize),
		ShouldCheckTiKV:         true,
		DupeDetectEnabled:       false,
		DuplicateDetectOpt:      common.DupDetectOpt{ReportErrOnDup: false},
		StoreWriteBWLimit:       int(e.MaxWriteSpeed),
		MaxOpenFiles:            int(util.GenRLimit("table_import")),
		KeyspaceName:            tidb.GetGlobalKeyspaceName(),
		PausePDSchedulerScope:   config.PausePDSchedulerScopeTable,
	}
	if e.IsRaftKV2 {
		backendConfig.RaftKV2SwitchModeDuration = config.DefaultSwitchTiKVModeInterval
	}
	// todo: use a real region size getter
	regionSizeGetter := &local.TableRegionSizeGetterImpl{}
	localBackend, err := local.NewBackend(param.GroupCtx, tls, backendConfig, regionSizeGetter)
	if err != nil {
		return nil, err
	}
	return &TableImporter{
		JobImportParam:     param,
		LoadDataController: e,
		backend:            localBackend,
		tableInfo: &checkpoints.TidbTableInfo{
			ID:   e.Table.Meta().ID,
			Name: e.Table.Meta().Name.O,
			Core: e.Table.Meta(),
		},
		encTable: tbl,
		dbID:     e.DBID,
		store:    e.dataStore,
		kvStore:  kvStore,
		logger:   e.logger,
		// this is the value we use for 50TiB data parallel import.
		// this might not be the optimal value.
		// todo: use different default for single-node import and distributed import.
		regionSplitSize: 2 * int64(config.SplitRegionSize),
		regionSplitKeys: 2 * int64(config.SplitRegionKeys),
		diskQuota:       adjustDiskQuota(int64(e.DiskQuota), dir, e.logger),
		diskQuotaLock:   new(syncutil.RWMutex),
	}, nil
}
// TableImporter is a table importer.
// It bundles the lightning local backend with the per-table metadata needed
// to encode rows and import engines for a single LOAD DATA / IMPORT job.
type TableImporter struct {
	*JobImportParam
	*LoadDataController
	backend   *local.Backend
	tableInfo *checkpoints.TidbTableInfo
	// this table has a separate id allocator used to record the max row id allocated.
	encTable table.Table
	dbID     int64
	store    storage.ExternalStorage
	// the kv store we get is a cached store, so we can't close it.
	kvStore tidbkv.Storage
	logger  *zap.Logger
	// region split size/keys passed to engine import.
	regionSplitSize int64
	regionSplitKeys int64
	// diskQuota caps local disk usage; writers hold diskQuotaLock while
	// CheckDiskQuota takes it exclusively to pause writes.
	diskQuota     int64
	diskQuotaLock *syncutil.RWMutex
}
// getParser opens the chunk's data file from ti.dataStore and returns a
// parser whose row-ID counter starts at the chunk's PrevRowIDMax.
func (ti *TableImporter) getParser(ctx context.Context, chunk *checkpoints.ChunkCheckpoint) (mydump.Parser, error) {
	info := LoadDataReaderInfo{
		Opener: func(ctx context.Context) (io.ReadSeekCloser, error) {
			reader, err := mydump.OpenReader(ctx, &chunk.FileMeta, ti.dataStore)
			if err != nil {
				return nil, errors.Trace(err)
			}
			return reader, nil
		},
		Remote: &chunk.FileMeta,
	}
	parser, err := ti.LoadDataController.GetParser(ctx, info)
	if err != nil {
		return nil, err
	}
	// todo: when support checkpoint, we should set pos too.
	// WARN: parser.SetPos can only be set before we read anything now. should fix it before set pos.
	parser.SetRowID(chunk.Chunk.PrevRowIDMax)
	return parser, nil
}
// getKVEncoder builds a KV encoder for the given chunk, carrying the session
// state (SQL mode, important sys vars, chunk timestamp) and seeding the
// auto-random generator from the chunk's starting row ID.
func (ti *TableImporter) getKVEncoder(chunk *checkpoints.ChunkCheckpoint) (kvEncoder, error) {
	cfg := &encode.EncodingConfig{
		SessionOptions: encode.SessionOptions{
			SQLMode:        ti.SQLMode,
			Timestamp:      chunk.Timestamp,
			SysVars:        ti.ImportantSysVars,
			AutoRandomSeed: chunk.Chunk.PrevRowIDMax,
		},
		Path:   chunk.FileMeta.Path,
		Table:  ti.encTable,
		// Per-chunk logger tagged with the source file path.
		Logger: log.Logger{Logger: ti.logger.With(zap.String("path", chunk.FileMeta.Path))},
	}
	return newTableKVEncoder(cfg, ti)
}
// getAdjustedMaxEngineSize returns the engine size to use so that subtasks
// receive evenly sized slices of the total data (see the table below).
func (e *LoadDataController) getAdjustedMaxEngineSize() int64 {
	// we want to split data files into subtask of size close to MaxEngineSize to reduce range overlap,
	// and evenly distribute them to subtasks.
	// so we adjust MaxEngineSize to make sure each subtask has a similar amount of data to import.
	// we calculate subtask count first by round(TotalFileSize / maxEngineSize), then adjust maxEngineSize
	//
	// AllocateEngineIDs is using ceil() to calculate subtask count, engine size might be too small in some case,
	// such as 501G data, maxEngineSize will be about 250G, so we don't relay on it.
	// see https://github.com/pingcap/tidb/blob/b4183e1dc9bb01fb81d3aa79ca4b5b74387c6c2a/br/pkg/lightning/mydump/region.go#L109
	//
	// for default e.MaxEngineSize = 500GiB, we have:
	// data size range(G) cnt adjusted-engine-size range(G)
	// [0, 750) 1 [0, 750)
	// [750, 1250) 2 [375, 625)
	// [1250, 1750) 3 [416, 583)
	// [1750, 2250) 4 [437, 562)
	maxEngineSize := int64(e.MaxEngineSize)
	// Everything fits in one engine: use the exact total size.
	if e.TotalFileSize <= maxEngineSize {
		return e.TotalFileSize
	}
	subtaskCount := math.Round(float64(e.TotalFileSize) / float64(maxEngineSize))
	adjusted := math.Ceil(float64(e.TotalFileSize) / subtaskCount)
	return int64(adjusted)
}
// PopulateChunks populates chunks from table regions.
// in dist framework, this should be done in the tidb node which is responsible for splitting job into subtasks
// then table-importer handles data belongs to the subtask.
func (e *LoadDataController) PopulateChunks(ctx context.Context) (ecp map[int32]*checkpoints.EngineCheckpoint, err error) {
	task := log.BeginTask(e.logger, "populate chunks")
	defer func() {
		task.End(zap.ErrorLevel, err)
	}()
	tableMeta := &mydump.MDTableMeta{
		DB:        e.DBName,
		Name:      e.Table.Meta().Name.O,
		DataFiles: e.toMyDumpFiles(),
	}
	adjustedMaxEngineSize := e.getAdjustedMaxEngineSize()
	e.logger.Info("adjust max engine size", zap.Int64("before", int64(e.MaxEngineSize)),
		zap.Int64("after", adjustedMaxEngineSize))
	dataDivideCfg := &mydump.DataDivideConfig{
		ColumnCnt:      len(e.Table.Meta().Columns),
		EngineDataSize: adjustedMaxEngineSize,
		MaxChunkSize:   int64(config.MaxRegionSize),
		Concurrency:    int(e.ThreadCnt),
		IOWorkers:      nil,
		Store:          e.dataStore,
		TableMeta:      tableMeta,
	}
	tableRegions, err2 := mydump.MakeTableRegions(ctx, dataDivideCfg)
	if err2 != nil {
		e.logger.Error("populate chunks failed", zap.Error(err2))
		return nil, err2
	}
	var maxRowID int64
	timestamp := time.Now().Unix()
	tableCp := &checkpoints.TableCheckpoint{
		Engines: map[int32]*checkpoints.EngineCheckpoint{},
	}
	// Group regions into per-engine checkpoints (one chunk checkpoint per
	// region) and track the largest row ID assigned.
	for _, region := range tableRegions {
		engine, found := tableCp.Engines[region.EngineID]
		if !found {
			engine = &checkpoints.EngineCheckpoint{
				Status: checkpoints.CheckpointStatusLoaded,
			}
			tableCp.Engines[region.EngineID] = engine
		}
		ccp := &checkpoints.ChunkCheckpoint{
			Key: checkpoints.ChunkCheckpointKey{
				Path:   region.FileMeta.Path,
				Offset: region.Chunk.Offset,
			},
			FileMeta:          region.FileMeta,
			ColumnPermutation: nil,
			Chunk:             region.Chunk,
			Timestamp:         timestamp,
		}
		engine.Chunks = append(engine.Chunks, ccp)
		if region.Chunk.RowIDMax > maxRowID {
			maxRowID = region.Chunk.RowIDMax
		}
	}
	// For tables with auto IDs, allocate a globally unique row-ID range from
	// the cluster and rebase every chunk's row IDs onto it.
	if common.TableHasAutoID(e.Table.Meta()) {
		tidbCfg := tidb.GetGlobalConfig()
		hostPort := net.JoinHostPort("127.0.0.1", strconv.Itoa(int(tidbCfg.Status.StatusPort)))
		tls, err4 := common.NewTLS(
			tidbCfg.Security.ClusterSSLCA,
			tidbCfg.Security.ClusterSSLCert,
			tidbCfg.Security.ClusterSSLKey,
			hostPort,
			nil, nil, nil,
		)
		if err4 != nil {
			return nil, err4
		}
		// no need to close kvStore, since it's a cached store.
		kvStore, err4 := getCachedKVStoreFrom(tidbCfg.Path, tls)
		if err4 != nil {
			return nil, errors.Trace(err4)
		}
		if err3 := common.RebaseGlobalAutoID(ctx, 0, kvStore, e.DBID, e.Table.Meta()); err3 != nil {
			return nil, errors.Trace(err3)
		}
		newMinRowID, _, err3 := common.AllocGlobalAutoID(ctx, maxRowID, kvStore, e.DBID, e.Table.Meta())
		if err3 != nil {
			return nil, errors.Trace(err3)
		}
		e.rebaseChunkRowID(newMinRowID, tableCp.Engines)
	}
	// Add index engine checkpoint
	tableCp.Engines[common.IndexEngineID] = &checkpoints.EngineCheckpoint{Status: checkpoints.CheckpointStatusLoaded}
	return tableCp.Engines, nil
}
// rebaseChunkRowID shifts every chunk's row-ID range by rowIDBase so rows
// encoded from these chunks use the globally allocated ID range.
// A base of zero means no reallocation happened; nothing is changed.
func (*LoadDataController) rebaseChunkRowID(rowIDBase int64, engines map[int32]*checkpoints.EngineCheckpoint) {
	if rowIDBase == 0 {
		return
	}
	for _, engineCp := range engines {
		for _, chunkCp := range engineCp.Chunks {
			chunkCp.Chunk.PrevRowIDMax += rowIDBase
			chunkCp.Chunk.RowIDMax += rowIDBase
		}
	}
}
// a simplified version of EstimateCompactionThreshold
// getTotalRawFileSize sums the real sizes of all data files (doubling
// parquet files as a raw-size estimate, since they are compressed) and
// multiplies the total by indexCnt.
func (ti *TableImporter) getTotalRawFileSize(indexCnt int64) int64 {
	total := int64(0)
	for _, f := range ti.dataFiles {
		sz := f.RealSize
		if f.Type == mydump.SourceTypeParquet {
			// parquet file is compressed, thus estimates with a factor of 2
			sz *= 2
		}
		total += sz
	}
	return total * indexCnt
}
// OpenIndexEngine opens an index engine.
// Compaction is enabled when the estimated threshold (derived from raw data
// size times the number of per-row index entries) is positive.
func (ti *TableImporter) OpenIndexEngine(ctx context.Context, engineID int32) (*backend.OpenedEngine, error) {
	idxEngineCfg := &backend.EngineConfig{
		TableInfo: ti.tableInfo,
	}
	idxCnt := len(ti.tableInfo.Core.Indices)
	// Tables without the hidden _tidb_rowid column store one index inside the
	// row key itself, so it is not written to the index engine.
	if !common.TableHasAutoRowID(ti.tableInfo.Core) {
		idxCnt--
	}
	// todo: getTotalRawFileSize returns size of all data files, but in distributed framework,
	// we create one index engine for each engine, should reflect this in the future.
	threshold := local.EstimateCompactionThreshold2(ti.getTotalRawFileSize(int64(idxCnt)))
	idxEngineCfg.Local = backend.LocalEngineConfig{
		Compact:            threshold > 0,
		CompactConcurrency: 4,
		CompactThreshold:   threshold,
	}
	fullTableName := ti.fullTableName()
	// todo: cleanup all engine data on any error since we don't support checkpoint for now
	// some return path, didn't make sure all data engine and index engine are cleaned up.
	// maybe we can add this in upper level to clean the whole local-sort directory
	mgr := backend.MakeEngineManager(ti.backend)
	return mgr.OpenEngine(ctx, idxEngineCfg, fullTableName, engineID)
}
// OpenDataEngine opens a data engine.
// Data engines currently never enable compaction (see the commented block).
func (ti *TableImporter) OpenDataEngine(ctx context.Context, engineID int32) (*backend.OpenedEngine, error) {
	dataEngineCfg := &backend.EngineConfig{
		TableInfo: ti.tableInfo,
	}
	// todo: support checking IsRowOrdered later.
	//if ti.tableMeta.IsRowOrdered {
	// dataEngineCfg.Local.Compact = true
	// dataEngineCfg.Local.CompactConcurrency = 4
	// dataEngineCfg.Local.CompactThreshold = local.CompactionUpperThreshold
	//}
	mgr := backend.MakeEngineManager(ti.backend)
	return mgr.OpenEngine(ctx, dataEngineCfg, ti.fullTableName(), engineID)
}
// ImportAndCleanup imports the engine and cleanup the engine data.
// It returns the imported KV count for data engines (0 for the index
// engine) and the import and cleanup errors combined; cleanup runs even
// when the import failed.
func (ti *TableImporter) ImportAndCleanup(ctx context.Context, closedEngine *backend.ClosedEngine) (int64, error) {
	var kvCount int64
	importErr := closedEngine.Import(ctx, ti.regionSplitSize, ti.regionSplitKeys)
	if closedEngine.GetID() != common.IndexEngineID {
		// todo: change to a finer-grain progress later.
		// each row is encoded into 1 data key
		kvCount = ti.backend.GetImportedKVCount(closedEngine.GetUUID())
	}
	// todo: if we need support checkpoint, engine should not be cleanup if import failed.
	cleanupErr := closedEngine.Cleanup(ctx)
	return kvCount, multierr.Combine(importErr, cleanupErr)
}
// fullTableName returns the fully-qualified name of the table being imported.
func (ti *TableImporter) fullTableName() string {
	return common.UniqueTable(ti.DBName, ti.Table.Meta().Name.O)
}
// Close implements the io.Closer interface.
// Only the local backend is closed; kvStore is a shared cached store and
// must stay open.
func (ti *TableImporter) Close() error {
	ti.backend.Close()
	return nil
}
// CheckDiskQuota checks disk quota.
// It loops until ctx is cancelled, periodically measuring local engine disk
// usage; when the quota is exceeded it blocks writers by holding
// diskQuotaLock exclusively while large engines are flushed and imported to
// reclaim space.
func (ti *TableImporter) CheckDiskQuota(ctx context.Context) {
	var locker sync.Locker
	// lockDiskQuota/unlockDiskQuota make the Lock/Unlock pair idempotent so
	// the loop can request or release the lock at several points without
	// double-locking or double-unlocking.
	lockDiskQuota := func() {
		if locker == nil {
			ti.diskQuotaLock.Lock()
			locker = ti.diskQuotaLock
		}
	}
	unlockDiskQuota := func() {
		if locker != nil {
			locker.Unlock()
			locker = nil
		}
	}
	defer unlockDiskQuota()
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(CheckDiskQuotaInterval):
		}
		largeEngines, inProgressLargeEngines, totalDiskSize, totalMemSize := local.CheckDiskQuota(ti.backend, ti.diskQuota)
		// Under quota: make sure writers are unblocked and check again later.
		if len(largeEngines) == 0 && inProgressLargeEngines == 0 {
			unlockDiskQuota()
			continue
		}
		ti.logger.Warn("disk quota exceeded",
			zap.Int64("diskSize", totalDiskSize),
			zap.Int64("memSize", totalMemSize),
			zap.Int64("quota", ti.diskQuota),
			zap.Int("largeEnginesCount", len(largeEngines)),
			zap.Int("inProgressLargeEnginesCount", inProgressLargeEngines))
		lockDiskQuota()
		if len(largeEngines) == 0 {
			ti.logger.Warn("all large engines are already importing, keep blocking all writes")
			continue
		}
		if err := ti.backend.FlushAllEngines(ctx); err != nil {
			ti.logger.Error("flush engine for disk quota failed, check again later", log.ShortError(err))
			unlockDiskQuota()
			continue
		}
		// at this point, all engines are synchronized on disk.
		// we then import every large engines one by one and complete.
		// if any engine failed to import, we just try again next time, since the data are still intact.
		var importErr error
		for _, engine := range largeEngines {
			// Use a larger split region size to avoid split the same region by many times.
			if err := ti.backend.UnsafeImportAndReset(
				ctx,
				engine,
				int64(config.SplitRegionSize)*int64(config.MaxSplitRegionSizeRatio),
				int64(config.SplitRegionKeys)*int64(config.MaxSplitRegionSizeRatio),
			); err != nil {
				importErr = multierr.Append(importErr, err)
			}
		}
		if importErr != nil {
			// discuss: should we return the error and cancel the import?
			ti.logger.Error("import large engines failed, check again later", log.ShortError(importErr))
		}
		unlockDiskQuota()
	}
}
// adjustDiskQuota sanity-checks the configured disk quota against the
// capacity of the filesystem backing sortDir and returns the quota to use.
// A zero quota selects a default (80% of capacity when the size is known,
// otherwise DefaultDiskQuota); a quota above 80% of capacity is capped.
func adjustDiskQuota(diskQuota int64, sortDir string, logger *zap.Logger) int64 {
	stat, err := common.GetStorageSize(sortDir)
	if err != nil {
		// Capacity unknown: trust a non-zero configured quota, otherwise
		// fall back to the built-in default.
		logger.Warn("failed to get storage size", zap.Error(err))
		if diskQuota != 0 {
			return diskQuota
		}
		logger.Info("use default quota instead", zap.Int64("quota", int64(DefaultDiskQuota)))
		return int64(DefaultDiskQuota)
	}
	quotaCap := int64(float64(stat.Capacity) * 0.8)
	if diskQuota == 0 {
		logger.Info("use 0.8 of the storage size as default disk quota",
			zap.String("quota", units.HumanSize(float64(quotaCap))))
		return quotaCap
	}
	if diskQuota > quotaCap {
		logger.Warn("disk quota is larger than 0.8 of the storage size, use 0.8 of the storage size instead",
			zap.String("quota", units.HumanSize(float64(quotaCap))))
		return quotaCap
	}
	return diskQuota
}
|
package db
import (
"time"
"github.com/go-redis/redis"
"github.com/golang/glog"
"sub_account_service/finance/config"
)
var RedisClient *redis.Client
// InitRedis builds the global Redis client from configuration, verifies
// connectivity with a PING, and starts the background health-check loop.
// A ping failure is only logged: the client is still assigned so operations
// can succeed once Redis becomes reachable, and Redistimer keeps retrying.
func InitRedis() {
	connectRedis()
	// Start the watchdog exactly once. Reconnection must go through
	// connectRedis (not InitRedis) so no additional goroutines are spawned.
	go Redistimer()
}

// connectRedis (re)creates the global RedisClient and checks the connection.
func connectRedis() {
	RedisClient = redis.NewClient(&redis.Options{
		Addr:     config.Opts().RedisAddr,
		Password: config.Opts().RedisPasswd, // empty string means no auth
		DB:       0,                         // use default DB
	})
	if _, err := RedisClient.Ping().Result(); err != nil {
		glog.Errorln("db ping fail", err)
	}
}

// Redistimer pings Redis every 5 seconds and rebuilds the client when the
// connection is lost. It runs for the lifetime of the process.
//
// Bug fix: the original called InitRedis() on a failed ping, and InitRedis
// starts a new Redistimer goroutine — so every reconnection attempt leaked
// one more ticker goroutine, each pinging Redis every 5 seconds forever.
// Reconnection now uses connectRedis, which spawns nothing.
func Redistimer() {
	redisTicker := time.NewTicker(5 * time.Second)
	defer redisTicker.Stop()
	for range redisTicker.C {
		if _, err := RedisClient.Ping().Result(); err != nil {
			glog.Errorln("redis connect fail,err:", err)
			connectRedis()
		}
	}
}
|
// Copyright (c) Alex Ellis 2017. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package inttests
import (
"encoding/json"
"net/http"
"strings"
"testing"
types "github.com/openfaas/faas-provider/types"
requests "github.com/openfaas/faas/gateway/requests"
)
// createFunction deploys a function through the gateway's system API and
// returns the response body, HTTP status code, and any transport error.
func createFunction(request types.FunctionDeployment) (string, int, error) {
	// The deployment struct contains only marshalable fields, so the
	// Marshal error is deliberately ignored.
	payload, _ := json.Marshal(request)
	url := "http://localhost:8080/system/functions"
	return fireRequest(url, http.MethodPost, string(payload))
}
func deleteFunction(name string) (string, int, error) {
marshalled, _ := json.Marshal(requests.DeleteFunctionRequest{FunctionName: name})
return fireRequest("http://localhost:8080/system/functions", http.MethodDelete, string(marshalled))
}
// TestCreate_ValidRequest deploys a well-formed function, expects the
// gateway to accept it with 202, and cleans up the deployment afterwards.
func TestCreate_ValidRequest(t *testing.T) {
	request := types.FunctionDeployment{
		Service:    "test_resizer",
		Image:      "functions/resizer",
		Network:    "func_functions",
		EnvProcess: "",
	}
	_, code, err := createFunction(request)
	if err != nil {
		// Fail fast: on a transport error the status code is meaningless,
		// so continuing (as the original t.Log+t.Fail did) is pointless.
		t.Fatal(err)
	}
	// Clean up even if the status assertion below fails — the function may
	// have been created despite an unexpected code; the original skipped
	// cleanup on mismatch and leaked the deployment into later tests.
	defer deleteFunction("test_resizer")
	wantCode := http.StatusAccepted // 202: async deployment accepted
	if code != wantCode {
		t.Errorf("Got HTTP code: %d, want %d\n", code, wantCode)
	}
}
// TestCreate_InvalidImage sends a deployment whose image name is not a valid
// repository/tag and expects the gateway to reject it with 400 and a
// descriptive error message.
func TestCreate_InvalidImage(t *testing.T) {
	request := types.FunctionDeployment{
		Service:    "test_resizer",
		Image:      "a b c", // spaces make this an invalid image reference
		Network:    "func_functions",
		EnvProcess: "",
	}
	body, code, err := createFunction(request)
	if err != nil {
		// Fail fast: the original logged and continued, then asserted on a
		// zero status code and empty body.
		t.Fatal(err)
	}
	wantCode := http.StatusBadRequest
	if code != wantCode {
		t.Errorf("Got HTTP code: %d, want %d\n", code, wantCode)
		return
	}
	wantMsg := "is not a valid repository/tag"
	if !strings.Contains(body, wantMsg) {
		t.Errorf("Error message %s does not contain: %s\n", body, wantMsg)
	}
}
// TestCreate_InvalidNetwork sends a deployment referencing a non-existent
// Docker network and expects a 400 with an error naming the missing network.
func TestCreate_InvalidNetwork(t *testing.T) {
	request := types.FunctionDeployment{
		Service:    "test_resizer",
		Image:      "functions/resizer",
		Network:    "non_existent_network",
		EnvProcess: "",
	}
	body, code, err := createFunction(request)
	if err != nil {
		// Fail fast: the original logged and continued, then asserted on a
		// zero status code and empty body.
		t.Fatal(err)
	}
	wantCode := http.StatusBadRequest
	if code != wantCode {
		t.Errorf("Got HTTP code: %d, want %d\n", code, wantCode)
		return
	}
	wantMsg := "network non_existent_network not found"
	if !strings.Contains(body, wantMsg) {
		t.Errorf("Error message %s does not contain: %s\n", body, wantMsg)
	}
}
// TestCreate_InvalidJson posts a malformed request body and expects the
// gateway to reject it with 400 rather than attempting a deployment.
func TestCreate_InvalidJson(t *testing.T) {
	reqBody := `not json`
	_, code, err := fireRequest("http://localhost:8080/system/functions", http.MethodPost, reqBody)
	if err != nil {
		// Fail fast: the original logged and continued with a zero code.
		t.Fatal(err)
	}
	if code != http.StatusBadRequest {
		t.Errorf("Got HTTP code: %d, want %d\n", code, http.StatusBadRequest)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.