text
stringlengths 11
4.05M
|
|---|
package consecutivestrings
import (
"strings"
)
// Solution to: https://www.codewars.com/kata/56a5d994ac971f1ac500003e
// BetterSolution returns the longest string obtained by concatenating k
// consecutive elements of strarr. It returns "" when no window exists
// (empty input, k <= 0, or k > len(strarr)).
func BetterSolution(strarr []string, k int) string {
	// Bug fix: a negative k previously produced an invalid slice
	// expression strarr[i:i+k] and panicked; guard the window size.
	if k <= 0 || k > len(strarr) {
		return ""
	}
	longest := ""
	for i := 0; i+k <= len(strarr); i++ {
		concat := strings.Join(strarr[i:i+k], "")
		if len(concat) > len(longest) {
			longest = concat
		}
	}
	return longest
}
// LongestConsec returns the longest concatenation of k consecutive
// elements of strarr; "" when no window of length k fits.
func LongestConsec(strarr []string, k int) string {
	best := ""
	for start := range strarr {
		// Stop once the window would run past the end of the slice.
		if start+k > len(strarr) {
			break
		}
		var sb strings.Builder
		for offset := 0; offset < k; offset++ {
			sb.WriteString(strarr[start+offset])
		}
		if candidate := sb.String(); len(candidate) > len(best) {
			best = candidate
		}
	}
	return best
}
|
package mdtable
import (
"bytes"
"flag"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
//go:generate go test . -write-golden
// TestMain optionally regenerates the golden files (-write-golden)
// before running the test suite.
func TestMain(m *testing.M) {
	writeGolden := flag.Bool("write-golden", false, "write golden files")
	flag.Parse()
	if *writeGolden {
		if err := updateGolden(); err != nil {
			panic(err)
		}
	}
	os.Exit(m.Run())
}
// TestAlign_fillCell checks Align.fillCell padding/alignment behavior,
// including a multi-byte (CJK) string and a width smaller than the content.
func TestAlign_fillCell(t *testing.T) {
	// runTest asserts that s filled into a cell of the given width and
	// padding equals want.
	runTest := func(align Align, width, padding int, s, want string) {
		t.Helper()
		got := align.fillCell(s, width, padding)
		require.Equal(t, want, got)
	}
	runTest(AlignLeft, 8, 1, "世世世", " 世世世 ")
	runTest(AlignLeft, 8, 1, "foo", " foo ")
	runTest(AlignLeft, 8, 0, "foo", "foo ")
	// width smaller than the content: content wins, padding is kept
	runTest(AlignLeft, 2, 1, "foo", " foo ")
	runTest(AlignDefault, 8, 1, "foo", " foo ")
	runTest(AlignDefault, 8, 0, "foo", "foo ")
	runTest(AlignDefault, 2, 1, "foo", " foo ")
	runTest(AlignRight, 8, 1, "foo", " foo ")
	runTest(AlignRight, 8, 0, "foo", " foo")
	runTest(AlignRight, 2, 1, "foo", " foo ")
	runTest(AlignCenter, 8, 1, "foo", " foo ")
	runTest(AlignCenter, 8, 0, "foo", " foo ")
	runTest(AlignCenter, 2, 1, "foo", " foo ")
	// odd width: the extra space lands on one side
	runTest(AlignCenter, 9, 1, "foo", " foo ")
	runTest(AlignCenter, 9, 0, "foo", " foo ")
}
func Test_cellValue(t *testing.T) {
require.Empty(t, cellValue(nil, 1, 1))
require.Empty(t, cellValue(exampleData, -1, 1))
require.Empty(t, cellValue(exampleData, 1, 10))
require.Empty(t, cellValue(exampleData, 10, 1))
require.Equal(t, "Domain name", cellValue(exampleData, 1, 1))
}
// TestGenerate compares Generate output against the committed golden
// files under testdata/tables.
func TestGenerate(t *testing.T) {
	for _, td := range goldenTests {
		td := td
		t.Run(td.name, func(t *testing.T) {
			golden := filepath.Join("testdata", "tables", td.name+".md")
			want, err := ioutil.ReadFile(golden)
			require.NoError(t, err)
			// Golden files carry a trailing newline that Generate does not emit.
			want = bytes.TrimSuffix(want, []byte("\n"))
			got := Generate(td.data, td.options...)
			require.Equal(t, string(want), string(got))
		})
	}
}
// exampleData is the shared fixture table (header row first) used by
// the cellValue and golden-file tests below.
var exampleData = [][]string{
	{"Date", "Description", "CV2", "Amount"},
	{"1/1/2014", "Domain name", "2233", "$10.98"},
	{"1/1/2014", "January Hosting", "2233", "$54.95"},
	{"1/4/2014", "February Hosting", "2233", "$51.00"},
	{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
}
// goldenTests drives both TestGenerate and updateGolden: each entry
// names a golden file under testdata/tables and the Generate options
// that should reproduce it.
var goldenTests = []struct {
	name    string
	options []Option
	data    [][]string
}{
	{
		name: "defaults",
		data: exampleData,
	},
	{
		// exercises several options at once, including overrides of the
		// table-wide alignment by per-column settings
		name: "combined-options",
		data: exampleData,
		options: []Option{
			Alignment(AlignCenter),
			ColumnAlignment(1, AlignLeft),
			ColumnAlignment(2, AlignRight),
			ColumnTextAlignment(1, AlignCenter),
			HeaderAlignment(AlignRight),
			ColumnHeaderAlignment(1, AlignLeft),
			ColumnMinWidth(0, 12),
			ColumnMinWidth(2, 12),
			ColumnMinWidth(3, 12),
		},
	},
	{
		// no data at all
		name: "empty",
	},
	{
		name:    "Alignment",
		data:    exampleData,
		options: []Option{Alignment(AlignRight)},
	},
	{
		name:    "HeaderAlignment",
		data:    exampleData,
		options: []Option{HeaderAlignment(AlignRight)},
	},
	{
		name:    "TextAlignment",
		data:    exampleData,
		options: []Option{TextAlignment(AlignRight)},
	},
	{
		name:    "ColumnAlignment",
		data:    exampleData,
		options: []Option{ColumnAlignment(1, AlignRight)},
	},
	{
		name:    "ColumnTextAlignment",
		data:    exampleData,
		options: []Option{ColumnTextAlignment(1, AlignRight)},
	},
	{
		name:    "ColumnHeaderAlignment",
		data:    exampleData,
		options: []Option{ColumnHeaderAlignment(1, AlignRight)},
	},
	{
		name:    "ColumnMinWidth",
		data:    exampleData,
		options: []Option{ColumnMinWidth(0, 40)},
	},
	{
		// min width smaller than the content
		name:    "ColumnMinWidth-small",
		data:    exampleData,
		options: []Option{ColumnMinWidth(0, 2)},
	},
}
// updateGolden rebuilds testdata/tables from scratch so stale golden
// files never linger, then writes one file per goldenTests entry.
func updateGolden() error {
	dir := filepath.Join("testdata", "tables")
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	if err := os.MkdirAll(dir, 0o700); err != nil {
		return err
	}
	for _, gt := range goldenTests {
		// Golden files end with a newline that Generate does not emit.
		content := append(Generate(gt.data, gt.options...), '\n')
		if err := ioutil.WriteFile(filepath.Join(dir, gt.name+".md"), content, 0o600); err != nil {
			return err
		}
	}
	return nil
}
|
package fetch
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"github.com/slotix/dataflowkit/storage"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
var st storage.Store
// init configures viper settings for the Splash fetcher and opens the
// Diskv-backed store used by the tests in this package.
func init() {
	viper.Set("SPLASH", "127.0.0.1:8050")
	viper.Set("SPLASH_TIMEOUT", 20)
	viper.Set("SPLASH_RESOURCE_TIMEOUT", 30)
	viper.Set("SPLASH_WAIT", 0.5)
	viper.Set("STORAGE_TYPE", "Diskv")
	storageType, err := storage.TypeString(viper.GetString("STORAGE_TYPE"))
	if err != nil {
		// Only logged: tests proceed with the zero storageType if the
		// lookup fails.
		logger.Error(err)
	}
	st = storage.NewStore(storageType)
}
// TestFetchService exercises Service.Response for the base fetcher
// against a local httptest server, including an invalid-URL case.
func TestFetchService(t *testing.T) {
	var svc Service
	svc = FetchService{}
	cArr := []*http.Cookie{
		{
			Name:   "cookie1",
			Value:  "cValue1",
			Domain: "example.com",
		},
		{
			Name:   "cookie2",
			Value:  "cValue2",
			Domain: "example.com",
		},
	}
	userToken := "12345"
	cookies, err := json.Marshal(cArr)
	if err != nil {
		// Bug fix: this error was previously overwritten by the next
		// assignment without ever being checked.
		t.Fatal(err)
	}
	// NOTE(review): cookies are stored under token "12345" but the
	// requests below use "123456", so the stored cookies are never
	// read back — confirm which token is intended.
	err = st.Write(userToken, cookies, 0)
	if err != nil {
		t.Log(err)
	}
	//BaseFetcher
	r := mux.NewRouter()
	r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Bug fix: header name was misspelled "Conent-Type".
		w.Header().Set("Content-Type", "text/html")
		w.Write(IndexContent)
	})
	ts := httptest.NewServer(r)
	defer ts.Close()
	resp, err := svc.Response(BaseFetcherRequest{
		URL:       ts.URL,
		Method:    "GET",
		UserToken: "123456",
	})
	assert.Nil(t, err, "Expected no error")
	assert.NotNil(t, resp, "Expected response is not nil")
	//read cookies
	resp, err = svc.Response(BaseFetcherRequest{
		URL:       ts.URL,
		Method:    "GET",
		UserToken: "123456",
	})
	assert.Nil(t, err, "Expected no error")
	assert.NotNil(t, resp, "Expected response is not nil")
	//invalid URL
	resp, err = svc.Response(BaseFetcherRequest{
		URL:    "invalid_addr",
		Method: "GET",
	})
	assert.Error(t, err, "Expected error")
	//Splash Fetcher
	// response, err := svc.Response(splash.Request{
	// URL: "http://example.com",
	// FormData: "",
	// LUA: "",
	// UserToken: userToken,
	// })
	// assert.Nil(t, err, "Expected no error")
	// assert.Equal(t, 200, response.(*splash.Response).Response.Status, "Expected Splash server returns 200 status code")
}
/* func TestEncodeSplashFetcherContent(t *testing.T) {
ctx := context.Background()
resp := splash.Response{
HTML: `<!DOCTYPE html><html><body><h1>Hello World</h1></body></html>`,
}
w := httptest.NewRecorder()
EncodeSplashFetcherContent(ctx, w, resp)
//r := w.Code
//r := w.Result()
logger.Info(w)
} */
|
package rabbitmq
import (
"context"
"io/ioutil"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
"github.com/streadway/amqp"
"github.com/batchcorp/plumber-schemas/build/go/protos/args"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/rabbit"
"github.com/batchcorp/plumber/backends/rabbitmq/rabbitfakes"
"github.com/batchcorp/plumber/validate"
)
// Ginkgo suite covering read-option validation and the Read path of
// the RabbitMQ backend (using a counterfeiter fake for the client).
var _ = Describe("RabbitMQ Backend", func() {
	var readOpts *opts.ReadOptions
	BeforeEach(func() {
		// Fully-populated options; individual specs blank out one field
		// to trigger a specific validation error.
		readOpts = &opts.ReadOptions{
			Rabbit: &opts.ReadGroupRabbitOptions{
				Args: &args.RabbitReadArgs{
					ExchangeName:   "test",
					QueueName:      "test",
					BindingKey:     "test",
					QueueExclusive: false,
					QueueDeclare:   false,
					QueueDurable:   false,
					AutoAck:        false,
					ConsumerTag:    "test",
					QueueDelete:    false,
				},
			},
		}
	})
	Context("validateReadOptions", func() {
		It("validates nil read options", func() {
			err := validateReadOptions(nil)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrMissingReadOptions))
		})
		It("validates missing backend group", func() {
			readOpts.Rabbit = nil
			err := validateReadOptions(readOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendGroup))
		})
		It("validates missing backend args", func() {
			readOpts.Rabbit.Args = nil
			err := validateReadOptions(readOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendArgs))
		})
		It("validates empty exchange", func() {
			readOpts.Rabbit.Args.ExchangeName = ""
			err := validateReadOptions(readOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(ErrEmptyExchangeName))
		})
		It("validates empty queue name", func() {
			readOpts.Rabbit.Args.QueueName = ""
			err := validateReadOptions(readOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(ErrEmptyQueueName))
		})
		// Bug fix: spec name previously read "validates empty binding keu".
		It("validates empty binding key", func() {
			readOpts.Rabbit.Args.BindingKey = ""
			err := validateReadOptions(readOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(ErrEmptyBindingKey))
		})
	})
	Context("Read", func() {
		It("validates read options", func() {
			p := &RabbitMQ{}
			err := p.Read(context.Background(), nil, nil, nil)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring(validate.ErrMissingReadOptions.Error()))
		})
		It("reads a message", func() {
			fakeRabbit := &rabbitfakes.FakeIRabbit{}
			// The fake feeds exactly one delivery into the consume callback.
			fakeRabbit.ConsumeStub = func(context.Context, chan *rabbit.ConsumeError, func(msg amqp.Delivery) error) {
				_, _, consumeFunc := fakeRabbit.ConsumeArgsForCall(0)
				consumeFunc(amqp.Delivery{
					Headers:   amqp.Table{"test": "value"},
					Timestamp: time.Time{},
				})
			}
			r := &RabbitMQ{
				client: fakeRabbit,
				log:    logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}),
			}
			errorCh := make(chan *records.ErrorRecord, 1)
			resultsCh := make(chan *records.ReadRecord, 1)
			err := r.Read(context.Background(), readOpts, resultsCh, errorCh)
			Expect(err).ToNot(HaveOccurred())
			Expect(resultsCh).Should(Receive())
			Expect(errorCh).ShouldNot(Receive())
		})
	})
})
|
package client
import (
"bufio"
"crypto/ecdsa"
"encoding/base64"
"encoding/hex"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/ethereum/go-ethereum/crypto"
"github.com/zbohm/lirisi/ring"
)
// CreatePrivateKey generates a fresh ECDSA private key and either
// prints it hex-encoded to stdout (output == "") or saves it to the
// named file. Panics on any failure.
func CreatePrivateKey(output string) {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	if output == "" {
		fmt.Println(hex.EncodeToString(crypto.FromECDSA(key)))
		return
	}
	if err := crypto.SaveECDSA(output, key); err != nil {
		panic(err)
	}
}
// ExtractPublicKey derives the public key from the private key file,
// base64-encodes it, and prints it to stdout (output == "") or writes
// it (newline-terminated) to the output file.
func ExtractPublicKey(output, privkeyFilename string) {
	privKey, err := crypto.LoadECDSA(privkeyFilename)
	if err != nil {
		panic(err)
	}
	pubKey := privKey.Public().(*ecdsa.PublicKey)
	data := base64.StdEncoding.EncodeToString(crypto.FromECDSAPub(pubKey))
	if output == "" {
		fmt.Println(data)
		return
	}
	// Bug fix: the WriteFile error was previously discarded.
	if err := ioutil.WriteFile(output, append([]byte(data), '\n'), 0600); err != nil {
		log.Fatal(err)
	}
}
// CreateTestingRing creates the list of public keys for testing purposes.
// It writes `size` freshly generated, base64-encoded public keys — one
// per line — to stdout (output == "") or to the named file.
func CreateTestingRing(output string, size int) {
	var file *os.File
	if output != "" {
		f, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			log.Fatal(err)
		}
		file = f
	}
	for n := 0; n < size; n++ {
		key, err := crypto.GenerateKey()
		if err != nil {
			log.Fatal(err)
		}
		encoded := base64.StdEncoding.EncodeToString(crypto.FromECDSAPub(key.Public().(*ecdsa.PublicKey)))
		if file == nil {
			fmt.Println(encoded)
			continue
		}
		if _, err := file.Write([]byte(encoded + "\n")); err != nil {
			log.Fatal(err)
		}
	}
	if file != nil {
		file.Close()
	}
}
// loadRingPubKeys reads one base64-encoded public key per line from
// ringFilename and returns them as a PublicKeysList. Any read or
// decode failure terminates the process via log.Fatal.
func loadRingPubKeys(ringFilename string) ring.PublicKeysList {
	// Load public keys
	inFile, err := os.Open(ringFilename)
	if err != nil {
		log.Fatal(err)
	}
	defer inFile.Close()
	pubList := make(ring.PublicKeysList, 0)
	scanner := bufio.NewScanner(inFile)
	for scanner.Scan() {
		line := scanner.Text()
		data, err := base64.StdEncoding.DecodeString(line)
		if err != nil {
			log.Fatal(err)
		}
		pubKey, err := crypto.UnmarshalPubkey(data)
		if err != nil {
			log.Fatal(err)
		}
		pubList = append(pubList, pubKey)
	}
	// Bug fix: scanner errors (I/O failures, over-long lines) were
	// previously ignored, silently truncating the ring.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return pubList
}
// CreateSignature creates a ring signature over message using the
// private key and key ring, and prints the PEM to stdout (output == "")
// or writes it to the output file.
func CreateSignature(output, message, ringFilename, privateFilename string) {
	// Load private key
	privKey, err := crypto.LoadECDSA(privateFilename)
	if err != nil {
		log.Fatal(err)
	}
	// Load public keys
	ringPubList := loadRingPubKeys(ringFilename)
	// Make signature
	sign, err := ring.CreateSign([]byte(message), ringPubList, privKey)
	if err != nil {
		log.Fatal(err)
	}
	// Bug fix: the ToPEM error was previously discarded.
	pem, err := sign.ToPEM()
	if err != nil {
		log.Fatal(err)
	}
	if output == "" {
		fmt.Println(string(pem))
		return
	}
	// Bug fix: the WriteFile error was previously discarded.
	if err := ioutil.WriteFile(output, pem, 0600); err != nil {
		log.Fatal(err)
	}
}
// VerifySignature verifies signature for message and keys ring. The
// result ("SUCCESS"/"ERROR") goes to stdout or the output file; a
// failed verification terminates the process with a non-zero status.
func VerifySignature(output, message, ringFilename, signatureFilename string) {
	// Load signature
	pem, err := ioutil.ReadFile(signatureFilename)
	if err != nil {
		log.Fatal(err)
	}
	sign, err := ring.FromPEM(pem)
	if err != nil {
		log.Fatal(err)
	}
	// Load public keys
	ringPubList := loadRingPubKeys(ringFilename)
	// Verify signature
	if ring.VerifySign(sign, []byte(message), ringPubList) {
		if output == "" {
			fmt.Println("SUCCESS")
			return
		}
		// Bug fix: the WriteFile error was previously discarded.
		if err := ioutil.WriteFile(output, []byte("SUCCESS\n"), 0600); err != nil {
			log.Fatal(err)
		}
		return
	}
	if output == "" {
		log.Fatal("ERROR")
	}
	// Bug fix: the WriteFile error was previously discarded.
	if err := ioutil.WriteFile(output, []byte("ERROR\n"), 0600); err != nil {
		log.Fatal(err)
	}
	os.Exit(1)
}
// GetKeyImage extracts the signer's key image (a private-key
// identifier) from a PEM signature file and prints it to stdout
// (output == "") or writes it to the output file.
func GetKeyImage(output, signatureFilename string) {
	// Load signature
	pem, err := ioutil.ReadFile(signatureFilename)
	if err != nil {
		log.Fatal(err)
	}
	sign, err := ring.FromPEM(pem)
	if err != nil {
		log.Fatal(err)
	}
	keyImage := sign.ImageToBase64()
	if output == "" {
		fmt.Println(string(keyImage))
		return
	}
	// Bug fix: the WriteFile error was previously discarded.
	if err := ioutil.WriteFile(output, keyImage, 0600); err != nil {
		log.Fatal(err)
	}
}
|
package raftstore
import (
"sync"
"go.uber.org/atomic"
"github.com/pingcap-incubator/tinykv/kv/tikv/raftstore/message"
"github.com/pingcap-incubator/tinykv/proto/pkg/raft_serverpb"
"github.com/pingcap/errors"
)
// router routes a message to a peer.
type router struct {
	peers         sync.Map           // regionID (uint64) -> *peerState
	workerSenders []chan message.Msg // per-worker buffered channels; peers are sharded over them by region id
	storeSender   chan<- message.Msg // channel feeding the store FSM
	storeFsm      *storeFsm
}
// newRouter builds a router with workerSize worker channels, each
// buffered to 4096 messages.
func newRouter(workerSize int, storeSender chan<- message.Msg, storeFsm *storeFsm) *router {
	r := &router{
		workerSenders: make([]chan message.Msg, workerSize),
		storeSender:   storeSender,
		storeFsm:      storeFsm,
	}
	for i := range r.workerSenders {
		r.workerSenders[i] = make(chan message.Msg, 4096)
	}
	return r
}
// get returns the peerState registered for regionID, or nil when the
// region is unknown.
func (pr *router) get(regionID uint64) *peerState {
	if v, ok := pr.peers.Load(regionID); ok {
		return v.(*peerState)
	}
	return nil
}
// register creates the peerState for peer and stores it in the peer
// map; the peer's messages are sharded onto a worker channel by
// region id.
func (pr *router) register(peer *peerFsm) {
	regionID := peer.peer.regionId
	sender := pr.workerSenders[int(regionID)%len(pr.workerSenders)]
	pr.peers.Store(regionID, &peerState{
		msgCh:  sender,
		closed: atomic.NewBool(false),
		peer:   peer,
		apply:  newApplierFromPeer(peer),
	})
}
// close shuts down the peerState for regionID (if any) and removes it
// from the peer map.
func (pr *router) close(regionID uint64) {
	if v, ok := pr.peers.Load(regionID); ok {
		v.(*peerState).close()
		pr.peers.Delete(regionID)
	}
}
// send tags msg with regionID and routes it to that region's peer,
// returning errPeerNotFound when no peer is registered.
func (pr *router) send(regionID uint64, msg message.Msg) error {
	msg.RegionID = regionID
	if p := pr.get(regionID); p != nil {
		return p.send(msg)
	}
	return errPeerNotFound
}
// sendRaftCommand wraps cmd in a peer message and routes it by the
// region id found in the request header.
func (pr *router) sendRaftCommand(cmd *message.MsgRaftCmd) error {
	id := cmd.Request.Header.RegionId
	return pr.send(id, message.NewPeerMsg(message.MsgTypeRaftCmd, id, cmd))
}
// sendRaftMessage delivers a raft message to the peer owning
// msg.RegionId. If that fails (no peer registered), the message is
// redirected to the store FSM instead; the send error is deliberately
// swallowed, so this function always returns nil.
func (pr *router) sendRaftMessage(msg *raft_serverpb.RaftMessage) error {
	regionID := msg.RegionId
	if pr.send(regionID, message.NewPeerMsg(message.MsgTypeRaftMessage, regionID, msg)) != nil {
		pr.sendStore(message.NewPeerMsg(message.MsgTypeStoreRaftMessage, regionID, msg))
	}
	return nil
}
// sendStore forwards msg to the store FSM channel; blocks when the
// channel is full.
func (pr *router) sendStore(msg message.Msg) {
	pr.storeSender <- msg
}
// errPeerNotFound is returned by send when the target region has no
// registered peer.
var errPeerNotFound = errors.New("peer not found")
|
package sentence
import (
"path/filepath"
"runtime"
)
// Names of the bundled default word-list files, resolved relative to
// this source file's directory (see defaultNounsFilePath below).
const (
	nounsFileName      = "default_nouns.txt"
	adjectivesFileName = "default_adjectives.txt"
)
// Absolute paths of the default word lists, anchored to the directory
// containing this source file rather than the process working directory.
var (
	defaultNounsFilePath      = filepath.Join(getCurrentDir(), nounsFileName)
	defaultAdjectivesFilePath = filepath.Join(getCurrentDir(), adjectivesFileName)
)
// getCurrentDir returns the directory of the Go source file that calls it.
// NOTE(review): runtime.Caller(1) resolves the *caller's* source file.
// The only callers here are the package-level vars in this same file, so
// this yields this file's directory — but calling it from another file
// would yield that file's directory instead; confirm intent before reuse
// (Caller(0) would pin the result to this file).
func getCurrentDir() string {
	_, currentPath, _, _ := runtime.Caller(1)
	return filepath.Dir(currentPath)
}
|
// Copyright 2020, Jeff Alder
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nr_yml
import (
"github.com/newrelic/go-agent/v3/newrelic"
"os"
)
// ConfigYaml mirrors the newrelic.Config settings that can be supplied
// from a YAML file. All fields are pointers so that "absent in YAML"
// can be distinguished from an explicit zero value (see update).
// Don't use this; it's only exported for the yaml parser
type ConfigYaml struct {
	AppName               *string                 `yaml:"app_name"`
	License               *string                 `yaml:"license_key"`
	Host                  *string                 `yaml:"host"`
	Enabled               *bool                   `yaml:"agent_enabled"`
	HighSecurity          *bool                   `yaml:"high_security"`
	SecurityPoliciesToken *string                 `yaml:"security_policies_token"`
	LogStreamName         *string                 `yaml:"log_stream_name"`
	LogLevel              *string                 `yaml:"log_level"`
	Labels                *map[string]string      `yaml:"labels"`
	DistributedTracing    *DistributedTracingYaml `yaml:"distributed_tracing"`
	InfiniteTracing       *InfiniteTracingYaml    `yaml:"infinite_tracing"`
	ProcessHost           *ProcessHostYaml        `yaml:"process_host"`
	Attributes            *AttributesYaml         `yaml:"attributes"`
	Utilization           *UtilizationYaml        `yaml:"utilization"`
}
// update applies every non-nil YAML value onto cfg. Scalar settings
// are applied directly (via the newrelic.ConfigXxx option functions or
// field assignment); nested sections delegate to their own update
// methods, and logging is configured last via updateLogging.
func (yamlValues ConfigYaml) update(cfg *newrelic.Config) {
	if yamlValues.AppName != nil {
		newrelic.ConfigAppName(*yamlValues.AppName)(cfg)
	}
	if yamlValues.Enabled != nil {
		newrelic.ConfigEnabled(*yamlValues.Enabled)(cfg)
	}
	if yamlValues.License != nil {
		newrelic.ConfigLicense(*yamlValues.License)(cfg)
	}
	if yamlValues.HighSecurity != nil {
		cfg.HighSecurity = *yamlValues.HighSecurity
	}
	if yamlValues.SecurityPoliciesToken != nil {
		cfg.SecurityPoliciesToken = *yamlValues.SecurityPoliciesToken
	}
	if yamlValues.Host != nil {
		cfg.Host = *yamlValues.Host
	}
	if yamlValues.Labels != nil {
		cfg.Labels = *yamlValues.Labels
	}
	if yamlValues.Attributes != nil {
		yamlValues.Attributes.update(cfg)
	}
	if yamlValues.InfiniteTracing != nil {
		yamlValues.InfiniteTracing.update(cfg)
	}
	if yamlValues.DistributedTracing != nil {
		yamlValues.DistributedTracing.update(cfg)
	}
	if yamlValues.ProcessHost != nil {
		yamlValues.ProcessHost.update(cfg)
	}
	if yamlValues.Utilization != nil {
		yamlValues.Utilization.update(cfg)
	}
	yamlValues.updateLogging(cfg)
}
// updateLogging maps the log_stream_name/log_level YAML settings onto
// an agent logger. Unset or unrecognized stream names configure no
// logger; any level other than debug falls back to the info logger.
func (yamlValues ConfigYaml) updateLogging(cfg *newrelic.Config) {
	if yamlValues.LogStreamName == nil {
		return
	}
	var stream *os.File
	switch *yamlValues.LogStreamName {
	case "STDOUT", "Stdout", "stdout":
		stream = os.Stdout
	case "STDERR", "Stderr", "stderr":
		stream = os.Stderr
	default:
		return
	}
	if yamlValues.LogLevel != nil {
		switch *yamlValues.LogLevel {
		case "DEBUG", "Debug", "debug":
			newrelic.ConfigDebugLogger(stream)(cfg)
			return
		}
	}
	newrelic.ConfigInfoLogger(stream)(cfg)
}
|
package intcode
import "fmt"
// Module is a module with an opcode and a ParamCount
// which does something to an ic computer *Intcode using the next ParamCount memory locations.
// Calling function can return an error if its params turns out to be invalid
// e.g., accessing an invalid memory address.
//
// It is assumed that calling function only happens if ic.Current() equals the opcode,
// unless if the Module supports "parameter modes",
// where in that case ic.Current()%100 is checked instead.
// Note that function will affect the Intcode computer
// e.g., changing its memory, inputs and outputs.
type Module struct {
	opcode        int64                    // opcode (if 0 then check will occur in function)
	mnemonic      string                   // "name" of the opcode
	parameterized bool                     // should module support parameter modes?
	function      func(ic *Intcode) error  // what does it do to the computer?
}
// ModuleConfig is a structure representing the configuration of a module.
// It is the exported mirror of Module's unexported fields; NewModule
// copies it field-for-field.
type ModuleConfig struct {
	Opcode        int64                    // opcode (if 0 then check will occur in function)
	Mnemonic      string                   // "name" of the opcode
	Parameterized bool                     // should module support parameter modes?
	Function      func(ic *Intcode) error  // what does it do to the computer?
}
// NewModule builds a Module from the exported config struct, copying
// each public field into its unexported counterpart.
func NewModule(config ModuleConfig) *Module {
	m := Module{
		opcode:        config.Opcode,
		mnemonic:      config.Mnemonic,
		parameterized: config.Parameterized,
		function:      config.Function,
	}
	return &m
}
// getFromMemory turns an int flags (opcode without 2 LSDs), a slice of parameters, and an Intcode reel
// into the appropriate parameters, depending on slice inputs.
// The parameters are taken from the memory, raw,
// that is, the flags are unknown.
// If writeEnable is true, it is assumed that parameters[len(parameters)-1]
// is the address to be written,
// making it as a value as is, or added by ic.RelativeBase().
// The parameter slice is then modified, depending on said flags.
//
// Example: if flags is 10 (from LSD: position, immediate, position),
// parameters is []int64{4,3, 2}, writeEnable is true (thus using the raw value 2),
// then parameters becomes []int64{4, GetLocation(3), 3}.
//
// Modes:
// * Position mode (flag 0): use memory location
// * Immediate mode (flag 1): use the value itself
// * Relative mode (flag 2): use memory location plus the relative base of the computer
//
//
// May return an error if OutOfBounds.
func getFromMemory(flags int64, parameters []int64, ic *Intcode, writeEnable bool) (err error) {
	parameterCount := len(parameters)
	// to ignore or not to ignore parameters[len(parameters)-1]?
	// check writeEnable
	if writeEnable {
		// then parameters[len(parameters)-1] would be direct
		// therefore reduce the number of "parameters" to get
		parameterCount--
	}
	// Consume one decimal digit of flags per parameter, least
	// significant digit first.
	for ii := 0; ii < parameterCount; ii++ {
		switch flag := flags % 10; flag {
		case 0: // position mode
			if parameters[ii], err = ic.GetLocation(parameters[ii]); err != nil {
				return
			}
		case 1: // immediate mode
			// do nothing
		case 2: // relative mode
			if parameters[ii], err = ic.GetLocation(parameters[ii] + ic.RelativeBase()); err != nil {
				return
			}
		default:
			return fmt.Errorf("unimplemented flag (%v)", flag)
		}
		flags /= 10
	}
	// now for the last flag...
	// The write target is never dereferenced: position/immediate keep
	// the raw address, relative mode offsets it by the relative base.
	if writeEnable {
		switch flag := flags % 10; flag {
		case 0, 1: // position or immediate: address is used as-is
			// do nothing
		case 2: // relative mode
			parameters[parameterCount] = parameters[parameterCount] + ic.RelativeBase()
		}
	}
	return
}
// SimpleAdder is a simple program that adds values.
// Adapted from https://adventofcode.com/2019/day/2.
//
// Memory:
// 1 ARG1 ARG2 ARG3
// Procedure:
// mem[ARG3] = mem[ARG1]+mem[ARG2]
// pc += 4
var SimpleAdder = NewModule(ModuleConfig{
	// mem: 1 ARG1 ARG2 ARG3
	// add: mem[ARG3] = mem[ARG1]+mem[ARG2]
	Opcode:   1,
	Mnemonic: "ADD",
	Function: func(ic *Intcode) (err error) {
		// assume that Current() is 1
		// Now check if the next ones are in memory
		var params []int64
		if params, err = ic.GetNext(3); err != nil {
			return
		}
		// Dereference the two source operands; params[2] stays a raw
		// destination address.
		if params[0], err = ic.GetLocation(params[0]); err != nil {
			return
		}
		if params[1], err = ic.GetLocation(params[1]); err != nil {
			return
		}
		if err = ic.SetLocation(params[2], params[0]+params[1]); err != nil {
			return
		}
		return ic.Increment(4)
	},
})
// SimpleMultiplier is a simple program that multiplies values.
// Adapted from https://adventofcode.com/2019/day/2.
//
// Memory:
// 2 ARG1 ARG2 ARG3
// Procedure:
// mem[ARG3] = mem[ARG1]*mem[ARG2]
// pc += 4
//
var SimpleMultiplier = NewModule(ModuleConfig{
	// mem: 2 ARG1 ARG2 ARG3
	// mul: mem[ARG3] = mem[ARG1]*mem[ARG2]
	Opcode:   2,
	Mnemonic: "MUL",
	Function: func(ic *Intcode) (err error) {
		// assume that Current() is 2
		// Now check if the next ones are in memory
		var params []int64
		if params, err = ic.GetNext(3); err != nil {
			return
		}
		// Dereference the two source operands; params[2] stays a raw
		// destination address.
		if params[0], err = ic.GetLocation(params[0]); err != nil {
			return
		}
		if params[1], err = ic.GetLocation(params[1]); err != nil {
			return
		}
		if err = ic.SetLocation(params[2], params[0]*params[1]); err != nil {
			return
		}
		return ic.Increment(4)
	},
})
// Adder is a module that adds values with support for parameterized mode.
// Adapted from https://adventofcode.com/2019/day/5.
//
// Memory:
// 1 ARG1 ARG2 ARG3
// Procedure:
// mem[ARG3] = mem[ARG1]+mem[ARG2]
// pc += 4
var Adder = NewModule(ModuleConfig{
	Opcode:        1,
	Mnemonic:      "ADD",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(3); err != nil {
			return
		}
		// ic.Current()/100 strips the 2-digit opcode, leaving the mode
		// flags; writeEnable=true keeps params[2] as a write address.
		if err = getFromMemory(ic.Current()/100, params, ic, true); err != nil {
			return
		}
		if err = ic.SetLocation(params[2], params[0]+params[1]); err != nil {
			return
		}
		return ic.Increment(4)
	},
})
// Multiplier is a module that multiplies values with support for parameterized mode.
// Adapted from https://adventofcode.com/2019/day/5.
//
// Memory:
// 2 ARG1 ARG2 ARG3
// Procedure:
// mem[ARG3] = mem[ARG1]*mem[ARG2]
// pc += 4
var Multiplier = NewModule(ModuleConfig{
	Opcode:        2,
	Mnemonic:      "MUL",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(3); err != nil {
			return
		}
		// ic.Current()/100 strips the 2-digit opcode, leaving the mode
		// flags; writeEnable=true keeps params[2] as a write address.
		if err = getFromMemory(ic.Current()/100, params, ic, true); err != nil {
			return
		}
		if err = ic.SetLocation(params[2], params[0]*params[1]); err != nil {
			return
		}
		return ic.Increment(4)
	},
})
// Inputter reads from the input and sets it to a specific address
//
// Memory:
// 3 ARG1
// Procedure:
// mem[ARG1], err = ic.GetInput()
// pc += 2
var Inputter = NewModule(ModuleConfig{
	Opcode:        3,
	Mnemonic:      "INPUT",
	Parameterized: true, // because apparently 203 exists...
	Function: func(ic *Intcode) (err error) {
		var params []int64
		var input int64
		if params, err = ic.GetNext(1); err != nil {
			return
		}
		// Note: the input is consumed *before* the destination address
		// is resolved; a getFromMemory failure still pops the input.
		if input, err = ic.GetInput(); err != nil {
			return
		}
		// writeEnable=true: params[0] is resolved as a write address.
		if err = getFromMemory(ic.Current()/100, params, ic, true); err != nil {
			return
		}
		if err = ic.SetLocation(params[0], input); err != nil {
			return
		}
		return ic.Increment(2)
	},
})
// Outputter is a module that outputs the value at its only parameter.
//
// Memory:
// 4 ARG1
// Procedure:
// output = append(output, mem[ARG1])
// pc += 2
var Outputter = NewModule(ModuleConfig{
	Opcode:        4,
	Mnemonic:      "OUTPUT",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(1); err != nil {
			return
		}
		// writeEnable=false: params[0] is dereferenced per its mode flag.
		if err = getFromMemory(ic.Current()/100, params, ic, false); err != nil {
			return
		}
		ic.PushToOutput(params[0])
		return ic.Increment(2)
	},
})
// OutputToInput is a module that, instead of pushing its parameter to Output,
// it pushes the value to Input. It shares opcode 4 with Outputter, so
// install one or the other, not both.
var OutputToInput = NewModule(ModuleConfig{
	Opcode:        4,
	Mnemonic:      "OUTPUT",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(1); err != nil {
			return
		}
		if err = getFromMemory(ic.Current()/100, params, ic, false); err != nil {
			return
		}
		// The only difference from Outputter: the value feeds the input queue.
		ic.PushToInput(params[0])
		return ic.Increment(2)
	},
})
// OutputAndHalt is a module that outputs the value at its only parameter
// and, if non-zero, will halt immediately. Used for aoc2019.Day05.
//
// Memory:
// 4 ARG1
// Procedure:
// output = append(output, mem[ARG1])
// if mem[ARG1] != 0 then halt
// pc += 2
var OutputAndHalt = NewModule(ModuleConfig{
	Opcode:        4,
	Mnemonic:      "OUTPUT",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(1); err != nil {
			return
		}
		if err = getFromMemory(ic.Current()/100, params, ic, false); err != nil {
			return
		}
		ic.PushToOutput(params[0])
		// A non-zero output is treated as a diagnostic failure: halt
		// without advancing the program counter.
		if params[0] != 0 {
			return NewHaltError("OUTPUT (4)")
		}
		return ic.Increment(2)
	},
})
// JumpIfTrue is a module that sets the instruction pointer to the second parameter
// if the first parameter is non-zero
//
// Memory:
// 5 ARG1 ARG2
// Procedure:
// if mem[ARG1] != 0 then jump(mem[ARG2])
// pc += 3
var JumpIfTrue = NewModule(ModuleConfig{
	Opcode:        5,
	Mnemonic:      "JUMPIFTRUE",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(2); err != nil {
			return
		}
		if err = getFromMemory(ic.Current()/100, params, ic, false); err != nil {
			return
		}
		// from here on, params[0] would be the value we would check
		// and params[1] would be where we would want to jump
		if params[0] != 0 {
			return ic.Jump(params[1])
		}
		return ic.Increment(3)
	},
})
// JumpIfFalse is a module that sets the instruction pointer to the second parameter
// if the first parameter is zero
//
// Memory:
// 6 ARG1 ARG2
// Procedure:
// if mem[ARG1] == 0 then jump(mem[ARG2])
// pc += 3
var JumpIfFalse = NewModule(ModuleConfig{
	Opcode: 6,
	// Bug fix: the mnemonic previously read "JUMPIFTRUE", copied from
	// the JumpIfTrue module above. (The memory comment also said 5.)
	Mnemonic:      "JUMPIFFALSE",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(2); err != nil {
			return
		}
		if err = getFromMemory(ic.Current()/100, params, ic, false); err != nil {
			return
		}
		// from here on, params[0] would be the value we would check
		// and params[1] would be where we would want to jump
		if params[0] == 0 {
			return ic.Jump(params[1])
		}
		return ic.Increment(3)
	},
})
// LessThan is a module that stores 1 in the third parameter
// if the first is less than the second; otherwise it will store 0
//
// Memory:
// 7 ARG1 ARG2 ARG3
// Procedure:
// if mem[ARG1] < mem[ARG2] then mem[ARG3]=1 else mem[ARG3]=0
// pc += 4
var LessThan = NewModule(ModuleConfig{
	Opcode:        7,
	Mnemonic:      "LESSTHAN",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(3); err != nil {
			return
		}
		// writeEnable=true keeps params[2] as the destination address.
		if err = getFromMemory(ic.Current()/100, params, ic, true); err != nil {
			return
		}
		// now write to mem[params[2]] depending on what's with params[0]&params[1]
		if params[0] < params[1] {
			if err = ic.SetLocation(params[2], 1); err != nil {
				return
			}
		} else {
			if err = ic.SetLocation(params[2], 0); err != nil {
				return
			}
		}
		return ic.Increment(4)
	},
})
// Equals is a module that stores 1 in the third parameter
// if the first equals the second; otherwise it will store 0
//
// Memory:
// 8 ARG1 ARG2 ARG3
// Procedure:
// if mem[ARG1] == mem[ARG2] then mem[ARG3]=1 else mem[ARG3]=0
// pc += 4
var Equals = NewModule(ModuleConfig{
	Opcode:        8,
	Mnemonic:      "EQUALS",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(3); err != nil {
			return
		}
		// writeEnable=true keeps params[2] as the destination address.
		if err = getFromMemory(ic.Current()/100, params, ic, true); err != nil {
			return
		}
		// now write to mem[params[2]] depending on what's with params[0]&params[1]
		if params[0] == params[1] {
			if err = ic.SetLocation(params[2], 1); err != nil {
				return
			}
		} else {
			if err = ic.SetLocation(params[2], 0); err != nil {
				return
			}
		}
		return ic.Increment(4)
	},
})
// ChangeRelativeBase adjusts the relative base of the computer
// by its parameter.
//
// Memory:
// 9 ARG1
// Procedure:
// relativeBase += mem[ARG1]
// pc += 2
var ChangeRelativeBase = NewModule(ModuleConfig{
	Opcode:        9,
	Mnemonic:      "RELBASE",
	Parameterized: true,
	Function: func(ic *Intcode) (err error) {
		var params []int64
		if params, err = ic.GetNext(1); err != nil {
			return
		}
		if err = getFromMemory(ic.Current()/100, params, ic, false); err != nil {
			return
		}
		// now pull out an ic.AdjustRelativeBase
		ic.AdjustRelativeBase(params[0])
		return ic.Increment(2)
	},
})
// InstallAdderMultiplier installs the Adder and Multiplier modules
// to the Intcode computer.
func InstallAdderMultiplier(ic *Intcode) {
	ic.Install(Adder)
	ic.Install(Multiplier)
	// Idiom fix: removed a redundant bare `return` at the end of this
	// void function.
}
// InstallJumpers installs the
// JumpIfFalse, JumpIfTrue, LessThan, and Equals modules.
func InstallJumpers(ic *Intcode) {
	// Installation order matches the original hand-written sequence.
	for _, m := range []*Module{JumpIfFalse, JumpIfTrue, LessThan, Equals} {
		ic.Install(m)
	}
}
|
package main
import (
"fmt"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/jraams/aoc-2020/helpers"
)
func main() {
// Load input from file
inputPath, _ := filepath.Abs("input")
lines := helpers.GetInputValues(inputPath)
rules, messages := load(lines)
// Part 1
a := solve(rules, messages, false)
fmt.Printf("Solution day 19 part a: %d\n", a)
// Part 2
b := solve(rules, messages, true)
fmt.Printf("Solution day 19 part b: %d\n", b)
}
// rule is one grammar production: either a literal character (char
// non-empty) or one or more alternative lists of sub-rule numbers.
type rule struct {
	char            string
	subRuleNumLists [][]int
}
// load parses the puzzle input into the rule set and the message list.
// Rules come first, one per line as "<num>: <spec>"; a blank line
// separates them from the messages.
func load(lines []string) (map[int]rule, []string) {
	rules := make(map[int]rule)
	var messages []string

	parsingRules := true
	for _, line := range lines {
		if line == "" {
			// Blank separator: everything that follows is a message.
			parsingRules = false
			continue
		}
		if !parsingRules {
			messages = append(messages, line)
			continue
		}

		splitLine := strings.Split(line, ": ")
		num, _ := strconv.Atoi(splitLine[0])
		spec := splitLine[1]

		var parsed rule
		switch {
		case strings.Contains(spec, "|"):
			// Multiple alternative lists of sub-rule numbers.
			for _, alt := range strings.Split(spec, " | ") {
				parsed.subRuleNumLists = append(parsed.subRuleNumLists, helpers.TranslateStringArrToIntArr(strings.Split(alt, " ")))
			}
		case strings.Contains(spec, "\""):
			// A quoted literal character.
			parsed.char = strings.ReplaceAll(spec, "\"", "")
		default:
			// A single sequence of sub-rule numbers.
			parsed.subRuleNumLists = [][]int{helpers.TranslateStringArrToIntArr(strings.Split(spec, " "))}
		}
		rules[num] = parsed
	}
	return rules, messages
}
// getRegex recursively builds a regular expression that matches rule
// rootRuleNum. When useFixedRules is true, the part-2 replacements are
// applied: rule 8 ("42 | 42 8") becomes "42+" and rule 11
// ("42 31 | 42 11 31") becomes "42{n}31{n}", where the literal "n" is
// later substituted by the caller with a concrete repetition count.
//
// Generalized from the original: any number of alternatives is now
// supported (the original handled exactly one or exactly two), and
// strings.Builder replaces repeated string concatenation.
func getRegex(rules map[int]rule, rootRuleNum int, useFixedRules bool) string {
	// Fix rules 8 & 11 (part 2 only).
	if useFixedRules {
		if rootRuleNum == 8 {
			return getRegex(rules, 42, useFixedRules) + "+"
		}
		if rootRuleNum == 11 {
			return getRegex(rules, 42, useFixedRules) + "{n}" + getRegex(rules, 31, useFixedRules) + "{n}"
		}
	}

	rootRule := rules[rootRuleNum]

	// Leaf: a literal character.
	if rootRule.char != "" {
		return rootRule.char
	}

	// A single sequence of sub-rules: concatenate without grouping
	// (matches the original's ungrouped output for this case).
	if len(rootRule.subRuleNumLists) == 1 {
		var b strings.Builder
		for _, subRuleNum := range rootRule.subRuleNumLists[0] {
			b.WriteString(getRegex(rules, subRuleNum, useFixedRules))
		}
		return b.String()
	}

	// Two or more alternatives: group them as (alt1|alt2|...).
	if len(rootRule.subRuleNumLists) > 1 {
		var b strings.Builder
		b.WriteString("(")
		for i, alt := range rootRule.subRuleNumLists {
			if i > 0 {
				b.WriteString("|")
			}
			for _, subRuleNum := range alt {
				b.WriteString(getRegex(rules, subRuleNum, useFixedRules))
			}
		}
		b.WriteString(")")
		return b.String()
	}
	// No literal and no sub-rules: nothing to match.
	return ""
}
// solve counts how many messages fully match rule 0.
//
// With useFixedRules the generated expression contains "{n}"
// placeholders (from rule 11), which demand equal repetition counts on
// both sides — inexpressible in RE2 — so concrete counts n = 1..maxN
// are tried and a message is accepted if any variant matches.
func solve(rules map[int]rule, messages []string, useFixedRules bool) int {
	regex := "^" + getRegex(rules, 0, useFixedRules) + "$"

	// BUG FIX: the bounds were inverted. Only the part-2 (fixed-rules)
	// regex contains the "{n}" placeholder and therefore needs the
	// sweep over candidate counts; part 1 needs a single pass.
	maxN := 1
	if useFixedRules {
		maxN = 10
	}

	// Pre-compile one regex per candidate n; the original recompiled
	// the same expression for every message, and ignored the Compile
	// error (a nil *Regexp would have panicked on Match).
	var variants []*regexp.Regexp
	for n := 1; n <= maxN; n++ {
		expanded := strings.ReplaceAll(regex, "n", strconv.Itoa(n))
		if r, err := regexp.Compile(expanded); err == nil {
			variants = append(variants, r)
		}
	}

	matchedMessageCount := 0
	for _, message := range messages {
		for _, r := range variants {
			if r.MatchString(message) {
				matchedMessageCount++
				break
			}
		}
	}
	return matchedMessageCount
}
|
/*
Copyright (C) 2016 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"bufio"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"runtime"
"time"
)
// DefaultRoundTripper mirrors the standard library's default transport
// settings; tests use it to restore real HTTP behaviour after mocking
// (see ResetDefaultRoundTripper).
var DefaultRoundTripper http.RoundTripper = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).DialContext,
	MaxIdleConns:          100,
	IdleConnTimeout:       90 * time.Second,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
}
var (
	// b is the absolute path of this source file (via runtime.Caller)
	// and basepath its directory; they let the mock locate testdata
	// relative to the package instead of the working directory.
	_, b, _, _ = runtime.Caller(0)
	basepath = filepath.Dir(b)
)
// ResetDefaultRoundTripper restores the real (non-mocked) transport on
// http.DefaultClient after a test that installed MockRoundTripper.
func ResetDefaultRoundTripper() {
	http.DefaultClient.Transport = DefaultRoundTripper
}
// MockRoundTripper mocks HTTP downloads of oc binaries.
type MockRoundTripper struct {
	// delegate handles every request that is not intercepted.
	delegate http.RoundTripper
}
// NewMockRoundTripper returns a MockRoundTripper that falls back to
// DefaultRoundTripper for requests it does not intercept.
func NewMockRoundTripper() http.RoundTripper {
	return &MockRoundTripper{DefaultRoundTripper}
}
// downloadRE matches oc client-tools download URLs (capture group 1)
// and CHECKSUM requests (capture group 2). Hoisted to package level
// and compiled once with MustCompile; the original recompiled it on
// every request and discarded the Compile error.
var downloadRE = regexp.MustCompile(".*(openshift-origin-client-tools.*)&|.*(CHECKSUM).*")

// RoundTrip serves intercepted oc-binary download requests from the
// local test/testdata directory and proxies everything else to the
// delegate transport.
func (t *MockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// for now only proxy the actual download requests
	match := downloadRE.FindStringSubmatch(req.URL.String())
	if match != nil {
		filename := match[1]
		if filename == "" {
			filename = match[2]
		}
		response := &http.Response{
			Header:     make(http.Header),
			Request:    req,
			StatusCode: http.StatusOK,
		}
		response.Header.Set("Content-Type", "application/octet-stream")
		file, err := os.Open(filepath.Join(basepath, "..", "..", "test", "testdata", filename))
		if err != nil {
			// Missing fixture is a test-setup bug; fail loudly.
			panic(err)
		}
		// NOTE(review): NopCloser means closing the response body does
		// NOT close the file, so the descriptor lives until GC —
		// consider making the *os.File itself the Body.
		response.Body = ioutil.NopCloser(bufio.NewReader(file))
		return response, nil
	}
	// otherwise delegate
	return t.delegate.RoundTrip(req)
}
|
// Package integration only contains integration tests
package integration
|
package selector
import (
"fmt"
"golang.org/x/net/html"
"os"
"testing"
)
// TestQuerySelector exercises QuerySelector and QuerySelectorAll
// against the checked-in ./test.html fixture, querying by id, class,
// attribute and tag.
//
// NOTE(review): the test only fails on lookup errors; the found nodes
// are printed rather than asserted — consider asserting expected
// content against the fixture.
func TestQuerySelector(t *testing.T) {
	// Parse the HTML fixture that all queries below run against.
	file, err := os.Open("./test.html")
	if err != nil {
		t.Error(err)
		return
	}
	doc, err := html.Parse(file)
	if err != nil {
		t.Error(err)
		return
	}
	node := NewNode(doc)
	// Lookup by element id.
	query := &Query{
		Id: "test",
	}
	testIdNode, err := node.QuerySelector(query)
	if err != nil {
		t.Error(err)
		return
	}
	fmt.Printf("Found node by id: \n%v\n\n", testIdNode)
	// Lookup by class, plus inner-HTML and text extraction.
	query = &Query{
		Class: "test-class-2",
	}
	classNode, err := node.QuerySelector(query)
	if err != nil {
		t.Error(err)
		return
	}
	fmt.Printf("Found node by class: \n%v\n\n", classNode)
	fmt.Printf("Inner Content: \n%v\n\n", classNode.Inner())
	fmt.Printf("Text Content: \n%v\n\n", classNode.Text())
	// Lookup by class on a nested element.
	query = &Query{
		Class: "nested-class",
	}
	nestedNode, err := node.QuerySelector(query)
	if err != nil {
		t.Error(err)
		return
	}
	fmt.Printf("Found nested node by class: \n%v\n\n", nestedNode)
	// Lookup by attribute key/value pair.
	query = &Query{
		Attributes: []Attribute{
			{
				Key:   "type",
				Value: "checkbox",
			},
		},
	}
	attrNode, err := node.QuerySelector(query)
	if err != nil {
		t.Error(err)
		return
	}
	fmt.Printf("Found node by attribute [type=\"checkbox\"]: \n%v\n\n", attrNode)
	// Lookup by tag name.
	query = &Query{
		Tag: "input",
	}
	tagNode, err := node.QuerySelector(query)
	if err != nil {
		t.Error(err)
		return
	}
	fmt.Printf("Found node by Tag \"input\": \n%v\n\n", tagNode)
	// Multi-result lookup by class.
	query = &Query{
		Class: "test-class",
	}
	nodes, err := node.QuerySelectorAll(query)
	if err != nil {
		t.Error(err)
		return
	}
	fmt.Printf("Found all nodes with class \"test-class\": \n%v\n\n", nodes)
}
|
package Auxiliar
import "fmt"
// Escrever logs a message to the screen and then calls the unexported
// escrever2 helper (defined elsewhere in this package).
func Escrever() {
	fmt.Println("Writing the package auxiliary")
	escrever2()
}
|
package main
import (
"fmt"
)
// calcCubes sums the cubes of the decimal digits of num and sends the
// result on cuchan. (Formatting brought up to gofmt: stray semicolons
// and missing operator spacing removed.)
func calcCubes(num int, cuchan chan int) {
	sum := 0
	for num != 0 {
		digit := num % 10
		num = num / 10
		sum = sum + digit*digit*digit
	}
	cuchan <- sum
}
// calcSqares sums the squares of the decimal digits of num and sends
// the result on sqchan. (Formatting brought up to gofmt.)
// NOTE(review): the name keeps the original "Sqares" spelling because
// callers reference it; "calcSquares" would be the real fix.
func calcSqares(num int, sqchan chan int) {
	sum := 0
	for num != 0 {
		digit := num % 10
		num = num / 10
		sum = sum + digit*digit
	}
	sqchan <- sum
}
// main computes the digit-square sum and digit-cube sum of a sample
// number concurrently and prints their total.
func main() {
	num := 1001
	sqchan := make(chan int)
	cuchan := make(chan int)
	go calcSqares(num, sqchan)
	go calcCubes(num, cuchan)
	squares, cubes := <-sqchan, <-cuchan
	// Typo fix in the output message: "vales" -> "values".
	fmt.Println("Final values", squares+cubes)
}
|
// +build !windows
package config
// getSearchPaths returns the configuration directories scanned on
// non-Windows systems: the working directory first, then the
// system-wide /etc/bitmaelum directory.
func getSearchPaths() []string {
	paths := []string{
		"./",
		"/etc/bitmaelum",
	}
	return paths
}
|
/*
Copyright 2019 Dmitry Kolesnikov, All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gouldian_test
import (
"errors"
"testing"
µ "github.com/fogfish/gouldian/v2"
"github.com/fogfish/gouldian/v2/mock"
"github.com/fogfish/it"
)
// TestEndpointThen verifies that Then sequences two endpoints: when
// the first succeeds, the composite returns whatever the second one
// returns.
func TestEndpointThen(t *testing.T) {
	var ok = errors.New("b")
	var a µ.Endpoint = func(x *µ.Context) error { return nil }
	var b µ.Endpoint = func(x *µ.Context) error { return ok }
	var c µ.Endpoint = a.Then(b)
	it.Ok(t).
		If(c(mock.Input())).Should().Equal(ok)
}
// TestEndpointOr verifies that Or returns the matching endpoint's
// result and only falls through to the alternative on µ.ErrNoMatch —
// in either operand order.
func TestEndpointOr(t *testing.T) {
	var ok = errors.New("a")
	var a µ.Endpoint = func(x *µ.Context) error { return ok }
	var b µ.Endpoint = func(x *µ.Context) error { return µ.ErrNoMatch }
	t.Run("a", func(t *testing.T) {
		var c µ.Endpoint = a.Or(b)
		it.Ok(t).
			If(c(mock.Input())).Should().Equal(ok)
	})
	t.Run("b", func(t *testing.T) {
		var c µ.Endpoint = b.Or(a)
		it.Ok(t).
			If(c(mock.Input())).Should().Equal(ok)
	})
}
|
package service
import (
"fmt"
"github.com/TRON-US/soter-order-service/common/constants"
"github.com/TRON-US/soter-order-service/common/errorm"
"github.com/TRON-US/soter-order-service/logger"
"github.com/TRON-US/soter-order-service/model"
"github.com/TRON-US/soter-order-service/utils"
"github.com/TRON-US/chaos/network/slack"
)
// notifySlackOrderError logs errMessage against the given function tag
// and fires a best-effort slack notification in the background.
// Extracted helper: the same four-line boilerplate appeared four times
// in UpdateOrderController.
func (s *Server) notifySlackOrderError(errMessage, function string) {
	go func() {
		logger.Logger.Errorw(errMessage, "function", function)
		_ = slack.SendSlackNotification(s.Config.Slack.SlackWebhookUrl,
			utils.ErrorRequestBody(errMessage, function, constants.SlackNotifyLevel0),
			s.Config.Slack.SlackNotificationTimeout,
			slack.Priority0, slack.Priority(s.Config.Slack.SlackPriorityThreshold))
	}()
}

// UpdateOrderController attaches a session id and node ip to a pending
// charge order inside a database transaction. Failures are returned to
// the caller and additionally reported to slack in the background.
//
// NOTE(review): fileHash is currently unused — confirm whether it was
// meant to be validated or persisted here.
func (s *Server) UpdateOrderController(fileHash, sessionId, nodeIp string, orderId int64) error {
	// Get order info by order id.
	order, err := s.DbConn.QueryOrderInfoById(orderId)
	if err != nil {
		// An empty result is an expected condition and not reported.
		if err.Error() != errorm.QueryResultEmpty {
			s.notifySlackOrderError(fmt.Sprintf("orderId: [%v] query order info error, reasons: [%v]", orderId, err), constants.QueryOrderInfoModel)
		}
		return err
	}
	// Only pending orders of type charge may be updated.
	if order.Status != constants.OrderPending {
		return errorm.OrderStatusIllegal
	}
	if order.OrderType != constants.Charge {
		return errorm.OrderTypeIllegal
	}
	// Open transaction.
	session := s.DbConn.DB.NewSession()
	if err = session.Begin(); err != nil {
		s.notifySlackOrderError(fmt.Sprintf("orderId: [%v] open transaction error, reasons: [%v]", orderId, err), constants.SessionBegin)
		return err
	}
	defer session.Close()
	// Update order session id and node ip.
	if err = model.UpdateOrderSessionById(session, sessionId, nodeIp, orderId); err != nil {
		_ = session.Rollback()
		s.notifySlackOrderError(fmt.Sprintf("orderId: [%v] update order session id and node ip error, reasons: [%v]", orderId, err), constants.UpdateOrderSessionByIdModel)
		return err
	}
	// Submit transaction.
	if err = session.Commit(); err != nil {
		s.notifySlackOrderError(fmt.Sprintf("orderId: [%v] commit transaction error, reasons: [%v]", orderId, err), constants.SessionCommit)
		return err
	}
	return nil
}
|
package api
import (
"math"
"net/http"
"strconv"
"strings"
"gopkg.in/gographics/imagick.v3/imagick"
"onikur.com/text-to-img-api/conf"
"onikur.com/text-to-img-api/utils"
)
// extansion (sic — "extension") is the image file extension this API
// serves and the format handed to imagick.
var extansion = "png"
// Options holds the text-rendering parameters for one request.
type Options struct {
	Font string // font name; must be whitelisted in the config
	FontSize float64
	FontColor string // hex color without the leading '#'
	LineMaxChar int // wrap width in runes; 0 disables wrapping
}
// NewOptions returns an Options populated with the configured font
// defaults and line wrapping disabled.
func NewOptions() Options {
	return Options{
		Font:        conf.Get().Font.Defaults.Font,
		FontSize:    conf.Get().Font.Defaults.FontSize,
		FontColor:   conf.Get().Font.Defaults.FontColor,
		LineMaxChar: 0,
	}
}
// MakeTextHandler renders text taken from the request URL into an
// image (see ServeHTTP).
type MakeTextHandler struct{}
// ServeHTTP renders the text embedded in the URL path into an image.
// URLs missing the image extension are permanently redirected to their
// canonical ".png" form with the query string preserved.
func (h *MakeTextHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	if !strings.HasSuffix(req.URL.Path, "."+extansion) {
		q := ""
		if req.URL.RawQuery != "" {
			q = "?" + req.URL.RawQuery
		}
		// Named status constant instead of the bare 301.
		http.Redirect(res, req, req.URL.Path+"."+extansion+q, http.StatusMovedPermanently)
		return
	}
	// The text is whatever sits between the route prefix and the
	// trailing extension.
	text := strings.TrimSuffix(strings.TrimPrefix(strings.TrimPrefix(req.URL.Path, "/-/"), "/api/text/"), extansion)
	// Options from the query string, clamped to the configured bounds.
	qs := req.URL.Query()
	opts := NewOptions()
	if v, err := strconv.ParseFloat(qs.Get("fsize"), 64); err == nil {
		opts.FontSize = math.Max(conf.Get().Font.Defaults.MinFontSize, math.Min(v, conf.Get().Font.Defaults.MaxFontSize))
	}
	if qs.Get("fcolor") != "" {
		opts.FontColor = qs.Get("fcolor")
	}
	if qs.Get("f") != "" {
		// Only fonts whitelisted in the config may be selected.
		if e, ok := conf.Get().Font.Include[qs.Get("f")]; e && ok {
			opts.Font = qs.Get("f")
		}
	}
	// Split to multi-line (LineMaxChar is currently always 0 here).
	if opts.LineMaxChar > 0 {
		text = utils.StringsInsertRuneStep(text, opts.LineMaxChar, "\n")
	}
	// Start drawing: measure the text, size the canvas, then render.
	// NOTE(review): imagick call errors are discarded throughout —
	// confirm whether failures should yield a 500 instead of an empty
	// image.
	imagick.Initialize()
	defer imagick.Terminate()
	mw := imagick.NewMagickWand()
	defer mw.Destroy()
	dw := imagick.NewDrawingWand()
	defer dw.Destroy()
	mw.ReadImage("xc:transparent")
	utils.ImagickDrawSetFont(mw, dw, opts.Font, opts.FontSize, "#"+opts.FontColor)
	sw, sh := utils.ImagickGetImageWidthHeightByText(mw, dw, text)
	mw.SetImageAlphaChannel(imagick.ALPHA_CHANNEL_TRANSPARENT)
	mw.SetSize(sw, sh)
	mw.ReadImage("xc:transparent")
	utils.ImagickDrawText(mw, dw, text)
	mw.SetImageFormat(extansion)
	b := mw.GetImageBlob()
	res.Header().Set("Content-Length", strconv.Itoa(len(b)))
	res.Header().Set("Content-Type", "image/"+extansion)
	res.Write(b)
}
|
package routers
import (
"net/http"
"github.com/gabriel70g/twittor/bd"
"github.com/gabriel70g/twittor/models"
)
// BajaRalacion (sic) removes the relation between the authenticated
// user (IDUsuario, populated by middleware elsewhere in the package)
// and the user given in the "id" query parameter.
func BajaRalacion(w http.ResponseWriter, r *http.Request) {
	ID := r.URL.Query().Get("id")

	var t models.Relacion
	t.UsuarioID = IDUsuario
	t.UsuarioRelacionID = ID

	status, err := bd.BorroRelacion(t)
	if err != nil {
		// Typo fix in the user-facing message: "realción" -> "relación".
		http.Error(w, "Ocurrió un error al intentar borrar relación", http.StatusBadRequest)
		return
	}
	if !status {
		http.Error(w, "No se ha logrado borrar la relación", http.StatusBadRequest)
		return
	}
	// NOTE(review): 201 Created is unusual for a delete operation;
	// kept for backward compatibility — confirm whether 200/204 was
	// intended.
	w.WriteHeader(http.StatusCreated)
}
|
package blockmanager
import (
"fmt"
"github.com/reed/blockchain/config"
"github.com/reed/blockchain/store"
"github.com/reed/crypto"
"github.com/reed/database/leveldb"
"github.com/reed/types"
dbm "github.com/tendermint/tmlibs/db"
"math/big"
"math/rand"
"strconv"
"testing"
"time"
)
// TestBlockManager_calcFork builds a main chain and a longer fork that
// diverge after height 9 (see getMockBlock), then checks that calcFork
// returns reserve and discard lists that meet at the same common
// ancestor.
func TestBlockManager_calcFork(t *testing.T) {
	mainChain, forkChain, highBlock, curBlock := getMockBlock()
	fmt.Printf("highBlock %d %x \n", highBlock.Height, highBlock.GetHash())
	fmt.Printf("curBlock %d %x \n", curBlock.Height, curBlock.GetHash())
	bi := &BlockIndex{
		index: forkChain,
		main:  mainChain,
	}
	bm := &BlockManager{
		blockIndex: bi,
	}
	// Dump both chains for debugging.
	for _, block := range mainChain {
		if block == nil {
			continue
		}
		fmt.Printf("height:%d hash %x prev %x\n", block.Height, block.GetHash(), block.PrevBlockHash)
	}
	fmt.Println("====================================================================================")
	for _, block := range forkChain {
		fmt.Printf("height:%d hash %x prev %x\n", block.Height, block.GetHash(), block.PrevBlockHash)
	}
	fmt.Println("====================================================================================")
	reserves, discards, err := bm.calcFork(curBlock, highBlock)
	if err != nil {
		t.Error(err)
	}
	for _, block := range reserves {
		fmt.Printf("height:%d hash %x prev %x\n", block.Height, block.GetHash(), block.PrevBlockHash)
	}
	fmt.Println("====================================================================================")
	for _, block := range discards {
		fmt.Printf("height:%d hash %x prev %x\n", block.Height, block.GetHash(), block.PrevBlockHash)
	}
	fmt.Println("====================================================================================")
	// Both lists must end at children of the same common ancestor.
	if reserves[len(reserves)-1].PrevBlockHash != discards[len(discards)-1].PrevBlockHash {
		t.Error("calcFork error")
	}
}
// getMockBlock fabricates test chains: a common prefix of blocks 1-9,
// a "main" branch extending to height 12 and a "fork" branch extending
// to height 13. It returns the main chain (indexed by height), the
// fork index (keyed by hash, containing the prefix plus fork blocks),
// the main-chain tip and the fork tip.
func getMockBlock() ([]*types.Block, map[types.Hash]*types.Block, *types.Block, *types.Block) {
	r := rand.New(rand.NewSource(time.Now().Unix()))
	rn := r.Intn(9999999999)
	mainChain := make([]*types.Block, 20, 20)
	//for i := 0; i < 20; i++ {
	//	fmt.Println(i)
	//	mainChain[i]=nil
	//}
	forkChain := map[types.Hash]*types.Block{}
	prev := types.DefHash()
	// common prefix shared by both branches (heights 1-9)
	for i := 1; i < 10; i++ {
		flag := strconv.Itoa(i)
		mr := types.BytesToHash(crypto.Sha256([]byte("MerkleRootHash" + flag)))
		header := &types.BlockHeader{Height: uint64(i),
			PrevBlockHash:  prev,
			MerkleRootHash: mr,
			Timestamp:      uint64(time.Now().Unix()),
			Nonce:          uint64(rn),
			BigNumber:      *big.NewInt(int64(rn)),
			Version:        1,
		}
		block := &types.Block{BlockHeader: *header}
		mainChain[i] = block
		forkChain[block.GetHash()] = block
		prev = block.GetHash()
	}
	forkPre := prev
	var highBlock *types.Block
	// main branch: heights 10-12; highBlock ends up as the main tip.
	for i := 10; i < 13; i++ {
		flag := strconv.Itoa(i)
		flag += "main-"
		mr := types.BytesToHash(crypto.Sha256([]byte("MerkleRootHash" + flag)))
		header := &types.BlockHeader{Height: uint64(i),
			PrevBlockHash:  prev,
			MerkleRootHash: mr,
			Timestamp:      uint64(time.Now().Unix()),
			Nonce:          uint64(rn),
			BigNumber:      *big.NewInt(int64(rn)),
			Version:        1,
		}
		block := &types.Block{BlockHeader: *header}
		mainChain[i] = block
		highBlock = block
		prev = block.GetHash()
	}
	var curBlock *types.Block
	// fork branch: heights 10-13 built on the common prefix; curBlock
	// ends up as the fork tip.
	for i := 10; i < 14; i++ {
		flag := strconv.Itoa(i)
		flag += "fork-"
		mr := types.BytesToHash(crypto.Sha256([]byte("MerkleRootHash" + flag)))
		header := &types.BlockHeader{Height: uint64(i),
			PrevBlockHash:  forkPre,
			MerkleRootHash: mr,
			Timestamp:      uint64(time.Now().Unix()),
			Nonce:          uint64(rn),
			BigNumber:      *big.NewInt(int64(rn)),
			Version:        1,
		}
		block := &types.Block{BlockHeader: *header}
		forkChain[block.GetHash()] = block
		forkPre = block.GetHash()
		curBlock = block
	}
	return mainChain, forkChain, highBlock, curBlock
}
// getStore opens a LevelDB-backed store in the configured log
// directory (test helper; not referenced in this test file's visible
// tests).
func getStore() store.Store {
	return leveldb.NewStore(dbm.NewDB("core", dbm.LevelDBBackend, config.LogDir()))
}
|
package main
import "fmt"
/* Declare a global variable (original comment: 声明全局变量). */
var g int = 200
// main overwrites the package-level g with the sum of two locals and
// prints the result.
func main() {
	// Declare and initialize local variables (original comment:
	// 声明局部变量).
	a, b := 100, 200
	g = a + b
	fmt.Printf("g= %d", g)
}
|
package main
import (
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"net/url"
	"os"
	"os/signal"
	"strconv"
	"time"

	"github.com/gorilla/websocket"
)
// one opens a websocket to the HitBTC API, subscribes to the order
// book for the given symbol (combo) and stores each update in the
// shared cache (via Set, defined elsewhere) under "HiBtc:<symbol>".
// It returns when the read loop fails or the process is interrupted.
//
// flag_name must be unique per call because a command-line flag is
// registered for the service address.
//
// NOTE(review): registering flags and calling flag.Parse from
// concurrently-started goroutines races on the global flag set —
// confirm this is only ever driven by main's startup loop.
//
// Roughly 70 lines of commented-out experiments were removed from the
// read loop; the live code is unchanged.
func one(combo string, flag_name string) {
	var addr = flag.String(flag_name, "api.hitbtc.com", "http service address")
	flag.Parse()
	log.SetFlags(0)

	// Stop cleanly on Ctrl-C.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	u := url.URL{Scheme: "wss", Host: *addr, Path: "api/2/ws"}
	log.Printf("connecting to %s", u.String())
	c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer c.Close()

	done := make(chan struct{})
	// Subscribe, then read updates until the connection errors. Note:
	// this call is synchronous, so the select below only runs after
	// the read loop has already finished.
	func() string {
		type Params map[string]string
		type Req struct {
			Method string `json:"method"`
			Params `json:"params"`
			Id     int `json:"id"`
		}
		r := Req{"subscribeOrderbook", Params{"symbol": combo}, 123}
		c.WriteJSON(r)
		defer close(done)
		for {
			_, message, err := c.ReadMessage()
			if err != nil {
				log.Println("read:", err)
				return "error"
			}
			// Decode only the order-book fields we re-publish.
			type Message struct {
				Jsonrpc string `json:"_"`
				Method  string `json:"method"`
				Params  struct {
					Asks []struct {
						Price string `json:"price"`
						Size  string `json:"size"`
					} `json:"ask"`
					Bids []struct {
						Price string `json:"price"`
						Size  string `json:"size"`
					} `json:"bid"`
					Symbol   string `json:"symbol"`
					UpdateId int    `json:"sequence"`
				} `json:"params"`
			}
			var m Message
			json.Unmarshal(message, &m)
			// Re-marshal in the normalized cross-exchange format and
			// store it in the shared cache.
			res := map[string]interface{}{"EventType": m.Method, "Symbol": m.Params.Symbol, "UpdateId": m.Params.UpdateId,
				"Bids": m.Params.Bids, "Asks": m.Params.Asks}
			res1, _ := json.Marshal(res)
			Set("HiBtc:"+combo, res1)
		}
	}()
	// NOTE(review): the ticker is never read in the select below;
	// kept to preserve existing behaviour (and the time import).
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-interrupt:
			log.Println("interrupt")
			return
		}
	}
}
// main starts one websocket receiver goroutine per symbol given on
// the command line, then blocks on stdin to keep the process alive.
func main() {
	args := os.Args[1:]
	fmt.Print(args)
	for i := range args {
		fmt.Println(i)
		// BUG FIX: string(i+1) converts the int to a rune (e.g.
		// "\x01"), not its decimal text; strconv.Itoa produces the
		// intended "addr1", "addr2", ... flag names.
		go one(args[i], "addr"+strconv.Itoa(i+1))
	}
	fmt.Scanln()
}
// {"e":"depthUpdate","E":1526186677776,"s":"BNBBTC","U":71377818,"u":71377822,"b":[["0.00152750","84.28000000",[]]],"a":[["0.00152970","0.00000000",[]],["0.00153160","82.40000000",[]]]}
//go run hibtc_websocket.go redis_init.go ETHBTC
//go run redis_init.go ETHBTC
//docker run --net=host -it --entrypoint "bash" binance_web_socket_receiver
//docker run --net=host -it --name binance binance_web_socket_receiver
//docker run -it binance_web_socket_receiver bnbbtc --link freecache:freecache binance_web_socket_receiver
//docker build -t binance_web_socket_receiver .
//docker run -it --link freecache:freecache --name binance binance_web_socket_receiver bnbbtc
//docker run -it -p 6379 --link redis1:redis --name client2 redis sh
//recived:{"jsonrpc":"2.0","method":"snapshotOrderbook",
// "params":80130","size":"0{"ask":[{"price":"0.080126","size":"0.103"},{"price":"0.080129","size":"0.409"},
//// {"price":"0.0.001"},{"price":"0.080133","size":"0.001"},{"price":"0.080138","size":"0.680"}]
// ,"symbol":"ETHBTC","sequence":860363}}
//go run hibtc_websocket.go ETHBTC
|
package api
import (
	"bytes"
	"encoding/json"
	"errors"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
	"time"

	utils "github.com/kevinbarbary/go-lms/utils"
)
// @todo - Time.Unix ?

// Timestamp is the integer timestamp returned by the API.
// NOTE(review): ToTime divides the raw value by 10000, suggesting
// units of 100µs — confirm against the API documentation.
type Timestamp int64

// JsonDate is a time.Time that unmarshals from "2006-01-02".
type JsonDate time.Time

// JsonDateTime is a time.Time that unmarshals from
// "2006-01-02 15:04:05".
type JsonDateTime time.Time

// Params is a generic JSON-object payload for API requests.
type Params map[string]interface{}
// MergeParams copies every entry of b into a (overwriting duplicate
// keys) and returns a. Note that a is mutated in place.
func MergeParams(a, b Params) Params {
	for k, v := range b {
		a[k] = v
	}
	return a
}
// UnmarshalJSON parses a quoted "2006-01-02 15:04:05" value; a JSON
// null leaves the receiver unchanged.
func (j *JsonDateTime) UnmarshalJSON(b []byte) error {
	if string(b) == "null" {
		return nil
	}
	s := strings.Trim(string(b), "\"")
	t, err := time.Parse("2006-01-02 15:04:05", s)
	if err != nil {
		return err
	}
	*j = JsonDateTime(t)
	return nil
}
// NotSet reports whether j is the zero value, i.e. was never parsed.
func (j JsonDateTime) NotSet() bool {
	return j == JsonDateTime{}
}
// ToTime converts j back to a plain time.Time.
func (j JsonDateTime) ToTime() time.Time {
	return time.Time(j)
}
// After reports whether j is chronologically after d.
func (j JsonDateTime) After(d JsonDateTime) bool {
	return j.ToTime().After(d.ToTime())
}
// UnmarshalJSON parses a quoted "2006-01-02" date value.
// NOTE(review): unlike JsonDateTime.UnmarshalJSON, a JSON null is not
// special-cased here and will return a parse error — confirm intent.
func (j *JsonDate) UnmarshalJSON(b []byte) error {
	s := strings.Trim(string(b), "\"")
	t, err := time.Parse("2006-01-02", s)
	if err != nil {
		return err
	}
	*j = JsonDate(t)
	return nil
}
//func (j JsonDate) MarshalJSON() ([]byte, error) {
// return json.Marshal(j)
//}
// ToTime converts j back to a plain time.Time.
func (j JsonDate) ToTime() time.Time {
	return time.Time(j)
}
// Format renders the date using the given time reference layout.
func (j JsonDate) Format(s string) string {
	return j.ToTime().Format(s)
}
// ToDate renders the date as "YYYY-MM-DD".
func (j JsonDate) ToDate() string {
	return j.Format("2006-01-02")
}
// EndOf returns the exclusive end of the day j falls on.
func (j JsonDate) EndOf() time.Time {
	// returns the start of the next day after j, i.e. midnight...
	// convert j to date (remove the time), add one day then return the result as time.Time
	j1 := j.ToTime()
	return time.Date(j1.Year(), j1.Month(), j1.Day(), 0, 0, 0, 0, j1.Location()).AddDate(0, 0, 1)
}
// Creds builds the authentication parameters for the given site.
// The error result is currently always nil; kept for API stability.
func Creds(site, key string) (Params, error) {
	return Params{"SiteID": site, "SiteKey": key}, nil
}
// response is the common envelope returned by every API endpoint.
type response struct {
	Data interface{} `json:"data"`
	Error string `json:"error"`
	Help string `json:"help"`
	Timestamp Timestamp `json:"timestamp"`
	Token string `json:"token"`
	User string `json:"user"`
	Session int64 `json:"session"`
}
// extract unmarshals an API response envelope and returns its parts
// (data, error, help, timestamp, token, user). On malformed JSON it
// logs the payload and returns zero values instead of failing.
// Note: the Session field of the envelope is not surfaced here.
func extract(data string) (interface{}, string, string, Timestamp, string, string) {
	jsonData := []byte(data)
	var resp response
	err := json.Unmarshal(jsonData, &resp)
	if err != nil {
		log.Print("JSON Extract Error... ", err.Error())
		log.Print("JSON Response: ", data)
		return nil, "", "", 0, "", ""
	}
	return resp.Data, resp.Error, resp.Help, resp.Timestamp, resp.Token, resp.User
}
// Call invokes the Course-Source RESTful API. If the response is 401
// Unauthorized and retry is set, a fresh auth token is requested and
// the call is repeated once.
func Call(method, endpoint, token, useragent, site string, payload Params, retry bool) (string, error) {
	data, err, code := request(useragent, method, endpoint, token, payload)
	if code == http.StatusUnauthorized && retry {
		// auth fail - try again with a new token
		log.Print("API Call unauthorized - trying again with new auth token")
		// BUG FIX: the original tested the stale `token` variable
		// after fetching newToken, and then dereferenced a
		// possibly-nil err via err.Error(); test the token we just
		// requested (and its own error) instead.
		newToken, authErr := Auth(site, "", "", useragent, false)
		if newToken == "" {
			if authErr == nil {
				authErr = errors.New("empty auth token")
			}
			log.Print("Auth token request failed... ", authErr.Error())
			panic(authErr)
		}
		log.Print("New auth token received")
		data, err, _ = request(useragent, method, endpoint, newToken, payload)
		if err != nil {
			log.Print("API Call retry fail... ", err.Error())
			panic(err)
		}
		log.Print("API Call retry success")
	}
	return data, err
}
// request performs a single GET or POST against the API and returns
// the response body, any transport error, and the HTTP status code.
// Any other method yields http.StatusNotImplemented.
//
// The formerly duplicated GET and POST paths (the author's own
// "@todo - reduce duplication") are unified; log output is unchanged.
func request(useragent, method, endpoint, token string, payload Params) (string, error, int) {
	// Build the request body: empty for GET, marshalled payload for
	// POST.
	var buf *bytes.Buffer
	switch method {
	case "GET":
		buf = bytes.NewBuffer(nil)
	case "POST":
		data, err := json.Marshal(payload)
		if err != nil {
			log.Print("API Payload Marshall Error... ", err.Error())
			return "", err, http.StatusInternalServerError
		}
		buf = bytes.NewBuffer(data)
	default:
		return "", nil, http.StatusNotImplemented
	}

	client := http.Client{
		Timeout: time.Second * 5,
	}
	request, err := http.NewRequest(method, endpoint, buf)
	if err != nil {
		log.Print(utils.Concat("API ", method, " Request Error... "), err.Error())
		return "", err, http.StatusInternalServerError
	}
	request.Header.Add("Accept", "application/json")
	if method == "POST" {
		request.Header.Set("Content-type", "application/json")
	}
	request.Header.Set("User-Agent", useragent)
	if token != "" {
		request.Header.Set("Authorization", utils.Concat("Bearer ", token))
	}
	resp, err := client.Do(request)
	if err != nil {
		log.Print(utils.Concat("API Call Error - invalid response from ", method, "... "), err.Error())
		return "", err, http.StatusInternalServerError
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		log.Print(utils.Concat("API Call ", method, " ", endpoint, " status... "), resp.StatusCode)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Print(utils.Concat("API ", method, " Response - no body... "), err.Error())
		return "", err, http.StatusInternalServerError
	}
	return string(body), nil, resp.StatusCode
}
// ToTime converts the API timestamp to a time.Time.
// NOTE(review): dividing by 10000 implies the raw value is in 100µs
// units — confirm with the API documentation.
func (t Timestamp) ToTime() time.Time {
	return time.Unix(int64(t)/10000, 0)
}
// ToUnix returns the timestamp as Unix seconds.
func (t Timestamp) ToUnix() int64 {
	return t.ToTime().Unix()
}
// ToDate renders the timestamp as "YYYY-MM-DD".
func (t Timestamp) ToDate() string {
	return t.ToTime().Format("2006-01-02")
}
// ToDatetime renders the timestamp as "YYYY-MM-DD HH:MM:SS".
func (t Timestamp) ToDatetime() string {
	return t.ToTime().Format("2006-01-02 15:04:05")
}
// BeforeEnd reports whether t falls before the end of day j.
func (t Timestamp) BeforeEnd(j JsonDate) bool {
	// returns if t is before the end of j, i.e. t < (j + 1 day)
	return t.ToTime().Before(j.EndOf())
}
// Until returns the duration from t to the end of day j.
func (t Timestamp) Until(j JsonDate) time.Duration {
	// returns the duration between t and the end of j, assumes less than a year
	return j.EndOf().Sub(t.ToTime())
}
|
package main
import (
"embed"
"encoding/json"
"fmt"
"github.com/docopt/docopt-go"
"github.com/fatih/color"
"github.com/joeljunstrom/go-luhn"
"io/fs"
"net/http"
"os"
"regexp"
"strconv"
)
// buildNumber is the version string reported by --version.
var buildNumber = "21.05"
// card is the JSON-serializable report for one validated card number.
type card struct {
	Number string // the number exactly as supplied
	Valid bool // Luhn checksum result
	Issuer string // detected network ("visa", "amex", ... or "unknown")
	MII string // major industry identifier category
	PAN string // account-number digit slice (see getPAN)
}
// static holds the embedded web UI assets served at "/".
//go:embed static/dist/*
var static embed.FS
// setupResponse adds permissive CORS headers so the API can be called
// from any origin.
func setupResponse(w *http.ResponseWriter, req *http.Request) {
	(*w).Header().Set("Access-Control-Allow-Origin", "*")
	(*w).Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
	(*w).Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
}
// api handles /api requests: it validates the "card" form value,
// builds the card report and writes it back as JSON.
//
// NOTE(review): the CORS headers are only added after the card work
// and JSON marshalling, so the early "error: null" and 500 responses
// go out without them, and the OPTIONS preflight short-circuit runs
// after the work is already done — confirm whether setupResponse
// should be called first.
func api(w http.ResponseWriter, req *http.Request) {
	if req.FormValue("card") == "" {
		fmt.Fprint(w, "error: null")
		return
	}
	card := getCardInfo(req.FormValue("card"))
	js, err := json.Marshal(card)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	setupResponse(&w, req)
	if (*req).Method == "OPTIONS" {
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(js)
}
// getMII returns the Major Industry Identifier category for the
// card's first digit.
func getMII(pan string) string {
	// BUG FIX: the original indexed pan[0] unconditionally (panic on
	// an empty string) and discarded the Atoi error, which made any
	// non-digit prefix fall through as category 0 ("ISO/TC 68").
	if pan == "" {
		return "no information available"
	}
	miiDigit, err := strconv.Atoi(string(pan[0]))
	if err != nil {
		return "no information available"
	}
	switch miiDigit {
	case 0:
		return "ISO/TC 68"
	case 1, 2:
		return "airlines"
	case 3:
		return "travel and entertainment"
	case 4, 5:
		return "banking and financial"
	case 6:
		return "merchandising and banking/financial"
	case 7:
		return "petroleum"
	case 8:
		return "healthcare and telecommunications"
	case 9:
		return "national assignment"
	default:
		return "no information available"
	}
}
// getIssuer identifies the card network from the PAN's pattern.
// Patterns are checked in a fixed order and the first match wins,
// mirroring the original if/else cascade.
func getIssuer(pan string) string {
	checks := []struct {
		name    string
		pattern string
	}{
		{"visa", "^4\\d{12}(\\d{3})?$"},
		{"mastercard", "^(5[1-5]\\d{4}|677189)\\d{10}$|^(222[1-9]|2[3-6]\\d{2}|27[0-1]\\d|2720)\\d{12}$"},
		{"amex", "^3[47]\\d{13}$"},
		{"discover", "^(6011|65\\d{2})\\d{12}$"},
		{"dankort", "^(5019)\\d{12}$"},
		{"jcb", "^(?:2131|1800|35\\d{3})\\d{11}$"},
		{"maestro", "^(?:5[0678]\\d\\d|6304|6390|67\\d\\d)\\d{8,15}$"},
		{"diners", "^3(?:0[0-5]|[68][0-9])[0-9]{11}$"},
	}
	for _, c := range checks {
		if matched, _ := regexp.MatchString(c.pattern, pan); matched {
			return c.name
		}
	}
	return "unknown"
}
// getPAN returns the digits between the 6-digit issuer prefix and the final
// check digit, or "unknown" for numbers too short to contain them.
func getPAN(pan string) string {
	if len(pan) <= 8 {
		return "unknown"
	}
	return pan[6 : len(pan)-1]
}
// getCardInfo assembles all derived information about a card number:
// Luhn validity, issuer network, industry (MII) and the account digits.
func getCardInfo(pan string) card {
	// Return the literal directly; the old code shadowed the `card` type
	// with a local variable of the same name.
	return card{
		Number: pan,
		Valid:  luhn.Valid(pan),
		Issuer: getIssuer(pan),
		MII:    getMII(pan),
		PAN:    getPAN(pan),
	}
}
// serveWeb serves the embedded static frontend at / and the card API at /api
// on the given TCP port. It blocks until the server stops.
func serveWeb(port string) {
	color.Cyan("Running on port %s", port)
	embedded, err := fs.Sub(static, "static/dist")
	if err != nil {
		panic(err)
	}
	http.Handle("/", http.FileServer(http.FS(embedded)))
	http.HandleFunc("/api", api)
	// ListenAndServe only returns on failure; previously its error was
	// silently discarded and the process exited without explanation.
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		panic(err)
	}
}
// main parses command-line arguments (docopt) and dispatches to the
// requested sub-command. Only the web server is implemented so far.
func main() {
	usage := `validate_cc.
Usage:
  validate_cc card <card_number>...
  validate_cc web [--port=<port>]
  validate_cc -h | --help
  validate_cc --version
Options:
  -h --help      Show this screen.
  --version      Show version.
  --port=<port>  Port to listen on [default: 8090].`
	// Note: the --port description previously read "Speed in knots",
	// a copy-paste leftover from the docopt example.
	arguments, _ := docopt.ParseDoc(usage)
	if version, _ := arguments.Bool("--version"); version {
		color.Magenta("Version %s", buildNumber)
		os.Exit(0)
	}
	if web, _ := arguments.Bool("web"); web {
		port, _ := arguments.String("--port")
		serveWeb(port)
	}
	// TODO: implement the "card" sub-command for CLI validation.
	//if card, _ := arguments.Bool("card"); card {
	//	cardNumber, err := arguments."<card_number>"[0]
	//	if err != nil {
	//		panic(err)
	//	}
	//}
}
|
package guard
import (
"math/rand"
"testing"
"time"
)
// TestConstDeadline checks that the deadline function returned by
// ConstDeadline always yields exactly the instant it was built with.
func TestConstDeadline(t *testing.T) {
	const iterations = 100
	for i := 0; i < iterations; i++ {
		want := time.Now().Add(time.Duration(rand.Int()) * time.Second)
		got := ConstDeadline(want)(nil, nil)
		if got != want {
			t.Fatalf("%s failed on iteration %d with template %v and result %v", t.Name(), i, want, got)
		}
	}
}
// TestDeadlineIn checks that DeadlineIn produces deadlines no later than
// "now + offset" evaluated after the call returns.
func TestDeadlineIn(t *testing.T) {
	const iterations = 100
	for i := 0; i < iterations; i++ {
		offset := time.Duration(rand.Int()) * time.Second
		got := DeadlineIn(offset)(nil, nil)
		// Computed after the call, so it is an upper bound on got.
		limit := time.Now().Add(offset)
		if limit.Before(got) {
			t.Fatalf("%s failed on iteration %d with template %v and result %v", t.Name(), i, offset, got)
		}
	}
}
|
package strip
import (
"awesome-dragon.science/go/goGoGameBot/pkg/format/transformer/tokeniser"
)
// Transformer is a trivial transformer: instead of converting intermediate
// form formatting codes to another representation, it removes them entirely.
type Transformer struct{}

// Transform strips all intermediate-form formatting codes from in, leaving
// plain text.
func (s Transformer) Transform(in string) string { return tokeniser.Strip(in) }

// MakeIntermediate also just strips formatting codes from in; this
// transformer never emits any codes of its own.
func (s Transformer) MakeIntermediate(in string) string { return tokeniser.Strip(in) }
|
package template
import (
"net/http"
"github.com/firefirestyle/engine-v01/oauth/twitter"
"github.com/firefirestyle/engine-v01/prop"
minisession "github.com/firefirestyle/engine-v01/session"
"io/ioutil"
userhundler "github.com/firefirestyle/engine-v01/user/handler"
"golang.org/x/net/context"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
)
// URL routes exposed by the user API template: Twitter OAuth endpoints,
// public user lookup endpoints, and "me" (authenticated user) endpoints.
const (
	UrlTwitterTokenUrlRedirect = "/api/v1/twitter/tokenurl/redirect"
	UrlTwitterTokenCallback    = "/api/v1/twitter/tokenurl/callback"
	UrlUserGet                 = "/api/v1/user/get"
	UrlUserFind                = "/api/v1/user/find"
	UrlUserBlobGet             = "/api/v1/user/getblob"
	UrlUserRequestBlobUrl      = "/api/v1/user/requestbloburl"
	UrlUserCallbackBlobUrl     = "/api/v1/user/callbackbloburl"
	UrlMeLogout                = "/api/v1/me/logout"
	UrlMeUpdate                = "/api/v1/me/update"
	UrlMeGet                   = "/api/v1/me/get"
)
// UserTemplateConfig carries everything needed to wire up the user API:
// datastore kind naming, the blob signing key, master account lists and
// OAuth provider credentials.
type UserTemplateConfig struct {
	KindBaseName    string // datastore kind name; defaults to "FFSUser" when empty
	PrivateKey      string // used as the blob signing key (BlobSign)
	AllowInvalidSSL bool   // forwarded to the Twitter OAuth session
	// Master account lists.
	// NOTE(review): these fields are not referenced in this chunk — confirm
	// how they are consumed elsewhere before documenting their semantics.
	MasterKey     []string
	MasterUser    []string
	MasterAccount []string
	// OAuth provider credentials. Only the Twitter fields are used by the
	// code visible here; the Facebook fields are not referenced in this
	// chunk — TODO confirm their usage.
	TwitterConsumerKey       string
	TwitterConsumerSecret    string
	TwitterAccessToken       string
	TwitterAccessTokenSecret string
	FacebookAppSecret        string
	FacebookAppId            string
}
// UserTemplate bundles the configuration with a lazily-created user handler
// (see GetUserHundlerObj).
type UserTemplate struct {
	config         UserTemplateConfig
	userHandlerObj *userhundler.UserHandler // nil until first requested
}
// NewUserTemplate builds a UserTemplate from config, falling back to the
// kind name "FFSUser" when none was supplied.
func NewUserTemplate(config UserTemplateConfig) *UserTemplate {
	ret := &UserTemplate{config: config}
	if ret.config.KindBaseName == "" {
		ret.config.KindBaseName = "FFSUser"
	}
	return ret
}
// CheckLogin validates the session token carried in the "token" field of the
// request body properties.
func (tmpObj *UserTemplate) CheckLogin(r *http.Request, input *prop.MiniProp, useIp bool) minisession.CheckResult {
	return tmpObj.CheckLoginFromToken(r, input.GetString("token", ""), useIp)
}
// CheckLoginFromToken validates an explicit access token against the session
// manager, optionally binding the check to the caller's IP.
func (tmpObj *UserTemplate) CheckLoginFromToken(r *http.Request, token string, useIp bool) minisession.CheckResult {
	ctx := appengine.NewContext(r)
	mgr := tmpObj.GetUserHundlerObj(ctx).GetSessionMgr()
	return mgr.CheckAccessToken(ctx, token, minisession.MakeOptionInfo(r), useIp)
}
// GetUserHundlerObj lazily builds (and caches) the user handler, wiring up
// the Twitter OAuth session with a callback URL derived from the current
// app hostname.
// NOTE(review): the lazy init is not goroutine-safe — confirm it always runs
// before concurrent traffic.
func (tmpObj *UserTemplate) GetUserHundlerObj(ctx context.Context) *userhundler.UserHandler {
	if tmpObj.userHandlerObj == nil {
		v := appengine.DefaultVersionHostname(ctx)
		scheme := "https"
		// Local development servers are reached over plain HTTP.
		if v == "127.0.0.1:8080" || v == "localhost:8080" {
			v = "localhost:8080"
			scheme = "http"
		}
		tmpObj.userHandlerObj = userhundler.NewUserHandler(UrlUserCallbackBlobUrl,
			userhundler.UserHandlerManagerConfig{
				UserKind:   tmpObj.config.KindBaseName,
				BlobSign:   tmpObj.config.PrivateKey,
				LengthHash: 9,
			})
		tmpObj.userHandlerObj.AddTwitterSession(twitter.TwitterOAuthConfig{
			ConsumerKey:       tmpObj.config.TwitterConsumerKey,
			ConsumerSecret:    tmpObj.config.TwitterConsumerSecret,
			AccessToken:       tmpObj.config.TwitterAccessToken,
			AccessTokenSecret: tmpObj.config.TwitterAccessTokenSecret,
			// Bug fix: use the normalized host v. The original fetched
			// DefaultVersionHostname again here, discarding the
			// 127.0.0.1 -> localhost normalization performed above.
			CallbackUrl:     scheme + "://" + v + UrlTwitterTokenCallback,
			SecretSign:      appengine.VersionID(ctx),
			AllowInvalidSSL: tmpObj.config.AllowInvalidSSL,
		})
	}
	return tmpObj.userHandlerObj
}
// InitUserApi registers every user/auth endpoint on the default HTTP mux.
// All endpoints allow cross-origin requests from any origin.
func (tmpObj *UserTemplate) InitUserApi() {
	// handle wires a path to a UserHandler method, adding the CORS header
	// and resolving the handler from the request's context each time.
	handle := func(pattern string, fn func(*userhundler.UserHandler, http.ResponseWriter, *http.Request)) {
		http.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
			w.Header().Add("Access-Control-Allow-Origin", "*")
			fn(tmpObj.GetUserHundlerObj(appengine.NewContext(r)), w, r)
		})
	}
	// twitter
	handle(UrlTwitterTokenUrlRedirect, (*userhundler.UserHandler).HandleTwitterRequestToken)
	handle(UrlTwitterTokenCallback, (*userhundler.UserHandler).HandleTwitterCallbackToken)
	// user
	handle(UrlUserGet, (*userhundler.UserHandler).HandleGet)
	handle(UrlUserFind, (*userhundler.UserHandler).HandleFind)
	// Blob upload URLs require a valid session token, so this endpoint
	// cannot use the plain helper above.
	http.HandleFunc(UrlUserRequestBlobUrl, func(w http.ResponseWriter, r *http.Request) {
		w.Header().Add("Access-Control-Allow-Origin", "*")
		params, _ := ioutil.ReadAll(r.Body)
		input := prop.NewMiniPropFromJson(params)
		ret := tmpObj.CheckLogin(r, input, true)
		Debug(appengine.NewContext(r), "(1) ---- ")
		if ret.IsLogin == false {
			tmpObj.userHandlerObj.HandleError(w, r, prop.NewMiniProp(), 1001, "Failed in token check")
			return
		}
		tmpObj.GetUserHundlerObj(appengine.NewContext(r)).HandleBlobRequestTokenBase(w, r, input)
	})
	handle(UrlUserCallbackBlobUrl, (*userhundler.UserHandler).HandleBlobUpdated)
	handle(UrlUserBlobGet, (*userhundler.UserHandler).HandleBlobGet)
	// me
	handle(UrlMeLogout, (*userhundler.UserHandler).HandleLogout)
	handle(UrlMeUpdate, (*userhundler.UserHandler).HandleUpdateInfo)
	handle(UrlMeGet, (*userhundler.UserHandler).HandleGetMe)
}
// Debug writes message to the App Engine info log.
func Debug(ctx context.Context, message string) {
	// Pass message as a value, not as the format string, so messages
	// containing '%' are logged verbatim (printf-style call; vet-clean).
	log.Infof(ctx, "%s", message)
}
|
package models
import (
"bytes"
"html/template"
)
// EmailRequest describes an outgoing email: sender, recipients, subject and
// the (possibly template-rendered) body.
type EmailRequest struct {
	From    string   // sender address; left empty by NewEmailRequest
	To      []string // recipient addresses
	Subject string
	Body    string // plain or HTML body; filled by ParseTemplate
}
// ConfirmEmailTemplate is the data payload for the confirmation email
// template.
// NOTE(review): the template file itself is not visible here — confirm the
// field set matches the placeholders it uses.
type ConfirmEmailTemplate struct {
	Title string
	Name  string
	URL   string
}
// NewEmailRequest constructs an EmailRequest addressed to the given
// recipients. The From field is left at its zero value.
func NewEmailRequest(to []string, subject, body string) *EmailRequest {
	req := new(EmailRequest)
	req.To = to
	req.Subject = subject
	req.Body = body
	return req
}
// ParseTemplate renders the named HTML template file with data and stores
// the result in r.Body. On error the body is left unchanged.
func (r *EmailRequest) ParseTemplate(templateFileName string, data interface{}) error {
	tmpl, err := template.ParseFiles(templateFileName)
	if err != nil {
		return err
	}
	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, data); err != nil {
		return err
	}
	r.Body = rendered.String()
	return nil
}
|
package contracts
import (
"errors"
"math/big"
"time"
"github.com/smartcontractkit/integrations-framework/client"
"github.com/smartcontractkit/integrations-framework/contracts/ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
ocrConfigHelper "github.com/smartcontractkit/libocr/offchainreporting/confighelper"
)
// ContractDeployer is an interface for abstracting the contract deployment methods across network implementations
type ContractDeployer interface {
	// DeployStorageContract deploys a simple value-store contract.
	DeployStorageContract(fromWallet client.BlockchainWallet) (Storage, error)
	// DeployFluxAggregatorContract deploys a flux aggregator with the given options.
	DeployFluxAggregatorContract(
		fromWallet client.BlockchainWallet,
		fluxOptions FluxAggregatorOptions,
	) (FluxAggregator, error)
	// DeployLinkTokenContract deploys the LINK token contract.
	DeployLinkTokenContract(fromWallet client.BlockchainWallet) (LinkToken, error)
	// DeployOffChainAggregator deploys an OCR contract with the given options.
	DeployOffChainAggregator(
		fromWallet client.BlockchainWallet,
		offchainOptions OffchainOptions,
	) (OffchainAggregator, error)
	// DeployVRFContract deploys the VRF (verifiable randomness) contract.
	DeployVRFContract(fromWallet client.BlockchainWallet) (VRF, error)
}
// NewContractDeployer returns a contract deployer matching the concrete
// blockchain client implementation, or an error for unknown client types.
func NewContractDeployer(bcClient client.BlockchainClient) (ContractDeployer, error) {
	if ethClient, ok := bcClient.Get().(*client.EthereumClient); ok {
		return NewEthereumContractDeployer(ethClient), nil
	}
	return nil, errors.New("unknown blockchain client implementation")
}
// EthereumContractDeployer provides the implementations for deploying ETH (EVM) based contracts
type EthereumContractDeployer struct {
	eth *client.EthereumClient // underlying EVM client used for all deployments
}
// NewEthereumContractDeployer wraps an EVM client in a contract deployer.
func NewEthereumContractDeployer(ethClient *client.EthereumClient) *EthereumContractDeployer {
	return &EthereumContractDeployer{eth: ethClient}
}
// DefaultFluxAggregatorOptions produces some basic defaults for a flux aggregator contract
// NOTE(review): these values look chosen for local test networks — confirm
// before using against a public chain.
func DefaultFluxAggregatorOptions() FluxAggregatorOptions {
	return FluxAggregatorOptions{
		PaymentAmount: big.NewInt(1),
		Timeout:       uint32(5),
		MinSubValue:   big.NewInt(1),
		MaxSubValue:   big.NewInt(10),
		Decimals:      uint8(8),
		Description:   "Hardhat Flux Aggregator",
	}
}
// DeployFluxAggregatorContract deploys the Flux Aggregator Contract on an EVM chain
func (e *EthereumContractDeployer) DeployFluxAggregatorContract(
	fromWallet client.BlockchainWallet,
	fluxOptions FluxAggregatorOptions,
) (FluxAggregator, error) {
	// Deploy via the generic helper, supplying the contract-specific
	// constructor. The LINK token address is read from the network config —
	// NOTE(review): assumes DeployLinkTokenContract (which stores it) ran
	// first; confirm call ordering.
	address, _, instance, err := e.eth.DeployContract(fromWallet, "Flux Aggregator", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		linkAddress := common.HexToAddress(e.eth.Network.Config().LinkTokenAddress)
		return ethereum.DeployFluxAggregator(auth,
			backend,
			linkAddress,
			fluxOptions.PaymentAmount,
			fluxOptions.Timeout,
			fluxOptions.Validator,
			fluxOptions.MinSubValue,
			fluxOptions.MaxSubValue,
			fluxOptions.Decimals,
			fluxOptions.Description)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumFluxAggregator{
		client:         e.eth,
		fluxAggregator: instance.(*ethereum.FluxAggregator),
		callerWallet:   fromWallet,
		address:        address,
	}, nil
}
// DeployLinkTokenContract deploys a Link Token contract to an EVM chain and
// records its address in the network config so dependent contracts (flux
// aggregator, OCR) can find it.
func (e *EthereumContractDeployer) DeployLinkTokenContract(fromWallet client.BlockchainWallet) (LinkToken, error) {
	linkTokenAddress, _, instance, err := e.eth.DeployContract(fromWallet, "LINK Token", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return ethereum.DeployLinkToken(auth, backend)
	})
	if err != nil {
		return nil, err
	}
	// Set config address
	e.eth.Network.Config().LinkTokenAddress = linkTokenAddress.Hex()
	return &EthereumLinkToken{
		client:       e.eth,
		linkToken:    instance.(*ethereum.LinkToken),
		callerWallet: fromWallet,
		address:      *linkTokenAddress,
	}, nil // err is provably nil here; the original returned `err` (misleading)
}
// DefaultOffChainAggregatorOptions returns some base defaults for deploying an OCR contract
// NOTE(review): values appear tuned for test environments — confirm before
// reuse elsewhere.
func DefaultOffChainAggregatorOptions() OffchainOptions {
	return OffchainOptions{
		MaximumGasPrice:         uint32(500000000),
		ReasonableGasPrice:      uint32(28000),
		MicroLinkPerEth:         uint32(500),
		LinkGweiPerObservation:  uint32(500),
		LinkGweiPerTransmission: uint32(500),
		MinimumAnswer:           big.NewInt(1),
		MaximumAnswer:           big.NewInt(5000),
		Decimals:                8,
		Description:             "Test OCR",
	}
}
// DefaultOffChainAggregatorConfig returns some base defaults for configuring an OCR contract
// Configured for a 5-node oracle set (N: 5, S has five entries) tolerating
// one faulty node (F: 1).
func DefaultOffChainAggregatorConfig() OffChainAggregatorConfig {
	return OffChainAggregatorConfig{
		AlphaPPB:         1,
		DeltaC:           time.Second * 15,
		DeltaGrace:       time.Second,
		DeltaProgress:    time.Second * 30,
		DeltaStage:       time.Second * 3,
		DeltaResend:      time.Second * 5,
		DeltaRound:       time.Second * 10,
		RMax:             4,
		S:                []int{1, 1, 1, 1, 1},
		N:                5,
		F:                1,
		OracleIdentities: []ocrConfigHelper.OracleIdentityExtra{},
	}
}
// DeployOffChainAggregator deploys the offchain aggregation contract to the
// EVM chain. The LINK token address is read from the network config —
// NOTE(review): assumes DeployLinkTokenContract ran first; confirm ordering.
func (e *EthereumContractDeployer) DeployOffChainAggregator(
	fromWallet client.BlockchainWallet,
	offchainOptions OffchainOptions,
) (OffchainAggregator, error) {
	address, _, instance, err := e.eth.DeployContract(fromWallet, "OffChain Aggregator", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		linkAddress := common.HexToAddress(e.eth.Network.Config().LinkTokenAddress)
		return ethereum.DeployOffchainAggregator(auth,
			backend,
			offchainOptions.MaximumGasPrice,
			offchainOptions.ReasonableGasPrice,
			offchainOptions.MicroLinkPerEth,
			offchainOptions.LinkGweiPerObservation,
			offchainOptions.LinkGweiPerTransmission,
			linkAddress,
			offchainOptions.MinimumAnswer,
			offchainOptions.MaximumAnswer,
			offchainOptions.BillingAccessController,
			offchainOptions.RequesterAccessController,
			offchainOptions.Decimals,
			offchainOptions.Description)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumOffchainAggregator{
		client:       e.eth,
		ocr:          instance.(*ethereum.OffchainAggregator),
		callerWallet: fromWallet,
		address:      address,
	}, nil // err is provably nil here; the original returned `err` (misleading)
}
// DeployStorageContract deploys a vanilla storage contract that is a value
// store. The deployed address is intentionally discarded here; EthereumStorage
// carries no address field.
func (e *EthereumContractDeployer) DeployStorageContract(fromWallet client.BlockchainWallet) (Storage, error) {
	_, _, instance, err := e.eth.DeployContract(fromWallet, "Storage", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return ethereum.DeployStore(auth, backend)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumStorage{
		client:       e.eth,
		store:        instance.(*ethereum.Store),
		callerWallet: fromWallet,
	}, nil // err is provably nil here; the original returned `err` (misleading)
}
// DeployVRFContract deploys the VRF (verifiable randomness) contract to an
// EVM chain. (Doc comment added; this exported method previously had none,
// unlike its siblings.)
func (e *EthereumContractDeployer) DeployVRFContract(fromWallet client.BlockchainWallet) (VRF, error) {
	address, _, instance, err := e.eth.DeployContract(fromWallet, "VRF", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return ethereum.DeployVRF(auth, backend)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumVRF{
		client:       e.eth,
		vrf:          instance.(*ethereum.VRF),
		callerWallet: fromWallet,
		address:      address,
	}, nil // err is provably nil here; the original returned `err` (misleading)
}
|
package groups
import (
"errors"
"net/http"
"strconv"
"docktor/server/storage"
"docktor/server/types"
"github.com/labstack/echo/v4"
log "github.com/sirupsen/logrus"
)
// getAllWithDaemons find all groups with daemons
// With ?all=true a lightweight listing of every group is returned; otherwise
// only the caller's groups are returned, obfuscated for non-admins.
func getAllWithDaemons(c echo.Context) error {
	user := c.Get("user").(types.User)
	db := c.Get("DB").(*storage.Docktor)
	// NOTE(review): the all=true branch does not obfuscate results and is not
	// admin-gated — confirm that FindAllLight omits sensitive fields.
	if all, _ := strconv.ParseBool(c.QueryParam("all")); all {
		groups, err := db.Groups().FindAllLight()
		if err != nil {
			log.WithFields(log.Fields{
				"error": err,
			}).Error("Error when retrieving groups")
			return c.JSON(http.StatusBadRequest, err.Error())
		}
		return c.JSON(http.StatusOK, groups)
	}
	groups, err := db.Groups().FindByUser(user)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Error("Error when retrieving groups")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	// Hide sensitive group details from regular users.
	if !user.IsAdmin() {
		for i := range groups {
			groups[i].Obfuscate()
		}
	}
	return c.JSON(http.StatusOK, groups)
}
// getByDaemon find all groups by daemons id
func getByDaemon(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	daemonID := c.Param(types.DAEMON_ID_PARAM)
	groups, err := db.Groups().FindByDaemonID(daemonID)
	if err != nil {
		log.WithFields(log.Fields{
			"error":     err,
			"daemon_id": daemonID,
		}).Error("Error when retrieving groups by daemon id")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, groups)
}
// getByID find one by id
func getByID(c echo.Context) error {
	group := c.Get("group").(types.Group)
	// Hide sensitive details from non-admin callers.
	if user := c.Get("user").(types.User); !user.IsAdmin() {
		group.Obfuscate()
	}
	return c.JSON(http.StatusOK, group)
}
// save a Group server
// Global admins may create or modify any group. Group admins may only update
// an existing group, and may not change its name, subnet, port range or
// daemon.
func save(c echo.Context) error {
	var g types.Group
	err := c.Bind(&g)
	if err != nil {
		log.WithFields(log.Fields{
			"body":  c.Request().Body,
			"error": err,
		}).Error("Error when parsing group")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	user := c.Get("user").(types.User)
	db := c.Get("DB").(*storage.Docktor)
	if !user.IsAdmin() {
		// Creation (no valid ID) is reserved for global admins.
		if !g.ID.Valid() {
			return echo.NewHTTPError(http.StatusForbidden, "Admin permission required")
		}
		// Load the stored group to compare against the submitted one.
		group, err := db.Groups().FindByIDBson(g.ID)
		if err != nil {
			log.WithFields(log.Fields{
				"group": g,
				"error": err,
			}).Error("Error when finding group")
			return c.JSON(http.StatusBadRequest, err.Error())
		}
		if !group.IsAdmin(&user) {
			return echo.NewHTTPError(http.StatusForbidden, "Admin group permission required")
		}
		// Group admins may not touch infrastructure-level fields.
		if g.Name != group.Name || g.Subnet != group.Subnet || g.Daemon != group.Daemon || g.MinPort != group.MinPort || g.MaxPort != group.MaxPort {
			return echo.NewHTTPError(http.StatusForbidden, "Admin permission required to change groupe name, subnet, min/max port and daemon")
		}
	}
	g, err = db.Groups().Save(g)
	if err != nil {
		log.WithFields(log.Fields{
			"group": g,
			"error": err,
		}).Error("Error when updating/creating group")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, g)
}
// deleteByID delete one by id
func deleteByID(c echo.Context) error {
	db := c.Get("DB").(*storage.Docktor)
	group := c.Get("group").(types.Group)
	if err := db.Groups().Delete(group.ID.Hex()); err != nil {
		log.WithFields(log.Fields{
			"groupID": c.Param(types.GROUP_ID_PARAM),
			"error":   err,
		}).Error("Error when deleting group")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, "ok")
}
// updateUser updates the role of a user in the group or delete it
// status is one of "admin", "user" or "delete". Users may always remove
// themselves; any other change requires group-admin rights.
func updateUser(c echo.Context) error {
	group := c.Get("group").(types.Group)
	user := c.Get("user").(types.User)
	username := c.Param("username")
	status := c.Param("status")
	// Check if user is admin or if it's his username and delete case
	if !(group.IsAdmin(&user) || (user.Username == username && status == "delete")) {
		return echo.NewHTTPError(http.StatusForbidden, "Group admin permission required")
	}
	switch status {
	case "admin":
		group.Users = types.Remove(group.Users, username)
		group.Admins = append(group.Admins, username)
	case "user":
		group.Admins = types.Remove(group.Admins, username)
		group.Users = append(group.Users, username)
	case "delete":
		group.Users = types.Remove(group.Users, username)
		group.Admins = types.Remove(group.Admins, username)
	default:
		// Bug fix: previously returned a bare error, which echo renders as a
		// 500; report it as a client error like every other path here.
		return echo.NewHTTPError(http.StatusBadRequest, "Invalid status parameter")
	}
	db := c.Get("DB").(*storage.Docktor)
	group, err := db.Groups().Save(group)
	if err != nil {
		log.WithFields(log.Fields{
			"group": group.Name,
			"error": err,
		}).Error("Error when updating/creating group")
		return c.JSON(http.StatusBadRequest, err.Error())
	}
	return c.JSON(http.StatusOK, group)
}
|
package services
import (
"errors"
"sub/app/helpers/dbhelper"
"sub/app/models"
)
// SaveMsg - to save hotel, room and rateplan object
// Persists every offer's hotel, room and rate plan, stopping at the first
// database error.
func SaveMsg(msgData *models.MsgData) error {
	// Bug fix: validate input before acquiring a DB connection; the nil
	// check previously ran after GetConnByHost.
	if msgData == nil {
		return errors.New("No data received")
	}
	conn, err := dbhelper.GetConnByHost("")
	if err != nil {
		return err
	}
	// Index into the slice so gorm writes back any DB-populated fields
	// (IDs, timestamps) into the caller's data instead of a loop copy.
	for i := range msgData.Offers {
		offer := &msgData.Offers[i]
		if err := conn.Create(&offer.Hotel).Error; err != nil {
			return err
		}
		if err := conn.Create(&offer.Room).Error; err != nil {
			return err
		}
		if err := conn.Create(&offer.RatePlan).Error; err != nil {
			return err
		}
	}
	return nil
}
|
package validator
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/authelia/authelia/v4/internal/configuration/schema"
)
// TestValidatePrivacyPolicy table-drives ValidatePrivacyPolicy: an empty
// expected string means "no validation error", anything else is matched
// against the first recorded error.
func TestValidatePrivacyPolicy(t *testing.T) {
	testCases := []struct {
		name     string
		have     schema.PrivacyPolicy
		expected string
	}{
		{"ShouldValidateDefaultConfig", schema.PrivacyPolicy{}, ""},
		{"ShouldValidateValidEnabledPolicy", schema.PrivacyPolicy{Enabled: true, PolicyURL: MustParseURL("https://example.com/privacy")}, ""},
		{"ShouldValidateValidEnabledPolicyWithUserAcceptance", schema.PrivacyPolicy{Enabled: true, RequireUserAcceptance: true, PolicyURL: MustParseURL("https://example.com/privacy")}, ""},
		{"ShouldNotValidateOnInvalidScheme", schema.PrivacyPolicy{Enabled: true, PolicyURL: MustParseURL("http://example.com/privacy")}, "privacy_policy: option 'policy_url' must have the 'https' scheme but it's configured as 'http'"},
		{"ShouldNotValidateOnMissingURL", schema.PrivacyPolicy{Enabled: true}, "privacy_policy: option 'policy_url' must be provided when the option 'enabled' is true"},
	}
	validator := schema.NewStructValidator()
	for _, tc := range testCases {
		// The validator is shared across cases; clear recorded
		// errors/warnings before each run.
		validator.Clear()
		t.Run(tc.name, func(t *testing.T) {
			ValidatePrivacyPolicy(&tc.have, validator)
			assert.Len(t, validator.Warnings(), 0)
			if tc.expected == "" {
				assert.Len(t, validator.Errors(), 0)
			} else {
				assert.EqualError(t, validator.Errors()[0], tc.expected)
			}
		})
	}
}
|
/*
* Copyright (c) 2015, Yawning Angel <yawning at torproject dot org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package socks5
import (
"bufio"
"bytes"
"encoding/hex"
)
// TestReadWriter is a bytes.Buffer backed io.ReadWriter used for testing. The
// Read and Write routines are to be used by the component being tested. Data
// can be written to and read back via the WriteHex and ReadHex routines.
type TestReadWriter struct {
	readBuf  bytes.Buffer // data the component under test will Read
	writeBuf bytes.Buffer // data the component under test has Written
}
// Read feeds the component under test from the read buffer.
func (c *TestReadWriter) Read(buf []byte) (n int, err error) {
	return c.readBuf.Read(buf)
}

// Write captures the component's output into the write buffer.
func (c *TestReadWriter) Write(buf []byte) (n int, err error) {
	return c.writeBuf.Write(buf)
}

// WriteHex decodes str as hex and queues the bytes for the component to Read.
func (c *TestReadWriter) WriteHex(str string) (n int, err error) {
	var buf []byte
	if buf, err = hex.DecodeString(str); err != nil {
		return
	}
	return c.readBuf.Write(buf)
}

// ReadHex returns everything the component has Written so far, hex-encoded.
func (c *TestReadWriter) ReadHex() string {
	return hex.EncodeToString(c.writeBuf.Bytes())
}

// toBufio wraps c in the buffered ReadWriter form used by Request.rw.
func (c *TestReadWriter) toBufio() *bufio.ReadWriter {
	return bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
}

// ToRequest builds a Request whose I/O endpoint is this TestReadWriter.
func (c *TestReadWriter) ToRequest() *Request {
	req := new(Request)
	req.rw = c.toBufio()
	return req
}

// reset clears both buffers and re-attaches fresh buffered I/O to req.
func (c *TestReadWriter) reset(req *Request) {
	c.readBuf.Reset()
	c.writeBuf.Reset()
	req.rw = c.toBufio()
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01100103 is the XML document wrapper for the ISO 20022
// caaa.011.001.03 (AcceptorBatchTransfer) message.
type Document01100103 struct {
	XMLName xml.Name                  `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.011.001.03 Document"`
	Message *AcceptorBatchTransferV03 `xml:"AccptrBtchTrf"`
}

// AddMessage allocates the message payload and returns it for population.
func (d *Document01100103) AddMessage() *AcceptorBatchTransferV03 {
	d.Message = new(AcceptorBatchTransferV03)
	return d.Message
}
// The AcceptorBatchTransfer is sent by an acceptor (or its agent) to transfer the financial data of a collection of transactions to the acquirer (or its agent).
type AcceptorBatchTransferV03 struct {
	// Batch capture message management information.
	Header *iso20022.Header3 `xml:"Hdr"`
	// Card payment transactions from one or several data set of transactions.
	BatchTransfer *iso20022.CardPaymentBatchTransfer2 `xml:"BtchTrf"`
	// Trailer of the message containing a MAC or a digital signature.
	SecurityTrailer *iso20022.ContentInformationType9 `xml:"SctyTrlr"`
}

// AddHeader allocates and returns the message header.
func (a *AcceptorBatchTransferV03) AddHeader() *iso20022.Header3 {
	a.Header = new(iso20022.Header3)
	return a.Header
}

// AddBatchTransfer allocates and returns the batch-transfer body.
func (a *AcceptorBatchTransferV03) AddBatchTransfer() *iso20022.CardPaymentBatchTransfer2 {
	a.BatchTransfer = new(iso20022.CardPaymentBatchTransfer2)
	return a.BatchTransfer
}

// AddSecurityTrailer allocates and returns the security trailer.
func (a *AcceptorBatchTransferV03) AddSecurityTrailer() *iso20022.ContentInformationType9 {
	a.SecurityTrailer = new(iso20022.ContentInformationType9)
	return a.SecurityTrailer
}
|
package main
import "fmt"
// main demonstrates functions that return closures: prints the type of the
// returned function, then the results of invoking both closures.
func main() {
	intFn := bar()
	fmt.Printf("%T\n", intFn)
	// invoke the returned closure
	fmt.Println(intFn())
	fmt.Println(goo()())
}
// bar returns a closure that always yields 451.
func bar() func() int {
	result := func() int { return 451 }
	return result
}
// goo returns a closure that always yields the string "Limbaroyati".
func goo() func() string {
	return func() string { return "Limbaroyati" }
}
|
package platform
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLoadConfigFromJSONFileShouldWorks checks that a well-formed JSON file
// is decoded into the target struct.
func TestLoadConfigFromJSONFileShouldWorks(t *testing.T) {
	const json = "{ \"f1\": \"val1\", \"f2\": 1 }"
	file, err := ioutil.TempFile("", t.Name())
	assert.Nil(t, err, "Error to create tempfile")
	defer os.Remove(file.Name())
	_, err = file.WriteString(json)
	// Bug fix: this message previously read "Error to create tempfile",
	// a copy-paste from the assertion above.
	assert.Nil(t, err, "Error writing to tempfile")
	configType := struct {
		F1 string
		F2 int
	}{}
	err = LoadConfigFromJSONFile(file.Name(), &configType)
	assert.Nil(t, err, "Error loading config")
	assert.Equal(t, configType.F1, "val1")
	assert.Equal(t, configType.F2, 1)
}
// TestLoadConfigFromJSONFileShouldFailWhenFileDoesNotExists expects an
// error when the target file is absent.
func TestLoadConfigFromJSONFileShouldFailWhenFileDoesNotExists(t *testing.T) {
	tempDir, err := ioutil.TempDir("", t.Name())
	assert.Nil(t, err, "Error to create tempDir")
	var config struct {
		F1 string
		F2 int
	}
	missing := tempDir + string(os.PathSeparator) + t.Name()
	err = LoadConfigFromJSONFile(missing, &config)
	assert.NotNil(t, err, "Test must failed")
}
// TestLoadConfigFromJSONFileShouldFailWhenJSONParsingFailed expects an
// error when a field's JSON type does not match the struct field type.
func TestLoadConfigFromJSONFileShouldFailWhenJSONParsingFailed(t *testing.T) {
	const json = "{ \"f1\": \"val1\", \"f2\": \"val2\" }"
	file, err := ioutil.TempFile("", t.Name())
	assert.Nil(t, err, "Error to create tempfile")
	defer os.Remove(file.Name())
	_, err = file.WriteString(json)
	assert.Nil(t, err, "Error to create tempfile")
	var config struct {
		F1 string
		F2 int // "val2" cannot decode into an int
	}
	err = LoadConfigFromJSONFile(file.Name(), &config)
	assert.NotNil(t, err, "Test must failed")
}
|
package main
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"io/ioutil"
"log"
"net/http"
"strconv"
"time"
)
// main pre-signs an S3 DeleteObjects request and then performs the deletion
// by POSTing the XML payload to the pre-signed URL.
func main() {
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String("us-east-1")},
	)
	if err != nil {
		log.Fatal(err)
	}
	// Create S3 service client
	svc := s3.New(sess)
	// Register the body rewrite used at signing time so the signature
	// matches the body deleteObjects later sends.
	svc.Handlers.Build.PushBack(deleteObjectsBodyHandler)
	payload := makeDeleteObjectsPayload()
	url := getDeleteObjectsPresignedUrl(svc, payload.getKeys())
	response, err := deleteObjects(url, payload)
	if err != nil {
		log.Fatal(err.Error())
	}
	// Bug fix: the response body was never closed, leaking the connection.
	defer response.Body.Close()
	if response.StatusCode != 200 {
		body, _ := ioutil.ReadAll(response.Body)
		log.Fatalf("Failed to DELETE objects using a pre-signed URL: status: %d, body: %s",
			response.StatusCode, body)
	}
}
// getDeleteObjectsPresignedUrl builds a DeleteObjects request for the given
// keys against the "test-bucket" bucket and returns a URL pre-signed for 15
// seconds. Exits the process on presign failure.
func getDeleteObjectsPresignedUrl(svc *s3.S3, keys []string) string {
	objects := make([]*s3.ObjectIdentifier, len(keys))
	for i, k := range keys {
		objects[i] = &s3.ObjectIdentifier{
			Key: aws.String(k),
		}
	}
	req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{
		Bucket: aws.String("test-bucket"),
		Delete: &s3.Delete{
			Objects: objects,
		},
	})
	// Short expiry keeps the demo URL from lingering.
	url, err := req.Presign(15 * time.Second)
	if err != nil {
		log.Fatal(err.Error())
	}
	return url
}
// makeDeleteObjectsPayload builds a two-object Delete payload with keys
// "key-0" and "key-1".
func makeDeleteObjectsPayload() deleteObjectsPayload {
	payload := deleteObjectsPayload{
		Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
	}
	for i := 0; i < 2; i++ {
		payload.Object = append(payload.Object, object{Key: "key-" + strconv.Itoa(i)})
	}
	return payload
}
// deleteObjects POSTs the DeleteObjects XML payload to the pre-signed URL.
// The body must match byte-for-byte what the signing-time Build handler
// produced (XML header prepended, trailing newline appended), or the
// signature check fails.
func deleteObjects(presignedUrl string, payload deleteObjectsPayload) (*http.Response, error) {
	b, err := xml.Marshal(payload) // error was previously discarded
	if err != nil {
		return nil, err
	}
	// Bug fix: the original wrapped this in a second, single-argument
	// append call — append(append(...)) — which is a no-op wrapper.
	bWithXml := append([]byte(xml.Header+string(b)), '\n')
	req, err := http.NewRequest("POST", presignedUrl, bytes.NewBuffer(bWithXml))
	if err != nil {
		return nil, err
	}
	req.Header.Add("content-md5", calculateMd5Hash(bWithXml))
	req.Header.Add("content-length", strconv.Itoa(len(bWithXml)))
	client := &http.Client{}
	return client.Do(req)
}
// deleteObjectsPayload is the <Delete> XML document accepted by the S3
// DeleteObjects API.
type deleteObjectsPayload struct {
	Xmlns   string   `xml:"xmlns,attr"`
	XMLName xml.Name `xml:"Delete"`
	Object  []object `xml:"Object"`
}

// object is a single <Object> entry identifying one key to delete.
type object struct {
	Key string
}
// getKeys returns the object keys contained in the payload, in order.
func (payload deleteObjectsPayload) getKeys() []string {
	keys := make([]string, 0, len(payload.Object))
	for _, o := range payload.Object {
		keys = append(keys, o.Key)
	}
	return keys
}
// calculateMd5Hash returns the base64-encoded MD5 digest of payloadBytes —
// the format S3 expects in the Content-MD5 header.
func calculateMd5Hash(payloadBytes []byte) string {
	sum := md5.Sum(payloadBytes)
	return base64.StdEncoding.EncodeToString(sum[:])
}
// deleteObjectsBodyHandler is a Build-phase handler that rewrites the body
// of DeleteObjects requests to include the XML declaration header and a
// trailing newline, matching what deleteObjects sends at execution time so
// the pre-signed signature verifies.
func deleteObjectsBodyHandler(r *request.Request) {
	// Only touch DeleteObjects; leave every other operation's body alone.
	if r.Operation.Name != "DeleteObjects" {
		return
	}
	body, _ := ioutil.ReadAll(r.Body)
	bodyWithXmlHeader := append(append([]byte(xml.Header), body...), '\n')
	r.SetBufferBody(bodyWithXmlHeader)
}
|
package main

import "fmt"

// main demonstrates the copy builtin: copy transfers min(len(dst), len(src))
// elements and never changes the destination's length or capacity.
func main() {
	src := []int{1, 2, 3}
	dst := make([]int, 2, 4)
	copy(dst, src)
	fmt.Println(dst)
	fmt.Println(len(dst))
	fmt.Println(cap(dst))
}
|
package initializers_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestInitializers boots the Ginkgo runner for the initializers test suite.
func TestInitializers(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Initializers Suite")
}
|
// Copyright (c) 2019 Chair of Applied Cryptography, Technische Universität
// Darmstadt, Germany. All rights reserved. This file is part of go-perun. Use
// of this source code is governed by a MIT-style license that can be found in
// the LICENSE file.
package memorydb
import (
"testing"
"perun.network/go-perun/pkg/sortedkv/test"
)
// TestBatch runs the shared sortedkv batch test suite against a fresh
// in-memory database instance.
func TestBatch(t *testing.T) {
	t.Run("Generic Batch test", func(t *testing.T) {
		test.GenericBatchTest(t, NewDatabase())
	})
}
|
package schoolmeal
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"github.com/buger/jsonparser"
)
var (
	// client is the shared HTTP client used for every school-meal request.
	client *http.Client
)

// init sets up the shared HTTP client.
// NOTE(review): a bare &http.Client{} has no timeout — confirm that is
// acceptable for this package's callers.
func init() {
	client = &http.Client{}
}
// GetDayMeal returns the meal of the given type for a single day: it fetches
// the whole week containing date and picks the entry for date's weekday.
func (s School) GetDayMeal(date string, mealType int) (m Meal, err error) {
	weekMeals, err := s.GetWeekMeal(date, mealType)
	if err != nil {
		return
	}
	m = weekMeals[parseTime(date).Weekday()]
	return
}
// GetMonthMeal returns every meal for the given year and month.
// Result indexing: Meal[day-1][mealType-1].
func (s School) GetMonthMeal(year, month int) (monthMeals [][]Meal, err error) {
	reqFormat := `{"schulCode": "%s", "schulCrseScCode": %d, "schYm": "%d%02d"}`
	reqJSON := []byte(fmt.Sprintf(reqFormat, s.Code, s.Kind, year, month))
	doc, err := post(s, makeURL(s.Zone, linkMealMonthly), reqJSON)
	if err != nil {
		return
	}
	// Extract the mthDietList entry from the response JSON.
	docDiet, _, _, err := jsonparser.Get(doc, "resultSVO", "mthDietList")
	if err != nil {
		return
	}
	rds := make([]resultDiet, 0, 6)
	if err = json.Unmarshal(docDiet, &rds); err != nil {
		return
	}
	if len(rds) == 0 {
		err = errors.New("unexpected: month meal is none")
		return
	}
	monthMeals = make([][]Meal, 0, 32)
	// Each rds row appears to be one calendar week; walk Sunday..Saturday.
	// NOTE(review): presumably appendIfNotNil drops days outside this month
	// (nil conversions) — confirm against rdToMealMonth.
	for _, rd := range rds {
		appendIfNotNil(&monthMeals, rdToMealMonth(year, month, rd.Sun))
		appendIfNotNil(&monthMeals, rdToMealMonth(year, month, rd.Mon))
		appendIfNotNil(&monthMeals, rdToMealMonth(year, month, rd.Tue))
		appendIfNotNil(&monthMeals, rdToMealMonth(year, month, rd.Wed))
		// "The" is the Thursday field's (misspelled) name on resultDiet,
		// declared elsewhere — it cannot be renamed here.
		appendIfNotNil(&monthMeals, rdToMealMonth(year, month, rd.The))
		appendIfNotNil(&monthMeals, rdToMealMonth(year, month, rd.Fri))
		appendIfNotNil(&monthMeals, rdToMealMonth(year, month, rd.Sat))
	}
	return
}
// GetWeekMeal returns the seven meals (index 0 = Sunday ... 6 = Saturday) of
// the week containing date. mealType selects the meal kind as defined by the
// NEIS API field schMmealScCode.
func (s School) GetWeekMeal(date string, mealType int) (meals []Meal, err error) {
	reqFormat := `{"schulCode": "%s", "schulCrseScCode": %d, "schMmealScCode": %d, "schYmd": "%s"}`
	reqJSON := []byte(fmt.Sprintf(reqFormat, s.Code, s.Kind, mealType, date))
	doc, err := post(s, makeURL(s.Zone, linkMealWeekly), reqJSON)
	if err != nil {
		return
	}
	// Extract the weekDietList JSON node from the response envelope.
	docDiet, _, _, err := jsonparser.Get(doc, "resultSVO", "weekDietList")
	if err != nil {
		return
	}
	rds := make([]resultDiet, 0, 3)
	if err = json.Unmarshal(docDiet, &rds); err != nil {
		return
	}
	// The API returns three parallel rows per week:
	// rds[0] == DateString, rds[1] == People, rds[2] == Content.
	if len(rds) < 3 {
		err = errors.New("schoolmeal: no diet in this week")
		return
	}
	// make([]Meal, 7) — the previous explicit capacity argument (7, 7) was
	// redundant when length equals capacity (staticcheck S1019).
	meals = make([]Meal, 7)
	meals[0] = rdToMealWeek(rds[0].Sun, rds[1].Sun, rds[2].Sun, mealType)
	meals[1] = rdToMealWeek(rds[0].Mon, rds[1].Mon, rds[2].Mon, mealType)
	meals[2] = rdToMealWeek(rds[0].Tue, rds[1].Tue, rds[2].Tue, mealType)
	meals[3] = rdToMealWeek(rds[0].Wed, rds[1].Wed, rds[2].Wed, mealType)
	meals[4] = rdToMealWeek(rds[0].The, rds[1].The, rds[2].The, mealType)
	meals[5] = rdToMealWeek(rds[0].Fri, rds[1].Fri, rds[2].Fri, mealType)
	meals[6] = rdToMealWeek(rds[0].Sat, rds[1].Sat, rds[2].Sat, mealType)
	return
}
|
package main
import (
"context"
"fmt"
"github.com/Highway-Project/highway/config"
"github.com/Highway-Project/highway/internal/server"
"github.com/Highway-Project/highway/logging"
"github.com/creasty/defaults"
"os"
"os/signal"
"syscall"
"time"
)
// main boots the highway gateway: it loads and validates configuration,
// starts the HTTP server, then blocks until SIGINT/SIGTERM and shuts the
// server down gracefully with a 10-second deadline.
func main() {
	fmt.Println(`
    _ _ _ _
| | | (_) | |
| |__| |_ __ _| |____ ____ _ _ _
| __ | |/ _' | '_ \ \ /\ / / _' | | | |
| | | | | (_| | | | \ V V / (_| | |_| |
|_| |_|_|\__, |_| |_|\_/\_/ \__,_|\__, |
__/ | __/ |
|___/ |___/`)
	logging.InitLogger("debug", true)
	cfg, err := config.ReadConfig()
	if err != nil {
		logging.Logger.WithError(err).Fatal("could not load config")
	}
	err = defaults.Set(cfg)
	if err != nil {
		logging.Logger.WithError(err).Fatal("could not set default values")
	}
	err = cfg.Validate()
	if err != nil {
		logging.Logger.WithError(err).Fatal("invalid config")
	}
	s, err := server.NewServer(cfg.Global, cfg.RouterSpec, cfg.ServicesSpecs, cfg.RulesSpecs, cfg.MiddlewaresSpecs)
	if err != nil {
		logging.Logger.WithError(err).Fatal("could not create server")
	}
	go func() {
		logging.Logger.Infof("started serving on port :%d", cfg.Global.Port)
		logging.Logger.Fatal(s.ListenAndServe())
	}()
	// signal.Notify requires a buffered channel: it does not block when
	// delivering, so an unbuffered channel can drop the signal entirely
	// (this is also flagged by go vet).
	shutdown := make(chan os.Signal, 1)
	signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM)
	<-shutdown
	logging.Logger.Info("shutting down highway gracefully")
	// Stop accepting keep-alive connections so Shutdown can drain faster.
	s.SetKeepAlivesEnabled(false)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err = s.Shutdown(ctx)
	if err != nil {
		logging.Logger.WithError(err).Error("could not shutdown highway gracefully")
	}
	logging.Logger.Info("exiting highway")
}
|
// Package storage - служба хранения данных
package storage
import (
"go.core/lesson7/pkg/crawler"
"go.core/lesson7/pkg/storage/bstree"
)
// Interface describes the operations a document store must support.
type Interface interface {
	// Create stores a batch of crawled documents.
	Create(docs []crawler.Document)
	// Document looks up a document by id; the boolean presumably reports
	// whether it was found (comma-ok idiom) — confirm against implementations.
	Document(id int) (crawler.Document, bool)
	// Add stores a single document.
	Add(d crawler.Document)
}
// New constructs the document storage service, backed by an empty
// binary-search tree.
func New() *bstree.Tree {
	return &bstree.Tree{}
}
|
package html
import (
"testing"
)
// TestTidy runs Tidy over a fragment of malformed HTML and logs the result.
// The previous version called t.Error(str), which marked the test as failed
// on every run — clearly a debugging leftover; t.Log reports the output
// without failing.
// NOTE(review): there is still no assertion on the tidied output; add one
// once the expected result is pinned down.
func TestTidy(t *testing.T) {
	dest := "<div id='hello'><a onclick=\"window.location='aa'\" id=\"ss\"></a>"
	str := Tidy(dest)
	t.Log(str)
}
|
package week11
// RuneStack is a LIFO stack of runes backed by a slice.
type RuneStack []rune
// Push places r on top of the stack.
func (s *RuneStack) Push(r rune) {
	*s = append(*s, r)
}
// Pop removes and returns the top rune.
// NOTE(review): Pop panics (index out of range) when the stack is empty;
// callers must check emptiness first.
func (s *RuneStack) Pop() rune {
	last := (*s)[len(*s)-1]
	*s = (*s)[:len(*s)-1]
	return last
}
// 20. Valid Parentheses https://leetcode-cn.com/problems/valid-parentheses/
// isValid reports whether s consists of correctly matched and properly nested
// brackets of the kinds (), [], {}.
//
// Bug fix: the previous version popped from the stack without checking for
// emptiness, so any input beginning with a closing bracket (e.g. ")")
// panicked with an index-out-of-range instead of returning false. The stack
// length is now checked before every pop.
func isValid(s string) bool {
	// pairs maps each closing bracket to its required opening bracket.
	pairs := map[rune]rune{')': '(', ']': '[', '}': '{'}
	var stack []rune
	for _, token := range s {
		switch token {
		case '(', '[', '{':
			stack = append(stack, token)
		case ')', ']', '}':
			if len(stack) == 0 || stack[len(stack)-1] != pairs[token] {
				return false
			}
			stack = stack[:len(stack)-1]
		}
	}
	// Valid only if every opener was consumed.
	return len(stack) == 0
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package partition
import (
"context"
"fmt"
"math/rand"
"strconv"
"strings"
"testing"
gotime "time"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
mysql "github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx/binloginfo"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/logutil"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
// TestPartitionBasic exercises basic DML on a RANGE COLUMNS partitioned
// table: insert, insert-on-duplicate, update, delete, selecting an explicit
// partition, and the error paths for a non-existent partition and for DML
// constrained to a partition the row does not belong to.
func TestPartitionBasic(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.Session().GetSessionVars().BinlogClient = binloginfo.MockPumpsClient(testkit.MockPumpClient{})
	tk.MustExec("set @@session.tidb_enable_table_partition = '1'")
	tk.MustExec(`CREATE TABLE partition_basic (id int(11), unique index(id))
PARTITION BY RANGE COLUMNS ( id ) (
		PARTITION p0 VALUES LESS THAN (6),
		PARTITION p1 VALUES LESS THAN (11),
		PARTITION p2 VALUES LESS THAN (16),
		PARTITION p3 VALUES LESS THAN (21)
)`)
	tk.MustExec("insert into partition_basic values(0)")
	tk.MustExec("insert into partition_basic values(2) on duplicate key update id = 1")
	tk.MustExec("update partition_basic set id = 7 where id = 0")
	// id = 7 now lives in partition p1.
	tk.MustQuery("select * from partition_basic where id = 7").Check(testkit.Rows("7"))
	tk.MustQuery("select * from partition_basic partition (p1)").Check(testkit.Rows("7"))
	// p5 does not exist, and id = 666 falls outside every partition.
	tk.MustExecToErr("select * from partition_basic partition (p5)")
	tk.MustExecToErr("update partition_basic set id = 666 where id = 7")
	tk.MustExec("update partition_basic set id = 9 where id = 7")
	tk.MustExec("delete from partition_basic where id = 7")
	tk.MustExec("delete from partition_basic where id = 9")
	tk.MustExec("drop table partition_basic")
}
// TestPartitionAddRecord drives table.Table.AddRecord directly against RANGE
// partitioned tables: rows must be written under the partition's record key
// (not the table's), values outside every partition must yield
// ErrNoPartitionForGivenValue, and an expression partition (a+b) must route
// rows the same way.
func TestPartitionAddRecord(t *testing.T) {
	createTable1 := `CREATE TABLE test.t1 (id int(11), index(id))
PARTITION BY RANGE ( id ) (
		PARTITION p0 VALUES LESS THAN (6),
		PARTITION p1 VALUES LESS THAN (11),
		PARTITION p2 VALUES LESS THAN (16),
		PARTITION p3 VALUES LESS THAN (21)
)`
	ctx := context.Background()
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	_, err := tk.Session().Execute(ctx, "use test")
	require.NoError(t, err)
	_, err = tk.Session().Execute(ctx, "drop table if exists t1, t2;")
	require.NoError(t, err)
	_, err = tk.Session().Execute(ctx, createTable1)
	require.NoError(t, err)
	tb, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
	require.NoError(t, err)
	tbInfo := tb.Meta()
	p0 := tbInfo.Partition.Definitions[0]
	require.Equal(t, model.NewCIStr("p0"), p0.Name)
	require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session()))
	// id = 1 belongs to p0.
	rid, err := tb.AddRecord(tk.Session(), types.MakeDatums(1))
	require.NoError(t, err)
	// Check that add record writes to the partition, rather than the table.
	txn, err := tk.Session().Txn(true)
	require.NoError(t, err)
	val, err := txn.Get(context.TODO(), tables.PartitionRecordKey(p0.ID, rid.IntValue()))
	require.NoError(t, err)
	require.Greater(t, len(val), 0)
	_, err = txn.Get(context.TODO(), tables.PartitionRecordKey(tbInfo.ID, rid.IntValue()))
	require.True(t, kv.ErrNotExist.Equal(err))
	// Cover more code.
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(7))
	require.NoError(t, err)
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(12))
	require.NoError(t, err)
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(16))
	require.NoError(t, err)
	// Make the changes visible.
	_, err = tk.Session().Execute(context.Background(), "commit")
	require.NoError(t, err)
	// Check index count equals to data count.
	tk.MustQuery("select count(*) from t1").Check(testkit.Rows("4"))
	tk.MustQuery("select count(*) from t1 use index(id)").Check(testkit.Rows("4"))
	tk.MustQuery("select count(*) from t1 use index(id) where id > 6").Check(testkit.Rows("3"))
	// Value must locates in one partition.
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(22))
	require.True(t, table.ErrNoPartitionForGivenValue.Equal(err))
	_, err = tk.Session().Execute(context.Background(), "rollback")
	require.NoError(t, err)
	// MAXVALUE catches everything, so 22 is accepted here.
	createTable2 := `CREATE TABLE test.t2 (id int(11))
PARTITION BY RANGE ( id ) (
		PARTITION p0 VALUES LESS THAN (6),
		PARTITION p3 VALUES LESS THAN MAXVALUE
)`
	_, err = tk.Session().Execute(context.Background(), createTable2)
	require.NoError(t, err)
	require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session()))
	tb, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t2"))
	require.NoError(t, err)
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(22))
	require.NoError(t, err)
	// Single-partition table: the boundary value 10 itself is out of range
	// (VALUES LESS THAN is exclusive).
	createTable3 := `create table test.t3 (id int) partition by range (id)
	(
	partition p0 values less than (10)
	)`
	_, err = tk.Session().Execute(context.Background(), createTable3)
	require.NoError(t, err)
	require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session()))
	tb, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t3"))
	require.NoError(t, err)
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(11))
	require.True(t, table.ErrNoPartitionForGivenValue.Equal(err))
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(10))
	require.True(t, table.ErrNoPartitionForGivenValue.Equal(err))
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(0))
	require.NoError(t, err)
	// Expression partitioning: a+b = 1+11 = 12 falls outside p0 (< 10).
	createTable4 := `create table test.t4 (a int,b int) partition by range (a+b)
	(
	partition p0 values less than (10)
	);`
	_, err = tk.Session().Execute(context.Background(), createTable4)
	require.NoError(t, err)
	require.Nil(t, sessiontxn.NewTxn(ctx, tk.Session()))
	tb, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t4"))
	require.NoError(t, err)
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(1, 11))
	require.True(t, table.ErrNoPartitionForGivenValue.Equal(err))
}
// TestHashPartitionAddRecord mirrors TestPartitionAddRecord for HASH
// partitioning, including the case where the partition expression evaluates
// to a negative number.
func TestHashPartitionAddRecord(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	_, err := tk.Session().Execute(context.Background(), "use test")
	require.NoError(t, err)
	_, err = tk.Session().Execute(context.Background(), "drop table if exists t1;")
	require.NoError(t, err)
	_, err = tk.Session().Execute(context.Background(), "set @@session.tidb_enable_table_partition = '1';")
	require.NoError(t, err)
	_, err = tk.Session().Execute(context.Background(), `CREATE TABLE test.t1 (id int(11), index(id)) PARTITION BY HASH (id) partitions 4;`)
	require.NoError(t, err)
	tb, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
	require.NoError(t, err)
	tbInfo := tb.Meta()
	p0 := tbInfo.Partition.Definitions[0]
	require.Nil(t, sessiontxn.NewTxn(context.Background(), tk.Session()))
	// 8 mod 4 == 0, so the row lands in partition p0.
	rid, err := tb.AddRecord(tk.Session(), types.MakeDatums(8))
	require.NoError(t, err)
	// Check that add record writes to the partition, rather than the table.
	txn, err := tk.Session().Txn(true)
	require.NoError(t, err)
	val, err := txn.Get(context.TODO(), tables.PartitionRecordKey(p0.ID, rid.IntValue()))
	require.NoError(t, err)
	require.Greater(t, len(val), 0)
	_, err = txn.Get(context.TODO(), tables.PartitionRecordKey(tbInfo.ID, rid.IntValue()))
	require.True(t, kv.ErrNotExist.Equal(err))
	// Cover more code.
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(-1))
	require.NoError(t, err)
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(3))
	require.NoError(t, err)
	_, err = tb.AddRecord(tk.Session(), types.MakeDatums(6))
	require.NoError(t, err)
	// Make the changes visible.
	_, err = tk.Session().Execute(context.Background(), "commit")
	require.NoError(t, err)
	// Check index count equals to data count.
	tk.MustQuery("select count(*) from t1").Check(testkit.Rows("4"))
	tk.MustQuery("select count(*) from t1 use index(id)").Check(testkit.Rows("4"))
	tk.MustQuery("select count(*) from t1 use index(id) where id > 2").Check(testkit.Rows("3"))
	// Test for partition expression is negative number.
	_, err = tk.Session().Execute(context.Background(), `CREATE TABLE test.t2 (id int(11), index(id)) PARTITION BY HASH (id) partitions 11;`)
	require.NoError(t, err)
	tb, err = dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t2"))
	require.NoError(t, err)
	tbInfo = tb.Meta()
	// -i hashes into partition i for an 11-way hash table.
	for i := 0; i < 11; i++ {
		require.Nil(t, sessiontxn.NewTxn(context.Background(), tk.Session()))
		rid, err = tb.AddRecord(tk.Session(), types.MakeDatums(-i))
		require.NoError(t, err)
		txn, err = tk.Session().Txn(true)
		require.NoError(t, err)
		val, err = txn.Get(context.TODO(), tables.PartitionRecordKey(tbInfo.Partition.Definitions[i].ID, rid.IntValue()))
		require.NoError(t, err)
		require.Greater(t, len(val), 0)
		_, err = txn.Get(context.TODO(), tables.PartitionRecordKey(tbInfo.ID, rid.IntValue()))
		require.True(t, kv.ErrNotExist.Equal(err))
	}
	_, err = tk.Session().Execute(context.Background(), "drop table if exists t1, t2;")
	require.NoError(t, err)
}
// TestPartitionGetPhysicalID tests partition.GetPhysicalID().
// Each partition's physical ID must equal the ID recorded in its
// model.PartitionDefinition.
func TestPartitionGetPhysicalID(t *testing.T) {
	createTable1 := `CREATE TABLE test.t1 (id int(11), index(id))
PARTITION BY RANGE ( id ) (
		PARTITION p0 VALUES LESS THAN (6),
		PARTITION p1 VALUES LESS THAN (11),
		PARTITION p2 VALUES LESS THAN (16),
		PARTITION p3 VALUES LESS THAN (21)
)`
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	_, err := tk.Session().Execute(context.Background(), "Drop table if exists test.t1;")
	require.NoError(t, err)
	_, err = tk.Session().Execute(context.Background(), createTable1)
	require.NoError(t, err)
	tb, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
	require.NoError(t, err)
	tbInfo := tb.Meta()
	ps := tbInfo.GetPartitionInfo()
	require.NotNil(t, ps)
	for _, pd := range ps.Definitions {
		p := tb.(table.PartitionedTable).GetPartition(pd.ID)
		require.NotNil(t, p)
		require.Equal(t, p.GetPhysicalID(), pd.ID)
	}
}
// TestGeneratePartitionExpr checks the upper-bound expressions generated for
// a RANGE partitioned table: each partition yields a lt() comparison, and the
// MAXVALUE partition yields the constant "1" (always true).
func TestGeneratePartitionExpr(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	_, err := tk.Session().Execute(context.Background(), "use test")
	require.NoError(t, err)
	_, err = tk.Session().Execute(context.Background(), "drop table if exists t1;")
	require.NoError(t, err)
	_, err = tk.Session().Execute(context.Background(), `create table t1 (id int)
		partition by range (id) (
		partition p0 values less than (4),
		partition p1 values less than (7),
		partition p3 values less than maxvalue)`)
	require.NoError(t, err)
	tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
	require.NoError(t, err)
	// Access the unexported PartitionExpr through a local interface assertion.
	type partitionExpr interface {
		PartitionExpr() *tables.PartitionExpr
	}
	pe := tbl.(partitionExpr).PartitionExpr()
	upperBounds := []string{
		"lt(t1.id, 4)",
		"lt(t1.id, 7)",
		"1",
	}
	for i, expr := range pe.UpperBounds {
		require.Equal(t, upperBounds[i], expr.String())
	}
}
// TestLocateRangeColumnPartitionErr verifies that inserting a date beyond the
// last RANGE COLUMNS partition fails with ErrNoPartitionForGivenValue.
func TestLocateRangeColumnPartitionErr(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`CREATE TABLE t_month_data_monitor (
		id int(20) NOT NULL AUTO_INCREMENT,
		data_date date NOT NULL,
		PRIMARY KEY (id, data_date)
	) PARTITION BY RANGE COLUMNS(data_date) (
		PARTITION p20190401 VALUES LESS THAN ('2019-04-02'),
		PARTITION p20190402 VALUES LESS THAN ('2019-04-03')
	)`)
	// '2019-04-04' is past the last partition boundary.
	_, err := tk.Exec("INSERT INTO t_month_data_monitor VALUES (4, '2019-04-04')")
	require.True(t, table.ErrNoPartitionForGivenValue.Equal(err))
}
// TestLocateRangePartitionErr verifies that inserting an id beyond the last
// RANGE partition fails with ErrNoPartitionForGivenValue.
func TestLocateRangePartitionErr(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`CREATE TABLE t_range_locate (
		id int(20) NOT NULL AUTO_INCREMENT,
		data_date date NOT NULL,
		PRIMARY KEY (id, data_date)
	) PARTITION BY RANGE(id) (
		PARTITION p0 VALUES LESS THAN (1024),
		PARTITION p1 VALUES LESS THAN (4096)
	)`)
	// 5000 >= 4096, so no partition accepts it.
	_, err := tk.Exec("INSERT INTO t_range_locate VALUES (5000, '2019-04-04')")
	require.True(t, table.ErrNoPartitionForGivenValue.Equal(err))
}
// TestLocatePartitionWithExtraHandle checks that a SELECT ... FOR UPDATE on a
// range-partitioned table works inside an explicit transaction
// (autocommit off), where the extra handle column is involved.
func TestLocatePartitionWithExtraHandle(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`CREATE TABLE t_extra (
		id int(20) NOT NULL AUTO_INCREMENT,
		x int(10) not null,
		PRIMARY KEY (id, x)
	) PARTITION BY RANGE(id) (
		PARTITION p0 VALUES LESS THAN (1024),
		PARTITION p1 VALUES LESS THAN (4096)
	)`)
	tk.MustExec("INSERT INTO t_extra VALUES (1000, 1000), (2000, 2000)")
	tk.MustExec("set autocommit=0;")
	tk.MustQuery("select * from t_extra where id = 1000 for update").Check(testkit.Rows("1000 1000"))
	tk.MustExec("commit")
}
// TestMultiTableUpdate runs a multi-table UPDATE joining a hash-partitioned
// and a range-partitioned table, and checks the resulting ids after
// t_a.id is rewritten to t_a.id + t_b.id.
func TestMultiTableUpdate(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`CREATE TABLE t_a (
		id int(20),
		data_date date
	) partition by hash(id) partitions 10`)
	tk.MustExec(`CREATE TABLE t_b (
		id int(20),
		data_date date
	) PARTITION BY RANGE(id) (
		PARTITION p0 VALUES LESS THAN (2),
		PARTITION p1 VALUES LESS THAN (4),
		PARTITION p2 VALUES LESS THAN (6)
	)`)
	tk.MustExec("INSERT INTO t_a VALUES (1, '2020-08-25'), (2, '2020-08-25'), (3, '2020-08-25'), (4, '2020-08-25'), (5, '2020-08-25')")
	tk.MustExec("INSERT INTO t_b VALUES (1, '2020-08-25'), (2, '2020-08-25'), (3, '2020-08-25'), (4, '2020-08-25'), (5, '2020-08-25')")
	tk.MustExec("update t_a, t_b set t_a.data_date = '2020-08-24', t_a.data_date = '2020-08-23', t_a.id = t_a.id + t_b.id where t_a.id = t_b.id")
	// Each matching id doubles: 1..5 -> 2,4,6,8,10.
	tk.MustQuery("select id from t_a order by id").Check(testkit.Rows("2", "4", "6", "8", "10"))
}
// TestLocatePartitionSingleColumn checks row routing on single-column hash
// and range partitioning, including NULL/default routing (hash sends
// default/NULL ids to p0) and negative values (range sends -1 to p0).
func TestLocatePartitionSingleColumn(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`CREATE TABLE t_hash_locate (
		id int(20),
		data_date date
	) partition by hash(id) partitions 10`)
	tk.MustExec(`CREATE TABLE t_range (
		id int(10) NOT NULL,
		data_date date,
		PRIMARY KEY (id)
	) PARTITION BY RANGE(id) (
		PARTITION p0 VALUES LESS THAN (1),
		PARTITION p1 VALUES LESS THAN (2),
		PARTITION p2 VALUES LESS THAN (4)
	)`)
	// Rows with no values hash to partition p0.
	tk.MustExec("INSERT INTO t_hash_locate VALUES (), (), (), ()")
	tk.MustQuery("SELECT count(*) FROM t_hash_locate PARTITION (p0)").Check(testkit.Rows("4"))
	tk.MustExec("INSERT INTO t_range VALUES (-1, NULL), (1, NULL), (2, NULL), (3, NULL)")
	tk.MustQuery("SELECT count(*) FROM t_range PARTITION (p0)").Check(testkit.Rows("1"))
	tk.MustQuery("SELECT count(*) FROM t_range PARTITION (p1)").Check(testkit.Rows("1"))
	tk.MustQuery("SELECT count(*) FROM t_range PARTITION (p2)").Check(testkit.Rows("2"))
	// 4 is outside every range partition.
	_, err := tk.Exec("INSERT INTO t_range VALUES (4, NULL)")
	require.True(t, table.ErrNoPartitionForGivenValue.Equal(err))
}
// TestLocatePartition checks that partition pruning on a LIST COLUMNS table
// is stable under concurrency: three sessions run the same EXPLAIN and all
// must see the plan pruned to the single watch_event partition.
func TestLocatePartition(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t;")
	tk.MustExec(`CREATE TABLE t (
	id bigint(20) DEFAULT NULL,
	type varchar(255) COLLATE utf8mb4_unicode_ci DEFAULT NULL
	) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
	PARTITION BY LIST COLUMNS(type)
	(PARTITION push_event VALUES IN ("PushEvent"),
	PARTITION watch_event VALUES IN ("WatchEvent")
	)`)
	tk.MustExec(`insert into t values (1,"PushEvent"),(2,"WatchEvent"),(3, "WatchEvent")`)
	tk.MustExec(`analyze table t`)
	tk1 := testkit.NewTestKit(t, store)
	tk2 := testkit.NewTestKit(t, store)
	tk3 := testkit.NewTestKit(t, store)
	tks := []*testkit.TestKit{tk1, tk2, tk3}
	wg := util.WaitGroupWrapper{}
	// Each goroutine runs the same EXPLAIN and expects pruning to watch_event.
	exec := func(tk0 *testkit.TestKit) {
		tk0.MustExec("use test")
		tk0.MustQuery("explain format = 'brief' select id, type from t where type = 'WatchEvent';").Check(testkit.Rows(""+
			`TableReader 2.00 root partition:watch_event data:Selection`,
			`└─Selection 2.00 cop[tikv] eq(test.t.type, "WatchEvent")`,
			` └─TableFullScan 3.00 cop[tikv] table:t keep order:false`))
	}
	run := func(num int) {
		tk := tks[num]
		wg.Run(func() {
			exec(tk)
		})
	}
	for i := 0; i < len(tks); i++ {
		run(i)
	}
	wg.Wait()
}
// TestTimeZoneChange verifies that RANGE(UNIX_TIMESTAMP(...)) partition
// boundaries are evaluated in the session time zone at CREATE TABLE time
// (SHOW CREATE TABLE differs between Asia/Shanghai and UTC), and that rows
// inserted under different time zones land in the corresponding partitions.
func TestTimeZoneChange(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	createTable := `CREATE TABLE timezone_test (
	id int(11) NOT NULL,
	creation_dt timestamp DEFAULT CURRENT_TIMESTAMP ) PARTITION BY RANGE ( ` + "UNIX_TIMESTAMP(`creation_dt`)" + ` )
	( PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2020-01-03 15:10:00') ),
	PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2020-01-03 15:15:00') ),
	PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2020-01-03 15:20:00') ),
	PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2020-01-03 15:25:00') ),
	PARTITION p9 VALUES LESS THAN (MAXVALUE) )`
	tk.MustExec("SET @@time_zone = 'Asia/Shanghai'")
	tk.MustExec(createTable)
	tk.MustQuery("SHOW CREATE TABLE timezone_test").Check(testkit.Rows("timezone_test CREATE TABLE `timezone_test` (\n" +
		" `id` int(11) NOT NULL,\n" +
		" `creation_dt` timestamp DEFAULT CURRENT_TIMESTAMP\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY RANGE (UNIX_TIMESTAMP(`creation_dt`))\n" +
		"(PARTITION `p5` VALUES LESS THAN (1578035400),\n" +
		" PARTITION `p6` VALUES LESS THAN (1578035700),\n" +
		" PARTITION `p7` VALUES LESS THAN (1578036000),\n" +
		" PARTITION `p8` VALUES LESS THAN (1578036300),\n" +
		" PARTITION `p9` VALUES LESS THAN (MAXVALUE))"))
	tk.MustExec("DROP TABLE timezone_test")
	// Note that the result of "show create table" varies with time_zone.
	tk.MustExec("SET @@time_zone = 'UTC'")
	tk.MustExec(createTable)
	tk.MustQuery("SHOW CREATE TABLE timezone_test").Check(testkit.Rows("timezone_test CREATE TABLE `timezone_test` (\n" +
		" `id` int(11) NOT NULL,\n" +
		" `creation_dt` timestamp DEFAULT CURRENT_TIMESTAMP\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY RANGE (UNIX_TIMESTAMP(`creation_dt`))\n" +
		"(PARTITION `p5` VALUES LESS THAN (1578064200),\n" +
		" PARTITION `p6` VALUES LESS THAN (1578064500),\n" +
		" PARTITION `p7` VALUES LESS THAN (1578064800),\n" +
		" PARTITION `p8` VALUES LESS THAN (1578065100),\n" +
		" PARTITION `p9` VALUES LESS THAN (MAXVALUE))"))
	// Change time zone and insert data, check the data locates in the correct partition.
	tk.MustExec("SET @@time_zone = 'Asia/Shanghai'")
	tk.MustExec("INSERT INTO timezone_test VALUES (1,'2020-01-03 15:16:59')")
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p5)").Check(testkit.Rows("1 2020-01-03 15:16:59"))
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p6)").Check(testkit.Rows())
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p7)").Check(testkit.Rows())
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p8)").Check(testkit.Rows())
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p9)").Check(testkit.Rows())
	tk.MustExec("SET @@time_zone = 'UTC'")
	tk.MustExec("INSERT INTO timezone_test VALUES (1,'2020-01-03 15:16:59')")
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p5)").Check(testkit.Rows("1 2020-01-03 07:16:59"))
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p6)").Check(testkit.Rows())
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p7)").Check(testkit.Rows("1 2020-01-03 15:16:59"))
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p8)").Check(testkit.Rows())
	tk.MustQuery("SELECT * FROM timezone_test PARTITION (p9)").Check(testkit.Rows())
}
// TestCreatePartitionTableNotSupport checks that partition expressions
// containing subqueries or row constructors are rejected with
// ErrPartitionFunctionIsNotAllowed.
func TestCreatePartitionTableNotSupport(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	_, err := tk.Exec(`create table t7 (a int) partition by range (mod((select * from t), 5)) (partition p1 values less than (1));`)
	require.True(t, dbterror.ErrPartitionFunctionIsNotAllowed.Equal(err))
	_, err = tk.Exec(`create table t7 (a int) partition by range (1 + (select * from t)) (partition p1 values less than (1));`)
	require.True(t, dbterror.ErrPartitionFunctionIsNotAllowed.Equal(err))
	_, err = tk.Exec(`create table t7 (a int) partition by range (a + row(1, 2, 3)) (partition p1 values less than (1));`)
	require.True(t, dbterror.ErrPartitionFunctionIsNotAllowed.Equal(err))
	_, err = tk.Exec(`create table t7 (a int) partition by range (-(select * from t)) (partition p1 values less than (1));`)
	require.True(t, dbterror.ErrPartitionFunctionIsNotAllowed.Equal(err))
}
// TestRangePartitionUnderNoUnsigned checks that a BIGINT UNSIGNED partition
// expression with negative boundaries is rejected (ErrPartitionConstDomain),
// both before and after enabling NO_UNSIGNED_SUBTRACTION, while plain
// unsigned range partitioning up to 2^64-1 still works.
func TestRangePartitionUnderNoUnsigned(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t2;")
	tk.MustExec("drop table if exists tu;")
	defer tk.MustExec("drop table if exists t2;")
	defer tk.MustExec("drop table if exists tu;")
	tk.MustGetErrCode(`CREATE TABLE tu (c1 BIGINT UNSIGNED) PARTITION BY RANGE(c1 - 10) (
							PARTITION p0 VALUES LESS THAN (-5),
							PARTITION p1 VALUES LESS THAN (0),
							PARTITION p2 VALUES LESS THAN (5),
							PARTITION p3 VALUES LESS THAN (10),
							PARTITION p4 VALUES LESS THAN (MAXVALUE));`, mysql.ErrPartitionConstDomain)
	tk.MustExec("SET @@sql_mode='NO_UNSIGNED_SUBTRACTION';")
	tk.MustExec(`create table t2 (a bigint unsigned) partition by range (a) (
  						partition p1 values less than (0),
  						partition p2 values less than (1),
  						partition p3 values less than (18446744073709551614),
  						partition p4 values less than (18446744073709551615),
  						partition p5 values less than maxvalue);`)
	tk.MustExec("insert into t2 values(10);")
	tk.MustGetErrCode(`CREATE TABLE tu (c1 BIGINT UNSIGNED) PARTITION BY RANGE(c1 - 10) (
							PARTITION p0 VALUES LESS THAN (-5),
							PARTITION p1 VALUES LESS THAN (0),
							PARTITION p2 VALUES LESS THAN (5),
							PARTITION p3 VALUES LESS THAN (10),
							PARTITION p4 VALUES LESS THAN (MAXVALUE));`, mysql.ErrPartitionConstDomain)
}
// TestIntUint checks range-partition routing and predicate evaluation near
// the extremes of both bigint unsigned and bigint signed columns.
func TestIntUint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(`create table t_uint (id bigint unsigned) partition by range (id) (
partition p0 values less than (4294967293),
partition p1 values less than (4294967296),
partition p2 values less than (484467440737095),
partition p3 values less than (18446744073709551614))`)
	tk.MustExec("insert into t_uint values (1)")
	tk.MustExec("insert into t_uint values (4294967294)")
	tk.MustExec("insert into t_uint values (4294967295)")
	tk.MustExec("insert into t_uint values (18446744073709551613)")
	tk.MustQuery("select * from t_uint where id > 484467440737095").Check(testkit.Rows("18446744073709551613"))
	tk.MustQuery("select * from t_uint where id = 4294967295").Check(testkit.Rows("4294967295"))
	tk.MustQuery("select * from t_uint where id < 4294967294").Check(testkit.Rows("1"))
	tk.MustQuery("select * from t_uint where id >= 4294967293 order by id").Check(testkit.Rows("4294967294", "4294967295", "18446744073709551613"))
	tk.MustExec(`create table t_int (id bigint signed) partition by range (id) (
partition p0 values less than (-4294967293),
partition p1 values less than (-12345),
partition p2 values less than (0),
partition p3 values less than (484467440737095),
partition p4 values less than (9223372036854775806))`)
	tk.MustExec("insert into t_int values (-9223372036854775803)")
	tk.MustExec("insert into t_int values (-429496729312)")
	tk.MustExec("insert into t_int values (-1)")
	tk.MustExec("insert into t_int values (4294967295)")
	tk.MustExec("insert into t_int values (9223372036854775805)")
	tk.MustQuery("select * from t_int where id > 484467440737095").Check(testkit.Rows("9223372036854775805"))
	tk.MustQuery("select * from t_int where id = 4294967295").Check(testkit.Rows("4294967295"))
	tk.MustQuery("select * from t_int where id = -4294967294").Check(testkit.Rows())
	tk.MustQuery("select * from t_int where id < -12345 order by id desc").Check(testkit.Rows("-429496729312", "-9223372036854775803"))
}
// TestHashPartitionAndConditionConflict checks joins where the WHERE
// condition contradicts the explicitly selected partition: t2 partition (p1)
// cannot contain a = 6 (hash routes 6 elsewhere), so those scans are empty.
func TestHashPartitionAndConditionConflict(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1, t2, t3;")
	tk.MustExec("create table t1 (a int, b tinyint)  partition by range (a) (" +
		"partition p0 values less than (10)," +
		"partition p1 values less than (20)," +
		"partition p2 values less than (30)," +
		"partition p3 values less than (40)," +
		"partition p4 values less than MAXVALUE" +
		");")
	tk.MustExec("insert into t1 values(NULL, NULL), (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (40, 40), (50, 50), (80, 80), (90, 90), (100, 100);")
	tk.MustExec("create table t2 (a int, b bigint) partition by hash(a) partitions 10;")
	tk.MustExec("insert into t2 values (NULL, NULL), (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23);")
	tk.MustQuery("select /*+ HASH_JOIN(t1, t2) */ * from t1 partition (p0) left join t2 partition (p1) on t1.a = t2.a where t1.a = 6 order by t1.a, t1.b, t2.a, t2.b;").
		Check(testkit.Rows("6 6 <nil> <nil>"))
	tk.MustQuery("select /*+ HASH_JOIN(t1, t2) */ * from t2 partition (p1) left join t1 partition (p0) on t2.a = t1.a where t2.a = 6 order by t1.a, t1.b, t2.a, t2.b;").
		Check(testkit.Rows())
	tk.MustQuery("select * from t2 partition (p1) where t2.a = 6;").Check(testkit.Rows())
}
// TestHashPartitionInsertValue checks that a bit(1) column can be used as a
// hash partitioning key and that rows are retrievable by that key afterwards.
func TestHashPartitionInsertValue(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop tables if exists t4")
	tk.MustExec(`CREATE TABLE t4(
	a bit(1) DEFAULT NULL,
	b int(11) DEFAULT NULL
	) PARTITION BY HASH(a)
	PARTITIONS 3`)
	defer tk.MustExec("drop tables if exists t4")
	tk.MustExec("INSERT INTO t4 VALUES(0, 0)")
	tk.MustExec("INSERT INTO t4 VALUES(1, 1)")
	result := tk.MustQuery("SELECT * FROM t4 WHERE a = 1")
	// bit(1) value 1 renders as the raw byte 0x01.
	result.Check(testkit.Rows("\x01 1"))
}
// TestIssue21574 checks that reserved words (`key`, `table`) quoted with
// backticks are accepted as partitioning columns for range/list columns
// partitioning.
func TestIssue21574(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop tables if exists t_21574")
	tk.MustExec("create table t_21574 (`key` int, `table` int) partition by range columns (`key`) (partition p0 values less than (10));")
	tk.MustExec("drop table t_21574")
	tk.MustExec("create table t_21574 (`key` int, `table` int) partition by list columns (`key`) (partition p0 values in (10));")
	tk.MustExec("drop table t_21574")
	tk.MustExec("create table t_21574 (`key` int, `table` int) partition by list columns (`key`,`table`) (partition p0 values in ((1,1)));")
}
// TestIssue24746 exercises INSERT ... PARTITION (...) ON DUPLICATE KEY UPDATE
// and verifies that a row can neither be written to nor moved into a partition
// outside the explicitly given partition set.
func TestIssue24746(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop tables if exists t_24746")
	tk.MustExec("create table t_24746 (a int, b varchar(60), c int, primary key(a)) partition by range(a) (partition p0 values less than (5),partition p1 values less than (10), partition p2 values less than maxvalue)")
	defer tk.MustExec("drop table t_24746")
	// mustPartitionSetErr asserts that sql fails with ErrRowDoesNotMatchGivenPartitionSet.
	mustPartitionSetErr := func(sql string) {
		err := tk.ExecToErr(sql)
		require.True(t, table.ErrRowDoesNotMatchGivenPartitionSet.Equal(err))
	}
	mustPartitionSetErr("insert into t_24746 partition (p1) values(4,'ERROR, not matching partition p1',4)")
	tk.MustExec("insert into t_24746 partition (p0) values(4,'OK, first row in correct partition',4)")
	mustPartitionSetErr("insert into t_24746 partition (p0) values(4,'DUPLICATE, in p0',4) on duplicate key update a = a + 1, b = 'ERROR, not allowed to write to p1'")
	// Actual bug, before the fix this was updating the row in p0 (deleting it in p0 and inserting in p1):
	mustPartitionSetErr("insert into t_24746 partition (p1) values(4,'ERROR, not allowed to read from partition p0',4) on duplicate key update a = a + 1, b = 'ERROR, not allowed to read from p0!'")
}
// TestIssue31629 verifies that GetPartitionColumnNames reports the partition
// columns for HASH/RANGE/LIST partitioning (directly on a column or via an
// expression over several columns) and that RANGE/LIST COLUMNS only accepts a
// plain single-column list (no expressions, no multi-column list here).
func TestIssue31629(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("set @@tidb_enable_list_partition = 1")
	tk.MustExec("create database Issue31629")
	defer tk.MustExec("drop database Issue31629")
	tk.MustExec("use Issue31629")
	// Test following partition types:
	// HASH, RANGE, LIST:
	// - directly on a single int column
	// - with expression on multiple columns
	// RANGE/LIST COLUMNS single column
	// RANGE/LIST COLUMNS -- Verify that only single column is allowed and no expression
	tests := []struct {
		create string   // CREATE TABLE body, appended to "create table t1 "
		fail   bool     // whether the CREATE is expected to error
		cols   []string // expected partition column names (nil for fail cases)
	}{
		{"(col1 int, col2 varchar(60), col3 int, primary key(col1)) partition by range(col1) (partition p0 values less than (5),partition p1 values less than (10), partition p2 values less than maxvalue)", false, []string{"col1"}},
		{"(Col1 int, col2 varchar(60), col3 int, primary key(Col1,col3)) partition by range(Col1+col3) (partition p0 values less than (5),partition p1 values less than (10), partition p2 values less than maxvalue)", false, []string{"Col1", "col3"}},
		{"(col1 int, col2 varchar(60), col3 int, primary key(col1)) partition by hash(col1) partitions 3", false, []string{"col1"}},
		{"(Col1 int, col2 varchar(60), col3 int, primary key(Col1,col3)) partition by hash(Col1+col3) partitions 3", false, []string{"Col1", "col3"}},
		{"(col1 int, col2 varchar(60), col3 int, primary key(col1)) partition by list(col1) (partition p0 values in (5,6,7,8,9),partition p1 values in (10,11,12,13,14), partition p2 values in (20,21,22,23,24))", false, []string{"col1"}},
		{"(Col1 int, col2 varchar(60), col3 int, primary key(Col1,col3)) partition by list(Col1+col3) (partition p0 values in (5,6,7,8,9),partition p1 values in (10,11,12,13,14), partition p2 values in (20,21,22,23,24))", false, []string{"Col1", "col3"}},
		{`(col1 int, col2 varchar(60), col3 int, primary key(col2)) partition by range columns (col2) (partition p0 values less than (""),partition p1 values less than ("MID"), partition p2 values less than maxvalue)`, false, []string{"col2"}},
		{`(col1 int, col2 varchar(60), col3 int, primary key(col2)) partition by range columns (col2,col3) (partition p0 values less than (""),partition p1 values less than ("MID"), partition p2 values less than maxvalue)`, true, nil},
		{`(col1 int, col2 varchar(60), col3 int, primary key(col2)) partition by range columns (col1+1) (partition p0 values less than (""),partition p1 values less than ("MID"), partition p2 values less than maxvalue)`, true, nil},
		{`(col1 int, col2 varchar(60), col3 int, primary key(col2)) partition by list columns (col2) (partition p0 values in ("","First"),partition p1 values in ("MID","Middle"), partition p2 values in ("Last","Unknown"))`, false, []string{"col2"}},
		{`(col1 int, col2 varchar(60), col3 int, primary key(col2)) partition by list columns (col2,col3) (partition p0 values in ("","First"),partition p1 values in ("MID","Middle"), partition p2 values in ("Last","Unknown"))`, true, nil},
		{`(col1 int, col2 varchar(60), col3 int, primary key(col2)) partition by list columns (col1+1) (partition p0 values in ("","First"),partition p1 values in ("MID","Middle"), partition p2 values in ("Last","Unknown"))`, true, nil},
	}
	for i, tt := range tests {
		createTable := "create table t1 " + tt.create
		res, err := tk.Exec(createTable)
		if res != nil {
			// Drain/close any result set before inspecting the error.
			res.Close()
		}
		if tt.fail {
			// Expected-to-fail definitions must return an error from CREATE TABLE.
			require.Errorf(t, err, "test %d succeeded but was expected to fail! %s", i, createTable)
			continue
		}
		require.NoError(t, err)
		tk.MustQuery("show warnings").Check(testkit.Rows())
		tb, err := dom.InfoSchema().TableByName(model.NewCIStr("Issue31629"), model.NewCIStr("t1"))
		require.NoError(t, err)
		tbp, ok := tb.(table.PartitionedTable)
		require.Truef(t, ok, "test %d does not generate a table.PartitionedTable: %s (%T, %+v)", i, createTable, tb, tb)
		colNames := tbp.GetPartitionColumnNames()
		// Build the expected name list without shadowing the outer test index
		// (the original re-declared `i` here) and without assuming cols is non-empty.
		checkNames := make([]model.CIStr, 0, len(tt.cols))
		for _, col := range tt.cols {
			checkNames = append(checkNames, model.NewCIStr(col))
		}
		require.ElementsMatchf(t, colNames, checkNames, "test %d %s", i, createTable)
		tk.MustExec("drop table t1")
	}
}
// TestExchangePartitionStates walks ALTER TABLE ... EXCHANGE PARTITION through
// its DDL schema states using four sessions: tk2 runs the ALTER in the
// background, tk and tk3 hold transactions pinned at different schema
// versions, and tk4 polls `admin show ddl jobs`. Rows violating the partition
// definition are inserted mid-DDL so the exchange (WITH VALIDATION) must roll
// back, after which both tables are verified to keep their original
// definitions and contents.
func TestExchangePartitionStates(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	dbName := "partSchemaVer"
	tk.MustExec("create database " + dbName)
	tk.MustExec("use " + dbName)
	tk.MustExec(`set @@global.tidb_enable_metadata_lock = ON`)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use " + dbName)
	tk3 := testkit.NewTestKit(t, store)
	tk3.MustExec("use " + dbName)
	tk4 := testkit.NewTestKit(t, store)
	tk4.MustExec("use " + dbName)
	tk.MustExec(`create table t (a int primary key, b varchar(255), key (b))`)
	tk.MustExec(`create table tp (a int primary key, b varchar(255), key (b)) partition by range (a) (partition p0 values less than (1000000), partition p1M values less than (2000000))`)
	tk.MustExec(`insert into t values (1, "1")`)
	tk.MustExec(`insert into tp values (2, "2")`)
	tk.MustExec(`analyze table t,tp`)
	tk.MustExec("BEGIN")
	tk.MustQuery(`select * from t`).Check(testkit.Rows("1 1"))
	tk.MustQuery(`select * from tp`).Check(testkit.Rows("2 2"))
	// The ALTER runs concurrently; its terminal error (or nil) arrives on alterChan.
	alterChan := make(chan error)
	go func() {
		// WITH VALIDATION is the default
		err := tk2.ExecToErr(`alter table tp exchange partition p0 with table t`)
		alterChan <- err
	}()
	// waitFor polls `admin show ddl jobs` until column `pos` of the single
	// matching exchange-partition job equals s. It fails the test immediately
	// if the ALTER completes before the awaited state is observed.
	waitFor := func(tableName, s string, pos int) {
		for {
			select {
			case alterErr := <-alterChan:
				require.Fail(t, "Alter completed unexpectedly", "With error %v", alterErr)
			default:
				// Alter still running
			}
			res := tk4.MustQuery(`admin show ddl jobs where db_name = '` + strings.ToLower(dbName) + `' and table_name = '` + tableName + `' and job_type = 'exchange partition'`).Rows()
			if len(res) == 1 && res[0][pos] == s {
				logutil.BgLogger().Info("Got state", zap.String("State", s))
				break
			}
			gotime.Sleep(50 * gotime.Millisecond)
		}
	}
	waitFor("t", "write only", 4)
	tk3.MustExec(`BEGIN`)
	tk3.MustExec(`insert into t values (4,"4")`)
	// tk3 already sees the write-only schema, so out-of-range rows are rejected.
	tk3.MustContainErrMsg(`insert into t values (1000004,"1000004")`, "[table:1748]Found a row not matching the given partition set")
	tk.MustExec(`insert into t values (5,"5")`)
	// This should fail the alter table!
	tk.MustExec(`insert into t values (1000005,"1000005")`)
	// MDL will block the alter to not continue until all clients
	// are in StateWriteOnly, which tk is blocking until it commits
	tk.MustExec(`COMMIT`)
	waitFor("t", "rollback done", 11)
	// MDL will block the alter from finish, tk is in 'rollbacked' schema version
	// but the alter is still waiting for tk3 to commit, before continuing
	tk.MustExec("BEGIN")
	tk.MustExec(`insert into t values (1000006,"1000006")`)
	tk.MustExec(`insert into t values (6,"6")`)
	tk3.MustExec(`insert into t values (7,"7")`)
	tk3.MustContainErrMsg(`insert into t values (1000007,"1000007")`,
		"[table:1748]Found a row not matching the given partition set")
	tk3.MustExec("COMMIT")
	// The ALTER must terminate with the validation error caused above.
	require.ErrorContains(t, <-alterChan,
		"[ddl:1737]Found a row that does not match the partition")
	tk3.MustExec(`BEGIN`)
	// tk and tk3 observe different row sets because they started their
	// transactions at different schema versions during the rollback.
	tk.MustQuery(`select * from t`).Sort().Check(testkit.Rows(
		"1 1", "1000005 1000005", "1000006 1000006", "5 5", "6 6"))
	tk.MustQuery(`select * from tp`).Sort().Check(testkit.Rows("2 2"))
	tk3.MustQuery(`select * from t`).Sort().Check(testkit.Rows(
		"1 1", "1000005 1000005", "4 4", "5 5", "7 7"))
	tk3.MustQuery(`select * from tp`).Sort().Check(testkit.Rows("2 2"))
	tk.MustContainErrMsg(`insert into t values (7,"7")`,
		"[kv:1062]Duplicate entry '7' for key 't.PRIMARY'")
	tk.MustExec(`insert into t values (8,"8")`)
	tk.MustExec(`insert into t values (1000008,"1000008")`)
	tk.MustExec(`insert into tp values (9,"9")`)
	tk.MustExec(`insert into tp values (1000009,"1000009")`)
	tk3.MustExec(`insert into t values (10,"10")`)
	tk3.MustExec(`insert into t values (1000010,"1000010")`)
	tk3.MustExec(`COMMIT`)
	// After rollback both tables keep their original definitions.
	tk.MustQuery(`show create table tp`).Check(testkit.Rows("" +
		"tp CREATE TABLE `tp` (\n" +
		"  `a` int(11) NOT NULL,\n" +
		"  `b` varchar(255) DEFAULT NULL,\n" +
		"  PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */,\n" +
		"  KEY `b` (`b`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY RANGE (`a`)\n" +
		"(PARTITION `p0` VALUES LESS THAN (1000000),\n" +
		" PARTITION `p1M` VALUES LESS THAN (2000000))"))
	tk.MustQuery(`show create table t`).Check(testkit.Rows("" +
		"t CREATE TABLE `t` (\n" +
		"  `a` int(11) NOT NULL,\n" +
		"  `b` varchar(255) DEFAULT NULL,\n" +
		"  PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */,\n" +
		"  KEY `b` (`b`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	tk.MustExec(`commit`)
	tk.MustExec(`insert into t values (11,"11")`)
	tk.MustExec(`insert into t values (1000011,"1000011")`)
	tk.MustExec(`insert into tp values (12,"12")`)
	tk.MustExec(`insert into tp values (1000012,"1000012")`)
}
// TestAddKeyPartitionStates steps ALTER TABLE ... ADD PARTITION on a
// hash-partitioned table through its schema states (delete only, write only,
// write reorganization, delete reorganization) while transactions in other
// sessions are pinned at the previous state, verifying row visibility and the
// SHOW CREATE TABLE output (3 vs 4 partitions) at each step.
func TestAddKeyPartitionStates(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	dbName := "partSchemaVer"
	tk.MustExec("create database " + dbName)
	tk.MustExec("use " + dbName)
	tk.MustExec(`set @@global.tidb_enable_metadata_lock = ON`)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use " + dbName)
	tk3 := testkit.NewTestKit(t, store)
	tk3.MustExec("use " + dbName)
	tk4 := testkit.NewTestKit(t, store)
	tk4.MustExec("use " + dbName)
	tk.MustExec(`create table t (a int primary key, b varchar(255), key (b)) partition by hash (a) partitions 3`)
	tk.MustExec(`insert into t values (1, "1")`)
	tk.MustExec(`analyze table t`)
	tk.MustExec("BEGIN")
	tk.MustQuery(`select * from t`).Check(testkit.Rows("1 1"))
	tk.MustExec(`insert into t values (2, "2")`)
	// syncChan signals completion of the background ALTER.
	syncChan := make(chan bool)
	go func() {
		tk2.MustExec(`alter table t add partition partitions 1`)
		syncChan <- true
	}()
	// waitFor polls `admin show ddl jobs` until column i of the single matching
	// job row equals s.
	// NOTE(review): unlike TestExchangePartitionStates' waitFor, this loop has
	// no failure/timeout exit — if the DDL job errors out the test spins
	// forever; confirm whether a guard is wanted.
	waitFor := func(i int, s string) {
		for {
			res := tk4.MustQuery(`admin show ddl jobs where db_name = '` + strings.ToLower(dbName) + `' and table_name = 't' and job_type like 'alter table%'`).Rows()
			if len(res) == 1 && res[0][i] == s {
				break
			}
			gotime.Sleep(10 * gotime.Millisecond)
		}
	}
	waitFor(4, "delete only")
	// tk3 starts in "delete only"; it does not yet see tk's uncommitted row (2).
	tk3.MustExec(`BEGIN`)
	tk3.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1"))
	tk3.MustExec(`insert into t values (3,"3")`)
	tk.MustExec(`COMMIT`)
	waitFor(4, "write only")
	tk.MustExec(`BEGIN`)
	tk.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1", "2 2"))
	tk.MustExec(`insert into t values (4,"4")`)
	tk3.MustExec(`COMMIT`)
	waitFor(4, "write reorganization")
	// During write reorganization the old definition (3 partitions) is still shown.
	tk3.MustExec(`BEGIN`)
	tk3.MustQuery(`show create table t`).Check(testkit.Rows("" +
		"t CREATE TABLE `t` (\n" +
		"  `a` int(11) NOT NULL,\n" +
		"  `b` varchar(255) DEFAULT NULL,\n" +
		"  PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */,\n" +
		"  KEY `b` (`b`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY HASH (`a`) PARTITIONS 3"))
	tk3.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1", "2 2", "3 3"))
	tk3.MustExec(`insert into t values (5,"5")`)
	tk.MustExec(`COMMIT`)
	waitFor(4, "delete reorganization")
	// In delete reorganization the new definition (4 partitions) is visible.
	tk.MustExec(`BEGIN`)
	tk.MustQuery(`show create table t`).Check(testkit.Rows("" +
		"t CREATE TABLE `t` (\n" +
		"  `a` int(11) NOT NULL,\n" +
		"  `b` varchar(255) DEFAULT NULL,\n" +
		"  PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */,\n" +
		"  KEY `b` (`b`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY HASH (`a`) PARTITIONS 4"))
	tk.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1", "2 2", "3 3", "4 4"))
	tk.MustExec(`insert into t values (6,"6")`)
	tk3.MustExec(`COMMIT`)
	tk.MustExec(`COMMIT`)
	<-syncChan
	// All rows written across the schema states survive the ADD PARTITION.
	tk.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1", "2 2", "3 3", "4 4", "5 5", "6 6"))
}
// compoundSQL describes one SELECT to run against a partitioned table,
// together with the expectations to verify on its result and EXPLAIN output.
type compoundSQL struct {
	selectSQL        string   // the count(*) query to execute
	point            bool     // EXPLAIN output must contain "Point_Get"
	batchPoint       bool     // EXPLAIN output must contain "Batch_Point_Get"
	pruned           bool     // verify partition pruning via the partition lists below
	executeExplain   bool     // whether to run the EXPLAIN checks at all
	usedPartition    []string // substrings (e.g. "partition:p0") expected in EXPLAIN output
	notUsedPartition []string // substrings expected to be absent from EXPLAIN output
	rowCount         int      // expected result of the count(*) query
}
// partTableCase pairs one PARTITION BY clause with the queries used to verify
// the resulting table (consumed by executePartTableCase).
type partTableCase struct {
	partitionbySQL string        // "PARTITION BY ..." suffix appended to a shared CREATE TABLE
	selectInfo     []compoundSQL // queries and expectations to run against the table
}
// executePartTableCase runs every test case in turn: create the table with the
// case's PARTITION BY clause, load the shared fixture rows, execute each SELECT
// with its row-count and EXPLAIN expectations, then drop the table again.
func executePartTableCase(t *testing.T, tk *testkit.TestKit, testCases []partTableCase,
	createSQL string, insertSQLs []string, dropSQL string) {
	for caseIdx, tc := range testCases {
		// create table ... partition by key ...
		ddl := createSQL + tc.partitionbySQL
		logutil.BgLogger().Info("Partition DDL test", zap.Int("i", caseIdx), zap.String("ddlSQL", ddl))
		executeSQLWrapper(t, tk, ddl)
		// Load the shared fixture rows.
		for _, ins := range insertSQLs {
			executeSQLWrapper(t, tk, ins)
		}
		// Run every SELECT and verify its expectations.
		for selIdx, sel := range tc.selectInfo {
			logutil.BgLogger().Info("Select", zap.Int("j", selIdx), zap.String("selectSQL", sel.selectSQL))
			tk.MustQuery(sel.selectSQL).Check(testkit.Rows(strconv.Itoa(sel.rowCount)))
			if !sel.executeExplain {
				continue
			}
			explain := tk.MustQuery("EXPLAIN " + sel.selectSQL)
			if sel.point {
				explain.CheckContain("Point_Get")
			}
			if sel.batchPoint {
				explain.CheckContain("Batch_Point_Get")
			}
			if sel.pruned {
				for _, part := range sel.usedPartition {
					explain.CheckContain(part)
				}
				for _, part := range sel.notUsedPartition {
					explain.CheckNotContain(part)
				}
			}
		}
		executeSQLWrapper(t, tk, dropSQL)
	}
}
// executeSQLWrapper executes SQLString, closing any returned result set, and
// fails the test if execution returned an error.
func executeSQLWrapper(t *testing.T, tk *testkit.TestKit, SQLString string) {
	res, err := tk.Exec(SQLString)
	if res != nil {
		// Result sets must be closed even when the statement succeeded.
		res.Close()
	}
	// NoError is the testify idiom for errors: it prints the error text on
	// failure and rejects typed-nil errors, unlike the reflection-based Nil.
	require.NoError(t, err)
}
// TestKeyPartitionTableBasic runs count(*)/EXPLAIN checks against a series of
// KEY-partitioned tables (single- and multi-column keys over INT, VARCHAR and
// CHAR columns) to verify row distribution across partitions, Point_Get /
// Batch_Point_Get plan selection and partition pruning.
func TestKeyPartitionTableBasic(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database partitiondb")
	defer tk.MustExec("drop database partitiondb")
	tk.MustExec("use partitiondb")
	testCases := []struct {
		createSQL  string       // table definition including PARTITION BY KEY
		dropSQL    string       // cleanup statement run after the case
		insertSQL  string       // fixture rows
		selectInfo []compoundSQL // queries with expectations; positional fields are
		// selectSQL, point, batchPoint, pruned, executeExplain,
		// usedPartition, notUsedPartition, rowCount
	}{
		// tkey0: single INT key column with a unique key.
		{
			createSQL: "CREATE TABLE tkey0 (col1 INT NOT NULL, col2 DATE NOT NULL, col3 INT NOT NULL, col4 INT NOT NULL,UNIQUE KEY (col3)) PARTITION BY KEY(col3) PARTITIONS 4",
			insertSQL: "INSERT INTO tkey0 VALUES(1, '2023-02-22', 1, 1), (2, '2023-02-22', 2, 2), (3, '2023-02-22', 3, 3), (4, '2023-02-22', 4, 4)",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey0",
					false, false, false, false, []string{}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey0 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey0 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey0 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 0,
				},
				{
					"SELECT count(*) FROM tkey0 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey0 WHERE col3 = 3",
					true, false, true, true, []string{"partition:p3"}, []string{"partition:p0", "partition:p1", "partition:p2"}, 1,
				},
				{
					"SELECT count(*) FROM tkey0 WHERE col3 = 3 or col3 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p3"}, []string{"partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey0 WHERE col3 >1 AND col3 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p0", "partition:p2"}, 2,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey0",
		},
		// tkey7: two-column KEY (col3,col1) backed by a unique key.
		{
			createSQL: "CREATE TABLE tkey7 (col1 INT NOT NULL, col2 DATE NOT NULL, col3 INT NOT NULL, col4 INT NOT NULL,UNIQUE KEY (col3,col1)) PARTITION BY KEY(col3,col1) PARTITIONS 4",
			insertSQL: "INSERT INTO tkey7 VALUES(1, '2023-02-22', 1, 1), (1, '2023-02-22', 2, 1),(2, '2023-02-22', 2, 2), (3, '2023-02-22', 3, 3), (4, '2023-02-22', 4, 4),(4, '2023-02-22', 5, 4)",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey7",
					false, false, false, false, []string{}, []string{}, 6,
				},
				{
					"SELECT count(*) FROM tkey7 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey7 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey7 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey7 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey7 WHERE col3 = 3",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey7 WHERE col3 = 3 and col1 = 3",
					true, false, true, true, []string{"partition:p1"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey7 WHERE col3 = 3 or col3 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey7 WHERE col3 = 3 and col1 = 3 OR col3 = 4 and col1 = 4",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey7 WHERE col1>1 and col3 >1 AND col3 < 4 and col1<3",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey7",
		},
		// tkey8: two-column KEY (col3,col1) backed by the primary key.
		{
			createSQL: "CREATE TABLE tkey8 (col1 INT NOT NULL, col2 DATE NOT NULL, col3 INT NOT NULL, col4 INT NOT NULL,PRIMARY KEY (col3,col1)) PARTITION BY KEY(col3,col1) PARTITIONS 4",
			insertSQL: "INSERT INTO tkey8 VALUES(1, '2023-02-22', 111, 1), (1, '2023-02-22', 2, 1),(2, '2023-02-22', 218, 2), (3, '2023-02-22', 3, 3), (4, '2023-02-22', 4, 4),(4, '2023-02-22', 5, 4),(5, '2023-02-22', 5, 5),(5, '2023-02-22', 50, 2),(6, '2023-02-22', 62, 2),(60, '2023-02-22', 6, 5),(70, '2023-02-22', 50, 2),(80, '2023-02-22', 62, 2),(100, '2023-02-22', 62, 2),(2000, '2023-02-22', 6, 5),(400, '2023-02-22', 50, 2),(90, '2023-02-22', 62, 2)",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey8",
					false, false, false, false, []string{}, []string{}, 16,
				},
				{
					"SELECT count(*) FROM tkey8 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey8 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 7,
				},
				{
					"SELECT count(*) FROM tkey8 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 3,
				},
				{
					"SELECT count(*) FROM tkey8 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey8 WHERE col3 = 3",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey8 WHERE col3 = 3 and col1 = 3",
					true, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey8 WHERE col3 = 3 or col3 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey8 WHERE col3 = 3 and col1 = 3 OR col3 = 4 and col1 = 4",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey8 WHERE col1>1 and col3 >1 AND col3 < 4 and col1<3",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 0,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey8",
		},
		// tkey6: single VARCHAR key column with a unique key.
		{
			createSQL: "CREATE TABLE tkey6 (col1 INT NOT NULL, col2 DATE NOT NULL, col3 VARCHAR(12) NOT NULL, col4 INT NOT NULL,UNIQUE KEY (col3)) PARTITION BY KEY(col3) PARTITIONS 4",
			insertSQL: "INSERT INTO tkey6 VALUES(1, '2023-02-22', 'linpin', 1), (2, '2023-02-22', 'zhangsan', 2), (3, '2023-02-22', 'anqila', 3), (4, '2023-02-22', 'xingtian', 4),(1, '2023-02-22', 'renleifeng', 5), (2, '2023-02-22', 'peilin', 2),(1, '2023-02-22', 'abcdeeg', 7), (2, '2023-02-22', 'rpstdfed', 8)",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey6",
					false, false, false, false, []string{}, []string{}, 8,
				},
				{
					"SELECT count(*) FROM tkey6 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey6 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey6 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey6 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey6 WHERE col3 = 'linpin'",
					true, false, true, true, []string{"partition:p3"}, []string{"partition:p0", "partition:p1", "partition:p2"}, 1,
				},
				{
					"SELECT count(*) FROM tkey6 WHERE col3 = 'zhangsan' or col3 = 'linpin'",
					true, true, true, true, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey6 WHERE col3 > 'linpin' AND col3 < 'qing'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey6",
		},
		// tkey2: KEY(KHH) where KHH is the middle column of the primary key.
		{
			createSQL: "CREATE TABLE tkey2 (JYRQ INT not null,KHH VARCHAR(12) not null,ZJZH CHAR(14) not null,primary key (JYRQ, KHH, ZJZH))PARTITION BY KEY(KHH) partitions 4",
			insertSQL: "INSERT INTO tkey2 VALUES(1,'nanjing','025'),(2,'huaian','0517'),(3,'zhenjiang','0518'),(4,'changzhou','0519'),(5,'wuxi','0511'),(6,'suzhou','0512'),(7,'xuzhou','0513'),(8,'suqian','0513'),(9,'lianyungang','0514'),(10,'yangzhou','0515'),(11,'taizhou','0516'),(12,'nantong','0520'),(13,'yancheng','0521'),(14,'NANJING','025'),(15,'HUAIAN','0527'),(16,'ZHENJIANG','0529'),(17,'CHANGZHOU','0530')",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey2",
					false, false, false, false, []string{}, []string{}, 17,
				},
				{
					"SELECT count(*) FROM tkey2 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey2 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey2 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey2 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 6,
				},
				{
					"SELECT count(*) FROM tkey2 WHERE KHH = 'huaian'",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p0", "partition:p1", "partition:p2"}, 1,
				},
				{
					"SELECT count(*) FROM tkey2 WHERE KHH = 'huaian' or KHH = 'zhenjiang'",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p0", "partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey2 WHERE KHH > 'nanjing' AND KHH < 'suzhou'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 2,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey2",
		},
		// tkey5: same data as tkey2 but KHH is the leading primary-key column.
		{
			createSQL: "CREATE TABLE tkey5 (JYRQ INT not null,KHH VARCHAR(12) not null,ZJZH CHAR(14) not null,primary key (KHH, JYRQ, ZJZH))PARTITION BY KEY(KHH) partitions 4",
			insertSQL: "INSERT INTO tkey5 VALUES(1,'nanjing','025'),(2,'huaian','0517'),(3,'zhenjiang','0518'),(4,'changzhou','0519'),(5,'wuxi','0511'),(6,'suzhou','0512'),(7,'xuzhou','0513'),(8,'suqian','0513'),(9,'lianyungang','0514'),(10,'yangzhou','0515'),(11,'taizhou','0516'),(12,'nantong','0520'),(13,'yancheng','0521'),(14,'NANJING','025'),(15,'HUAIAN','0527'),(16,'ZHENJIANG','0529'),(17,'CHANGZHOU','0530')",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey5",
					false, false, false, false, []string{}, []string{}, 17,
				},
				{
					"SELECT count(*) FROM tkey5 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey5 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey5 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey5 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 6,
				},
				{
					"SELECT count(*) FROM tkey5 WHERE KHH = 'huaian'",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p0", "partition:p1", "partition:p2"}, 1,
				},
				{
					"SELECT count(*) FROM tkey5 WHERE KHH = 'huaian' or KHH = 'zhenjiang'",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p0", "partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey5 WHERE KHH > 'nanjing' AND KHH < 'suzhou'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 2,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey5",
		},
		// tkey4: two-column KEY (JYRQ, KHH) of mixed INT/VARCHAR types.
		{
			createSQL: "CREATE TABLE tkey4 (JYRQ INT not null,KHH VARCHAR(12) not null,ZJZH CHAR(14) not null,primary key (JYRQ, KHH, ZJZH))PARTITION BY KEY(JYRQ, KHH) partitions 4",
			insertSQL: "INSERT INTO tkey4 VALUES(1,'nanjing','025'),(2,'huaian','0517'),(3,'zhenjiang','0518'),(4,'changzhou','0519'),(5,'wuxi','0511'),(6,'suzhou','0512'),(7,'xuzhou','0513'),(8,'suqian','0513'),(9,'lianyungang','0514'),(10,'yangzhou','0515'),(11,'taizhou','0516'),(12,'nantong','0520'),(13,'yancheng','0521'),(14,'NANJING','025'),(15,'HUAIAN','0527'),(16,'ZHENJIANG','0529'),(17,'CHANGZHOU','0530'),(1,'beijing','010'),(2,'beijing','010'),(2,'zzzzwuhan','027')",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey4",
					false, false, false, false, []string{}, []string{}, 20,
				},
				{
					"SELECT count(*) FROM tkey4 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 7,
				},
				{
					"SELECT count(*) FROM tkey4 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey4 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey4 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE KHH = 'huaian'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE JYRQ = 2",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 3,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE KHH = 'huaian' and JYRQ = 2",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE KHH = 'huaian' and JYRQ = 2 or KHH = 'zhenjiang' and JYRQ = 3",
					false, false, true, true, []string{"partition:p0", "partition:p1"}, []string{"partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE KHH = 'huaian' and JYRQ = 2 or KHH = 'zhenjiang' and JYRQ = 3 or KHH = 'HUAIAN' and JYRQ = 15",
					false, false, true, true, []string{"partition:p0", "partition:p1"}, []string{"partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE KHH = 'huaian' or KHH = 'zhenjiang'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE JYRQ = 2 OR JYRQ = 3",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE JYRQ = 2 OR JYRQ = 3 OR JYRQ = 15",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE JYRQ >6 AND JYRQ < 10",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 3,
				},
				{
					"SELECT count(*) FROM tkey4 WHERE JYRQ >6 and KHH>'lianyungang' AND JYRQ < 10 and KHH<'xuzhou'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey4",
		},
		// tkey9: three-column KEY covering the whole primary key.
		{
			createSQL: "CREATE TABLE tkey9 (JYRQ INT not null,KHH VARCHAR(12) not null,ZJZH CHAR(14) not null,primary key (JYRQ, KHH, ZJZH))PARTITION BY KEY(JYRQ, KHH, ZJZH) partitions 4",
			insertSQL: "INSERT INTO tkey9 VALUES(1,'nanjing','025'),(2,'huaian','0517'),(3,'zhenjiang','0518'),(4,'changzhou','0519'),(5,'wuxi','0511'),(6,'suzhou','0512'),(7,'xuzhou','0513'),(8,'suqian','0513'),(9,'lianyungang','0514'),(10,'yangzhou','0515'),(11,'taizhou','0516'),(12,'nantong','0520'),(13,'yancheng','0521'),(14,'NANJING','025'),(15,'HUAIAN','0527'),(16,'ZHENJIANG','0529'),(17,'CHANGZHOU','0530'),(1,'beijing','010'),(2,'beijing','010'),(2,'zzzzwuhan','027')",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey9",
					false, false, false, false, []string{}, []string{}, 20,
				},
				{
					"SELECT count(*) FROM tkey9 PARTITION(p0)",
					false, false, false, false, []string{}, []string{}, 6,
				},
				{
					"SELECT count(*) FROM tkey9 PARTITION(p1)",
					false, false, false, false, []string{}, []string{}, 3,
				},
				{
					"SELECT count(*) FROM tkey9 PARTITION(p2)",
					false, false, false, false, []string{}, []string{}, 3,
				},
				{
					"SELECT count(*) FROM tkey9 PARTITION(p3)",
					false, false, false, false, []string{}, []string{}, 8,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE KHH = 'huaian' and JYRQ = 2 and ZJZH = '0517'",
					true, false, true, true, []string{"partition:p0"}, []string{"partition:p3", "partition:p1", "partition:p2"}, 1,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE KHH = 'huaian' and JYRQ = 2",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE JYRQ = 2",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 3,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE KHH = 'huaian' and JYRQ = 2 and ZJZH='0517' or KHH = 'zhenjiang' and JYRQ = 3 and ZJZH = '0518'",
					false, false, true, true, []string{"partition:p3", "partition:p0"}, []string{"partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE KHH = 'huaian' and JYRQ = 2 and ZJZH='0517' or KHH = 'zhenjiang' and JYRQ = 3 and ZJZH = '0518' or KHH = 'NANJING' and JYRQ = 14 and ZJZH = '025'",
					false, false, true, true, []string{"partition:p0", "partition:p3"}, []string{"partition:p2", "partition:p1"}, 3,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE KHH = 'huaian' or KHH = 'zhenjiang'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 2,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE JYRQ = 2 OR JYRQ = 3",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 4,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE JYRQ = 2 OR JYRQ = 3 OR JYRQ = 15",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE JYRQ >6 AND JYRQ < 10",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 3,
				},
				{
					"SELECT count(*) FROM tkey9 WHERE JYRQ = 2 and KHH = 'huaian' OR JYRQ = 3 and KHH = 'zhenjiang'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 2,
				},
			},
			dropSQL: "DROP TABLE IF EXISTS tkey9",
		},
	}
	// Execute each case: create, insert, verify every SELECT, then drop.
	for i, testCase := range testCases {
		logutil.BgLogger().Info("Partition DDL test", zap.Int("i", i), zap.String("createSQL", testCase.createSQL))
		executeSQLWrapper(t, tk, testCase.createSQL)
		executeSQLWrapper(t, tk, testCase.insertSQL)
		for j, selInfo := range testCase.selectInfo {
			logutil.BgLogger().Info("Select", zap.Int("j", j), zap.String("selectSQL", selInfo.selectSQL))
			tk.MustQuery(selInfo.selectSQL).Check(testkit.Rows(strconv.Itoa(selInfo.rowCount)))
			if selInfo.executeExplain {
				result := tk.MustQuery("EXPLAIN " + selInfo.selectSQL)
				if selInfo.point {
					result.CheckContain("Point_Get")
				}
				if selInfo.batchPoint {
					result.CheckContain("Batch_Point_Get")
				}
				if selInfo.pruned {
					// NOTE(review): unlike executePartTableCase, notUsedPartition is
					// never checked here. Some cases above list the same partition in
					// both lists (e.g. tkey7's "col3 = 3 and col1 = 3"), so adding a
					// CheckNotContain would fail — confirm whether the asymmetry is
					// intentional.
					for _, part := range selInfo.usedPartition {
						result.CheckContain(part)
					}
				}
			}
		}
		executeSQLWrapper(t, tk, testCase.dropSQL)
	}
}
// TestKeyPartitionTableAllFeildType exercises KEY partitioning across every
// supported partition-column type family — numeric, date/time, and string —
// by repartitioning a table on each candidate column in turn and checking
// both the expected row counts and the partition pruning reported by EXPLAIN
// (via executePartTableCase, defined elsewhere in this file).
//
// NOTE(review): "Feild" is a typo for "Field" in the test name; renaming it
// would change the `go test -run` interface, so it is left as-is.
func TestKeyPartitionTableAllFeildType(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database partitiondb3")
	defer tk.MustExec("drop database partitiondb3")
	tk.MustExec("use partitiondb3")
	// partition column is numeric family
	// One column per numeric type (BIT, TINYINT, BOOL, SMALLINT, MEDIUMINT,
	// INT, BIGINT, DECIMAL, FLOAT, DOUBLE); all are part of the primary key
	// so each can be used as a KEY partition column.
	createSQL := "create table tkey_numeric(\n" +
		"id1 BIT(8) not null,\n" +
		"id2 TINYINT not null,\n" +
		"id3 BOOL not null,\n" +
		"id4 SMALLINT not null,\n" +
		"id5 MEDIUMINT not null,\n" +
		"id6 INT not null,\n" +
		"id7 BIGINT not null,\n" +
		"id8 DECIMAL(12,4) not null,\n" +
		"id9 FLOAT not null,\n" +
		"id10 DOUBLE not null,\n" +
		"name varchar(20),\n" +
		"primary key(id1,id2,id3,id4,id5,id6,id7,id8,id9,id10)\n" +
		")\n"
	dropSQL := "drop table tkey_numeric"
	insertSQLS := []string{
		"INSERT INTO tkey_numeric VALUES(1,1,0,1,1,1,1,1.1,120.1,367.45,'linpin'),(12,12,12,12,12,12,12,12.1,1220.1,3267.45,'anqila')",
		"INSERT INTO tkey_numeric VALUES(0,2,1,2,2,2,2,2.78,16.78,17.25,'ring'),(33,33,33,33,33,33,33,33.78,336.78,37.25,'black')",
		"INSERT INTO tkey_numeric VALUES(2,3,1,3,3,3,3,3.78,26.78,417.25,'liudehua'),(22,23,21,23,23,23,23,32.78,26.72,27.15,'chenchen')",
		"INSERT INTO tkey_numeric VALUES(3,3,2,4,4,4,4,4.78,46.48,89.35,'guofucheng'), (4,4,4,5,5,5,5,5.78,56.48,59.35,'zhangxuyou')",
		"INSERT INTO tkey_numeric VALUES(5,5,5,5,5,5,5,5.78,56.48,59.35,'xietingfeng'),(34,34,34,34,34,34,34,34.78,346.78,34.25,'dongxu')",
		"INSERT INTO tkey_numeric VALUES(250,120,120,250,250,258,348,38.78,186.48,719.35,'chenguanxi'),(35,35,35,250,35,35,35,35.78,356.48,35.35,'chenguanxi')",
	}
	// One partTableCase per partition column. Each compoundSQL entry is a
	// positional literal: the SELECT statement, four bool flags, the
	// partitions expected in the EXPLAIN output, the partitions expected to
	// be pruned away, and the expected row count.
	// NOTE(review): the exact flag order (point / batchPoint /
	// executeExplain / pruned) is fixed by the compoundSQL struct declared
	// elsewhere in this file — confirm against that definition before
	// editing these literals.
	testCases := []partTableCase{
		{
			partitionbySQL: "PARTITION BY KEY(id1) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 5,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id1 = 3",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id1 = 3 or id1 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p3"}, []string{"partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id1 >1 AND id1 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p2", "partition:p0"}, 2,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id2) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id2 = 3",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p0", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id2 = 3 or id2 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p3"}, []string{"partition:p1", "partition:p2"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id2 >1 AND id2 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p0", "partition:p2"}, 3,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id3) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p0", "partition:p1", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id3 = 5",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id3 = 5 or id3 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id3 >1 AND id3 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p2", "partition:p0"}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id4) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 5,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id4 = 5",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id4 = 5 or id4 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id4 >1 AND id4 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p0", "partition:p2"}, 2,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id5) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p3", "partition:p0"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id5 = 5",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id5 = 5 or id5 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id5 >1 AND id5 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p2", "partition:p0"}, 2,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id6) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id6 = 5",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id6 = 5 or id6 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id6 >1 AND id6 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p2", "partition:p0"}, 2,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id7) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id7 = 5",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id7 = 5 or id7 = 4",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id7 >1 AND id7 < 4",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p2", "partition:p0"}, 2,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id8) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id8 = 1.1",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p2", "partition:p0", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id8 = 1.1 or id8 = 33.78",
					false, false, true, true, []string{"partition:p0", "partition:p1"}, []string{"partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id8 >1 AND id8 < 4",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 3,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id9) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				// FLOAT equality never matches here (inexact binary
				// representation), so these two probes expect zero rows.
				{
					"SELECT count(*) FROM tkey_numeric WHERE id9 = 46.48",
					false, false, true, true, []string{}, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id9 = 46.48 or id9 = 336.78",
					false, false, true, true, []string{}, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id9 >45 AND id9 < 47",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id10) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_numeric",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 12,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_numeric PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id10 = 46.48",
					false, false, true, true, []string{}, []string{}, 0,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id10 = 46.48 or id10 = 336.78",
					false, false, true, true, []string{}, []string{}, 0,
				},
				{
					"SELECT count(*) FROM tkey_numeric WHERE id10 >366 AND id10 < 368",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
		},
	}
	executePartTableCase(t, tk, testCases, createSQL, insertSQLS, dropSQL)
	// partition column is date/time family
	// DATE, TIME, DATETIME, TIMESTAMP and YEAR columns; TIME (id2) is not
	// exercised as a partition column below.
	createSQL2 := "create table tkey_datetime(\n" +
		"id1 DATE not null,\n" +
		"id2 TIME not null,\n" +
		"id3 DATETIME not null,\n" +
		"id4 TIMESTAMP not null,\n" +
		"id5 YEAR not null,\n" +
		"name varchar(20),\n" +
		"primary key(id1, id2, id3, id4, id5)\n" +
		")\n"
	dropSQL2 := "drop table tkey_datetime"
	insertSQLS2 := []string{
		"insert into tkey_datetime values('2012-04-10', '12:12:12', '2012-04-10 12:12:12', '2012-04-10 12:12:12.12345', 2012, 'linpin')",
		"insert into tkey_datetime values('2013-05-11', '13:13:13', '2013-05-11 13:13:13', '2013-05-11 13:13:13.43133', 2013, 'minghua')",
		"insert into tkey_datetime values('2014-06-12', '14:14:14', '2014-06-12 14:14:14', '2014-06-12 14:14:14.32344', 2014, 'oyangfeng')",
		"insert into tkey_datetime values('2015-07-13', '15:15:15', '2015-07-13 15:15:15', '2015-07-13 15:15:15.42544', 2015, 'pengdehuai')",
		"insert into tkey_datetime values('2021-08-14', '16:16:16', '2021-08-14 16:16:16', '2021-08-14 16:16:16.18945', 2021, 'shenwanshan')",
		"insert into tkey_datetime values('2022-12-23', '23:12:15', '2022-12-23 23:12:15', '2022-12-23 23:12:15.43133', 2022, 'tangchap')",
		"insert into tkey_datetime values('2023-01-12', '20:38:14', '2023-01-12 20:38:14', '2023-01-12 20:38:14.32344', 2023, 'xinyu')",
		"insert into tkey_datetime values('2018-07-13', '07:15:15', '2018-07-13 07:15:15', '2018-07-13 07:15:15.42544', 2018, 'zongyang')",
		"insert into tkey_datetime values('1980-01-30', '00:12:15', '1980-01-30 00:12:15', '1980-01-30 00:12:15.42544', 1980, 'MAYUWEI')",
		"insert into tkey_datetime values('1980-03-30', '00:13:15', '1980-03-30 00:13:15', '1980-03-30 00:13:15.42544', 1980, 'maqinwei')",
	}
	testCases2 := []partTableCase{
		{
			partitionbySQL: "PARTITION BY KEY(id1) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_datetime",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 10,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id1 = '2012-04-10'",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id1 = '2012-04-10' or id1 = '2018-07-13'",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id1 >'2012-04-10' AND id1 < '2014-04-10'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id3) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_datetime",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 10,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id3 = '2012-04-10 12:12:12'",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id3 = '2012-04-10 12:12:12' or id3 = '2021-08-14 16:16:16'",
					false, false, true, true, []string{"partition:p3", "partition:p1"}, []string{"partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id3 >'2012-04-10 12:12:12' AND id3 < '2014-04-10 12:12:12'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id4) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_datetime",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 10,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 4,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id4 = '2012-04-10 12:12:12'",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id4 = '2012-04-10 12:12:12' or id4 = '2021-08-14 16:16:16'",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p0", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id4 >'2012-04-10 12:12:12' AND id4 < '2014-04-10 12:12:12'",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id5) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_datetime",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 10,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_datetime PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id5 = 2012",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id5 = 2012 or id5 = 2018",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_datetime WHERE id5 >2012 AND id5 < 2014",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p3", "partition:p0"}, 1,
				},
			},
		},
	}
	executePartTableCase(t, tk, testCases2, createSQL2, insertSQLS2, dropSQL2)
	// partition column is string family
	// CHAR, VARCHAR, BINARY, VARBINARY, ENUM and SET are valid partition
	// columns (all in the primary key); BLOB/TEXT (id5, id6) are present but
	// deliberately excluded from the key and from the cases below.
	createSQL3 := "create table tkey_string(\n" +
		"id1 CHAR(16) not null,\n" +
		"id2 VARCHAR(16) not null,\n" +
		"id3 BINARY(16) not null,\n" +
		"id4 VARBINARY(16) not null,\n" +
		"id5 BLOB not null,\n" +
		"id6 TEXT not null,\n" +
		"id7 ENUM('x-small', 'small', 'medium', 'large', 'x-large') not null,\n" +
		"id8 SET ('a', 'b', 'c', 'd') not null,\n" +
		"name varchar(16),\n" +
		"primary key(id1, id2, id3, id4, id7, id8)\n" +
		")\n"
	dropSQL3 := "drop table tkey_string"
	insertSQLS3 := []string{
		"INSERT INTO tkey_string VALUES('huaian','huaian','huaian','huaian','huaian','huaian','x-small','a','linpin')",
		"INSERT INTO tkey_string VALUES('nanjing','nanjing','nanjing','nanjing','nanjing','nanjing','small','b','linpin')",
		"INSERT INTO tkey_string VALUES('zhenjiang','zhenjiang','zhenjiang','zhenjiang','zhenjiang','zhenjiang','medium','c','linpin')",
		"INSERT INTO tkey_string VALUES('suzhou','suzhou','suzhou','suzhou','suzhou','suzhou','large','d','linpin')",
		"INSERT INTO tkey_string VALUES('wuxi','wuxi','wuxi','wuxi','wuxi','wuxi','x-large','a','linpin')",
	}
	testCases3 := []partTableCase{
		{
			partitionbySQL: "PARTITION BY KEY(id1) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_string",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id1 = 'huaian'",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p0", "partition:p2"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id1 = 'huaian' or id1 = 'suzhou'",
					false, false, true, true, []string{"partition:p3", "partition:p0"}, []string{"partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id1 >'huaian' AND id1 < 'suzhou'",
					false, false, true, true, []string{"partition:p1", "partition:p2", "partition:p0", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id2) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_string",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id2 = 'huaian'",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id2 = 'huaian' or id2 = 'suzhou'",
					false, false, true, true, []string{"partition:p3", "partition:p0"}, []string{"partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id2 >'huaian' AND id2 < 'suzhou'",
					false, false, true, true, []string{"partition:p1", "partition:p2", "partition:p0", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id3) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_string",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 1,
				},
				// BINARY(16) values are zero-padded, so lookups use full-width
				// hex literals (e.g. 'suzhou' padded to 16 bytes).
				{
					"SELECT count(*) FROM tkey_string WHERE id3 = 0x73757A686F7500000000000000000000",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id3 = 0x73757A686F7500000000000000000000 or id3 = 0x6E616E6A696E67000000000000000000",
					false, false, true, true, []string{"partition:p0", "partition:p1"}, []string{"partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id3 >0x67756169616E00000000000000000000 AND id3 < 0x6E616E6A696E67000000000000000000",
					false, false, true, true, []string{"partition:p1", "partition:p0", "partition:p2", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id4) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_string",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				// VARBINARY is not padded, so the hex literals are the raw
				// bytes of 'huaian' / 'suzhou' / 'wuxi'.
				{
					"SELECT count(*) FROM tkey_string WHERE id4 = 0x68756169616E",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p0", "partition:p2"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id4 = 0x68756169616E or id4 = 0x73757A686F75",
					false, false, true, true, []string{"partition:p3", "partition:p0"}, []string{"partition:p1", "partition:p2"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id4 >0x73757A686F75 AND id4 < 0x78757869",
					false, false, true, true, []string{"partition:p1", "partition:p2", "partition:p0", "partition:p3"}, []string{}, 1,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id7) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_string",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id7 = 'x-small'",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id7 = 'x-small' or id7 = 'large'",
					false, false, true, true, []string{"partition:p0", "partition:p2"}, []string{"partition:p1", "partition:p3"}, 2,
				},
				// ENUM comparisons use the enum's declared order, not
				// lexicographic string order.
				{
					"SELECT count(*) FROM tkey_string WHERE id7 > 'large' AND id7 < 'x-small'",
					false, false, true, true, []string{"partition:p1", "partition:p0", "partition:p3"}, []string{"partition:p2"}, 3,
				},
			},
		},
		{
			partitionbySQL: "PARTITION BY KEY(id8) partitions 4",
			selectInfo: []compoundSQL{
				{
					"SELECT count(*) FROM tkey_string",
					false, false, true, true, []string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"}, []string{}, 5,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p0)",
					false, false, true, true, []string{"partition:p0"}, []string{"partition:p1", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p1)",
					false, false, true, true, []string{"partition:p1"}, []string{"partition:p0", "partition:p2", "partition:p3"}, 1,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p2)",
					false, false, true, true, []string{"partition:p2"}, []string{"partition:p1", "partition:p0", "partition:p3"}, 0,
				},
				{
					"SELECT count(*) FROM tkey_string PARTITION(p3)",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id8 = 'a'",
					false, false, true, true, []string{"partition:p3"}, []string{"partition:p1", "partition:p2", "partition:p0"}, 2,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id8 = 'a' or id8 = 'b'",
					false, false, true, true, []string{"partition:p1", "partition:p3"}, []string{"partition:p0", "partition:p2"}, 3,
				},
				{
					"SELECT count(*) FROM tkey_string WHERE id8 > 'a' AND id8 < 'c'",
					false, false, true, true, []string{"partition:p1", "partition:p2", "partition:p0", "partition:p3"}, []string{}, 1,
				},
			},
		},
	}
	executePartTableCase(t, tk, testCases3, createSQL3, insertSQLS3, dropSQL3)
}
// TestKeyPartitionTableMixed exercises miscellaneous KEY-partitioning
// behavior: SHOW CREATE TABLE output, disallowed partition-key column
// types (BLOB/TEXT/JSON), LINEAR KEY and ALGORITHM clause handling,
// subpartitioning restrictions, /*!50100 */ version-gated definitions,
// per-partition COMMENT and placement policy options, NULL values in the
// partition key, information_schema.partitions metadata, and boundary
// partition-ID calculation during pruning.
func TestKeyPartitionTableMixed(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database partitiondb2")
	defer tk.MustExec("drop database partitiondb2")
	tk.MustExec("use partitiondb2")
	// SHOW CREATE TABLE: explicit partition names collapse to "PARTITIONS 4"
	// when they are the default p0..p3 names.
	tk.MustExec("CREATE TABLE tkey1 (col1 INT NOT NULL, col2 DATE NOT NULL,col3 INT NOT NULL, col4 INT NOT NULL, UNIQUE KEY (col3))" +
		" PARTITION BY KEY(col3)" +
		"(PARTITION `p0`," +
		"PARTITION `p1`," +
		"PARTITION `p2`," +
		"PARTITION `p3`)")
	tk.MustQuery("show create table tkey1").Check(testkit.Rows("tkey1 CREATE TABLE `tkey1` (\n" +
		" `col1` int(11) NOT NULL,\n" +
		" `col2` date NOT NULL,\n" +
		" `col3` int(11) NOT NULL,\n" +
		" `col4` int(11) NOT NULL,\n" +
		" UNIQUE KEY `col3` (`col3`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY KEY (`col3`) PARTITIONS 4"))
	// BLOB, JSON don't support key partition
	err := tk.ExecToErr("create table tkey_string(\n" +
		"id5 BLOB not null,\n" +
		"id6 TEXT not null,\n" +
		"name varchar(16)\n" +
		") PARTITION BY KEY(id5) partitions 4\n")
	require.Error(t, err)
	require.Regexp(t, "Field 'id5' is of a not allowed type for this type of partitioning", err)
	// BLOB, JSON don't support key partition
	err = tk.ExecToErr("create table tkey_string2(\n" +
		"id5 BLOB not null,\n" +
		"id6 TEXT not null,\n" +
		"name varchar(16)\n" +
		") PARTITION BY KEY(id6) partitions 4\n")
	require.Error(t, err)
	require.Regexp(t, "Field 'id6' is of a not allowed type for this type of partitioning", err)
	err = tk.ExecToErr("CREATE TABLE tkey_json (c1 JSON) PARTITION BY KEY(c1) partitions 4")
	require.Error(t, err)
	require.Regexp(t, "Field 'c1' is of a not allowed type for this type of partitioning", err)
	// It doesn't support LINEAR KEY partition: accepted with a warning and
	// silently downgraded to non-linear KEY.
	tk.MustExec("CREATE TABLE tkey_linear (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY LINEAR KEY(col3) PARTITIONS 5")
	result := tk.MustQuery("show warnings")
	result.CheckContain("LINEAR KEY is not supported, using non-linear KEY instead")
	// It will ignore ALGORITHM=1|2, but ALGORITHM=3 is a syntax error.
	tk.MustExec("CREATE TABLE tkey_algorithm1 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY KEY ALGORITHM=1 (col3) PARTITIONS 5")
	tk.MustExec("CREATE TABLE tkey_algorithm2 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY KEY ALGORITHM=2 (col3) PARTITIONS 5")
	err = tk.ExecToErr("CREATE TABLE tkey_algorithm3 (col1 INT, col2 CHAR(5), col3 DATE) PARTITION BY KEY ALGORITHM=3 (col3) PARTITIONS 5")
	require.Error(t, err)
	require.Regexp(t, "You have an error in your SQL syntax", err)
	// Key partition can't be as subpartition
	tk.MustContainErrMsg("CREATE TABLE tkey_subpartition1 (a INT not null,b VARCHAR(12) not null,c CHAR(14) not null,primary key (a, b, c)) PARTITION BY KEY (a) SUBPARTITION BY KEY(b) SUBPARTITIONS 2", "[ddl:1500]It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning")
	tk.MustExec("CREATE TABLE tkey_subpartition1 (JYRQ INT not null,KHH VARCHAR(12) not null,ZJZH CHAR(14) not null,primary key (JYRQ, KHH, ZJZH))" +
		"PARTITION BY RANGE(JYRQ)\n" +
		"SUBPARTITION BY KEY(KHH) SUBPARTITIONS 2 \n" +
		"(\n" +
		"PARTITION p0 VALUES LESS THAN (8),\n" +
		"PARTITION p1 VALUES LESS THAN (16),\n" +
		"PARTITION p2 VALUES LESS THAN MAXVALUE\n" +
		")")
	result = tk.MustQuery("show warnings")
	result.CheckContain("Unsupported subpartitioning, only using RANGE partitioning")
	// It ignores /*!50100 */ format
	tk.MustExec("CREATE TABLE tkey10 (`col1` int, `col2` char(5),`col3` date)" +
		"/*!50100 PARTITION BY KEY (col3) PARTITIONS 5 */")
	result = tk.MustQuery("show create table tkey10")
	result.Check(testkit.Rows("tkey10 CREATE TABLE `tkey10` (\n" +
		" `col1` int(11) DEFAULT NULL,\n" +
		" `col2` char(5) DEFAULT NULL,\n" +
		" `col3` date DEFAULT NULL\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY KEY (`col3`) PARTITIONS 5"))
	// It ignores /*!50100 */ format, but doesn't ignore specified partition names
	tk.MustExec("CREATE TABLE tkey11 (`col1` int, `col2` char(5),`col3` date)" +
		"/*!50100 PARTITION BY KEY (col1) PARTITIONS 4 \n" +
		"(PARTITION `pp0`,\n" +
		"PARTITION `pp1`,\n" +
		"PARTITION `pp2`,\n" +
		"PARTITION `pp3`)\n" +
		"*/")
	result = tk.MustQuery("show create table tkey11")
	result.Check(testkit.Rows("tkey11 CREATE TABLE `tkey11` (\n" +
		" `col1` int(11) DEFAULT NULL,\n" +
		" `col2` char(5) DEFAULT NULL,\n" +
		" `col3` date DEFAULT NULL\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY KEY (`col1`)\n" +
		"(PARTITION `pp0`,\n" +
		" PARTITION `pp1`,\n" +
		" PARTITION `pp2`,\n" +
		" PARTITION `pp3`)"))
	// It shows the comment defined in the ddl
	tk.MustExec("CREATE TABLE tkey12 (`col1` int, `col2` char(5),`col3` date)" +
		"PARTITION BY KEY (col1) \n" +
		"(PARTITION `pp0` comment 'huaian',\n" +
		"PARTITION `pp1` comment 'nanjing',\n" +
		"PARTITION `pp2` comment 'zhenjiang',\n" +
		"PARTITION `pp3` comment 'suzhou')\n")
	result = tk.MustQuery("show create table tkey12")
	result.Check(testkit.Rows("tkey12 CREATE TABLE `tkey12` (\n" +
		" `col1` int(11) DEFAULT NULL,\n" +
		" `col2` char(5) DEFAULT NULL,\n" +
		" `col3` date DEFAULT NULL\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY KEY (`col1`)\n" +
		"(PARTITION `pp0` COMMENT 'huaian',\n" +
		" PARTITION `pp1` COMMENT 'nanjing',\n" +
		" PARTITION `pp2` COMMENT 'zhenjiang',\n" +
		" PARTITION `pp3` COMMENT 'suzhou')"))
	// It shows the placement policy defined in the ddl
	tk.MustExec("drop placement policy if exists fivereplicas")
	tk.MustExec("CREATE PLACEMENT POLICY fivereplicas FOLLOWERS=4")
	tk.MustExec("CREATE TABLE tkey13 (`col1` int, `col2` char(5),`col3` date) placement policy fivereplicas\n" +
		"PARTITION BY KEY (col1) PARTITIONS 4")
	result = tk.MustQuery("show create table tkey13")
	result.Check(testkit.Rows("tkey13 CREATE TABLE `tkey13` (\n" +
		" `col1` int(11) DEFAULT NULL,\n" +
		" `col2` char(5) DEFAULT NULL,\n" +
		" `col3` date DEFAULT NULL\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![placement] PLACEMENT POLICY=`fivereplicas` */\n" +
		"PARTITION BY KEY (`col1`) PARTITIONS 4"))
	// The partition column can have NULL values; all NULLs map to a single
	// partition and "= NULL" (as opposed to IS NULL) matches nothing.
	tk.MustExec("CREATE TABLE tkey14 (`col1` int, `col2` int,`col3` int, col4 int)\n" +
		"PARTITION BY KEY (col3) PARTITIONS 4")
	tk.MustExec("INSERT INTO tkey14 values(20,1,1,1),(1,2,NULL,2),(3,3,3,3),(3,3,NULL,3),(4,4,4,4),(5,5,5,5),(6,6,null,6),(7,7,7,7),(8,8,8,8),(9,9,9,9),(10,10,10,5),(11,11,11,6),(12,12,12,12),(13,13,13,13),(14,14,null,14)")
	tk.MustQuery("SELECT count(*) FROM tkey14 WHERE col3 = NULL").Check(testkit.Rows("0"))
	tk.MustQuery("SELECT count(*) FROM tkey14 WHERE col3 IS NULL").Check(testkit.Rows("4"))
	result = tk.MustQuery("EXPLAIN SELECT count(*) FROM tkey14 WHERE col3 IS NULL")
	result.CheckContain("partition:p1")
	result.MultiCheckNotContain([]string{"partition:p0", "partition:p2", "partition:p3"})
	tk.MustExec("CREATE TABLE tkey15 (`col1` int, col2 DATE NOT NULL,col3 VARCHAR(12), col4 int)\n" +
		"PARTITION BY KEY (col3) PARTITIONS 4")
	tk.MustExec("INSERT INTO tkey15 VALUES(1, '2023-02-22', 'linpin', 1), (2, '2023-02-22', NULL, 2), (3, '2023-02-22', 'anqila', 3), (4, '2023-02-22', NULL, 4)")
	result = tk.MustQuery("EXPLAIN SELECT count(*) FROM tkey15 WHERE col3 IS NULL")
	result.CheckContain("partition:p1")
	result.MultiCheckNotContain([]string{"partition:p0", "partition:p2", "partition:p3"})
	// Multi-column partition key: pruning only applies when every key
	// column has an equality (or IS NULL) condition.
	tk.MustExec("CREATE TABLE tkey12_2 (col1 INT, col2 INT ,col3 INT ,col4 INT , UNIQUE KEY(col2, col3)" +
		") PARTITION BY KEY(col2, col3) PARTITIONS 4")
	tk.MustExec("INSERT INTO tkey12_2 values(20,1,1,1),(1,2,NULL,2),(3,3,3,3),(3,3,NULL,3),(4,4,4,4)," +
		"(5,5,5,5), (6,6,null,6),(7,7,7,7),(8,8,8,8),(9,9,9,9),(10,10,10,5),(11,11,11,6),(12,12,12,12)," +
		"(13,13,13,13),(14,14,null,14)")
	result = tk.MustQuery("EXPLAIN SELECT * FROM tkey12_2 WHERE col2 = 2 and col3 IS NULL")
	result.MultiCheckNotContain([]string{"partition:p1", "partition:p0", "partition:p3"})
	tk.MustQuery("SELECT * FROM tkey12_2 WHERE col2 = 2 and col3 IS NULL").Check(testkit.Rows("1 2 <nil> 2"))
	result = tk.MustQuery("EXPLAIN SELECT * FROM tkey12_2 WHERE col2 = 2")
	result.MultiCheckContain([]string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"})
	tk.MustQuery("SELECT * FROM tkey12_2 WHERE col2 = 2").Check(testkit.Rows("1 2 <nil> 2"))
	tk.MustQuery("EXPLAIN SELECT * FROM tkey12_2 WHERE col2 = 2").MultiCheckContain([]string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"})
	tk.MustQuery("SELECT * FROM tkey12_2 WHERE col2 IS NULL")
	tk.MustQuery("EXPLAIN SELECT * FROM tkey12_2 WHERE col2 IS NULL").MultiCheckContain([]string{"partition:p0", "partition:p1", "partition:p2", "partition:p3"})
	// Get the partition information from information_schema.partitions
	result = tk.MustQuery("select PARTITION_NAME,PARTITION_ORDINAL_POSITION,PARTITION_METHOD,PARTITION_EXPRESSION " +
		"FROM information_schema.partitions where TABLE_NAME = 'tkey12_2'")
	result.Check(testkit.Rows("p0 1 KEY `col2`,`col3`", "p1 2 KEY `col2`,`col3`", "p2 3 KEY `col2`,`col3`", "p3 4 KEY `col2`,`col3`"))
	// This tests calculating the boundary partition ID when it prunes partition table
	tk.MustExec("create table tkey16 (a int) partition by key (a) partitions 12")
	tk.MustExec("insert into tkey16 values (0), (1), (2), (3)")
	tk.MustExec("insert into tkey16 select a + 4 from tkey16")
	tk.MustExec("insert into tkey16 select a + 8 from tkey16")
	tk.MustExec("select * from information_schema.partitions where partition_name is not null")
}
// TestKeyPartitionWithDifferentCharsets verifies that KEY partitioning and
// the unique index agree on collation semantics: trailing spaces are
// ignored (PAD SPACE), utf8mb4_bin remains case-sensitive while
// utf8mb4_general_ci is case-insensitive, and multi-byte GBK values are
// routed to stable partitions.
func TestKeyPartitionWithDifferentCharsets(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database partitiondb4")
	defer tk.MustExec("drop database partitiondb4")
	tk.MustExec("use partitiondb4")
	tk.MustExec("CREATE TABLE tkey29 (" +
		"col1 INT NOT NULL," +
		"col2 DATE NOT NULL," +
		"col3 VARCHAR(12) NOT NULL," +
		"col4 INT NOT NULL," +
		"UNIQUE KEY (col3)" +
		") CHARSET=utf8mb4 COLLATE=utf8mb4_bin " +
		"PARTITION BY KEY(col3) " +
		"PARTITIONS 4")
	// ignore tail spaces
	err := tk.ExecToErr("INSERT INTO tkey29 VALUES(1, '2023-02-22', 'linpin', 1), (1, '2023-02-22', 'linpin ', 5)")
	require.Regexp(t, "Duplicate entry 'linpin ' for key 'tkey29.col3'", err)
	// case sensitive
	tk.MustExec("INSERT INTO tkey29 VALUES(3, '2023-02-22', 'abc', 1), (4, '2023-02-22', 'ABC ', 5)")
	tk.MustExec("CREATE TABLE tkey30 (" +
		"col1 INT NOT NULL," +
		"col2 DATE NOT NULL," +
		"col3 VARCHAR(12) NOT NULL," +
		"col4 INT NOT NULL," +
		"UNIQUE KEY (col3)" +
		") CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci " +
		"PARTITION BY KEY(col3) " +
		"PARTITIONS 4")
	// case insensitive
	err = tk.ExecToErr("INSERT INTO tkey30 VALUES(1, '2023-02-22', 'linpin', 1), (1, '2023-02-22', 'LINPIN', 5)")
	require.Regexp(t, "Duplicate entry 'LINPIN' for key 'tkey30.col3'", err)
	// ignore tail spaces
	err = tk.ExecToErr("INSERT INTO tkey30 VALUES(1, '2023-02-22', 'linpin', 1), (1, '2023-02-22', 'LINPIN ', 5)")
	require.Regexp(t, "Duplicate entry 'LINPIN ' for key 'tkey30.col3'", err)
	// GBK charset: multi-byte values still dedupe and partition correctly.
	tk.MustExec("CREATE TABLE tkey31 (" +
		"col1 INT NOT NULL," +
		"col2 DATE NOT NULL," +
		"col3 VARCHAR(12) NOT NULL," +
		"col4 INT NOT NULL," +
		"UNIQUE KEY (col3)" +
		") CHARSET=gbk COLLATE=gbk_chinese_ci " +
		"PARTITION BY KEY(col3) " +
		"PARTITIONS 4")
	err = tk.ExecToErr("INSERT INTO tkey31 VALUES(1, '2023-02-22', '刘德华', 1), (1, '2023-02-22', '刘德华 ', 5)")
	require.Regexp(t, "Duplicate entry '刘德华 ' for key 'tkey31.col3'", err)
	tk.MustExec("INSERT INTO tkey31 VALUES(1, '2023-02-22', '刘德华', 1), (5, '2023-02-22', '张学友', 5),(6, '2023-02-22', '艾伦', 6), (7, '2023-02-22', '宁采臣', 7)")
	tk.MustQuery("SELECT * FROM tkey31 partition(p0)").Check(testkit.Rows("1 2023-02-22 刘德华 1"))
	tk.MustQuery("SELECT * FROM tkey31 partition(p1)").Check(testkit.Rows("6 2023-02-22 艾伦 6"))
	tk.MustQuery("SELECT * FROM tkey31 partition(p2)").Check(testkit.Rows("5 2023-02-22 张学友 5"))
	tk.MustQuery("SELECT * FROM tkey31 partition(p3)").Check(testkit.Rows("7 2023-02-22 宁采臣 7"))
}
// TestIssue31721 is a regression test: selecting from explicit partitions
// of a LIST COLUMNS table with a predicate that requires a type coercion
// (string column compared with a number) must not error.
func TestIssue31721(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_enable_list_partition=on;")
	tk.MustExec("drop tables if exists t_31721")
	tk.MustExec("CREATE TABLE `t_31721` (`COL1` char(1) NOT NULL) CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY LIST COLUMNS(`COL1`) " +
		"(PARTITION `P0` VALUES IN ('1')," +
		"PARTITION `P1` VALUES IN ('2')," +
		"PARTITION `P2` VALUES IN ('3'));")
	tk.MustExec("insert into t_31721 values ('1')")
	tk.MustExec("select * from t_31721 partition(p0, p1) where col1 != 2;")
}
// TestKeyPartitionTableDDL checks which ALTER TABLE partition-management
// statements are supported for KEY-partitioned tables (ADD/TRUNCATE/
// COALESCE/ANALYZE, re-partitioning, REMOVE PARTITIONING) and that the
// unsupported ones (DROP/CHECK/OPTIMIZE/REBUILD/EXCHANGE/REORGANIZE)
// fail with clear errors.
func TestKeyPartitionTableDDL(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database partitiondb3")
	defer tk.MustExec("drop database partitiondb3")
	tk.MustExec("use partitiondb3")
	tk.MustExec("CREATE TABLE tkey14 (\n" +
		"col1 INT NOT NULL," +
		"col2 INT NOT NULL," +
		"col3 INT NOT NULL," +
		"col4 INT NOT NULL," +
		"primary KEY (col1,col3)\n" +
		")" +
		"PARTITION BY KEY(col3) PARTITIONS 4")
	tk.MustExec("INSERT INTO tkey14 values(1,1,1,1),(1,1,2,2),(3,3,3,3),(3,3,4,3),(4,4,4,4),(5,5,5,5),(6,6,6,6),(7,7,7,7),(8,8,8,8),(9,9,9,9),(10,10,10,5),(11,11,11,6),(12,12,12,12),(13,13,13,13),(14,14,14,14)")
	// tkey15 is non-partitioned; used as the EXCHANGE PARTITION peer and
	// later converted to KEY partitioning via ALTER.
	tk.MustExec("CREATE TABLE tkey15 (\n" +
		"col1 INT NOT NULL," +
		"col2 INT NOT NULL," +
		"col3 INT NOT NULL," +
		"col4 INT NOT NULL," +
		"primary KEY (col1,col3)\n" +
		")")
	tk.MustExec("INSERT INTO tkey15 values (20,20,20,20)")
	tk.MustExec("CREATE TABLE tkey16 (\n" +
		"col1 INT NOT NULL," +
		"col2 INT NOT NULL," +
		"col3 INT NOT NULL," +
		"col4 INT NOT NULL," +
		"primary KEY (col1,col3)\n" +
		")" +
		"PARTITION BY KEY(col3) PARTITIONS 4")
	tk.MustExec("INSERT INTO tkey16 values(1,1,1,1),(1,1,2,2),(3,3,3,3),(3,3,4,3),(4,4,4,4),(5,5,5,5),(6,6,6,6),(7,7,7,7),(8,8,8,8),(9,9,9,9),(10,10,10,5),(11,11,11,6),(12,12,12,12),(13,13,13,13),(14,14,14,14)")
	// Supported: ADD PARTITION, TRUNCATE PARTITION, COALESCE, ANALYZE.
	tk.MustExec("ALTER TABLE tkey14 ADD PARTITION PARTITIONS 1")
	err := tk.ExecToErr("ALTER TABLE tkey14 DROP PARTITION p4")
	require.Regexp(t, "DROP PARTITION can only be used on RANGE/LIST partitions", err)
	tk.MustExec("ALTER TABLE tkey14 TRUNCATE PARTITION p3")
	tk.MustQuery("SELECT COUNT(*) FROM tkey14 partition(p3)").Check(testkit.Rows("0"))
	tk.MustExec("ALTER TABLE tkey16 COALESCE PARTITION 2")
	tk.MustExec("ALTER TABLE tkey14 ANALYZE PARTITION p3")
	// Unsupported maintenance statements must report a clear error.
	err = tk.ExecToErr("ALTER TABLE tkey14 CHECK PARTITION p2")
	require.Regexp(t, "Unsupported check partition", err)
	err = tk.ExecToErr("ALTER TABLE tkey14 OPTIMIZE PARTITION p2")
	require.Regexp(t, "Unsupported optimize partition", err)
	err = tk.ExecToErr("ALTER TABLE tkey14 REBUILD PARTITION p2")
	require.Regexp(t, "Unsupported rebuild partition", err)
	err = tk.ExecToErr("ALTER TABLE tkey14 EXCHANGE PARTITION p3 WITH TABLE tkey15")
	require.Regexp(t, "Unsupported partition type of table tkey14 when exchanging partition", err)
	err = tk.ExecToErr("ALTER TABLE tkey16 REORGANIZE PARTITION")
	require.Regexp(t, "Unsupported reorganize partition", err)
	err = tk.ExecToErr("ALTER TABLE tkey16 REORGANIZE PARTITION p0 INTO (PARTITION p0,PARTITION p1)")
	require.Regexp(t, "Unsupported reorganize partition", err)
	err = tk.ExecToErr("ALTER TABLE tkey16 REORGANIZE PARTITION p0 INTO (PARTITION p0)")
	require.Regexp(t, "Unsupported reorganize partition", err)
	err = tk.ExecToErr("ALTER TABLE tkey16 REORGANIZE PARTITION p0 INTO (PARTITION p4)")
	require.Regexp(t, "Unsupported reorganize partition", err)
	tk.MustExec("ALTER TABLE tkey15 PARTITION BY KEY(col3) PARTITIONS 4")
	tk.MustExec("ALTER TABLE tkey16 REMOVE PARTITIONING")
	// KEY() with an empty column list on a table without a usable key is
	// downgraded to a normal table with a warning.
	tk.MustExec("CREATE TABLE tkey17 (" +
		"id INT NOT NULL PRIMARY KEY," +
		"name VARCHAR(20)" +
		")" +
		"PARTITION BY KEY()" +
		"PARTITIONS 2")
	result := tk.MustQuery("show warnings")
	result.CheckContain("Unsupported partition type KEY, treat as normal table")
}
// TestLocatePartitionErrorInfo checks that inserting a value with no
// matching partition reports the offending value in the error message,
// for both RANGE and LIST partitioned tables.
func TestLocatePartitionErrorInfo(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop tables if exists t_44966")
	tk.MustExec("create table t_44966 (a bigint unsigned) partition by range (a) (partition p0 values less than (10))")
	err := tk.ExecToErr("insert into t_44966 values (0xffffffffffffffff)")
	require.Regexp(t, "Table has no partition for value 18446744073709551615", err)
	tk.MustExec("drop tables if exists t_44966")
	tk.MustExec("create table t_44966 (a bigint unsigned) partition by list (a) (partition p0 values in (1,2))")
	// Re-run the insert against the LIST table; previously the assertion
	// below only re-checked the stale error from the RANGE case, leaving
	// the LIST code path untested.
	err = tk.ExecToErr("insert into t_44966 values (0xffffffffffffffff)")
	require.Regexp(t, "Table has no partition for value 18446744073709551615", err)
}
// TestPruneModeWarningInfo verifies the warnings raised when switching
// tidb_partition_prune_mode: no warning for session-level 'static',
// both a stats-consistency warning and a "set it globally instead"
// warning for session-level 'dynamic', and only the stats-consistency
// warning for global 'dynamic'.
func TestPruneModeWarningInfo(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("set @@tidb_partition_prune_mode = 'static'")
	tk.MustQuery("show warnings").Check(testkit.Rows())
	tk.MustExec("set session tidb_partition_prune_mode = 'dynamic'")
	tk.MustQuery("show warnings").Sort().Check(testkit.Rows("Warning 1105 Please analyze all partition tables again for consistency between partition and global stats",
		"Warning 1105 Please avoid setting partition prune mode to dynamic at session level and set partition prune mode to dynamic at global level"))
	tk.MustExec("set global tidb_partition_prune_mode = 'dynamic'")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 Please analyze all partition tables again for consistency between partition and global stats"))
}
// testCallback wraps a ddl.Callback so tests can hook into the moment
// just before a DDL job runs.
type testCallback struct {
	ddl.Callback
	// OnJobRunBeforeExported, when non-nil, is invoked by OnJobRunBefore
	// with the job that is about to run.
	OnJobRunBeforeExported func(job *model.Job)
}
// newTestCallBack builds a testCallback whose embedded Callback is the
// registered "default_hook" for the given domain.
func newTestCallBack(t *testing.T, dom *domain.Domain) *testCallback {
	factory, err := ddl.GetCustomizedHook("default_hook")
	require.NoError(t, err)
	cb := &testCallback{Callback: factory(dom)}
	return cb
}
// OnJobRunBefore forwards the job to the test-provided hook, if one is set.
func (c *testCallback) OnJobRunBefore(job *model.Job) {
	if c.OnJobRunBeforeExported == nil {
		return
	}
	c.OnJobRunBeforeExported(job)
}
// TestPartitionByIntListExtensivePart runs DML concurrently with ALTER
// TABLE ... PARTITION BY across every combination of starting scheme
// (none/LIST/KEY/HASH) and target scheme, verifying each DDL state
// transition via checkDMLInAllStates.
//
// TODO: do extensive test for LIST [COLUMNS]
// TODO: Either skip this, move it to a separate directory for big tests
// or see if there are ways to speed this up :)
// Leaving the test here, for reference and completeness testing
func TestPartitionByIntListExtensivePart(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	schemaName := "PartitionByIntListExtensive"
	tk.MustExec("create database " + schemaName)
	tk.MustExec("use " + schemaName)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use " + schemaName)
	tBase := `(lp tinyint unsigned, a int unsigned, b varchar(255) collate utf8mb4_general_ci, c int, d datetime, e timestamp, f double, g text, key (b), key (c,b), key (d,c), key(e), primary key (a, lp))`
	t2Str := `create table t2 ` + tBase
	tStr := `create table t ` + tBase
	rows := 1000
	pkInserts := 20
	pkUpdates := 20
	pkDeletes := 10 // Enough to delete half of what is inserted?
	tStart := []string{
		// Non partitioned
		tStr,
		// LIST
		tStr + ` partition by list (lp) (partition p0 values in (0,6),partition p1 values in (1), partition p2 values in (2), partition p3 values in (3), partition p4 values in (4,5))`,
		// KEY
		tStr + ` partition by key(a) partitions 5`,
		// HASH
		tStr + ` partition by hash(a) partitions 5`,
		// HASH with function
		tStr + ` partition by hash(a DIV 3) partitions 5`,
	}
	quarterUintRange := 1 << 30
	quarterUintRangeStr := fmt.Sprintf("%d", quarterUintRange)
	halfUintRangeStr := fmt.Sprintf("%d", 2*quarterUintRange)
	threeQuarterUintRangeStr := fmt.Sprintf("%d", 3*quarterUintRange)
	tAlter := []string{
		// LIST
		`alter table t partition by list (lp) (partition p0 values in (2), partition p1 values in (1,3,5), partition p2 values in (0,4,6))`,
		`alter table t partition by list (lp) (partition p3 values in (3), partition p4 values in (4), partition p2 values in (2), partition p6 values in (6), partition p5 values in (5), partition p1 values in (1), partition p0 values in (0))`,
		// LIST COLUMNS
		`alter table t partition by list columns (lp) (partition p0 values in (2), partition p1 values in (1,3,5), partition p2 values in (0,4,6))`,
		`alter table t partition by list columns (lp) (partition p3 values in (3), partition p4 values in (4), partition p2 values in (2), partition p6 values in (6), partition p5 values in (5), partition p1 values in (1), partition p0 values in (0))`,
		// RANGE
		`alter table t partition by range (a) (partition pFirst values less than (` + halfUintRangeStr + `), partition pLast values less than (MAXVALUE))`,
		// RANGE
		`alter table t partition by range (a) (partition pFirst values less than (` + quarterUintRangeStr + `),` +
			`partition pLowMid values less than (` + halfUintRangeStr + `),` +
			`partition pHighMid values less than (` + threeQuarterUintRangeStr + `),` +
			`partition pLast values less than (maxvalue))`,
		// KEY
		`alter table t partition by key(a) partitions 7`,
		`alter table t partition by key(a) partitions 3`,
		// Hash
		`alter table t partition by hash(a) partitions 7`,
		`alter table t partition by hash(a) partitions 3`,
		// Hash
		`alter table t partition by hash(a DIV 13) partitions 7`,
		`alter table t partition by hash(a DIV 13) partitions 3`,
	}
	// Log the seed so a failing run can be reproduced deterministically.
	seed := gotime.Now().UnixNano()
	logutil.BgLogger().Info("Seeding rand", zap.Int64("seed", seed))
	reorgRand := rand.New(rand.NewSource(seed))
	for _, createSQL := range tStart {
		for _, alterSQL := range tAlter {
			tk.MustExec(createSQL)
			tk.MustExec(t2Str)
			getNewPK := getNewIntPK()
			getValues := getInt7ValuesFunc()
			checkDMLInAllStates(t, tk, tk2, schemaName, alterSQL, rows, pkInserts, pkUpdates, pkDeletes, reorgRand, getNewPK, getValues)
			tk.MustExec(`drop table t`)
			tk.MustExec(`drop table t2`)
		}
	}
	// Also exercise REMOVE PARTITIONING from every partitioned start state.
	for _, createSQL := range tStart[1:] {
		tk.MustExec(createSQL)
		tk.MustExec(t2Str)
		getNewPK := getNewIntPK()
		getValues := getInt7ValuesFunc()
		checkDMLInAllStates(t, tk, tk2, schemaName, "alter table t remove partitioning", rows, pkInserts, pkUpdates, pkDeletes, reorgRand, getNewPK, getValues)
		tk.MustExec(`drop table t`)
		tk.MustExec(`drop table t2`)
	}
}
// getInt7ValuesFunc returns a generator producing one row of column values
// for the (lp, a, b, c, d, e, f, g) test tables, either as a VALUES tuple
// or as a SET assignment list (asAssignment). lp is derived from the
// primary key modulo 7 (0 when the pk is not numeric); column c uses a
// monotonically increasing counter so every generated row is distinct.
func getInt7ValuesFunc() func(string, bool, *rand.Rand) string {
	rowCnt := 0
	return func(pk string, asAssignment bool, reorgRand *rand.Rand) string {
		format := `(%d, %s, '%s', %d, '%s', '%s', %f, '%s')`
		if asAssignment {
			format = `lp = %d, a = %s, b = '%s', c = %d, d = '%s', e = '%s', f = %f, g = '%s'`
		}
		rowCnt++
		lpVal, convErr := strconv.Atoi(pk)
		if convErr != nil {
			lpVal = 0
		}
		bVal := randStr(reorgRand.Intn(19), reorgRand)
		dVal := gotime.Unix(413487608+int64(reorgRand.Intn(1705689644)), 0).Format("2006-01-02T15:04:05")
		eVal := gotime.Unix(413487608+int64(reorgRand.Intn(1705689644)), 0).Format("2006-01-02T15:04:05")
		fVal := reorgRand.Float64()
		gVal := randStr(512+reorgRand.Intn(1024), reorgRand)
		return fmt.Sprintf(format, lpVal%7, pk, bVal, rowCnt, dVal, eVal, fVal, gVal)
	}
}
// TestPartitionByIntExtensivePart runs DML concurrently with ALTER
// TABLE ... PARTITION BY across combinations of starting scheme
// (none/RANGE/KEY/HASH) and target scheme for an integer primary key,
// verifying each DDL state transition via checkDMLInAllStates.
//
// TODO: Either skip this, move it to a separate directory for big tests
// or see if there are ways to speed this up :)
// Leaving the test here, for reference and completeness testing
func TestPartitionByIntExtensivePart(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	schemaName := "PartitionByIntExtensive"
	tk.MustExec("create database " + schemaName)
	tk.MustExec("use " + schemaName)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use " + schemaName)
	tBase := `(a int unsigned, b varchar(255) collate utf8mb4_general_ci, c int, d datetime, e timestamp, f double, g text, primary key (a), key (b), key (c,b), key (d,c), key(e))`
	t2Str := `create table t2 ` + tBase
	tStr := `create table t ` + tBase
	rows := 1000
	pkInserts := 20
	pkUpdates := 20
	pkDeletes := 10 // Enough to delete half of what is inserted?
	// NOTE(review): 1 << 32 / 2 is half of the uint32 range (1<<31), not a
	// third, so 2*thirdUintRange == 1<<32 and pLast below can never receive
	// a row — confirm whether `1 << 32 / 3` was intended.
	thirdUintRange := 1 << 32 / 2
	thirdUintRangeStr := fmt.Sprintf("%d", thirdUintRange)
	twoThirdUintRangeStr := fmt.Sprintf("%d", 2*thirdUintRange)
	tStart := []string{
		// Non partitioned
		tStr,
		// RANGE
		tStr + ` partition by range (a) (partition pFirst values less than (` + thirdUintRangeStr + `),` +
			`partition pMid values less than (` + twoThirdUintRangeStr + `), partition pLast values less than (maxvalue))`,
		// KEY
		tStr + ` partition by key(a) partitions 5`,
		// HASH
		tStr + ` partition by hash(a) partitions 5`,
		// HASH with function
		tStr + ` partition by hash(a DIV 3) partitions 5`,
	}
	quarterUintRange := 1 << 30
	quarterUintRangeStr := fmt.Sprintf("%d", quarterUintRange)
	halfUintRangeStr := fmt.Sprintf("%d", 2*quarterUintRange)
	threeQuarterUintRangeStr := fmt.Sprintf("%d", 3*quarterUintRange)
	tAlter := []string{
		// RANGE
		`alter table t partition by range (a) (partition pFirst values less than (` + halfUintRangeStr + `), partition pLast values less than (MAXVALUE))`,
		// RANGE
		`alter table t partition by range (a) (partition pFirst values less than (` + quarterUintRangeStr + `),` +
			`partition pLowMid values less than (` + halfUintRangeStr + `),` +
			`partition pHighMid values less than (` + threeQuarterUintRangeStr + `),` +
			`partition pLast values less than (maxvalue))`,
		// KEY
		`alter table t partition by key(a) partitions 7`,
		`alter table t partition by key(a) partitions 3`,
		// Hash
		`alter table t partition by hash(a) partitions 7`,
		`alter table t partition by hash(a) partitions 3`,
		// Hash
		`alter table t partition by hash(a DIV 13) partitions 7`,
		`alter table t partition by hash(a DIV 13) partitions 3`,
	}
	// Log the seed so a failing run can be reproduced deterministically.
	seed := gotime.Now().UnixNano()
	logutil.BgLogger().Info("Seeding rand", zap.Int64("seed", seed))
	reorgRand := rand.New(rand.NewSource(seed))
	for _, createSQL := range tStart {
		for _, alterSQL := range tAlter {
			tk.MustExec(createSQL)
			tk.MustExec(t2Str)
			getNewPK := getNewIntPK()
			getValues := getIntValuesFunc()
			checkDMLInAllStates(t, tk, tk2, schemaName, alterSQL, rows, pkInserts, pkUpdates, pkDeletes, reorgRand, getNewPK, getValues)
			tk.MustExec(`drop table t`)
			tk.MustExec(`drop table t2`)
		}
	}
	// Also exercise REMOVE PARTITIONING from every partitioned start state.
	for _, createSQL := range tStart[1:] {
		tk.MustExec(createSQL)
		tk.MustExec(t2Str)
		getNewPK := getNewIntPK()
		getValues := getIntValuesFunc()
		checkDMLInAllStates(t, tk, tk2, schemaName, "alter table t remove partitioning", rows, pkInserts, pkUpdates, pkDeletes, reorgRand, getNewPK, getValues)
		tk.MustExec(`drop table t`)
		tk.MustExec(`drop table t2`)
	}
}
func getNewIntPK() func(map[string]struct{}, string, *rand.Rand) string {
return func(m map[string]struct{}, suf string, reorgRand *rand.Rand) string {
uintPK := reorgRand.Uint32()
newPK := strconv.FormatUint(uint64(uintPK), 10)
for _, ok := m[newPK]; ok; {
uintPK = reorgRand.Uint32()
newPK = strconv.FormatUint(uint64(uintPK), 10)
_, ok = m[newPK]
}
m[newPK] = struct{}{}
return newPK
}
}
// getIntValuesFunc returns a generator producing one row of column values
// for the (a, b, c, d, e, f, g) test tables, either as a VALUES tuple or
// as a SET assignment list (asAssignment). Column c uses a monotonically
// increasing counter so every generated row is distinct.
func getIntValuesFunc() func(string, bool, *rand.Rand) string {
	rowCnt := 0
	return func(pk string, asAssignment bool, reorgRand *rand.Rand) string {
		format := `(%s, '%s', %d, '%s', '%s', %f, '%s')`
		if asAssignment {
			format = `a = %s, b = '%s', c = %d, d = '%s', e = '%s', f = %f, g = '%s'`
		}
		rowCnt++
		bVal := randStr(reorgRand.Intn(19), reorgRand)
		dVal := gotime.Unix(413487608+int64(reorgRand.Intn(1705689644)), 0).Format("2006-01-02T15:04:05")
		eVal := gotime.Unix(413487608+int64(reorgRand.Intn(1705689644)), 0).Format("2006-01-02T15:04:05")
		fVal := reorgRand.Float64()
		gVal := randStr(512+reorgRand.Intn(1024), reorgRand)
		return fmt.Sprintf(format, pk, bVal, rowCnt, dVal, eVal, fVal, gVal)
	}
}
// TestRangePartitionByRange checks that re-partitioning a RANGE table with
// an identical RANGE definition and then removing partitioning both
// preserve the table data.
func TestRangePartitionByRange(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	schemaName := "RangePartitionByRange"
	tk.MustExec("create database " + schemaName)
	tk.MustExec("use " + schemaName)
	tk.MustExec(`create table t (a int) partition by range(a) (partition p0 values less than (0), partition p1M values less than (1000000))`)
	tk.MustExec(`insert into t values (-1),(0),(1)`)
	tk.MustExec(`alter table t partition by range(a) (partition p0 values less than (0), partition p1M values less than (1000000))`)
	tk.MustExec(`alter table t remove partitioning`)
	tk.MustQuery(`select * from t`).Sort().Check(testkit.Rows("-1", "0", "1"))
}
// TestPartitionByExtensivePart runs DML concurrently with ALTER
// TABLE ... PARTITION BY for a string primary key across combinations of
// starting scheme (none/RANGE COLUMNS/KEY) and target scheme, verifying
// each DDL state transition via checkDMLInAllStates and the resulting
// SHOW CREATE TABLE output.
//
// TODO: Either skip this, move it to a separate directory for big tests
// or see if there are ways to speed this up :)
// Leaving the test here, for reference and completeness testing
func TestPartitionByExtensivePart(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	schemaName := "PartitionByExtensive"
	tk.MustExec("create database " + schemaName)
	tk.MustExec("use " + schemaName)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use " + schemaName)
	tBase := `(a varchar(255) collate utf8mb4_unicode_ci, b varchar(255) collate utf8mb4_general_ci, c int, d datetime, e timestamp, f double, g text, primary key (a), key (b), key (c,b), key (d,c), key(e))`
	t2Str := `create table t2 ` + tBase
	tStr := `create table t ` + tBase
	rows := 1000
	pkInserts := 20
	pkUpdates := 20
	pkDeletes := 10 // Enough to delete half of what is inserted?
	tStart := []string{
		// Non partitioned
		tStr,
		// RANGE COLUMNS
		tStr + ` partition by range columns (a) (partition pNull values less than (""), partition pM values less than ("M"), partition pLast values less than (maxvalue))`,
		// KEY
		tStr + ` partition by key(a) partitions 5`,
	}
	// Common prefix of the expected SHOW CREATE TABLE output; each tAlter
	// entry appends its own PARTITION BY clause.
	showCreateStr := "t CREATE TABLE `t` (\n" +
		" `a` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,\n" +
		" `b` varchar(255) COLLATE utf8mb4_general_ci DEFAULT NULL,\n" +
		" `c` int(11) DEFAULT NULL,\n" +
		" `d` datetime DEFAULT NULL,\n" +
		" `e` timestamp NULL DEFAULT NULL,\n" +
		" `f` double DEFAULT NULL,\n" +
		" `g` text DEFAULT NULL,\n" +
		" PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */,\n" +
		" KEY `b` (`b`),\n" +
		" KEY `c` (`c`,`b`),\n" +
		" KEY `d` (`d`,`c`),\n" +
		" KEY `e` (`e`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"
	tAlter := []struct{ alter, result string }{
		{
			// RANGE COLUMNS
			alter: `alter table t partition by range columns (a) (partition pH values less than ("H"), partition pLast values less than (MAXVALUE))`,
			result: showCreateStr +
				"PARTITION BY RANGE COLUMNS(`a`)\n" +
				"(PARTITION `pH` VALUES LESS THAN ('H'),\n" +
				" PARTITION `pLast` VALUES LESS THAN (MAXVALUE))",
		},
		{
			// RANGE COLUMNS
			alter: `alter table t partition by range columns (a) (partition pNull values less than (""), partition pG values less than ("G"), partition pR values less than ("R"), partition pLast values less than (maxvalue))`,
			result: showCreateStr +
				"PARTITION BY RANGE COLUMNS(`a`)\n" +
				"(PARTITION `pNull` VALUES LESS THAN (''),\n" +
				" PARTITION `pG` VALUES LESS THAN ('G'),\n" +
				" PARTITION `pR` VALUES LESS THAN ('R'),\n" +
				" PARTITION `pLast` VALUES LESS THAN (MAXVALUE))",
		},
		// KEY
		{
			alter: `alter table t partition by key(a) partitions 7`,
			result: showCreateStr +
				"PARTITION BY KEY (`a`) PARTITIONS 7",
		},
		{
			alter: `alter table t partition by key(a) partitions 3`,
			result: showCreateStr +
				"PARTITION BY KEY (`a`) PARTITIONS 3",
		},
	}
	// Log the seed so a failing run can be reproduced deterministically.
	seed := gotime.Now().UnixNano()
	logutil.BgLogger().Info("Seeding rand", zap.Int64("seed", seed))
	reorgRand := rand.New(rand.NewSource(seed))
	for _, createSQL := range tStart {
		for _, alterSQL := range tAlter {
			tk.MustExec(createSQL)
			tk.MustExec(t2Str)
			getNewPK := getNewStringPK()
			getValues := getValuesFunc()
			checkDMLInAllStates(t, tk, tk2, schemaName, alterSQL.alter, rows, pkInserts, pkUpdates, pkDeletes, reorgRand, getNewPK, getValues)
			res := tk.MustQuery(`show create table t`)
			res.AddComment("create SQL: " + createSQL + "\nalterSQL: " + alterSQL.alter)
			res.Check(testkit.Rows(alterSQL.result))
			tk.MustExec(`drop table t`)
			tk.MustExec(`drop table t2`)
		}
	}
	// Also exercise REMOVE PARTITIONING from every partitioned start state.
	for _, createSQL := range tStart[1:] {
		tk.MustExec(createSQL)
		tk.MustExec(t2Str)
		getNewPK := getNewStringPK()
		getValues := getValuesFunc()
		checkDMLInAllStates(t, tk, tk2, schemaName, "alter table t remove partitioning", rows, pkInserts, pkUpdates, pkDeletes, reorgRand, getNewPK, getValues)
		tk.MustExec(`drop table t`)
		tk.MustExec(`drop table t2`)
	}
}
// TestReorgPartExtensivePart creates a RANGE COLUMNS partitioned table `t`
// and an identically-defined, non-partitioned mirror table `t2` (with extra
// secondary indexes), seeds both with random rows, and then runs a
// REORGANIZE PARTITION on `t` while checkDMLInAllStates performs concurrent
// inserts/updates/deletes against both tables at every DDL schema-state
// transition, verifying the tables never diverge.
func TestReorgPartExtensivePart(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	schemaName := "ReorgPartExtensive"
	tk.MustExec("create database " + schemaName)
	tk.MustExec("use " + schemaName)
	// Second session: used for the DML that runs inside the DDL hook.
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use " + schemaName)
	// TODO: Handle different column types?
	// TODO: Handle index for different column types / combinations as well?
	// Steps:
	// - create a table (should at least test both LIST and RANGE partition, Including COLUMNS)
	// - add base data
	// - start DDL
	// - before each (and after?) each state transition:
	//   - insert, update and delete concurrently, to verify that the states are correctly handled.
	//   - TODO: Crash (if rollback is needed, then OK, but the rest need to be tested
	//   - TODO: Fail
	//   - TODO: run queries that could clash with backfill etc. (How to handle expected errors?)
	//     - TODO: on both the 'current' state and 'previous' state!
	// - run ADMIN CHECK TABLE
	//
	tk.MustExec(`create table t (a varchar(255) collate utf8mb4_unicode_ci, b varchar(255) collate utf8mb4_general_ci, c int, d datetime, e timestamp, f double, g text, primary key (a)) partition by range columns (a) (partition pNull values less than (""), partition pM values less than ("M"), partition pLast values less than (maxvalue))`)
	tk.MustExec(`create table t2 (a varchar(255) collate utf8mb4_unicode_ci, b varchar(255) collate utf8mb4_general_ci, c int, d datetime, e timestamp, f double, g text, primary key (a), key (b), key (c,b), key (d,c), key(e))`)
	// TODO: Test again with timestamp in col e!!
	// Sanity check: pin the exact normalized SHOW CREATE output for both tables.
	tk.MustQuery(`show create table t`).Check(testkit.Rows("" +
		"t CREATE TABLE `t` (\n" +
		"  `a` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,\n" +
		"  `b` varchar(255) COLLATE utf8mb4_general_ci DEFAULT NULL,\n" +
		"  `c` int(11) DEFAULT NULL,\n" +
		"  `d` datetime DEFAULT NULL,\n" +
		"  `e` timestamp NULL DEFAULT NULL,\n" +
		"  `f` double DEFAULT NULL,\n" +
		"  `g` text DEFAULT NULL,\n" +
		"  PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY RANGE COLUMNS(`a`)\n" +
		"(PARTITION `pNull` VALUES LESS THAN (''),\n" +
		" PARTITION `pM` VALUES LESS THAN ('M'),\n" +
		" PARTITION `pLast` VALUES LESS THAN (MAXVALUE))"))
	tk.MustQuery(`show create table t2`).Check(testkit.Rows("" +
		"t2 CREATE TABLE `t2` (\n" +
		"  `a` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,\n" +
		"  `b` varchar(255) COLLATE utf8mb4_general_ci DEFAULT NULL,\n" +
		"  `c` int(11) DEFAULT NULL,\n" +
		"  `d` datetime DEFAULT NULL,\n" +
		"  `e` timestamp NULL DEFAULT NULL,\n" +
		"  `f` double DEFAULT NULL,\n" +
		"  `g` text DEFAULT NULL,\n" +
		"  PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */,\n" +
		"  KEY `b` (`b`),\n" +
		"  KEY `c` (`c`,`b`),\n" +
		"  KEY `d` (`d`,`c`),\n" +
		"  KEY `e` (`e`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"))
	// Workload sizing for checkDMLInAllStates (per schema-state transition).
	rows := 10000
	pkInserts := 200
	pkUpdates := 200
	pkDeletes := 100 // Enough to delete half of what is inserted?
	alterStr := `alter table t reorganize partition pNull, pM, pLast into (partition pI values less than ("I"), partition pQ values less than ("q"), partition pLast values less than (MAXVALUE))`
	// Log the seed so a failing run can be reproduced deterministically.
	seed := rand.Int63()
	logutil.BgLogger().Info("Seeding rand", zap.Int64("seed", seed))
	reorgRand := rand.New(rand.NewSource(seed))
	getNewPK := getNewStringPK()
	getValues := getValuesFunc()
	checkDMLInAllStates(t, tk, tk2, schemaName, alterStr, rows, pkInserts, pkUpdates, pkDeletes, reorgRand, getNewPK, getValues)
}
// getNewStringPK returns a generator for unique random string primary keys.
// Uniqueness is tracked case-insensitively in m, because the PK column uses
// a case-insensitive collation, so "ab" and "AB" would collide in the table.
// suf is appended to every generated key so the test can tell which phase
// (initial load "-o", inserts "-i0"/"-i1", updates "-u0".., ...) created it.
func getNewStringPK() func(map[string]struct{}, string, *rand.Rand) string {
	return func(m map[string]struct{}, suf string, reorgRand *rand.Rand) string {
		for {
			// 2-6 random letters plus the phase suffix.
			// Bug fix: the previous retry path regenerated the key WITHOUT
			// appending suf, so keys created after a collision silently lost
			// their phase tag; the suffix is now kept on every attempt.
			newPK := randStr(2+reorgRand.Intn(5), reorgRand) + suf
			lowerPK := strings.ToLower(newPK)
			if _, ok := m[lowerPK]; ok {
				continue // case-insensitive collision, try again
			}
			m[lowerPK] = struct{}{}
			return newPK
		}
	}
}
// getValuesFunc returns a closure producing one row of column values for
// tables t/t2, either as a VALUES tuple or (asAssignment) as a SET list.
// A closure-local counter feeds column c so it is strictly increasing.
func getValuesFunc() func(string, bool, *rand.Rand) string {
	cnt := 0
	return func(pk string, asAssignment bool, reorgRand *rand.Rand) string {
		format := `('%s', '%s', %d, '%s', '%s', %f, '%s')`
		if asAssignment {
			format = `a = '%s', b = '%s', c = %d, d = '%s', e = '%s', f = %f, g = '%s'`
		}
		cnt++
		// NOTE: the randomness source must be consumed in exactly this
		// order (b, d, e, f, g) so runs are reproducible from the seed.
		colB := randStr(reorgRand.Intn(19), reorgRand)
		colC := cnt
		colD := gotime.Unix(413487608+int64(reorgRand.Intn(1705689644)), 0).Format("2006-01-02T15:04:05")
		colE := gotime.Unix(413487608+int64(reorgRand.Intn(1705689644)), 0).Format("2006-01-02T15:04:05")
		colF := reorgRand.Float64()
		colG := randStr(512+reorgRand.Intn(1024), reorgRand)
		return fmt.Sprintf(format, pk, colB, colC, colD, colE, colF, colG)
	}
}
// checkDMLInAllStates executes alterStr (a partition-changing ALTER on table
// `t`) and, at every DDL schema-state transition observed through the DDL
// hook, runs concurrent inserts/updates/deletes via session tk2 against both
// `t` and the non-partitioned mirror table `t2`, verifying the two tables
// stay identical throughout and after the DDL.
//
//   - tk:          session running the seed load, the ALTER and final checks.
//   - tk2:         session used for the concurrent DML inside the hook.
//   - schemaName:  database holding t and t2.
//   - alterStr:    the ALTER TABLE statement to execute on t.
//   - rows:        number of seed rows loaded before the DDL starts.
//   - pkInserts, pkUpdates, pkDeletes: how many PKs to insert/update/delete
//     per state transition; each count is split into quarters executed
//     alternately against the "current" and "previous" schema versions.
//   - reorgRand:   randomness source (seed is logged by the caller so a
//     failing run can be reproduced).
//   - getNewPK:    generates unique (case-insensitively) primary keys.
//   - getValues:   generates a row's VALUES tuple or SET assignments.
//
// Halving the work between current and previous schema versions (flipped via
// tables.SwapReorgPartFields) exercises the double-write path of online
// partition reorganization.
func checkDMLInAllStates(t *testing.T, tk, tk2 *testkit.TestKit, schemaName, alterStr string,
	rows, pkInserts, pkUpdates, pkDeletes int,
	reorgRand *rand.Rand,
	getNewPK func(map[string]struct{}, string, *rand.Rand) string,
	getValues func(string, bool, *rand.Rand) string) {
	dom := domain.GetDomain(tk.Session())
	originHook := dom.DDL().GetHook()
	defer dom.DDL().SetHook(originHook)
	hook := newTestCallBack(t, dom)
	dom.DDL().SetHook(hook)
	// pkMap holds the live PKs lower-cased (case-insensitive collation);
	// pkArray keeps them in order so random indexes can pick victims.
	pkMap := make(map[string]struct{}, rows)
	pkArray := make([]string, 0, len(pkMap))
	// Generate a start set:
	for i := 0; i < rows; i++ {
		pk := getNewPK(pkMap, "-o", reorgRand)
		pkArray = append(pkArray, pk)
		values := getValues(pk, false, reorgRand)
		tk.MustExec(`insert into t values ` + values)
		tk.MustExec(`insert into t2 values ` + values)
	}
	tk.MustExec(`analyze table t`)
	tk.MustExec(`analyze table t2`)
	// Baseline: both tables must hold exactly the same rows before the DDL.
	tk.MustQuery(`select * from t except select * from t2`).Check(testkit.Rows())
	tk.MustQuery(`select * from t2 except select * from t`).Check(testkit.Rows())
	// How to arrange data for possible collisions?
	// change both PK column, SK column and non indexed column!
	// Run various changes in transactions, in two concurrent sessions
	// + mirror those transactions on a copy of the same table and data without DDL
	// to verify expected transaction conflicts!
	// We should try to collide:
	// Current data : 1-1000
	// insert vN    1-200 // random order, random length of transaction?
	// insert vN-1 100-300 // interleaved with vN, random order+length of txn?
	// update vN    1-20, 100-120, 200-220, 300-320..
	// update vN-1 10-30, 110-130, 210-230, ...
	// delete vN
	// delete vN-1
	//               insert  update  delete <- verify => insert  update  delete
	// insert
	// update
	// delete
	// Note: update the PK so it moves between different before and after partitions
	tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
	tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
	currentState := model.StateNone
	transitions := 0
	var currTbl table.Table
	currSchema := sessiontxn.GetTxnManager(tk2.Session()).GetTxnInfoSchema()
	prevTbl, err := currSchema.TableByName(model.NewCIStr(schemaName), model.NewCIStr("t"))
	require.NoError(t, err)
	var hookErr error
	// The hook fires before each job step; DML below runs exactly once per
	// schema-state transition of the reorganize-partition job.
	hook.OnJobRunBeforeExported = func(job *model.Job) {
		if hookErr != nil {
			// Enough to find a single error
			return
		}
		if job.Type == model.ActionReorganizePartition && job.SchemaState != currentState {
			transitions++
			// use random generation to possibly trigger txn collisions / deadlocks?
			// insert (dup in new/old , non dup)
			// update (dup in new/old , non dup as in same old/new partition -> update, different new/old -> insert + delete)
			// delete
			// verify with select after commit?
			logutil.BgLogger().Info("State before ins/upd/del", zap.Int("transitions", transitions),
				zap.Int("rows", len(pkMap)), zap.Stringer("SchemaState", job.SchemaState))
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			// Start with PK changes (non duplicate keys)
			// Inserts, first half in the current schema version.
			insPK := make([]string, 0, pkInserts)
			values := make([]string, 0, pkInserts)
			for i := 0; i < pkInserts; i += 2 {
				pk := getNewPK(pkMap, "-i0", reorgRand)
				logutil.BgLogger().Debug("insert1", zap.String("pk", pk))
				pkArray = append(pkArray, pk)
				insPK = append(insPK, pk)
				values = append(values, getValues(pk, false, reorgRand))
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			hookErr = tk2.ExecToErr(`insert into t values ` + strings.Join(values, ","))
			if hookErr != nil {
				return
			}
			hookErr = tk2.ExecToErr(`insert into t2 values ` + strings.Join(values, ","))
			if hookErr != nil {
				return
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			currSchema = sessiontxn.GetTxnManager(tk2.Session()).GetTxnInfoSchema()
			currTbl, hookErr = currSchema.TableByName(model.NewCIStr(schemaName), model.NewCIStr("t"))
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using previous schema version
			// Inserts, second half in the previous schema version.
			values = values[:0]
			for i := 1; i < pkInserts; i += 2 {
				pk := getNewPK(pkMap, "-i1", reorgRand)
				logutil.BgLogger().Debug("insert2", zap.String("pk", pk))
				pkArray = append(pkArray, pk)
				insPK = append(insPK, pk)
				values = append(values, getValues(pk, false, reorgRand))
			}
			hookErr = tk2.ExecToErr(`insert into t values ` + strings.Join(values, ","))
			if hookErr != nil {
				return
			}
			hookErr = tk2.ExecToErr(`insert into t2 values ` + strings.Join(values, ","))
			if hookErr != nil {
				return
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			rs, err := tk2.Exec(`select count(*) from t`)
			if err != nil {
				hookErr = err
				return
			}
			tRows := tk2.ResultSetToResult(rs, "").Rows()[0][0].(string)
			rs, err = tk2.Exec(`select count(*) from t2`)
			if err != nil {
				hookErr = err
				return
			}
			t2Rows := tk2.ResultSetToResult(rs, "").Rows()[0][0].(string)
			if tRows != t2Rows {
				logutil.BgLogger().Error("rows do not match", zap.String("t", tRows), zap.String("t2", t2Rows), zap.Stringer("state", job.SchemaState))
			}
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using current schema version
			// Half from insert (1/4 in current schema version)
			values = values[:0]
			for i := 0; i < pkUpdates; i += 4 {
				insIdx := reorgRand.Intn(len(insPK))
				oldPK := insPK[insIdx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				newPK := getNewPK(pkMap, "-u0", reorgRand)
				insPK[insIdx] = newPK
				// insPK's entries live at the tail of pkArray; map the
				// insPK index to its pkArray position.
				idx := len(pkArray) - len(insPK) + insIdx
				pkArray[idx] = newPK
				value := getValues(newPK, true, reorgRand)
				logutil.BgLogger().Debug("update1", zap.String("old", oldPK), zap.String("value", value))
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				// Also do some non-pk column updates!
				insIdx = reorgRand.Intn(len(insPK))
				oldPK = insPK[insIdx]
				value = getValues(oldPK, true, reorgRand)
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using previous schema version
			// Half from insert (1/4 in previous schema version)
			values = values[:0]
			for i := 1; i < pkUpdates; i += 4 {
				insIdx := reorgRand.Intn(len(insPK))
				oldPK := insPK[insIdx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				newPK := getNewPK(pkMap, "-u1", reorgRand)
				insPK[insIdx] = newPK
				idx := len(pkArray) - len(insPK) + insIdx
				pkArray[idx] = newPK
				value := getValues(newPK, true, reorgRand)
				logutil.BgLogger().Debug("update2", zap.String("old", oldPK), zap.String("value", value))
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				// Also do some non-pk column updates!
				// Note: if PK changes it does RemoveRecord + AddRecord
				insIdx = reorgRand.Intn(len(insPK))
				oldPK = insPK[insIdx]
				value = getValues(oldPK, true, reorgRand)
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			// Half from Old
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using current schema version
			// Half from old (1/4 in current schema version)
			values = values[:0]
			for i := 2; i < pkUpdates; i += 4 {
				// Pick victims from the seed rows (the head of pkArray),
				// i.e. rows that existed before this transition.
				idx := reorgRand.Intn(len(pkArray) - len(insPK))
				oldPK := pkArray[idx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				newPK := getNewPK(pkMap, "-u2", reorgRand)
				pkArray[idx] = newPK
				value := getValues(newPK, true, reorgRand)
				logutil.BgLogger().Debug("update3", zap.String("old", oldPK), zap.String("value", value))
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				// Also do some non-pk column updates!
				idx = reorgRand.Intn(len(pkArray) - len(insPK))
				oldPK = pkArray[idx]
				value = getValues(oldPK, true, reorgRand)
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using previous schema version
			// Half from old (1/4 in previous schema version)
			values = values[:0]
			for i := 3; i < pkUpdates; i += 4 {
				idx := reorgRand.Intn(len(pkArray) - len(insPK))
				oldPK := pkArray[idx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				newPK := getNewPK(pkMap, "-u3", reorgRand)
				pkArray[idx] = newPK
				value := getValues(newPK, true, reorgRand)
				logutil.BgLogger().Debug("update4", zap.String("old", oldPK), zap.String("value", value))
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				// Also do some non-pk column updates!
				idx = reorgRand.Intn(len(pkArray) - len(insPK))
				oldPK = pkArray[idx]
				value = getValues(oldPK, true, reorgRand)
				hookErr = tk2.ExecToErr(`update t set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`update t2 set ` + value + ` where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			rs, err = tk2.Exec(`select count(*) from t`)
			if err != nil {
				hookErr = err
				return
			}
			tRows = tk2.ResultSetToResult(rs, "").Rows()[0][0].(string)
			rs, err = tk2.Exec(`select count(*) from t2`)
			if err != nil {
				hookErr = err
				return
			}
			t2Rows = tk2.ResultSetToResult(rs, "").Rows()[0][0].(string)
			if tRows != t2Rows {
				logutil.BgLogger().Error("rows do not match", zap.String("t", tRows), zap.String("t2", t2Rows), zap.Stringer("state", job.SchemaState))
			}
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using current schema version
			// Half from insert (1/4 in current schema version)
			// Deletes follow the same quartering scheme as the updates.
			values = values[:0]
			for i := 0; i < pkDeletes; i += 4 {
				insIdx := reorgRand.Intn(len(insPK))
				oldPK := insPK[insIdx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				idx := len(pkArray) - len(insPK) + insIdx
				insPK = append(insPK[:insIdx], insPK[insIdx+1:]...)
				pkArray = append(pkArray[:idx], pkArray[idx+1:]...)
				logutil.BgLogger().Debug("delete0", zap.String("pk", oldPK))
				hookErr = tk2.ExecToErr(`delete from t where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`delete from t2 where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using previous schema version
			// Half from insert (1/4 in previous schema version)
			values = values[:0]
			for i := 1; i < pkDeletes; i += 4 {
				insIdx := reorgRand.Intn(len(insPK))
				oldPK := insPK[insIdx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				idx := len(pkArray) - len(insPK) + insIdx
				insPK = append(insPK[:insIdx], insPK[insIdx+1:]...)
				pkArray = append(pkArray[:idx], pkArray[idx+1:]...)
				logutil.BgLogger().Debug("delete1", zap.String("pk", oldPK))
				hookErr = tk2.ExecToErr(`delete from t where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`delete from t2 where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			// Half from Old
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using current schema version
			// Half from old (1/4 in current schema version)
			values = values[:0]
			for i := 2; i < pkDeletes; i += 4 {
				idx := reorgRand.Intn(len(pkArray) - len(insPK))
				oldPK := pkArray[idx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				pkArray = append(pkArray[:idx], pkArray[idx+1:]...)
				logutil.BgLogger().Debug("delete2", zap.String("pk", oldPK))
				hookErr = tk2.ExecToErr(`delete from t where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`delete from t2 where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			if len(pkMap) != len(pkArray) {
				panic("Different length!!!")
			}
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using previous schema version
			// Half from old (1/4 in previous schema version)
			values = values[:0]
			for i := 3; i < pkDeletes; i += 4 {
				idx := reorgRand.Intn(len(pkArray) - len(insPK))
				oldPK := pkArray[idx]
				lowerPK := strings.ToLower(oldPK)
				delete(pkMap, lowerPK)
				pkArray = append(pkArray[:idx], pkArray[idx+1:]...)
				logutil.BgLogger().Debug("delete3", zap.String("pk", oldPK))
				hookErr = tk2.ExecToErr(`delete from t where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
				hookErr = tk2.ExecToErr(`delete from t2 where a = "` + oldPK + `"`)
				if hookErr != nil {
					return
				}
			}
			tk2.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
			tk2.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
			rs, err = tk2.Exec(`select count(*) from t`)
			if err != nil {
				hookErr = err
				return
			}
			tRows = tk2.ResultSetToResult(rs, "").Rows()[0][0].(string)
			rs, err = tk2.Exec(`select count(*) from t2`)
			if err != nil {
				hookErr = err
				return
			}
			t2Rows = tk2.ResultSetToResult(rs, "").Rows()[0][0].(string)
			if tRows != t2Rows {
				logutil.BgLogger().Error("rows do not match", zap.String("t", tRows), zap.String("t2", t2Rows), zap.Stringer("state", job.SchemaState))
			}
			require.True(t, tables.SwapReorgPartFields(currTbl, prevTbl))
			// Now using current schema version
			tk2.MustQuery(`select count(*) from t2`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			tk2.MustQuery(`select count(*) from t`).Check(testkit.Rows(fmt.Sprintf("%d", len(pkMap))))
			// The "current" table of this transition becomes the "previous"
			// table of the next one.
			prevTbl = currTbl
			logutil.BgLogger().Info("State after ins/upd/del", zap.Int("transitions", transitions),
				zap.Int("rows", len(pkMap)), zap.Stringer("SchemaState", job.SchemaState))
		}
	}
	// Run the DDL; the hook above fires on every state transition.
	tk.MustExec(alterStr)
	require.NoError(t, hookErr)
	// Final verification: internal consistency and t == t2.
	tk.MustExec(`admin check table t`)
	tk.MustExec(`admin check table t2`)
	tk.MustQuery(`select count(*) from (select a from t except select a from t2) a`).Check(testkit.Rows("0"))
	tk.MustQuery(`select count(*) from (select a from t2 except select a from t) a`).Check(testkit.Rows("0"))
	tk.MustQuery(`select * from t except select * from t2 LIMIT 1`).Check(testkit.Rows())
	tk.MustQuery(`select * from t2 except select * from t LIMIT 1`).Check(testkit.Rows())
}
// Emojis fold to a single rune, and ö compares as o, so just complicated having other runes.
// Enough to just distribute between A and Z + testing simple folding
var runes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randStr(n int, r *rand.Rand) string {
var sb strings.Builder
sb.Grow(n)
for i := 0; i < n; i++ {
_, _ = sb.WriteRune(runes[r.Intn(len(runes))])
}
return sb.String()
}
|
package problem0441
import "testing"
import "math"
// TestArrangeCoins verifies arrangeCoins (LeetCode 441) against the closed
// form: the number of complete staircase rows for n coins is the largest k
// with k*(k+1)/2 <= n, i.e. k = floor((sqrt(8n+1)-1)/2).
//
// The previous version only t.Log'ed a boolean (so it could never fail) and
// compared against int(math.Sqrt(n)), which is the wrong oracle (e.g. n=3
// yields 2 complete rows, but floor(sqrt(3)) is 1).
func TestArrangeCoins(t *testing.T) {
	for i := 0; i < 10; i++ {
		want := int((math.Sqrt(float64(8*i+1)) - 1) / 2)
		if got := arrangeCoins(i); got != want {
			t.Errorf("arrangeCoins(%d) = %d, want %d", i, got, want)
		}
	}
}
|
package main
import (
"fmt"
"math"
)
type (
	// Shapes is satisfied by any figure that can report both its area and
	// its perimeter.
	Shapes interface {
		Area() float64
		Perimeter() float64
	}
	// Rectangle is an axis-aligned rectangle with integer side lengths.
	Rectangle struct {
		height, length int
	}
	// Circle is described by its radius.
	Circle struct {
		radius float64
	}
	// Triangle is described by its three side lengths. No validation is
	// performed that the sides satisfy the triangle inequality.
	Triangle struct {
		sideA float64
		sideB float64
		sideC float64
	}
)
// main builds one instance of each shape and prints its description,
// perimeter and area.
func main() {
	figures := []Shapes{
		&Rectangle{3, 4},
		&Circle{5},
		&Triangle{3, 4, 5},
	}
	for i := range figures {
		printShapes(figures[i])
	}
}
// Perimeter returns the rectangle's perimeter (twice the sum of its sides).
func (r *Rectangle) Perimeter() float64 {
	sum := r.height + r.length
	return float64(sum * 2)
}
// Area returns the rectangle's area (height times length).
func (r *Rectangle) Area() float64 {
	area := r.height * r.length
	return float64(area)
}
// String describes the rectangle's dimensions and dynamic type.
func (r *Rectangle) String() string {
	const format = "height = %v, length = %v. Type = %T"
	return fmt.Sprintf(format, r.height, r.length, r)
}
// Perimeter returns the circle's circumference, 2*pi*r.
func (c *Circle) Perimeter() float64 {
	return math.Pi * 2 * c.radius
}
// Area returns the circle's area, pi*r^2.
func (c *Circle) Area() float64 {
	squared := math.Pow(c.radius, 2)
	return math.Pi * squared
}
// String describes the circle's radius and dynamic type.
func (c *Circle) String() string {
	const format = "radius = %v. Type = %T"
	return fmt.Sprintf(format, c.radius, c)
}
// Perimeter returns the sum of the triangle's three side lengths.
func (t *Triangle) Perimeter() float64 {
	partial := t.sideA + t.sideB
	return partial + t.sideC
}
// Area returns the triangle's area via Heron's formula:
// sqrt(s*(s-a)*(s-b)*(s-c)) where s is the semi-perimeter.
func (t *Triangle) Area() float64 {
	s := t.Perimeter() / 2
	product := s * (s - t.sideA)
	product = product * (s - t.sideB)
	product = product * (s - t.sideC)
	return math.Sqrt(product)
}
// String describes the triangle's side lengths and dynamic type.
func (t *Triangle) String() string {
	const format = "a = %v, b = %v, c = %v. Type = %T"
	return fmt.Sprintf(format, t.sideA, t.sideB, t.sideC, t)
}
// printShapes writes a shape's description, perimeter and area to stdout,
// followed by a separator line.
func printShapes(s Shapes) {
	fmt.Printf("%v\n", s)
	fmt.Printf("Perimeter = %.2f\n", s.Perimeter())
	fmt.Printf("Area = %.2f\n----------\n", s.Area())
}
|
package s3fs
// Credentials holds a sensitive string whose value must never leave the API
// in JSON form.
type Credentials string

// MarshalJSON implements json.Marshaler so the secret is never serialized:
// any non-empty value is replaced by the literal "[redacted]", and an empty
// value marshals as an empty JSON string.
func (c Credentials) MarshalJSON() ([]byte, error) {
	if c == "" {
		return []byte(`""`), nil
	}
	return []byte(`"[redacted]"`), nil
}
|
package multiples3or5
import (
"fmt"
"testing"
)
// TestMultiple3And5 checks Multiple3And5 against known sums of multiples of
// 3 or 5 below the given limit (Project Euler problem 1).
func TestMultiple3And5(t *testing.T) {
	cases := []struct {
		arg  int
		want int
	}{
		{arg: 10, want: 23},
	}
	for _, tc := range cases {
		name := fmt.Sprintf("Multiple3And5(%d)", tc.arg)
		t.Run(name, func(t *testing.T) {
			got := Multiple3And5(tc.arg)
			if got != tc.want {
				t.Errorf("Multiple3And5() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
package main
import "strconv"
// Position is a geographic coordinate pair in decimal degrees.
type Position struct {
	Latitude  float64
	Longitude float64
}

// String renders the position as "lat,lon", using the shortest decimal
// representation that round-trips each float64 exactly.
func (p Position) String() string {
	lat := strconv.FormatFloat(p.Latitude, 'f', -1, 64)
	lon := strconv.FormatFloat(p.Longitude, 'f', -1, 64)
	return lat + "," + lon
}
|
package types
import (
"fmt"
"go/ast"
"go/token"
"regexp"
"strings"
goast "go/ast"
"strconv"
"github.com/elliotchance/c2go/program"
"github.com/elliotchance/c2go/util"
)
// CastExpr returns a Go AST expression that converts expr from fromType to
// toType. Both type names are normalized through ResolveType first. Special
// cases (NULL pointers, numeric -> bool, string <-> byte-array, pointer
// nil-comparison) are handled explicitly; any remaining pair of types is
// delegated to a noarch.XxxToYyy helper call (and the noarch import is
// registered on the program).
func CastExpr(p *program.Program, expr ast.Expr, fromType, toType string) ast.Expr {
	fromType = ResolveType(p, fromType)
	toType = ResolveType(p, toType)

	// FIXME: This is a hack to avoid casting in some situations.
	if fromType == "" || toType == "" {
		return expr
	}

	// FIXME: This should be removed, it was just for debugging.
	// if fromType == "" || toType == "" {
	// 	panic(expr)
	// }

	// Identical types need no cast at all.
	if fromType == toType {
		return expr
	}

	// A NULL expression assigned to a char ** becomes an empty string literal.
	// TODO: The toType could be any type of string.
	if IsNullExpr(expr) && toType == "char **" {
		return &goast.BasicLit{
			Kind:  token.STRING,
			Value: `""`,
		}
	}

	// Compatible integer types
	types := []string{
		// General types:
		"int", "int64", "uint16", "uint32", "byte", "uint64",
		"float32", "float64",

		// Known aliases
		"__uint16_t",

		// Darwin specific:
		"__darwin_ct_rune_t", "darwin.Darwin_ct_rune_t",
	}

	// C treats any non-zero number as true, so a numeric -> bool cast
	// becomes `expr != 0`.
	for _, v := range types {
		if fromType == v && toType == "bool" {
			return &goast.BinaryExpr{
				X:  expr,
				Op: token.NEQ,
				Y: &goast.BasicLit{
					Kind:  token.STRING,
					Value: "0",
				},
			}
		}
	}

	// In the forms of:
	// - `string` -> `[8]byte`
	// - `string` -> `char *[13]`
	match1 := regexp.MustCompile(`\[(\d+)\]byte`).FindStringSubmatch(toType)
	match2 := regexp.MustCompile(`char \*\[(\d+)\]`).FindStringSubmatch(toType)
	if fromType == "string" && (len(match1) > 0 || len(match2) > 0) {
		// Bug fix: take the array length from whichever pattern actually
		// matched. Previously match1[1] was read unconditionally, which
		// panicked with an index out of range whenever only the
		// `char *[N]` form matched. (The string-based Cast() below already
		// handled this correctly.)
		size := ""
		if len(match1) > 0 {
			size = match1[1]
		} else {
			size = match2[1]
		}

		// Construct a byte array from "first":
		//
		//     var str [5]byte = [5]byte{'f','i','r','s','t'}
		value := &goast.CompositeLit{
			Type: &goast.ArrayType{
				Len: &goast.BasicLit{
					Kind:  token.INT,
					Value: size,
				},
				Elt: goast.NewIdent("byte"),
			},
			Elts: []goast.Expr{},
		}

		// Copy each character of the quoted string literal into the
		// composite literal, keeping escape sequences (e.g. `\n`)
		// together as one element.
		strValue := expr.(*goast.BasicLit).Value
		for i := 1; i < len(strValue)-1; i++ {
			s := strValue[i : i+1]
			if s == "\\" {
				s = strValue[i : i+2]
				i++
			}

			// TODO: This does not handle characters that need to be escaped.
			value.Elts = append(value.Elts, &goast.BasicLit{
				Kind:  token.CHAR,
				Value: "'" + s + "'",
			})
		}

		// C strings are NUL-terminated.
		value.Elts = append(value.Elts, &goast.BasicLit{
			Kind:  token.INT,
			Value: "0",
		})

		return value
	}

	// In the forms of:
	// - `[7]byte` -> `string`
	// - `char *[12]` -> `string`
	match1 = regexp.MustCompile(`\[(\d+)\]byte`).FindStringSubmatch(fromType)
	match2 = regexp.MustCompile(`char \*\[(\d+)\]`).FindStringSubmatch(fromType)
	if (len(match1) > 0 || len(match2) > 0) && toType == "string" {
		size := 0
		if len(match1) > 0 {
			size = util.Atoi(match1[1])
		} else {
			size = util.Atoi(match2[1])
		}

		// The following code builds this (dropping the trailing NUL):
		//
		//     string(expr[:size - 1])
		//
		return &goast.CallExpr{
			Fun: goast.NewIdent("string"),
			Args: []goast.Expr{
				&goast.SliceExpr{
					X: expr,
					High: &goast.BasicLit{
						Kind:  token.INT,
						Value: strconv.Itoa(size - 1),
					},
				},
			},
		}
	}

	// Anything that is a pointer can be compared to nil
	if fromType[0] == '*' && toType == "bool" {
		return &goast.BinaryExpr{
			X:  expr,
			Op: token.NEQ,
			Y: &goast.BasicLit{
				Kind:  token.STRING,
				Value: "nil",
			},
		}
	}

	// Integer zero assigned to a pointer means NULL.
	if fromType == "int" && toType == "*int" {
		return &goast.BasicLit{
			Kind:  token.STRING,
			Value: "nil",
		}
	}

	if fromType == "int" && toType == "*byte" {
		return &goast.BasicLit{
			Kind:  token.STRING,
			Value: `""`,
		}
	}

	if fromType == "_Bool" && toType == "bool" {
		return expr
	}

	// Between two compatible numeric types a plain Go conversion suffices.
	if util.InStrings(fromType, types) && util.InStrings(toType, types) {
		return &goast.CallExpr{
			Fun:  goast.NewIdent(toType),
			Args: []goast.Expr{expr},
		}
	}

	// Fall back to a noarch helper, e.g. noarch.IntToByte. Only the last
	// component of a dotted type name contributes to the helper name.
	p.AddImport("github.com/elliotchance/c2go/noarch")

	leftName := fromType
	rightName := toType

	if strings.Contains(leftName, ".") {
		parts := strings.Split(leftName, ".")
		leftName = parts[len(parts)-1]
	}
	if strings.Contains(rightName, ".") {
		parts := strings.Split(rightName, ".")
		rightName = parts[len(parts)-1]
	}

	return &goast.CallExpr{
		Fun: goast.NewIdent(fmt.Sprintf("noarch.%sTo%s",
			util.GetExportedName(leftName), util.GetExportedName(rightName))),
		Args: []goast.Expr{expr},
	}
}
func IsNullExpr(n goast.Expr) bool {
if p1, ok := n.(*goast.ParenExpr); ok {
if p2, ok := p1.X.(*goast.BasicLit); ok && p2.Value == "0" {
return true
}
}
return false
}
// Cast is the string-based counterpart of CastExpr: it returns a Go source
// fragment converting expr (already rendered as Go code) from fromType to
// toType. Both type names are normalized through ResolveType; unhandled
// type pairs fall back to a noarch.XxxToYyy helper call (registering the
// noarch import on the program).
func Cast(program *program.Program, expr, fromType, toType string) string {
	fromType = ResolveType(program, fromType)
	toType = ResolveType(program, toType)

	// FIXME: This is a hack to avoid casting in some situations.
	if fromType == "" || toType == "" {
		return expr
	}

	// Identical types need no cast.
	if fromType == toType {
		return expr
	}

	// Compatible integer types
	types := []string{
		// General types:
		"int", "int64", "uint16", "uint32", "byte", "uint64",
		"float32", "float64",

		// Known aliases
		"__uint16_t",

		// Darwin specific:
		"__darwin_ct_rune_t", "darwin.Darwin_ct_rune_t",
	}

	// C treats any non-zero number as true: numeric -> bool is `expr != 0`.
	for _, v := range types {
		if fromType == v && toType == "bool" {
			return fmt.Sprintf("%s != 0", expr)
		}
	}

	// In the forms of:
	// - `string` -> `[8]byte`
	// - `string` -> `char *[13]`
	match1 := regexp.MustCompile(`\[(\d+)\]byte`).FindStringSubmatch(toType)
	match2 := regexp.MustCompile(`char \*\[(\d+)\]`).FindStringSubmatch(toType)
	if fromType == "string" && (len(match1) > 0 || len(match2) > 0) {
		// Construct a byte array from "first":
		//
		//     var str [5]byte = [5]byte{'f','i','r','s','t'}
		//
		// expr is a quoted Go string literal; the loop walks its interior
		// (skipping the surrounding quotes) and joins the characters with
		// "','" so the final Sprintf produces quoted char literals.
		s := ""
		for i := 1; i < len(expr)-1; i++ {
			if i > 1 {
				s += "','"
			}

			// Watch out for escape characters.
			if expr[i] == '\\' {
				// Keep the backslash and its escaped character together
				// as a single char literal (e.g. '\n').
				s += fmt.Sprintf("\\%c", expr[i+1])
				i += 1
			} else {
				s += string(expr[i])
			}
		}

		// Take the array length from whichever pattern matched; a
		// trailing 0 terminator is always appended (C strings are
		// NUL-terminated).
		size := "0"
		if len(match1) > 0 {
			size = match1[1]
		} else {
			size = match2[1]
		}

		return fmt.Sprintf("[%s]byte{'%s', 0}", size, s)
	}

	// In the forms of:
	// - `[7]byte` -> `string`
	// - `char *[12]` -> `string`
	match1 = regexp.MustCompile(`\[(\d+)\]byte`).FindStringSubmatch(fromType)
	match2 = regexp.MustCompile(`char \*\[(\d+)\]`).FindStringSubmatch(fromType)
	if (len(match1) > 0 || len(match2) > 0) && toType == "string" {
		size := 0
		if len(match1) > 0 {
			size = util.Atoi(match1[1])
		} else {
			size = util.Atoi(match2[1])
		}

		// Slice off the trailing NUL terminator before converting.
		return fmt.Sprintf("string(%s[:%d])", expr, size-1)
	}

	// Anything that is a pointer can be compared to nil
	if fromType[0] == '*' && toType == "bool" {
		return fmt.Sprintf("%s != nil", expr)
	}

	// Integer zero assigned to a pointer means NULL.
	if fromType == "int" && toType == "*int" {
		return "nil"
	}

	if fromType == "int" && toType == "*byte" {
		return `""`
	}

	if fromType == "_Bool" && toType == "bool" {
		return expr
	}

	// Between two compatible numeric types a plain Go conversion suffices.
	if util.InStrings(fromType, types) && util.InStrings(toType, types) {
		return fmt.Sprintf("%s(%s)", toType, expr)
	}

	// Fall back to a noarch helper, e.g. noarch.IntToByte. Only the last
	// component of a dotted type name contributes to the helper name.
	program.AddImport("github.com/elliotchance/c2go/noarch")

	leftName := fromType
	rightName := toType

	if strings.Index(leftName, ".") != -1 {
		parts := strings.Split(leftName, ".")
		leftName = parts[len(parts)-1]
	}
	if strings.Index(rightName, ".") != -1 {
		parts := strings.Split(rightName, ".")
		rightName = parts[len(parts)-1]
	}

	return fmt.Sprintf("noarch.%sTo%s(%s)",
		util.GetExportedName(leftName), util.GetExportedName(rightName), expr)
}
|
package factory
// Config is the YAML-backed configuration for this service: sampling
// period/precision, network endpoints, UPF/edge topology, logging, and
// the load balancer selection.
type Config struct {
	Period           int        `yaml:"period"`
	Delta            float64    `yaml:"delta"`
	Granularity      float64    `yaml:"granularity"`
	Host             string     `yaml:"host"`
	GnbIp            string     `yaml:"gnbIp"`
	DnIp             string     `yaml:"dnIp"`
	UpfInfos         []UpfInfo  `yaml:"upfInfos"`
	EdgeInfos        []EdgeInfo `yaml:"edgeInfos"`
	Logger           Logger     `yaml:"logger"`
	LoadBalancerType int        `yaml:"loadBalancerType"`
}
|
package array
import (
"reflect"
"github.com/spf13/cast"
)
// Keys mirrors PHP's array_keys(): it returns the keys of a map or the
// field names of a struct as a []interface{}. Any other input kind
// yields a nil slice.
//
// The previous guard `if kind != Map && kind != Struct { return }` was
// redundant with the switch below (which ignores other kinds anyway),
// so it has been removed.
func Keys(input interface{}) (result []interface{}) {
	v := reflect.ValueOf(input)
	switch v.Kind() {
	case reflect.Map:
		if v.Len() == 0 {
			return
		}
		result = make([]interface{}, 0, v.Len())
		for _, key := range v.MapKeys() {
			result = append(result, key.Interface())
		}
	case reflect.Struct:
		if v.NumField() == 0 {
			return
		}
		result = make([]interface{}, 0, v.NumField())
		for i := 0; i < v.NumField(); i++ {
			result = append(result, v.Type().Field(i).Name)
		}
	}
	return
}
// StringKeys mirrors PHP's array_keys() but returns the keys as strings:
// map keys are converted with cast.ToString, struct field names are used
// directly. Any other input kind yields a nil slice.
//
// The previous guard `if kind != Map && kind != Struct { return }` was
// redundant with the switch below and has been removed.
func StringKeys(input interface{}) (result []string) {
	v := reflect.ValueOf(input)
	switch v.Kind() {
	case reflect.Map:
		if v.Len() == 0 {
			return
		}
		result = make([]string, 0, v.Len())
		for _, key := range v.MapKeys() {
			s := cast.ToString(key.Interface())
			result = append(result, s)
		}
	case reflect.Struct:
		if v.NumField() == 0 {
			return
		}
		result = make([]string, 0, v.NumField())
		for i := 0; i < v.NumField(); i++ {
			result = append(result, v.Type().Field(i).Name)
		}
	}
	return
}
|
package imagekit
import(
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
"github.com/docker/docker/image"
)
// ImageKit represents the toolset for manipulating a docker image: it
// composes packing, static building, and image description.
type ImageKit interface {
	Packer
	StaticBuilder
	ImageDescriptor
}
// Packer packs layers to generate a legal docker image manifest under
// the given name/tag.
type Packer interface {
	Pack(name string, tag string, imageMeta image.Image, layers []distribution.Descriptor) (distribution.Manifest, error)
}
// DockerImage is a (currently empty) descriptor for a built docker image.
type DockerImage struct{}
// StaticBuilder builds an image from static files.
type StaticBuilder interface {
	Build() DockerImage
}
// ImageDescriptor is a descriptor for a docker image.
type ImageDescriptor interface {
	// WIP: exposes the image manifest.
	Manifest() distribution.Manifest
}
|
package todo
/**
这里是整个todo项目的配置中心
*/
import (
"github.com/baotingfang/gomvc"
"path"
"runtime"
"time"
)
var (
	// Database driver type: both "mymysql" and "mysql" are supported.
	DATABASE_Driver string = "mymysql"

	// mysql DSN format:   "user:password@/dbname?charset=utf8&keepalive=1"
	// mymysql DSN format: tcp:localhost:3306*dbname/user/pwd
	// SECURITY(review): credentials are hard-coded in source; move them
	// to configuration or environment variables.
	DATABASE_DSN string = "tcp:localhost:3306*goku/root/19840406"
)
// Config is the global gomvc server configuration for the todo app.
// RootDir is filled in by init() below.
var Config *gomvc.ServerConfig = &gomvc.ServerConfig{
	Addr:           ":8080",
	ReadTimeout:    10 * time.Second,
	WriteTimeout:   10 * time.Second,
	MaxHeaderBytes: 1 << 20,
	// To be tested some time:
	// RootDir: _, filename, _, _ := runtime.Caller(1),

	// Static assets path.
	StaticPath: "static",
	// View template path.
	ViewPath: "views",

	LogLevel: gomvc.LOG_LEVEL_LOG,
	Debug:    true,
}
// init resolves the project root from the caller's source location and
// registers global view data.
func init() {
	// Use the source directory as the project root.
	// NOTE(review): runtime.Caller(1) from init refers to init's caller,
	// not this file — confirm the resulting path is the intended one.
	_, filename, _, _ := runtime.Caller(1)
	Config.RootDir = path.Dir(filename)

	// Set global template variables.
	gomvc.SetGlobalViewData("SiteName", "Todo - by gomvc")
}
|
package main
import (
"log"
"sync"
"time"
cleverbot "github.com/ugjka/cleverbot-go"
)
// ConversationCallback receives the generated reply (or error) for a
// conversation, together with the target channel and nick.
type ConversationCallback func(channel, nick, reply string, err error)

// Conversation is a single conversation.
type Conversation struct {
	// channel that the message should be posted to
	channel string
	// nick the bot is currently talking with.
	nick string
	// bot is the CleverBot API session handle
	bot *cleverbot.Session
	// nextInput is the next input we're gonna fetch a response for.
	nextInput string
	// mutex for safe handling of the mutable fields below
	mutex sync.RWMutex
	// flag telling that response is being fetched
	gettingReply bool
	// callback to call when a reply is generated.
	cb ConversationCallback
	// lastActive tells when the convo was last active
	lastActive time.Time
}
// NewConversation initializes and returns a ready-to-use Conversation
// bound to the given channel/nick, using the globally configured
// CleverBot API key.
// NOTE(review): lastActive stays at its zero value, so Idle() is very
// large until the first input — confirm callers expect that.
func NewConversation(channel, nick string, callback ConversationCallback) *Conversation {
	return &Conversation{
		nick:    nick,
		channel: channel,
		bot:     cleverbot.New(app.cfg.CleverBotAPIKey),
		cb:      callback,
	}
}
// NewInput feeds new input into the conversation buffer and, if no
// reply fetch is currently in flight, starts one with the buffered text.
func (c *Conversation) NewInput(line string) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	// Join buffered lines with a single space. Previously the very first
	// line also picked up a leading space from the unconditional
	// `" " + line` concatenation.
	if c.nextInput == "" {
		c.nextInput = line
	} else {
		c.nextInput += " " + line
	}
	c.lastActive = time.Now()
	if !c.gettingReply {
		c.gettingReply = true
		go c.GetReply(c.nextInput)
		c.nextInput = ""
	}
}
// GetReply retrieves a reply to the given string from cleverbot, sleeps
// 200ms per answer character (to simulate typing) on success, then
// delivers the result through the callback and clears the in-flight flag.
func (c *Conversation) GetReply(to string) {
	answer, err := c.bot.Ask(to)
	// NOTE(review): answer is logged before err is checked; on error it
	// may be empty.
	log.Printf("Got answer: %s, delay will be %dms", answer, len(answer)*200)
	if err == nil {
		time.Sleep(time.Duration((len(answer) * 200)) * time.Millisecond)
	}
	c.cb(c.channel, c.nick, answer, err)
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.lastActive = time.Now()
	c.gettingReply = false
}
// Nick returns the conversation nick (immutable after construction).
func (c *Conversation) Nick() string {
	return c.nick
}
// Channel returns the conversation channel (immutable after construction).
func (c *Conversation) Channel() string {
	return c.channel
}
// Idle returns how long the conversation has been idle.
//
// lastActive is written under c.mutex by NewInput/GetReply, so it must
// also be read under the lock here (the previous version raced).
// NOTE(review): confirm no caller already holds the lock.
func (c *Conversation) Idle() time.Duration {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	return time.Since(c.lastActive)
}
|
package main
import (
"BOOKS-LIST/models"
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
)
type Book struct {
ID int `json:id`
Title string `json:title`
Author string `json:author`
Year string `json:year`
}
// books is the in-memory store served by the handlers below.
var books []models.Book

// db is the shared database handle.
// NOTE(review): it is never initialized in this file; handlers that use
// it (getBook) will nil-panic unless it is opened elsewhere.
var db *sql.DB
func homePage(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Welcome to the HomePage!")
fmt.Println("Endpoint Hit: homePage")
}
// handleRequests registers the root route and blocks serving on :10000.
// log.Fatal exits the process if the listener fails.
func handleRequests() {
	http.HandleFunc("/", homePage)
	log.Fatal(http.ListenAndServe(":10000", nil))
}
// main seeds an in-memory book list and serves it over HTTP on :2222.
//
// The seed literals must be models.Book: the slice is []models.Book, so
// appending the local Book type did not compile.
// NOTE(review): assumes models.Book has ID/Title/Author/Year fields with
// these types (getBook scans into exactly those fields) — confirm
// against the models package.
func main() {
	// handleRequests()
	// router := mux.NewRouter()
	/*
		db, err := sql.Open("mysql", "admin:admin@tcp(127.0.0.1:3306)/library")
		if err != nil {
			panic(err)
		}
		defer db.Close()
		insert, err := db.Query("INSERT INTO books VALUES (2,'Golang GoRoutines','Mr.GoRoutines','2010') ")
		if err != nil {
			panic(err.Error())
		}
		defer insert.Close()
	*/
	books = append(books,
		models.Book{ID: 1, Title: "Golang Pointers", Author: "Mr. Golang", Year: "2010"},
		models.Book{ID: 2, Title: "Golang GoRoutines", Author: "Mr. GoRoutines", Year: "2011"},
		models.Book{ID: 3, Title: "Golang Routers", Author: "Mr. Routers", Year: "2012"},
		models.Book{ID: 4, Title: "Golang Concurrency", Author: "Mr. Concurrency", Year: "2013"},
	)
	http.HandleFunc("/articles", getBooks)
	/*
		router.HandleFunc("/books", getBooks).Methods("GET")
		router.HandleFunc("/books/{id}", getBook).Methods("GET")
		router.HandleFunc("/books", addBook).Methods("POST")
		router.HandleFunc("/books", updateBook).Methods("PUT")
		router.HandleFunc("/books/{id}", removeBook).Methods("DELETE")
	*/
	log.Fatal(http.ListenAndServe(":2222", nil))
}
// getBooks writes the in-memory book list as JSON.
//
// Improvements: sets the Content-Type header and logs the (previously
// ignored) Encode error.
func getBooks(w http.ResponseWriter, r *http.Request) {
	// rows, err := db.Query("SELECT * FROM books")
	// (dead DB code removed; see getBook for the SQL-backed variant)
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(books); err != nil {
		log.Println(err)
	}
}
// getBook looks up a single book by the {id} route parameter and writes
// it as JSON. Scan errors are logged; the response then carries the
// zero-valued book, preserving the previous behavior.
func getBook(w http.ResponseWriter, r *http.Request) {
	var book models.Book
	params := mux.Vars(r)
	id, _ := strconv.Atoi(params["id"])
	// Bug fix: the MySQL driver uses `?` placeholders; the previous `$1`
	// is PostgreSQL syntax and is sent to MySQL verbatim, breaking the
	// query.
	rows := db.QueryRow("SELECT * FROM books where id=?", id)
	err := rows.Scan(&book.ID, &book.Title, &book.Author, &book.Year)
	log.Println(err)
	json.NewEncoder(w).Encode(book)
}
// addBook decodes a book from the request body, appends it to the list,
// and echoes the updated collection.
//
// The decode error was previously discarded with `_ =`; a malformed
// body silently appended a zero-valued book.
func addBook(w http.ResponseWriter, r *http.Request) {
	var book models.Book
	if err := json.NewDecoder(r.Body).Decode(&book); err != nil {
		http.Error(w, "invalid request body", http.StatusBadRequest)
		return
	}
	books = append(books, book)
	json.NewEncoder(w).Encode(books)
}
// updateBook replaces every stored book whose ID matches the payload
// and echoes the updated collection once.
//
// Bug fixes: the previous version (1) appended the payload before
// updating, duplicating the book, and (2) encoded the slice twice,
// emitting two JSON documents in one response body. It also ignored the
// decode error.
func updateBook(w http.ResponseWriter, r *http.Request) {
	var book models.Book
	if err := json.NewDecoder(r.Body).Decode(&book); err != nil {
		http.Error(w, "invalid request body", http.StatusBadRequest)
		return
	}
	for i, item := range books {
		if item.ID == book.ID {
			books[i] = book
		}
	}
	json.NewEncoder(w).Encode(books)
}
// removeBook deletes the first book whose ID matches the {id} route
// parameter and echoes the remaining collection.
func removeBook(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	id, _ := strconv.Atoi(params["id"])
	for i, item := range books {
		if item.ID == id {
			books = append(books[:i], books[i+1:]...)
			// Stop here: the range snapshot no longer matches the
			// mutated slice; continuing could skip or re-read elements.
			break
		}
	}
	json.NewEncoder(w).Encode(books)
}
|
package xendit
import (
"fmt"
"time"
"github.com/imrenagi/go-payment"
)
// EWalletPaymentStatus stores callback information for a xendit ewallet
// charge notification (event envelope plus the charge data).
type EWalletPaymentStatus struct {
	Event             string                   `json:"event"`
	BusinessID        string                   `json:"business_id"`
	CreatedAt         time.Time                `json:"created"`
	Data              EWalletPaymentStatusData `json:"data"`
	CallbackAuthToken string                   `json:"callback_authentication_token"`
}
// IsValid checks whether the callback auth token sent by xendit matches
// the authentication token stored on the dashboard.
func (s EWalletPaymentStatus) IsValid(authKey string) error {
	return checkCallbackToken(authKey, s.CallbackAuthToken)
}
// EWalletPaymentStatusData is the charge payload embedded in an ewallet
// callback: identifiers, amounts, channel details, and timestamps.
type EWalletPaymentStatusData struct {
	ID                 string                 `json:"id"`
	BusinessID         string                 `json:"business_id"`
	ReferenceID        string                 `json:"reference_id"`
	Status             string                 `json:"status"`
	Currency           string                 `json:"currency"`
	ChargeAmount       float64                `json:"charge_amount"`
	CaptureAmount      *float64               `json:"capture_amount"`
	ChannelCode        string                 `json:"channel_code"`
	CheckoutMethod     string                 `json:"checkout_method"`
	ChannelProperties  map[string]string      `json:"channel_properties,omitempty"`
	Actions            map[string]string      `json:"actions,omitempty"`
	IsRedirectRequired bool                   `json:"is_redirect_required"`
	CallbackURL        string                 `json:"callback_url"`
	CreatedAt          time.Time              `json:"created"`
	UpdatedAt          time.Time              `json:"updated"`
	VoidedAt           *time.Time             `json:"voided_at"`
	// CaptureNow       *bool                 `json:"capture_now,omitempty"`
	CustomerID      *string                `json:"customer_id"`
	PaymentMethodID *string                `json:"payment_method_id"`
	Metadata        map[string]interface{} `json:"metadata"`
}
// DANAPaymentStatus stores the data sent by xendit while triggering
// any webhook for dana payment
type DANAPaymentStatus struct {
	ExternalID        string  `json:"external_id"`
	Amount            float64 `json:"amount"`
	BusinessID        string  `json:"business_id"`
	EWalletType       string  `json:"ewallet_type"`
	PaymentStatus     string  `json:"payment_status"`
	TransactionDate   string  `json:"transaction_date"`
	CallbackAuthToken string  `json:"callback_authentication_token"`
}
// IsValid checks whether the callback auth token sent by xendit matches the
// authentication token stored on the dashboard
func (s DANAPaymentStatus) IsValid(authKey string) error {
	return checkCallbackToken(authKey, s.CallbackAuthToken)
}
// LinkAjaPaymentStatus stores the data sent by xendit while triggering
// any webhook for linkaja payment
type LinkAjaPaymentStatus struct {
	ExternalID        string  `json:"external_id"`
	Amount            float64 `json:"amount"`
	Status            string  `json:"status"`
	EWalletType       string  `json:"ewallet_type"`
	CallbackAuthToken string  `json:"callback_authentication_token"`
}
// IsValid checks whether the callback auth token sent by xendit matches the
// authentication token stored on the dashboard
func (s LinkAjaPaymentStatus) IsValid(authKey string) error {
	return checkCallbackToken(authKey, s.CallbackAuthToken)
}
// checkCallbackToken compares the token configured on the dashboard
// with the one delivered in the callback; a mismatch yields an error
// wrapping payment.ErrBadRequest.
func checkCallbackToken(stored, given string) error {
	if stored == given {
		return nil
	}
	return fmt.Errorf("callback authentication token is invalid, %w", payment.ErrBadRequest)
}
// OVOPaymentStatus stores the data sent by xendit while triggering
// any webhook for ovo payment
type OVOPaymentStatus struct {
	Event       string  `json:"event"`
	ID          string  `json:"id"`
	ExternalID  string  `json:"external_id"`
	BusinessID  string  `json:"business_id"`
	Phone       string  `json:"phone"`
	EWalletType string  `json:"ewallet_type"`
	Amount      float64 `json:"amount"`
	FailureCode string  `json:"failure_code"`
	Status      string  `json:"status"`
}
// IsValid always returns no error at least for now since
// we have no idea why xendit is not returning the callback token
// on the notification payload
func (s OVOPaymentStatus) IsValid(authKey string) error {
	return nil
}
// InvoicePaymentStatus stores the data sent by xendit while triggering
// any webhook for xenInvoice
// https://xendit.github.io/apireference/#invoice-callback
type InvoicePaymentStatus struct {
	ID                     string  `json:"id"`
	ExternalID             string  `json:"external_id"`
	UserID                 string  `json:"user_id"`
	PaymentMethod          string  `json:"payment_method"`
	Status                 string  `json:"status"`
	MerchantName           string  `json:"merchant_name"`
	Amount                 float64 `json:"amount"`
	PaidAmount             float64 `json:"paid_amount"`
	BankCode               string  `json:"bank_code"`
	RetailOutletName       string  `json:"retail_outlet_name"`
	EwalletType            string  `json:"ewallet_type"`
	OnDemandLink           string  `json:"on_demand_link"`
	RecurringPaymentID     string  `json:"recurring_payment_id"`
	PaidAt                 string  `json:"paid_at"`
	PayerEmail             string  `json:"payer_email"`
	Description            string  `json:"description"`
	AdjustedReceivedAmount float64 `json:"adjusted_received_amount"`
	FeesPaidAmount         float64 `json:"fees_paid_amount"`
	CreatedAt              string  `json:"created"`
	UpdatedAt              string  `json:"updated"`
	Currency               string  `json:"currency"`
	PaymentChannel         string  `json:"payment_channel"`
	PaymentDestination     string  `json:"payment_destination"`
	// Not part of the payload; excluded from (un)marshalling.
	CallbackAuthToken string `json:"-"`
}
// IsValid always returns no error at least for now since
// we have no idea why xendit is not returning the callback token
// on the notification payload
func (s InvoicePaymentStatus) IsValid(authKey string) error {
	return nil
}
|
package vibely
import (
"encoding/json"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"time"
"github.com/gorilla/mux"
"github.com/joho/godotenv"
)
// baseUrl is the Genius site root used for both the API search and page crawling.
const baseUrl = "https://genius.com"
// search proxies a song search to the Genius public API and relays the
// decoded JSON payload to the client.
//
// Bug fixes: the previous version kept going after a failed http.Get
// (nil-pointer dereference on resp.Body) and never closed the response
// body, leaking the connection.
func search(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	query := vars["value"]
	// Note: this local shadows the net/url package for the rest of the
	// function; QueryEscape is evaluated before the assignment.
	url := baseUrl + "/api/search/song?q=" + url.QueryEscape(query)
	resp, err := http.Get(url)
	if err != nil {
		log.Println("Error searching for songs on Genius, ", err)
		http.Error(w, "upstream search failed", http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("Error parsing response, ", err)
		http.Error(w, "upstream read failed", http.StatusBadGateway)
		return
	}
	var data map[string]interface{}
	if err = json.Unmarshal(body, &data); err != nil {
		log.Println("Error unmarshalling response from search ", err)
		http.Error(w, "bad upstream payload", http.StatusBadGateway)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(data)
}
// returnScrambled crawls the Genius song page at the given path,
// scrambles the result, and responds with the scrambled JSON.
func returnScrambled(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	query := vars["path"]
	url := baseUrl + "/" + query
	res := crawlGetSong(url)
	jsonScrambled := scramble(res)
	w.Header().Add("Content-Type", "application/json")
	json.NewEncoder(w).Encode(jsonScrambled)
}
func index(w http.ResponseWriter, r *http.Request) {
indexFile, err := os.Open("./static/index.html")
if err != nil {
io.WriteString(w, "error reading index")
return
}
defer indexFile.Close()
io.Copy(w, indexFile)
}
// Start wires up the HTTP routes and serves (blocking) on 127.0.0.1:8996.
func Start() {
	// create data.json if it doesn't exist
	// ensureDataExists()
	r := mux.NewRouter()
	// Best-effort .env load; a missing file is fine.
	_ = godotenv.Load()
	srv := &http.Server{
		Handler:      r,
		Addr:         "127.0.0.1:8996",
		WriteTimeout: 60 * time.Second,
		ReadTimeout:  60 * time.Second,
	}
	r.HandleFunc("/", index)
	// NOTE(review): these patterns have no separator before the route
	// variable (e.g. /searchSongs{value}) — confirm that is intended.
	r.Methods("GET").Path("/searchSongs{value}").HandlerFunc(search)
	r.Methods("GET").Path("/scramble{path}").HandlerFunc(returnScrambled)
	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
	log.Printf("Server listening on %s\n", srv.Addr)
	log.Fatal(srv.ListenAndServe())
}
|
// Copyright 2018 Lars Hoogestraat
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"database/sql"
"errors"
"fmt"
"net/http"
"strings"
"time"
"git.hoogi.eu/snafu/go-blog/crypt"
"git.hoogi.eu/snafu/go-blog/httperror"
"git.hoogi.eu/snafu/go-blog/logger"
"git.hoogi.eu/snafu/go-blog/settings"
"golang.org/x/crypto/bcrypt"
)
// UserDatasourceService defines an interface for CRUD operations for users
type UserDatasourceService interface {
	Create(u *User) (int, error)
	List(p *Pagination) ([]User, error)
	Get(userID int) (*User, error)
	Update(u *User, changePassword bool) error
	Count(ac AdminCriteria) (int, error)
	GetByMail(mail string) (*User, error)
	GetByUsername(username string) (*User, error)
	Remove(userID int) error
}
// User represents a user account. Password/Salt hold the stored hash
// material; PlainPassword is only populated transiently during
// create/update/login and is cleared afterwards.
type User struct {
	ID            int
	Username      string
	Email         string
	DisplayName   string
	Password      []byte
	PlainPassword []byte
	Salt          []byte
	LastModified  time.Time
	Active        bool
	IsAdmin       bool
}
// UserService contains the service to access users: the datasource, the
// user-related settings, and an optional interceptor for lifecycle hooks.
type UserService struct {
	Datasource      UserDatasourceService
	Config          settings.User
	UserInterceptor UserInterceptor
}
// UserInterceptor will be executed before and after creating, updating,
// and removing users. Pre* errors abort the operation; Post* errors are
// only logged.
type UserInterceptor interface {
	PreCreate(user *User) error
	PostCreate(user *User) error
	PreUpdate(oldUser *User, user *User) error
	PostUpdate(oldUser *User, user *User) error
	PreRemove(user *User) error
	PostRemove(user *User) error
}
// Validations is a bit mask selecting which optional checks
// User.validate performs.
type Validations int

const (
	// VDupEmail checks that the email is not already taken.
	VDupEmail = 1 << iota
	// VDupUsername checks that the username is not already taken.
	VDupUsername
	// VPassword checks the minimum password length.
	VPassword
)
// validate trims and checks the user's fields; v selects which of the
// optional checks (duplicate mail/username, password length) run.
func (u *User) validate(us *UserService, minPasswordLength int, v Validations) error {
	u.DisplayName = strings.TrimSpace(u.DisplayName)
	u.Email = strings.TrimSpace(u.Email)
	u.Username = strings.TrimSpace(u.Username)
	if len(u.DisplayName) == 0 {
		return httperror.ValueRequired("display name")
	}
	// Rune count, so multi-byte characters count once.
	if len([]rune(u.DisplayName)) > 191 {
		return httperror.ValueTooLong("display name", 191)
	}
	if len(u.Email) == 0 {
		return httperror.ValueRequired("email")
	}
	// NOTE(review): byte-length check, unlike the rune-based checks for
	// display name and username — confirm this is intended.
	if len(u.Email) > 191 {
		return httperror.ValueTooLong("email", 191)
	}
	if len(u.Username) == 0 {
		return httperror.ValueRequired("username")
	}
	if len([]rune(u.Username)) > 60 {
		return httperror.ValueTooLong("username", 60)
	}
	// An empty password is allowed here (e.g. update without a password
	// change); only a non-empty, too-short password is rejected.
	if (v & VPassword) != 0 {
		if len(u.PlainPassword) < minPasswordLength && len(u.PlainPassword) > 0 {
			return httperror.New(http.StatusUnprocessableEntity,
				fmt.Sprintf("The password is too short. It must be at least %d characters long.", minPasswordLength),
				fmt.Errorf("the password is too short, it must be at least %d characters long", minPasswordLength),
			)
		}
	}
	if (v & VDupEmail) != 0 {
		if err := us.duplicateMail(u.Email); err != nil {
			return err
		}
	}
	if (v & VDupUsername) != 0 {
		if err := us.duplicateUsername(u.Username); err != nil {
			return err
		}
	}
	return nil
}
// duplicateMail returns an unprocessable-entity error when a user with
// the given mail already exists. A datasource "no rows" result means
// the mail is free and is not treated as an error.
//
// Bug fix: the internal error message read "already exits".
func (us *UserService) duplicateMail(mail string) error {
	user, err := us.Datasource.GetByMail(mail)
	if err != nil && !errors.Is(err, sql.ErrNoRows) {
		return err
	}
	if user != nil {
		return httperror.New(http.StatusUnprocessableEntity,
			fmt.Sprintf("The mail %s already exists.", mail),
			fmt.Errorf("the mail %s already exists", mail))
	}
	return nil
}
// duplicateUsername returns an unprocessable-entity error when a user
// with the given username already exists; a datasource "no rows" result
// means the name is free.
func (us *UserService) duplicateUsername(username string) error {
	user, err := us.Datasource.GetByUsername(username)
	if err != nil && !errors.Is(err, sql.ErrNoRows) {
		return err
	}
	if user == nil {
		return nil
	}
	return httperror.New(http.StatusUnprocessableEntity,
		fmt.Sprintf("The username %s already exists.", username),
		fmt.Errorf("the username %s already exists", username))
}
// Count returns the amount of users matching the given criteria.
func (us *UserService) Count(a AdminCriteria) (int, error) {
	return us.Datasource.Count(a)
}
// List returns a list of users. Limits the amount based on the defined pagination.
func (us *UserService) List(p *Pagination) ([]User, error) {
	return us.Datasource.List(p)
}
// GetByID gets the user based on the given id; the result will not
// contain the user password. A missing row is mapped to a not-found
// error.
func (us *UserService) GetByID(userID int) (*User, error) {
	user, err := us.Datasource.Get(userID)
	switch {
	case err == nil:
		return user, nil
	case errors.Is(err, sql.ErrNoRows):
		return nil, httperror.NotFound("user", fmt.Errorf("the user with id %d was not found", userID))
	default:
		return nil, err
	}
}
// GetByUsername gets the user based on the given username; will contain the user password
func (us *UserService) GetByUsername(username string) (*User, error) {
	u, err := us.Datasource.GetByUsername(username)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, httperror.NotFound("user", err)
		}
		return nil, err
	}
	return u, nil
}
// GetByMail gets the user based on the given mail; will contain the user password
func (us *UserService) GetByMail(mail string) (*User, error) {
	u, err := us.Datasource.GetByMail(mail)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, httperror.NotFound("user", err)
		}
		return nil, err
	}
	return u, nil
}
// Create validates and persists the user, hashing the salted plain
// password, and returns the new user ID (-1 on error).
// If a UserInterceptor is configured, PreCreate runs before creating
// (errors abort) and PostCreate after (errors are only logged).
//
// Bug fix: the PostCreate error was previously logged unconditionally,
// emitting a spurious "... <nil>" log line on every successful create.
func (us *UserService) Create(u *User) (int, error) {
	if us.UserInterceptor != nil {
		if err := us.UserInterceptor.PreCreate(u); err != nil {
			return -1, httperror.InternalServerError(fmt.Errorf("error while executing user interceptor 'PreCreate' error %v", err))
		}
	}
	if err := u.validate(us, us.Config.MinPasswordLength, VDupUsername|VDupEmail|VPassword); err != nil {
		return -1, err
	}
	salt := crypt.GenerateSalt()
	saltedPassword := append(u.PlainPassword[:], salt[:]...)
	password, err := crypt.CryptPassword([]byte(saltedPassword))
	if err != nil {
		return -1, err
	}
	u.Salt = salt
	u.Password = password
	userID, err := us.Datasource.Create(u)
	if err != nil {
		return -1, err
	}
	if us.UserInterceptor != nil {
		if err := us.UserInterceptor.PostCreate(u); err != nil {
			logger.Log.Errorf("error while executing PostCreate user interceptor method %v", err)
		}
	}
	// Drop sensitive material as soon as it is no longer needed.
	salt = nil
	saltedPassword = nil
	u.PlainPassword = nil
	return userID, nil
}
// Update updates the user.
// If an UserInterceptor is available the action PreUpdate is executed
// before updating and PostUpdate after updating the user.
func (us *UserService) Update(u *User, changePassword bool) error {
	oldUser, err := us.Datasource.Get(u.ID)
	if err != nil {
		return err
	}
	// NOTE(review): oldUser is fetched by u.ID, so oldUser.ID always
	// equals u.ID and this permission check can never fire — confirm
	// which IDs were meant to be compared.
	if !oldUser.IsAdmin {
		if oldUser.ID != u.ID {
			return httperror.PermissionDenied("update", "user", fmt.Errorf("permission denied user %d is not granted to update user %d", oldUser.ID, u.ID))
		}
	}
	if us.UserInterceptor != nil {
		if err := us.UserInterceptor.PreUpdate(oldUser, u); err != nil {
			return httperror.InternalServerError(fmt.Errorf("error while executing user interceptor 'PreUpdate' error %v", err))
		}
	}
	// Only run the duplicate/password checks that are relevant for the
	// fields actually being changed.
	var v Validations
	if u.Email != oldUser.Email {
		v |= VDupEmail
	}
	if u.Username != oldUser.Username {
		v |= VDupUsername
	}
	if changePassword {
		v |= VPassword
	}
	if err = u.validate(us, us.Config.MinPasswordLength, v); err != nil {
		return err
	}
	// Refuse changes that would leave the system without an administrator.
	oneAdmin, err := us.OneAdmin()
	if err != nil {
		return err
	}
	if oneAdmin {
		if (oldUser.IsAdmin && !u.IsAdmin) || (oldUser.IsAdmin && !u.Active) {
			return httperror.New(http.StatusUnprocessableEntity,
				"Could not update user, because no administrator would remain",
				fmt.Errorf("could not update user %s action, because no administrator would remain", oldUser.Username))
		}
	}
	if changePassword {
		salt := crypt.GenerateSalt()
		saltedPassword := append(u.PlainPassword[:], salt[:]...)
		password, err := crypt.CryptPassword([]byte(saltedPassword))
		if err != nil {
			return err
		}
		u.Password = password
		u.Salt = salt
	}
	if err = us.Datasource.Update(u, changePassword); err != nil {
		return err
	}
	// Clear sensitive material before handing u to the interceptor.
	u.Password = nil
	if us.UserInterceptor != nil {
		if err := us.UserInterceptor.PostUpdate(oldUser, u); err != nil {
			logger.Log.Errorf("error while executing PostUpdate user interceptor method %v", err)
		}
	}
	u.PlainPassword = nil
	return nil
}
// Authenticate authenticates the user by the given login method (email or username)
// if the user was found but the password is wrong the found user and an error will be returned
func (us *UserService) Authenticate(u *User, loginMethod settings.LoginMethod) (*User, error) {
	var err error
	if len(u.Username) == 0 || len(u.PlainPassword) == 0 {
		return nil, httperror.New(http.StatusUnauthorized, "Your username or password is invalid.", errors.New("no username or password were given"))
	}
	// Keep the submitted password; u is about to be replaced by the
	// datasource result.
	var password = u.PlainPassword
	if loginMethod == settings.EMail {
		u, err = us.Datasource.GetByMail(u.Email)
	} else {
		u, err = us.Datasource.GetByUsername(u.Username)
	}
	if err != nil {
		// Do some extra work: run a bcrypt comparison against a fixed
		// hash even when the user was not found, so the lookup failure
		// path takes comparable time to the normal path.
		bcrypt.CompareHashAndPassword([]byte("$2a$12$bQlRnXTNZMp6kCyoAlnf3uZW5vtmSj9CHP7pYplRUVK2n0C5xBHBa"), password)
		if errors.Is(err, sql.ErrNoRows) {
			return nil, httperror.New(http.StatusUnauthorized, "Your username or password is invalid.", err)
		}
		return nil, err
	}
	u.PlainPassword = password
	if err := u.comparePassword(); err != nil {
		return u, httperror.New(http.StatusUnauthorized, "Your username or password is invalid.", err)
	}
	if !u.Active {
		return nil, httperror.New(http.StatusUnprocessableEntity,
			"Your account is deactivated.",
			fmt.Errorf("the user with id %d tried to logged in but the account is deactivated", u.ID))
	}
	// Scrub credentials before returning the authenticated user.
	u.PlainPassword = nil
	u.Password = nil
	u.Salt = nil
	return u, nil
}
// Remove removes the user; returns an error if no administrator would remain.
// PreRemove errors abort the removal; PostRemove errors are only logged.
func (us *UserService) Remove(u *User) error {
	if us.UserInterceptor != nil {
		if err := us.UserInterceptor.PreRemove(u); err != nil {
			return httperror.InternalServerError(fmt.Errorf("error while executing user interceptor 'PreRemove' error %v", err))
		}
	}
	oneAdmin, err := us.OneAdmin()
	if err != nil {
		return err
	}
	if oneAdmin {
		if u.IsAdmin {
			return httperror.New(http.StatusUnprocessableEntity,
				"Could not remove administrator. No Administrator would remain.",
				fmt.Errorf("could not remove administrator %s no administrator would remain", u.Username))
		}
	}
	err = us.Datasource.Remove(u.ID)
	// NOTE(review): PostRemove runs even when the datasource removal
	// failed — confirm this is intended.
	if us.UserInterceptor != nil {
		if err := us.UserInterceptor.PostRemove(u); err != nil {
			logger.Log.Errorf("error while executing PostRemove user interceptor method %v", err)
		}
	}
	return err
}
// OneAdmin reports whether exactly one administrator account exists.
// On a datasource error it errs on the safe side and returns true.
func (us *UserService) OneAdmin() (bool, error) {
	count, err := us.Datasource.Count(OnlyAdmins)
	if err != nil {
		return true, err
	}
	return count == 1, nil
}
// comparePassword checks the submitted plain password (with the stored
// salt appended, matching how Create/Update hash it) against the stored
// bcrypt hash.
func (u *User) comparePassword() error {
	return bcrypt.CompareHashAndPassword(u.Password, append(u.PlainPassword[:], u.Salt[:]...))
}
|
/*
Given two numbers arr1 and arr2 in base -2, return the result of adding them together.
Each number is given in array format: as an array of 0s and 1s, from most significant bit to least significant bit. For example, arr = [1,1,0,1] represents the number (-2)^3 + (-2)^2 + (-2)^0 = -3. A number arr in array format is also guaranteed to have no leading zeros: either arr == [0] or arr[0] == 1.
Return the result of adding arr1 and arr2 in the same format: as an array of 0s and 1s with no leading zeros.
Example 1:
Input: arr1 = [1,1,1,1,1], arr2 = [1,0,1]
Output: [1,0,0,0,0]
Explanation: arr1 represents 11, arr2 represents 5, the output represents 16.
Example 2:
Input: arr1 = [0], arr2 = [0]
Output: [0]
Example 3:
Input: arr1 = [0], arr2 = [1]
Output: [1]
Constraints:
1 <= arr1.length, arr2.length <= 1000
arr1[i] and arr2[i] are 0 or 1
arr1 and arr2 have no leading zeros
Hint:
We can try to determine the last digit of the answer, then divide everything by 2 and repeat.
*/
package main
import (
"fmt"
"reflect"
)
// main runs the three example cases from the problem statement.
func main() {
	test([]int{1, 1, 1, 1, 1}, []int{1, 0, 1}, []int{1, 0, 0, 0, 0})
	test([]int{0}, []int{0}, []int{0})
	test([]int{0}, []int{1}, []int{1})
}
// assert panics when the condition does not hold.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test computes nbadd(x, y), prints the result, and asserts it equals
// the expected value r.
func test(x, y, r []int) {
	z := nbadd(x, y)
	fmt.Println(z)
	assert(reflect.DeepEqual(z, r))
}
/*
https://mathworld.wolfram.com/Negabinary.html
https://en.wikipedia.org/wiki/Negative_base
https://walkccc.me/LeetCode/problems/1073/
*/
func nbadd(x, y []int) []int {
z := []int{}
c := 0
i := len(x) - 1
j := len(y) - 1
for c != 0 || i >= 0 || j >= 0 {
if i >= 0 {
c, i = c+x[i], i-1
}
if j >= 0 {
c, j = c+y[j], j-1
}
z = append(z, c&1)
c = -(c >> 1)
}
for {
n := len(z)
if !(n > 1 && z[n-1] == 0) {
break
}
z = z[:n-1]
}
n := len(z)
for i := 0; i < n/2; i++ {
z[i], z[n-i-1] = z[n-i-1], z[i]
}
return z
}
|
package amazonmwsapi
import (
"bytes"
"context"
"encoding/csv"
)
// GetReportRequest requests a single amzMWS report for download
type GetReportRequest struct {
	amazonRequest
}
// Do sends the request to the amazonMWS reports API and returns the
// report rows as maps keyed by column header.
func (r *GetReportRequest) Do(ctx context.Context) ([]map[string]string, error) {
	respBytes, err := r.client.callAPI(ctx, &r.amazonRequest)
	if err != nil {
		return nil, err
	}
	return r.parseTSVData(respBytes)
}
// parseTSVData decodes a tab-separated report into one map per data
// row, keyed by the column titles from the header row.
//
// Bug fixes: an empty report previously panicked (data[1:] on an empty
// slice and a negative capacity for make); and because
// FieldsPerRecord = -1 allows rows shorter than the header, indexing
// dataRow[colIndex] could panic — short rows now just omit the missing
// columns.
func (r *GetReportRequest) parseTSVData(rep []byte) ([]map[string]string, error) {
	// Parse TSV into string data slices.
	tsvReader := csv.NewReader(bytes.NewReader(rep))
	tsvReader.Comma = '\t'
	tsvReader.FieldsPerRecord = -1
	data, err := tsvReader.ReadAll()
	if err != nil {
		return nil, err
	}
	if len(data) == 0 {
		return nil, nil
	}
	// Parse into maps indexed by column header title.
	header := data[0]
	rowMaps := make([]map[string]string, len(data)-1)
	for rowIndex, dataRow := range data[1:] {
		rowMap := make(map[string]string, len(header))
		for colIndex, title := range header {
			if colIndex < len(dataRow) {
				rowMap[title] = dataRow[colIndex]
			}
		}
		rowMaps[rowIndex] = rowMap
	}
	return rowMaps, nil
}
// Download sends the request to the amazonMWS reports API and saves the
// report to filePath.
func (r *GetReportRequest) Download(ctx context.Context, filePath string) error {
	err := r.client.downloadReport(ctx, &r.amazonRequest, filePath)
	if err != nil {
		return err
	}
	return nil
}
/*
// Response is TSV data:
"_GET_FLAT_FILE_ORDERS_DATA_"
Index Legend:
[0] = order-id
[1] = order-item-id
[2] = purchase-date
[3] = payments-date
[4] = buyer-email
[5] = buyer-name
[6] = buyer-phone-number
[7] = sku
[8] = product-name
[9] = quantity-purchased
[10] = currency
[11] = item-price
[12] = item-tax
[13] = shipping-price
[14] = shipping-tax
[15] = ship-service-level
[16] = recipient-name
[17] = ship-address-1
[18] = ship-address-2
[19] = ship-address-3
[20] = ship-city
[21] = ship-state
[22] = ship-postal-code
[23] = ship-country
[24] = ship-phone-number
[25] = delivery-start-date
[26] = delivery-end-date
[27] = delivery-time-zone
[28] = delivery-Instructions
[29] = sales-channel
[30] = is-business-order
[31] = purchase-order-number
[32] = price-designationROW
*/
|
package minnow
// Hook decides whether a set of properties (raw bytes or parsed) matches.
type Hook interface {
	MatchesBytes([]byte) bool
	Matches(Properties) bool
}
// BasicPropertiesMatchHook matches when every key/value pair in `match`
// is present (with an equal value) in the candidate properties.
type BasicPropertiesMatchHook struct {
	match Properties
}
// NewBasicPropertiesMatchHookFromFile loads the required properties from
// the given file and wraps them in a hook.
func NewBasicPropertiesMatchHookFromFile(path Path) (BasicPropertiesMatchHook, error) {
	hookProperties, err := PropertiesFromFile(path)
	if err != nil {
		return BasicPropertiesMatchHook{}, err
	}
	return BasicPropertiesMatchHook{hookProperties}, nil
}
// Matches reports whether every key/value pair the hook requires is
// present with an equal value in matchAgainst.
func (hook BasicPropertiesMatchHook) Matches(matchAgainst Properties) bool {
	for key, want := range hook.match {
		got, found := matchAgainst[key]
		// A missing key or a differing value both fail the match.
		if !found || got != want {
			return false
		}
	}
	return true
}
// MatchesBytes parses raw bytes into Properties and delegates to
// Matches; unparseable input never matches.
func (hook BasicPropertiesMatchHook) MatchesBytes(matchAgainst []byte) bool {
	properties, err := BytesToProperties(matchAgainst)
	if err != nil {
		return false
	}
	return hook.Matches(properties)
}
|
package schedules
import (
"sub_account_service/order_server/db"
"sub_account_service/order_server/entity"
"sub_account_service/order_server/handlers"
"github.com/golang/glog"
"time"
)
// StartAddOrderSchedule launches a background goroutine that pushes
// incomplete orders to the number server, sleeping 20 seconds between
// sweeps.
// NOTE(review): the goroutine has no stop signal; it runs for the
// lifetime of the process.
func StartAddOrderSchedule() {
	go func(){
		for {
			addOrderSchedule()
			time.Sleep(20 * time.Second)
		}
	}()
}
// addOrderSchedule forwards every incomplete order to the number server.
// The recover guard keeps a panic in one sweep from killing the
// scheduler goroutine.
func addOrderSchedule() {
	defer func() {
		if err := recover(); err != nil {
			glog.Errorln("addOrderSchedule error", err)
		}
	}()
	var orders []*entity.Order
	db.DbClient.Client.Where("complete = ?", 0).Find(&orders)
	// Ranging over a nil/empty slice is a no-op, so the previous
	// `orders != nil && len(orders) > 0` guard was redundant.
	for _, order := range orders {
		handlers.SendToNumberServer(order)
	}
}
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adabas
import (
"fmt"
"testing"
"github.com/SoftwareAG/adabas-go-api/adatypes"
"github.com/stretchr/testify/assert"
)
// TestMapRepositoryReadAll loads every map from repository file 4 on
// database 24, asserts the load succeeds and is non-empty, and prints the
// map names.
//
// Fix: the NewAdabas error was silently discarded with `_`, so a failed
// connection went on to defer Close on a possibly unusable handle.
func TestMapRepositoryReadAll(t *testing.T) {
	initTestLogWithFile(t, "map_repositories.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	adabas, aerr := NewAdabas(24)
	if !assert.NoError(t, aerr) {
		return
	}
	defer adabas.Close()
	mr := NewMapRepository(adabas, 4)
	adabasMaps, err := mr.LoadAllMaps(adabas)
	assert.NoError(t, err)
	assert.NotNil(t, adabasMaps)
	assert.NotEqual(t, 0, len(adabasMaps))
	for _, m := range adabasMaps {
		fmt.Println(m.Name)
	}
}
// TestMapRepositoryRead searches for the EMPLOYEES-NAT-DDM map in repository
// file 4 on database 23 and checks that its "AA" field entry exists.
//
// Fixes: the NewAdabas error was discarded with `_`, and employeeMap was
// dereferenced (fieldMap lookup) without guarding against a failed search,
// which would panic instead of failing the test cleanly.
func TestMapRepositoryRead(t *testing.T) {
	initTestLogWithFile(t, "map_repositories.log")
	adatypes.Central.Log.Infof("TEST: %s", t.Name())
	adabas, aerr := NewAdabas(23)
	if !assert.NoError(t, aerr) {
		return
	}
	defer adabas.Close()
	mr := NewMapRepository(adabas, 4)
	employeeMap, serr := mr.SearchMap(adabas, "EMPLOYEES-NAT-DDM")
	if !assert.NoError(t, serr) || !assert.NotNil(t, employeeMap) {
		return
	}
	x := employeeMap.fieldMap["AA"]
	assert.NotNil(t, x)
}
|
package config
// SECURITY(review): a live MongoDB Atlas connection string with an embedded
// username and password is committed to source control here. Rotate these
// credentials and load the endpoint from the environment or a secret store.
const MongoDBEndpoint = "mongodb+srv://nocnocAdmin:Nocnoc2021@cluster0.o70ui.mongodb.net"

// DatabaseName is the database used by this service.
const DatabaseName = "todo"
|
package pipeline
import (
"time"
"github.com/pkg/errors"
"gopkg.in/guregu/null.v4"
)
type (
	// Spec is a pipeline specification: the DOT-source DAG that defines a
	// task graph, persisted via gorm.
	Spec struct {
		ID           int32 `gorm:"primary_key"`
		DotDagSource string
		CreatedAt    time.Time
	}

	// TaskSpec is one node of a pipeline DAG: its DOT id, task type, JSON
	// configuration, ordering index, and (optional) successor task id.
	TaskSpec struct {
		ID             int32 `gorm:"primary_key"`
		DotID          string
		PipelineSpecID int32
		Type           TaskType
		JSON           JSONSerializable `gorm:"type:jsonb"`
		Index          int32
		SuccessorID    null.Int
		CreatedAt      time.Time
	}

	// Run is one execution of a pipeline Spec.
	Run struct {
		ID             int64 `gorm:"primary_key"`
		PipelineSpecID int32
		Meta           JSONSerializable
		CreatedAt      time.Time
		FinishedAt     time.Time
	}

	// TaskRun is one execution of a single TaskSpec within a Run. Result()
	// reads Error first and falls back to Output.
	TaskRun struct {
		ID                 int64 `gorm:"primary_key"`
		PipelineRun        Run
		PipelineRunID      int64
		Output             *JSONSerializable `gorm:"type:jsonb"`
		Error              null.String
		PipelineTaskSpecID int32
		PipelineTaskSpec   TaskSpec
		CreatedAt          time.Time
		FinishedAt         time.Time
	}
)
// TableName overrides gorm's table name for Spec.
func (Spec) TableName() string { return "pipeline_specs" }

// TableName overrides gorm's table name for Run.
func (Run) TableName() string { return "pipeline_runs" }

// TableName overrides gorm's table name for TaskSpec.
func (TaskSpec) TableName() string { return "pipeline_task_specs" }

// TableName overrides gorm's table name for TaskRun.
func (TaskRun) TableName() string { return "pipeline_task_runs" }

// IsFinalPipelineOutput reports whether this task has no successor, i.e. it
// produces the pipeline's final output.
func (s TaskSpec) IsFinalPipelineOutput() bool {
	return s.SuccessorID.IsZero()
}

// DotID returns the DOT-graph identifier of the task this run executed.
func (r TaskRun) DotID() string {
	return r.PipelineTaskSpec.DotID
}

// Result converts the stored Error/Output columns into a Result value: a
// non-null Error takes precedence; otherwise a non-nil Output value is used;
// otherwise the zero Result is returned.
func (r TaskRun) Result() Result {
	var result Result
	if !r.Error.IsZero() {
		result.Error = errors.New(r.Error.ValueOrZero())
	} else if r.Output != nil && r.Output.Val != nil {
		result.Value = r.Output.Val
	}
	return result
}
|
/*
Copyright (c) 2017 Simon Schmidt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package remstorage
//import "github.com/maxymania/fastnntp-polyglot-labs/binarix"
//import "github.com/maxymania/fastnntp-polyglot/buffer"
import "github.com/maxymania/fastnntp-polyglot"
//import "github.com/byte-mug/fastnntp/posting"
import "github.com/valyala/fasthttp"
import "github.com/vmihailenco/msgpack"
import "fmt"
// ClientHandler abstracts the HTTP round-trip so Client can be backed by a
// plain fasthttp client, a balancer, or a test double.
type ClientHandler interface{
	Do(req *fasthttp.Request, resp *fasthttp.Response) error
}

// Client accesses one remote article-storage shard over HTTP; Shard is the
// leading path segment of every request.
type Client struct{
	H ClientHandler
	Shard string
}

// ArticleDirectStat reports whether the article with the given message-id
// exists on the shard (HTTP 200 from the /stat endpoint). Transport errors
// are treated as "not present".
// NOTE(review): `codec` is a package-level encoding defined elsewhere in
// this package — presumably base64/hex for message-ids; confirm there.
func (c *Client) ArticleDirectStat(id []byte) bool {
	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)
	req.Header.SetMethod("GET")
	req.URI().SetPath(fmt.Sprintf("/%s/direct/%s/stat",c.Shard,codec.EncodeToString(id)))
	if c.H.Do(req,resp)!=nil { return false }
	return resp.StatusCode()==200
}

// ArticleDirectGet fetches the article with the given message-id, decoding
// head and body from the msgpack response. Any transport, status, or decode
// failure yields nil. The head/body flags are currently ignored by this
// implementation.
// NOTE(review): this relies on msgpack's variadic Unmarshal decoding two
// consecutive values into Head and Body — confirm against the vendored
// msgpack version's API.
func (c *Client) ArticleDirectGet(id []byte, head, body bool) *newspolyglot.ArticleObject {
	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)
	req.Header.SetMethod("GET")
	req.URI().SetPath(fmt.Sprintf("/%s/direct/%s/get",c.Shard,codec.EncodeToString(id)))
	if c.H.Do(req,resp)!=nil { return nil }
	if resp.StatusCode()!=200 { return nil }
	obj := new(newspolyglot.ArticleObject)
	err := msgpack.Unmarshal(resp.Body(),&obj.Head,&obj.Body)
	if err!=nil { return nil }
	return obj
}

// ArticleDirectOverview fetches the overview (xover) record for the article
// with the given message-id; any failure yields nil.
func (c *Client) ArticleDirectOverview(id []byte) *newspolyglot.ArticleOverview {
	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)
	req.Header.SetMethod("GET")
	req.URI().SetPath(fmt.Sprintf("/%s/direct/%s/xover",c.Shard,codec.EncodeToString(id)))
	if c.H.Do(req,resp)!=nil { return nil }
	if resp.StatusCode()!=200 { return nil }
	obj := new(newspolyglot.ArticleOverview)
	err := msgpack.Unmarshal(resp.Body(),obj)
	if err!=nil { return nil }
	return obj
}
|
package baidu
import (
"github.com/funxdata/baidu/core"
"github.com/funxdata/baidu/face"
"github.com/funxdata/baidu/nlp"
"github.com/funxdata/baidu/ocr"
"github.com/funxdata/baidu/speech"
)
// Baidu is the root API client. It embeds *core.Core, which carries the
// shared credentials used by every service sub-client.
type Baidu struct {
	*core.Core
}

// New creates a Baidu client from an API key/secret pair.
func New(apiKey, apiSecret string) *Baidu {
	return &Baidu{core.NewCore(apiKey, apiSecret)}
}

// Face returns a face-recognition sub-client sharing this client's core.
func (b *Baidu) Face() *face.BaiduFace {
	return &face.BaiduFace{b.Core}
}

// Speech returns a speech sub-client sharing this client's core.
// (The original comment mislabelled this method "Face".)
func (b *Baidu) Speech() *speech.BaiduSpeech {
	return &speech.BaiduSpeech{b.Core}
}

// NLP returns a natural-language-processing sub-client sharing this core.
func (b *Baidu) NLP() *nlp.BaiduNLP {
	return &nlp.BaiduNLP{b.Core}
}

// OCR returns an OCR sub-client sharing this client's core.
func (b *Baidu) OCR() *ocr.BaiduOCR {
	return &ocr.BaiduOCR{b.Core}
}
|
package index
// Index bundles the inverted index with the storage index that backs it.
type Index struct {
	InvertIndex InvertIndex
	StorageIndex StorageIndex
}

// NewIndex returns a pointer to a zero-valued Index ready for use.
func NewIndex() *Index {
	return new(Index)
}
|
package main
import (
"bytes"
"encoding/gob"
"encoding/json"
"errors"
"fmt"
//"github.com/filecoin-project/go-state-types/abi"
"io/ioutil"
"log"
)
// Monster is a sample struct for the JSON serialization demo.
type Monster struct {
	Name string
	Age int
	Birthday string
	Sal float64
	Skill string
}

// Student is a sample struct for the gob serialization demo.
type Student struct {
	Name string
	Age uint8
	Address string
}
// serializeJson marshals a sample Monster to JSON, prints the encoded text,
// then unmarshals it back and prints the round-tripped value.
//
// Fixes: the JSON is printed with %s (readable text) instead of %v on the
// byte slice (which printed raw byte numbers), and the decoded value is now
// printed on SUCCESS — the original only printed it inside the
// `if err != nil` branch, i.e. when Unmarshal failed.
func serializeJson() {
	monster := Monster{
		Name:     "minger",
		Age:      23,
		Birthday: "1997-11-13",
		Sal:      2000.0,
		Skill:    "Linux C/C++ Go",
	}
	data, err := json.Marshal(&monster)
	if err != nil {
		fmt.Printf("marshal failed: err = %v\n", err)
		return
	}
	fmt.Printf("monster serialized = %s\n", data)
	var decoded Monster
	if err := json.Unmarshal(data, &decoded); err != nil {
		fmt.Printf("unmarshal failed: err = %v\n", err)
		return
	}
	fmt.Println(decoded)
	fmt.Println(monster)
}
// serializeGob gob-encodes a slice of Students, writes the bytes to
// ./piecesinfo.json, then decodes them back and prints the result.
//
// Fixes: the slice was created with make([]Student, 10) and then appended
// to, so the encoded stream carried ten zero-valued Students before the
// real one; it now starts empty with capacity 10. The WriteFile failure is
// reported with the actual error instead of a meaningless
// errors.New("errors").
func serializeGob() {
	students := make([]Student, 0, 10)
	students = append(students, Student{"张三", 18, "江苏省"})

	// Encode into an in-memory buffer.
	var buffer bytes.Buffer
	if err := gob.NewEncoder(&buffer).Encode(&students); err != nil {
		log.Panic(err)
	}
	fmt.Printf("serialized: %x\n", buffer.Bytes())

	// Persist the encoded bytes (best effort; failure is reported, not fatal).
	if err := ioutil.WriteFile("./piecesinfo.json", buffer.Bytes(), 0644); err != nil {
		fmt.Println(errors.New("write piecesinfo.json failed: " + err.Error()))
	}

	// Decode from the same bytes to demonstrate the round trip.
	var decoded []Student
	if err := gob.NewDecoder(bytes.NewReader(buffer.Bytes())).Decode(&decoded); err != nil {
		log.Panic(err)
	}
	fmt.Println("deserialized:", decoded)
}
// Struct serialization demo entry point: currently runs only the gob
// round-trip (serializeJson is available but not invoked).
func main() {
	serializeGob()
}
|
/*
Copyright 2015 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"database/sql"
"fmt"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/logit"
_ "github.com/lib/pq"
"strconv"
)
// AddStatus inserts a new backupstatus row and returns its generated id as
// a string.
//
// WARNING(review): the statement is assembled with fmt.Sprintf, so a quote
// in any field breaks it (SQL injection risk if values can be
// user-supplied). Prefer bind parameters ($1, $2, ...).
//
// Fixes: the empty `switch { case err != nil: ... default: }` is collapsed
// to a plain if, and the verbose var declaration uses :=.
func AddStatus(dbConn *sql.DB, status BackupStatus) (string, error) {
	logit.Info.Println("AddStatus called")
	queryStr := fmt.Sprintf("insert into backupstatus ( containername, starttime, backupname, servername, serverip, path, elapsedtime, backupsize, status, profilename, scheduleid, updatedt) values ( '%s', now(), '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %s, now()) returning id",
		status.ContainerName,
		status.BackupName,
		status.ServerName,
		status.ServerIP,
		status.Path,
		status.ElapsedTime,
		status.BackupSize,
		status.Status, status.ProfileName, status.ScheduleID)
	logit.Info.Println("AddStatus:" + queryStr)
	var theID int
	if err := dbConn.QueryRow(queryStr).Scan(&theID); err != nil {
		logit.Error.Println("AddStatus: error " + err.Error())
		return "", err
	}
	strvalue := strconv.Itoa(theID)
	logit.Info.Println("AddStatus returning ID=" + strvalue)
	return strvalue, nil
}
// UpdateStatus updates status, backupsize, elapsedtime and the update
// timestamp of the backupstatus row identified by status.ID, verifying the
// row exists by scanning the returned containername.
//
// WARNING(review): the statement is assembled with fmt.Sprintf; a quote in
// any field breaks it (SQL injection risk if values can be user-supplied).
// Prefer bind parameters ($1, $2, ...).
func UpdateStatus(dbConn *sql.DB, status BackupStatus) error {
	logit.Info.Println("backup.UpdateStatus called")
	queryStr := fmt.Sprintf("update backupstatus set ( status, backupsize, elapsedtime, updatedt) = ('%s', '%s', '%s', now()) where id = %s returning containername",
		status.Status,
		status.BackupSize,
		status.ElapsedTime,
		status.ID)
	logit.Info.Println("backup:UpdateStatus:[" + queryStr + "]")
	var name string
	err := dbConn.QueryRow(queryStr).Scan(&name)
	switch {
	case err != nil:
		logit.Error.Println("backup:UpdateStatus:" + err.Error())
		return err
	default:
	}
	return nil
}
// AddSchedule inserts a new backupschedule row and returns its generated id.
//
// WARNING(review): the statement is assembled with fmt.Sprintf; a quote in
// any field breaks it (SQL injection risk if values can be user-supplied).
// Prefer bind parameters ($1, $2, ...).
//
// Fix: the original checked err twice — an `if err != nil { return }`
// immediately followed by a `switch { case err != nil: ... }` that could
// never fire. The dead second check is removed.
func AddSchedule(dbConn *sql.DB, s BackupSchedule) (string, error) {
	logit.Info.Println("AddSchedule called")
	queryStr := fmt.Sprintf("insert into backupschedule ( serverid, containername, profilename, name, enabled, minutes, hours, dayofmonth, month, dayofweek, updatedt) values ( '%s','%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', now()) returning id",
		s.ServerID,
		s.ContainerName,
		s.ProfileName,
		s.Name,
		s.Enabled,
		s.Minutes,
		s.Hours,
		s.DayOfMonth,
		s.Month,
		s.DayOfWeek)
	logit.Info.Println("AddSchedule:" + queryStr)
	var theID string
	if err := dbConn.QueryRow(queryStr).Scan(&theID); err != nil {
		logit.Error.Println("error in AddSchedule query " + err.Error())
		return "", err
	}
	return theID, nil
}
// UpdateSchedule updates every editable column of the backupschedule row
// identified by s.ID, verifying the row exists by scanning the returned
// containername.
//
// WARNING(review): the statement is assembled with fmt.Sprintf; a quote in
// any field breaks it (SQL injection risk if values can be user-supplied).
// Prefer bind parameters ($1, $2, ...).
func UpdateSchedule(dbConn *sql.DB, s BackupSchedule) error {
	logit.Info.Println("backup.UpdateSchedule called")
	queryStr := fmt.Sprintf("update backupschedule set ( enabled, serverid, name, minutes, hours, dayofmonth, month, dayofweek, updatedt) = ('%s', %s, '%s', '%s', '%s', '%s', '%s', '%s', now()) where id = %s returning containername",
		s.Enabled,
		s.ServerID,
		s.Name,
		s.Minutes,
		s.Hours,
		s.DayOfMonth,
		s.Month,
		s.DayOfWeek,
		s.ID)
	logit.Info.Println("backup:UpdateSchedule:[" + queryStr + "]")
	var name string
	err := dbConn.QueryRow(queryStr).Scan(&name)
	switch {
	case err != nil:
		logit.Error.Println("backup:UpdateSchedule:" + err.Error())
		return err
	default:
	}
	return nil
}
func DeleteSchedule(dbConn *sql.DB, id string) error {
queryStr := fmt.Sprintf("delete from backupschedule where id=%s returning id", id)
logit.Info.Println("backup:DeleteSchedule:" + queryStr)
var theID int
err := dbConn.QueryRow(queryStr).Scan(&theID)
switch {
case err != nil:
return err
default:
}
return nil
}
// GetSchedule returns the backup schedule with the given id, joined with
// its server's name and IP. sql.ErrNoRows is logged distinctly and
// returned when no schedule matches.
//
// Fix: id is bound as $1 instead of being Sprintf'd into the SQL text
// (injection-safe).
func GetSchedule(dbConn *sql.DB, id string) (BackupSchedule, error) {
	logit.Info.Println("GetSchedule called with id=" + id)
	s := BackupSchedule{}
	queryStr := "select a.id, a.serverid, b.name, b.ipaddress, a.containername, a.profilename, a.name, a.enabled, a.minutes, a.hours, a.dayofmonth, a.month, a.dayofweek, date_trunc('second', a.updatedt)::text from backupschedule a, server b where a.id=$1 and b.id = a.serverid"
	err := dbConn.QueryRow(queryStr, id).Scan(&s.ID, &s.ServerID, &s.ServerName, &s.ServerIP, &s.ContainerName, &s.ProfileName, &s.Name, &s.Enabled, &s.Minutes, &s.Hours, &s.DayOfMonth, &s.Month, &s.DayOfWeek, &s.UpdateDt)
	switch {
	case err == sql.ErrNoRows:
		logit.Error.Println("backupdb:GetSchedule:no schedule with that id")
		return s, err
	case err != nil:
		logit.Error.Println("backupdb:GetSchedule:" + err.Error())
		return s, err
	}
	return s, nil
}
// GetAllSchedules returns every backup schedule attached to the container
// with the given database id, joined with its server's name and IP.
//
// Fixes: containerid is bound as $1 instead of Sprintf interpolation
// (injection-safe), and the separate var declarations collapse into :=.
func GetAllSchedules(dbConn *sql.DB, containerid string) ([]BackupSchedule, error) {
	logit.Info.Println("GetAllSchedules called with id=" + containerid)
	rows, err := dbConn.Query("select a.id, a.serverid, s.name, s.ipaddress, a.containername, a.profilename, a.name, a.enabled, a.minutes, a.hours, a.dayofmonth, a.month, a.dayofweek, date_trunc('second', a.updatedt)::text from backupschedule a, container b, server s where a.containername= b.name and b.id = $1 and a.serverid = s.id", containerid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	schedules := make([]BackupSchedule, 0)
	for rows.Next() {
		s := BackupSchedule{}
		if err = rows.Scan(
			&s.ID,
			&s.ServerID,
			&s.ServerName,
			&s.ServerIP,
			&s.ContainerName,
			&s.ProfileName,
			&s.Name,
			&s.Enabled,
			&s.Minutes,
			&s.Hours,
			&s.DayOfMonth,
			&s.Month,
			&s.DayOfWeek,
			&s.UpdateDt); err != nil {
			return nil, err
		}
		schedules = append(schedules, s)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return schedules, nil
}
// GetAllStatus returns every backup status row recorded for the given
// schedule, ordered by start time.
//
// Fixes: scheduleid is bound as $1 instead of Sprintf interpolation
// (injection-safe), and the separate var declarations collapse into :=.
func GetAllStatus(dbConn *sql.DB, scheduleid string) ([]BackupStatus, error) {
	logit.Info.Println("GetAllStatus called with scheduleid=" + scheduleid)
	rows, err := dbConn.Query("select id, containername, date_trunc('second', starttime)::text, backupname, servername, serverip, path, elapsedtime, backupsize, status, date_trunc('second', updatedt)::text from backupstatus where scheduleid=$1 order by starttime", scheduleid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	stats := make([]BackupStatus, 0)
	for rows.Next() {
		s := BackupStatus{}
		if err = rows.Scan(
			&s.ID,
			&s.ContainerName,
			&s.StartTime,
			&s.BackupName,
			&s.ServerName,
			&s.ServerIP,
			&s.Path,
			&s.ElapsedTime,
			&s.BackupSize,
			&s.Status,
			&s.UpdateDt); err != nil {
			return nil, err
		}
		stats = append(stats, s)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return stats, nil
}
// GetStatus returns the backup status row with the given id. sql.ErrNoRows
// is logged distinctly and returned when no row matches.
//
// Fix: id is bound as $1 instead of being Sprintf'd into the SQL text
// (injection-safe).
func GetStatus(dbConn *sql.DB, id string) (BackupStatus, error) {
	logit.Info.Println("GetStatus called with id=" + id)
	s := BackupStatus{}
	err := dbConn.QueryRow("select id, containername, date_trunc('second', starttime), backupname, servername, serverip, path, elapsedtime, backupsize, status, date_trunc('second', updatedt) from backupstatus where id=$1", id).Scan(&s.ID, &s.ContainerName, &s.StartTime, &s.BackupName, &s.ServerName, &s.ServerIP, &s.Path, &s.ElapsedTime, &s.BackupSize, &s.Status, &s.UpdateDt)
	switch {
	case err == sql.ErrNoRows:
		logit.Error.Println("backupdb:GetStatus:no status with that id")
		return s, err
	case err != nil:
		logit.Error.Println("backupdb:GetStatus:" + err.Error())
		return s, err
	}
	return s, nil
}
// GetSchedules returns every backup schedule, unfiltered.
//
// Fix: the query takes no arguments, so the no-op fmt.Sprintf wrapper (and
// its trailing space) is dropped; the separate var declarations collapse
// into :=.
func GetSchedules(dbConn *sql.DB) ([]BackupSchedule, error) {
	logit.Info.Println("GetSchedules called")
	rows, err := dbConn.Query("select a.id, a.serverid, a.containername, a.profilename, a.name, a.enabled, a.minutes, a.hours, a.dayofmonth, a.month, a.dayofweek, date_trunc('second', a.updatedt)::text from backupschedule a")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	schedules := make([]BackupSchedule, 0)
	for rows.Next() {
		s := BackupSchedule{}
		if err = rows.Scan(
			&s.ID,
			&s.ServerID,
			&s.ContainerName,
			&s.ProfileName,
			&s.Name,
			&s.Enabled,
			&s.Minutes,
			&s.Hours,
			&s.DayOfMonth,
			&s.Month,
			&s.DayOfWeek,
			&s.UpdateDt); err != nil {
			return nil, err
		}
		schedules = append(schedules, s)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return schedules, nil
}
|
package 股票问题
// ------------------ Monotonic-stack approach ------------------
// maxProfit returns the maximum total profit obtainable with unlimited
// buy/sell transactions ("Best Time to Buy and Sell Stock II").
//
// A monotonically decreasing stack is kept: when the incoming price is at
// least the stack top, the ascent from the top is banked as profit and all
// smaller-or-equal entries are popped before the price is pushed.
func maxProfit(prices []int) int {
	var stack []int
	profit := 0
	for _, price := range prices {
		if len(stack) > 0 && stack[len(stack)-1] <= price {
			profit += price - stack[len(stack)-1]
			for len(stack) > 0 && stack[len(stack)-1] <= price {
				stack = stack[:len(stack)-1]
			}
		}
		stack = append(stack, price)
	}
	return profit
}
// maxProfit2 computes the same multi-transaction maximum profit using a
// monotonically increasing stack: while prices rise they accumulate on the
// stack; when the rise breaks, the profit of the completed ascent
// (top minus bottom) is banked and the stack restarts; any ascent still on
// the stack at the end is banked last.
//
// Fix: the original declared a second function named maxProfit in the same
// package, which is a compile error (redeclaration); it is renamed to
// maxProfit2.
func maxProfit2(prices []int) int {
	maxStack := make([]int, 0)
	maxProfitResult := 0
	for i := 0; i < len(prices); i++ {
		if len(maxStack) != 0 && maxStack[len(maxStack)-1] >= prices[i] {
			maxProfitResult += maxStack[len(maxStack)-1] - maxStack[0]
			maxStack = []int{}
		}
		maxStack = append(maxStack, prices[i])
	}
	if len(maxStack) != 0 {
		maxProfitResult += maxStack[len(maxStack)-1] - maxStack[0]
	}
	return maxProfitResult
}
// ------------------ 动态规划 ------------------
// 定义: 前 i 天能获得的最大利润
// 状态: 手上是否有股票
// 操作: 什么都不做、买入、卖出
// dp[i][0] = max(dp[i-1][0],dp[i-1][1] + prices[i])
// dp[i][1] = max(dp[i-1][1],dp[i-1][0] - prices[i])
|
package redis
import (
"errors"
"github.com/go-redis/redis"
"github.com/spf13/viper"
)
const (
	// Sentinel and Cluster name the supported multi-node deployment modes.
	Sentinel = "sentinel"
	Cluster  = "cluster"
	// Defaults used when configuration does not supply values.
	// NOTE(review): the timeout unit is not visible here — presumably
	// milliseconds; confirm where they are applied.
	DefaultPoolSize     = 100
	DefaultReadTimeout  = 1000
	DefaultWriteTimeout = 1000
)

var (
	// ErrorMissingRedisAddress is returned when a client is built without
	// a redis address.
	ErrorMissingRedisAddress = errors.New("missing redis address")
)

// Connection builds a configured redis universal client.
type Connection interface {
	BuildClient() (redis.UniversalClient, error)
}
// NewRedisConfig builds a single-node redis Connection for the given
// address and database index. Pool size is fixed at DefaultPoolSize;
// read/write timeouts come from viper ("redis.read_timeout" /
// "redis.write_timeout").
//
// Fix: DefaultReadTimeout/DefaultWriteTimeout were declared but never
// used — a missing viper key silently produced zero timeouts. They now act
// as fallbacks when the configured value is absent or non-positive.
func NewRedisConfig(add string, db int) Connection {
	readTimeout := viper.GetInt("redis.read_timeout")
	if readTimeout <= 0 {
		readTimeout = DefaultReadTimeout
	}
	writeTimeout := viper.GetInt("redis.write_timeout")
	if writeTimeout <= 0 {
		writeTimeout = DefaultWriteTimeout
	}
	return &SingleConnection{
		address:      add,
		db:           db,
		poolSize:     DefaultPoolSize,
		readTimeout:  readTimeout,
		writeTimeout: writeTimeout,
	}
}
|
package sw
import (
"crypto/rsa"
"crypto/x509"
"fmt"
"crypto/sha256"
"errors"
"encoding/asn1"
"math/big"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
)
// rsaPublicKeyASN mirrors the ASN.1 structure (modulus N, exponent E) that
// is marshalled to derive a stable subject key identifier for RSA keys.
type rsaPublicKeyASN struct {
	N *big.Int
	E int
}

// rsaPrivateKey wraps *rsa.PrivateKey to satisfy the bccsp key interface.
type rsaPrivateKey struct {
	privKey *rsa.PrivateKey
}

// Bytes is unsupported: raw export of the private key material is refused.
// NOTE(review): error strings should be lowercase without punctuation
// (staticcheck ST1005); left unchanged in case callers match the message.
func (k *rsaPrivateKey) Bytes() (raw []byte, err error) {
	return nil, errors.New("Not supported.")
}

// SKI returns the subject key identifier: the SHA-256 digest of the ASN.1
// encoding of the public modulus and exponent. Returns nil for an
// uninitialized key. The asn1.Marshal error is deliberately ignored; the
// input is a fixed well-formed structure.
func (k *rsaPrivateKey) SKI() (ski []byte) {
	if k.privKey == nil {
		return nil
	}
	raw, _ := asn1.Marshal(rsaPublicKeyASN{
		N: k.privKey.N,
		E: k.privKey.E,
	})
	hash := sha256.New()
	hash.Write(raw)
	return hash.Sum(nil)
}

// Symmetric reports false: RSA is an asymmetric scheme.
func (k *rsaPrivateKey) Symmetric() bool {
	return false
}

// Private reports true: this wraps private key material.
func (k *rsaPrivateKey) Private() bool {
	return true
}

// PublicKey returns the corresponding public half wrapped as a bccsp.Key.
func (k *rsaPrivateKey) PublicKey() (bccsp.Key, error) {
	return &rsaPublicKey{&k.privKey.PublicKey}, nil
}
// rsaPublicKey wraps *rsa.PublicKey to satisfy the bccsp key interface.
type rsaPublicKey struct {
	pubKey *rsa.PublicKey
}

// Bytes returns the PKIX/DER encoding of the public key, or an error if
// the key is nil or marshalling fails.
func (k *rsaPublicKey) Bytes() (raw []byte, err error) {
	if k.pubKey == nil {
		return nil, errors.New("Failed marshalling key. Key is nil.")
	}
	raw, err = x509.MarshalPKIXPublicKey(k.pubKey)
	if err != nil {
		return nil, fmt.Errorf("Failed marshalling key [%s]", err)
	}
	return
}

// SKI returns the subject key identifier: the SHA-256 digest of the ASN.1
// encoding of the modulus and exponent — identical derivation to
// rsaPrivateKey.SKI, so a key pair shares one identifier.
func (k *rsaPublicKey) SKI() (ski []byte) {
	if k.pubKey == nil {
		return nil
	}
	raw, _ := asn1.Marshal(rsaPublicKeyASN{
		N: k.pubKey.N,
		E: k.pubKey.E,
	})
	hash := sha256.New()
	hash.Write(raw)
	return hash.Sum(nil)
}

// Symmetric reports false: RSA is an asymmetric scheme.
func (k *rsaPublicKey) Symmetric() bool {
	return false
}

// Private reports false: this wraps public key material only.
func (k *rsaPublicKey) Private() bool {
	return false
}

// PublicKey returns the key itself, as it is already the public half.
func (k *rsaPublicKey) PublicKey() (bccsp.Key, error) {
	return k, nil
}
|
package api
import (
"encoding/json"
"fmt"
"net/http"
"strings"
ldap "gopkg.in/ldap.v2"
"github.com/compsoc-edinburgh/bi-provider/pkg/config"
"github.com/gin-gonic/gin"
"github.com/qaisjp/gosign"
"github.com/sirupsen/logrus"
)
// outNotLoggedIn is the canonical JSON body returned whenever the cosign
// session cookie is missing or rejected.
var outNotLoggedIn = gin.H{
	"status":  "error",
	"message": "not logged in",
}
// NewAPI sets up a new API module: a gin router with CORS/cache/security
// headers applied globally and a single GET / route serving the provider.
func NewAPI(
	conf *config.Config,
	log *logrus.Logger,
) *API {
	router := gin.Default()

	// security measures
	router.Use(
		func(c *gin.Context) {
			// Grant access to either betterinformatics.com, or alpha.betterinformatics.com, but no other website.
			// Unknown origins fall back to the main site so the ACAO header
			// never echoes an arbitrary origin.
			origin := c.Request.Header.Get("Origin")
			if (origin == "https://betterinformatics.com") || (origin == "https://alpha.betterinformatics.com") {
				c.Header("Access-Control-Allow-Origin", origin)
			} else {
				c.Header("Access-Control-Allow-Origin", "https://betterinformatics.com")
			}
			// Vary on Origin/Cookie so caches keep per-origin, per-session copies.
			c.Header("Vary", "Origin, Cookie")
			c.Header("Cache-Control", "max-age=3600")
			c.Header("X-Frame-Options", "DENY")
			c.Header("Content-Type", "application/json")
			c.Header("Access-Control-Allow-Credentials", "true")
			c.Next()
		},
	)
	a := &API{
		Config: conf,
		Log:    log,
		Gin:    router,
	}
	router.GET("/", a.provide)
	return a
}
// provide handles GET /. It validates the caller's cosign session cookie
// against the local cosign web API, enforces the INF.ED.AC.UK realm, and
// responds with the user's LDAP-derived profile and groups.
//
// Fix: the 401 status from the cosign API is now checked BEFORE decoding
// the response body — a 401 body need not be valid JSON, and the original
// post-decode check could mask "not logged in" behind a decode error. The
// body is also closed via defer immediately after the transport error check.
func (a *API) provide(c *gin.Context) {
	cookie, err := c.Cookie("cosign-betterinformatics.com")
	if err != nil {
		c.JSON(http.StatusUnauthorized, outNotLoggedIn)
		return
	}

	// NOTE(review): the cosign service name and password travel in the URL
	// path; this is only tolerable because the target is localhost.
	url := "http://localhost:6663/check" +
		"/" + a.Config.CoSign.Name +
		"/" + a.Config.CoSign.Password +
		"?ip=" + c.ClientIP() +
		"&cookie=" + strings.Replace(cookie, " ", "%2B", -1)

	resp, err := http.Get(url)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  "error",
			"message": err.Error(),
		})
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusUnauthorized {
		c.JSON(http.StatusUnauthorized, outNotLoggedIn)
		return
	}

	var result struct {
		Status  string
		Message string
		Data    gosign.CheckResponse
	}
	if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  "error",
			"message": "could not decode JSON",
		})
		return
	}
	if result.Status != "success" {
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  "error",
			"message": "cosign-webapi: " + result.Message,
		})
		return
	}
	if result.Data.Realm != "INF.ED.AC.UK" {
		c.JSON(http.StatusForbidden, gin.H{
			"status":  "error",
			"message": "Access denied. Realm " + result.Data.Realm + " is not permitted.",
		})
		return
	}
	out, err := getGroups(result.Data.Principal)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{
			"status":  "error",
			"message": "ldap: " + err.Error(),
		})
		return
	}
	c.JSON(http.StatusOK, gin.H{
		"status": "success",
		"data":   out,
	})
}
// outStruct is the per-user payload returned by getGroups: identity plus
// the year/degree/cohort/module roles mapped from LDAP group names.
type outStruct struct {
	Username string
	Name string
	Year string
	Modules []string
	Degree string
	Cohort string
	IsStudent bool
}
// getGroups resolves a user's display name and role groups from the local
// Informatics LDAP server and maps role/* groups onto an outStruct.
//
// Fix: role values are extracted with strings.TrimPrefix instead of magic
// slice offsets (group[10:], group[12:]), which were easy to desynchronize
// from the prefix literals they depended on. Behavior is unchanged because
// each TrimPrefix is guarded by the matching HasPrefix.
//
// NOTE(review): u is interpolated into LDAP filters; it originates from
// cosign, but escaping it (e.g. ldap.EscapeFilter where available) would be
// safer.
func getGroups(u string) (out outStruct, err error) {
	conn, err := ldap.Dial("tcp", "localhost:1389")
	if err != nil {
		return out, err
	}
	defer conn.Close()
	out.Username = u

	// Look up the user's given name.
	searchRequest := ldap.NewSearchRequest(
		"dc=inf,dc=ed,dc=ac,dc=uk",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
		0, 0, false,
		fmt.Sprintf("(uid=%s)", u), []string{"givenName"},
		nil,
	)
	nr, err := conn.Search(searchRequest)
	if err != nil {
		return out, err
	}
	for _, entry := range nr.Entries {
		out.Name = entry.GetAttributeValue("givenName")
	}

	// Look up every group the user is a member of. Requesting nil
	// attributes returns ALL; only `cn` is read below.
	searchRequest = ldap.NewSearchRequest(
		"dc=inf,dc=ed,dc=ac,dc=uk", // BaseDN
		ldap.ScopeWholeSubtree,     // default for ldapsearch
		ldap.NeverDerefAliases,     // default for ldapsearch
		0, 0, false,
		fmt.Sprintf("(member=uid=%s,ou=People,dc=inf,dc=ed,dc=ac,dc=uk)", u),
		nil,
		nil,
	)
	sr, err := conn.Search(searchRequest)
	if err != nil {
		return out, err
	}

	// Map role/* group names onto the output struct.
	for _, entry := range sr.Entries {
		group := entry.GetAttributeValue("cn")
		if group == "role/student" {
			out.IsStudent = true
		}
		if strings.HasPrefix(group, "role/year-") {
			out.Year = strings.TrimPrefix(group, "role/year-")
		}
		if strings.HasPrefix(group, "role/degree-") {
			out.Degree = strings.TrimPrefix(group, "role/degree-")
		}
		if strings.HasPrefix(group, "role/cohort-") {
			out.Cohort = strings.TrimPrefix(group, "role/cohort-")
		}
		if strings.HasPrefix(group, "role/module-") {
			out.Modules = append(out.Modules, strings.TrimPrefix(group, "role/module-"))
		}
	}

	if out.IsStudent {
		// Research postgraduates ("pgr" cohort) are not treated as students.
		out.IsStudent = out.Cohort != "pgr"
	}
	return out, nil
}
|
package main
// QueryGenerator describes a generator of queries, typically according to a
// use case.
//
// NOTE(review): the semantics of the two int parameters of Dispatch are not
// visible here — confirm at the implementations (likely a worker/scale id
// pair).
type QueryGenerator interface {
	Dispatch(int, *Query, int)
}
|
package main
import "log"
var (
	minTurnover float64 = 2 // minimum daily turnover: 2 亿 (200 million)
	minChangehands float64 = 2 // minimum turnover (hands-changed) rate: 2%
	minTurnoverToCirculation float64 = 0.02 // minimum turnover / circulating cap: 万2 (0.02%)
	minMainP float64 = 5 // minimum main-capital inflow share: 5%
	mainMainV float64 = 0.1 // minimum main-capital inflow: 0.1 亿
	// The hands-changed rate already reflects turnover as a share of the
	// circulating market cap, so these thresholds overlap.
)

// init configures plain log output (no timestamps/flags). The flag-based
// overrides for the thresholds above are retained below but disabled.
func init() {
	// flag.Float64Var(&minTurnover, "mt", 2, "最少成交额")
	// flag.Float64Var(&minChangehands, "mc", 2, "最低换手")
	// flag.Float64Var(&minTurnoverToCirculation, "mttc", 0.02, "成交占比流通")
	// flag.Float64Var(&minMainP, "mp", 5, "主力流入占比")
	// flag.Float64Var(&mainMainV, "mv", 0.1, "主力流入")
	// flag.Parse()
	log.SetFlags(0)
}
// main loads money-flow data and runs strategy001 over it. The commented
// block below is an earlier inline filter/report pass kept for reference.
func main() {
	gds := GetMoneyData()
	strategy001(gds)
	// showGds := make([]*GeneralData, 0)
	// for _, gd := range gds {
	// 	if gd.mainV < mainMainV {
	// 		continue
	// 	}
	// 	if gd.mainP < minMainP {
	// 		continue
	// 	}
	// 	if gd.changehands < minChangehands {
	// 		continue
	// 	}
	// 	if gd.turnover < minTurnover {
	// 		continue
	// 	}
	// 	showGds = append(showGds, gd)
	// }
	// fmt.Printf("%-6s, %-4s, %-4s, %-4s, %-4s, %-4s, %-4s, %-4s \r\n",
	// 	"代码", "名称", "涨跌幅", "成/流", "主力流入", "占比", "5min", "换手")
	// var (
	// 	total, fCount, zCount float64
	// )
	// for _, gd := range showGds {
	// 	fmt.Printf("%6s, %4s, %6.2f%%, %6.4f%%, %6.2f亿, %6.2f%%, %2.2f%% \r\n",
	// 		gd.stockCode,
	// 		gd.stockName,
	// 		gd.upAndDownRange,
	// 		gd.mainV,
	// 		gd.mainP,
	// 		gd.min5,
	// 		gd.changehands,
	// 	)
	// 	total++
	// 	if gd.upAndDownRange > 0 {
	// 		zCount++
	// 	} else {
	// 		fCount++
	// 	}
	// }
	// fmt.Printf("\r\n涨幅胜率: %.2f%%\r\n", zCount/total*100)
	// fmt.Printf("跌幅胜率: %.2f%%", fCount/total*100)
}
|
package logging
import "github.com/sirupsen/logrus"
// LogrusLogger is the default logger for quacktors.
// As the name implies, it uses logrus under the hood.
type LogrusLogger struct {
	Log *logrus.Logger
}

// Init initializes the LogrusLogger with the default config
// (ForceColors=true, LogLevel=Trace).
func (l *LogrusLogger) Init() {
	l.Log = logrus.StandardLogger()
	l.Log.SetFormatter(&logrus.TextFormatter{ForceColors: true})
	l.Log.SetLevel(logrus.TraceLevel)
}

// Trace adds a logrus log entry on the corresponding log level.
func (l *LogrusLogger) Trace(message string, values ...interface{}) {
	if !l.Log.IsLevelEnabled(logrus.TraceLevel) {
		return
	}
	l.Log.WithFields(toMap(values...)).Trace(message)
}

// Debug adds a logrus log entry on the corresponding log level.
func (l *LogrusLogger) Debug(message string, values ...interface{}) {
	if !l.Log.IsLevelEnabled(logrus.DebugLevel) {
		return
	}
	l.Log.WithFields(toMap(values...)).Debug(message)
}

// Info adds a logrus log entry on the corresponding log level.
func (l *LogrusLogger) Info(message string, values ...interface{}) {
	if !l.Log.IsLevelEnabled(logrus.InfoLevel) {
		return
	}
	l.Log.WithFields(toMap(values...)).Info(message)
}

// Warn adds a logrus log entry on the corresponding log level.
func (l *LogrusLogger) Warn(message string, values ...interface{}) {
	if !l.Log.IsLevelEnabled(logrus.WarnLevel) {
		return
	}
	l.Log.WithFields(toMap(values...)).Warn(message)
}

// Error adds a logrus log entry on the corresponding log level.
func (l *LogrusLogger) Error(message string, values ...interface{}) {
	if !l.Log.IsLevelEnabled(logrus.ErrorLevel) {
		return
	}
	l.Log.WithFields(toMap(values...)).Error(message)
}

// Fatal adds a logrus log entry on the corresponding log level
// and quits the application with exit-code 1.
func (l *LogrusLogger) Fatal(message string, values ...interface{}) {
	if !l.Log.IsLevelEnabled(logrus.FatalLevel) {
		return
	}
	l.Log.WithFields(toMap(values...)).Fatal(message)
}
|
package controllers
import (
"github.com/astaxie/beego"
)
// MainController serves the site landing page.
type MainController struct {
	beego.Controller
}

// UserController serves the login page.
type UserController struct {
	beego.Controller
}
// Get renders the landing page template with site contact details.
// Fix: the receiver is renamed from the non-idiomatic `this` to `c`
// (Go convention: short receiver names, never this/self).
func (c *MainController) Get() {
	c.Data["Website"] = "beego.me"
	c.Data["Email"] = "astaxie@gmail.com"
	c.TplName = "index.tpl"
}
// Get renders the login page template. (Original comment: 登录 = "login".)
// Fix: the receiver is renamed from the non-idiomatic `this` to `c`
// (Go convention: short receiver names, never this/self).
func (c *UserController) Get() {
	c.Data["Website"] = "TESTPAGE"
	c.Data["Email"] = "victor.yang@hellosanta.com.tw"
	c.TplName = "main/login.tpl"
}
|
package tmpl1
import (
"github.com/sko00o/leetcode-adventure/queue-stack/queue/bfs"
)
// BFS returns the number of edges on the shortest path from root to target,
// or -1 if target is unreachable.
//
// NOTE(review): no visited set is kept, so nodes reachable by multiple
// paths are re-enqueued and a cyclic graph loops forever. This matches the
// "template 1" BFS intended for tree-like graphs — confirm callers only
// pass acyclic inputs.
func BFS(root, target *bfs.Node) int {
	// store all nodes which are waiting to be processed
	var queue bfs.NodeQueue
	// number of steps needed from root to current node
	var step int
	// initialize
	queue.EnQueue(root)
	for !queue.IsEmpty() {
		// iterate the nodes which are already in the queue
		// (snapshot the size so this level's enqueues are not processed yet)
		for size := len(queue.Data); size != 0; size-- {
			n := queue.Front()
			if n == target {
				return step
			}
			for i := range n.Neighbors {
				queue.EnQueue(n.Neighbors[i])
			}
			queue.DeQueue()
		}
		step++
	}
	// there is no path from root to target
	return -1
}
|
package accdownload_test
import(
"testing"
"download/accdownload"
"fmt"
)
// Test_Account_GetBalanceData downloads the balance sheet for stock 600001.
// NOTE(review): these are smoke tests — results are only printed, never
// asserted, and they hit the real downloader; consider asserting on res.
func Test_Account_GetBalanceData(t *testing.T){
	d := accdownload.NewAccountDownloader()
	res := d.GetBalanceData("600001")
	fmt.Println(res)
}

// Test_Account_GetIncomeData downloads the income statement for 600001.
func Test_Account_GetIncomeData(t *testing.T){
	d := accdownload.NewAccountDownloader()
	res := d.GetIncomeData("600001")
	fmt.Println(res)
}

// Test_Account_GetCashflowData downloads the cash-flow statement for 600001.
func Test_Account_GetCashflowData(t *testing.T){
	d := accdownload.NewAccountDownloader()
	res := d.GetCashFlowData("600001")
	fmt.Println(res)
}
|
package logger
import (
"reflect"
"testing"
"github.com/rifflock/lfshook"
"github.com/sirupsen/logrus"
)
// Test_setLogDirectoryPath is a generated table-driven stub with no cases
// yet; each case would call setLogDirectoryPath for its side effects.
func Test_setLogDirectoryPath(t *testing.T) {
	tests := []struct {
		name string
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			setLogDirectoryPath()
		})
	}
}

// Test_setPrimaryOutStream is a generated table-driven stub with no cases.
func Test_setPrimaryOutStream(t *testing.T) {
	tests := []struct {
		name string
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			setPrimaryOutStream()
		})
	}
}

// Test_getPathMap is a generated stub comparing getPathMap's lfshook
// PathMap against an expected value; no cases are defined yet.
func Test_getPathMap(t *testing.T) {
	type args struct {
		logPath string
	}
	tests := []struct {
		name string
		args args
		want lfshook.PathMap
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := getPathMap(tt.args.logPath); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("getPathMap() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestGetLogger is a generated stub comparing GetLogger's *logrus.Entry
// against an expected value; no cases are defined yet.
func TestGetLogger(t *testing.T) {
	type args struct {
		module string
	}
	tests := []struct {
		name string
		args args
		want *logrus.Entry
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := GetLogger(tt.args.module); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("GetLogger() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//679. 24 Game
//You have 4 cards, each containing a number from 1 to 9. You need to judge whether they can be combined through *, /, +, -, (, ) to get the value of 24.
//Example 1:
//Input: [4, 1, 8, 7]
//Output: True
//Explanation: (8-4) * (7-1) = 24
//Example 2:
//Input: [1, 2, 1, 2]
//Output: False
//Note:
//The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12.
//Every operation done is between two numbers. In particular, we cannot use - as a unary operator. For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed.
//You cannot concatenate numbers together. For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12.
//func judgePoint24(nums []int) bool {
//}
// Time Is Money
|
package azure
import (
"fmt"
"strings"
)
// aro is a setting to enable aro-only modifications.
// (Presumably ARO = Azure Red Hat OpenShift; confirm at the call sites.)
var aro bool

// OutboundType is a strategy for how egress from cluster is achieved.
// +kubebuilder:validation:Enum="";Loadbalancer;NatGateway;UserDefinedRouting
type OutboundType string

const (
	// LoadbalancerOutboundType uses Standard loadbalancer for egress from the cluster.
	// see https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-outbound-connections#lb
	LoadbalancerOutboundType OutboundType = "Loadbalancer"

	// NatGatewayOutboundType uses NAT gateway for egress from the cluster.
	// see https://learn.microsoft.com/en-us/azure/virtual-network/nat-gateway/nat-gateway-resource
	NatGatewayOutboundType OutboundType = "NatGateway"

	// UserDefinedRoutingOutboundType uses user defined routing for egress from the cluster.
	// see https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview
	UserDefinedRoutingOutboundType OutboundType = "UserDefinedRouting"
)
// Platform stores all the global configuration that all machinesets
// use.
type Platform struct {
	// Region specifies the Azure region where the cluster will be created.
	Region string `json:"region"`
	// ARMEndpoint is the endpoint for the Azure API when installing on Azure Stack.
	ARMEndpoint string `json:"armEndpoint,omitempty"`
	// ClusterOSImage is the url of a storage blob in the Azure Stack environment containing an RHCOS VHD. This field is required for Azure Stack and not applicable to Azure.
	ClusterOSImage string `json:"clusterOSImage,omitempty"`
	// BaseDomainResourceGroupName specifies the resource group where the Azure DNS zone for the base domain is found. This field is optional when creating a private cluster, otherwise required.
	//
	// +optional
	BaseDomainResourceGroupName string `json:"baseDomainResourceGroupName,omitempty"`
	// DefaultMachinePlatform is the default configuration used when
	// installing on Azure for machine pools which do not define their own
	// platform configuration.
	// +optional
	DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"`
	// NetworkResourceGroupName specifies the network resource group that contains an existing VNet
	//
	// +optional
	NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
	// VirtualNetwork specifies the name of an existing VNet for the installer to use
	//
	// +optional
	VirtualNetwork string `json:"virtualNetwork,omitempty"`
	// ControlPlaneSubnet specifies an existing subnet for use by the control plane nodes
	//
	// +optional
	ControlPlaneSubnet string `json:"controlPlaneSubnet,omitempty"`
	// ComputeSubnet specifies an existing subnet for use by compute nodes
	//
	// +optional
	ComputeSubnet string `json:"computeSubnet,omitempty"`
	// CloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
	// with the appropriate Azure API endpoints.
	// If empty, the value is equal to "AzurePublicCloud".
	// +optional
	CloudName CloudEnvironment `json:"cloudName,omitempty"`
	// OutboundType is a strategy for how egress from cluster is achieved. When not specified default is "Loadbalancer".
	// "NatGateway" is only available in TechPreview.
	//
	// +kubebuilder:default=Loadbalancer
	// +optional
	OutboundType OutboundType `json:"outboundType"`
	// ResourceGroupName is the name of an already existing resource group where the cluster should be installed.
	// This resource group should only be used for this specific cluster and the cluster components will assume
	// ownership of all resources in the resource group. Destroying the cluster using installer will delete this
	// resource group.
	// This resource group must be empty with no other resources when trying to use it for creating a cluster.
	// If empty, a new resource group will be created for the cluster.
	//
	// +optional
	ResourceGroupName string `json:"resourceGroupName,omitempty"`
	// UserTags has additional keys and values that the installer will add
	// as tags to all resources that it creates on AzurePublicCloud alone.
	// Resources created by the cluster itself may not include these tags.
	// +optional
	UserTags map[string]string `json:"userTags,omitempty"`
}
// CloudEnvironment is the name of the Azure cloud environment.
// The constant values are chosen to match go-autorest's environment names
// (see CloudEnvironment.Name below).
// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud
type CloudEnvironment string
const (
	// PublicCloud is the general-purpose, public Azure cloud environment.
	PublicCloud CloudEnvironment = "AzurePublicCloud"
	// USGovernmentCloud is the Azure cloud environment for the US government.
	USGovernmentCloud CloudEnvironment = "AzureUSGovernmentCloud"
	// ChinaCloud is the Azure cloud environment used in China.
	ChinaCloud CloudEnvironment = "AzureChinaCloud"
	// GermanCloud is the Azure cloud environment used in Germany.
	GermanCloud CloudEnvironment = "AzureGermanCloud"
	// StackCloud is the Azure cloud environment used at the edge and on premises.
	StackCloud CloudEnvironment = "AzureStackCloud"
)
// Name returns the name that Azure uses for the cloud environment.
// The enum values are defined to match the autorest names, so the string
// form of the constant is returned directly.
// See https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
func (e CloudEnvironment) Name() string {
	return string(e)
}
// SetBaseDomain parses the baseDomainID (an Azure resource ID of the DNS
// zone, whose fifth "/"-separated segment is the resource group name) and
// sets BaseDomainResourceGroupName on the platform accordingly.
func (p *Platform) SetBaseDomain(baseDomainID string) error {
	parts := strings.Split(baseDomainID, "/")
	// Guard against malformed IDs: indexing parts[4] blindly would panic
	// when the ID has fewer than five segments.
	if len(parts) < 5 {
		return fmt.Errorf("invalid base domain resource ID %q", baseDomainID)
	}
	p.BaseDomainResourceGroupName = parts[4]
	return nil
}
// ClusterResourceGroupName returns the name of the resource group for the
// cluster: an explicitly configured ResourceGroupName wins, otherwise the
// name is derived from the infra ID with an "-rg" suffix.
func (p *Platform) ClusterResourceGroupName(infraID string) string {
	if p.ResourceGroupName != "" {
		return p.ResourceGroupName
	}
	return infraID + "-rg"
}
// IsARO returns true if ARO-only modifications are enabled.
// The value comes from the package-level 'aro' variable; no setter is
// visible in this file — presumably toggled via build tags or linker
// flags (TODO confirm).
func (p *Platform) IsARO() bool {
	return aro
}
|
// Copyright 2016-2021, Pulumi Corporation.
package schema
import (
jsschema "github.com/lestrrat-go/jsschema"
)
// FlattenJSSchema recursively flattens a schema containing AnyOf or OneOf into a list of schemas.
// Note: this only performs a shallow copy, don't use in situations where schema fields may be mutated.
// TODO(https://github.com/pulumi/pulumi-aws-native/issues/992): this should flatten AllOf as well.
func FlattenJSSchema(sch *jsschema.Schema) jsschema.SchemaList {
	// No union branches: the schema is already flat.
	if len(sch.AnyOf) == 0 && len(sch.OneOf) == 0 {
		return jsschema.SchemaList{sch}
	}
	// Recursively expand every AnyOf/OneOf branch into one flat list.
	flattened := make(jsschema.SchemaList, 0, len(sch.AnyOf)+len(sch.OneOf))
	for _, branch := range sch.AnyOf {
		flattened = append(flattened, FlattenJSSchema(branch)...)
	}
	for _, branch := range sch.OneOf {
		flattened = append(flattened, FlattenJSSchema(branch)...)
	}
	// Overlay each branch on top of the base schema (with its union fields
	// cleared) so shared constraints are preserved in every variant.
	for i, branch := range flattened {
		combined := jsschema.New()
		MergeJSSchema(combined, sch)
		combined.AnyOf = nil
		combined.OneOf = nil
		MergeJSSchema(combined, branch)
		flattened[i] = combined
	}
	return flattened
}
// MergeJSSchema overlays the contents of src into dest to merge the values of the fields.
// Scalars from src replace dest's; list fields are prepended (src first);
// map fields are merged with src winning on key collision.
// Note: This only does a shallow copy of src fields, don't use where schemas may be mutated.
func MergeJSSchema(dest *jsschema.Schema, src *jsschema.Schema) {
	// Metadata and scalar fields: src overrides dest when set.
	if src.ID != "" {
		dest.ID = src.ID
	}
	if src.Title != "" {
		dest.Title = src.Title
	}
	if src.Description != "" {
		dest.Description = src.Description
	}
	if src.Default != nil {
		dest.Default = src.Default
	}
	if len(src.Type) != 0 {
		dest.Type = append(src.Type, dest.Type...)
	}
	if src.SchemaRef != "" {
		dest.SchemaRef = src.SchemaRef
	}
	if len(src.Definitions) != 0 {
		dest.Definitions = mergeMaps(dest.Definitions, src.Definitions)
	}
	if src.Reference != "" {
		dest.Reference = src.Reference
	}
	if src.Format != "" {
		dest.Format = src.Format
	}
	// Numeric/string validation keywords use jsschema's Initialized flag
	// to distinguish "unset" from a zero value.
	if src.MultipleOf.Initialized {
		dest.MultipleOf = src.MultipleOf
	}
	if src.Minimum.Initialized {
		dest.Minimum = src.Minimum
	}
	if src.Maximum.Initialized {
		dest.Maximum = src.Maximum
	}
	if src.ExclusiveMinimum.Initialized {
		dest.ExclusiveMinimum = src.ExclusiveMinimum
	}
	if src.ExclusiveMaximum.Initialized {
		dest.ExclusiveMaximum = src.ExclusiveMaximum
	}
	if src.MaxLength.Initialized {
		dest.MaxLength = src.MaxLength
	}
	if src.MinLength.Initialized {
		dest.MinLength = src.MinLength
	}
	if src.Pattern != nil {
		dest.Pattern = src.Pattern
	}
	// Array validation keywords.
	if src.AdditionalItems != nil {
		dest.AdditionalItems = src.AdditionalItems
	}
	if src.Items != nil {
		dest.Items = src.Items
	}
	if src.MinItems.Initialized {
		dest.MinItems = src.MinItems
	}
	if src.MaxItems.Initialized {
		dest.MaxItems = src.MaxItems
	}
	if src.UniqueItems.Initialized {
		dest.UniqueItems = src.UniqueItems
	}
	// Object validation keywords.
	if src.MaxProperties.Initialized {
		dest.MaxProperties = src.MaxProperties
	}
	if src.MinProperties.Initialized {
		dest.MinProperties = src.MinProperties
	}
	if len(src.Required) != 0 {
		dest.Required = append(src.Required, dest.Required...)
	}
	if len(src.Dependencies.Names) != 0 {
		dest.Dependencies = jsschema.DependencyMap{
			Names:   mergeMaps(dest.Dependencies.Names, src.Dependencies.Names),
			Schemas: mergeMaps(dest.Dependencies.Schemas, src.Dependencies.Schemas),
		}
	}
	if len(src.Properties) != 0 {
		dest.Properties = mergeMaps(dest.Properties, src.Properties)
	}
	// AdditionalProperties is the one field that is merged recursively:
	// if both sides carry a schema, combine them (src applied last, so it wins).
	if src.AdditionalProperties != nil && src.AdditionalProperties.Schema != nil {
		if dest.AdditionalProperties == nil || dest.AdditionalProperties.Schema == nil {
			dest.AdditionalProperties = src.AdditionalProperties
		} else {
			merged := jsschema.AdditionalProperties{Schema: jsschema.New()}
			MergeJSSchema(merged.Schema, dest.AdditionalProperties.Schema)
			MergeJSSchema(merged.Schema, src.AdditionalProperties.Schema)
			dest.AdditionalProperties = &merged
		}
	}
	if len(src.PatternProperties) != 0 {
		dest.PatternProperties = mergeMaps(dest.PatternProperties, src.PatternProperties)
	}
	// Union/composition keywords: src entries are prepended.
	if len(src.Enum) != 0 {
		dest.Enum = append(src.Enum, dest.Enum...)
	}
	if len(src.AllOf) != 0 {
		dest.AllOf = append(src.AllOf, dest.AllOf...)
	}
	if len(src.AnyOf) != 0 {
		dest.AnyOf = append(src.AnyOf, dest.AnyOf...)
	}
	if len(src.OneOf) != 0 {
		dest.OneOf = append(src.OneOf, dest.OneOf...)
	}
	if src.Not != nil {
		dest.Not = src.Not
	}
	if len(src.Extras) != 0 {
		dest.Extras = mergeMaps(dest.Extras, src.Extras)
	}
}
// mergeMaps returns a new map containing the union of m1 and m2.
// On key collision the value from m2 wins. Neither input is mutated;
// either may be nil.
func mergeMaps[K comparable, V any](m1, m2 map[K]V) map[K]V {
	// Pre-size for the disjoint-keys case to avoid rehashing while filling.
	merged := make(map[K]V, len(m1)+len(m2))
	for k, v := range m1 {
		merged[k] = v
	}
	for k, v := range m2 {
		merged[k] = v
	}
	return merged
}
|
package provider
import (
"fmt"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/mrparkers/terraform-provider-keycloak/keycloak"
)
// resourceKeycloakSamlClientScope returns the Terraform resource definition
// for a Keycloak SAML client scope: its schema, CRUD callbacks, and importer.
func resourceKeycloakSamlClientScope() *schema.Resource {
	return &schema.Resource{
		Create: resourceKeycloakSamlClientScopeCreate,
		Read:   resourceKeycloakSamlClientScopeRead,
		Delete: resourceKeycloakSamlClientScopeDelete,
		Update: resourceKeycloakSamlClientScopeUpdate,
		// This resource can be imported using {{realm}}/{{client_scope_id}}. The Client Scope ID is displayed in the GUI
		Importer: &schema.ResourceImporter{
			State: resourceKeycloakSamlClientScopeImport,
		},
		Schema: map[string]*schema.Schema{
			// The realm is immutable: changing it forces recreation.
			"realm_id": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"name": {
				Type:     schema.TypeString,
				Required: true,
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},
			// When set, the consent screen is enabled with this text
			// (see getSamlClientScopeFromData).
			"consent_screen_text": {
				Type:     schema.TypeString,
				Optional: true,
			},
			// Display order in the Keycloak GUI; 0 omits the attribute,
			// resetting the order (see getSamlClientScopeFromData).
			"gui_order": {
				Type:     schema.TypeInt,
				Optional: true,
			},
		},
	}
}
// getSamlClientScopeFromData maps the Terraform resource data onto a
// keycloak.SamlClientScope, including consent-screen and GUI-order attributes.
func getSamlClientScopeFromData(data *schema.ResourceData) *keycloak.SamlClientScope {
	scope := &keycloak.SamlClientScope{
		Id:          data.Id(),
		RealmId:     data.Get("realm_id").(string),
		Name:        data.Get("name").(string),
		Description: data.Get("description").(string),
	}
	// The consent screen is displayed only when consent text is configured.
	consentScreenText, hasConsentText := data.GetOk("consent_screen_text")
	if hasConsentText {
		scope.Attributes.ConsentScreenText = consentScreenText.(string)
	}
	scope.Attributes.DisplayOnConsentScreen = hasConsentText
	// Treat 0 as an empty string for the purpose of omitting the attribute to reset the order
	if guiOrder := data.Get("gui_order").(int); guiOrder != 0 {
		scope.Attributes.GuiOrder = strconv.Itoa(guiOrder)
	}
	return scope
}
// setSamlClientScopeData writes the given client scope back into the
// Terraform state.
func setSamlClientScopeData(data *schema.ResourceData, clientScope *keycloak.SamlClientScope) {
	data.SetId(clientScope.Id)
	data.Set("realm_id", clientScope.RealmId)
	data.Set("name", clientScope.Name)
	data.Set("description", clientScope.Description)
	// Consent text is only meaningful while the consent screen is enabled.
	if clientScope.Attributes.DisplayOnConsentScreen {
		data.Set("consent_screen_text", clientScope.Attributes.ConsentScreenText)
	}
	// GuiOrder is stored as a string server-side; non-numeric (e.g. empty)
	// values leave the state untouched.
	if guiOrder, err := strconv.Atoi(clientScope.Attributes.GuiOrder); err == nil {
		data.Set("gui_order", guiOrder)
	}
}
// resourceKeycloakSamlClientScopeCreate creates the client scope in Keycloak
// and then re-reads it to populate the Terraform state.
func resourceKeycloakSamlClientScopeCreate(data *schema.ResourceData, meta interface{}) error {
	keycloakClient := meta.(*keycloak.KeycloakClient)
	clientScope := getSamlClientScopeFromData(data)
	if err := keycloakClient.NewSamlClientScope(clientScope); err != nil {
		return err
	}
	setSamlClientScopeData(data, clientScope)
	return resourceKeycloakSamlClientScopeRead(data, meta)
}
// resourceKeycloakSamlClientScopeRead refreshes the Terraform state from
// Keycloak, removing the resource from state when it no longer exists.
func resourceKeycloakSamlClientScopeRead(data *schema.ResourceData, meta interface{}) error {
	keycloakClient := meta.(*keycloak.KeycloakClient)
	clientScope, err := keycloakClient.GetSamlClientScope(data.Get("realm_id").(string), data.Id())
	if err != nil {
		return handleNotFoundError(err, data)
	}
	setSamlClientScopeData(data, clientScope)
	return nil
}
// resourceKeycloakSamlClientScopeUpdate pushes the current Terraform
// configuration to Keycloak and refreshes the state on success.
func resourceKeycloakSamlClientScopeUpdate(data *schema.ResourceData, meta interface{}) error {
	keycloakClient := meta.(*keycloak.KeycloakClient)
	clientScope := getSamlClientScopeFromData(data)
	if err := keycloakClient.UpdateSamlClientScope(clientScope); err != nil {
		return err
	}
	setSamlClientScopeData(data, clientScope)
	return nil
}
// resourceKeycloakSamlClientScopeDelete removes the client scope from
// Keycloak; Terraform drops it from state when this returns nil.
func resourceKeycloakSamlClientScopeDelete(data *schema.ResourceData, meta interface{}) error {
	keycloakClient := meta.(*keycloak.KeycloakClient)
	return keycloakClient.DeleteSamlClientScope(data.Get("realm_id").(string), data.Id())
}
// resourceKeycloakSamlClientScopeImport implements `terraform import` for
// IDs of the form {{realmId}}/{{samlClientScopeId}}.
func resourceKeycloakSamlClientScopeImport(d *schema.ResourceData, _ interface{}) ([]*schema.ResourceData, error) {
	parts := strings.Split(d.Id(), "/")
	if len(parts) != 2 {
		// Error strings are lowercase and unpunctuated per Go convention
		// (staticcheck ST1005).
		return nil, fmt.Errorf("invalid import, supported import format: {{realmId}}/{{samlClientScopeId}}")
	}
	d.Set("realm_id", parts[0])
	d.SetId(parts[1])
	return []*schema.ResourceData{d}, nil
}
|
package proof
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
)
///
/// Sealing
///
// SealVerifyInfo is the information needed to verify a seal proof.
type SealVerifyInfo struct {
	SealProof abi.RegisteredSealProof
	abi.SectorID
	DealIDs               []abi.DealID
	Randomness            abi.SealRandomness
	InteractiveRandomness abi.InteractiveSealRandomness
	Proof                 []byte
	// Safe because we get those from the miner actor
	SealedCID   cid.Cid `checked:"true"` // CommR
	UnsealedCID cid.Cid `checked:"true"` // CommD
}
///
/// PoSting
///
// SectorInfo carries the per-sector information about a proof necessary for
// PoSt verification.
type SectorInfo struct {
	SealProof    abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt
	SectorNumber abi.SectorNumber
	SealedCID    cid.Cid // CommR
}
// PoStProof is a proof-of-spacetime proof together with the registered
// PoSt proof type it was generated with.
type PoStProof struct {
	PoStProof  abi.RegisteredPoStProof
	ProofBytes []byte
}
// WinningPoStVerifyInfo is the information needed to verify a Winning PoSt
// attached to a block header.
// Note: this is not used within the state machine, but by the consensus/election mechanisms.
type WinningPoStVerifyInfo struct {
	Randomness        abi.PoStRandomness
	Proofs            []PoStProof
	ChallengedSectors []SectorInfo
	Prover            abi.ActorID // used to derive 32-byte prover ID
}
// WindowPoStVerifyInfo is the information needed to verify a Window PoSt
// submitted directly to a miner actor.
type WindowPoStVerifyInfo struct {
	Randomness        abi.PoStRandomness
	Proofs            []PoStProof
	ChallengedSectors []SectorInfo
	Prover            abi.ActorID // used to derive 32-byte prover ID
}
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/julienschmidt/httprouter"
)
// main wires up the demo HTTP routes and serves them on port 65001.
func main() {
	router := httprouter.New()
	router.GET("/", index)
	router.GET("/testString", testString)
	router.GET("/testMap", testMap)
	router.GET("/test2", test2Index)
	router.GET("/test2/:size", test2)
	// ListenAndServe always returns a non-nil error on exit; report it
	// instead of discarding it silently.
	if err := http.ListenAndServe(":65001", router); err != nil {
		fmt.Println("server error:", err)
	}
}
// index handles GET / with a plain-text banner.
func index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	fmt.Fprintln(w, "go api")
}
// testString handles GET /testString with a fixed JSON payload written as a
// raw string literal.
func testString(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprint(w, `{"result":"すいようのどようのうしのひ"}`)
}
// testMap handles GET /testMap: the same payload as testString, but built
// from a map and serialized by encoding/json.
func testMap(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	payload := map[string]string{"result": "すいようのどようのうしのひ"}
	json.NewEncoder(w).Encode(payload)
}
// test2Index handles GET /test2 with a fixed-size (10) generated result set.
func test2Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	json.NewEncoder(w).Encode(map[string][]map[string]string{
		"results": CreateTest2Response(10),
	})
}
// test2 handles GET /test2/:size, generating :size entries; a non-numeric
// size silently falls back to 10. (Also fixes the misspelled local `erorr`
// and drops the redundant `var ... string =` declaration.)
func test2(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	size, err := strconv.Atoi(ps.ByName("size"))
	if err != nil {
		size = 10
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	json.NewEncoder(w).Encode(map[string][]map[string]string{
		"results": CreateTest2Response(size),
	})
}
// CreateTest2Response builds a slice of `size` single-entry maps of the form
// {"keyN": "valueN"} for N = 1..size.
func CreateTest2Response(size int) []map[string]string {
	entries := make([]map[string]string, 0, size)
	for n := 1; n <= size; n++ {
		suffix := strconv.Itoa(n)
		entries = append(entries, map[string]string{"key" + suffix: "value" + suffix})
	}
	return entries
}
|
package rules
import (
"fmt"
"log"
"os"
)
// GetRulesFiles prints the names of all entries in the rules directory.
// NOTE(review): the directory path is hard-coded; consider making it a
// parameter in a follow-up (signature kept unchanged here).
func GetRulesFiles() {
	dir, err := os.Open("/home/artem/hello-sql")
	if err != nil {
		log.Fatalf("failed opening directory: %s", err)
	}
	defer dir.Close()
	// Readdirnames(0) reads all entries at once; its error was previously
	// discarded, which could silently print a partial (or empty) listing.
	names, err := dir.Readdirnames(0)
	if err != nil {
		log.Fatalf("failed reading directory: %s", err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
|
package main
// longestCommonPrefix returns the longest prefix (compared rune-by-rune,
// so multi-byte characters are handled correctly) shared by all strings in
// strs. It returns "" for empty or nil input.
func longestCommonPrefix(strs []string) string {
	if len(strs) == 0 {
		return ""
	}
	prefix := []rune(strs[0])
	for _, s := range strs[1:] {
		runes := []rune(s)
		// The prefix can never be longer than the shortest string seen,
		// so truncate eagerly (replaces the separate min helper).
		if len(runes) < len(prefix) {
			prefix = prefix[:len(runes)]
		}
		for i := 0; i < len(prefix); i++ {
			if prefix[i] != runes[i] {
				prefix = prefix[:i]
				break
			}
		}
	}
	return string(prefix)
}
// min14 returns the smaller of a and b.
func min14(a, b int) int {
	if a < b {
		return a
	}
	return b
}
|
package main
import "fmt"
// main demonstrates that sending an ARRAY through a channel copies the
// values: mutating the received copy does not affect the source. The
// commented-out slice variant shares its backing array, so the source
// WOULD change in that case.
func main() {
	// sliceChan := make(chan []int, 3) // with a slice, the source values would be affected
	sliceChan := make(chan [3]int, 3) // with an array, the source values are not affected
	// srcSlice := []int{1, 2, 3}
	srcSlice := [3]int{1, 2, 3}
	fmt.Printf("srcSlice %v\n", srcSlice)
	sliceChan <- srcSlice
	dstSlice := <-sliceChan
	dstSlice[1] = 512
	fmt.Printf("After handle ...\n")
	fmt.Printf("srcSlice %v and dstSlice %v\n", srcSlice, dstSlice) // srcSlice only changes in the slice variant, not here
}
|
package test
import (
"fmt"
"gengine/base"
"gengine/builder"
"gengine/context"
"gengine/engine"
"github.com/sirupsen/logrus"
"testing"
"time"
)
// rule_not is a gengine rule exercising the logical-not operator: both
// negated conditions evaluate to true, so "hello" is printed.
const rule_not = `
rule "test not" "test"
begin
if !(10 < -10 + 6*100 - 10) && !false {
println("hello")
}
end
`
// exec_not builds the rule_not rule set and executes it once, logging the
// build and execution durations in nanoseconds. (Reformatted to be
// gofmt-clean; the err shadowing inside the else branch is removed by
// using an early return.)
func exec_not() {
	dataContext := context.NewDataContext()
	dataContext.Add("println", fmt.Println)
	// Init the rule engine.
	knowledgeContext := base.NewKnowledgeContext()
	ruleBuilder := builder.NewRuleBuilder(knowledgeContext, dataContext)
	// Resolve rules from the string and time the build.
	start1 := time.Now().UnixNano()
	err := ruleBuilder.BuildRuleFromString(rule_not)
	end1 := time.Now().UnixNano()
	logrus.Infof("rules num:%d, load rules cost time:%d ns", len(knowledgeContext.RuleEntities), end1-start1)
	if err != nil {
		logrus.Errorf("err:%s ", err)
		return
	}
	eng := engine.NewGengine()
	start := time.Now().UnixNano()
	// true: if one rule fails, continue executing the remaining rules.
	err = eng.Execute(ruleBuilder, true)
	end := time.Now().UnixNano()
	if err != nil {
		logrus.Errorf("execute rule error: %v", err)
	}
	logrus.Infof("execute rule cost %d ns", end-start)
}
// Test_not runs the logical-not rule scenario end to end (gofmt spacing fixed).
func Test_not(t *testing.T) {
	exec_not()
}
|
package main
import "fmt"
// Channels are the pipes that connect concurrent goroutines. You
// can send values into channels from one goroutine and receive
// those values into another goroutine.
func main() {
	// An unbuffered channel: each send blocks until a receiver is ready.
	ch := make(chan string)
	// Produce a value from a second goroutine.
	go func() {
		ch <- "ping"
	}()
	// Receiving synchronizes with the sender; then print the value.
	msg := <-ch
	fmt.Println(msg)
}
|
package onlinestore
import (
"github.com/feast-dev/feast/go/internal/feast/registry"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewRedisOnlineStore(t *testing.T) {
var config = map[string]interface{}{
"connection_string": "redis://localhost:6379",
}
rc := ®istry.RepoConfig{
OnlineStore: config,
EntityKeySerializationVersion: 2,
}
store, err := NewRedisOnlineStore("test", rc, config)
assert.Nil(t, err)
var opts = store.client.Options()
assert.Equal(t, opts.Addr, "redis://localhost:6379")
assert.Equal(t, opts.Password, "")
assert.Equal(t, opts.DB, 0)
assert.Nil(t, opts.TLSConfig)
}
func TestNewRedisOnlineStoreWithPassword(t *testing.T) {
var config = map[string]interface{}{
"connection_string": "redis://localhost:6379,password=secret",
}
rc := ®istry.RepoConfig{
OnlineStore: config,
EntityKeySerializationVersion: 2,
}
store, err := NewRedisOnlineStore("test", rc, config)
assert.Nil(t, err)
var opts = store.client.Options()
assert.Equal(t, opts.Addr, "redis://localhost:6379")
assert.Equal(t, opts.Password, "secret")
}
func TestNewRedisOnlineStoreWithDB(t *testing.T) {
var config = map[string]interface{}{
"connection_string": "redis://localhost:6379,db=1",
}
rc := ®istry.RepoConfig{
OnlineStore: config,
EntityKeySerializationVersion: 2,
}
store, err := NewRedisOnlineStore("test", rc, config)
assert.Nil(t, err)
var opts = store.client.Options()
assert.Equal(t, opts.Addr, "redis://localhost:6379")
assert.Equal(t, opts.DB, 1)
}
func TestNewRedisOnlineStoreWithSsl(t *testing.T) {
var config = map[string]interface{}{
"connection_string": "redis://localhost:6379,ssl=true",
}
rc := ®istry.RepoConfig{
OnlineStore: config,
EntityKeySerializationVersion: 2,
}
store, err := NewRedisOnlineStore("test", rc, config)
assert.Nil(t, err)
var opts = store.client.Options()
assert.Equal(t, opts.Addr, "redis://localhost:6379")
assert.NotNil(t, opts.TLSConfig)
}
|
package leetcode
import (
"reflect"
"testing"
)
// TestCommonChars verifies commonChars on the canonical LeetCode example;
// duplicated characters ("l" twice) must be preserved in the result.
func TestCommonChars(t *testing.T) {
	want := []string{"e", "l", "l"}
	got := commonChars([]string{"bella", "label", "roller"})
	if !reflect.DeepEqual(got, want) {
		t.Fatal()
	}
}
|
package ipfsaddr
import (
"strings"
"testing"
ma "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
)
// good lists peer addresses that must parse successfully, both bare
// (/ipfs/... only) and with a transport prefix.
var good = []string{
	"/ipfs/5dru6bJPUM1B7N69528u49DJiWZnok",
	"/ipfs/kTRX47RthhwNzWdi6ggwqjuX",
	"/ipfs/QmUCseQWXCSrhf9edzVKTvoj8o8Ts5aXFGNPameZRPJ6uR",
	"/ip4/1.2.3.4/tcp/1234/ipfs/5dru6bJPUM1B7N69528u49DJiWZnok",
	"/ip4/1.2.3.4/tcp/1234/ipfs/kTRX47RthhwNzWdi6ggwqjuX",
	"/ip4/1.2.3.4/tcp/1234/ipfs/QmUCseQWXCSrhf9edzVKTvoj8o8Ts5aXFGNPameZRPJ6uR",
}
// transports holds, index-aligned with good, the expected transport prefix
// of each good address ("" when the address has none).
var transports = []string{
	"",
	"",
	"",
	"/ip4/1.2.3.4/tcp/1234",
	"/ip4/1.2.3.4/tcp/1234",
	"/ip4/1.2.3.4/tcp/1234",
}
// bad lists inputs that must be rejected, annotated with the reason.
var bad = []string{
	"5dru6bJPUM1B7N69528u49DJiWZnok",                                // bad ma
	"kTRX47RthhwNzWdi6ggwqjuX",                                      // bad ma
	"QmUCseQWXCSrhf9edzVKTvoj8o8Ts5aXFGNPameZRPJ6uR",                // bad ma
	"ipfs/5dru6bJPUM1B7N69528u49DJiWZnok",                           // bad ma
	"ipfs/kTRX47RthhwNzWdi6ggwqjuX",                                 // bad ma
	"ipfs/QmUCseQWXCSrhf9edzVKTvoj8o8Ts5aXFGNPameZRPJ6uR",           // bad ma
	"/ipfs/5dru6bJPUM1B7N69528u49DJiWZno",                           // bad mh
	"/ipfs/kTRX47RthhwNzWdi6ggwqju",                                 // bad mh
	"/ipfs/QmUCseQWXCSrhf9edzVKTvj8o8Ts5aXFGNPameZRPJ6uR",           // bad mh
	"/ipfs/QmUCseQWXCSrhf9edzVKTvoj8o8Ts5aXFGNPameZRPJ6uR/tcp/1234", // ipfs not last
	"/ip4/1.2.3.4/tcp/ipfs/5dru6bJPUM1B7N69528u49DJiWZnok",          // bad tcp part
	"/ip4/tcp/1234/ipfs/kTRX47RthhwNzWdi6ggwqjuX",                   // bad ip part
	"/ip4/1.2.3.4/tcp/1234/ipfs",                                    // no id
	"/ip4/1.2.3.4/tcp/1234/ipfs/",                                   // no id
}
// newMultiaddr parses s into a multiaddr, failing the test immediately on
// error.
func newMultiaddr(t *testing.T, s string) ma.Multiaddr {
	addr, parseErr := ma.NewMultiaddr(s)
	if parseErr != nil {
		t.Fatal(parseErr)
	}
	return addr
}
// TestParseStringGood expects every entry in good to parse.
func TestParseStringGood(t *testing.T) {
	for _, addr := range good {
		_, err := ParseString(addr)
		if err != nil {
			t.Error("failed to parse", addr, err)
		}
	}
}
// TestParseStringBad expects every entry in bad to be rejected.
func TestParseStringBad(t *testing.T) {
	for _, addr := range bad {
		_, err := ParseString(addr)
		if err == nil {
			t.Error("succeeded in parsing", addr)
		}
	}
}
// TestParseMultiaddrGood expects every entry in good to parse via the
// multiaddr-typed entry point.
func TestParseMultiaddrGood(t *testing.T) {
	for _, addr := range good {
		_, err := ParseMultiaddr(newMultiaddr(t, addr))
		if err != nil {
			t.Error("failed to parse", addr, err)
		}
	}
}
// TestParseMultiaddrBad expects ParseMultiaddr to reject every bad entry
// that is at least a syntactically valid multiaddr.
func TestParseMultiaddrBad(t *testing.T) {
	for _, addr := range bad {
		// Entries that are not valid multiaddrs at all cannot be handed
		// to ParseMultiaddr; skip those.
		m, err := ma.NewMultiaddr(addr)
		if err != nil {
			continue
		}
		if _, err := ParseMultiaddr(m); err == nil {
			t.Error("succeeded in parsing", m)
		}
	}
}
// TestIDMatches checks that the parsed address's peer ID equals the ID
// encoded in the last path segment of the source string.
func TestIDMatches(t *testing.T) {
	for _, addr := range good {
		parsed, err := ParseString(addr)
		if err != nil {
			t.Error("failed to parse", addr, err)
			continue
		}
		segments := strings.Split(addr, "/")
		encodedID := segments[len(segments)-1]
		want, err := peer.IDB58Decode(encodedID)
		if err != nil {
			t.Error("failed to parse", encodedID, err)
			continue
		}
		if parsed.ID() != want {
			t.Error("not equal", parsed.ID(), want)
		}
	}
}
// TestMultiaddrMatches checks that the parsed address round-trips to the
// same multiaddr it was built from.
func TestMultiaddrMatches(t *testing.T) {
	for _, addr := range good {
		parsed, err := ParseString(addr)
		if err != nil {
			t.Error("failed to parse", addr, err)
			continue
		}
		want := newMultiaddr(t, addr)
		if !parsed.Multiaddr().Equal(want) {
			t.Error("not equal", parsed.Multiaddr(), want)
		}
	}
}
// TestTransport checks Transport(a): addresses consisting of only the /ipfs
// component must yield nil, and prefixed addresses must yield the multiaddr
// minus its final /ipfs component.
func TestTransport(t *testing.T) {
	for _, g := range good {
		a, err := ParseString(g)
		if err != nil {
			t.Error("failed to parse", g, err)
			continue
		}
		m := newMultiaddr(t, g)
		split := ma.Split(m)
		if len(split) <= 1 {
			// Only the /ipfs component: no transport expected.
			if Transport(a) != nil {
				t.Error("should not have a transport")
			}
		} else {
			// Rejoin everything except the trailing /ipfs part to get
			// the expected transport address.
			m = ma.Join(split[:len(split)-1]...)
			if a.Multiaddr().Equal(m) {
				t.Error("should not be equal", a.Multiaddr(), m)
			}
			if !Transport(a).Equal(m) {
				t.Error("should be equal", Transport(a), m)
			}
		}
	}
}
|
package glog
import (
"log"
)
// consoleTarget writes log events to the console via the standard log package.
type consoleTarget struct {
	name string //read-only after creation
	minLevel LogLevel //read-only after creation
	maxLevel LogLevel //read-only after creation
}
// Name returns the configured target name (defaults to "*" when the config
// omits it — see createConsoleTarget).
func (ct *consoleTarget) Name() string {
	return ct.name
}
// MinLevel returns the minimum log level this target accepts.
func (ct *consoleTarget) MinLevel() LogLevel {
	return ct.minLevel
}
// MaxLevel returns the maximum log level this target accepts.
func (ct *consoleTarget) MaxLevel() LogLevel {
	return ct.maxLevel
}
// Write serializes the event with sr and prints it via the standard log
// package; when serialization yields nil, the raw event is printed instead.
func (ct *consoleTarget) Write(event *LogEvent, sr Serializer) {
	encoded := sr.Encode(event)
	if encoded == nil {
		log.Printf("%+v\r\n", event)
		return
	}
	log.Println(string(encoded))
}
// Overflow reports whether the target's buffer is full; the console target
// does not buffer, so it always returns false.
func (ct *consoleTarget) Overflow() bool {
	return false
}
// Flush is a no-op: output goes straight to the log package and nothing is
// buffered. (The redundant bare `return` was removed — staticcheck S1023.)
func (ct *consoleTarget) Flush() {
}
// createConsoleTarget builds a consoleTarget from a generic config map.
// Recognized keys: "Name" (string, default "*") and "MinLevel"/"MaxLevel"
// (level-name strings understood by toLevel, default EveryLevel). A present
// but non-string value panics on the type assertion, matching the original
// behavior.
func createConsoleTarget(config map[string]interface{}) Target {
	ct := &consoleTarget{}
	if name := config["Name"]; name == nil {
		ct.name = "*"
	} else {
		ct.name = name.(string)
	}
	// The two level keys share identical parse-with-default logic.
	ct.maxLevel = levelFromConfig(config, "MaxLevel")
	ct.minLevel = levelFromConfig(config, "MinLevel")
	return ct
}
// levelFromConfig reads the level string stored under key, returning
// EveryLevel when the key is absent.
func levelFromConfig(config map[string]interface{}, key string) LogLevel {
	v := config[key]
	if v == nil {
		return EveryLevel
	}
	return toLevel(v.(string))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.