text stringlengths 11 4.05M |
|---|
package persistence
import (
"fmt"
"testing"
)
// TestCategoryDAO verifies that fetching the category list succeeds and
// dumps the result for manual inspection.
func TestCategoryDAO(t *testing.T) {
	categories, err := GetCategoryList()
	if err != nil {
		t.Error(err.Error())
	}
	fmt.Println(categories)
}
|
// md2docx project main.go
package main
import (
"fmt"
"io/ioutil"
"os"
"baliance.com/gooxml/document"
bf "gopkg.in/russross/blackfriday.v2"
)
// main converts a Markdown file to a .docx document: it parses the
// Markdown with blackfriday and renders the resulting AST into a Word
// template via DocxRenderer, dumping the template's styles on the way
// so the renderer's style IDs can be checked against the template.
func main() {
	mdf, err := os.Open("/home/cuberl/gopath/src/connect-core/docs/论文.md")
	if err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
	defer mdf.Close()
	input, err := ioutil.ReadAll(mdf)
	if err != nil {
		// Previously this error was silently dropped, so a failed read
		// would have rendered an empty or partial document.
		fmt.Println(err)
		os.Exit(-1)
	}
	doc, err := document.OpenTemplate("/home/cuberl/backup2.docx")
	if err != nil {
		fmt.Println(err)
		// Exit non-zero: the original os.Exit(0) reported success on failure.
		os.Exit(-1)
	}
	for _, s := range doc.Styles.Styles() {
		fmt.Printf("%s: (%s)\n", s.Name(), s.StyleID())
	}
	renderer := &DocxRenderer{doc: doc}
	md := bf.New(bf.WithExtensions(bf.FencedCode))
	ast := md.Parse(input)
	renderer.Render(ast)
	if err := renderer.doc.SaveToFile("/home/cuberl/new.docx"); err != nil {
		fmt.Println(err)
	}
}
|
// Created at 10/21/2021 4:50 PM
// Developer: trungnq2710 (trungnq2710@gmail.com)
package go_apns
// ApsAlert models the "alert" dictionary of an APNs payload's "aps"
// object. All fields are optional and omitted from the JSON when empty;
// the hyphenated tags follow Apple's payload key names.
type ApsAlert struct {
	Title           string   `json:"title,omitempty"`
	Subtitle        string   `json:"subtitle,omitempty"`
	Body            string   `json:"body,omitempty"`
	LaunchImage     string   `json:"launch-image,omitempty"`
	TitleLocKey     string   `json:"title-loc-key,omitempty"`
	TitleLocArgs    []string `json:"title-loc-args,omitempty"`
	SubtitleLocKey  string   `json:"subtitle-loc-key,omitempty"`
	SubtitleLocArgs []string `json:"subtitle-loc-args,omitempty"`
	LocKey          string   `json:"loc-key,omitempty"`
	LocArgs         []string `json:"loc-args,omitempty"`
}

// NewApsAlert returns an empty alert; populate it via the chainable
// Set* methods below, each of which returns the receiver.
func NewApsAlert() *ApsAlert {
	return &ApsAlert{}
}

// The title of the notification. Apple Watch displays this string in the short look notification
// interface. Specify a string that’s quickly understood by the user
func (a *ApsAlert) SetTitle(i string) *ApsAlert {
	a.Title = i
	return a
}

// Additional information that explains the purpose of the notification
func (a *ApsAlert) SetSubtitle(i string) *ApsAlert {
	a.Subtitle = i
	return a
}

// The content of the alert message
func (a *ApsAlert) SetBody(i string) *ApsAlert {
	a.Body = i
	return a
}

// The name of the launch image file to display. If the user chooses to launch your app, the
// contents of the specified image or storyboard file are displayed instead of your app’s
// normal launch image
func (a *ApsAlert) SetLaunchImage(i string) *ApsAlert {
	a.LaunchImage = i
	return a
}

// The key for a localized title string. Specify this key instead of the title key to retrieve
// the title from your app’s Localizable.strings files. The value must contain the name of a
// key in your strings file
func (a *ApsAlert) SetTitleLocKey(i string) *ApsAlert {
	a.TitleLocKey = i
	return a
}

// An array of strings containing replacement values for variables in your title string.
// Each %@ character in the string specified by the title-loc-key is replaced by a value
// from this array. The first item in the array replaces the first instance of the %@ character
// in the string, the second item replaces the second instance, and so on
func (a *ApsAlert) SetTitleLocArgs(i []string) *ApsAlert {
	a.TitleLocArgs = i
	return a
}

// The key for a localized subtitle string. Use this key, instead of the subtitle key, to retrieve
// the subtitle from your app’s Localizable.strings file. The value must contain the name of a key
// in your strings file
func (a *ApsAlert) SetSubtitleLocKey(i string) *ApsAlert {
	a.SubtitleLocKey = i
	return a
}

// An array of strings containing replacement values for variables in your title string.
// Each %@ character in the string specified by subtitle-loc-key is replaced by a value from this
// array. The first item in the array replaces the first instance of the %@ character in the string,
// the second item replaces the second instance, and so on.
func (a *ApsAlert) SetSubtitleLocArgs(i []string) *ApsAlert {
	a.SubtitleLocArgs = i
	return a
}

// The key for a localized message string. Use this key, instead of the body key, to retrieve the
// message text from your app’s Localizable.strings file. The value must contain the name of a key
// in your strings file.
func (a *ApsAlert) SetLocKey(i string) *ApsAlert {
	a.LocKey = i
	return a
}

// An array of strings containing replacement values for variables in your message text. Each %@
// character in the string specified by loc-key is replaced by a value from this array. The first
// item in the array replaces the first instance of the %@ character in the string, the second
// item replaces the second instance, and so on
func (a *ApsAlert) SetLocArgs(i []string) *ApsAlert {
	a.LocArgs = i
	return a
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// main prints the N-th whitespace-separated column (1-based) of each
// file named on the command line, one file after another.
func main() {
	arguments := os.Args
	if len(arguments) == 1 {
		fmt.Println("Usage:selectColumn column <file1> [<file2> [...<fileN]]")
		os.Exit(1)
	}
	column, err := strconv.Atoi(arguments[1])
	if err != nil {
		// The original printed an empty string here, hiding the cause.
		fmt.Printf("invalid column %q: %v\n", arguments[1], err)
		os.Exit(1)
	}
	// Columns are 1-based; the original accepted 0 and then indexed
	// l[-1], which panics on any non-empty line.
	if column < 1 {
		fmt.Println("column must be a positive integer")
		os.Exit(1)
	}
	for _, fileName := range arguments[2:] {
		fmt.Println("\t\t", fileName)
		// One helper call per file so each file is closed promptly;
		// the original deferred all Close calls until main returned.
		if err := printColumn(fileName, column); err != nil {
			fmt.Println("Error opening file", err)
		}
	}
}

// printColumn prints the given 1-based column of every line of fileName
// that has at least that many fields. It returns the first open/read error.
func printColumn(fileName string, column int) error {
	f, err := os.Open(fileName)
	if err != nil {
		return err
	}
	defer f.Close()
	r := bufio.NewReader(f)
	for {
		line, err := r.ReadString('\n')
		if err == io.EOF {
			break
		} else if err != nil {
			// The original printed the error and looped forever on a
			// persistent read failure; bail out instead.
			return err
		}
		l := strings.Fields(line)
		// >= (not >): a line with exactly `column` fields has a value
		// at index column-1; the original skipped such lines.
		if len(l) >= column {
			fmt.Println(l[column-1])
		}
	}
	return nil
}
|
package tsmt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00900103 is the XML document wrapper for the
// tsmt.009.001.03 BaselineAmendmentRequest message.
type Document00900103 struct {
	XMLName xml.Name                     `xml:"urn:iso:std:iso:20022:tech:xsd:tsmt.009.001.03 Document"`
	Message *BaselineAmendmentRequestV03 `xml:"BaselnAmdmntReq"`
}

// AddMessage allocates the payload message and returns it so the caller
// can populate it.
func (d *Document00900103) AddMessage() *BaselineAmendmentRequestV03 {
	d.Message = &BaselineAmendmentRequestV03{}
	return d.Message
}
// Scope
// The BaselineAmendmentRequest message is sent by a primary party involved in a transaction to the matching application.
// The message is used to request the amendment of an established baseline.
// Usage
// The BaselineAmendmentRequest message may only be sent if the transaction is in the state Established or Active.
// The BaselineAmendmentRequest message can be sent to the matching application by one of the primary parties involved in a transaction established in the push-through mode to request the amendment of an established baseline.
// The matching application acknowledges the receipt of the amendment request by sending a DeltaReport message to the submitter of the BaselineAmendmentRequest message. It passes on the newly proposed baseline to the counterparty by sending a FullPushThroughReport message, a DeltaReport message and a pre-calculated BaselineReport message.
// The counterparty is expected to either accept or reject the amendment request by submitting an AmendmentAcceptance or AmendmentRejection message.
// or
// The BaselineAmendmentRequest message can be sent by the party involved in a transaction established in the lodge mode to the matching application to amend an established baseline.
// The matching application amends the baseline according to the BaselineAmendmentRequest message and confirms the execution of the request by sending a DeltaReport and calculated BaselineReport message to the requester of the amendment.
type BaselineAmendmentRequestV03 struct {

	// Identifies the request message.
	RequestIdentification *iso20022.MessageIdentification1 `xml:"ReqId"`

	// Unique identification assigned by the matching application to the transaction.
	// This identification is to be used in any communication between the parties.
	TransactionIdentification *iso20022.SimpleIdentificationInformation `xml:"TxId"`

	// Reference to the transaction for the requesting financial institution.
	SubmitterTransactionReference *iso20022.SimpleIdentificationInformation `xml:"SubmitrTxRef,omitempty"`

	// Specifies the commercial details of the underlying transaction.
	Baseline *iso20022.Baseline3 `xml:"Baseln"`

	// Person to be contacted in the organisation of the buyer.
	BuyerContactPerson []*iso20022.ContactIdentification1 `xml:"BuyrCtctPrsn,omitempty"`

	// Person to be contacted in the organisation of the seller.
	SellerContactPerson []*iso20022.ContactIdentification1 `xml:"SellrCtctPrsn,omitempty"`

	// Person to be contacted in the buyer's bank.
	BuyerBankContactPerson []*iso20022.ContactIdentification1 `xml:"BuyrBkCtctPrsn,omitempty"`

	// Person to be contacted in the seller's bank.
	SellerBankContactPerson []*iso20022.ContactIdentification1 `xml:"SellrBkCtctPrsn,omitempty"`

	// Person to be contacted in another bank than the seller or buyer's bank.
	OtherBankContactPerson []*iso20022.ContactIdentification3 `xml:"OthrBkCtctPrsn,omitempty"`
}

// AddRequestIdentification allocates and returns the message identification.
func (b *BaselineAmendmentRequestV03) AddRequestIdentification() *iso20022.MessageIdentification1 {
	b.RequestIdentification = new(iso20022.MessageIdentification1)
	return b.RequestIdentification
}

// AddTransactionIdentification allocates and returns the transaction identification.
func (b *BaselineAmendmentRequestV03) AddTransactionIdentification() *iso20022.SimpleIdentificationInformation {
	b.TransactionIdentification = new(iso20022.SimpleIdentificationInformation)
	return b.TransactionIdentification
}

// AddSubmitterTransactionReference allocates and returns the submitter's transaction reference.
func (b *BaselineAmendmentRequestV03) AddSubmitterTransactionReference() *iso20022.SimpleIdentificationInformation {
	b.SubmitterTransactionReference = new(iso20022.SimpleIdentificationInformation)
	return b.SubmitterTransactionReference
}

// AddBaseline allocates and returns the baseline details.
func (b *BaselineAmendmentRequestV03) AddBaseline() *iso20022.Baseline3 {
	b.Baseline = new(iso20022.Baseline3)
	return b.Baseline
}

// AddBuyerContactPerson appends and returns a new buyer contact.
func (b *BaselineAmendmentRequestV03) AddBuyerContactPerson() *iso20022.ContactIdentification1 {
	newValue := new(iso20022.ContactIdentification1)
	b.BuyerContactPerson = append(b.BuyerContactPerson, newValue)
	return newValue
}

// AddSellerContactPerson appends and returns a new seller contact.
func (b *BaselineAmendmentRequestV03) AddSellerContactPerson() *iso20022.ContactIdentification1 {
	newValue := new(iso20022.ContactIdentification1)
	b.SellerContactPerson = append(b.SellerContactPerson, newValue)
	return newValue
}

// AddBuyerBankContactPerson appends and returns a new buyer-bank contact.
func (b *BaselineAmendmentRequestV03) AddBuyerBankContactPerson() *iso20022.ContactIdentification1 {
	newValue := new(iso20022.ContactIdentification1)
	b.BuyerBankContactPerson = append(b.BuyerBankContactPerson, newValue)
	return newValue
}

// AddSellerBankContactPerson appends and returns a new seller-bank contact.
func (b *BaselineAmendmentRequestV03) AddSellerBankContactPerson() *iso20022.ContactIdentification1 {
	newValue := new(iso20022.ContactIdentification1)
	b.SellerBankContactPerson = append(b.SellerBankContactPerson, newValue)
	return newValue
}

// AddOtherBankContactPerson appends and returns a new third-bank contact.
func (b *BaselineAmendmentRequestV03) AddOtherBankContactPerson() *iso20022.ContactIdentification3 {
	newValue := new(iso20022.ContactIdentification3)
	b.OtherBankContactPerson = append(b.OtherBankContactPerson, newValue)
	return newValue
}
|
package common
import (
"sync"
)
// NewStdRole constructs a role with the given identifier and an empty
// permission set.
func NewStdRole(id string) *stdRole {
	return &stdRole{
		id:          id,
		permissions: make(Permissions),
	}
}

// stdRole is a mutex-guarded role: an identifier plus the set of
// permissions granted to it.
type stdRole struct {
	sync.RWMutex
	id          string `json:"id"`
	permissions Permissions
}

// getId reports the role's identifier.
func (r *stdRole) getId() string {
	return r.id
}

// assign grants permission p to the role.
func (r *stdRole) assign(p Permission) {
	r.Lock()
	defer r.Unlock()
	r.permissions[p.getId()] = p
}

// permit reports whether the role holds a permission matching p.
func (r *stdRole) permit(p Permission) bool {
	r.RLock()
	defer r.RUnlock()
	for _, candidate := range r.permissions {
		if candidate.match(p) {
			return true
		}
	}
	return false
}

// revoke removes permission p from the role.
func (r *stdRole) revoke(p Permission) {
	r.Lock()
	defer r.Unlock()
	delete(r.permissions, p.getId())
}

// getPermissions returns a snapshot slice of all granted permissions.
func (r *stdRole) getPermissions() []Permission {
	r.RLock()
	defer r.RUnlock()
	snapshot := make([]Permission, 0, len(r.permissions))
	for _, p := range r.permissions {
		snapshot = append(snapshot, p)
	}
	return snapshot
}
// rolesPermit reports whether any of the user's roles permits p.
//
// The previous implementation ran the scan in a goroutine and returned
// the first value received from a channel. After a match, the goroutine
// kept running and eventually blocked forever on its final unbuffered
// send, leaking one goroutine per successful call. A plain loop returns
// the same answer with no goroutine at all.
func rolesPermit(u User, p Permission) bool {
	if len(u) == 0 {
		return false
	}
	for _, r := range u {
		if r.permit(p) {
			return true
		}
	}
	return false
}
|
package http
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"net"
"net/http/httputil"
"strconv"
"strings"
)
// Response represents a HTTP response.
type Response struct {
	StatusCode        int     // numeric status code, e.g. 200
	StatusDescription string  // reason phrase, e.g. "OK"
	Headers           Headers // parsed response headers
	Body              string  // message body; "" when none was read
	HTTPVer           string  // protocol version from the status line
}
// NewResponse returns a new Response created by reading the connection and
// parsing the HTTP response message: status line first, then headers,
// then (optionally) the body.
func NewResponse(conn net.Conn) (resp *Response, err error) {
	reader := bufio.NewReader(conn)
	httpVer, statusCode, statusDescription, err := readResponseStatus(reader)
	if err != nil {
		return &Response{}, err
	}
	responseHeaders, err := ReadHeaders(reader)
	if err != nil {
		return &Response{}, err
	}
	resp = &Response{
		StatusCode:        statusCode,
		StatusDescription: statusDescription,
		Headers:           responseHeaders,
		Body:              "",
		HTTPVer:           httpVer,
	}
	// Parse Body if exists: Content-Length wins; otherwise a chunked
	// Transfer-Encoding is decoded. With neither, Body stays "".
	if _, ok := responseHeaders["Content-Length"]; ok {
		contentLength, err := strconv.ParseInt(
			responseHeaders["Content-Length"],
			10,
			0,
		)
		if err != nil {
			return &Response{}, err
		}
		// Read exactly contentLength bytes; without the LimitReader,
		// ReadAll would block until the server closed the connection.
		body, err := ioutil.ReadAll(io.LimitReader(reader, contentLength))
		if err != nil {
			return &Response{}, err
		}
		resp.Body = string(body)
	} else if responseHeaders["Transfer-Encoding"] == "chunked" {
		// The chunked framing is stripped while reading, so the header
		// no longer describes the stored body and is dropped.
		delete(responseHeaders, "Transfer-Encoding")
		body, err := ioutil.ReadAll(httputil.NewChunkedReader(reader))
		if err != nil {
			return &Response{}, err
		}
		resp.Body = string(body)
	}
	return resp, nil
}
func readResponseStatus(reader *bufio.Reader) (
httpVer string,
statusCode int,
statusDescription string,
err error,
) {
statusLine, err := reader.ReadString('\n')
if err != nil {
return "", 0, "", err
}
trimmed := strings.TrimRight(statusLine, "\r\n")
status := strings.Split(trimmed, " ")
httpVer = status[0]
statusCodeInt64, err := strconv.ParseInt(status[1], 10, 0)
if err != nil {
return "", 0, "", err
}
statusCode = int(statusCodeInt64)
statusDescription = strings.Join(status[2:], " ")
return httpVer, statusCode, statusDescription, err
}
// String serializes the response back into HTTP wire format. An empty
// string is returned when any mandatory status-line field is unset.
func (resp *Response) String() string {
	if resp.HTTPVer == "" || resp.StatusCode == 0 ||
		resp.StatusDescription == "" {
		return ""
	}
	var b strings.Builder
	fmt.Fprintf(
		&b,
		"%s %d %s\r\n",
		resp.HTTPVer,
		resp.StatusCode,
		resp.StatusDescription,
	)
	for name, value := range resp.Headers {
		fmt.Fprintf(&b, "%s: %s\r\n", name, value)
	}
	b.WriteString("\r\n")
	b.WriteString(resp.Body)
	return b.String()
}
|
//author xinbing
//time 2018/9/11 11:40
//
package utils
// Resp is a generic API response envelope: a status code, a
// human-readable message, and an optional payload.
type Resp struct {
	Code int
	Msg  string
	Data interface{}
}

// Success returns a new success response (Code 0) carrying msg and data.
// The value receiver means the original Resp is left untouched.
func (p Resp) Success(msg string, data interface{}) *Resp {
	out := p
	out.Code, out.Msg, out.Data = 0, msg, data
	return &out
}

// Failed returns a new failure response (Code -1) carrying msg.
func (p Resp) Failed(msg string) *Resp {
	out := p
	out.Code, out.Msg = -1, msg
	return &out
}
package main
import (
"fmt"
)
// GetNext computes the KMP failure (prefix) function for source:
// next[i] is the length of the longest proper prefix of source[:i+1]
// that is also a suffix of it.
//
// The previous version never fell back through shorter borders on a
// mismatch, so it returned wrong values for patterns with repeated
// prefixes (e.g. it gave 0 instead of 2 at index 5 of "aabaaab").
func GetNext(source string) []int {
	next := make([]int, len(source))
	for i := 1; i < len(source); i++ {
		k := next[i-1]
		// Fall back through successively shorter borders until a
		// continuation matches or no border remains.
		for k > 0 && source[i] != source[k] {
			k = next[k-1]
		}
		if source[i] == source[k] {
			k++
		}
		next[i] = k
	}
	return next
}
// KMPSearch looks for search inside source and returns the 0-based
// index of the first match plus true, or (-1, false) when absent.
//
// NOTE(review): unlike textbook KMP, the failure table is built over
// the *source* text rather than the pattern, and the mismatch handling
// indexes next[sourceIndex-1] — a nonstandard variant; confirm its
// correctness beyond the inputs exercised in main before reuse.
// The Debug prints go to stdout on every comparison.
func KMPSearch(source string, search string) (int, bool) {
	sourceIndex, searchIndex := 0, 0
	sourceLen := len(source)
	searchLen := len(search)
	// A pattern longer than the text can never match.
	if searchLen > sourceLen {
		return -1, false
	}
	next := GetNext(source)
	fmt.Println("Assistant array:", next)
	for sourceIndex < sourceLen {
		if source[sourceIndex] == search[searchIndex] {
			fmt.Println("Debug match index:", sourceIndex, searchIndex)
			if searchIndex == searchLen-1 {
				// Whole pattern matched; convert the end position to
				// the match's start index.
				return sourceIndex + 1 - searchLen, true
			}
			sourceIndex++
			searchIndex++
		} else {
			fmt.Println("Debug mismatch index:", sourceIndex, searchIndex)
			if searchIndex > next[sourceIndex-1] {
				searchIndex = next[sourceIndex-1]
			} else {
				searchIndex = next[sourceIndex]
				sourceIndex++
			}
		}
	}
	return -1, false
}
// main demonstrates KMPSearch with one matching and one non-matching
// pattern against the same text.
func main() {
	fmt.Println("Welcome to the playground!")
	text := "abcdeabcdefgabcd"
	for _, pattern := range []string{"abcdefg", "abcdefgh"} {
		index, ok := KMPSearch(text, pattern)
		fmt.Println("Find sub string", ok, "with index:", index)
	}
}
// Welcome to the playground!
// Assistant array: [0 0 0 0 0 1 2 3 4 5 0 0 1 2 3 4]
// Debug match index: 0 0
// Debug match index: 1 1
// Debug match index: 2 2
// Debug match index: 3 3
// Debug match index: 4 4
// Debug mismatch index: 5 5
// Debug match index: 5 0
// Debug match index: 6 1
// Debug match index: 7 2
// Debug match index: 8 3
// Debug match index: 9 4
// Debug match index: 10 5
// Debug match index: 11 6
// Find sub string true with index: 5
// Assistant array: [0 0 0 0 0 1 2 3 4 5 0 0 1 2 3 4]
// Debug match index: 0 0
// Debug match index: 1 1
// Debug match index: 2 2
// Debug match index: 3 3
// Debug match index: 4 4
// Debug mismatch index: 5 5
// Debug match index: 5 0
// Debug match index: 6 1
// Debug match index: 7 2
// Debug match index: 8 3
// Debug match index: 9 4
// Debug match index: 10 5
// Debug match index: 11 6
// Debug mismatch index: 12 7
// Debug match index: 12 0
// Debug match index: 13 1
// Debug match index: 14 2
// Debug match index: 15 3
// Find sub string false with index: -1 |
package util
import (
"fmt"
git "gopkg.in/src-d/go-git.v4"
gitconfig "gopkg.in/src-d/go-git.v4/config"
)
// GetRepositoryRemotes returns a map containing the remote names and the URLs they
// point to.
func GetRepositoryRemotes(repo *git.Repository) (map[string][]string, error) {
	gitRemotes, err := repo.Remotes()
	if err != nil {
		return nil, fmt.Errorf("cannot list remotes: %v", err)
	}
	result := make(map[string][]string, len(gitRemotes))
	for _, remote := range gitRemotes {
		cfg := remote.Config()
		result[cfg.Name] = cfg.URLs
	}
	return result, nil
}
// UpdateRepositoryRemotes updates the URLs list for a specific remote.
//
// NOTE(review): the whole RemoteConfig entry is replaced with one that
// carries only Name and URLs, so any other settings previously stored
// on this remote (e.g. fetch refspecs) are discarded — confirm this is
// intended by callers.
func UpdateRepositoryRemotes(repo *git.Repository, name string, URLs []string) error {
	cfg, err := repo.Storer.Config()
	if err != nil {
		return err
	}
	cfg.Remotes[name] = &gitconfig.RemoteConfig{
		Name: name,
		URLs: URLs,
	}
	// Persist the modified configuration back to the repository store.
	return repo.Storer.SetConfig(cfg)
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
)
// main parses the command line and downloads the first URL argument.
func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) < 1 {
		// Println already appends a newline; the trailing " \n" in the
		// original produced a stray blank line (flagged by go vet).
		fmt.Println("Enter the url first!")
		os.Exit(1)
	}
	retrieve(args[0])
}
// retrieve downloads url, saves the body to index.html in the current
// directory, and then serves that directory on http://localhost:3000.
func retrieve(url string) {
	resp, err := http.Get(url)
	if err != nil {
		// Previously ignored: a failed request left resp nil and the
		// deferred Close dereferenced it.
		log.Fatalf("failed fetching %s: %s", url, err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("failed reading response body: %s", err)
	}
	file, err := os.Create("index.html")
	if err != nil {
		log.Fatalf("failed creating file %s", err)
	}
	defer file.Close()
	// The original checked a stale err after WriteString (its return
	// value was discarded), so write failures went unnoticed.
	if _, err := file.Write(body); err != nil {
		log.Fatalf("failed writing to file: %s", err)
	}
	fmt.Printf("Serving the file at : http://localhost:3000 \n")
	http.Handle("/", http.FileServer(http.Dir("./")))
	if err := http.ListenAndServe(":3000", nil); err != nil {
		log.Fatal(err)
	}
}
|
package oidc
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestValidateToken checks that a malformed (non-JWS) token is rejected
// with the go-jose parse error and an empty signature.
func TestValidateToken(t *testing.T) {
	sig, err := validateToken("none", nil)
	assert.Equal(t, "", sig)
	assert.EqualError(t, err, "square/go-jose: compact JWS format must have three parts")
}

// TestGetTokenSignature checks that a token with fewer than three
// dot-separated parts yields an error and no signature.
func TestGetTokenSignature(t *testing.T) {
	sig, err := getTokenSignature("abc.123")
	assert.Equal(t, "", sig)
	assert.EqualError(t, err, "header, body and signature must all be set")
}

// TestAssign checks that assign merges b's keys into a result seeded
// from a, with a's value winning on the conflicting key "c".
func TestAssign(t *testing.T) {
	a := map[string]any{
		"a": "valuea",
		"c": "valuea",
	}
	b := map[string]any{
		"b": "valueb",
		"c": "valueb",
	}
	c := assign(a, b)
	assert.Equal(t, "valuea", c["a"])
	assert.Equal(t, "valueb", c["b"])
	assert.Equal(t, "valuea", c["c"])
}
|
package firequeue_test
import (
"context"
"fmt"
"math/rand"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/firehose"
"github.com/aws/aws-sdk-go/service/firehose/firehoseiface"
"github.com/natureglobal/firequeue"
)
// testAWSError is a stub awserr.Error whose Code reports a response
// timeout, which the queue under test treats as retryable.
type testAWSError struct {
}

func (te *testAWSError) Error() string {
	return "retryable error!"
}

// Code returns request.ErrCodeResponseTimeout so retry logic engages.
func (te *testAWSError) Code() string {
	return request.ErrCodeResponseTimeout
}

func (te *testAWSError) Message() string {
	return "msg"
}

func (te *testAWSError) OrigErr() error {
	return fmt.Errorf("retryable error!")
}

// Compile-time check that testAWSError satisfies awserr.Error.
var _ awserr.Error = (*testAWSError)(nil)

// Seed the global PRNG so each test run injects a different failure
// pattern into testFirehose.
func init() {
	rand.Seed(time.Now().UTC().UnixNano())
}
// testFirehose fakes the Firehose API: roughly 20% of PutRecord calls
// fail with a retryable error; successes bump counter atomically.
type testFirehose struct {
	firehoseiface.FirehoseAPI
	counter uint32
}

func (tf *testFirehose) PutRecord(*firehose.PutRecordInput) (*firehose.PutRecordOutput, error) {
	if rand.Intn(10) < 2 {
		// Letting them fail on purpose with a certain probability
		return nil, &testAWSError{}
	}
	atomic.AddUint32(&tf.counter, 1)
	return &firehose.PutRecordOutput{}, nil
}
// TestQueue pushes 10000 records through the queue (serially and with
// 10 workers) against a flaky fake Firehose and verifies that every
// record is eventually delivered and the stats are consistent.
func TestQueue(t *testing.T) {
	testCases := []struct {
		name string
		opts []firequeue.Option
	}{{
		name: "serial",
	}, {
		name: "parallel 10",
		opts: []firequeue.Option{
			firequeue.Parallel(10),
		},
	}}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tf := &testFirehose{}
			q := firequeue.New(tf, tc.opts...)
			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
			defer cancel()
			errch := make(chan error)
			go func() {
				errch <- q.Loop(ctx)
			}()
			const trial = 10000
			go func() {
				for i := 0; i < trial; i++ {
					go q.Send(&firehose.PutRecordInput{})
				}
			}()
			if err := <-errch; err != nil {
				t.Errorf("error should be nil but: %s", err)
			}
			// counter is written with atomic.AddUint32 from the queue's
			// worker goroutines, so it must also be read atomically; the
			// original plain read was a data race under -race.
			if sent := atomic.LoadUint32(&tf.counter); sent != trial {
				t.Errorf("got: %d, expect: %d", sent, trial)
			}
			stats := q.Stats()
			// All sends must end in (retry) success with an empty queue.
			valid := func(s firequeue.Stats) bool {
				if s.GiveupError > 0 || s.UnretryableError > 0 || s.QueueFullError > 0 || s.QueueLength > 0 {
					return false
				}
				if s.Success+s.RetrySuccess != trial {
					return false
				}
				return s.Success > s.RetrySuccess
			}
			if !valid(stats) {
				t.Errorf("invalid stats: %+v", stats)
			}
		})
	}
}
// TestQueue_Loop verifies that starting Loop a second time on the same
// queue fails with an "already initialized" error.
func TestQueue_Loop(t *testing.T) {
	tf := &testFirehose{}
	q := firequeue.New(tf,
		firequeue.MaxQueueLength(100),
		firequeue.ErrorHandler(func(err error, r *firehose.PutRecordInput) {}))
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go q.Loop(ctx)
	// Give the first Loop a moment to mark the queue initialized.
	time.Sleep(50 * time.Millisecond)
	err := q.Loop(ctx)
	if err == nil || !strings.Contains(err.Error(), "already initialized") {
		t.Errorf("already initialized error should be occurred but: %s", err)
	}
}
// TestQueue_Send verifies that Send is rejected before Loop has been
// started.
func TestQueue_Send(t *testing.T) {
	tf := &testFirehose{}
	q := firequeue.New(tf)
	err := q.Send(nil)
	if err == nil || !strings.Contains(err.Error(), "loop has not yet started") {
		t.Errorf("loop has not yet started error should be occurred but: %s", err)
	}
}
|
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package localkube
import (
kubeproxy "k8s.io/kubernetes/cmd/kube-proxy/app"
"k8s.io/minikube/pkg/util"
"time"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
)
var (
	// MasqueradeBit is the iptables fwmark bit handed to kube-proxy.
	// NOTE(review): 14 appears to match kube-proxy's default — confirm.
	MasqueradeBit = int32(14)
	// OOMScoreAdj reuses kube-proxy's standard OOM score adjustment.
	OOMScoreAdj = int32(qos.KubeProxyOOMScoreAdj)
)
// NewProxyServer wraps kube-proxy in a localkube Server that is
// (re)started on the standard serverInterval.
func (lk LocalkubeServer) NewProxyServer() Server {
	return NewSimpleServer("proxy", serverInterval, StartProxyServer(lk), noop)
}
// StartProxyServer builds a kube-proxy configuration for the local API
// server and returns a closure that runs the proxy when invoked.
// Configuration errors are treated as fatal (panic) because the proxy
// cannot run without them.
func StartProxyServer(lk LocalkubeServer) func() error {
	// Prefer the insecure address when the apiserver exposes one.
	bindaddress := lk.APIServerAddress.String()
	if lk.APIServerInsecurePort != 0 {
		bindaddress = lk.APIServerInsecureAddress.String()
	}
	opts := kubeproxy.NewOptions()
	fg, err := lk.GetFeatureGates()
	if err != nil {
		panic(err)
	}
	config := &kubeproxyconfig.KubeProxyConfiguration{
		OOMScoreAdj: &OOMScoreAdj,
		ClientConnection: kubeproxyconfig.ClientConnectionConfiguration{
			Burst:          10,
			QPS:            5,
			KubeConfigFile: util.DefaultKubeConfigPath,
		},
		ConfigSyncPeriod: v1.Duration{Duration: 15 * time.Minute},
		IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
			MasqueradeBit: &MasqueradeBit,
			SyncPeriod:    v1.Duration{Duration: 30 * time.Second},
			MinSyncPeriod: v1.Duration{Duration: 5 * time.Second},
		},
		BindAddress:  bindaddress,
		Mode:         kubeproxyconfig.ProxyModeIPTables,
		FeatureGates: fg,
		// Disable the healthz check
		HealthzBindAddress: "",
	}
	if _, err := opts.ApplyDefaults(config); err != nil {
		panic(err)
	}
	// NOTE(review): &config is a **KubeProxyConfiguration here since
	// config is already a pointer — confirm SetExtraConfigForComponent
	// expects the double indirection.
	lk.SetExtraConfigForComponent("proxy", &config)
	opts.SetConfig(config)
	return func() error {
		return opts.Run()
	}
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"github.com/dnephin/configtf/docs"
"github.com/dnephin/dobi/config"
)
var (
	// basePath is the output directory for the generated config docs.
	basePath = "docs/gen/config/"
)
// write renders the documentation for source as reStructuredText and
// writes it to filepath with permissions 0644.
func write(filepath string, source interface{}) error {
	content, err := docs.Generate(source, docs.ReStructuredText)
	if err != nil {
		return err
	}
	buf := bytes.NewBufferString(content)
	return ioutil.WriteFile(filepath, buf.Bytes(), 0644)
}
// writeDocs renders one reference file per dobi config section into
// basePath, stopping at the first failure.
func writeDocs() error {
	for _, item := range []struct {
		filename string
		source   interface{}
	}{
		{"meta.rst", config.MetaConfig{}},
		{"alias.rst", config.AliasConfig{}},
		{"compose.rst", config.ComposeConfig{}},
		{"image.rst", config.ImageConfig{}},
		{"mount.rst", config.MountConfig{}},
		{"job.rst", config.JobConfig{}},
		{"env.rst", config.EnvConfig{}},
		{"annotationFields.rst", config.AnnotationFields{}},
	} {
		fmt.Printf("Generating doc %q\n", basePath+item.filename)
		if err := write(basePath+item.filename, item.source); err != nil {
			return err
		}
	}
	return nil
}
// main generates the configuration reference docs, exiting non-zero on
// failure.
func main() {
	err := writeDocs()
	if err == nil {
		return
	}
	fmt.Printf("Failed to generate docs: %s\n", err)
	os.Exit(1)
}
|
package aws
import "encoding/json"
// SetJSONMarshal swaps the package's JSON marshal function; this serves
// as a test seam for injecting failures or canned output.
func SetJSONMarshal(f func(interface{}) ([]byte, error)) {
	jsonMarshal = f
}

// ResetJSONMarshal restores the default encoding/json implementation.
func ResetJSONMarshal() {
	jsonMarshal = json.Marshal
}
|
package main
//This program finds the smallest positive integer that divides none of
//the input values: sort the input, then try each candidate i from 1 up
//to the maximum value, counting how many elements i fails to divide.
//If every candidate up to the maximum divides at least one element,
//the answer is max+1, which is larger than every input.
import (
"fmt"
"sort"
)
// main prints the smallest positive integer that divides none of the
// hard-coded input values, tracing the per-candidate counts as it goes.
func main() {
	values := []int{5, 3, 6, 7, 9}
	final := 0
	sort.Ints(values)
	max := values[len(values)-1]
	for i := 1; i <= max; i++ {
		// Count elements that i does NOT divide; stop at the first one
		// it does divide.
		count := 0
		for _, v := range values {
			if v%i == 0 {
				break
			}
			count++
		}
		fmt.Println("count", count, "i", i)
		if count == len(values) {
			final = i
			break
		}
	}
	// No candidate up to max worked: max+1 divides none of the inputs.
	if final == 0 {
		final = max + 1
	}
	fmt.Println(final)
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"context"
"fmt"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"go.uber.org/zap"
)
// ppdSolver implements the predicate-push-down logical optimization rule.
type ppdSolver struct{}

// exprPrefixAdder is the wrapper struct to add tidb_shard(x) = val for `OrigConds`
// `cols` is the index columns for a unique shard index
type exprPrefixAdder struct {
	sctx      sessionctx.Context
	OrigConds []expression.Expression
	cols      []*expression.Column
	lengths   []int
}

// optimize pushes predicates as far down the plan tree as possible,
// starting from the root with an empty predicate list.
func (*ppdSolver) optimize(_ context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) {
	_, p := lp.PredicatePushDown(nil, opt)
	return p, nil
}
// addSelection attaches conditions to p's chIdx-th child by inserting a
// LogicalSelection between them, collapsing to a table dual when the
// conditions are provably false/null and dropping always-true ones.
func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expression, chIdx int, opt *logicalOptimizeOp) {
	if len(conditions) == 0 {
		p.Children()[chIdx] = child
		return
	}
	conditions = expression.PropagateConstant(p.SCtx(), conditions)
	// Return table dual when filter is constant false or null.
	dual := Conds2TableDual(child, conditions)
	if dual != nil {
		p.Children()[chIdx] = dual
		appendTableDualTraceStep(child, dual, conditions, opt)
		return
	}
	// Always-true conditions would only produce a no-op selection.
	conditions = DeleteTrueExprs(p, conditions)
	if len(conditions) == 0 {
		p.Children()[chIdx] = child
		return
	}
	selection := LogicalSelection{Conditions: conditions}.Init(p.SCtx(), p.SelectBlockOffset())
	selection.SetChildren(child)
	p.Children()[chIdx] = selection
	appendAddSelectionTraceStep(p, child, selection, opt)
}
// PredicatePushDown implements LogicalPlan interface.
// The base implementation pushes every predicate towards the first
// child and re-attaches whatever could not be pushed as a selection
// above that child, so no predicates are returned to the caller.
func (p *baseLogicalPlan) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	if len(p.children) == 0 {
		// Leaf plan: nothing below to absorb the predicates.
		return predicates, p.self
	}
	child := p.children[0]
	rest, newChild := child.PredicatePushDown(predicates, opt)
	addSelection(p.self, newChild, rest, 0, opt)
	return nil, p.self
}
// splitSetGetVarFunc partitions filters into those that may be pushed
// down and those that must not be, because they contain SET_VAR/GET_VAR
// calls (per expression.HasGetSetVarFunc).
func splitSetGetVarFunc(filters []expression.Expression) ([]expression.Expression, []expression.Expression) {
	pushable := make([]expression.Expression, 0, len(filters))
	blocked := make([]expression.Expression, 0, len(filters))
	for _, f := range filters {
		if expression.HasGetSetVarFunc(f) {
			blocked = append(blocked, f)
			continue
		}
		pushable = append(pushable, f)
	}
	return pushable, blocked
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// The selection merges its own pushable conditions with the incoming
// predicates and hands them to its child; if anything remains it stays
// a selection (possibly collapsed to a dual), otherwise it removes
// itself from the plan and returns its child.
func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	predicates = DeleteTrueExprs(p, predicates)
	p.Conditions = DeleteTrueExprs(p, p.Conditions)
	var child LogicalPlan
	var retConditions []expression.Expression
	var originConditions []expression.Expression
	// SET_VAR/GET_VAR conditions must stay here for evaluation-order reasons.
	canBePushDown, canNotBePushDown := splitSetGetVarFunc(p.Conditions)
	originConditions = canBePushDown
	retConditions, child = p.children[0].PredicatePushDown(append(canBePushDown, predicates...), opt)
	retConditions = append(retConditions, canNotBePushDown...)
	if len(retConditions) > 0 {
		p.Conditions = expression.PropagateConstant(p.SCtx(), retConditions)
		// Return table dual when filter is constant false or null.
		dual := Conds2TableDual(p, p.Conditions)
		if dual != nil {
			appendTableDualTraceStep(p, dual, p.Conditions, opt)
			return nil, dual
		}
		return nil, p
	}
	// Everything was pushed below: this selection is now redundant.
	appendSelectionPredicatePushDownTraceStep(p, originConditions, opt)
	return nil, child
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// The predicates are both forwarded to the child and copied into the
// union scan's own condition list.
func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	retainedPredicates, _ := p.children[0].PredicatePushDown(predicates, opt)
	p.conditions = make([]expression.Expression, 0, len(predicates))
	p.conditions = append(p.conditions, predicates...)
	// The conditions in UnionScan is only used for added rows, so parent Selection should not be removed.
	return retainedPredicates, p
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// At the data source the predicates are split into those the storage
// client can evaluate (kept in pushedDownConds) and the remainder,
// which is returned for the parent to handle.
func (ds *DataSource) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	predicates = expression.PropagateConstant(ds.SCtx(), predicates)
	predicates = DeleteTrueExprs(ds, predicates)
	// Add tidb_shard() prefix to the condtion for shard index in some scenarios
	// TODO: remove it to the place building logical plan
	predicates = ds.AddPrefix4ShardIndexes(ds.SCtx(), predicates)
	ds.allConds = predicates
	ds.pushedDownConds, predicates = expression.PushDownExprs(ds.SCtx().GetSessionVars().StmtCtx, predicates, ds.SCtx().GetClient(), kv.UnSpecified)
	appendDataSourcePredicatePushDownTraceStep(ds, opt)
	return predicates, ds
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// A dual table produces no real rows, so all predicates are returned
// unchanged to the caller.
func (p *LogicalTableDual) PredicatePushDown(predicates []expression.Expression, _ *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	return predicates, p
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Per join type it decides which predicates may move to the left child
// (leftCond), to the right child (rightCond), and which must stay above the
// join (ret). Conditions that cannot be absorbed by a child are re-attached
// via addSelection. The whole filter may collapse to a TableDual when it is
// constant false/null.
func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) {
	// Try to rewrite an outer join into an inner join first; that widens what
	// the switch below may push.
	simplifyOuterJoin(p, predicates)
	var equalCond []*expression.ScalarFunction
	var leftPushCond, rightPushCond, otherCond, leftCond, rightCond []expression.Expression
	switch p.JoinType {
	case LeftOuterJoin, LeftOuterSemiJoin, AntiLeftOuterSemiJoin:
		// Left side is the outer side here: where-conditions may be pushed
		// left only; join (ON) conditions may be pushed right only.
		predicates = p.outerJoinPropConst(predicates)
		dual := Conds2TableDual(p, predicates)
		if dual != nil {
			appendTableDualTraceStep(p, dual, predicates, opt)
			return ret, dual
		}
		// Handle where conditions
		predicates = expression.ExtractFiltersFromDNFs(p.SCtx(), predicates)
		// Only derive left where condition, because right where condition cannot be pushed down
		equalCond, leftPushCond, rightPushCond, otherCond = p.extractOnCondition(predicates, true, false)
		leftCond = leftPushCond
		// Handle join conditions, only derive right join condition, because left join condition cannot be pushed down
		_, derivedRightJoinCond := DeriveOtherConditions(
			p, p.children[0].Schema(), p.children[1].Schema(), false, true)
		rightCond = append(p.RightConditions, derivedRightJoinCond...)
		p.RightConditions = nil
		ret = append(expression.ScalarFuncs2Exprs(equalCond), otherCond...)
		ret = append(ret, rightPushCond...)
	case RightOuterJoin:
		// Mirror image of the left-outer case above.
		predicates = p.outerJoinPropConst(predicates)
		dual := Conds2TableDual(p, predicates)
		if dual != nil {
			appendTableDualTraceStep(p, dual, predicates, opt)
			return ret, dual
		}
		// Handle where conditions
		predicates = expression.ExtractFiltersFromDNFs(p.SCtx(), predicates)
		// Only derive right where condition, because left where condition cannot be pushed down
		equalCond, leftPushCond, rightPushCond, otherCond = p.extractOnCondition(predicates, false, true)
		rightCond = rightPushCond
		// Handle join conditions, only derive left join condition, because right join condition cannot be pushed down
		derivedLeftJoinCond, _ := DeriveOtherConditions(
			p, p.children[0].Schema(), p.children[1].Schema(), true, false)
		leftCond = append(p.LeftConditions, derivedLeftJoinCond...)
		p.LeftConditions = nil
		ret = append(expression.ScalarFuncs2Exprs(equalCond), otherCond...)
		ret = append(ret, leftPushCond...)
	case SemiJoin, InnerJoin:
		// Inner/semi joins allow mixing ON conditions and where-conditions
		// freely, so pool everything and re-classify.
		tempCond := make([]expression.Expression, 0, len(p.LeftConditions)+len(p.RightConditions)+len(p.EqualConditions)+len(p.OtherConditions)+len(predicates))
		tempCond = append(tempCond, p.LeftConditions...)
		tempCond = append(tempCond, p.RightConditions...)
		tempCond = append(tempCond, expression.ScalarFuncs2Exprs(p.EqualConditions)...)
		tempCond = append(tempCond, p.OtherConditions...)
		tempCond = append(tempCond, predicates...)
		tempCond = expression.ExtractFiltersFromDNFs(p.SCtx(), tempCond)
		tempCond = expression.PropagateConstant(p.SCtx(), tempCond)
		// Return table dual when filter is constant false or null.
		dual := Conds2TableDual(p, tempCond)
		if dual != nil {
			appendTableDualTraceStep(p, dual, tempCond, opt)
			return ret, dual
		}
		equalCond, leftPushCond, rightPushCond, otherCond = p.extractOnCondition(tempCond, true, true)
		p.LeftConditions = nil
		p.RightConditions = nil
		p.EqualConditions = equalCond
		p.OtherConditions = otherCond
		leftCond = leftPushCond
		rightCond = rightPushCond
	case AntiSemiJoin:
		predicates = expression.PropagateConstant(p.SCtx(), predicates)
		// Return table dual when filter is constant false or null.
		dual := Conds2TableDual(p, predicates)
		if dual != nil {
			appendTableDualTraceStep(p, dual, predicates, opt)
			return ret, dual
		}
		// `predicates` should only contain left conditions or constant filters.
		_, leftPushCond, rightPushCond, _ = p.extractOnCondition(predicates, true, true)
		// Do not derive `is not null` for anti join, since it may cause wrong results.
		// For example:
		// `select * from t t1 where t1.a not in (select b from t t2)` does not imply `t2.b is not null`,
		// `select * from t t1 where t1.a not in (select a from t t2 where t1.b = t2.b` does not imply `t1.b is not null`,
		// `select * from t t1 where not exists (select * from t t2 where t2.a = t1.a)` does not imply `t1.a is not null`,
		leftCond = leftPushCond
		rightCond = append(p.RightConditions, rightPushCond...)
		p.RightConditions = nil
	}
	leftCond = expression.RemoveDupExprs(p.SCtx(), leftCond)
	rightCond = expression.RemoveDupExprs(p.SCtx(), rightCond)
	// Recurse into both children; whatever they cannot absorb becomes a new
	// Selection directly above them.
	leftRet, lCh := p.children[0].PredicatePushDown(leftCond, opt)
	rightRet, rCh := p.children[1].PredicatePushDown(rightCond, opt)
	addSelection(p, lCh, leftRet, 0, opt)
	addSelection(p, rCh, rightRet, 1, opt)
	p.updateEQCond()
	buildKeyInfo(p)
	return ret, p.self
}
// updateEQCond will extract the arguments of a equal condition that connect two expressions.
// Step 1 moves plain cross-child equalities from OtherConditions into
// EqualConditions (inserting projections when a side is not a bare column);
// step 2 optionally does the same for null-aware equalities (NAAJ).
func (p *LogicalJoin) updateEQCond() {
	lChild, rChild := p.children[0], p.children[1]
	var lKeys, rKeys []expression.Expression
	var lNAKeys, rNAKeys []expression.Expression
	// We need two steps here:
	// step1: try best to extract normal EQ condition from OtherCondition to join EqualConditions.
	// Iterate backwards so removing element i does not disturb unvisited indexes.
	for i := len(p.OtherConditions) - 1; i >= 0; i-- {
		need2Remove := false
		if eqCond, ok := p.OtherConditions[i].(*expression.ScalarFunction); ok && eqCond.FuncName.L == ast.EQ {
			// If it is a column equal condition converted from `[not] in (subq)`, do not move it
			// to EqualConditions, and keep it in OtherConditions. Reference comments in `extractOnCondition`
			// for detailed reasons.
			if expression.IsEQCondFromIn(eqCond) {
				continue
			}
			lExpr, rExpr := eqCond.GetArgs()[0], eqCond.GetArgs()[1]
			// Accept the equality in either orientation; keys are recorded as
			// (left-child expr, right-child expr).
			if expression.ExprFromSchema(lExpr, lChild.Schema()) && expression.ExprFromSchema(rExpr, rChild.Schema()) {
				lKeys = append(lKeys, lExpr)
				rKeys = append(rKeys, rExpr)
				need2Remove = true
			} else if expression.ExprFromSchema(lExpr, rChild.Schema()) && expression.ExprFromSchema(rExpr, lChild.Schema()) {
				lKeys = append(lKeys, rExpr)
				rKeys = append(rKeys, lExpr)
				need2Remove = true
			}
		}
		if need2Remove {
			p.OtherConditions = append(p.OtherConditions[:i], p.OtherConditions[i+1:]...)
		}
	}
	// eg: explain select * from t1, t3 where t1.a+1 = t3.a;
	// tidb only accept the join key in EqualCondition as a normal column (join OP take granted for that)
	// so once we found the left and right children's schema can supply the all columns in complicated EQ condition that used by left/right key.
	// we will add a layer of projection here to convert the complicated expression of EQ's left or right side to be a normal column.
	adjustKeyForm := func(leftKeys, rightKeys []expression.Expression, isNA bool) {
		if len(leftKeys) > 0 {
			// A projection is needed on a side iff at least one of its keys is
			// not already a bare column.
			needLProj, needRProj := false, false
			for i := range leftKeys {
				_, lOk := leftKeys[i].(*expression.Column)
				_, rOk := rightKeys[i].(*expression.Column)
				needLProj = needLProj || !lOk
				needRProj = needRProj || !rOk
			}
			var lProj, rProj *LogicalProjection
			if needLProj {
				lProj = p.getProj(0)
			}
			if needRProj {
				rProj = p.getProj(1)
			}
			for i := range leftKeys {
				lKey, rKey := leftKeys[i], rightKeys[i]
				if lProj != nil {
					lKey = lProj.appendExpr(lKey)
				}
				if rProj != nil {
					rKey = rProj.appendExpr(rKey)
				}
				// Rebuild the equality over the (possibly projected) columns.
				eqCond := expression.NewFunctionInternal(p.SCtx(), ast.EQ, types.NewFieldType(mysql.TypeTiny), lKey, rKey)
				if isNA {
					p.NAEQConditions = append(p.NAEQConditions, eqCond.(*expression.ScalarFunction))
				} else {
					p.EqualConditions = append(p.EqualConditions, eqCond.(*expression.ScalarFunction))
				}
			}
		}
	}
	adjustKeyForm(lKeys, rKeys, false)
	// Step2: when step1 is finished, then we can determine whether we need to extract NA-EQ from OtherCondition to NAEQConditions.
	// when there are still no EqualConditions, let's try to be a NAAJ.
	// todo: by now, when there is already a normal EQ condition, just keep NA-EQ as other-condition filters above it.
	// eg: select * from stu where stu.name not in (select name from exam where exam.stu_id = stu.id);
	// combination of <stu.name NAEQ exam.name> and <exam.stu_id EQ stu.id> for join key is little complicated for now.
	canBeNAAJ := (p.JoinType == AntiSemiJoin || p.JoinType == AntiLeftOuterSemiJoin) && len(p.EqualConditions) == 0
	if canBeNAAJ && p.SCtx().GetSessionVars().OptimizerEnableNAAJ {
		var otherCond expression.CNFExprs
		for i := 0; i < len(p.OtherConditions); i++ {
			eqCond, ok := p.OtherConditions[i].(*expression.ScalarFunction)
			if ok && eqCond.FuncName.L == ast.EQ && expression.IsEQCondFromIn(eqCond) {
				// here must be a EQCondFromIn.
				lExpr, rExpr := eqCond.GetArgs()[0], eqCond.GetArgs()[1]
				if expression.ExprFromSchema(lExpr, lChild.Schema()) && expression.ExprFromSchema(rExpr, rChild.Schema()) {
					lNAKeys = append(lNAKeys, lExpr)
					rNAKeys = append(rNAKeys, rExpr)
				} else if expression.ExprFromSchema(lExpr, rChild.Schema()) && expression.ExprFromSchema(rExpr, lChild.Schema()) {
					lNAKeys = append(lNAKeys, rExpr)
					rNAKeys = append(rNAKeys, lExpr)
				}
				continue
			}
			// Everything that is not an in-subquery equality stays behind.
			otherCond = append(otherCond, p.OtherConditions[i])
		}
		p.OtherConditions = otherCond
		// here is for cases like: select (a+1, b*3) not in (select a,b from t2) from t1.
		adjustKeyForm(lNAKeys, rNAKeys, true)
	}
}
// appendExpr adds expr to the projection's output list and returns a fresh
// column that refers to it. A bare column is returned as-is without touching
// the projection.
func (p *LogicalProjection) appendExpr(expr expression.Expression) *expression.Column {
	if col, ok := expr.(*expression.Column); ok {
		return col
	}
	// Rewrite expr in terms of the projection's input before storing it.
	substituted := expression.ColumnSubstitute(expr, p.schema, p.Exprs)
	p.Exprs = append(p.Exprs, substituted)
	outCol := &expression.Column{
		UniqueID: p.SCtx().GetSessionVars().AllocPlanColumnID(),
		RetType:  substituted.GetType().Clone(),
	}
	outCol.SetCoercibility(substituted.Coercibility())
	outCol.SetRepertoire(substituted.Repertoire())
	p.schema.Append(outCol)
	// reset ParseToJSONFlag in order to keep the flag away from json column
	if outCol.GetType().GetType() == mysql.TypeJSON {
		outCol.GetType().DelFlag(mysql.ParseToJSONFlag)
	}
	return outCol
}
// getProj returns the idx-th child as a LogicalProjection, inserting a new
// pass-through projection between the join and that child when it is not one
// already.
func (p *LogicalJoin) getProj(idx int) *LogicalProjection {
	child := p.children[idx]
	if existing, ok := child.(*LogicalProjection); ok {
		return existing
	}
	newProj := LogicalProjection{Exprs: make([]expression.Expression, 0, child.Schema().Len())}.Init(p.SCtx(), child.SelectBlockOffset())
	// Project every input column through unchanged.
	for _, col := range child.Schema().Columns {
		newProj.Exprs = append(newProj.Exprs, col)
	}
	newProj.SetSchema(child.Schema().Clone())
	newProj.SetChildren(child)
	p.children[idx] = newProj
	return newProj
}
// simplifyOuterJoin transforms "LeftOuterJoin/RightOuterJoin" to "InnerJoin" if possible.
func simplifyOuterJoin(p *LogicalJoin, predicates []expression.Expression) {
	switch p.JoinType {
	case LeftOuterJoin, RightOuterJoin, InnerJoin:
	default:
		return
	}
	innerTable, outerTable := p.children[0], p.children[1]
	if p.JoinType == LeftOuterJoin {
		innerTable, outerTable = outerTable, innerTable
	}
	// Recursively simplify any outer joins embedded in either child first.
	if childJoin, ok := innerTable.(*LogicalJoin); ok {
		simplifyOuterJoin(childJoin, predicates)
	}
	if childJoin, ok := outerTable.(*LogicalJoin); ok {
		simplifyOuterJoin(childJoin, predicates)
	}
	if p.JoinType == InnerJoin {
		return
	}
	// The outer join itself can become an inner join as soon as one predicate
	// is null-rejecting on the inner side: padded NULL rows would be filtered
	// out anyway.
	for _, expr := range predicates {
		// Skip predicates that only reference the outer table's schema; they
		// say nothing about the inner side.
		if expression.ExprFromSchema(expr, outerTable.Schema()) {
			continue
		}
		if isNullRejected(p.SCtx(), innerTable.Schema(), expr) {
			p.JoinType = InnerJoin
			return
		}
	}
}
// isNullRejected check whether a condition is null-rejected
// A condition would be null-rejected in one of following cases:
// If it is a predicate containing a reference to an inner table that evaluates to UNKNOWN or FALSE when one of its arguments is NULL.
// If it is a conjunction containing a null-rejected condition as a conjunct.
// If it is a disjunction of null-rejected conditions.
func isNullRejected(ctx sessionctx.Context, schema *expression.Schema, expr expression.Expression) bool {
	// Normalize NOTs downward first so the evaluation below sees a canonical form.
	expr = expression.PushDownNot(ctx, expr)
	// A NOT that still wraps a sub-expression defeats this analysis; answer
	// conservatively.
	if expression.ContainOuterNot(expr) {
		return false
	}
	// Flag the statement context while the check runs; restore it on every
	// exit path via defer.
	sc := ctx.GetSessionVars().StmtCtx
	sc.InNullRejectCheck = true
	defer func() {
		sc.InNullRejectCheck = false
	}()
	// One null-rejected conjunct is enough for the whole conjunction.
	for _, cond := range expression.SplitCNFItems(expr) {
		// Evaluate the conjunct with the schema's columns replaced by NULL.
		result := expression.EvaluateExprWithNull(ctx, schema, cond)
		x, ok := result.(*expression.Constant)
		if !ok {
			// Did not fold to a constant: this conjunct proves nothing.
			continue
		}
		// A NULL (UNKNOWN) or FALSE result means rows with NULL inner-side
		// columns are filtered out, i.e. the condition is null-rejected.
		if x.Value.IsNull() {
			return true
		} else if isTrue, err := x.Value.ToBool(sc); err == nil && isTrue == 0 {
			return true
		}
	}
	return false
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Expand changes the nullability of grouping columns, so no predicate may
// cross it: filters above an Expand are either aggregate-related or grouping-
// set-related, and neither kind is safe to push. Everything is therefore kept
// above the operator (a Selection is built for it by the caller if needed).
func (p *LogicalExpand) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) {
	remained, child := p.baseLogicalPlan.PredicatePushDown(nil, opt)
	ret = append(remained, predicates...)
	return ret, child
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Predicates are rewritten through the projection's expressions; those that
// substitute cleanly are pushed to the child, the rest stay above.
func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) {
	// A projection that assigns user variables must run for every input row,
	// so nothing may be pushed through it.
	for _, expr := range p.Exprs {
		if expression.HasAssignSetVarFunc(expr) {
			_, child := p.baseLogicalPlan.PredicatePushDown(nil, opt)
			return predicates, child
		}
	}
	// Pushing into a TableDual is pointless; keep the predicates here.
	if len(p.children) == 1 {
		if _, isDual := p.children[0].(*LogicalTableDual); isDual {
			return predicates, p
		}
	}
	pushable := make([]expression.Expression, 0, len(predicates))
	blocked := make([]expression.Expression, 0, len(predicates))
	for _, cond := range predicates {
		substituted, hasFailed, rewritten := expression.ColumnSubstituteImpl(cond, p.Schema(), p.Exprs, true)
		if substituted && !hasFailed && !expression.HasGetSetVarFunc(rewritten) {
			pushable = append(pushable, rewritten)
		} else {
			blocked = append(blocked, cond)
		}
	}
	remained, child := p.baseLogicalPlan.PredicatePushDown(pushable, opt)
	return append(remained, blocked...), child
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Every branch of the union receives its own copy of the predicates; whatever
// a branch cannot absorb becomes a Selection directly above it, so nothing is
// returned upward.
func (p *LogicalUnionAll) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) {
	for i, child := range p.children {
		// Copy, since a child may rewrite the slice during its own push-down.
		dup := append(make([]expression.Expression, 0, len(predicates)), predicates...)
		leftover, newChild := child.PredicatePushDown(dup, opt)
		addSelection(p, newChild, leftover, i, opt)
	}
	return nil, p
}
// pushDownPredicatesForAggregation split a condition to two parts, can be pushed-down or can not be pushed-down below aggregation.
func (la *LogicalAggregation) pushDownPredicatesForAggregation(cond expression.Expression, groupByColumns *expression.Schema, exprsOriginal []expression.Expression) ([]expression.Expression, []expression.Expression) {
	var condsToPush, ret []expression.Expression
	switch cond.(type) {
	case *expression.Constant:
		// Consider SQL list "select sum(b) from t group by a having 1=0". "1=0" is a constant predicate which should be
		// retained and pushed down at the same time. Because we will get a wrong query result that contains one column
		// with value 0 rather than an empty query result.
		condsToPush = append(condsToPush, cond)
		ret = append(ret, cond)
	case *expression.ScalarFunction:
		// Pushable iff every referenced column is a group-by column.
		allInGroupBy := true
		for _, col := range expression.ExtractColumns(cond) {
			if !groupByColumns.Contains(col) {
				allInGroupBy = false
				break
			}
		}
		if allInGroupBy {
			// Rewrite in terms of the aggregation's input before pushing.
			condsToPush = append(condsToPush, expression.ColumnSubstitute(cond, la.Schema(), exprsOriginal))
		} else {
			ret = append(ret, cond)
		}
	default:
		ret = append(ret, cond)
	}
	return condsToPush, ret
}
// pushDownCNFPredicatesForAggregation split a CNF condition to two parts, can be pushed-down or can not be pushed-down below aggregation.
// It would consider the CNF.
// For example,
// (a > 1 or avg(b) > 1) and (a < 3), and `avg(b) > 1` can't be pushed-down.
// Then condsToPush: a < 3, ret: a > 1 or avg(b) > 1
func (la *LogicalAggregation) pushDownCNFPredicatesForAggregation(cond expression.Expression, groupByColumns *expression.Schema, exprsOriginal []expression.Expression) ([]expression.Expression, []expression.Expression) {
	items := expression.SplitCNFItems(cond)
	// Not a conjunction: hand the single item to the plain handler.
	if len(items) == 1 {
		return la.pushDownPredicatesForAggregation(items[0], groupByColumns, exprsOriginal)
	}
	var condsToPush, ret []expression.Expression
	for _, item := range items {
		push, keep := la.pushDownDNFPredicatesForAggregation(item, groupByColumns, exprsOriginal)
		if len(push) > 0 {
			condsToPush = append(condsToPush, expression.ComposeDNFCondition(la.SCtx(), push...))
		}
		if len(keep) > 0 {
			ret = append(ret, expression.ComposeDNFCondition(la.SCtx(), keep...))
		}
	}
	return condsToPush, ret
}
// pushDownDNFPredicatesForAggregation split a DNF condition to two parts, can be pushed-down or can not be pushed-down below aggregation.
// It would consider the DNF.
// For example,
// (a > 1 and avg(b) > 1) or (a < 3), and `avg(b) > 1` can't be pushed-down.
// Then condsToPush: (a < 3) and (a > 1), ret: (a > 1 and avg(b) > 1) or (a < 3)
func (la *LogicalAggregation) pushDownDNFPredicatesForAggregation(cond expression.Expression, groupByColumns *expression.Schema, exprsOriginal []expression.Expression) ([]expression.Expression, []expression.Expression) {
	//nolint: prealloc
	var condsToPush []expression.Expression
	var ret []expression.Expression
	subDNFItem := expression.SplitDNFItems(cond)
	// Not a disjunction: hand the single item to the plain handler.
	if len(subDNFItem) == 1 {
		return la.pushDownPredicatesForAggregation(subDNFItem[0], groupByColumns, exprsOriginal)
	}
	for _, item := range subDNFItem {
		condsToPushForItem, retForItem := la.pushDownCNFPredicatesForAggregation(item, groupByColumns, exprsOriginal)
		// If one disjunct contributes nothing pushable, no relaxed form of the
		// DNF can be pushed: rows matching only that disjunct would be
		// filtered out below the aggregation. Keep the whole condition above.
		if len(condsToPushForItem) <= 0 {
			return nil, []expression.Expression{cond}
		}
		condsToPush = append(condsToPush, expression.ComposeCNFCondition(la.SCtx(), condsToPushForItem...))
		if len(retForItem) > 0 {
			ret = append(ret, expression.ComposeCNFCondition(la.SCtx(), retForItem...))
		}
	}
	if len(ret) == 0 {
		// All the condition can be pushed down.
		return []expression.Expression{cond}, nil
	}
	dnfPushDownCond := expression.ComposeDNFCondition(la.SCtx(), condsToPush...)
	// Some condition can't be pushed down, we need to keep all the condition.
	return []expression.Expression{dnfPushDownCond}, []expression.Expression{cond}
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Each predicate is split into a part that may move below the aggregation and
// a part that must stay above it as the returned leftover.
func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) {
	// First argument of every aggregate function, used to rewrite pushed
	// conditions in terms of the aggregation's input.
	exprsOriginal := make([]expression.Expression, 0, len(la.AggFuncs))
	for _, fun := range la.AggFuncs {
		exprsOriginal = append(exprsOriginal, fun.Args[0])
	}
	groupByColumns := expression.NewSchema(la.GetGroupByCols()...)
	// It's almost the same as pushDownCNFPredicatesForAggregation, except that the condition is a slice.
	var condsToPush []expression.Expression
	for _, cond := range predicates {
		push, keep := la.pushDownDNFPredicatesForAggregation(cond, groupByColumns, exprsOriginal)
		condsToPush = append(condsToPush, push...)
		ret = append(ret, keep...)
	}
	la.baseLogicalPlan.PredicatePushDown(condsToPush, opt)
	return ret, la
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Filtering below a Limit would change which rows the limit keeps, so every
// predicate is kept above it; the child is still visited with an empty set.
func (p *LogicalLimit) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	// Limit forbids any condition to push down.
	p.baseLogicalPlan.PredicatePushDown(nil, opt)
	return predicates, p
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Filtering below MaxOneRow would change its row-count check, so every
// predicate is kept above it; the child is still visited with an empty set.
func (p *LogicalMaxOneRow) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	// MaxOneRow forbids any condition to push down.
	p.baseLogicalPlan.PredicatePushDown(nil, opt)
	return predicates, p
}
// DeriveOtherConditions given a LogicalJoin, check the OtherConditions to see if we can derive more
// conditions for left/right child pushdown.
// Two kinds of conditions are derived per requested side: a relaxed filter
// extracted from DNF structure, and a `col is not null` filter.
func DeriveOtherConditions(
	p *LogicalJoin, leftSchema *expression.Schema, rightSchema *expression.Schema,
	deriveLeft bool, deriveRight bool) (
	leftCond []expression.Expression, rightCond []expression.Expression) {
	isOuterSemi := (p.JoinType == LeftOuterSemiJoin) || (p.JoinType == AntiLeftOuterSemiJoin)
	for _, expr := range p.OtherConditions {
		if deriveLeft {
			// A weakened-but-implied filter over only the left schema.
			leftRelaxedCond := expression.DeriveRelaxedFiltersFromDNF(expr, leftSchema)
			if leftRelaxedCond != nil {
				leftCond = append(leftCond, leftRelaxedCond)
			}
			notNullExpr := deriveNotNullExpr(expr, leftSchema)
			if notNullExpr != nil {
				leftCond = append(leftCond, notNullExpr)
			}
		}
		if deriveRight {
			rightRelaxedCond := expression.DeriveRelaxedFiltersFromDNF(expr, rightSchema)
			if rightRelaxedCond != nil {
				rightCond = append(rightCond, rightRelaxedCond)
			}
			// For LeftOuterSemiJoin and AntiLeftOuterSemiJoin, we can actually generate
			// `col is not null` according to expressions in `OtherConditions` now, but we
			// are putting column equal condition converted from `in (subq)` into
			// `OtherConditions`(@sa https://github.com/pingcap/tidb/pull/9051), then it would
			// cause wrong results, so we disable this optimization for outer semi joins now.
			// TODO enable this optimization for outer semi joins later by checking whether
			// condition in `OtherConditions` is converted from `in (subq)`.
			if isOuterSemi {
				continue
			}
			notNullExpr := deriveNotNullExpr(expr, rightSchema)
			if notNullExpr != nil {
				rightCond = append(rightCond, notNullExpr)
			}
		}
	}
	return
}
// deriveNotNullExpr generates a new expression `not(isnull(col))` given `col1 op col2`,
// in which `col` is in specified schema. Caller guarantees that only one of `col1` or
// `col2` is in schema. Returns nil when no not-null filter can be derived.
func deriveNotNullExpr(expr expression.Expression, schema *expression.Schema) expression.Expression {
	// Only binary scalar functions over two bare columns are considered.
	binop, ok := expr.(*expression.ScalarFunction)
	if !ok || len(binop.GetArgs()) != 2 {
		return nil
	}
	ctx := binop.GetCtx()
	arg0, lOK := binop.GetArgs()[0].(*expression.Column)
	arg1, rOK := binop.GetArgs()[1].(*expression.Column)
	if !lOK || !rOK {
		return nil
	}
	// Pick whichever argument belongs to the given schema.
	childCol := schema.RetrieveColumn(arg0)
	if childCol == nil {
		childCol = schema.RetrieveColumn(arg1)
	}
	// Defensive: if neither column is in the schema (the caller's guarantee is
	// violated), bail out instead of dereferencing nil below.
	if childCol == nil {
		return nil
	}
	// Derive the filter only for null-rejecting conditions, and skip columns
	// already declared NOT NULL.
	if isNullRejected(ctx, schema, expr) && !mysql.HasNotNullFlag(childCol.RetType.GetFlag()) {
		return expression.BuildNotNullExpr(ctx, childCol)
	}
	return nil
}
// Conds2TableDual builds a LogicalTableDual if cond is constant false or null.
func Conds2TableDual(p LogicalPlan, conds []expression.Expression) LogicalPlan {
	// Only a single constant condition is foldable here.
	if len(conds) != 1 {
		return nil
	}
	con, ok := conds[0].(*expression.Constant)
	if !ok {
		return nil
	}
	// Don't fold when the "constant" may vary across plan-cache executions.
	if expression.MaybeOverOptimized4PlanCache(p.SCtx(), []expression.Expression{con}) {
		return nil
	}
	sc := p.SCtx().GetSessionVars().StmtCtx
	isTrue, err := con.Value.ToBool(sc)
	if con.Value.IsNull() || (err == nil && isTrue == 0) {
		// Constant false/null filter: replace the subtree with an empty dual
		// that keeps the original output schema.
		dual := LogicalTableDual{}.Init(p.SCtx(), p.SelectBlockOffset())
		dual.SetSchema(p.Schema())
		return dual
	}
	return nil
}
// DeleteTrueExprs deletes the surely true expressions.
// A condition is dropped only when it is a constant that safely folds to TRUE
// and its constness is stable under plan caching; everything else is kept.
func DeleteTrueExprs(p LogicalPlan, conds []expression.Expression) []expression.Expression {
	// Loop-invariant: fetch the statement context once instead of per element.
	sc := p.SCtx().GetSessionVars().StmtCtx
	newConds := make([]expression.Expression, 0, len(conds))
	for _, cond := range conds {
		con, ok := cond.(*expression.Constant)
		if !ok {
			newConds = append(newConds, cond)
			continue
		}
		// Keep "constants" whose value may differ across plan-cache executions.
		if expression.MaybeOverOptimized4PlanCache(p.SCtx(), []expression.Expression{con}) {
			newConds = append(newConds, cond)
			continue
		}
		// Drop only conditions that cleanly evaluate to TRUE.
		if isTrue, err := con.Value.ToBool(sc); err == nil && isTrue == 1 {
			continue
		}
		newConds = append(newConds, cond)
	}
	return newConds
}
// outerJoinPropConst propagates constant equal and column equal conditions over outer join.
func (p *LogicalJoin) outerJoinPropConst(predicates []expression.Expression) []expression.Expression {
	outerTable, innerTable := p.children[0], p.children[1]
	if p.JoinType == RightOuterJoin {
		outerTable, innerTable = innerTable, outerTable
	}
	// Gather all join conditions into one flat slice, clear them from the
	// join, run propagation, then re-attach the rewritten set (AttachOnConds
	// re-classifies them into the four condition buckets).
	joinConds := make([]expression.Expression, 0,
		len(p.EqualConditions)+len(p.LeftConditions)+len(p.RightConditions)+len(p.OtherConditions))
	for _, eq := range p.EqualConditions {
		joinConds = append(joinConds, eq)
	}
	joinConds = append(joinConds, p.LeftConditions...)
	joinConds = append(joinConds, p.RightConditions...)
	joinConds = append(joinConds, p.OtherConditions...)
	p.EqualConditions = nil
	p.LeftConditions = nil
	p.RightConditions = nil
	p.OtherConditions = nil
	// Null-sensitive join types restrict which constants may be propagated.
	nullSensitive := p.JoinType == AntiLeftOuterSemiJoin || p.JoinType == LeftOuterSemiJoin
	joinConds, predicates = expression.PropConstOverOuterJoin(p.SCtx(), joinConds, predicates, outerTable.Schema(), innerTable.Schema(), nullSensitive)
	p.AttachOnConds(joinConds)
	return predicates
}
// GetPartitionByCols extracts 'partition by' columns from the Window.
func (p *LogicalWindow) GetPartitionByCols() []*expression.Column {
	cols := make([]*expression.Column, len(p.PartitionBy))
	for i, item := range p.PartitionBy {
		cols[i] = item.Col
	}
	return cols
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// A predicate may cross the Window only when every column it references is a
// PARTITION BY column: such a filter removes whole partitions and cannot
// change any window-function result.
func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression, opt *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	partitionSchema := expression.NewSchema(p.GetPartitionByCols()...)
	pushable := make([]expression.Expression, 0, len(predicates))
	blocked := make([]expression.Expression, 0, len(predicates))
	for _, cond := range predicates {
		if expression.ExprFromSchema(cond, partitionSchema) {
			pushable = append(pushable, cond)
		} else {
			blocked = append(blocked, cond)
		}
	}
	p.baseLogicalPlan.PredicatePushDown(pushable, opt)
	return blocked, p
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// Memory tables accept no pushed conditions; at most the extractor, when
// present, consumes some predicates to narrow the scan and returns the rest.
func (p *LogicalMemTable) PredicatePushDown(predicates []expression.Expression, _ *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	if ext := p.Extractor; ext != nil {
		predicates = ext.Extract(p.SCtx(), p.schema, p.names, predicates)
	}
	return predicates, p.self
}
// name returns this optimization rule's identifier.
func (*ppdSolver) name() string {
	return "predicate_push_down"
}
// appendTableDualTraceStep records, in the optimizer trace, that `replaced`
// was substituted by the TableDual `dual` because `conditions` are constant
// false or null.
func appendTableDualTraceStep(replaced LogicalPlan, dual LogicalPlan, conditions []expression.Expression, opt *logicalOptimizeOp) {
	action := func() string {
		return fmt.Sprintf("%v_%v is replaced by %v_%v", replaced.TP(), replaced.ID(), dual.TP(), dual.ID())
	}
	reason := func() string {
		var buf bytes.Buffer
		buf.WriteString("The conditions[")
		for i, cond := range conditions {
			if i != 0 {
				buf.WriteString(",")
			}
			buf.WriteString(cond.String())
		}
		buf.WriteString("] are constant false or null")
		return buf.String()
	}
	opt.appendStepToCurrent(dual.ID(), dual.TP(), reason, action)
}
// appendSelectionPredicatePushDownTraceStep records, in the optimizer trace,
// that Selection p was removed; when conditions were pushed down, the reason
// lists them.
func appendSelectionPredicatePushDownTraceStep(p *LogicalSelection, conditions []expression.Expression, opt *logicalOptimizeOp) {
	action := func() string {
		return fmt.Sprintf("%v_%v is removed", p.TP(), p.ID())
	}
	reason := func() string { return "" }
	if len(conditions) > 0 {
		reason = func() string {
			var buf bytes.Buffer
			buf.WriteString("The conditions[")
			for i, cond := range conditions {
				if i != 0 {
					buf.WriteString(",")
				}
				buf.WriteString(cond.String())
			}
			fmt.Fprintf(&buf, "] in %v_%v are pushed down", p.TP(), p.ID())
			return buf.String()
		}
	}
	opt.appendStepToCurrent(p.ID(), p.TP(), reason, action)
}
// appendDataSourcePredicatePushDownTraceStep records, in the optimizer trace,
// which conditions were pushed into the DataSource. No-op when nothing was
// pushed.
func appendDataSourcePredicatePushDownTraceStep(ds *DataSource, opt *logicalOptimizeOp) {
	if len(ds.pushedDownConds) < 1 {
		return
	}
	reason := func() string { return "" }
	action := func() string {
		var buf bytes.Buffer
		buf.WriteString("The conditions[")
		for i, cond := range ds.pushedDownConds {
			if i != 0 {
				buf.WriteString(",")
			}
			buf.WriteString(cond.String())
		}
		fmt.Fprintf(&buf, "] are pushed down across %v_%v", ds.TP(), ds.ID())
		return buf.String()
	}
	opt.appendStepToCurrent(ds.ID(), ds.TP(), reason, action)
}
// appendAddSelectionTraceStep records, in the optimizer trace, that Selection
// sel was inserted between p and child.
func appendAddSelectionTraceStep(p LogicalPlan, child LogicalPlan, sel *LogicalSelection, opt *logicalOptimizeOp) {
	reason := func() string { return "" }
	action := func() string {
		return fmt.Sprintf("add %v_%v to connect %v_%v and %v_%v", sel.TP(), sel.ID(), p.TP(), p.ID(), child.TP(), child.ID())
	}
	opt.appendStepToCurrent(sel.ID(), sel.TP(), reason, action)
}
// AddPrefix4ShardIndexes add expression prefix for shard index. e.g. an index is test.uk(tidb_shard(a), a).
// It transforms the sql "SELECT * FROM test WHERE a = 10" to
// "SELECT * FROM test WHERE tidb_shard(a) = val AND a = 10", val is the value of tidb_shard(10).
// It also transforms the sql "SELECT * FROM test WHERE a IN (10, 20, 30)" to
// "SELECT * FROM test WHERE tidb_shard(a) = val1 AND a = 10 OR tidb_shard(a) = val2 AND a = 20"
// @param[in] conds the original condition of this datasource
// @retval - the new condition after adding expression prefix
func (ds *DataSource) AddPrefix4ShardIndexes(sc sessionctx.Context, conds []expression.Expression) []expression.Expression {
	// Fast path: nothing to do unless this table has a shard-prefixed unique key.
	if !ds.containExprPrefixUk {
		return conds
	}
	var err error
	newConds := conds
	// Rewrite the conditions once per shard-index access path.
	for _, path := range ds.possibleAccessPaths {
		if !path.IsUkShardIndexPath {
			continue
		}
		newConds, err = ds.addExprPrefixCond(sc, path, newConds)
		if err != nil {
			// Best-effort transformation: log and fall back to the original
			// conditions rather than failing the query.
			logutil.BgLogger().Error("Add tidb_shard expression failed",
				zap.Error(err),
				zap.Uint64("connection id", sc.GetSessionVars().ConnectionID),
				zap.String("database name", ds.DBName.L),
				zap.String("table name", ds.tableInfo.Name.L),
				zap.String("index name", path.Index.Name.L))
			return conds
		}
	}
	return newConds
}
// addExprPrefixCond rewrites conds for one shard-index access path, adding the
// tidb_shard(...) prefix expressions via an exprPrefixAdder. Returns conds
// unchanged when the index's prefix columns cannot be resolved.
func (ds *DataSource) addExprPrefixCond(sc sessionctx.Context, path *util.AccessPath,
	conds []expression.Expression) ([]expression.Expression, error) {
	idxCols, idxColLens := expression.IndexInfo2PrefixCols(ds.Columns, ds.schema.Columns, path.Index)
	if len(idxCols) == 0 {
		return conds, nil
	}
	return (&exprPrefixAdder{
		sctx:      sc,
		OrigConds: conds,
		cols:      idxCols,
		lengths:   idxColLens,
	}).addExprPrefix4ShardIndex()
}
// addExprPrefix4ShardIndex
// if original condition is a LogicOr expression, such as `WHERE a = 1 OR a = 10`,
// call the function AddExprPrefix4DNFCond to add prefix expression tidb_shard(a) = xxx for shard index.
// Otherwise, if the condition is `WHERE a = 1`, `WHERE a = 1 AND b = 10`, `WHERE a IN (1, 2, 3)`......,
// call the function AddExprPrefix4CNFCond to add prefix expression for shard index.
func (adder *exprPrefixAdder) addExprPrefix4ShardIndex() ([]expression.Expression, error) {
	// A top-level OR only appears as a single LogicOr condition.
	if len(adder.OrigConds) == 1 {
		if sf, ok := adder.OrigConds[0].(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicOr {
			return adder.addExprPrefix4DNFCond(sf)
		}
	}
	return adder.addExprPrefix4CNFCond(adder.OrigConds)
}
// addExprPrefix4CNFCond
// add the prefix expression for CNF condition, e.g. `WHERE a = 1`, `WHERE a = 1 AND b = 10`, ......
// @param[in] conds the original condition of the datasource. e.g. `WHERE t1.a = 1 AND t1.b = 10 AND t2.a = 20`.
//
// if current datasource is `t1`, conds is {t1.a = 1, t1.b = 10}. if current datasource is
// `t2`, conds is {t2.a = 20}
//
// @return - the new condition after adding expression prefix
func (adder *exprPrefixAdder) addExprPrefix4CNFCond(conds []expression.Expression) ([]expression.Expression, error) {
	return ranger.AddExpr4EqAndInCondition(adder.sctx, conds, adder.cols)
}
// addExprPrefix4DNFCond
// add the prefix expression for DNF condition, e.g. `WHERE a = 1 OR a = 10`, ......
// The condition returned is `WHERE (tidb_shard(a) = 214 AND a = 1) OR (tidb_shard(a) = 142 AND a = 10)`
// @param[in] condition the original condition of the datasource. e.g. `WHERE a = 1 OR a = 10`. condition is `a = 1 OR a = 10`
// @return - the new condition after adding expression prefix. It's still a LogicOr expression.
func (adder *exprPrefixAdder) addExprPrefix4DNFCond(condition *expression.ScalarFunction) ([]expression.Expression, error) {
	var err error
	dnfItems := expression.FlattenDNFConditions(condition)
	newAccessItems := make([]expression.Expression, 0, len(dnfItems))
	// Rewrite each disjunct independently, then recompose the OR.
	for _, item := range dnfItems {
		if sf, ok := item.(*expression.ScalarFunction); ok {
			var accesses []expression.Expression
			if sf.FuncName.L == ast.LogicAnd {
				// Disjunct is itself a conjunction: prefix its CNF items.
				cnfItems := expression.FlattenCNFConditions(sf)
				accesses, err = adder.addExprPrefix4CNFCond(cnfItems)
				if err != nil {
					// On failure, fall back to the untouched original condition.
					return []expression.Expression{condition}, err
				}
				newAccessItems = append(newAccessItems, expression.ComposeCNFCondition(adder.sctx, accesses...))
			} else if sf.FuncName.L == ast.EQ || sf.FuncName.L == ast.In {
				// only add prefix expression for EQ or IN function
				accesses, err = adder.addExprPrefix4CNFCond([]expression.Expression{sf})
				if err != nil {
					return []expression.Expression{condition}, err
				}
				newAccessItems = append(newAccessItems, expression.ComposeCNFCondition(adder.sctx, accesses...))
			} else {
				// Any other function is kept as-is.
				newAccessItems = append(newAccessItems, item)
			}
		} else {
			// Non-scalar-function disjuncts are kept as-is.
			newAccessItems = append(newAccessItems, item)
		}
	}
	return []expression.Expression{expression.ComposeDNFCondition(adder.sctx, newAccessItems...)}, nil
}
// PredicatePushDown implements LogicalPlan PredicatePushDown interface.
// For the outermost, non-recursive CTE it clones the pushable predicates into
// the CTE definition (p.cte.pushDownPredicates); the original predicates are
// always returned unchanged so they still apply above the CTE as well.
func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	if p.cte.recursivePartLogicalPlan != nil {
		// Doesn't support recursive CTE yet.
		return predicates, p.self
	}
	if !p.isOuterMostCTE {
		return predicates, p.self
	}
	pushedPredicates := make([]expression.Expression, len(predicates))
	copy(pushedPredicates, predicates)
	// The filter might change the correlated status of the cte.
	// We forbid the push down that makes the change for now.
	// Will support it later.
	if !p.cte.IsInApply {
		// Drop every predicate containing correlated columns; iterating
		// backwards keeps the remaining indexes valid after each removal.
		for i := len(pushedPredicates) - 1; i >= 0; i-- {
			if len(expression.ExtractCorColumns(pushedPredicates[i])) == 0 {
				continue
			}
			pushedPredicates = append(pushedPredicates[0:i], pushedPredicates[i+1:]...)
		}
	}
	if len(pushedPredicates) == 0 {
		// Nothing pushable: record a constant `true` predicate instead.
		p.cte.pushDownPredicates = append(p.cte.pushDownPredicates, expression.NewOne())
		return predicates, p.self
	}
	newPred := make([]expression.Expression, 0, len(predicates))
	for i := range pushedPredicates {
		// Clone before rewriting so the caller's expressions stay intact;
		// ResolveExprAndReplace maps outer columns through p.cte.ColumnMap.
		newPred = append(newPred, pushedPredicates[i].Clone())
		ResolveExprAndReplace(newPred[i], p.cte.ColumnMap)
	}
	p.cte.pushDownPredicates = append(p.cte.pushDownPredicates, expression.ComposeCNFCondition(p.SCtx(), newPred...))
	return predicates, p.self
}
// PredicatePushDown implements the LogicalPlan interface.
// Currently, we only maintain the main query tree: predicates are pushed into
// the last child (the main query) only, and whatever it cannot absorb is
// returned to the caller.
func (p *LogicalSequence) PredicatePushDown(predicates []expression.Expression, op *logicalOptimizeOp) ([]expression.Expression, LogicalPlan) {
	mainQueryIdx := len(p.children) - 1
	remained, newMainQuery := p.children[mainQueryIdx].PredicatePushDown(predicates, op)
	p.SetChild(mainQueryIdx, newMainQuery)
	return remained, p
}
|
package main
import (
"fmt"
)
// main builds a small id->Player map and prints each player's name.
// Note: Go map iteration order is randomized, so the print order may vary.
func main() {
	players := map[int]Player{
		1: {id: 1, name: "hendrawan"},
		2: {id: 2, name: "jupe"},
	}
	for _, p := range players {
		fmt.Println(p.name)
	}
}
// Player is a simple in-memory record printed by main.
type Player struct{
	id int // numeric identifier (main also uses it as the map key)
	name string // display name
}
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package view
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestResourceView drives CommonView through its lifecycle: construction,
// Init, header/body population, Refresh (with and without clearing) and Stop.
func TestResourceView(t *testing.T) {
	app := NewApp(nil, nil, "")
	view := NewCommonView(app)
	assert.Equal(t, view.Name(), "Resource")
	view.Init()
	// Init applies the themed border color and installs the hint entries.
	assert.Equal(t, view.GetBorderColor(), view.app.config.Theme.Border.Table.Color())
	assert.Equal(t, len(view.Hint()), 3)
	view.BuildHeader([]string{"Name", "Data"})
	// Header cells live in row 0 and use the header theme color.
	assert.Equal(t, view.GetCell(0, 0).Color, view.app.config.Theme.Table.Header.Color())
	assert.Equal(t, view.GetCell(0, 0).Text, "Name")
	view.BuildBody([][]string{{"Name1", "Data1"}})
	// Body rows start at row 1 and use the body theme color.
	assert.Equal(t, view.GetCell(1, 0).Color, view.app.config.Theme.Table.Body.Color())
	assert.Equal(t, view.GetCell(1, 0).Text, "Name1")
	assert.Equal(t, view.GetCell(1, 1).Text, "Data1")
	// Refresh with clear=true empties the table.
	view.Refresh(true, func(func()) {})
	assert.Equal(t, view.GetCell(0, 0).Text, "")
	view.BuildHeader([]string{"Name", "Data"})
	// Refresh with clear=false keeps the existing cells.
	view.Refresh(false, func(func()) {})
	assert.Equal(t, view.GetCell(0, 0).Text, "Name")
	view.BuildHeader([]string{"Name", "Data"})
	assert.Equal(t, view.GetCell(0, 0).Text, "Name")
	// Stop clears the table content.
	view.Stop()
	assert.Equal(t, view.GetCell(0, 0).Text, "")
}
|
package main
import (
"log"
"net/http"
"github.com/Ankr-network/dccn-midway/handlers"
"github.com/gorilla/mux"
)
// main registers all user/task/datacenter/price routes on a gorilla/mux
// router, wraps it in the CORS-handling MyServer, and serves on port 8080.
func main() {
	r := mux.NewRouter()
	// user management
	r.HandleFunc("/signup", handlers.Signup) // POST
	r.HandleFunc("/confirm_registration", handlers.ConfirmRegistration) // POST
	r.HandleFunc("/login", handlers.Signin) // POST
	r.HandleFunc("/logout", handlers.Logout) // POST
	r.HandleFunc("/refresh", handlers.Refresh) // POST
	r.HandleFunc("/forgot_password", handlers.ForgotPassword) // POST
	r.HandleFunc("/confirm_password", handlers.ConfirmPassword) // POST
	r.HandleFunc("/change_password", handlers.ChangePassword) // POST
	r.HandleFunc("/change_email", handlers.ChangeEmail) // POST
	r.HandleFunc("/confirmemail", handlers.ConfirmEmail) // POST
	r.HandleFunc("/update_attribute", handlers.UpdateAttribute) // POST
	// r.HandleFunc("/welcome", handlers.Welcome)
	// task management
	r.HandleFunc("/task/create", handlers.CreateTask) // POST
	r.HandleFunc("/task/update", handlers.UpdateTask) // POST
	r.HandleFunc("/task/list", handlers.ListTask) // GET
	r.HandleFunc("/task/delete", handlers.CancelTask) // POST
	r.HandleFunc("/task/purge", handlers.PurgeTask) // POST
	// data center management
	r.HandleFunc("/dc/list", handlers.DataCenterList) // GET
	r.HandleFunc("/dc/task_overview", handlers.TaskOverview) // GET
	r.HandleFunc("/dc/task_leaderboard", handlers.TaskLeaderBoard) // GET
	r.HandleFunc("/dc/network_info", handlers.NetworkInfo) // GET
	r.HandleFunc("/dc/leaderboard", handlers.DCLeaderBoard) // GET
	// Bittrex related
	r.HandleFunc("/price/bittrex", handlers.AnkrPrice)
	// start the server on port 8080
	http.Handle("/", &MyServer{r})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// MyServer wraps the mux router so every request passes through the
// CORS/preflight handling in ServeHTTP before being routed.
type MyServer struct {
	r *mux.Router // the route table built in main
}
// ServeHTTP adds CORS headers when the request carries an Origin, answers
// preflight OPTIONS requests with the headers alone, and otherwise delegates
// routing to the wrapped gorilla/mux router.
// NOTE(review): the Origin is reflected back without any validation, which
// permits cross-origin calls from any site — confirm this is intended.
func (s *MyServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	origin := req.Header.Get("Origin")
	if origin != "" {
		header := rw.Header()
		header.Add("Access-Control-Allow-Origin", origin)
		header.Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
		header.Add("Access-Control-Allow-Headers",
			"Accept, Content-Type, Content-Length, Authorization")
	}
	// Stop here if it's a preflighted OPTIONS request.
	if req.Method == "OPTIONS" {
		return
	}
	// Let Gorilla do the routing.
	s.r.ServeHTTP(rw, req)
}
|
package server
import (
"fmt"
//"github.com/danielkrainas/shrugmud/logging"
)
// InputHandler processes one line of input for a descriptor.
type InputHandler func(input string, d *Descriptor) error
// Ctrl is a controller for one or more descriptor states: it validates and
// (re)creates its per-descriptor state and handles input.
type Ctrl interface {
	Do(input string, d *Descriptor) error
	ValidateState(state CtrlState) bool
	NewState(d *Descriptor) CtrlState
}
// CtrlState is opaque controller-specific state stored on the descriptor.
type CtrlState interface{}
// CtrlRouter maps descriptor states to the controller that handles them.
type CtrlRouter struct {
	routes map[DescState]Ctrl
}
// NewRouter returns an empty CtrlRouter ready for Register calls.
func NewRouter() *CtrlRouter {
	router := new(CtrlRouter)
	router.routes = make(map[DescState]Ctrl)
	return router
}
// Register maps each canonical state name to ctrl, overwriting any
// controller previously registered for that state.
func (router *CtrlRouter) Register(ctrl Ctrl, canonicalStates ...string) {
	for _, name := range canonicalStates {
		router.routes[canonicalToState(name)] = ctrl
	}
}
// Dispatch routes input to the controller registered for the descriptor's
// current state, refreshing the descriptor's controller state first when the
// existing one is no longer valid for that controller.
func (router *CtrlRouter) Dispatch(input string, d *Descriptor) error {
	ctrl, ok := router.routes[d.State]
	if !ok {
		return fmt.Errorf("unsupported state: %s", stateToCanonical(d.State))
	}
	if valid := ctrl.ValidateState(d.CtrlState); !valid {
		d.CtrlState = ctrl.NewState(d)
	}
	return ctrl.Do(input, d)
}
|
package bgControllers
import (
"github.com/astaxie/beego"
"fmt"
"GiantTech/models"
"GiantTech/controllers/tools"
)
// BgUserUpdatePassWordController handles the back-office "change password"
// endpoint; it embeds beego.Controller for the request/session helpers.
type BgUserUpdatePassWordController struct {
	beego.Controller
}
// Prepare runs before the action: it loads the logged-in user name from the
// session and redirects to /login when none is present.
// NOTE(review): `username` is a package-level variable (declared elsewhere in
// this package) shared by all requests — concurrent requests race on it and
// can act on the wrong user; it should be per-request state instead.
func (this *BgUserUpdatePassWordController) Prepare() {
	s := this.StartSession()
	username = s.Get("login")
	beego.Informational(username)
	if username == nil {
		this.Ctx.Redirect(302, "/login")
	}
}
// Post updates the session user's password. The user must be in status 1
// (presumably "phone verified" — the failure message asks for verification);
// on success the password is stored encrypted and the status reset to 0.
func (this *BgUserUpdatePassWordController) Post() {
	user, _ := models.GetTUsersByName(username.(string))
	// NOTE(review): the lookup error is discarded; a failed lookup leaves a
	// zero-valued user and silently falls into the "not verified" branch.
	password := this.Ctx.Request.FormValue("passWord")
	if user.UserStatus == 1 {
		// "zpy" is the salt/key passed to the project's password helper —
		// TODO confirm it matches the value used when passwords are checked.
		user.UserPassword = tools.EncryptionPassWord(password, "zpy")
		user.UserStatus = 0
		if err := models.UpdateTUsersById(user);err != nil {
			beego.Error(err)
			fmt.Fprint(this.Ctx.ResponseWriter, "更新失败")
		}else {
			fmt.Fprint(this.Ctx.ResponseWriter, "修改成功")
		}
	}else {
		fmt.Fprint(this.Ctx.ResponseWriter, "更新失败,请先验证手机号码!")
	}
}
package notifications
import (
"bytes"
"fmt"
"html/template"
"log"
"net/smtp"
"net/url"
"strings"
)
// NotifyMessage is an interface to send notification messages for new posts.
// This gets sent when a post is set to 'published' for the first time.
type NotifyMessage interface {
	// send delivers a notification for the post with the given title and URL.
	send(title string, url string) error
}
// Notification contains configuration for how to notify people of new
// posts via smtp.
type Notification struct {
	// MailFunc abstracts smtp.SendMail so tests can inject a fake transport.
	MailFunc func(string, smtp.Auth, string, []string, []byte) error `json:"-"`
	SMTPServer string // host:port of the SMTP relay
	SMTPUsername string
	SMTPPassword string
	Sender string // From address handed to MailFunc
	Recipient string // single To address
}
// TemplateText contains the HTML that is filled in and emailed to someone
// This could get moved to either a file or inline in the configuration.
const TemplateText = `To: {{.Recipient}}
MIME-version: 1.0;
Content-Type: text/html; charset="UTF-8";
Subject: New Post: {{.Title}}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head></head>
<body>
<p>Hello,</p>
<p>There's a new post on <b>{{.Domain}}</b> entitled "<a href="{{.URL}}">{{.Title}}"</a></p>
<p>Please visit {{.URL}} to view it.</p>
<p style="font-size:small">Please don't reply, as this account is not monitored.</p>
</body>
</html>`
// Send renders the notification e-mail for a newly published post and
// delivers it through the configured MailFunc.
//
// title is the post title used in the subject; posturl is the post's URL
// (its host becomes the {{.Domain}} template value). Any template or
// transport error is returned to the caller.
func (n *Notification) Send(title string, posturl string) error {
	// PlainAuth authenticates against the SMTP host without its port.
	components := strings.SplitN(n.SMTPServer, ":", 2)
	auth := smtp.PlainAuth("", n.SMTPUsername, n.SMTPPassword, components[0])
	templateData := struct {
		Title     string
		URL       string
		Domain    string
		Recipient string
	}{
		Title:     title,
		URL:       posturl,
		Recipient: n.Recipient,
	}
	// Best effort: an unparsable URL simply leaves Domain empty.
	if parsed, err := url.Parse(posturl); err == nil {
		templateData.Domain = parsed.Host
	}
	t, err := template.New("email").Parse(TemplateText)
	if err != nil {
		return err
	}
	buf := new(bytes.Buffer)
	if err = t.Execute(buf, templateData); err != nil {
		return err
	}
	// buf.Bytes avoids the extra copy []byte(buf.String()) used to make.
	if err := n.MailFunc(n.SMTPServer, auth, n.Sender, []string{n.Recipient}, buf.Bytes()); err != nil {
		log.Println("Send mail failed: " + err.Error())
		return err
	}
	return nil
}
// savedNotification holds the package-wide configuration set by Initialize.
var savedNotification *Notification
// Initialize remembers the notification configuration for later use.
// It installs the real smtp.SendMail transport and rejects a configuration
// naming an SMTP user without a password.
func Initialize(n *Notification) error {
	n.MailFunc = smtp.SendMail
	if n.SMTPUsername != "" && n.SMTPPassword == "" {
		return fmt.Errorf("No password specified for SMTP user %q", n.SMTPUsername)
	}
	savedNotification = n
	return nil
}
// Send sends an email using the globally saved configuration.
// It returns an error (instead of panicking on a nil pointer, as before)
// when Initialize has not been called yet.
func Send(title string, posturl string) error {
	if savedNotification == nil {
		return fmt.Errorf("notifications: Initialize must be called before Send")
	}
	return savedNotification.Send(title, posturl)
}
|
/*
* @lc app=leetcode.cn id=1385 lang=golang
*
* [1385] 两个数组间的距离值
*/
// @lc code=start
package main
import "math"
// findTheDistanceValue counts the elements of arr1 whose absolute distance
// to every element of arr2 is strictly greater than d (LeetCode 1385).
func findTheDistanceValue(arr1 []int, arr2 []int, d int) int {
	count := 0
	for _, a := range arr1 {
		farEnough := true
		for _, b := range arr2 {
			if int(math.Abs(float64(a-b))) <= d {
				farEnough = false
				break
			}
		}
		if farEnough {
			count++
		}
	}
	return count
}
// @lc code=end
|
package 一维子串问题
/*
给定一个未经排序的整数数组,找到最长且连续的的递增序列。
*/
// findLengthOfLCIS returns the length of the longest strictly increasing
// contiguous run in nums (0 for an empty slice).
//
// Instead of materializing the whole dp slice (dp[i] = run length ending at
// i, as before), only the current run length is tracked — same answer in
// O(1) extra space.
func findLengthOfLCIS(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	longest, run := 1, 1
	for i := 1; i < len(nums); i++ {
		if nums[i] > nums[i-1] {
			// The run ending at i extends the previous run.
			run++
		} else {
			// Not strictly increasing: the run restarts at this element.
			run = 1
		}
		longest = max(longest, run)
	}
	return longest
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
/*
题目链接:
https://leetcode-cn.com/problems/longest-continuous-increasing-subsequence/ 最长连续递增序列
*/
/*
总结
1. 对于这个题,官方还有类似滑动窗口的解法。
*/ |
package factories
import (
"database/sql"
"github.com/barrydev/api-3h-shop/src/common/connect"
"github.com/barrydev/api-3h-shop/src/connections"
"github.com/barrydev/api-3h-shop/src/model"
)
// FindOneCoupon fetches a single coupon row, optionally narrowed by the
// caller-supplied query fragment and its bound arguments.
// It returns (nil, nil) when no row matches — "not found" is not an error.
func FindOneCoupon(query *connect.QueryMySQL) (*model.Coupon, error) {
	connection := connections.Mysql.GetConnection()
	queryString := `
		SELECT
			_id, code, discount, description, expires_at, updated_at
		FROM coupons
	`
	var args []interface{}
	if query != nil {
		queryString += query.QueryString
		args = query.Args
	}
	stmt, err := connection.Prepare(queryString)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	var _coupon model.Coupon
	err = stmt.QueryRow(args...).Scan(
		&_coupon.RawId,
		&_coupon.RawCode,
		&_coupon.RawDiscount,
		&_coupon.RawDescription,
		&_coupon.RawExpiresAt,
		&_coupon.RawUpdatedAt,
	)
	// Every switch arm returns, so the trailing unreachable `return nil, nil`
	// of the previous version has been dropped.
	switch err {
	case sql.ErrNoRows:
		return nil, nil
	case nil:
		// FillResponse derives the exported response fields from the Raw* columns.
		_coupon.FillResponse()
		return &_coupon, nil
	default:
		return nil, err
	}
}
|
// SPDX-FileCopyrightText: (c) 2018 Daniel Czerwonk
//
// SPDX-License-Identifier: MIT
package server
import (
"context"
"fmt"
"math"
"net"
bnet "github.com/bio-routing/bio-rd/net"
"github.com/bio-routing/bio-rd/protocols/bgp/types"
"github.com/bio-routing/bio-rd/route"
"github.com/czerwonk/bioject/pkg/api"
"github.com/czerwonk/bioject/pkg/database"
"github.com/czerwonk/bioject/pkg/tracing"
pb "github.com/czerwonk/bioject/proto"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// bgpService is the subset of BGP operations the API server needs; keeping
// it as a local interface lets tests substitute a fake implementation.
type bgpService interface {
	addPath(ctx context.Context, pfx *bnet.Prefix, p *route.Path) error
	removePath(ctx context.Context, pfx *bnet.Prefix, p *route.Path) bool
}
// apiServer implements the BioJect gRPC service on top of a BGP service and
// a persistent route store.
type apiServer struct {
	bgp bgpService
	db database.RouteStore
}
// startAPIServer exposes the BioJect gRPC API (with server reflection) on
// listenAddress, blocking until the server stops or fails.
func startAPIServer(listenAddress string, bgp bgpService, db database.RouteStore, metrics *Metrics) error {
	lis, err := net.Listen("tcp", listenAddress)
	if err != nil {
		return fmt.Errorf("failed to listen: %v", err)
	}
	api := &apiServer{
		bgp: bgp,
		db: db,
	}
	s := grpc.NewServer()
	// The metric adapter wraps the real implementation so every RPC is counted.
	pb.RegisterBioJectServiceServer(s, newMetricAPIAdapter(api, metrics))
	reflection.Register(s)
	log.Println("Starting API server on", listenAddress)
	if err := s.Serve(lis); err != nil {
		return fmt.Errorf("failed to serve: %v", err)
	}
	return nil
}
// AddRoute validates the request, announces the route via BGP and persists
// it. Request/processing problems are reported inside pb.Result status codes
// rather than as gRPC errors, so clients always get a structured result.
func (s *apiServer) AddRoute(ctx context.Context, req *pb.AddRouteRequest) (*pb.Result, error) {
	log.Info("Received AddRoute request:", req)
	ctx, span := tracing.Tracer().Start(ctx, "API.AddRoute")
	defer span.End()
	pfx, err := s.prefixForRequest(req.Route.Prefix)
	if err != nil {
		return s.errorResult(api.StatusCodeRequestError, err.Error()), nil
	}
	p, err := s.pathForRoute(req.Route)
	if err != nil {
		return s.errorResult(api.StatusCodeRequestError, err.Error()), nil
	}
	err = s.addCommunitiesToBGPPath(p.BGPPath, req)
	if err != nil {
		return s.errorResult(api.StatusCodeRequestError, err.Error()), nil
	}
	s.addLargeCommunitiesToBGPPath(p.BGPPath, req)
	// Announce first, persist second: a path that cannot be injected into
	// BGP is never written to the database.
	if err := s.bgp.addPath(ctx, pfx, p); err != nil {
		return s.errorResult(api.StatusCodeProcessingError, err.Error()), nil
	}
	if err := s.db.Save(ctx, convertToDatabaseRoute(pfx, p)); err != nil {
		return s.errorResult(api.StatusCodeProcessingError, err.Error()), nil
	}
	return &pb.Result{Code: api.StatusCodeOK}, nil
}
// WithdrawRoute removes a previously announced route from BGP and deletes it
// from the persistent store. Like AddRoute, failures are reported inside the
// pb.Result status code rather than as gRPC errors.
func (s *apiServer) WithdrawRoute(ctx context.Context, req *pb.WithdrawRouteRequest) (*pb.Result, error) {
	log.Info("Received WithdrawRoute request:", req)
	ctx, span := tracing.Tracer().Start(ctx, "API.WithdrawRoute")
	defer span.End()
	pfx, err := s.prefixForRequest(req.Route.Prefix)
	if err != nil {
		return s.errorResult(api.StatusCodeRequestError, err.Error()), nil
	}
	p, err := s.pathForRoute(req.Route)
	if err != nil {
		return s.errorResult(api.StatusCodeRequestError, err.Error()), nil
	}
	// Withdraw from BGP first; the database entry is only deleted once the
	// path removal succeeded.
	if !s.bgp.removePath(ctx, pfx, p) {
		return s.errorResult(api.StatusCodeProcessingError, "did not remove path"), nil
	}
	if err := s.db.Delete(ctx, convertToDatabaseRoute(pfx, p)); err != nil {
		return s.errorResult(api.StatusCodeProcessingError, err.Error()), nil
	}
	return &pb.Result{Code: api.StatusCodeOK}, nil
}
// pathForRoute builds an eBGP route.Path (with an empty AS path) carrying
// the request's next hop, local preference and MED.
func (s *apiServer) pathForRoute(r *pb.Route) (*route.Path, error) {
	nextHopIP, err := bnet.IPFromBytes(r.NextHop)
	if err != nil {
		return nil, err
	}
	return &route.Path{
		Type: route.BGPPathType,
		BGPPath: &route.BGPPath{
			ASPath: emptyASPath(),
			BGPPathA: &route.BGPPathA{
				Source: &bnet.IP{},
				LocalPref: uint32(r.LocalPref),
				MED: uint32(r.Med),
				NextHop: nextHopIP.Ptr(),
				EBGP: true,
			},
		},
	}, nil
}
// prefixForRequest converts an API prefix message into a bio-rd prefix.
func (s *apiServer) prefixForRequest(pfx *pb.Prefix) (*bnet.Prefix, error) {
	addr, err := bnet.IPFromBytes(pfx.Ip)
	if err != nil {
		return &bnet.Prefix{}, err
	}
	p := bnet.NewPfx(addr, uint8(pfx.Length))
	return p.Ptr(), nil
}
// addCommunitiesToBGPPath converts the request's classic communities
// (asn:value, each half limited to 16 bits) into the packed form
// (asn << 16 | value) and attaches them to p.
func (s *apiServer) addCommunitiesToBGPPath(p *route.BGPPath, req *pb.AddRouteRequest) error {
	comms := make(types.Communities, len(req.Communities))
	for i, c := range req.Communities {
		if c.Asn > math.MaxUint16 {
			return fmt.Errorf("the ASN part of community too large: (%d:%d)", c.Asn, c.Value)
		}
		if c.Value > math.MaxUint16 {
			return fmt.Errorf("value part of community too large: (%d:%d)", c.Asn, c.Value)
		}
		// Both halves fit in 16 bits, so this packing cannot overflow.
		comms[i] = c.Asn<<16 + c.Value
	}
	p.Communities = &comms
	return nil
}
// addLargeCommunitiesToBGPPath copies the request's large communities onto p.
func (s *apiServer) addLargeCommunitiesToBGPPath(p *route.BGPPath, req *pb.AddRouteRequest) {
	// Bug fix: the slice was previously sized by len(req.Communities) while
	// the loop ranges over req.LargeCommunities — that panicked with an
	// index-out-of-range when more large communities than classic ones were
	// supplied, and left zero-valued entries when there were fewer.
	comms := make(types.LargeCommunities, len(req.LargeCommunities))
	for i, c := range req.LargeCommunities {
		comms[i] = types.LargeCommunity{
			GlobalAdministrator: c.GlobalAdministrator,
			DataPart1:           c.LocalDataPart1,
			DataPart2:           c.LocalDataPart2,
		}
	}
	p.LargeCommunities = &comms
}
// errorResult logs msg and wraps it together with code in a pb.Result.
func (s *apiServer) errorResult(code uint32, msg string) *pb.Result {
	log.Error("Error:", msg)
	result := &pb.Result{Code: code, Message: msg}
	return result
}
|
// Copyright 2020 orivil.com. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found at https://mit-license.org.
package modes
// BookPattern describes, per platform, how a book's metadata is obtained.
// The Chinese `desc` struct tags are user-facing descriptions read via
// reflection elsewhere and are program data — they must stay unchanged.
type BookPattern struct {
	ID int
	PlatformID int `gorm:"index"`
	Url string
	Title string
	Author string
	DAuthor string `desc:"默认作者,当没有作者信息时使用"` // fallback author when none was scraped
	Category string `desc:"分类"` // category
	DCategory string `desc:"默认分类,当没有分类信息时使用"` // fallback category when none was scraped
	Description string
	Finished string
	Pic string
	FirstChapterID string
	DescDelPattern string `desc:"删除简介正则规则,数组字符串类型"` // regexes (array-as-string) stripped from the description
}
|
package memory
import (
"fmt"
"github.com/SergeyShpak/owngame/server/src/model/layers"
"github.com/SergeyShpak/owngame/server/src/types"
)
// memoryRoomLayer is an in-memory implementation of layers.RoomsDataLayer
// backed by three keyValStore-based maps keyed by room name.
type memoryRoomLayer struct {
	rooms *rooms // room name -> *roomMeta
	roomPlayers *roomPlayers // room name -> *players roster
	roomAdmins *roomAdmin // room name -> []string admin tokens
}
// NewMemoryRoomLayer builds an empty in-memory RoomsDataLayer.
func NewMemoryRoomLayer() (layers.RoomsDataLayer, error) {
	layer := &memoryRoomLayer{
		rooms:       newRooms(),
		roomPlayers: newRoomPlayers(),
		roomAdmins:  newRoomAdmin(),
	}
	return layer, nil
}
// CreateRoom registers a new room together with its admin token and promotes
// the creating user to the host role. The player capacity is currently fixed
// at 3.
func (m *memoryRoomLayer) CreateRoom(r *types.RoomCreateRequest, roomToken string) error {
	rMeta := &roomMeta{
		Name:            r.RoomName,
		Password:        r.Password,
		MaxPlayersCount: 3,
	}
	if ok := m.rooms.PutRoomMeta(rMeta); !ok {
		return fmt.Errorf("room %s already exists", r.RoomName)
	}
	if ok := m.roomAdmins.PutAdmin(r.RoomName, roomToken); !ok {
		return fmt.Errorf("could not put an admin for the room %s", r.RoomName)
	}
	if ok := m.roomPlayers.PutHost(r.RoomName, r.Login); !ok {
		// Bug fix: the format string has two %s verbs but only r.RoomName was
		// passed before, producing a "%!s(MISSING)" message.
		return fmt.Errorf("could not promote user %s to the admin role of the room %s", r.Login, r.RoomName)
	}
	return nil
}
// CheckPassword verifies the supplied password for roomName.
// The mismatch error deliberately no longer echoes the expected and supplied
// passwords: the previous message leaked the stored secret into logs and
// client-visible errors.
func (m *memoryRoomLayer) CheckPassword(roomName string, password string) error {
	meta, ok := m.rooms.GetRoomMeta(roomName)
	if !ok {
		return fmt.Errorf("room %s not found", roomName)
	}
	if meta.Password != password {
		return fmt.Errorf("password validation for room %s failed", roomName)
	}
	return nil
}
// JoinRoom adds login to roomName: the first joiner of a hostless room
// becomes the host, later joiners become participants while capacity lasts.
func (m *memoryRoomLayer) JoinRoom(roomName string, login string) (types.PlayerRole, error) {
	meta, ok := m.rooms.GetRoomMeta(roomName)
	if !ok {
		return types.PlayerRoleParticipant, fmt.Errorf("room %s not found", roomName)
	}
	// PutHost only succeeds while the host slot is free.
	if ok := m.roomPlayers.PutHost(roomName, login); ok {
		return types.PlayerRoleHost, nil
	}
	if ok := m.roomPlayers.AddParticipant(meta, login); ok {
		return types.PlayerRoleParticipant, nil
	}
	// The room is full (or its roster is missing).
	return types.PlayerRoleParticipant, fmt.Errorf("failed to join the room %s", roomName)
}
// GetParticipants returns everyone in the room: all participants followed by
// the host as the final entry.
// NOTE(review): this assumes a host is always present — when the host slot
// is still empty the final entry has an empty Login; confirm callers
// tolerate that.
func (m *memoryRoomLayer) GetParticipants(roomName string) ([]types.Participant, error) {
	p, ok := m.roomPlayers.GetPlayers(roomName)
	if !ok {
		return nil, fmt.Errorf("room %s not found", roomName)
	}
	playersCount := len(p.Players) + 1
	ps := make([]types.Participant, playersCount)
	for i, player := range p.Players {
		ps[i] = types.Participant{
			Login: player,
			Role: types.PlayerRoleParticipant,
		}
	}
	ps[len(p.Players)] = types.Participant{
		Login: p.Host,
		Role: types.PlayerRoleHost,
	}
	return ps, nil
}
// rooms stores *roomMeta records keyed by room name.
type rooms keyValStore
// newRooms returns an empty rooms store.
func newRooms() *rooms {
	r := (rooms)(*newKeyValStore())
	return &r
}
// roomMeta holds a room's settings captured at creation time.
type roomMeta struct {
	Name string
	Password string
	MaxPlayersCount int // capacity of the non-host participant list
}
// PutRoomMeta stores meta under its room name; it reports false when the
// underlying store already has an entry for that name.
func (r *rooms) PutRoomMeta(meta *roomMeta) bool {
	kvs := (*keyValStore)(r)
	roomName := meta.Name
	return kvs.Put(roomName, meta)
}
// GetRoomMeta looks up the meta record stored for roomName.
func (r *rooms) GetRoomMeta(roomName string) (*roomMeta, bool) {
	kvs := (*keyValStore)(r)
	roomMetaIface, ok := kvs.Get(roomName)
	if !ok {
		return nil, false
	}
	roomMeta := (roomMetaIface).(*roomMeta)
	return roomMeta, true
}
// roomPlayers stores a *players roster per room name.
type roomPlayers keyValStore
// newRoomPlayers returns an empty roster store.
func newRoomPlayers() *roomPlayers {
	rp := (roomPlayers)(*newKeyValStore())
	return &rp
}
// players is the roster of one room.
type players struct {
	Host string // empty until someone takes the host role
	Players []string
	Observers []string
}
// newPlayers returns an empty roster pre-sized for three players.
func newPlayers() *players {
	p := &players{
		Players: make([]string, 0, 3),
		Observers: make([]string, 0),
	}
	return p
}
// PutHost installs login as the room's host. It creates the roster on first
// use and reports false when a host is already set.
func (rp *roomPlayers) PutHost(roomName string, login string) bool {
	kvs := (*keyValStore)(rp)
	ok := kvs.Alter(roomName, func(playersIface interface{}, exist bool) (interface{}, bool) {
		if !exist {
			// First access to this room: create the roster with login as host.
			p := newPlayers()
			p.Host = login
			return p, true
		}
		p := playersIface.(*players)
		if len(p.Host) != 0 {
			// Host slot already taken.
			return nil, false
		}
		p.Host = login
		return p, true
	})
	return ok
}
// AddParticipant appends login to the room's participant list, reporting
// false when the roster does not exist or is already at meta.MaxPlayersCount.
func (rp *roomPlayers) AddParticipant(meta *roomMeta, login string) bool {
	kvs := (*keyValStore)(rp)
	ok := kvs.Alter(meta.Name, func(playersIface interface{}, exist bool) (interface{}, bool) {
		if !exist {
			return nil, false
		}
		p := playersIface.(*players)
		if meta.MaxPlayersCount == len(p.Players) {
			// Room full.
			return nil, false
		}
		p.Players = append(p.Players, login)
		return p, true
	})
	return ok
}
// GetPlayers returns the roster stored for roomName.
func (rp *roomPlayers) GetPlayers(roomName string) (*players, bool) {
	kvs := (*keyValStore)(rp)
	stored, ok := kvs.Get(roomName)
	if !ok {
		return nil, false
	}
	return stored.(*players), true
}
// roomAdmin stores a []string of admin tokens per room name.
type roomAdmin keyValStore
// newRoomAdmin returns an empty admin-token store.
func newRoomAdmin() *roomAdmin {
	ra := (roomAdmin)(*newKeyValStore())
	return &ra
}
// PutAdmin records adminToken for roomName, creating the token list on first
// use and leaving the list unchanged when the token is already present.
func (ra *roomAdmin) PutAdmin(roomName string, adminToken string) bool {
	kvs := (*keyValStore)(ra)
	ok := kvs.Alter(roomName, func(tokensIface interface{}, exist bool) (interface{}, bool) {
		if !exist {
			t := make([]string, 1)
			t[0] = adminToken
			return t, true
		}
		t := tokensIface.([]string)
		for _, at := range t {
			if adminToken == at {
				// Token already registered; keep the list as-is.
				return t, true
			}
		}
		t = append(t, adminToken)
		return t, true
	})
	return ok
}
// DeleteAdmin removes adminToken from roomName's admin list.
// It returns true only when the room exists and the token was present.
//
// Bug fix: the previous version mutated a local copy of the slice header, so
// the shortened list was never written back to the store (and the in-place
// append even shifted entries inside the stored backing array); it also
// returned true for tokens that were never registered. The removal now goes
// through kvs.Alter so the updated list is actually stored.
func (ra *roomAdmin) DeleteAdmin(roomName string, adminToken string) bool {
	kvs := (*keyValStore)(ra)
	removed := false
	ok := kvs.Alter(roomName, func(tokensIface interface{}, exist bool) (interface{}, bool) {
		if !exist {
			return nil, false
		}
		tokens := tokensIface.([]string)
		kept := make([]string, 0, len(tokens))
		for _, at := range tokens {
			if at == adminToken {
				removed = true
				continue
			}
			kept = append(kept, at)
		}
		return kept, true
	})
	return ok && removed
}
// GetAdmins returns the admin tokens registered for roomName.
func (ra *roomAdmin) GetAdmins(roomName string) ([]string, bool) {
	kvs := (*keyValStore)(ra)
	stored, ok := kvs.Get(roomName)
	if !ok {
		return nil, false
	}
	return stored.([]string), true
}
|
package peach
import (
"github.com/BurntSushi/toml"
)
// TomlClient is a client over a decoded TOML document.
type TomlClient struct {
	mapping map[string]interface{} // top-level key -> decoded value
}
// NewToml parses text as TOML and returns a client over the decoded table.
func (t *TomlClient) NewToml(text []byte) (*TomlClient, error) {
	raws := map[string]interface{}{}
	if err := toml.Unmarshal(text, &raws); err != nil {
		return nil, err
	}
	// Bug fix: the decoded map was previously discarded and an empty
	// TomlClient returned, so every Get on the result came up empty.
	return &TomlClient{mapping: raws}, nil
}
// Get returns the value stored under key k.
// NOTE(review): this type-asserts to *Value, but NewToml stores the raw
// interface{} values produced by toml.Unmarshal — the assertion will panic
// unless the map actually holds *Value entries; confirm the intended contract.
func (t *TomlClient) Get(k string) *Value {
	return t.mapping[k].(*Value)
}
|
package controller
import (
"enter-module/core/common"
"enter-module/core/config"
"enter-module/core/info"
"fmt"
"net/http"
)
// GetConfig writes the in-memory route configuration of every registered
// application to the response as a success payload.
func GetConfig(w http.ResponseWriter, r *http.Request) {
	// A slice sized to the map replaces the fixed [10]info.ConfigInfo array,
	// which panicked with index-out-of-range once more than ten applications
	// were configured (and padded the payload with zero entries otherwise).
	configList := make([]info.ConfigInfo, 0, len(config.MemoryRouteConfig))
	for name, routeCfg := range config.MemoryRouteConfig {
		configList = append(configList, info.ConfigInfo{ApplicationName: name, RouteConfigInfo: routeCfg})
	}
	// Fprint, not Fprintf: the payload is data, and any '%' inside it must
	// not be interpreted as a format verb.
	fmt.Fprint(w, common.SuccessData(configList))
}
|
/*
* @lc app=leetcode.cn id=20 lang=golang
*
* [20] 有效的括号
*/
// @lc code=start
package main
import "fmt"
// import "container/list"
// main exercises isValid on a few sample bracket strings and prints the
// result for each.
func main() {
	samples := []string{"(())", "(()))", "()[]{}"}
	for _, s := range samples {
		fmt.Printf("%s, %t\n", s, isValid(s))
	}
}
// isValid reports whether s consists solely of correctly matched and
// properly nested (), [] and {} bracket pairs; the empty string is valid
// (LeetCode 20). Any character that is not an opening bracket must match
// the bracket on top of the stack, so non-bracket characters always fail.
//
// The per-call debug printing of the previous version was removed: it was
// stdout noise that made the helper unusable outside this demo.
func isValid(s string) bool {
	// pairs maps each closing bracket to the opener it must match.
	pairs := map[byte]byte{')': '(', ']': '[', '}': '{'}
	stack := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch c {
		case '(', '[', '{':
			stack = append(stack, c)
		default:
			// A closer (or stray character) must match the most recent opener.
			if len(stack) == 0 || stack[len(stack)-1] != pairs[c] {
				return false
			}
			stack = stack[:len(stack)-1]
		}
	}
	// Valid only if every opener was consumed.
	return len(stack) == 0
}
// @lc code=end
|
package proxy
const (
	// SEQ_ID window used by the Thrift protocol.
	BACKEND_CONN_MIN_SEQ_ID = 1
	BACKEND_CONN_MAX_SEQ_ID = 1000000 // the counter wraps after one million requests
	INVALID_ARRAY_INDEX = -1 // sentinel for "no valid array index"
	HB_TIMEOUT = 6 // heartbeat timeout interval
	REQUEST_EXPIRED_TIME_MICRO = 5 * 1000000 // 5s
	TEST_PRODUCT_NAME = "test"
	VERSION = "0.1.0-2015121712" // version string
)
|
// Copyright 2018 The InjectSec Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package injectsec
import "testing"
// TestDetector verifies that a DetectorMaker-produced detector classifies
// known SQL-injection strings as attacks (probability >= 50) and benign
// strings as non-attacks (probability <= 50), both with the regex pre-filter
// skipped and enabled. The four previously duplicated loops are folded into
// one checkAll helper.
func TestDetector(t *testing.T) {
	maker, err := NewDetectorMaker()
	if err != nil {
		t.Fatal(err)
	}
	detector := maker.Make()
	// checkAll runs every sample through the detector with the given
	// SkipRegex setting and fails when the probability lands on the wrong
	// side of 50.
	checkAll := func(samples []string, skipRegex bool, wantAttack bool) {
		detector.SkipRegex = skipRegex
		for _, s := range samples {
			probability, err := detector.Detect(s)
			if err != nil {
				t.Fatal(err)
			}
			if wantAttack && probability < 50 {
				t.Fatal("should be a sql injection attack", s)
			}
			if !wantAttack && probability > 50 {
				t.Fatal("should not be a sql injection attack", s)
			}
		}
	}
	attacks := []string{
		"test or 1337=1337 --\"",
		" or 1=1 ",
		"/**/or/**/1337=1337",
	}
	checkAll(attacks, true, true)
	checkAll(attacks, false, true)
	notAttacks := []string{
		"abc123",
		"abc123 123abc",
		"123",
		"abcorabc",
		"available",
		"orcat1",
		"cat1or",
		"cat1orcat1",
	}
	checkAll(notAttacks, true, false)
	checkAll(notAttacks, false, false)
}
|
// Package delayed wraps a datastore allowing to artificially
// delay all operations.
package delayed
import (
delay "gx/ipfs/QmUe1WCHkQaz4UeNKiHDUBV2T6i9prc3DniqyHPXyfGaUq/go-ipfs-delay"
ds "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore"
dsq "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore/query"
)
// New returns a datastore that imposes the given delay on every operation
// before forwarding it to ds.
func New(ds ds.Datastore, delay delay.D) ds.Datastore {
	wrapped := &delayed{ds: ds, delay: delay}
	return wrapped
}
// delayed decorates a Datastore, waiting on delay before each operation.
type delayed struct {
	ds ds.Datastore
	delay delay.D
}
// Put waits for the configured delay, then forwards to the wrapped datastore.
func (dds *delayed) Put(key ds.Key, value []byte) (err error) {
	dds.delay.Wait()
	return dds.ds.Put(key, value)
}
// Get waits for the configured delay, then forwards to the wrapped datastore.
func (dds *delayed) Get(key ds.Key) (value []byte, err error) {
	dds.delay.Wait()
	return dds.ds.Get(key)
}
// Has waits for the configured delay, then forwards to the wrapped datastore.
func (dds *delayed) Has(key ds.Key) (exists bool, err error) {
	dds.delay.Wait()
	return dds.ds.Has(key)
}
// GetSize waits for the configured delay, then forwards to the wrapped datastore.
func (dds *delayed) GetSize(key ds.Key) (size int, err error) {
	dds.delay.Wait()
	return dds.ds.GetSize(key)
}
// Delete waits for the configured delay, then forwards to the wrapped datastore.
func (dds *delayed) Delete(key ds.Key) (err error) {
	dds.delay.Wait()
	return dds.ds.Delete(key)
}
// Query waits for the configured delay, then forwards to the wrapped datastore.
func (dds *delayed) Query(q dsq.Query) (dsq.Results, error) {
	dds.delay.Wait()
	return dds.ds.Query(q)
}
// Batch wraps this delayed store in a basic batch, so committed batched
// operations still go through the delaying methods above.
func (dds *delayed) Batch() (ds.Batch, error) {
	return ds.NewBasicBatch(dds), nil
}
// DiskUsage waits for the configured delay, then reports the wrapped store's usage.
func (dds *delayed) DiskUsage() (uint64, error) {
	dds.delay.Wait()
	return ds.DiskUsage(dds.ds)
}
|
package extended
import "github.com/mkamadeus/cipher/cipher/hill"
// Encrypt applies a Vigenère-style byte shift: each plaintext byte is
// incremented by the corresponding (cyclically repeated) key byte and the
// sum reduced modulo 256 via hill.CorrectModulus.
// NOTE(review): char+key[i%len(key)] is byte arithmetic and already wraps
// mod 256, so CorrectModulus looks redundant here — confirm before removing.
// Panics when key is empty (modulo by zero in i%len(key)).
func Encrypt(plain []byte, key []byte) []byte {
	result := []byte{}
	for i, char := range plain {
		result = append(result, byte(hill.CorrectModulus(int(char+key[i%len(key)]), 256)))
	}
	return result
}
|
package main
import "github.com/daniilperestoronin/go-chain/cmd"
// main constructs the blockchain command-line interface and runs it.
func main() {
	cli := cmd.CLI{}
	cli.Run()
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package isolation_test
import (
"context"
"fmt"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessiontxn"
"github.com/pingcap/tidb/sessiontxn/isolation"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testfork"
"github.com/stretchr/testify/require"
tikverr "github.com/tikv/client-go/v2/error"
)
// TestPessimisticSerializableTxnProviderTS checks timestamp behavior of the
// pessimistic serializable transaction provider: the statement read TS is
// fixed at transaction start (so it is smaller than an oracle TS fetched
// afterwards), and the for-update TS equals the read TS.
func TestPessimisticSerializableTxnProviderTS(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	defer tk.MustExec("rollback")
	se := tk.Session()
	provider := initializePessimisticSerializableProvider(t, tk)
	stmts, _, err := parser.New().Parse("select * from t", "", "")
	require.NoError(t, err)
	readOnlyStmt := stmts[0]
	stmts, _, err = parser.New().Parse("select * from t for update", "", "")
	require.NoError(t, err)
	forUpdateStmt := stmts[0]
	// compareTS is fetched after the transaction began, so every timestamp
	// the provider hands out must be smaller than it.
	compareTS := getOracleTS(t, se)
	require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt))
	require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
	ts, err := provider.GetStmtReadTS()
	require.NoError(t, err)
	require.Greater(t, compareTS, ts)
	prevTs := ts
	// In Oracle-like serializable isolation, readTS equals to the for update ts
	require.NoError(t, executor.ResetContextOfStmt(se, forUpdateStmt))
	require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
	ts, err = provider.GetStmtForUpdateTS()
	require.NoError(t, err)
	require.Greater(t, compareTS, ts)
	require.Equal(t, prevTs, ts)
}
// TestPessimisticSerializableTxnContextProviderLockError verifies that every
// pessimistic-lock error — whether normally retryable or not — maps to
// StmtActionError with the original error returned unchanged: serializable
// transactions never retry the statement.
func TestPessimisticSerializableTxnContextProviderLockError(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	defer tk.MustExec("rollback")
	se := tk.Session()
	provider := initializePessimisticSerializableProvider(t, tk)
	ctx := context.Background()
	stmts, _, err := parser.New().Parse("select * from t for update", "", "")
	require.NoError(t, err)
	stmt := stmts[0]
	// retryable errors
	for _, lockErr := range []error{
		kv.ErrWriteConflict,
		&tikverr.ErrDeadlock{Deadlock: &kvrpcpb.Deadlock{}, IsRetryable: true},
	} {
		require.NoError(t, executor.ResetContextOfStmt(se, stmt))
		require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
		nextAction, err := provider.OnStmtErrorForNextAction(ctx, sessiontxn.StmtErrAfterPessimisticLock, lockErr)
		require.Same(t, lockErr, err)
		require.Equal(t, sessiontxn.StmtActionError, nextAction)
	}
	// non-retryable errors
	for _, lockErr := range []error{
		&tikverr.ErrDeadlock{Deadlock: &kvrpcpb.Deadlock{}, IsRetryable: false},
		errors.New("err"),
	} {
		require.NoError(t, executor.ResetContextOfStmt(se, stmt))
		require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
		nextAction, err := provider.OnStmtErrorForNextAction(ctx, sessiontxn.StmtErrAfterPessimisticLock, lockErr)
		require.Same(t, lockErr, err)
		require.Equal(t, sessiontxn.StmtActionError, nextAction)
	}
}
// TestSerializableInitialize covers the ways a pessimistic SERIALIZABLE txn
// context provider gets created: explicit `begin` (inside and outside an
// existing txn), causal-consistency begin, EnterNewTxnDefault, and lazy
// activation of a non-active txn (with and without pessimistic autocommit).
func TestSerializableInitialize(t *testing.T) {
    store := testkit.CreateMockStore(t)
    testfork.RunTest(t, func(t *testfork.T) {
        clearScopeSettings := forkScopeSettings(t, store)
        defer clearScopeSettings()
        tk := testkit.NewTestKit(t, store)
        defer tk.MustExec("rollback")
        se := tk.Session()
        tk.MustExec("set tidb_skip_isolation_level_check = 1")
        tk.MustExec("set @@tx_isolation = 'SERIALIZABLE'")
        tk.MustExec("set @@tidb_txn_mode='pessimistic'")
        // begin outside a txn
        assert := activeSerializableAssert(t, se, true)
        tk.MustExec("begin")
        assert.Check(t)
        // begin inside an already-open txn (implicitly commits and starts anew)
        assert = activeSerializableAssert(t, se, true)
        tk.MustExec("begin")
        assert.Check(t)
        // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY
        assert = activeSerializableAssert(t, se, true)
        assert.causalConsistencyOnly = true
        tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY")
        assert.Check(t)
        // EnterNewTxnDefault will create an active txn, but not explicit
        assert = activeSerializableAssert(t, se, false)
        require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{
            Type:    sessiontxn.EnterNewTxnDefault,
            TxnMode: ast.Pessimistic,
        }))
        assert.Check(t)
        // non-active txn and then active it
        tk.MustExec("rollback")
        tk.MustExec("set @@autocommit=0")
        assert = inactiveSerializableAssert(se)
        assertAfterActive := activeSerializableAssert(t, se, true)
        require.NoError(t, se.PrepareTxnCtx(context.TODO()))
        provider := assert.CheckAndGetProvider(t)
        require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
        ts, err := provider.GetStmtReadTS()
        require.NoError(t, err)
        assertAfterActive.Check(t)
        require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS)
        tk.MustExec("rollback")
        // Case Pessimistic Autocommit
        config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true)
        assert = inactiveSerializableAssert(se)
        assertAfterActive = activeSerializableAssert(t, se, true)
        require.NoError(t, se.PrepareTxnCtx(context.TODO()))
        provider = assert.CheckAndGetProvider(t)
        require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
        ts, err = provider.GetStmtReadTS()
        require.NoError(t, err)
        assertAfterActive.Check(t)
        require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS)
        tk.MustExec("rollback")
    })
}
// TestTidbSnapshotVarInSerialize checks the interaction between the
// @@tidb_snapshot session variable and a SERIALIZABLE pessimistic txn:
// while the variable is set, both the information schema and the read /
// for-update TS come from the snapshot; clearing it restores the txn's own
// schema and start TS.
func TestTidbSnapshotVarInSerialize(t *testing.T) {
    store, dom := testkit.CreateMockStoreAndDomain(t)
    tk := testkit.NewTestKit(t, store)
    defer tk.MustExec("rollback")
    se := tk.Session()
    tk.MustExec("set tidb_skip_isolation_level_check = 1")
    tk.MustExec("set @@tx_isolation = 'SERIALIZABLE'")
    // Push the GC safe point back so reading at an old snapshot TS is allowed.
    safePoint := "20160102-15:04:05 -0700"
    tk.MustExec(fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%s', '') ON DUPLICATE KEY UPDATE variable_value = '%s', comment=''`, safePoint, safePoint))
    time.Sleep(time.Millisecond * 50)
    tk.MustExec("set @a=now(6)")
    snapshotISVersion := dom.InfoSchema().SchemaMetaVersion()
    time.Sleep(time.Millisecond * 50)
    tk.MustExec("use test")
    tk.MustExec("create table t1(id int)")
    tk.MustExec("create temporary table t2(id int)")
    tk.MustExec("set @@tidb_snapshot=@a")
    snapshotTS := tk.Session().GetSessionVars().SnapshotTS
    isVersion := dom.InfoSchema().SchemaMetaVersion()
    assert := activeSerializableAssert(t, se, true)
    tk.MustExec("begin pessimistic")
    provider := assert.CheckAndGetProvider(t)
    txn, err := se.Txn(false)
    require.NoError(t, err)
    require.Greater(t, txn.StartTS(), snapshotTS)
    // checkUseSnapshot asserts the provider serves the snapshot schema and TS.
    checkUseSnapshot := func() {
        is := provider.GetTxnInfoSchema()
        require.Equal(t, snapshotISVersion, is.SchemaMetaVersion())
        require.IsType(t, &infoschema.SessionExtendedInfoSchema{}, is)
        readTS, err := provider.GetStmtReadTS()
        require.NoError(t, err)
        require.Equal(t, snapshotTS, readTS)
        forUpdateTS, err := provider.GetStmtForUpdateTS()
        require.NoError(t, err)
        require.Equal(t, readTS, forUpdateTS)
    }
    // checkUseTxn asserts the provider serves the txn's own schema and start TS.
    checkUseTxn := func() {
        is := provider.GetTxnInfoSchema()
        require.Equal(t, isVersion, is.SchemaMetaVersion())
        require.IsType(t, &infoschema.SessionExtendedInfoSchema{}, is)
        readTS, err := provider.GetStmtReadTS()
        require.NoError(t, err)
        require.NotEqual(t, snapshotTS, readTS)
        require.Equal(t, se.GetSessionVars().TxnCtx.StartTS, readTS)
        forUpdateTS, err := provider.GetStmtForUpdateTS()
        require.NoError(t, err)
        require.Equal(t, readTS, forUpdateTS)
    }
    // information schema and ts should equal to snapshot when tidb_snapshot is set
    require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
    checkUseSnapshot()
    // information schema and ts will restore when set tidb_snapshot to empty
    tk.MustExec("set @@tidb_snapshot=''")
    require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
    checkUseTxn()
    // txn will not be active after `GetStmtReadTS` or `GetStmtForUpdateTS` when `tidb_snapshot` is set
    for _, autocommit := range []int{0, 1} {
        func() {
            tk.MustExec("rollback")
            tk.MustExec("set @@tidb_txn_mode='pessimistic'")
            tk.MustExec(fmt.Sprintf("set @@autocommit=%d", autocommit))
            tk.MustExec("set @@tidb_snapshot=@a")
            if autocommit == 1 {
                origPessimisticAutoCommit := config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Load()
                config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true)
                defer func() {
                    config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(origPessimisticAutoCommit)
                }()
            }
            assert = inactiveSerializableAssert(se)
            assertAfterUseSnapshot := activeSnapshotTxnAssert(se, se.GetSessionVars().SnapshotTS, "SERIALIZABLE")
            require.NoError(t, se.PrepareTxnCtx(context.TODO()))
            provider = assert.CheckAndGetProvider(t)
            require.NoError(t, provider.OnStmtStart(context.TODO(), nil))
            checkUseSnapshot()
            assertAfterUseSnapshot.Check(t)
        }()
    }
}
// activeSerializableAssert builds a txnAssert expecting an ACTIVE pessimistic
// SERIALIZABLE txn; inTxn controls whether the session is expected to be in
// an explicit transaction.
func activeSerializableAssert(t testing.TB, sctx sessionctx.Context,
    inTxn bool) *txnAssert[*isolation.PessimisticSerializableTxnContextProvider] {
    return &txnAssert[*isolation.PessimisticSerializableTxnContextProvider]{
        sctx:         sctx,
        isolation:    "SERIALIZABLE",
        minStartTime: time.Now(),
        active:       true,
        inTxn:        inTxn,
        minStartTS:   getOracleTS(t, sctx),
    }
}
// inactiveSerializableAssert builds a txnAssert expecting a SERIALIZABLE txn
// context that has NOT been activated yet (no start TS fetched).
func inactiveSerializableAssert(sctx sessionctx.Context) *txnAssert[*isolation.PessimisticSerializableTxnContextProvider] {
    return &txnAssert[*isolation.PessimisticSerializableTxnContextProvider]{
        sctx:         sctx,
        isolation:    "SERIALIZABLE",
        minStartTime: time.Now(),
        active:       false,
    }
}
// initializePessimisticSerializableProvider switches the session to
// SERIALIZABLE (skipping the isolation-level check), begins a pessimistic
// txn, and returns the resulting txn context provider.
func initializePessimisticSerializableProvider(t *testing.T,
    tk *testkit.TestKit) *isolation.PessimisticSerializableTxnContextProvider {
    tk.MustExec("set tidb_skip_isolation_level_check = 1")
    tk.MustExec("set @@tx_isolation = 'SERIALIZABLE'")
    assert := activeSerializableAssert(t, tk.Session(), true)
    tk.MustExec("begin pessimistic")
    return assert.CheckAndGetProvider(t)
}
|
package foo
import "fmt"
// init prints a marker once, when the package is first imported.
func init() {
    fmt.Println("foo.")
}

// Foo prints a marker showing the exported function was invoked.
func Foo() {
    fmt.Println("foo Foo.")
}
|
// Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package atlas
import (
"fmt"
"strings"
"github.com/mongodb/mongocli/internal/cli"
"github.com/mongodb/mongocli/internal/config"
"github.com/mongodb/mongocli/internal/description"
"github.com/mongodb/mongocli/internal/flag"
"github.com/mongodb/mongocli/internal/store"
"github.com/mongodb/mongocli/internal/usage"
"github.com/spf13/cobra"
atlas "go.mongodb.org/atlas/mongodbatlas"
)
// ClustersIndexesCreateOpts holds the flag/argument values for the
// `clusters indexes create` command.
type ClustersIndexesCreateOpts struct {
    cli.GlobalOpts
    clusterName string
    name        string
    db          string
    collection  string
    keys        []string // raw --key values, each expected as "field:type"
    unique      bool
    sparse      bool
    background  bool // deprecated flag, kept for compatibility (hidden below)
    store       store.IndexCreator
}
// initStore creates the Atlas store client from the default profile config.
func (opts *ClustersIndexesCreateOpts) initStore() error {
    var err error
    opts.store, err = store.New(config.Default())
    return err
}
// Run builds the index configuration from the flags and submits the
// asynchronous create request; Atlas builds the index in the background.
func (opts *ClustersIndexesCreateOpts) Run() error {
    req, err := opts.newIndex()
    if err != nil {
        return err
    }
    if err := opts.store.CreateIndex(opts.ConfigProjectID(), opts.clusterName, req); err != nil {
        return err
    }
    fmt.Println("Your index is being created")
    return nil
}
// newIndex assembles the Atlas index-creation payload from the user-supplied
// flags: target namespace (db/collection), parsed key list, and options.
func (opts *ClustersIndexesCreateOpts) newIndex() (*atlas.IndexConfiguration, error) {
    indexKeys, err := opts.indexKeys()
    if err != nil {
        return nil, err
    }
    index := &atlas.IndexConfiguration{
        DB:         opts.db,
        Collection: opts.collection,
        Keys:       indexKeys,
        Options:    opts.newIndexOptions(),
    }
    return index, nil
}
// newIndexOptions copies the boolean/name flags into an atlas.IndexOptions.
func (opts *ClustersIndexesCreateOpts) newIndexOptions() *atlas.IndexOptions {
    options := new(atlas.IndexOptions)
    options.Name = opts.name
    options.Background = opts.background
    options.Sparse = opts.sparse
    options.Unique = opts.unique
    return options
}
// indexKeys parses each --key flag of the form "field:type" into a
// single-entry map; any malformed key aborts the command with an error.
func (opts *ClustersIndexesCreateOpts) indexKeys() ([]map[string]string, error) {
    parsed := make([]map[string]string, 0, len(opts.keys))
    for _, rawKey := range opts.keys {
        fieldAndType := strings.Split(rawKey, ":")
        if len(fieldAndType) != 2 {
            return nil, fmt.Errorf("unexpected key format: %s", rawKey)
        }
        parsed = append(parsed, map[string]string{fieldAndType[0]: fieldAndType[1]})
    }
    return parsed, nil
}
// ClustersIndexesCreateBuilder builds a cobra.Command that can run as:
// mcli atlas clusters index create [name] --clusterName clusterName --collection collection --dbName dbName [--key field:type]
// clusterName, database, collection and at least one key are mandatory;
// the index name is an optional positional argument.
func ClustersIndexesCreateBuilder() *cobra.Command {
    opts := &ClustersIndexesCreateOpts{}
    cmd := &cobra.Command{
        Use:   "create [name]",
        Short: description.CreateIndex,
        Args:  cobra.MaximumNArgs(1),
        PreRunE: func(cmd *cobra.Command, args []string) error {
            return opts.PreRunE(opts.initStore)
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            if len(args) == 1 {
                opts.name = args[0]
            }
            return opts.Run()
        },
    }
    cmd.Flags().StringVar(&opts.clusterName, flag.ClusterName, "", usage.ClusterName)
    cmd.Flags().StringVar(&opts.db, flag.Database, "", usage.Database)
    cmd.Flags().StringVar(&opts.collection, flag.Collection, "", usage.Collection)
    cmd.Flags().StringArrayVar(&opts.keys, flag.Key, nil, usage.Key)
    cmd.Flags().BoolVar(&opts.unique, flag.Unique, false, usage.Unique)
    cmd.Flags().BoolVar(&opts.sparse, flag.Sparse, false, usage.Sparse)
    cmd.Flags().BoolVar(&opts.background, flag.Background, false, usage.Background)
    cmd.Flags().StringVar(&opts.ProjectID, flag.ProjectID, "", usage.ProjectID)
    _ = cmd.MarkFlagRequired(flag.ClusterName)
    _ = cmd.MarkFlagRequired(flag.Database)
    _ = cmd.MarkFlagRequired(flag.Collection)
    _ = cmd.MarkFlagRequired(flag.Key)
    _ = cmd.Flags().MarkHidden(flag.Background) // Deprecated
    return cmd
}
|
package user
import (
"github.com/btnguyen2k/prom"
"github.com/btnguyen2k/henge"
)
// NewUserDaoAwsDynamodb is helper function to create AWS DynamoDB-implementation of UserDao.
func NewUserDaoAwsDynamodb(dync *prom.AwsDynamodbConnect, tableName string) UserDao {
    var spec *henge.DynamodbDaoSpec // nil spec: let henge use its defaults
    return &UserDaoAwsDynamodb{
        UniversalDao: henge.NewUniversalDaoDynamodb(dync, tableName, spec),
        spec:         spec,
    }
}
// InitUserTableAwsDynamodb is helper function to initialize AWS DynamoDB table(s) to store users.
// This function also creates table indexes if needed. The table is provisioned
// with minimal throughput (1 RCU / 1 WCU).
//
// Available since v0.7.0.
func InitUserTableAwsDynamodb(adc *prom.AwsDynamodbConnect, tableName string) error {
    spec := &henge.DynamodbTablesSpec{MainTableRcu: 1, MainTableWcu: 1}
    return henge.InitDynamodbTables(adc, tableName, spec)
}
// UserDaoAwsDynamodb is AWS DynamoDB-implementation of UserDao.
// It embeds henge.UniversalDao and delegates storage operations to it.
type UserDaoAwsDynamodb struct {
    henge.UniversalDao
    spec *henge.DynamodbDaoSpec // optional partition-key prefix config; may be nil
}
// Delete implements UserDao.Delete by delegating to the embedded UniversalDao.
func (dao *UserDaoAwsDynamodb) Delete(bo *User) (bool, error) {
    return dao.UniversalDao.Delete(bo.UniversalBo)
}
// Create implements UserDao.Create.
// The BO is synced to its universal form first; when a pk-prefix is
// configured, it is stamped onto the record as an extra attribute.
func (dao *UserDaoAwsDynamodb) Create(bo *User) (bool, error) {
    ubo := bo.sync().UniversalBo
    if dao.spec != nil && dao.spec.PkPrefix != "" {
        ubo.SetExtraAttr(dao.spec.PkPrefix, dao.spec.PkPrefixValue)
    }
    return dao.UniversalDao.Create(ubo)
}
// Get implements UserDao.Get.
// Fix: the previous version built a User from the fetched UBO even when the
// underlying DAO returned an error; now the error is checked first and the
// value is only constructed on success. A missing record (nil ubo, nil err)
// still flows through NewUserFromUbo as before.
func (dao *UserDaoAwsDynamodb) Get(id string) (*User, error) {
    ubo, err := dao.UniversalDao.Get(id)
    if err != nil {
        return nil, err
    }
    return NewUserFromUbo(ubo), nil
}
// Update implements UserDao.Update by syncing the BO and delegating to the
// embedded UniversalDao.
func (dao *UserDaoAwsDynamodb) Update(bo *User) (bool, error) {
    return dao.UniversalDao.Update(bo.sync().UniversalBo)
}
|
package test
import (
"fmt"
"os"
"testing"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/gruntwork-io/terratest/modules/terraform"
)
var githubOrganization, githubToken string
// init loads the GitHub credentials required by every test in this package
// and aborts the whole run immediately when either one is missing.
func init() {
    githubOrganization = os.Getenv("GITHUB_OWNER")
    githubToken = os.Getenv("GITHUB_TOKEN")
    switch {
    case githubOrganization == "":
        panic("Please set a github organization using the GITHUB_OWNER environment variable.")
    case githubToken == "":
        panic("Please set a github token using the GITHUB_TOKEN environment variable.")
    }
}
// TestGithubTeam provisions two public repositories and a team via the
// Terraform fixture in public-repositories-with-team, then verifies the
// configuration applies idempotently. Requires GITHUB_OWNER/GITHUB_TOKEN
// (checked in init above).
func TestGithubTeam(t *testing.T) {
    t.Parallel()
    // Randomized names (mixed case on purpose for repo B) avoid collisions
    // across concurrent runs.
    repositoryA := fmt.Sprintf("a-repository-%s", random.UniqueId())
    repositoryB := fmt.Sprintf("B-Repository-%s", random.UniqueId())
    teamName := fmt.Sprintf("team-%s", random.UniqueId())
    terraformOptions := &terraform.Options{
        // The path to where your Terraform code is located
        TerraformDir: "public-repositories-with-team",
        Upgrade:      true,
        Vars: map[string]interface{}{
            "team_name":         teamName,
            "a-repository-name": repositoryA,
            "b-repository-name": repositoryB,
        },
    }
    // At the end of the test, run `terraform destroy` to clean up any resources that were created
    defer terraform.Destroy(t, terraformOptions)
    // Run `terraform init` + `terraform plan` first, then apply and assert a
    // second apply produces no changes (idempotency check).
    terraform.InitAndPlan(t, terraformOptions)
    terraform.ApplyAndIdempotent(t, terraformOptions)
}
|
package models
import (
"post_crud_golang/database"
"github.com/jinzhu/gorm"
)
// Post is a blog entry persisted via gorm (ID/timestamps come from gorm.Model).
type Post struct {
    gorm.Model
    Title   string `gorm:"size:50;not null"`
    Content string `gorm:"size:2000;not null"`
}
// dbInit opens a database handle and ensures the Post schema exists.
// The caller owns the returned handle and is responsible for closing it.
//
// Bug fix: the previous version ran `defer d.Close()` inside this function,
// which closed the connection before it was returned — every caller received
// an already-closed *gorm.DB. The defer is removed; all callers in this file
// already close the handle themselves.
func dbInit() (d *gorm.DB) {
    d = database.Init()
    d.AutoMigrate(&Post{})
    return d
}
// Insert persists a new Post row with the given title and content.
func Insert(title string, content string) {
    db := dbInit()
    defer db.Close()
    newPost := Post{
        Title:   title,
        Content: content,
    }
    db.Create(&newPost)
}
// update overwrites the title and content of the post identified by id.
func update(id int, title string, content string) {
    db := dbInit()
    defer db.Close()
    var existing Post
    db.First(&existing, id)
    existing.Title, existing.Content = title, content
    db.Save(&existing)
}
// delete removes the post identified by id (soft delete via gorm.Model).
func delete(id int) {
    db := dbInit()
    defer db.Close()
    var target Post
    db.First(&target, id)
    db.Delete(&target)
}
// GetAll returns every post, newest first.
func GetAll() []Post {
    db := dbInit()
    defer db.Close()
    var result []Post
    db.Order("created_at desc").Find(&result)
    return result
}
// GetFirst returns the post identified by id (zero value when not found).
func GetFirst(id int) Post {
    db := dbInit()
    defer db.Close()
    var found Post
    db.First(&found, id)
    return found
}
|
package log
import "context"
var (
    // root is the process-wide default logger, lazily created by Root.
    root Logger
)

// Root return default logger instance.
// NOTE(review): the lazy initialization is not synchronized; concurrent
// first calls could race on root — confirm single-goroutine init or guard
// with sync.Once.
func Root() Logger {
    if root == nil {
        root = newGlog()
    }
    return root
}
// NewContext returns a child context carrying the given logger; a nil logger
// falls back to the package default.
func NewContext(ctx context.Context, logger Logger) context.Context {
    l := logger
    if l == nil {
        l = Root()
    }
    return context.WithValue(ctx, loggerKey, l)
}
// FromContext extracts the logger stored in ctx, falling back to the package
// default when ctx is nil or carries no logger.
func FromContext(ctx context.Context) Logger {
    if ctx != nil {
        if l, ok := ctx.Value(loggerKey).(Logger); ok {
            return l
        }
    }
    return Root()
}
// WithContext returns the logger associated with ctx (alias of FromContext).
func WithContext(ctx context.Context) Logger {
    return FromContext(ctx)
}

// WithFields returns a new entry on the default logger with the given fields attached.
func WithFields(fields Fields) Logger {
    return Root().WithFields(fields)
}

// Info logs at info level on the default logger.
func Info(v ...interface{}) {
    Root().Info(v...)
}

// Debug logs at debug level on the default logger.
func Debug(v ...interface{}) {
    Root().Debug(v...)
}

// Warn logs at warning level on the default logger.
func Warn(v ...interface{}) {
    Root().Warn(v...)
}

// Error logs at error level on the default logger.
func Error(v ...interface{}) {
    Root().Error(v...)
}

// Panic logs on the default logger and then panics.
func Panic(v ...interface{}) {
    Root().Panic(v...)
}

// Infof logs a formatted message at info level on the default logger.
func Infof(format string, v ...interface{}) {
    Root().Infof(format, v...)
}

// Debugf logs a formatted message at debug level on the default logger.
func Debugf(format string, v ...interface{}) {
    Root().Debugf(format, v...)
}

// Warnf logs a formatted message at warning level on the default logger.
func Warnf(format string, v ...interface{}) {
    Root().Warnf(format, v...)
}

// Errorf logs a formatted message at error level on the default logger.
func Errorf(format string, v ...interface{}) {
    Root().Errorf(format, v...)
}

// Panicf logs a formatted message on the default logger and then panics.
func Panicf(format string, v ...interface{}) {
    Root().Panicf(format, v...)
}
|
package server
import "github.com/SOMAS2020/SOMAS2020/internal/common/disasters"
// probeDisaster checks if a disaster occurs this turn: it samples the
// environment's disaster process for the current turn, logs a report of the
// outcome against the common pool, and returns the updated environment.
// The error result is always nil in the current implementation.
func (s *SOMASServer) probeDisaster() (disasters.Environment, error) {
    s.logf("start probeDisaster")
    defer s.logf("finish probeDisaster")
    e := s.gameState.Environment
    e = e.SampleForDisaster(s.gameConfig.DisasterConfig, s.gameState.Turn) // update env instance with sampled disaster info
    disasterReport := e.DisplayReport(s.gameState.CommonPool, s.gameConfig.DisasterConfig) // displays disaster info and effects
    s.logf(disasterReport)
    return e, nil
}
// applyDisasterEffects applies the effects of the sampled disaster to the game
// state: islands are depleted only by the portion of the disaster the common
// pool could not mitigate. (Previous header comment was a copy-paste of
// probeDisaster's and named the wrong function.)
func (s *SOMASServer) applyDisasterEffects() {
    s.logf("start applyDisasterEffects")
    defer s.logf("finish applyDisasterEffects")
    e := s.gameState.Environment
    effects := e.ComputeDisasterEffects(s.gameState.CommonPool, s.gameConfig.DisasterConfig) // get disaster effects - absolute, proportional and CP-mitigated
    s.islandDeplete(effects.CommonPoolMitigated) //island's resource will be depleted by disaster only when disaster happens and cp cannot fully mitigate
}
|
package main
import (
"fmt"
"github.com/feng/future/design/factory/factory"
)
// main exercises the factory patterns wired up in the factory package:
// simple factories (pen, operation), a factory passed through the
// AbstractFactory interface, and an abstract factory producing a DB accessor.
func main() {
    var pf factory.PenFactory
    p := pf.Produce("brush")
    p.Write()
    var of factory.OperationFactory
    o := of.Produce("*")
    result := o.Operation(10, 20) // "*" factory: expected to multiply 10 and 20
    fmt.Println(result)
    var bp factory.BrushPen
    f(bp)
    // NOTE(review): unkeyed composite literal — field order must match
    // factory.User's declaration; consider keyed fields.
    u := factory.User{ "xxxx", 29 }
    var fac factory.AccessFactory
    var absfactory factory.DBAbsFactory
    absfactory = fac
    iu := absfactory.Produce()
    iu.Insert(u)
}
// f exercises any AbstractFactory: produce a product and invoke Write on it.
func f(af factory.AbstractFactory) {
    p := af.Produce()
    p.Write()
}
|
package main
import (
"strings"
"github.com/corymurphy/adventofcode/shared"
)
// Direction identifies one of the four movement directions parsed from the
// puzzle input; Unknown covers any unrecognized token.
type Direction int

const (
    Unknown Direction = iota // 0
    Up                       // 1
    Down                     // 2
    Left                     // 3
    Right                    // 4
)

// Instruction pairs a movement direction with the number of steps to take.
type Instruction struct {
    Distance  int
    Direction Direction
}

// String returns the human-readable name of the direction; out-of-range
// values render as "Unknown".
func (d Direction) String() string {
    names := [...]string{"Unknown", "Up", "Down", "Left", "Right"}
    if d >= Up && d <= Right {
        return names[d]
    }
    return "Unknown"
}

// parseDirection maps a single-letter input token to its Direction;
// anything unrecognized yields Unknown.
func parseDirection(input string) Direction {
    lookup := map[string]Direction{
        "U": Up,
        "D": Down,
        "L": Left,
        "R": Right,
    }
    if d, ok := lookup[input]; ok {
        return d
    }
    return Unknown
}
// NewInstruction parses one puzzle-input line of the form "<dir> <distance>"
// (e.g. "R 4") into an Instruction. Input is assumed well-formed: fewer than
// two space-separated fields would panic on the index below.
func NewInstruction(input string) Instruction {
    instruction := strings.Split(input, " ")
    return Instruction{
        // shared.ToInt is a project helper; behavior on non-numeric input is
        // defined there — TODO confirm.
        Distance:  shared.ToInt(instruction[1]),
        Direction: parseDirection(instruction[0]),
    }
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-07-30 17:11
* Description:
*****************************************************************/
package xthrift
import "github.com/apache/thrift/lib/go/thrift"
// TPSet is the parsed representation of a Thrift set value.
type TPSet struct {
    ElemType thrift.TType // thrift type tag of the set's elements
    Size     int32        // declared number of elements
    Body     []*TPValue   // decoded elements (TPValue declared elsewhere in this package)
}
|
/*
Package shaping provides tables corresponding to Unicode® Character Data tables relevant
for text shaping.
___________________________________________________________________________
License
This project is provided under the terms of the UNLICENSE or
the 3-Clause BSD license denoted by the following SPDX identifier:
SPDX-License-Identifier: 'Unlicense' OR 'BSD-3-Clause'
You may use the project under the terms of either license.
Licenses are reproduced in the license file in the root folder of this module.
Copyright © 2021 Norbert Pillmayer <norbert@pillmayer.com>
*/
package shaping
//go:generate go run ../internal/classgen -f 3 -o arabictables.go -x ARAB_ -u ArabicShaping.txt -noclass
//go:generate go run ../internal/classgen -f 2 -o uipctables.go -x UIPC_ -u IndicPositionalCategory.txt -noclass
//go:generate go run ../internal/classgen -f 2 -o uisctables.go -x UISC_ -u IndicSyllabicCategory.txt -noclass
|
package search
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestGetYoutubeLink checks that known track title/artist pairs resolve to
// their expected YouTube watch URLs.
// NOTE(review): GetYoutubeLink presumably queries a live search backend, so
// this test is network-dependent and the expected links may drift over time.
func TestGetYoutubeLink(t *testing.T) {
    tests := []struct {
        title   string
        artists []string
        link    string
    }{
        {"CLOUDS", []string{"NF"}, "https://youtube.com/watch?v=JXOYZXb0no4"},
        {"Blinding Lights", []string{"The Weeknd"}, "https://youtube.com/watch?v=J7p4bzqLvCw"},
        {"Fearless Pt. II", []string{"Lost Sky", "Chris Linton"}, "https://youtube.com/watch?v=JTjmZZ1W2ew"},
    }
    for _, tCase := range tests {
        t.Run(tCase.title, func(t *testing.T) {
            ytLink, err := GetYoutubeLink(tCase.title, tCase.artists)
            // Only compare the link when resolution succeeded.
            if assert.Nil(t, err) {
                assert.Equal(t, tCase.link, ytLink)
            }
        })
    }
}
|
package shell
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestCommandsNameShouldReturnCommands pins the built-in command's name.
func TestCommandsNameShouldReturnCommands(t *testing.T) {
    assert.Equal(t, "commands", commands(0).name())
}

// TestCommandssDescriptionShouldNotBeEmpty checks a description is provided.
func TestCommandssDescriptionShouldNotBeEmpty(t *testing.T) {
    assert.NotEqual(t, "", commands(0).description())
}

// TestCommandsUsageShouldNotBeEmpty checks a usage string is provided.
func TestCommandsUsageShouldNotBeEmpty(t *testing.T) {
    assert.NotEqual(t, "", commands(0).usage())
}

// TestCommandsRunShouldReturnNil checks run succeeds with nil args.
func TestCommandsRunShouldReturnNil(t *testing.T) {
    assert.Nil(t, commands(0).run(nil, nil))
}

// TestCommandsShouldRegisterItself checks the command self-registers into
// the package-level allCommands registry.
func TestCommandsShouldRegisterItself(t *testing.T) {
    _, ok := allCommands[commands(0).name()]
    assert.True(t, ok)
}

// TestSortedNamesShouldHaveTheSameLengthAsAllCommands checks the sorted name
// list covers every registered command.
func TestSortedNamesShouldHaveTheSameLengthAsAllCommands(t *testing.T) {
    assert.Equal(t, len(sortedCommandNames()), len(allCommands))
}

// TestMaxLenShouldBeGreaterThanZero checks the name-width helper is positive.
func TestMaxLenShouldBeGreaterThanZero(t *testing.T) {
    assert.True(t, maxLen() > 0)
}
|
package iface
// IRequest abstracts a single inbound request: its payload length, the raw
// payload bytes, and the connection it arrived on.
type IRequest interface {
    GetLen() uint32       // payload length in bytes
    GetData() []byte      // raw payload
    GetConn() Iconnection // originating connection (NOTE(review): name looks misspelled vs. the usual IConnection — defined elsewhere, cannot rename here)
}
|
package server
import (
"encoding/json"
"testing"
"github.com/yekhlakov/gojsonrpc/common"
)
// TestJsonRpcServer_AddHandler checks that handler methods are extracted into
// s.Methods, that registering the same handler twice is a no-op, and that a
// second distinct handler adds its own method.
func TestJsonRpcServer_AddHandler(t *testing.T) {
    s := NewServer()
    s.AddHandler(test_PassHandler{}, "Handle_")
    if len(s.Methods) != 1 {
        t.Errorf("Methods were not extracted from the first handler")
    }
    s.AddHandler(test_PassHandler{}, "Handle_")
    if len(s.Methods) != 1 {
        t.Errorf("Methods were extracted from the first handler twice")
    }
    s.AddHandler(test_ErrorHandler{}, "Handle_")
    if len(s.Methods) != 2 {
        t.Errorf("Methods were not extracted from the second handler")
    }
}
// TestJsonRpcServer_GetMethod checks that a registered method can be looked
// up by its JSON-RPC name and carries that name.
func TestJsonRpcServer_GetMethod(t *testing.T) {
    s := NewServer()
    s.AddHandler(test_PassHandler{}, "Handle_")
    if len(s.Methods) != 1 {
        t.Errorf("Methods were not extracted from the first handler")
    }
    m, ok := s.GetMethod("pass")
    if !ok {
        t.Errorf("Method was not found")
    } else {
        if m.Name != "pass" {
            t.Errorf("wrong method was extracted")
        }
    }
}
// TestJsonRpcServer_ProcessRawRequest checks single-request processing:
// malformed JSON yields an error, and each handler produces the exact
// expected response (including JSON-RPC error objects) while leaving the
// raw request untouched in the context.
func TestJsonRpcServer_ProcessRawRequest(t *testing.T) {
    s := NewServer()
    s.AddHandler(test_EmptyHandler{}, "Handle_")
    rc := common.EmptyRequestContext()
    rc.RawRequest = []byte(`{"badjson":`)
    err := s.ProcessRawRequest(&rc)
    if err == nil {
        t.Errorf("Bad json parsing did not generate an error")
    }
    // Table: handler to register, raw request in, exact raw response out.
    testData := []struct {
        Handler Handler
        In      string
        Out     string
    }{
        {
            test_EmptyHandler{},
            `{"jsonrpc":"2.0","id":"test","method":"empty","params":{"name":"lol"}}`,
            `{"jsonrpc":"2.0","id":"test","result":{}}`,
        },
        {
            test_ConstHandler{},
            `{"jsonrpc":"2.0","id":"test","method":"const","params":{"name":"lol"}}`,
            `{"jsonrpc":"2.0","id":"test","result":{"value":"test"}}`,
        },
        {
            test_PassHandler{},
            `{"jsonrpc":"2.0","id":"test","method":"pass","params":{"name":"lol"}}`,
            `{"jsonrpc":"2.0","id":"test","result":{"value":"lol"}}`,
        },
        {
            test_ErrorHandler{},
            `{"jsonrpc":"2.0","id":"test","method":"error","params":{"name":"lol"}}`,
            `{"jsonrpc":"2.0","id":"test","error":{"code":666,"message":"error"}}`,
        },
        {
            test_PassHandler{},
            `{"jsonrpc":"2.0","id":"test","method":"lol","params":{"name":"lol"}}`,
            `{"jsonrpc":"2.0","id":"test","error":{"code":-32601,"message":"Method not found"}}`,
        },
    }
    for k, data := range testData {
        s := NewServer()
        s.AddHandler(data.Handler, "Handle_")
        rc := common.EmptyRequestContext()
        rc.RawRequest = []byte(data.In)
        err := rc.ParseRawRequest()
        if err != nil {
            t.Errorf("%d Request parse failed", k)
        }
        _ = s.ProcessRawRequest(&rc)
        if string(rc.RawRequest) != data.In {
            t.Errorf("%d Request context was not passed through properly", k)
        }
        if string(rc.RawResponse) != data.Out {
            t.Errorf("%d Request was not processed properly", k)
        }
    }
}
// TestJsonRpcServer_ProcessRawBatch checks batch processing: an empty batch,
// a single request, a batch containing malformed JSON (per-entry parse
// error), and a batch mixing a known and an unknown method.
//
// Fix: the failure paths called t.Errorf with non-constant first arguments
// (t.Errorf(err.Error()) / t.Errorf(string(rc.RawResponse))); that fails the
// `go vet` printf check and any '%' in the data would corrupt the output.
// They now use an explicit "%s" format.
func TestJsonRpcServer_ProcessRawBatch(t *testing.T) {
    testData := []struct {
        Handler Handler
        In      []string
        Out     string
    }{
        {
            test_EmptyHandler{},
            []string{},
            `[]`,
        },
        {
            test_EmptyHandler{},
            []string{`{"jsonrpc":"2.0","id":"test","method":"empty","params":{"name":"lol"}}`},
            `[{"jsonrpc":"2.0","id":"test","result":{}}]`,
        },
        {
            test_EmptyHandler{},
            []string{
                `{"jsonrpc":"2.0","id":"test","method":"empty","params":{"name":"lol"}}`,
                `{"badjson`,
            },
            `[{"jsonrpc":"2.0","id":"test","result":{}},{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error"}}]`,
        },
        {
            test_PassHandler{},
            []string{
                `{"jsonrpc":"2.0","id":"t1","method":"pass","params":{"name":"lol"}}`,
                `{"jsonrpc":"2.0","id":"t2","method":"nope","params":{"name":"kek"}}`,
            },
            `[{"jsonrpc":"2.0","id":"t1","result":{"value":"lol"}},{"jsonrpc":"2.0","id":"t2","error":{"code":-32601,"message":"Method not found"}}]`,
        },
    }
    for k, data := range testData {
        s := NewServer()
        s.AddHandler(data.Handler, "Handle_")
        rc := common.EmptyRequestContext()
        batch := make([]json.RawMessage, len(data.In))
        for i, v := range data.In {
            batch[i] = []byte(v)
        }
        err := s.ProcessRawBatch(batch, &rc)
        if err != nil {
            t.Errorf("%d Batch processing generated an error", k)
            t.Errorf("%s", err)
        } else if string(rc.RawResponse) != data.Out {
            t.Errorf("%d Request was not processed properly", k)
            t.Errorf("%s", rc.RawResponse)
        }
    }
}
// TestJsonRpcServer_ProcessRawInput checks the top-level entry point that
// dispatches raw bytes to either single-request or batch handling: empty
// batches, leading whitespace, non-object input, malformed JSON, valid
// batches, and valid single requests.
func TestJsonRpcServer_ProcessRawInput(t *testing.T) {
    testData := []struct {
        Name    string
        Handler Handler
        In      string
        Out     string
    }{
        {
            "Empty Batch",
            test_EmptyHandler{},
            `[]`,
            `{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"}}`,
        },
        {
            "Leading whitespace",
            test_EmptyHandler{},
            " \t\r\n []",
            `{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"}}`, // todo: remake
        },
        {
            "Bad input",
            test_EmptyHandler{},
            "666",
            `{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"}}`,
        },
        {
            "Bad json",
            test_EmptyHandler{},
            "[...]",
            `{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error"}}`,
        },
        {
            "Ok batch",
            test_EmptyHandler{},
            `[{"jsonrpc":"2.0","id":"test","method":"empty","params":{"name":"lol"}}]`,
            `[{"jsonrpc":"2.0","id":"test","result":{}}]`,
        },
        {
            "Invalid request in a batch",
            test_EmptyHandler{},
            `[{"jsonrpc":"2.0","id":"test","method":"empty","params":{"name":"lol"}},{"ololo":"trololo"}]`,
            `[{"jsonrpc":"2.0","id":"test","result":{}},{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"}}]`,
        },
        {
            "Bad request",
            test_EmptyHandler{},
            "{.}",
            `{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error"}}`,
        },
        {
            "Ok request",
            test_EmptyHandler{},
            `{"jsonrpc":"2.0","id":"test","method":"empty","params":{"name":"lol"}}`,
            `{"jsonrpc":"2.0","id":"test","result":{}}`,
        },
    }
    for k, data := range testData {
        s := NewServer()
        s.AddHandler(data.Handler, "Handle_")
        rc := common.EmptyRequestContext()
        rc.RawRequest = []byte(data.In)
        _ = s.ProcessRawInput(&rc)
        if string(rc.RawResponse) != data.Out {
            t.Errorf("%d '%s': input was not processed properly", k, data.Name)
            t.Errorf("expected %s", data.Out)
            t.Errorf("received %s", string(rc.RawResponse))
        }
    }
}
|
/*
make用于内建类型(map、slice 和channel)的内存分配。new用于各种类型的内存分配。
内建函数new本质上说跟其它语言中的同名函数功能一样:new(T)分配了零值填充的T类型的内存空间,并且返回其地址,
即一个*T类型的值。用Go的术语说,它返回了一个指针,指向新分配的类型T的零值。有一点非常重要:
new返回指针。
内建函数make(T, args)与new(T)有着不同的功能,make只能创建slice、map和channel,并且返回一个有初始值(非零)的T类型,
而不是*T。本质来讲,导致这三个类型有所不同的原因是指向数据结构的引用在使用前必须被初始化。
例如,一个slice,是一个包含指向数据(内部array)的指针、长度和容量的三项描述符;
在这些项目被初始化之前,slice为nil。对于slice、map和channel来说,make初始化了内部的数据结构,填充适当的值。
make返回初始化后的(非零)值。
总的来说,make需要传入参数才可以分配内存,返回的是对象,该对象一般包含指向数据的指针,及其他描述信息。如slice包含一个
指向数组的指针,长度和容量等描述符。
而new则不需要传入参数,系统默认分配内存并用零值填充,返回指向数据的指针。
*/
package main
import (
"fmt"
)
// main demonstrates the make/new distinction described in the file comment.
//
// Fix: the file imports "fmt" but the previous empty main never used it,
// which is a compile error in Go (unused import). The demo below uses fmt
// and illustrates the documented behavior: new(T) returns a *T pointing at
// a zero value, make initializes a slice header with len/cap.
func main() {
    p := new(int)          // *int pointing at 0
    s := make([]int, 0, 4) // initialized slice: len 0, cap 4
    fmt.Println(*p, s, len(s), cap(s))
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"testing"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testdata"
"github.com/stretchr/testify/require"
)
// TestSetOperationOnDiffColType runs the recorded set-operation test cases
// (UNION/EXCEPT/INTERSECT) over tables whose second column has differing
// types (int / varchar / decimal), comparing both the explain plan and the
// sorted result rows against the testdata file.
func TestSetOperationOnDiffColType(t *testing.T) {
    store := testkit.CreateMockStore(t)
    tk := testkit.NewTestKit(t, store)
    tk.MustExec(`use test`)
    tk.MustExec(`drop table if exists t1, t2, t3`)
    tk.MustExec(`create table t1(a int, b int)`)
    tk.MustExec(`create table t2(a int, b varchar(20))`)
    tk.MustExec(`create table t3(a int, b decimal(30,10))`)
    tk.MustExec(`insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null)`)
    tk.MustExec(`insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3')`)
    tk.MustExec(`insert into t3 values (2,2.1),(3,3)`)
    var input []string
    var output []struct {
        SQL  string
        Plan []string
        Res  []string
    }
    executorSuiteData.LoadTestCases(t, &input, &output)
    for i, tt := range input {
        // In record mode, regenerate the expected plan/result instead of checking.
        testdata.OnRecord(func() {
            output[i].SQL = tt
            output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
            output[i].Res = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
        })
        tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
        tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
    }
}
// issue-23038: wrong key range of index scan for year column
// TestIndexScanWithYearCol replays the recorded queries against a table
// indexed on a YEAR column, comparing explain output and sorted results
// against the testdata file.
func TestIndexScanWithYearCol(t *testing.T) {
    store := testkit.CreateMockStore(t)
    tk := testkit.NewTestKit(t, store)
    tk.MustExec("use test;")
    tk.MustExec("set tidb_cost_model_version=2")
    tk.MustExec("drop table if exists t;")
    tk.MustExec("create table t (c1 year(4), c2 int, key(c1));")
    tk.MustExec("insert into t values(2001, 1);")
    var input []string
    var output []struct {
        SQL  string
        Plan []string
        Res  []string
    }
    executorSuiteData.LoadTestCases(t, &input, &output)
    for i, tt := range input {
        // In record mode, regenerate the expected plan/result instead of checking.
        testdata.OnRecord(func() {
            output[i].SQL = tt
            output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + tt).Rows())
            output[i].Res = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
        })
        tk.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].Plan...))
        tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
    }
}
// TestSetOperation replays the recorded set-operation cases over
// identically-typed tables, then covers two regressions: issue #40279
// (EXCEPT with from_base64/elt over a clustered-index table) and an EXCEPT
// over a gbk-collated column.
func TestSetOperation(t *testing.T) {
    store := testkit.CreateMockStore(t)
    tk := testkit.NewTestKit(t, store)
    tk.MustExec(`use test`)
    tk.MustExec("set tidb_cost_model_version=2")
    tk.MustExec(`drop table if exists t1, t2, t3`)
    tk.MustExec(`create table t1(a int)`)
    tk.MustExec(`create table t2 like t1`)
    tk.MustExec(`create table t3 like t1`)
    tk.MustExec(`insert into t1 values (1),(1),(2),(3),(null)`)
    tk.MustExec(`insert into t2 values (1),(2),(null),(null)`)
    tk.MustExec(`insert into t3 values (2),(3)`)
    var input []string
    var output []struct {
        SQL  string
        Plan []string
        Res  []string
    }
    executorSuiteData.LoadTestCases(t, &input, &output)
    for i, tt := range input {
        // In record mode, regenerate the expected plan/result instead of checking.
        testdata.OnRecord(func() {
            output[i].SQL = tt
            output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
            output[i].Res = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
        })
        tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
        tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
    }
    // from https://github.com/pingcap/tidb/issues/40279
    tk.MustExec("CREATE TABLE `issue40279` (`a` char(155) NOT NULL DEFAULT 'on1unvbxp5sko6mbetn3ku26tuiyju7w3wc0olzto9ew7gsrx',`b` mediumint(9) NOT NULL DEFAULT '2525518',PRIMARY KEY (`b`,`a`) /*T![clustered_index] CLUSTERED */);")
    tk.MustExec("insert into `issue40279` values ();")
    tk.MustQuery("( select `issue40279`.`b` as r0 , from_base64( `issue40279`.`a` ) as r1 from `issue40279` ) " +
        "except ( " +
        "select `issue40279`.`a` as r0 , elt(2, `issue40279`.`a` , `issue40279`.`a` ) as r1 from `issue40279`);").
        Check(testkit.Rows("2525518 <nil>"))
    tk.MustExec("drop table if exists t2")
    // gbk collation: EXCEPT between elt() output and a hex literal must match.
    tk.MustExec("CREATE TABLE `t2` ( `a` varchar(20) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")
    tk.MustExec("insert into t2 values(0xCED2)")
    result := tk.MustQuery("(select elt(2,t2.a,t2.a) from t2) except (select 0xCED2 from t2)")
    rows := result.Rows()
    require.Len(t, rows, 0)
}
// TestCompareIssue38361 covers datetime-vs-numeric comparison semantics
// (issue 38361): a numeric literal that parses as a valid datetime compares
// as datetime, while an invalid one (20231310) falls back to real-number
// comparison. Column-vs-column comparisons always use the real type.
func TestCompareIssue38361(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("drop database if exists TEST1")
	tk.MustExec("create database TEST1")
	tk.MustExec("use TEST1")
	tk.MustExec("create table t(a datetime, b bigint, c bigint)")
	tk.MustExec("insert into t values(cast('2023-08-09 00:00:00' as datetime), 20230809, 20231310)")
	// datetime column vs numeric constant: constant is coerced to datetime.
	tk.MustQuery("select a > 20230809 from t").Check(testkit.Rows("0"))
	tk.MustQuery("select a = 20230809 from t").Check(testkit.Rows("1"))
	tk.MustQuery("select a < 20230810 from t").Check(testkit.Rows("1"))
	// 20231310 can't be converted to valid datetime, thus should be compared using real date type,and datetime will be
	// converted to something like 'YYYYMMDDHHMMSS', bigger than 20231310
	tk.MustQuery("select a < 20231310 from t").Check(testkit.Rows("0"))
	tk.MustQuery("select 20230809 < a from t").Check(testkit.Rows("0"))
	tk.MustQuery("select 20230809 = a from t").Check(testkit.Rows("1"))
	tk.MustQuery("select 20230810 > a from t").Check(testkit.Rows("1"))
	tk.MustQuery("select 20231310 > a from t").Check(testkit.Rows("0"))
	// constant datetime cmp numeric constant should be compared as real data type
	tk.MustQuery("select cast('2023-08-09 00:00:00' as datetime) > 20230809 from t").Check(testkit.Rows("1"))
	tk.MustQuery("select cast('2023-08-09 00:00:00' as datetime) = 20230809 from t").Check(testkit.Rows("0"))
	tk.MustQuery("select cast('2023-08-09 00:00:00' as datetime) < 20230810 from t").Check(testkit.Rows("0"))
	tk.MustQuery("select cast('2023-08-09 00:00:00' as datetime) < 20231310 from t").Check(testkit.Rows("0"))
	tk.MustQuery("select 20230809 < cast('2023-08-09 00:00:00' as datetime) from t").Check(testkit.Rows("1"))
	tk.MustQuery("select 20230809 = cast('2023-08-09 00:00:00' as datetime) from t").Check(testkit.Rows("0"))
	tk.MustQuery("select 20230810 > cast('2023-08-09 00:00:00' as datetime) from t").Check(testkit.Rows("0"))
	tk.MustQuery("select 20231310 > cast('2023-08-09 00:00:00' as datetime) from t").Check(testkit.Rows("0"))
	// datetime column cmp numeric column should be compared as real data type
	tk.MustQuery("select a > b from t").Check(testkit.Rows("1"))
	tk.MustQuery("select a = b from t").Check(testkit.Rows("0"))
	tk.MustQuery("select a < b + 1 from t").Check(testkit.Rows("0"))
	tk.MustQuery("select a < c from t").Check(testkit.Rows("0"))
	tk.MustQuery("select b < a from t").Check(testkit.Rows("1"))
	tk.MustQuery("select b = a from t").Check(testkit.Rows("0"))
	tk.MustQuery("select b > a from t").Check(testkit.Rows("0"))
	tk.MustQuery("select c > a from t").Check(testkit.Rows("0"))
}
|
package sort
// BucketSort sorts arr in place with bucket sort: values are distributed into
// buckets of width bucketSize, each bucket is sorted with InsertionSort, and
// the buckets are concatenated back into arr.
//
// A bucketSize < 1 is clamped to 1; the original code divided by zero in
// that case and panicked.
func BucketSort(arr *[]int, bucketSize int) {
	if len(*arr) < 2 {
		return
	}
	if bucketSize < 1 {
		// Guard against division by zero below.
		bucketSize = 1
	}
	var maxVal = (*arr)[0]
	var minVal = (*arr)[0]
	// Find the minimum and maximum values to size the bucket array.
	for _, v := range *arr {
		if v < minVal {
			minVal = v
		} else if v > maxVal {
			maxVal = v
		}
	}
	// Create the buckets: one per bucketSize-wide value range.
	buckets := make([][]int, (maxVal-minVal)/bucketSize+1)
	// Distribute every element into its bucket.
	for _, v := range *arr {
		buckets[(v-minVal)/bucketSize] = append(buckets[(v-minVal)/bucketSize], v)
	}
	// Sort each bucket in place, then copy buckets back in order.
	var arrIdx = 0
	for i := range buckets {
		InsertionSort(&buckets[i])
		for _, vj := range buckets[i] {
			(*arr)[arrIdx] = vj
			arrIdx++
		}
	}
}
|
// Package adapter contains the required logic for creating data structures used for
// feeding CloudFormation templates.
//
// It follows the adapter pattern https://en.wikipedia.org/wiki/Adapter_pattern in the
// sense that it has the knowledge to transform a aws custom object into a data structure
// easily interpolable into the templates without any additional view logic.
//
// There's a base template in `service/templates/cloudformation/guest/main.yaml` which defines
// the basic structure and includes the rest of templates that form the stack as nested
// templates. Those subtemplates should use a `define` action with the name that will be
// used to refer to them from the main template, as explained here
// https://golang.org/pkg/text/template/#hdr-Nested_template_definitions
//
// Each adapter is related to one of these nested templates. It includes the data structure
// with all the values needed to interpolate in the related template and the logic required
// to obtain them, this logic is packed into functions called `hydraters`.
//
// When extending the stack we will just need to:
// * Add the template file in `service/template/cloudformation/guest` and modify
// `service/template/cloudformation/main.yaml` to include the new template.
// * Add the adapter logic file in `service/resource/cloudformation/adapter` with the type
// definition and the Hydrater function to fill the fields (like asg.go or
// launch_configuration.go).
// * Add the new type to the Adapter type in `service/resource/cloudformation/adapter/adapter.go`
// and include the Hydrater function in the `hydraters` slice.
package adapter
import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/giantswarm/microerror"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"github.com/giantswarm/aws-operator/service/controller/clusterapi/v30/controllercontext"
)
// Config collects every input needed to render the guest-cluster
// CloudFormation templates; NewGuest passes it to each Hydrater.
type Config struct {
	APIWhitelist                    APIWhitelist
	AWSRegion                       string
	ControlPlaneAccountID           string
	ControlPlaneNATGatewayAddresses []*ec2.Address
	ControlPlanePeerRoleARN         string
	ControlPlaneVPCID               string
	ControlPlaneVPCCidr             string
	CustomObject                    v1alpha1.Cluster
	EncrypterBackend                string
	GuestAccountID                  string
	InstallationName                string
	PublicRouteTables               string
	Route53Enabled                  bool
	StackState                      StackState
	TenantClusterAccountID          string
	TenantClusterKMSKeyARN          string
	TenantClusterAvailabilityZones  []controllercontext.ContextSpecTenantClusterTCCPAvailabilityZone
}
// Adapter is the root data structure interpolated into the main guest
// CloudFormation template.
type Adapter struct {
	Guest GuestAdapter
}
// NewGuest builds a fully hydrated guest Adapter from cfg. Each hydrater
// fills one sub-adapter; the first failure aborts the build and the error
// is returned masked.
func NewGuest(cfg Config) (Adapter, error) {
	var adapter Adapter

	steps := []Hydrater{
		adapter.Guest.IAMPolicies.Adapt,
		adapter.Guest.InternetGateway.Adapt,
		adapter.Guest.Instance.Adapt,
		adapter.Guest.LoadBalancers.Adapt,
		adapter.Guest.NATGateway.Adapt,
		adapter.Guest.Outputs.Adapt,
		adapter.Guest.RecordSets.Adapt,
		adapter.Guest.RouteTables.Adapt,
		adapter.Guest.SecurityGroups.Adapt,
		adapter.Guest.Subnets.Adapt,
		adapter.Guest.VPC.Adapt,
	}

	for _, adapt := range steps {
		err := adapt(cfg)
		if err != nil {
			return Adapter{}, microerror.Mask(err)
		}
	}

	return adapter, nil
}
// GuestAdapter groups the per-subtemplate adapters; each field corresponds
// to one nested CloudFormation template of the guest stack.
type GuestAdapter struct {
	IAMPolicies     GuestIAMPoliciesAdapter
	InternetGateway GuestInternetGatewayAdapter
	Instance        GuestInstanceAdapter
	LoadBalancers   GuestLoadBalancersAdapter
	NATGateway      GuestNATGatewayAdapter
	Outputs         GuestOutputsAdapter
	RecordSets      GuestRecordSetsAdapter
	RouteTables     GuestRouteTablesAdapter
	SecurityGroups  GuestSecurityGroupsAdapter
	Subnets         GuestSubnetsAdapter
	VPC             GuestVPCAdapter
}
|
package gitcomm
import (
"fmt"
"os"
)
// CheckIfError prints err in red to stdout and terminates the process with
// status 1; it is a no-op when err is nil. Note: despite the original
// comment, it exits rather than panicking.
func CheckIfError(err error) {
	if err == nil {
		return
	}
	fmt.Printf("\x1b[31;1m%s\x1b[0m\n", fmt.Sprintf("error: %s", err))
	os.Exit(1)
}
// Info prints the formatted message in bold blue, used to describe the
// example commands that are about to run.
func Info(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	fmt.Printf("\x1b[34;1m%s\x1b[0m\n", msg)
}
// ExitIfError exits with status 1 if an error is not nil.
// Unlike CheckIfError it terminates silently, printing nothing.
func ExitIfError(err error) {
	if err == nil {
		return
	}
	os.Exit(1)
}
|
// @Package gotorsocks
// @Author: Kalle Vedin <kalle.vedin@fripost.org>
// @Author: hIMEI <himei@tuta.io>
// @Date: 2017-12-16 22:02:59
// @Copyright © 2017 hIMEI <himei@tuta.io>
// @license MIT
// gotorsocks Here is GitHub fork of https://bitbucket.org/kallevedin/torsocks.
// Import path "code.google.com/p/go.net/proxy" (same as "golang.org/x/net/proxy")
// used in original version, is fatal broken, so original package uninstallable.
// In current version import path corrected, and some detail of code is changed
//
// https://bitbucket.org/kallevedin/torsocks is relised on Public Domain.
package gotorsocks
import (
"bytes"
"errors"
"net"
"time"
"golang.org/x/net/proxy"
)
// TorGate is a Tor proxy. Is actually just a string with the address of the Tor Proxy.
// (Needs to be an IPv4 address or a domain name that can be translated to an IPv4
// address, with a port.)
// Examples: "127.0.0.1:9050", "10.0.30.11:9150".
type TorGate string

// TOR_GATE_ is the default local Tor SOCKS port probed by NewTorGate.
const TOR_GATE_ string = "127.0.0.1:9050"
// NewTorGate creates a new TorGate after verifying that a Tor proxy answers
// on TOR_GATE_: it sends an HTTP-looking request and expects Tor's canonical
// "Tor is not an HTTP Proxy" reply within the deadline.
//
// Fix over the original: the probe connection is now always closed (it was
// leaked on every error path), and Write/SetReadDeadline errors are no
// longer silently ignored.
func NewTorGate() (*TorGate, error) {
	connect, err := net.DialTimeout("tcp4", TOR_GATE_, 10*time.Second)
	if err != nil {
		return nil, errors.New("Could not test TOR_GATE_: " + err.Error())
	}
	// Release the probe connection no matter which path returns.
	defer connect.Close()
	// Tor proxies reply to anything that looks like
	// HTTP GET or POST with known error message.
	if _, err := connect.Write([]byte("GET /\n")); err != nil {
		return nil, errors.New("It is not TOR_GATE_")
	}
	if err := connect.SetReadDeadline(time.Now().Add(10 * time.Second)); err != nil {
		return nil, errors.New("It is not TOR_GATE_")
	}
	buf := make([]byte, 4096)
	for {
		n, err := connect.Read(buf)
		if err != nil {
			return nil, errors.New("It is not TOR_GATE_")
		}
		if bytes.Contains(buf[:n], []byte("Tor is not an HTTP Proxy")) {
			gate := TorGate(TOR_GATE_)
			return &gate, nil
		}
	}
}
// DialTor opens a TCP connection to address (typically an .onion host:port)
// through the Tor SOCKS5 proxy that gate points at.
func (gate *TorGate) DialTor(address string) (net.Conn, error) {
	socksDialer, err := proxy.SOCKS5("tcp4", string(*gate), nil, proxy.Direct)
	if err != nil {
		return nil, errors.New("Could not connect to TOR_GATE_: " + err.Error())
	}
	conn, err := socksDialer.Dial("tcp4", address)
	if err != nil {
		return nil, errors.New("Failed to connect: " + err.Error())
	}
	return conn, nil
}
|
//go:build test
// +build test
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package service
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os/exec"
"regexp"
"time"
"github.com/Azure/aks-engine/test/e2e/kubernetes/util"
"github.com/pkg/errors"
)
// List holds a list of services returned from kubectl get svc
type List struct {
	Services []Service `json:"items"`
}

// Service represents a kubernetes service
type Service struct {
	Metadata Metadata `json:"metadata"`
	Spec     Spec     `json:"spec"`
	Status   Status   `json:"status"`
}

// Metadata holds information like name, namespace, creation time, and labels
type Metadata struct {
	CreatedAt time.Time         `json:"creationTimestamp"`
	Labels    map[string]string `json:"labels"`
	Name      string            `json:"name"`
	Namespace string            `json:"namespace"`
}

// Spec holds information like clusterIP, ports, and service type
type Spec struct {
	ClusterIP string `json:"clusterIP"`
	Ports     []Port `json:"ports"`
	Type      string `json:"type"`
}

// Port represents a service port definition
type Port struct {
	NodePort   int    `json:"nodePort"`
	Port       int    `json:"port"`
	Protocol   string `json:"protocol"`
	TargetPort int    `json:"targetPort"`
}

// Status holds the load balancer definition
type Status struct {
	LoadBalancer LoadBalancer `json:"loadBalancer"`
}

// LoadBalancer holds the ingress definitions
type LoadBalancer struct {
	// Ingress entries are maps such as {"ip": "..."} or {"hostname": "..."}.
	Ingress []map[string]string `json:"ingress"`
}

// GetResult is a return struct for GetAsync
type GetResult struct {
	svc *Service
	err error
}
// GetAsync wraps Get with a struct response for goroutine + channel usage
func GetAsync(name, namespace string) GetResult {
	var result GetResult
	result.svc, result.err = Get(name, namespace)
	return result
}
// Get returns the service definition specified in a given namespace
func Get(name, namespace string) (*Service, error) {
	cmd := exec.Command("k", "get", "svc", "-o", "json", "-n", namespace, name)
	output, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error getting svc:\n")
		util.PrintCommand(cmd)
		return nil, err
	}
	var svc Service
	if err = json.Unmarshal(output, &svc); err != nil {
		log.Printf("Error unmarshalling service json:%s\n", err)
		return nil, err
	}
	return &svc, nil
}
// GetAll will return all services in a given namespace
func GetAll(namespace string) (*List, error) {
	cmd := exec.Command("k", "get", "svc", "-n", namespace, "-o", "json")
	util.PrintCommand(cmd)
	output, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error getting all services:\n")
		return nil, err
	}
	var serviceList List
	if err = json.Unmarshal(output, &serviceList); err != nil {
		log.Printf("Error unmarshalling services json:%s\n", err)
		return nil, err
	}
	return &serviceList, nil
}
// GetAllByPrefixResult is a return struct for GetAllByPrefixAsync
type GetAllByPrefixResult struct {
	svcs []Service
	err  error
}
// GetAllByPrefixAsync wraps GetAllByPrefix with a struct response for
// goroutine + channel usage
func GetAllByPrefixAsync(prefix, namespace string) GetAllByPrefixResult {
	var result GetAllByPrefixResult
	result.svcs, result.err = GetAllByPrefix(prefix, namespace)
	return result
}
// GetAllByPrefix will return all services in a given namespace whose name
// starts with "<prefix>-".
//
// Fix over the original: the pattern is now anchored and the prefix quoted.
// The old `prefix+"-.*"` matched anywhere inside the name and treated regex
// metacharacters in prefix as syntax. The pattern is also compiled once
// instead of once per service.
func GetAllByPrefix(prefix, namespace string) ([]Service, error) {
	sl, err := GetAll(namespace)
	if err != nil {
		return nil, err
	}
	re, err := regexp.Compile("^" + regexp.QuoteMeta(prefix) + "-")
	if err != nil {
		log.Printf("Error trying to match service name:%s\n", err)
		return nil, err
	}
	services := []Service{}
	for _, s := range sl.Services {
		if re.MatchString(s.Metadata.Name) {
			services = append(services, s)
		}
	}
	return services, nil
}
// Delete will delete a service in a given namespace, retrying up to retries
// times and returning the last kubectl error (nil once a delete succeeds).
func (s *Service) Delete(retries int) error {
	var zeroValueDuration time.Duration
	var kubectlOutput []byte
	var kubectlError error
	for i := 0; i < retries; i++ {
		cmd := exec.Command("k", "delete", "svc", "-n", s.Metadata.Namespace, s.Metadata.Name)
		kubectlOutput, kubectlError = util.RunAndLogCommand(cmd, zeroValueDuration)
		if kubectlError != nil {
			// Fix: the original passed namespace and name in swapped order,
			// logging "service <namespace> in namespace <name>".
			log.Printf("Error while trying to delete service %s in namespace %s:%s\n", s.Metadata.Name, s.Metadata.Namespace, kubectlError)
			log.Printf("%s\n", string(kubectlOutput))
			continue
		}
		break
	}
	return kubectlError
}
// DescribeServices describes all service resources whose name matches a substring
func DescribeServices(svcPrefix, namespace string) {
	matching, err := GetAllByPrefix(svcPrefix, namespace)
	if err != nil {
		log.Printf("Unable to get services matching prefix %s in namespace %s: %s", svcPrefix, namespace, err)
	}
	for i := range matching {
		if describeErr := matching[i].Describe(); describeErr != nil {
			log.Printf("Unable to describe service %s: %s", matching[i].Metadata.Name, describeErr)
		}
	}
}
// Describe will run `kubectl describe` for this service and log its output.
func (s *Service) Describe() error {
	var commandTimeout time.Duration
	cmd := exec.Command("k", "describe", "svc", s.Metadata.Name, "-n", s.Metadata.Namespace)
	output, err := util.RunAndLogCommand(cmd, commandTimeout)
	log.Printf("\n%s\n", string(output))
	return err
}
// GetNodePort returns the NodePort mapped to the given service port,
// or 0 when no port matches.
func (s *Service) GetNodePort(port int) int {
	for i := range s.Spec.Ports {
		if s.Spec.Ports[i].Port == port {
			return s.Spec.Ports[i].NodePort
		}
	}
	return 0
}
// WaitForIngress polls the service every sleep interval until its load
// balancer reports an ingress, copying it onto s, or fails after timeout.
//
// Fix over the original: the producer goroutine now sends inside the select.
// Before, once the consumer returned, the goroutine blocked forever on the
// unbuffered send (ctx.Done was only checked between sends), leaking one
// goroutine per call.
func (s *Service) WaitForIngress(timeout, sleep time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	var mostRecentWaitForIngressError error
	ch := make(chan GetResult)
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case ch <- GetAsync(s.Metadata.Name, s.Metadata.Namespace):
				time.Sleep(sleep)
			}
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentWaitForIngressError = result.err
			svc := result.svc
			if mostRecentWaitForIngressError == nil {
				if svc != nil && svc.Status.LoadBalancer.Ingress != nil {
					s.Status.LoadBalancer.Ingress = svc.Status.LoadBalancer.Ingress
					return nil
				}
			}
		case <-ctx.Done():
			err := s.Describe()
			if err != nil {
				log.Printf("Unable to describe service\n: %s", err)
			}
			return errors.Errorf("WaitForIngress timed out: %s\n", mostRecentWaitForIngressError)
		}
	}
}
// WaitOnDeleted returns true once no services match servicePrefix in the
// namespace, or false with the last lookup error after timeout.
//
// Fix over the original: the producer goroutine sends inside the select so
// it can exit on cancellation; the old unconditional send on an unbuffered
// channel leaked the goroutine once the consumer returned.
func WaitOnDeleted(servicePrefix, namespace string, sleep, timeout time.Duration) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetAllByPrefixResult)
	var mostRecentWaitOnDeletedError error
	var svcs []Service
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case ch <- GetAllByPrefixAsync(servicePrefix, namespace):
				time.Sleep(sleep)
			}
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentWaitOnDeletedError = result.err
			svcs = result.svcs
			if mostRecentWaitOnDeletedError == nil {
				if len(svcs) == 0 {
					return true, nil
				}
			}
		case <-ctx.Done():
			DescribeServices(servicePrefix, namespace)
			return false, errors.Errorf("WaitOnDeleted timed out: %s\n", mostRecentWaitOnDeletedError)
		}
	}
}
// ValidateWithRetry repeatedly runs Validate until it succeeds or timeout
// elapses, returning the most recent validation error on timeout.
//
// Fix over the original: the producer goroutine sends inside the select so
// it can exit on cancellation; the old unconditional send on an unbuffered
// channel leaked the goroutine once the consumer returned.
func (s *Service) ValidateWithRetry(bodyResponseTextMatch string, sleep, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	var mostRecentValidateWithRetryError error
	ch := make(chan error)
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case ch <- s.Validate(bodyResponseTextMatch):
				time.Sleep(sleep)
			}
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentValidateWithRetryError = result
			if mostRecentValidateWithRetryError == nil {
				return nil
			}
		case <-ctx.Done():
			err := s.Describe()
			if err != nil {
				log.Printf("Unable to describe service\n: %s", err)
			}
			return errors.Errorf("ValidateWithRetry timed out: %s\n", mostRecentValidateWithRetryError)
		}
	}
}
// Validate will attempt an http.Get against the service's first load
// balancer ingress IP and match bodyResponseTextMatch (a regular expression)
// against the response body.
//
// Fix over the original: corrects the "evalute" typo in the regex error
// message.
func (s *Service) Validate(bodyResponseTextMatch string) error {
	if len(s.Status.LoadBalancer.Ingress) < 1 {
		return errors.Errorf("No LB ingress IP for service %s", s.Metadata.Name)
	}
	var resp *http.Response
	url := fmt.Sprintf("http://%s", s.Status.LoadBalancer.Ingress[0]["ip"])
	resp, err := http.Get(url)
	// resp can be non-nil even alongside an error; close it whenever present.
	if resp != nil {
		defer resp.Body.Close()
	}
	if err != nil {
		return errors.Errorf("Unable to call service at URL %s: %s", url, err)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return errors.Errorf("Unable to parse response body: %s", err)
	}
	matched, err := regexp.MatchString(bodyResponseTextMatch, string(body))
	if err != nil {
		return errors.Errorf("Unable to evaluate response body against a regular expression match: %s", err)
	}
	if matched {
		return nil
	}
	return errors.Errorf("Got unexpected URL body, expected to find %s, got:\n%s\n", bodyResponseTextMatch, string(body))
}
// CreateServiceFromFile will create a Service from file with a name
func CreateServiceFromFile(filename, name, namespace string) (*Service, error) {
	cmd := exec.Command("k", "create", "-f", filename)
	util.PrintCommand(cmd)
	output, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error trying to create Service %s:%s\n", name, string(output))
		return nil, err
	}
	// Re-fetch the freshly created service so callers get its full definition.
	svc, getErr := Get(name, namespace)
	if getErr != nil {
		log.Printf("Error while trying to fetch Service %s:%s\n", name, getErr)
		return nil, getErr
	}
	return svc, nil
}
// CreateServiceFromFileDeleteIfExist will create a Service from file,
// first deleting any pre-existing service with the same name and waiting
// for the deletion to complete.
func CreateServiceFromFileDeleteIfExist(filename, name, namespace string) (*Service, error) {
	// A failed Get is treated as "does not exist"; only a non-nil result
	// triggers the delete-and-wait path.
	if existing, _ := Get(name, namespace); existing != nil {
		if err := existing.Delete(util.DefaultDeleteRetries); err != nil {
			return nil, err
		}
		// NOTE(review): WaitOnDeleted matches "<prefix>-" names, so a service
		// named exactly `name` may not be what it observes — confirm.
		if _, err := WaitOnDeleted(name, namespace, 10*time.Second, 2*time.Minute); err != nil {
			return nil, err
		}
	}
	return CreateServiceFromFile(filename, name, namespace)
}
// CreateServiceFromFileDeleteIfExistAsync wraps CreateServiceFromFileDeleteIfExist
// with a struct response for goroutine + channel usage
func CreateServiceFromFileDeleteIfExistAsync(filename, name, namespace string) GetResult {
	var result GetResult
	result.svc, result.err = CreateServiceFromFileDeleteIfExist(filename, name, namespace)
	return result
}
// CreateServiceFromFileDeleteIfExistWithRetry will kubectl apply a Service
// from file (deleting any pre-existing one) with retry toleration until it
// succeeds or timeout elapses.
//
// Fix over the original: the producer goroutine sends inside the select so
// it can exit on cancellation; the old unconditional send on an unbuffered
// channel leaked the goroutine once the consumer returned.
func CreateServiceFromFileDeleteIfExistWithRetry(filename, name, namespace string, sleep, timeout time.Duration) (*Service, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetResult)
	var mostRecentCreateServiceFromFileDeleteIfExistWithRetryWithRetryError error
	var s *Service
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case ch <- CreateServiceFromFileDeleteIfExistAsync(filename, name, namespace):
				time.Sleep(sleep)
			}
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentCreateServiceFromFileDeleteIfExistWithRetryWithRetryError = result.err
			s = result.svc
			if mostRecentCreateServiceFromFileDeleteIfExistWithRetryWithRetryError == nil {
				if s != nil {
					return s, nil
				}
			}
		case <-ctx.Done():
			return s, errors.Errorf("CreateServiceFromFileDeleteIfExistWithRetry timed out: %s\n", mostRecentCreateServiceFromFileDeleteIfExistWithRetryWithRetryError)
		}
	}
}
|
package vastflow
import (
"github.com/jack0liu/logs"
"io/ioutil"
"math"
"os"
"os/exec"
"path"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
const (
	// DefaultWndCapacity is the base number of flows the window admits.
	DefaultWndCapacity = 500
	// defaultShrinkLen/defaultExtendLen are the step sizes used when the
	// window is shrunk or extended.
	defaultShrinkLen = 2
	defaultExtendLen = 2
)

// flowWnd is the package-wide flow-window singleton.
var flowWnd = &FlowWnd{
	Capacity: DefaultWndCapacity,
	CurSize:  0,
}

// sys caches the most recent CPU sample; see (*FlowWnd).calc.
var sys = &SysInfo{}

// SysInfo holds sampled system metrics.
type SysInfo struct {
	// CPU is this process's CPU usage as a percentage of one core.
	CPU float64
}

// stat is one /proc/<pid>/stat sample (times in clock ticks or seconds).
type stat struct {
	utime  float64
	stime  float64
	start  float64
	uptime float64
}

// FlowWnd is an admission window whose size can be extended and shrunk at
// runtime based on CPU load.
type FlowWnd struct {
	sync.RWMutex
	// Capacity is the base window size; extended is extra slots granted.
	Capacity int64
	extended int64
	// CurSize is the number of flows currently in flight.
	CurSize    int64
	usedCpuNum int
	// markRate is the CPU watermark ratio used by IsBelowMark.
	markRate float64
	// clk is the kernel clock-tick rate (CLK_TCK); defaults to 100.
	clk     float64
	history stat
}
// InitWnd configures the global flow window with its capacity, the number of
// CPUs it may use, and the CPU watermark ratio. On linux it also reads the
// kernel's CLK_TCK; elsewhere (or on failure) a default of 100 is kept.
func InitWnd(capacity int64, cpuNum int, markRate float64) {
	flowWnd.Capacity = capacity
	flowWnd.usedCpuNum = cpuNum
	flowWnd.markRate = markRate
	flowWnd.clk = 100
	if runtime.GOOS != "linux" {
		return
	}
	out, err := exec.Command("getconf", "CLK_TCK").Output()
	if err != nil {
		return
	}
	flowWnd.clk = parseFloat(formatStdOut(out, 0)[0])
}
// checkShrink runs forever (intended as a background goroutine), sampling
// every 10 seconds. Once the window has kept at least defaultShrinkLen free
// extended slots for 12 consecutive samples (~2 minutes), it gives back
// defaultShrinkLen extended slots.
func checkShrink() {
	t := time.NewTicker(10 * time.Second)
	continueCount := 0
	for {
		select {
		case <-t.C:
			// `break` here leaves the select, not the for loop: nothing to
			// do while no extension is in effect.
			if flowWnd.extended <= 0 {
				break
			}
			flowWnd.Lock()
			if flowWnd.CurSize+defaultShrinkLen < flowWnd.Capacity+flowWnd.extended {
				continueCount++
			} else {
				continueCount = 0
			}
			if continueCount >= 12 {
				flowWnd.Shrink(defaultShrinkLen)
				continueCount = 0
			}
			flowWnd.Unlock()
		}
	}
}
// checkShrink reports whether the window could give back defaultShrinkLen
// slots without dropping below the base capacity.
func (fw *FlowWnd) checkShrink() bool {
	return fw.CurSize+defaultShrinkLen <= fw.Capacity
}
// Extend grows the window by num extra slots.
// NOTE(review): fields are mutated without taking fw's lock; callers appear
// responsible for holding it — confirm.
func (fw *FlowWnd) Extend(num int64) {
	logs.Info("wnd extended")
	fw.extended += num
}
// Shrink removes num extended slots, never letting the extension go negative.
func (fw *FlowWnd) Shrink(num int64) {
	logs.Info("wnd shrink")
	fw.extended -= num
	if fw.extended < 0 {
		fw.extended = 0
	}
}
// Inc admits one more flow into the window.
func (fw *FlowWnd) Inc() {
	fw.CurSize++
}
// Dec removes one flow from the window, clamping the size at zero.
func (fw *FlowWnd) Dec() {
	fw.CurSize--
	if fw.CurSize < 0 {
		fw.CurSize = 0
	}
}
// Remains returns the number of free slots left in the window (base
// capacity plus any extension, minus flows in flight).
func (fw *FlowWnd) Remains() int64 {
	return fw.Capacity + fw.extended - fw.CurSize
}
// IsBelowMark reports whether the current process CPU usage is below the
// configured watermark (markRate × 100% × usable CPUs).
func (fw *FlowWnd) IsBelowMark() bool {
	usage := fw.calc()
	return usage.CPU < fw.markRate*100*float64(fw.usedCpuNum)
}
// Print logs the window's current size against its total capacity.
func (fw *FlowWnd) Print() {
	logs.Info("flow window curSize:%d, total capacity:%d", fw.CurSize, fw.Capacity+fw.extended)
}
// calc samples this process's CPU usage from /proc and stores it (as a
// percentage of one core) in the package-level sys, which is returned.
// On non-linux, or when /proc cannot be read, the previous sys value is
// returned unchanged.
func (fw *FlowWnd) calc() *SysInfo {
	if runtime.GOOS != "linux" {
		logs.Debug("not linux, return 0.0")
		return sys
	}
	// System uptime in seconds is the first field of /proc/uptime.
	uptimeFileBytes, err := ioutil.ReadFile(path.Join("/proc", "uptime"))
	if err != nil {
		logs.Error("get uptime fail")
		return sys
	}
	uptime := parseFloat(strings.Split(string(uptimeFileBytes), " ")[0])
	// Split /proc/<pid>/stat after the ")" closing the comm field so process
	// names containing spaces cannot shift the field indexes.
	procStatFileBytes, _ := ioutil.ReadFile(path.Join("/proc", strconv.Itoa(os.Getpid()), "stat"))
	splitAfter := strings.SplitAfter(string(procStatFileBytes), ")")
	if len(splitAfter) == 0 || len(splitAfter) == 1 {
		logs.Error("get stat fail")
		return sys
	}
	infos := strings.Split(splitAfter[1], " ")
	// After the comm field, stat fields utime/stime/starttime land at
	// offsets 12/13/20 here; starttime is converted from clock ticks.
	st := stat{
		utime:  parseFloat(infos[12]),
		stime:  parseFloat(infos[13]),
		start:  parseFloat(infos[20]) / fw.clk,
		uptime: uptime,
	}
	_stime := 0.0
	_utime := 0.0
	if fw.history.stime != 0 {
		_stime = fw.history.stime
	}
	if fw.history.utime != 0 {
		_utime = fw.history.utime
	}
	// CPU time consumed since the previous sample, converted to seconds.
	total := st.stime - _stime + st.utime - _utime
	total = total / fw.clk
	// Wall-clock span of the sample; the first sample uses time since the
	// process started (the sign is fixed up by math.Abs below).
	seconds := st.start - uptime
	if fw.history.uptime != 0 {
		seconds = uptime - fw.history.uptime
	}
	fw.history = st
	seconds = math.Abs(seconds)
	if seconds == 0 {
		// Avoid dividing by zero on back-to-back samples.
		seconds = 1
	}
	sys.CPU = (total / seconds) * 100
	logs.Debug("cpu: %f", sys.CPU)
	return sys
}
// parseFloat converts val to a float64, deliberately ignoring parse errors
// (an unparseable string yields strconv's zero-value result).
func parseFloat(val string) float64 {
	parsed, _ := strconv.ParseFloat(val, 64)
	return parsed
}
// formatStdOut splits command output into lines, picks the line at
// userfulIndex, and returns its whitespace-separated fields.
func formatStdOut(stdout []byte, userfulIndex int) []string {
	lines := strings.Split(string(stdout), "\n")
	return strings.Fields(lines[userfulIndex])
}
|
package core
import (
"github.com/labstack/echo/v4"
"github.com/pkg/errors"
"html/template"
"io"
"path/filepath"
)
type (
	// Template is an echo renderer that combines shared layout templates
	// with per-page include templates, cached by include file name.
	Template struct {
		// LayoutPath and IncludePath are directories globbed for *.html.
		LayoutPath  string
		IncludePath string
		//Templates *template.Template
		templates map[string]*template.Template
	}
)
// Render implements echo's Renderer: it looks up the cached template for
// name and executes its "main" entry point with data.
func (t *Template) Render(w io.Writer, name string, data interface{}, c echo.Context) error {
	tmpl, found := t.templates[name]
	if !found {
		return errors.New("template doesn't exist")
	}
	return tmpl.ExecuteTemplate(w, "main", data)
}
// NewTemplate constructs a Template that looks for include files under
// includePath, with an empty template cache.
func NewTemplate(includePath string) *Template {
	t := Template{
		IncludePath: includePath,
		templates:   make(map[string]*template.Template),
	}
	return &t
}
// WithLayoutPath sets the layout directory and returns t for chaining.
func (t *Template) WithLayoutPath(layoutPath string) *Template {
	t.LayoutPath = layoutPath
	return t
}
// Parse globs layout and include files, then builds one cached template per
// include file (keyed by its base name), each combining the shared layouts
// with that include. Any error is fatal via echo's logger. Returns t for
// chaining.
//
// Fix over the original: `files := append(layoutFiles, file)` could reuse
// layoutFiles' backing array, so each iteration's append overwrote the file
// added by the previous one. The slice is now copied before appending.
func (t *Template) Parse(e *echo.Echo) *Template {
	layoutFiles, err := filepath.Glob(t.LayoutPath + "*.html")
	if err != nil {
		e.Logger.Fatal(err)
	}
	includeFiles, err := filepath.Glob(t.IncludePath + "*.html")
	if err != nil {
		e.Logger.Fatal(err)
	}
	mainTemplate := template.New("main")
	mainTemplate, err = mainTemplate.Parse(`{{define "main" }}{{ template "base.html" . }}{{ end }}`)
	if err != nil {
		e.Logger.Fatal(err)
	}
	for _, file := range includeFiles {
		fileName := filepath.Base(file)
		files := make([]string, 0, len(layoutFiles)+1)
		files = append(files, layoutFiles...)
		files = append(files, file)
		t.templates[fileName], err = mainTemplate.Clone()
		if err != nil {
			e.Logger.Fatal(err)
		}
		t.templates[fileName] = template.Must(t.templates[fileName].ParseFiles(files...))
	}
	return t
}
|
package protocol
// Command is a protocol command: Evaluate consumes the command's string
// arguments and produces a Result.
type Command interface {
	Evaluate([]string) Result
}
|
package main
import "fmt"
// Package-level variables start at their type's zero value when not
// explicitly initialized.
var x, y int

// A var block groups related declarations.
var (
	a int
	b bool
)

// Explicit types with initializers.
var c, d int = 1, 2

// Types are inferred from the initializers: int and string.
var e, f = 123, "hello"
// main demonstrates variable declarations, value (not reference) assignment
// semantics, and prints each variable with its address.
func main() {
	// Short declarations are only valid inside functions.
	g, h := "g", "h"
	fmt.Println(x, y, a, b, c, d, e, f, g, h)
	i, j := 123, 456
	fmt.Println(i, j)
	i = j // only copy values instead of reference
	fmt.Println(i, j)
	// Distinct addresses show i and j are independent storage.
	fmt.Println(&i, &j)
	// Incrementing j leaves the previously copied i untouched.
	j += 1
	fmt.Println(i, j)
	fmt.Println("--------------------------------")
	fmt.Println("address in stack")
	fmt.Println("x: <int>   ", x, &x)
	fmt.Println("y: <int>   ", y, &y)
	fmt.Println("a: <int>   ", a, &a)
	fmt.Println("b: <bool>  ", b, &b)
	fmt.Println("c: <int>   ", c, &c)
	fmt.Println("d: <int>   ", d, &d)
	fmt.Println("e: <int>   ", e, &e)
	fmt.Println("f: <string>", f, &f)
	fmt.Println("g: <string>", g, &g)
	fmt.Println("h: <string>", h, &h)
	fmt.Println("i: <int>   ", i, &i)
	fmt.Println("j: <int>   ", j, &j)
}
|
// O exemplo mostra vários tipos de variáveis e também que as declarações de variáveis
// podem ser "construídas" em blocos, como com as declarações de importação.
// Os tipos int, uint e uintptr são geralmente de 32 bits em sistemas de 32 bits e 64 bits
// em sistemas de 64 bits. Quando você precisar de um valor inteiro deverá usar int, a menos
// que tenha um motivo específico para usar um tipo de inteiro com tamanho especificado ou sem sinal.
package main
import (
"fmt"
"math/cmplx"
)
var (
	// ToBe is an explicitly typed bool.
	ToBe bool = false
	// MaxInt is the largest value representable by uint64.
	MaxInt uint64 = 1<<64 - 1
	// z is a complex128 obtained from a complex square root.
	z complex128 = cmplx.Sqrt(-5 + 12i)
)
// main prints the static type and value of each package-level variable.
func main() {
	fmt.Printf("Type: %T Value: %v\n", ToBe, ToBe)
	fmt.Printf("Type: %T Value: %v\n", MaxInt, MaxInt)
	fmt.Printf("Type: %T Value: %v\n", z, z)
}
|
package apiResponse
import (
"fmt"
"net/http"
config "github.com/alexhornbake/go-crud-api/config"
log "github.com/alexhornbake/go-crud-api/lib/logging"
)
// ErrorResponse is an HTTP error to be sent to a client: a status code plus
// a response body.
type ErrorResponse struct {
	Status int
	Body   interface{}
}

// errorBody is the serializable payload carried inside an ErrorResponse.
type errorBody struct {
	Error string
}
// InternalServerError builds a 500 response; message may be a format string
// filled with args.
func InternalServerError(message interface{}, args ...interface{}) *ErrorResponse {
	return createErrorResponse(http.StatusInternalServerError, message, args...)
}

// ServiceUnavailable builds a 503 response.
func ServiceUnavailable(message interface{}, args ...interface{}) *ErrorResponse {
	return createErrorResponse(http.StatusServiceUnavailable, message, args...)
}

// NotFound builds a 404 response.
func NotFound(message interface{}, args ...interface{}) *ErrorResponse {
	return createErrorResponse(http.StatusNotFound, message, args...)
}

// BadRequest builds a 400 response.
func BadRequest(message interface{}, args ...interface{}) *ErrorResponse {
	return createErrorResponse(http.StatusBadRequest, message, args...)
}

// RequestParseError builds a 400 response, prefixing the message with a
// body-parse failure note.
func RequestParseError(message interface{}, args ...interface{}) *ErrorResponse {
	message = fmt.Sprintf("Failed to parse request body, %v", message)
	return createErrorResponse(http.StatusBadRequest, message, args...)
}
// createErrorResponse builds an *ErrorResponse for status. message is
// stringified if not already a string and then treated as a Printf format
// with args. The verbose message is only exposed to clients when
// config.ReturnVerboseErrorsToClient is set; otherwise it is logged and the
// client sees the generic status text.
func createErrorResponse(status int, message interface{}, args ...interface{}) *ErrorResponse {
	cleanedMessage := ""
	if str, ok := message.(string); ok {
		cleanedMessage = str
	} else {
		cleanedMessage = fmt.Sprintf("%v", message)
	}
	// default error response
	responseMessage := http.StatusText(status)
	// default + internal error message
	// NOTE(review): cleanedMessage is used as a Printf format string; a
	// caller-supplied message containing '%' verbs will be mangled — confirm
	// callers only pass trusted format strings.
	fullMessage := fmt.Sprint(responseMessage, " : ", fmt.Sprintf(cleanedMessage, args...))
	// Do we want to expose full or generic error to client? If not, log it.
	if config.ReturnVerboseErrorsToClient {
		responseMessage = fullMessage
	} else {
		log.Debug(fullMessage)
	}
	err := ErrorResponse{
		Status: status,
		Body: errorBody{
			Error: responseMessage,
		},
	}
	return &err
}
// SendError writes error to the client via the package's Send helper,
// wrapping it in an ExtendedResponse (declared elsewhere in this package).
func SendError(w http.ResponseWriter, r *http.Request, error *ErrorResponse) {
	response := ExtendedResponse{
		Status: error.Status,
		Body:   error.Body,
	}
	Send(w, r, &response)
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.,
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under,
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package option
import (
"flag"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/Tencent/bk-bcs/bcs-common/common/blog"
"github.com/Tencent/bk-bcs/bcs-common/common/conf"
"github.com/Tencent/bk-bcs/bcs-runtime/bcs-k8s/bcs-network/bcs-ingress-controller/internal/constant"
)
// ControllerOption options for controller
type ControllerOption struct {
	// ImageTag image tag used by controller
	ImageTag string
	// Address address for server
	Address string
	// PodIPs contains ipv4 and ipv6 address get from status.podIPs
	PodIPs []string
	// Port port for server
	Port int
	// MetricPort port for metric server
	MetricPort int
	// Cloud cloud mod
	Cloud string
	// Region cloud region
	Region string
	// ElectionNamespace election namespace
	ElectionNamespace string
	// IsNamespaceScope if the ingress can only be associated with the service and workload in the same namespace
	IsNamespaceScope bool
	// LogConfig for blog
	conf.LogConfig
	// IsTCPUDPReuse if the loadbalancer provider support tcp udp port reuse
	// if enabled, we will find protocol info in 4 layer listener name
	IsTCPUDPPortReuse bool
	// IsBulkMode if use bulk interface for cloud lb
	IsBulkMode bool
	// PortBindingCheckInterval check interval for portbinding
	PortBindingCheckInterval time.Duration
	// ServerCertFile server cert file path
	ServerCertFile string
	// ServerKeyFile server key file path
	ServerKeyFile string
	// KubernetesQPS the qps of k8s client request
	KubernetesQPS int
	// KubernetesBurst the burst of k8s client request
	KubernetesBurst int
	// ConflictCheckOpen if false, skip all conflict checking about ingress and port pool
	ConflictCheckOpen bool
	// NodePortBindingNs namespace that node portbinding will be created in,
	// and if node's annotation have not related portpool namespace, will use NodePortBindingNs as default
	NodePortBindingNs string
	// HttpServerPort port for http api
	HttpServerPort uint
	// NodeInfoExporterOpen if true, node info in the cluster will be recorded
	NodeInfoExporterOpen bool
	// LBCacheExpiration load-balancer cache expiration, in minutes
	LBCacheExpiration int
	// Conf HttpServer conf
	Conf Conf
	// ServCert http server cert
	ServCert ServCert
}
// Conf is the HTTP server configuration.
type Conf struct {
	// ServCert TLS certificate settings for the server
	ServCert ServCert
	// InsecureAddress address for the plain (non-TLS) listener
	InsecureAddress string
	// InsecurePort port for the plain (non-TLS) listener
	InsecurePort uint
	// VerifyClientTLS whether to require and verify client certificates
	VerifyClientTLS bool
}
// ServCert is the server certificate configuration.
type ServCert struct {
	// CAFile path to the CA certificate file
	CAFile string
	// CertFile path to the server certificate file
	CertFile string
	// KeyFile path to the server private key file
	KeyFile string
	// CertPasswd password protecting the private key, if any
	CertPasswd string
	// IsSSL whether TLS is enabled
	IsSSL bool
}
// SetFromEnv set options by environment.
// Boolean feature flags are only turned on (never forced off) by env vars;
// a malformed boolean value terminates the process.
func (op *ControllerOption) SetFromEnv() {
	// boolFromEnv reads the named env var; the second return reports whether
	// the variable was set at all. A set-but-unparsable value exits(1).
	boolFromEnv := func(name string) (bool, bool) {
		raw := os.Getenv(name)
		if len(raw) == 0 {
			return false, false
		}
		blog.Infof("env option %s is %s", name, raw)
		parsed, err := strconv.ParseBool(raw)
		if err != nil {
			blog.Errorf("parse bool string %s failed, err %s", raw, err.Error())
			os.Exit(1)
		}
		return parsed, true
	}

	// get env var name for tcp and udp port reuse
	if reuse, ok := boolFromEnv(constant.EnvNameIsTCPUDPPortReuse); ok && reuse {
		op.IsTCPUDPPortReuse = reuse
	}
	// get env var name for bulk mode
	if bulk, ok := boolFromEnv(constant.EnvNameIsBulkMode); ok && bulk {
		op.IsBulkMode = bulk
	}

	// pod IPs come from the downward API; fall back to Address when unset
	rawPodIPs := os.Getenv(constant.EnvNamePodIPs)
	if len(rawPodIPs) == 0 {
		blog.Errorf("empty pod ip")
		rawPodIPs = op.Address
	}
	blog.Infof("pod ips: %s", rawPodIPs)
	op.PodIPs = strings.Split(rawPodIPs, ",")

	// image tag is informational; an empty value is logged but tolerated
	tag := os.Getenv(constant.EnvNameImageTag)
	if len(tag) == 0 {
		blog.Errorf("empty image tag")
	}
	op.ImageTag = tag
}
// BindFromCommandLine reads command line flags and binds them onto op.
// It must be called exactly once, before the options are used; an invalid
// portbinding_check_interval terminates the process.
func (op *ControllerOption) BindFromCommandLine() {
	var checkIntervalStr string
	var verbosity int
	flag.StringVar(&op.Address, "address", "127.0.0.1", "address for controller")
	flag.IntVar(&op.MetricPort, "metric_port", 8081, "metric port for controller")
	// BUG FIX: usage string previously read "por for controller"
	flag.IntVar(&op.Port, "port", 8080, "port for controller")
	flag.StringVar(&op.Cloud, "cloud", "tencentcloud", "cloud mode for controller")
	flag.StringVar(&op.Region, "region", "", "default cloud region for controller")
	flag.StringVar(&op.ElectionNamespace, "election_namespace", "bcs-system", "namespace for leader election")
	flag.BoolVar(&op.IsNamespaceScope, "is_namespace_scope", false,
		"if the ingress can only be associated with the service and workload in the same namespace")
	flag.StringVar(&checkIntervalStr, "portbinding_check_interval", "3m",
		"check interval of port binding, golang time format")
	flag.StringVar(&op.LogDir, "log_dir", "./logs", "If non-empty, write log files in this directory")
	flag.Uint64Var(&op.LogMaxSize, "log_max_size", 500, "Max size (MB) per log file.")
	flag.IntVar(&op.LogMaxNum, "log_max_num", 10, "Max num of log file.")
	flag.BoolVar(&op.ToStdErr, "logtostderr", false, "log to standard error instead of files")
	flag.BoolVar(&op.AlsoToStdErr, "alsologtostderr", false, "log to standard error as well as files")
	flag.IntVar(&verbosity, "v", 0, "log level for V logs")
	flag.StringVar(&op.StdErrThreshold, "stderrthreshold", "2", "logs at or above this threshold go to stderr")
	flag.StringVar(&op.VModule, "vmodule", "", "comma-separated list of pattern=N settings for file-filtered logging")
	flag.StringVar(&op.TraceLocation, "log_backtrace_at", "", "when logging hits line file:N, emit a stack trace")
	flag.StringVar(&op.ServerCertFile, "server_cert_file", "", "server cert file for webhook server")
	flag.StringVar(&op.ServerKeyFile, "server_key_file", "", "server key file for webhook server")
	flag.IntVar(&op.KubernetesQPS, "kubernetes_qps", 100, "the qps of k8s client request")
	flag.IntVar(&op.KubernetesBurst, "kubernetes_burst", 200, "the burst of k8s client request")
	flag.BoolVar(&op.ConflictCheckOpen, "conflict_check_open", true, "if false, "+
		"skip all conflict checking about ingress and port pool")
	flag.BoolVar(&op.NodeInfoExporterOpen, "node_info_exporter_open", false, "if true, "+
		"bcs-ingress-controller will record node info in cluster")
	flag.StringVar(&op.NodePortBindingNs, "node_portbinding_ns", "default",
		"namespace that node portbinding will be created in ")
	flag.UintVar(&op.HttpServerPort, "http_svr_port", 8088, "port for ingress controller http server")
	flag.IntVar(&op.LBCacheExpiration, "lb_cache_expiration", 60, "lb cache expiration, unit: minute ")
	flag.Parse()
	op.Verbosity = int32(verbosity)
	checkInterval, err := time.ParseDuration(checkIntervalStr)
	if err != nil {
		// BUG FIX: the message previously lacked a trailing newline
		fmt.Printf("check interval %s invalid\n", checkIntervalStr)
		os.Exit(1)
	}
	op.PortBindingCheckInterval = checkInterval
}
|
/*
Copyright 2013 The Camlistore Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package flickr implements an importer for flickr.com accounts.
package flickr
import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strings"
"camlistore.org/pkg/importer"
"camlistore.org/pkg/jsonconfig"
)
const (
	// authURL is the page where a user obtains a mini-token for this application.
	authURL = "http://www.flickr.com/auth-72157636676651636"
	// apiURL is the Flickr REST API endpoint every request is sent to.
	apiURL = "http://api.flickr.com/services/rest/"
	// apiSecret and apiKey identify this application to Flickr; the secret
	// is used to sign requests (see flickrRequest).
	apiSecret = "6ed517d5f44946c9"
	apiKey    = "b5801cdbc870073e7b136f24fb50396f"
)
// init registers this importer under the "flickr" name with the importer host.
func init() {
	importer.Register("flickr", newFromConfig)
}
// imp is the flickr importer instance.
type imp struct {
	// authToken is the full Flickr auth token obtained via authenticate.
	authToken string
	// userId is the Flickr NSID of the authenticated user.
	userId string
}
// newFromConfig builds a flickr importer from JSON config. When a miniToken
// is configured it is immediately exchanged for a full auth token.
func newFromConfig(cfg jsonconfig.Obj) (importer.Importer, error) {
	// TODO(aa): miniToken config is temporary. There should be UI to auth using oauth.
	miniToken := cfg.RequiredString("miniToken")
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	im := &imp{}
	if miniToken == "" {
		return im, nil
	}
	if err := im.authenticate(http.DefaultClient, miniToken); err != nil {
		return nil, err
	}
	return im, nil
}
// CanHandleURL reports whether this importer can import directly from a URL;
// the flickr importer only does account imports, so this is always false.
func (im *imp) CanHandleURL(url string) bool { return false }

// ImportURL is required by the importer interface but never reachable,
// since CanHandleURL always returns false.
func (im *imp) ImportURL(url string) error { panic("unused") }

// Prefix returns the blob prefix for this account, e.g. "flickr:<nsid>".
func (im *imp) Prefix() string {
	return fmt.Sprintf("flickr:%s", im.userId)
}
// photoMeta mirrors the per-photo JSON object returned by the
// flickr.photos.search API (with the "extras" requested in Run).
// Field names without tags rely on Go's case-insensitive JSON matching.
type photoMeta struct {
	Id          string
	Title       string
	Ispublic    int
	Isfriend    int
	Isfamily    int
	Description struct {
		Content string `json:"_content"`
	}
	Dateupload     string
	Datetaken      string
	Originalformat string
	Lastupdate     string
	Latitude       float32
	Longitude      float32
	Tags           string
	Machinetags    string `json:"machine_tags"`
	Views          string
	Media          string
	// URL is the original-size photo URL ("url_o" extra).
	URL string `json:"url_o"`
}
// searchPhotosResult is the top-level response envelope of
// flickr.photos.search: a paginated photo list plus a status string.
type searchPhotosResult struct {
	Photos struct {
		Page    int
		Pages   int
		Perpage int
		// Total is sent by Flickr as a JSON string, hence the ",string" tag.
		Total int `json:",string"`
		Photo []photoMeta
	}
	// Stat is "ok" on success — presumably "fail" otherwise; not checked here.
	Stat string
}
// Run performs the account import: it searches all of the authenticated
// user's photos (with extra metadata) and, for now, just prints a camli id
// fragment per photo. Requires a prior successful authenticate call.
func (im *imp) Run(h *importer.Host, intr importer.Interrupt) error {
	if im.authToken == "" {
		return fmt.Errorf("miniToken config key required. Go to %s to get one.", authURL)
	}
	searchParams := map[string]string{
		"method":  "flickr.photos.search",
		"user_id": "me",
		"extras":  "description, date_upload, date_taken, original_format, last_update, geo, tags, machine_tags, views, media, url_o",
	}
	var result searchPhotosResult
	if err := im.flickrRequest(h.HTTPClient(), searchParams, &result); err != nil {
		return err
	}
	for _, photo := range result.Photos.Photo {
		camliIdFramgment := fmt.Sprintf("photo-%s", photo.Id)
		photoContentHint := photo.Lastupdate
		fmt.Println(camliIdFramgment, photoContentHint)
		// TODO(aa): Stuff
	}
	return nil
}
// getFullAuthTokenResp mirrors the JSON response of flickr.auth.getFullToken:
// the full token plus the authenticated user's NSID.
type getFullAuthTokenResp struct {
	Auth struct {
		Token struct {
			Content string `json:"_content"`
		}
		User struct {
			Nsid string
		}
	}
	// Stat is "ok" on success; not checked by authenticate.
	Stat string
}
// authenticate exchanges a user-supplied mini-token for a full auth token
// and records both the token and the user's NSID on im.
func (im *imp) authenticate(httpClient *http.Client, miniToken string) error {
	var tokenResp getFullAuthTokenResp
	params := map[string]string{
		"method":     "flickr.auth.getFullToken",
		"mini_token": miniToken,
	}
	if err := im.flickrRequest(httpClient, params, &tokenResp); err != nil {
		return err
	}
	im.userId = tokenResp.Auth.User.Nsid
	im.authToken = tokenResp.Auth.Token.Content
	return nil
}
// flickrRequest performs a signed call against the Flickr REST API and
// decodes the JSON response into result. Per Flickr's signing scheme,
// api_sig is the MD5 of the api secret followed by all key+value pairs
// concatenated in sorted order. Note: params is mutated (api key, format
// and auth token are added).
func (im *imp) flickrRequest(httpClient *http.Client, params map[string]string, result interface{}) error {
	params["api_key"] = apiKey
	params["format"] = "json"
	params["nojsoncallback"] = "1"
	if im.authToken != "" {
		params["auth_token"] = im.authToken
	}
	paramList := make([]string, 0, len(params))
	for key, val := range params {
		paramList = append(paramList, key+val)
	}
	sort.Strings(paramList)
	hash := md5.New()
	body := apiSecret + strings.Join(paramList, "")
	io.WriteString(hash, body)
	digest := hash.Sum(nil)
	// apiURL is a constant known to parse, so the error is ignored.
	reqURL, _ := url.Parse(apiURL)
	q := reqURL.Query()
	for key, val := range params {
		q.Set(key, val)
	}
	q.Set("api_sig", fmt.Sprintf("%x", digest))
	reqURL.RawQuery = q.Encode()
	res, err := httpClient.Get(reqURL.String())
	if err != nil {
		return err
	}
	// BUG FIX: close the body on every path; previously the non-200 early
	// return below leaked the response body.
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("Auth request failed with: %s", res.Status)
	}
	return json.NewDecoder(res.Body).Decode(result)
}
|
package httpresponse
import (
"context"
"encoding/json"
"fmt"
"net/http"
)
// httpErrorResponse is the JSON body emitted by ErrorResponseJSON,
// following the OAuth-style error / error_description convention.
type httpErrorResponse struct {
	Error            string `json:"error"`
	ErrorDescription string `json:"error_description"`
}
// RespondJSON serializes object to JSON and writes it as the HTTP response
// with the given status code. Optional extra headers are applied first;
// Content-Type is always forced to application/json. If serialization
// fails, a bare 500 is written instead.
func RespondJSON(w http.ResponseWriter, httpStatusCode int, object interface{}, headers map[string]string) {
	payload, err := json.Marshal(object)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// Ranging over a nil map is a no-op, so no nil check is needed.
	for key, value := range headers {
		w.Header().Set(key, value)
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(httpStatusCode)
	w.Write(payload)
}
// RespondText writes text as a text/plain HTTP response with the given
// status code. When fileName is non-empty the response is marked as a
// downloadable attachment named "<fileName>.txt". Optional extra headers
// are applied first.
func RespondText(w http.ResponseWriter, httpStatusCode int, text, fileName string, headers map[string]string) {
	h := w.Header()
	// Ranging over a nil map is a no-op, so no nil check is needed.
	for key, value := range headers {
		h.Set(key, value)
	}
	h.Set("Content-Type", "text/plain")
	if fileName != "" {
		h.Set("Content-Disposition", fmt.Sprintf("attachment;filename=%v.txt", fileName))
	}
	w.WriteHeader(httpStatusCode)
	w.Write([]byte(text))
}
// ErrorResponseJSON write ErrorResponse as http response
func ErrorResponseJSON(ctx context.Context, w http.ResponseWriter, httpStatusCode int, err string, description string) {
// logger.Errorf("Error: %v, Response Code: %v, Description: %v", err, httpStatusCode, description)
RespondJSON(w, httpStatusCode, httpErrorResponse{
Error: err,
ErrorDescription: description,
}, nil)
}
|
package agent
import (
"crypto/md5"
"fmt"
"io"
"strings"
"github.com/astaxie/beego/httplib"
)
// TaobaoAgent is a client for the Taobao open-platform HTTP API.
type TaobaoAgent struct {
	// Url is the API gateway endpoint.
	Url string
	// AppKey identifies the application.
	AppKey string
	// Secret is the signing secret paired with AppKey.
	Secret string
	// Session is the user session token sent with each request.
	Session string
	// params holds the fully-assembled parameters of the last buildSign call.
	params map[string]string
}
// NewTaobaoAgent returns an agent configured with the given endpoint,
// application credentials and user session.
func NewTaobaoAgent(url, appkey, secret, session string) *TaobaoAgent {
	return &TaobaoAgent{
		Url:     url,
		AppKey:  appkey,
		Secret:  secret,
		Session: session,
	}
}
// buildSign assembles the common request parameters for the given API
// method (stored in ta.params for the subsequent Request call) and returns
// the upper-case hex MD5 signature over them, per Taobao's signing scheme.
func (ta *TaobaoAgent) buildSign(api string, params map[string]string) string {
	assembled := map[string]string{
		"v":           "2.0",
		"app_key":     ta.AppKey,
		"session":     ta.Session,
		"sign_method": "md5",
		"format":      "json",
		"method":      api,
		"timestamp":   TaobaoTime.NowString(),
	}
	for key, value := range params {
		assembled[key] = value
	}
	ta.params = assembled
	digest := md5.New()
	io.WriteString(digest, NewSign(ta.params).BracketToString(ta.Secret))
	return strings.ToUpper(fmt.Sprintf("%x", digest.Sum(nil)))
}
// Request signs and executes the given API call and wraps the raw body
// (or the transport error) in a JsonResponse.
func (ta *TaobaoAgent) Request(api string, params map[string]string) *JsonResponse {
	signature := ta.buildSign(api, params)
	req := httplib.Get(ta.Url)
	for key, value := range ta.params {
		req.Param(key, value)
	}
	req.Param("sign", signature)
	body, err := req.String()
	if err != nil {
		return &JsonResponse{err: err, api: api}
	}
	return &JsonResponse{originString: body, api: api}
}
// Joins concatenates the given values with commas, the list form that
// Taobao API parameters expect.
func (ta *TaobaoAgent) Joins(values []string) string {
	return strings.Join(values, ",")
}
|
package persistence
import (
"database/sql"
"errors"
"gopetstore/src/domain"
"gopetstore/src/util"
"log"
)
// getCategoryListSQL selects every category row.
const getCategoryListSQL = "SELECT CATID AS categoryId,NAME,DESCN AS description FROM CATEGORY"

// getCategoryByIdSQL selects a single category by its primary key.
const getCategoryByIdSQL = "SELECT CATID AS categoryId,NAME,DESCN AS description FROM CATEGORY WHERE CATID = ?"
// scanCategory reads the current row of r (categoryId, name, description)
// into a new domain.Category.
func scanCategory(r *sql.Rows) (*domain.Category, error) {
	var c domain.Category
	if err := r.Scan(&c.CategoryId, &c.Name, &c.Description); err != nil {
		return nil, err
	}
	return &c, nil
}
// GetCategoryList returns every category stored in the CATEGORY table.
// Rows that fail to scan are logged and skipped rather than aborting the
// whole listing.
func GetCategoryList() ([]*domain.Category, error) {
	d, err := util.GetConnection()
	// Close the connection on all paths; d may be non-nil even when err isn't.
	defer func() {
		if d != nil {
			_ = d.Close()
		}
	}()
	var result []*domain.Category
	if err != nil {
		return result, err
	}
	r, err := d.Query(getCategoryListSQL)
	if err != nil {
		return result, err
	}
	// FIX: register Close immediately after acquiring the rows so they are
	// released on every exit path (previously deferred only after the loop).
	defer r.Close()
	for r.Next() {
		c, err := scanCategory(r)
		if err != nil {
			log.Printf("error: %v", err.Error())
			continue
		}
		result = append(result, c)
	}
	// Surface any iteration error the loop terminated on.
	if err = r.Err(); err != nil {
		return result, err
	}
	return result, nil
}
// GetCategory returns the category with the given id, or an error when no
// such row exists.
func GetCategory(categoryId string) (*domain.Category, error) {
	d, err := util.GetConnection()
	// Close the connection on all paths; d may be non-nil even when err isn't.
	defer func() {
		if d != nil {
			_ = d.Close()
		}
	}()
	if err != nil {
		return nil, err
	}
	r, err := d.Query(getCategoryByIdSQL, categoryId)
	if err != nil {
		return nil, err
	}
	// BUG FIX: Close was previously deferred only after the r.Next() branch,
	// so returning the found category leaked the rows handle (and the
	// underlying connection) on the success path.
	defer r.Close()
	if r.Next() {
		c, err := scanCategory(r)
		if err != nil {
			return nil, err
		}
		return c, nil
	}
	// Distinguish an iteration error from a genuine miss.
	if err = r.Err(); err != nil {
		return nil, err
	}
	return nil, errors.New("can not find a category by this id")
}
|
package main
// S is a named int type used to observe Go's escape analysis decisions.
type S int

// main allocates an S three ways so the compiler's escape analysis output
// (see the go run command below) can be compared:
// a local whose address is stored in a slice, and a new(S) pointer.
func main() {
	a := S(0)
	b := make([]*S, 2)
	// Storing &a in the slice makes a's address escape the stack frame —
	// run with -gcflags "-m -l" to see the compiler's report.
	b[0] = &a
	c := new(S)
	b[1] = c
}
//go run -gcflags "-m -l" escape.go
|
package virtualgateway
import (
"context"
appmesh "github.com/aws/aws-app-mesh-controller-for-k8s/apis/appmesh/v1beta2"
"github.com/aws/aws-app-mesh-controller-for-k8s/pkg/k8s"
"github.com/aws/aws-app-mesh-controller-for-k8s/pkg/webhook"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"strings"
)
// MembershipDesignator designates VirtualGateway membership for pods and namespaced AppMesh GatewayRoute CRs.
type MembershipDesignator interface {
	// DesignateForGatewayRoute will choose a VirtualGateway for given namespaced GatewayRoute CR.
	DesignateForGatewayRoute(ctx context.Context, obj *appmesh.GatewayRoute) (*appmesh.VirtualGateway, error)
	// DesignateForPod will choose a VirtualGateway for given pod.
	DesignateForPod(ctx context.Context, pod *corev1.Pod) (*appmesh.VirtualGateway, error)
}
// NewMembershipDesignator creates new MembershipDesignator backed by the
// given controller-runtime client.
func NewMembershipDesignator(k8sClient client.Client) MembershipDesignator {
	return &membershipDesignator{k8sClient: k8sClient}
}
// Compile-time check that membershipDesignator implements MembershipDesignator.
var _ MembershipDesignator = &membershipDesignator{}

// membershipDesignator designates VirtualGateway membership based on
// selectors declared on VirtualGateway specs.
type membershipDesignator struct {
	k8sClient client.Client
}
// DesignateForPod chooses the VirtualGateway in the pod's (admission
// request's) namespace whose podSelector matches the pod labels. It returns
// nil when nothing matches and errors when more than one does.
// +kubebuilder:rbac:groups=appmesh.k8s.aws,resources=virtualgateways,verbs=get;list;watch
func (d *membershipDesignator) DesignateForPod(ctx context.Context, pod *corev1.Pod) (*appmesh.VirtualGateway, error) {
	// see https://github.com/kubernetes/kubernetes/issues/88282 and https://github.com/kubernetes/kubernetes/issues/76680
	req := webhook.ContextGetAdmissionRequest(ctx)
	gatewayList := appmesh.VirtualGatewayList{}
	if err := d.k8sClient.List(ctx, &gatewayList, client.InNamespace(req.Namespace)); err != nil {
		return nil, errors.Wrap(err, "failed to list VirtualGateways in cluster")
	}
	var matched []*appmesh.VirtualGateway
	for i := range gatewayList.Items {
		selector, err := metav1.LabelSelectorAsSelector(gatewayList.Items[i].Spec.PodSelector)
		if err != nil {
			return nil, err
		}
		if selector.Matches(labels.Set(pod.Labels)) {
			matched = append(matched, gatewayList.Items[i].DeepCopy())
		}
	}
	switch len(matched) {
	case 0:
		return nil, nil
	case 1:
		return matched[0], nil
	}
	names := make([]string, 0, len(matched))
	for _, vg := range matched {
		names = append(names, k8s.NamespacedName(vg).String())
	}
	return nil, errors.Errorf("found multiple matching VirtualGateways for pod %s: %s",
		k8s.NamespacedName(pod).String(), strings.Join(names, ","))
}
// DesignateForGatewayRoute chooses the VirtualGateway whose namespaceSelector
// matches the labels of the GatewayRoute's namespace. Exactly one
// VirtualGateway must match; zero or multiple matches are errors.
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
func (d *membershipDesignator) DesignateForGatewayRoute(ctx context.Context, obj *appmesh.GatewayRoute) (*appmesh.VirtualGateway, error) {
	// see https://github.com/kubernetes/kubernetes/issues/88282 and https://github.com/kubernetes/kubernetes/issues/76680
	req := webhook.ContextGetAdmissionRequest(ctx)
	objNS := corev1.Namespace{}
	if err := d.k8sClient.Get(ctx, types.NamespacedName{Name: req.Namespace}, &objNS); err != nil {
		// BUG FIX: report the namespace actually looked up (req.Namespace);
		// obj.GetNamespace() may be unset during webhook admission, which is
		// why req.Namespace is used for the Get in the first place.
		return nil, errors.Wrapf(err, "failed to get namespace: %s", req.Namespace)
	}
	vgList := appmesh.VirtualGatewayList{}
	if err := d.k8sClient.List(ctx, &vgList); err != nil {
		return nil, errors.Wrap(err, "failed to list virtualGateways in cluster")
	}
	var vgCandidates []*appmesh.VirtualGateway
	for _, vgObj := range vgList.Items {
		selector, err := metav1.LabelSelectorAsSelector(vgObj.Spec.NamespaceSelector)
		if err != nil {
			return nil, err
		}
		if selector.Matches(labels.Set(objNS.Labels)) {
			vgCandidates = append(vgCandidates, vgObj.DeepCopy())
		}
	}
	if len(vgCandidates) == 0 {
		return nil, errors.Errorf("failed to find matching virtualGateway for namespace: %s, expecting 1 but found %d",
			obj.GetNamespace(), 0)
	}
	if len(vgCandidates) > 1 {
		var vgCandidatesNames []string
		for _, vg := range vgCandidates {
			vgCandidatesNames = append(vgCandidatesNames, vg.Name)
		}
		return nil, errors.Errorf("found multiple matching virtualGateways for namespace: %s, expecting 1 but found %d: %s",
			obj.GetNamespace(), len(vgCandidates), strings.Join(vgCandidatesNames, ","))
	}
	return vgCandidates[0], nil
}
|
package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
// Compile-time check that MsgCreateCosmosToEth implements sdk.Msg.
var _ sdk.Msg = &MsgCreateCosmosToEth{}

// NewMsgCreateCosmosToEth builds a MsgCreateCosmosToEth from its raw fields.
func NewMsgCreateCosmosToEth(sender string, ethDest string, amount string, bridgeFee string) *MsgCreateCosmosToEth {
	return &MsgCreateCosmosToEth{
		Sender:    sender,
		EthDest:   ethDest,
		Amount:    amount,
		BridgeFee: bridgeFee,
	}
}

// Route returns the module router key.
func (msg *MsgCreateCosmosToEth) Route() string {
	return RouterKey
}

// Type returns the message type name used in events and legacy signing.
func (msg *MsgCreateCosmosToEth) Type() string {
	return "CreateCosmosToEth"
}

// GetSigners returns the account that must sign this message (the sender);
// panics on a malformed address, which ValidateBasic should have rejected.
func (msg *MsgCreateCosmosToEth) GetSigners() []sdk.AccAddress {
	creator, err := sdk.AccAddressFromBech32(msg.Sender)
	if err != nil {
		panic(err)
	}
	return []sdk.AccAddress{creator}
}

// GetSignBytes returns the canonical sorted-JSON encoding used for signing.
func (msg *MsgCreateCosmosToEth) GetSignBytes() []byte {
	bz := ModuleCdc.MustMarshalJSON(msg)
	return sdk.MustSortJSON(bz)
}

// ValidateBasic performs stateless validation. Only the sender address is
// checked; EthDest, Amount and BridgeFee are not validated here —
// NOTE(review): confirm they are validated downstream.
func (msg *MsgCreateCosmosToEth) ValidateBasic() error {
	_, err := sdk.AccAddressFromBech32(msg.Sender)
	if err != nil {
		return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err)
	}
	return nil
}
// Compile-time check that MsgUpdateCosmosToEth implements sdk.Msg.
var _ sdk.Msg = &MsgUpdateCosmosToEth{}

// NewMsgUpdateCosmosToEth builds a MsgUpdateCosmosToEth for the record id.
func NewMsgUpdateCosmosToEth(creator string, id uint64, sender string, ethDest string, amount string, bridgeFee string) *MsgUpdateCosmosToEth {
	return &MsgUpdateCosmosToEth{
		Id:        id,
		Creator:   creator,
		Sender:    sender,
		EthDest:   ethDest,
		Amount:    amount,
		BridgeFee: bridgeFee,
	}
}

// Route returns the module router key.
func (msg *MsgUpdateCosmosToEth) Route() string {
	return RouterKey
}

// Type returns the message type name used in events and legacy signing.
func (msg *MsgUpdateCosmosToEth) Type() string {
	return "UpdateCosmosToEth"
}

// GetSigners returns the account that must sign this message (the creator);
// panics on a malformed address, which ValidateBasic should have rejected.
func (msg *MsgUpdateCosmosToEth) GetSigners() []sdk.AccAddress {
	creator, err := sdk.AccAddressFromBech32(msg.Creator)
	if err != nil {
		panic(err)
	}
	return []sdk.AccAddress{creator}
}

// GetSignBytes returns the canonical sorted-JSON encoding used for signing.
func (msg *MsgUpdateCosmosToEth) GetSignBytes() []byte {
	bz := ModuleCdc.MustMarshalJSON(msg)
	return sdk.MustSortJSON(bz)
}

// ValidateBasic performs stateless validation of the creator address only.
func (msg *MsgUpdateCosmosToEth) ValidateBasic() error {
	_, err := sdk.AccAddressFromBech32(msg.Creator)
	if err != nil {
		return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err)
	}
	return nil
}
// Compile-time check that MsgDeleteCosmosToEth implements sdk.Msg.
// BUG FIX: this previously asserted &MsgCreateCosmosToEth{} (copy/paste),
// leaving MsgDeleteCosmosToEth unchecked at compile time.
var _ sdk.Msg = &MsgDeleteCosmosToEth{}

// NewMsgDeleteCosmosToEth builds a MsgDeleteCosmosToEth for the record id.
func NewMsgDeleteCosmosToEth(creator string, id uint64) *MsgDeleteCosmosToEth {
	return &MsgDeleteCosmosToEth{
		Id:      id,
		Creator: creator,
	}
}

// Route returns the module router key.
func (msg *MsgDeleteCosmosToEth) Route() string {
	return RouterKey
}

// Type returns the message type name used in events and legacy signing.
func (msg *MsgDeleteCosmosToEth) Type() string {
	return "DeleteCosmosToEth"
}

// GetSigners returns the account that must sign this message (the creator);
// panics on a malformed address, which ValidateBasic should have rejected.
func (msg *MsgDeleteCosmosToEth) GetSigners() []sdk.AccAddress {
	creator, err := sdk.AccAddressFromBech32(msg.Creator)
	if err != nil {
		panic(err)
	}
	return []sdk.AccAddress{creator}
}

// GetSignBytes returns the canonical sorted-JSON encoding used for signing.
func (msg *MsgDeleteCosmosToEth) GetSignBytes() []byte {
	bz := ModuleCdc.MustMarshalJSON(msg)
	return sdk.MustSortJSON(bz)
}

// ValidateBasic performs stateless validation of the creator address only.
func (msg *MsgDeleteCosmosToEth) ValidateBasic() error {
	_, err := sdk.AccAddressFromBech32(msg.Creator)
	if err != nil {
		return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err)
	}
	return nil
}
|
package main
import (
"database/sql"
_ "github.com/mattn/go-sqlite3"
"strings"
"log"
"os"
"io"
"path/filepath"
"fmt"
"net/http"
"net/url"
)
const (
	// database is the source SQLite file being split and re-merged.
	database = "austen.db"
	// tempDir is where the split partitions are written and served from.
	tempDir = "/tmp/output/data"
	// outDir is the local directory downloads are staged into.
	outDir = "output"
)
var (
	// db_length is the row count of the source database.
	// NOTE(review): main declares a local `var db_length int` that shadows
	// this package-level variable, so this one is never actually written.
	db_length int
)
// main splits austen.db into 50 partitions under tempDir, serves them over
// a local HTTP file server, then downloads and merges them back into a
// single database — an end-to-end exercise of splitDatabase/mergeDatabases.
func main() {
	// Existence check only. NOTE(review): the opened *os.File is never
	// closed or otherwise used.
	_, err := os.Open(database)
	if err != nil {
		log.Fatalf("checking existence of the database: %v", err)
	}
	db, err := openDatabase(database)
	if err != nil {
		log.Fatalf("opening database: %v", err)
	}
	defer db.Close()
	// FIGURE OUT PARTITION LENGTH AND SPLIT
	row := db.QueryRow("SELECT count(key) FROM pairs")
	// NOTE(review): this local db_length shadows the package-level variable
	// of the same name, and the Scan error is silently discarded.
	var db_length int
	_ = row.Scan(&db_length)
	//fmt.Printf("length of db in rows: %d\n", db_length)
	//fmt.Printf("Average partition length in rows: %d\n", db_length/50)
	//fmt.Printf("Remaining rows are in a partition of length: %d\n", db_length-((db_length/50)*50))
	_, err = splitDatabase(database, tempDir, "output-%d.db", 50)
	if err != nil {
		log.Fatalf("splitting db: %v", err)
	}
	fmt.Printf("SUCCESS!\n")
	// SERVER STARTUP: serve the partitions so mergeDatabases can fetch them.
	go func() {
		http.Handle("/data/", http.StripPrefix("/data", http.FileServer(http.Dir("/tmp/output/data"))))
		if err := http.ListenAndServe("localhost:3410", nil); err != nil {
			log.Printf("Error in HTTP server for %s: %v", "localhost:3410", err)
		}
	}()
	// ITERATE THROUGH SPLIT DIRECTORY AND CREATE URLS
	fileList := make([]string, 0)
	e := filepath.Walk(tempDir, func(path string, f os.FileInfo, err error) error {
		fileList = append(fileList, path)
		return err
	})
	if e != nil { // WAYTOODANK
		panic(e)
	}
	var urls []string
	for i, file := range fileList {
		url := strings.Split(file, "/")
		if i == 0 {
			continue
		} // SKIP THE DIRECTORY ITSELF
		urls = append(urls, "http://localhost:3410/data/"+url[len(url)-1])
	}
	// NOTE(review): db is reassigned here, so the deferred Close above now
	// targets the merged handle (which mergeDatabases already closed).
	db, err = mergeDatabases(urls, "new.db", tempDir)
	if err != nil {
		log.Fatalf("merging: %v", err)
	}
	fmt.Printf("SUCCESS!\n")
}
// openDatabase opens an existing SQLite database at path in read-write mode
// with the project's standard connection options (long busy timeout,
// journaling and synchronous writes disabled for bulk-load speed).
func openDatabase(path string) (*sql.DB, error) {
	options := "?" + strings.Join([]string{
		"_busy_timeout=10000",
		"_case_sensitive_like=OFF",
		"_foreign_keys=ON",
		"_journal_mode=OFF",
		"_locking_mode=NORMAL",
		"mode=rw",
		"_synchronous=OFF",
	}, "&")
	return sql.Open("sqlite3", path+options)
}
// createDatabase creates (truncating if present) the SQLite database file at
// path and initializes it with an empty pairs(key, value) table.
func createDatabase(path string) (*sql.DB, error) {
	options :=
		"?" + "_busy_timeout=10000" +
			"&" + "_case_sensitive_like=OFF" +
			"&" + "_foreign_keys=ON" +
			"&" + "_journal_mode=OFF" +
			"&" + "_locking_mode=NORMAL" +
			"&" + "mode=rw" +
			"&" + "_synchronous=OFF"
	if _, err := os.Create(path); err != nil {
		log.Fatalf("creating database file: %v", err)
	}
	db, err := sql.Open("sqlite3", path+options)
	if err != nil {
		// BUG FIX: this error was previously ignored, so a failed Open
		// (e.g. driver not registered) caused a nil-pointer panic on
		// db.Begin below.
		return nil, err
	}
	tx, err := db.Begin()
	if err != nil {
		log.Fatalf("beginning table create tx: %v", err)
	}
	if _, err = tx.Exec("CREATE TABLE pairs(key text, value text)"); err != nil {
		log.Fatalf("creating pairs table: %v", err)
	}
	// BUG FIX: the Commit error was previously discarded.
	if err = tx.Commit(); err != nil {
		log.Fatalf("committing table create tx: %v", err)
	}
	return db, nil
}
// splitDatabase copies the pairs table of source into m new databases in
// outputDir, named by outputPattern (must contain one %d verb). Each new
// database receives count/m rows; the remainder rows are spread one per
// partition at the end. Returns the created file paths.
//
// NOTE(review): several issues are flagged inline but left untouched
// because the row-cursor bookkeeping is order-sensitive.
func splitDatabase(source, outputDir, outputPattern string, m int) ([]string, error) {
	fmt.Printf("splitting %s into %d new files in %s\n", source, m, outputDir)
	var names []string
	var err error
	db, err := openDatabase(source)
	// NOTE(review): Close is deferred before err is checked; db could be nil.
	defer db.Close()
	var r = db.QueryRow("SELECT count(key) from pairs")
	var count int
	// NOTE(review): Scan error silently ignored; count stays 0 on failure.
	_ = r.Scan(&count)
	if count < m {
		return names, err
	}
	var partition_length = count / m
	var remainder = count - ((count / m) * m)
	if err != nil {
		return names, err
	}
	var splits []*sql.DB
	if err != nil {
		log.Fatalf("opening database for splitting: %v", err)
	}
	// NOTE(review): rows is never closed, and its error is unchecked.
	rows, err := db.Query("SELECT key, value FROM pairs")
	for i := 1; i <= m; i++ {
		var path = filepath.Join(outputDir, fmt.Sprintf(outputPattern, i))
		names = append(names, path)
		out_db, err := createDatabase(path)
		if err != nil {
			return names, err
		}
		splits = append(splits, out_db)
		// The outer j is only used by the remainder loop below; the inner
		// for declares its own shadowing j.
		var j = 0
		for j := 0; j < partition_length; j++ {
			rows.Next()
			var key, value string
			_ = rows.Scan(&key, &value)
			out_db.Exec("INSERT INTO pairs(key, value) values(?, ?)", key, value)
		}
		// NOTE(review): hard-codes 50 instead of m — breaks for other m.
		if i == 50 { // ON THE LAST ITERATION, DISTRIBUTE THE REMAINING DATA
			for j = 0; j < remainder; j++ {
				rows.Next()
				var key, value string
				_ = rows.Scan(&key, &value)
				splits[j].Exec("INSERT INTO pairs(key, value) values(?, ?)", key, value)
			}
		}
	}
	return names, err
}
// mergeDatabases downloads each partition URL into outDir, attaches it to a
// freshly created database at path, copies its pairs rows in, then deletes
// both the served partition file and the download.
//
// NOTE(review): the returned *sql.DB has been Closed before returning, so
// callers may only inspect the error (main does exactly that). Also, the
// deferred f.Close() calls inside the loop all run at function exit.
func mergeDatabases(urls []string, path, temp string) (*sql.DB, error) {
	fmt.Printf("downloading %d files from %s into %s and merging them into new file %s\n", len(urls), temp, outDir, path)
	db, err := createDatabase(path)
	for _, u := range urls {
		// DOWNLOAD
		file, err := url.Parse(u) // EXTRACT THE URL OBJECT
		filename := file.Path     // GET THE ACTUAL FILENAME
		s := strings.Split(filename, "/")
		filename = s[len(s)-1]
		p := filepath.Join("output", filename)
		f, err := os.Create(p)
		if err != nil {
			return db, err
		}
		defer f.Close()
		resp, err := http.Get(u) // GET REQUEST TO SERVER
		if err != nil {
			// BUG FIX: the Get error was previously overwritten by the
			// io.Copy result, so a failed request dereferenced a nil resp.
			return db, err
		}
		_, err = io.Copy(f, resp.Body) // COPY THE RESPONSE BODY TO THE NEW FILE
		resp.Body.Close()
		if err != nil {
			return db, err
		}
		// MERGE
		_, err = db.Exec("attach ? as merge; insert into pairs select * from merge.pairs; detach merge", p)
		if err != nil {
			return db, err
		}
		// DELETE the served partition and the local download.
		parts := strings.Split(u, "/")
		err = os.Remove(filepath.Join(tempDir, parts[len(parts)-1]))
		if err != nil {
			return db, err
		}
		err = os.Remove(p)
		if err != nil {
			return db, err
		}
	}
	db.Close()
	return db, err
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/ddl"
ddlutil "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/external"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/gcutil"
"github.com/stretchr/testify/require"
)
// TestModifyColumnTypeArgs test job raw args won't be updated when error occurs in `updateVersionAndTableInfo`.
func TestModifyColumnTypeArgs(t *testing.T) {
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockUpdateVersionAndTableInfoErr", `return(2)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockUpdateVersionAndTableInfoErr"))
}()
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t_modify_column_args")
tk.MustExec("create table t_modify_column_args(a int, unique(a))")
err := tk.ExecToErr("alter table t_modify_column_args modify column a tinyint")
require.Error(t, err)
// error goes like `mock update version and tableInfo error,jobID=xx`
strs := strings.Split(err.Error(), ",")
require.Equal(t, "[ddl:-1]mock update version and tableInfo error", strs[0])
jobID := strings.Split(strs[1], "=")[1]
tbl := external.GetTableByName(t, tk, "test", "t_modify_column_args")
require.Len(t, tbl.Meta().Columns, 1)
require.Len(t, tbl.Meta().Indices, 1)
id, err := strconv.Atoi(jobID)
require.NoError(t, err)
historyJob, err := ddl.GetHistoryJobByID(tk.Session(), int64(id))
require.NoError(t, err)
require.NotNil(t, historyJob)
var (
newCol *model.ColumnInfo
oldColName *model.CIStr
modifyColumnTp byte
updatedAutoRandomBits uint64
changingCol *model.ColumnInfo
changingIdxs []*model.IndexInfo
)
pos := &ast.ColumnPosition{}
err = historyJob.DecodeArgs(&newCol, &oldColName, pos, &modifyColumnTp, &updatedAutoRandomBits, &changingCol, &changingIdxs)
require.NoError(t, err)
require.Nil(t, changingCol)
require.Nil(t, changingIdxs)
}
// TestParallelUpdateTableReplica verifies that two concurrent
// UpdateTableReplicaInfo calls on the same table don't both succeed:
// the second one must fail with an "already updated" error.
func TestParallelUpdateTableReplica(t *testing.T) {
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount"))
	}()
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("create database test_db_state default charset utf8 default collate utf8_bin")
	tk.MustExec("use test_db_state")
	tk.MustExec("drop table if exists t1;")
	tk.MustExec("create table t1 (a int);")
	tk.MustExec("alter table t1 set tiflash replica 3 location labels 'a','b';")
	tk1, tk2, ch, originalCallback := prepareTestControlParallelExecSQL(t, store, dom)
	defer dom.DDL().SetHook(originalCallback)
	t1 := external.GetTableByName(t, tk, "test_db_state", "t1")
	var err1 error
	var err2 error
	var wg util.WaitGroupWrapper
	wg.Run(func() {
		// Mock for table tiflash replica was available.
		err1 = domain.GetDomain(tk1.Session()).DDL().UpdateTableReplicaInfo(tk1.Session(), t1.Meta().ID, true)
	})
	wg.Run(func() {
		// ch gates the second update until the first is in flight.
		<-ch
		// Mock for table tiflash replica was available.
		err2 = domain.GetDomain(tk2.Session()).DDL().UpdateTableReplicaInfo(tk2.Session(), t1.Meta().ID, true)
	})
	wg.Wait()
	require.NoError(t, err1)
	require.EqualError(t, err2, "[ddl:-1]the replica available status of table t1 is already updated")
}
// TestParallelFlashbackTable tests parallel flashback table.
func TestParallelFlashbackTable(t *testing.T) {
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`))
	// Restore the original emulator GC state when the test finishes.
	defer func(originGC bool) {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"))
		if originGC {
			ddlutil.EmulatorGCEnable()
		} else {
			ddlutil.EmulatorGCDisable()
		}
	}(ddlutil.IsEmulatorGCEnable())
	// disable emulator GC.
	// Disable emulator GC, otherwise, emulator GC will delete table record as soon as possible after executing drop table DDL.
	ddlutil.EmulatorGCDisable()
	gcTimeFormat := "20060102-15:04:05 -0700 MST"
	// Move the GC safe point 48h into the past so the dropped table is
	// still recoverable by flashback.
	timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(gcTimeFormat)
	safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
			       ON DUPLICATE KEY
			       UPDATE variable_value = '%[1]s'`
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	// clear GC variables first.
	tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
	// set GC safe point
	tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
	// set GC enable.
	require.NoError(t, gcutil.EnableGC(tk.Session()))
	// prepare dropped table.
	tk.MustExec("create database test_db_state default charset utf8 default collate utf8_bin")
	tk.MustExec("use test_db_state")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int)")
	tk.MustExec("drop table if exists t")
	// Test parallel flashback table.
	sql1 := "flashback table t to t_flashback"
	f := func(err1, err2 error) {
		require.NoError(t, err1)
		require.EqualError(t, err2, "[schema:1050]Table 't_flashback' already exists")
	}
	testControlParallelExecSQL(t, tk, store, dom, "", sql1, sql1, f)
	// Test parallel flashback table with different name
	tk.MustExec("drop table t_flashback")
	sql1 = "flashback table t_flashback"
	sql2 := "flashback table t_flashback to t_flashback2"
	testControlParallelExecSQL(t, tk, store, dom, "", sql1, sql2, f)
}
|
package domain
import (
"fmt"
"regexp"
"github.com/quintans/faults"
)
// FullName is a Value Object representing a first and last names
// FullName is a Value Object representing a first and last names.
// Instances built via NewFullName always have both components non-empty.
type FullName struct {
	firstName string
	lastName  string
}

// String implements fmt.Stringer, rendering "<first> <last>".
func (f FullName) String() string {
	return fmt.Sprintf("%s %s", f.firstName, f.lastName)
}
// NewFullName builds a FullName value object, rejecting an empty first or
// last name with a descriptive error.
func NewFullName(
	firstName string,
	lastName string,
) (FullName, error) {
	switch {
	case firstName == "":
		return FullName{}, faults.New("FullName.firstName cannot be empty")
	case lastName == "":
		return FullName{}, faults.New("FullName.lastName cannot be empty")
	}
	return FullName{firstName: firstName, lastName: lastName}, nil
}
// MustNewFullName is the panicking variant of NewFullName, intended for
// inputs known to be valid (constants, tests).
func MustNewFullName(
	firstName string,
	lastName string,
) FullName {
	fn, err := NewFullName(firstName, lastName)
	if err != nil {
		panic(err)
	}
	return fn
}
// FirstName returns the first-name component.
func (f FullName) FirstName() string {
	return f.firstName
}

// LastName returns the last-name component.
func (f FullName) LastName() string {
	return f.lastName
}

// IsZero reports whether f is the zero value (i.e. not built via NewFullName).
func (f FullName) IsZero() bool {
	return f == FullName{}
}
// Email is a Value Object wrapping a syntactically validated e-mail address.
type Email struct {
	email string
}

// String implements fmt.Stringer, returning the raw address.
func (e Email) String() string {
	return e.email
}

// emailRe is the address syntax accepted by this package: a local part, "@",
// then dot-separated domain labels of at most 63 characters each.
var emailRe = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")

// validate checks e.email against emailRe and returns a descriptive error
// when it does not match.
func (e Email) validate() error {
	if !emailRe.MatchString(e.email) {
		return faults.Errorf("%s is not a valid email", e.email)
	}
	return nil
}
// NewEmail builds an Email value object, rejecting the empty string and any
// address that fails syntactic validation.
func NewEmail(
	email string,
) (Email, error) {
	if email == "" {
		return Email{}, faults.New("Email.email cannot be empty")
	}
	candidate := Email{email: email}
	err := candidate.validate()
	if err != nil {
		return Email{}, faults.Wrap(err)
	}
	return candidate, nil
}
// MustNewEmail is the panicking variant of NewEmail, for inputs known valid.
func MustNewEmail(
	email string,
) Email {
	result, err := NewEmail(email)
	if err != nil {
		panic(err)
	}
	return result
}
// IsZero reports whether e is the zero value.
func (e Email) IsZero() bool {
	return e == Email{}
}

// MarshalText implements encoding.TextMarshaler; it emits the raw address.
func (e Email) MarshalText() (text []byte, err error) {
	return []byte(e.email), nil
}

// UnmarshalText implements encoding.TextUnmarshaler; the input is validated
// after assignment. NOTE(review): on invalid input e keeps the bad value
// while the error is returned — callers should discard e on error.
func (e *Email) UnmarshalText(text []byte) error {
	e.email = string(text)
	return e.validate()
}
|
package merkle
import (
"crypto/sha256"
"hash"
"testing"
)
// TestNewConfig exercises NewConfig boundary validation: per the failure
// cases, depth must be within [1,16] and hash size within [1,64]; valid
// inputs yield the expected leaf/node counts.
func TestNewConfig(t *testing.T) {
	type input struct {
		hasher   hash.Hash
		depth    int
		hashSize int
	}
	type output struct {
		config *Config
		err    error
	}
	testCases := []struct {
		name string
		in   input
		out  output
	}{
		{
			"success: min size",
			input{sha256.New(), 1, 1},
			output{&Config{allLeavesNum: 2, allNodesNum: 3}, nil},
		},
		{
			"success: max size",
			input{sha256.New(), 16, 64},
			output{&Config{allLeavesNum: 65536, allNodesNum: 131071}, nil},
		},
		{
			"failure: too small depth",
			input{sha256.New(), 0, 32},
			output{nil, ErrTooSmallDepth},
		},
		{
			"failure: too large depth",
			input{sha256.New(), 17, 32},
			output{nil, ErrTooLargeDepth},
		},
		{
			"failure: too small hash size",
			input{sha256.New(), 8, 0},
			output{nil, ErrTooSmallHashSize},
		},
		{
			"failure: too large hash size",
			input{sha256.New(), 8, 65},
			output{nil, ErrTooLargeHashSize},
		},
	}
	for i, tc := range testCases {
		t.Logf("[%d] %s", i, tc.name)
		in, out := tc.in, tc.out
		conf, err := NewConfig(in.hasher, in.depth, in.hashSize)
		if err != out.err {
			t.Errorf("expected: %v, actual: %v", out.err, err)
		}
		// Fixed: a success case returning a nil config previously slipped
		// through silently because the field checks were skipped; require the
		// nil-ness of the result to match the expectation.
		if (conf == nil) != (out.config == nil) {
			t.Errorf("expected config: %+v, actual: %+v", out.config, conf)
			continue
		}
		if conf != nil {
			if conf.allLeavesNum != out.config.allLeavesNum {
				t.Errorf("expected: %d, actual: %d", out.config.allLeavesNum, conf.allLeavesNum)
			}
			if conf.allNodesNum != out.config.allNodesNum {
				t.Errorf("expected: %d, actual: %d", out.config.allNodesNum, conf.allNodesNum)
			}
		}
	}
}
|
package utils
import (
"net/url"
"strconv"
"github.com/athlum/gorp"
"github.com/juju/errors"
)
// Direction represents the sort direction
// swagger:strfmt Direction
// enum:ASC,DESC
type Direction string

// sort directions
const (
	// Asc sorts ascending.
	Asc Direction = "ASC"
	// Desc sorts descending (the package default).
	Desc Direction = "DESC"
)
const (
	// DefaultPageSize is used when the request does not specify a page size.
	DefaultPageSize = 20
)

// NewPage creates *Page from query parameters(e.g. pageStart=2&pageSize=50&sort=id&order=DESC).
func NewPage(params url.Values) *Page {
	if params == nil {
		// No query string at all: hand back pure defaults.
		return &Page{
			PageStart: 0,
			PageSize:  DefaultPageSize,
			Order:     "DESC",
			Sort:      "",
			Keyword:   "",
		}
	}
	page := &Page{
		Sort:    params.Get("sort"),
		Order:   params.Get("order"),
		Keyword: params.Get("keyword"),
	}
	// Unparseable numeric parameters simply leave the zero value in place.
	if v, err := strconv.ParseInt(params.Get("pageStart"), 10, 64); err == nil {
		page.PageStart = int(v)
	}
	if v, err := strconv.ParseInt(params.Get("pageSize"), 10, 64); err == nil {
		page.PageSize = int(v)
	}
	if page.PageSize == 0 {
		page.PageSize = DefaultPageSize
	}
	if page.Order == "" {
		page.Order = "DESC"
	}
	return page
}

// Page contains pagination information
type Page struct {
	// page start index, start from 0
	// example:1
	// default:1
	PageStart int `db:"pageStart" json:"pageStart"`
	// size of page
	// example:10
	// default:10
	PageSize int `db:"pageSize" json:"pageSize"`
	// sort order, ASC or DESC, default to DESC
	// example:DESC
	Order string `json:"order"`
	// field name to sort
	// example:id
	Sort string `json:"sort"`
	// keyword to query
	// example:arch
	Keyword string `db:"keyword" json:"keyword"`
}
// ToParams serializes the pagination settings back into url.Values. Sort and
// Order are emitted only when non-empty; Keyword is intentionally omitted.
func (p Page) ToParams() url.Values {
	params := url.Values{}
	params.Add("pageStart", strconv.Itoa(p.PageStart))
	params.Add("pageSize", strconv.Itoa(p.PageSize))
	if p.Sort != "" {
		params.Add("sort", p.Sort)
	}
	if p.Order != "" {
		params.Add("order", p.Order)
	}
	return params
}

// String returns readable string represents the page
func (p Page) String() string {
	return p.ToParams().Encode()
}
// Validate implements Validatable interface.
// It checks that PageStart is non-negative, PageSize is positive, and that
// Order, when set, is either "ASC" or "DESC".
func (p Page) Validate() error {
	if p.PageStart < 0 {
		// Fixed grammar of the message ("must greater or equal than").
		return errors.Errorf("pageStart must be greater than or equal to 0, got %v", p.PageStart)
	}
	if p.PageSize < 1 {
		// Fixed: the message referred to "size" while the field is pageSize,
		// and was ungrammatical.
		return errors.Errorf("pageSize must be greater than 0, got %v", p.PageSize)
	}
	if len(p.Order) > 0 {
		if p.Order != "ASC" && p.Order != "DESC" {
			return errors.Errorf("invalid sort order: %v", p.Order)
		}
	}
	return nil
}
// PageResponse is the response to a page request
// swagger:model
type PageResponse struct {
	*Page
	// total item
	// example:100
	Total int `db:"total" json:"total"`
	// payload
	Data interface{} `db:"data" json:"data"`
}

// NewPageResponse creates a PageResponse combining the request's paging
// settings, the total row count and the fetched payload.
func NewPageResponse(page *Page, total int, data interface{}) *PageResponse {
	return &PageResponse{
		Page:  page,
		Total: total,
		Data:  data,
	}
}
// LoadPage executes q with pagination applied and fills holder with the
// rows, returning the total (unpaged) row count. The count is taken from the
// first non-nil entry of countQueries when supplied, otherwise from q
// itself. A nil page disables paging and sorting entirely.
func LoadPage(tx gorp.SqlExecutor, q *Query, page *Page, holder interface{}, countQueries ...*Query) (int, error) {
	cq := q
	if len(countQueries) > 0 {
		if v := countQueries[0]; v != nil {
			cq = v
		}
	}
	n, err := cq.Count(tx)
	if err != nil {
		return -1, errors.Trace(err)
	}
	if page != nil {
		if err := page.Validate(); err != nil {
			return -1, errors.Trace(err)
		}
		if len(page.Sort) > 0 {
			// Sorting requested: default the direction and replace any
			// ordering already present on the query.
			if len(page.Order) == 0 {
				page.Order = "DESC"
			}
			q.ClearOrderBy().OrderByString(page.Sort, page.Order)
		}
		q.Offset(page.PageStart).Limit(page.PageSize)
	}
	if _, err = q.FetchAll(tx, holder); err != nil {
		return -1, errors.Trace(err)
	}
	return int(n), nil
}
// LoadPageResponse is a convenience wrapper around LoadPage that packages
// the fetched rows and total count into a *PageResponse.
func LoadPageResponse(tx gorp.SqlExecutor, q *Query, page *Page, holder interface{}, countQueries ...*Query) (*PageResponse, error) {
	total, err := LoadPage(tx, q, page, holder, countQueries...)
	if err != nil {
		return nil, err
	}
	return NewPageResponse(page, total, holder), nil
}
// LoadPage2 is LoadPage without the count query: it applies sorting and
// paging to q and fills holder. NOTE(review): the paging/sorting logic is
// duplicated from LoadPage — consider extracting a shared helper.
func LoadPage2(tx gorp.SqlExecutor, q *Query, page *Page, holder interface{}) error {
	if page != nil {
		if err := page.Validate(); err != nil {
			return errors.Trace(err)
		}
		if len(page.Sort) > 0 {
			// Default the direction and replace any existing ordering.
			if len(page.Order) == 0 {
				page.Order = "DESC"
			}
			q.ClearOrderBy().OrderByString(page.Sort, page.Order)
		}
		q.Offset(page.PageStart).Limit(page.PageSize)
	}
	if _, err := q.FetchAll(tx, holder); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// LoadPageResponse2 runs an explicit countQuery for the total, then loads
// the page via LoadPage2 and wraps everything in a *PageResponse.
func LoadPageResponse2(tx gorp.SqlExecutor, q *Query, page *Page, holder interface{}, countQuery *Query) (*PageResponse, error) {
	countQueryStr, vals, err := countQuery.ValQuery()
	if err != nil {
		return nil, q.QueryValError(err, countQueryStr, nil)
	}
	count, err := tx.SelectInt(countQueryStr, vals...)
	if err != nil {
		return nil, q.QueryValError(err, countQueryStr, nil)
	}
	if err := LoadPage2(tx, q, page, holder); err != nil {
		return nil, err
	}
	return NewPageResponse(page, int(count), holder), nil
}
|
package inmem
import (
chat "github.com/greatchat/gochat/transport"
)
// client is an in-memory chat transport: each named destination maps to a
// buffered Go channel. The channels map is not guarded by a lock —
// NOTE(review): confirm callers never use it from multiple goroutines.
type client struct {
	chanBuff int                          // capacity used when creating a new channel
	channels map[string]chan chat.Message // lazily created, keyed by destination name
}

// NewClient initialises a new in-memory client; chanBuffer is the capacity
// given to every per-destination channel.
func NewClient(chanBuffer int) chat.BasicClient {
	return &client{
		chanBuff: chanBuffer,
		channels: make(map[string]chan chat.Message),
	}
}
// channel returns the Go channel backing name, creating and caching a
// buffered channel on first use.
func (c *client) channel(name string) chan chat.Message {
	if existing, ok := c.channels[name]; ok {
		return existing
	}
	created := make(chan chat.Message, c.chanBuff)
	c.channels[name] = created
	return created
}
// Send message to dest channel. Blocks once the destination's buffer is
// full.
func (c *client) Send(dest string, msg chat.Message) error {
	// TODO timeout
	c.channel(dest) <- msg
	return nil
}

// Receive gets message from src chan. Blocks until a message is available.
func (c *client) Receive(src string) (chat.Message, error) {
	// TODO timeout
	msg := <-c.channel(src)
	return msg, nil
}
|
package main
import "fmt"
// heightTallPerson is the height threshold (meters) above which a person is
// considered tall.
const heightTallPerson = 1.90

func main() {
	firstName := "Joabe"
	lastName := "Leão"
	heightInMeters := 1.72
	workInAVCorp := true
	monthsWork := 30

	// Guard: first name must not be blank.
	if firstName == "" || len(firstName) < 1 {
		panic("O primeiro nome esta vazio.")
	}

	// Only Joabe is recognized; anyone else aborts the program.
	if firstName != "Joabe" {
		panic("Não conheço essa pessoa.")
	}
	fmt.Println("Ele é o Joabe ", lastName)

	// Report tallness relative to the threshold.
	switch {
	case heightInMeters > heightTallPerson:
		fmt.Println("Ele é alto, mede ", heightInMeters)
	default:
		fmt.Println("Ele não é tão alto, mede ", heightInMeters)
	}

	if !workInAVCorp {
		panic("Essa pessoa não trabalha na AVCorp.")
	}

	// More than a year of tenure gets the detailed message.
	if monthsWork > 12 {
		fmt.Printf("Ele já trabalha há um tempo na AVCorp, mais precisamente %d meses.\n", monthsWork)
	} else {
		fmt.Println("Ele trabalha a pouco tempo na AVCorp.")
	}
}
|
package service
// Update processes a single UpdateEvent; implementations return an error
// when handling fails.
type Update interface {
	Do(event UpdateEvent) error
}
|
package builder
import (
exec "os/exec"
"../../../contract/request"
sshBuilder "../../../domain/builder"
)
// SshCommandBuilder builds command invocations from SshCommandBuildRequest
// values.
type SshCommandBuilder struct {
}

// InitSshCommandBuilder returns a ready-to-use ISshCommandBuilder.
func InitSshCommandBuilder() sshBuilder.ISshCommandBuilder {
	builder := &SshCommandBuilder{}
	return builder
}

// BuildSshCommand renders the command line as "<name> <command>".
func (b *SshCommandBuilder) BuildSshCommand(request request.SshCommandBuildRequest) string {
	return request.CommandName + " " + request.Command
}

// Build wraps the request in an *exec.Cmd ready to run.
// NOTE(review): request.Command is passed as a single argument and is NOT
// split on whitespace — confirm callers expect exactly one argument.
func (b *SshCommandBuilder) Build(request request.SshCommandBuildRequest) *exec.Cmd {
	cmd := exec.Command(request.CommandName, request.Command)
	return cmd
}
|
package command
import (
"context"
"github.com/quintans/faults"
"github.com/quintans/go-clean-ddd/internal/app"
"github.com/quintans/go-clean-ddd/internal/domain"
"github.com/quintans/go-clean-ddd/internal/domain/customer"
)
// UpdateCustomerHandler handles UpdateCustomerCommand requests.
type UpdateCustomerHandler interface {
	Handle(context.Context, UpdateCustomerCommand) error
}

// UpdateCustomerCommand carries the customer id and the replacement name.
type UpdateCustomerCommand struct {
	Id        string
	FirstName string
	LastName  string
}

// UpdateCustomer is the command-handler implementation backed by the
// customer repository.
type UpdateCustomer struct {
	customerRepository app.CustomerRepository
}

// NewUpdateCustomer builds the handler.
// NOTE(review): the customerView parameter is accepted but never stored or
// used — confirm whether it can be removed or should be wired in.
func NewUpdateCustomer(customerRepository app.CustomerRepository, customerView app.CustomerViewRepository) UpdateCustomer {
	return UpdateCustomer{
		customerRepository: customerRepository,
	}
}
// Handle parses the command's id and name, then applies the new FullName to
// the stored customer inside a repository update.
func (r UpdateCustomer) Handle(ctx context.Context, cmd UpdateCustomerCommand) error {
	id, err := customer.ParseCustomerID(cmd.Id)
	if err != nil {
		return faults.Wrap(err)
	}
	name, err := domain.NewFullName(cmd.FirstName, cmd.LastName)
	if err != nil {
		return faults.Wrap(err)
	}
	apply := func(ctx context.Context, c *customer.Customer) error {
		c.UpdateInfo(name)
		return nil
	}
	return r.customerRepository.Update(ctx, id, apply)
}
|
package main
import (
"bufio"
"log"
"net/http"
"os"
"strings"
)
var (
	// Headers maps header names to tokenized value templates injected into
	// outgoing requests by injectHeaders.
	Headers map[string]*tokenizedString
)

// loadHeaders (re)initializes Headers from *headersFile, one "Name: value"
// pair per line. A missing or unreadable file deliberately results in no
// extra headers.
func loadHeaders() {
	Headers = map[string]*tokenizedString{}
	file, err := os.Open(*headersFile)
	if err != nil {
		// Probably file not found; intentionally continue with no headers.
		return
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		// Fixed: a line without ": " previously caused an index-out-of-range
		// panic on arr[1]. SplitN also keeps any ": " inside the value intact
		// instead of truncating it.
		arr := strings.SplitN(scanner.Text(), ": ", 2)
		if len(arr) != 2 {
			continue // skip malformed lines
		}
		Headers[arr[0]] = NewTokenizedString(arr[1])
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
// injectHeaders copies every configured header into req, overwriting any
// value already present.
func injectHeaders(req *http.Request) {
	for name, value := range Headers {
		req.Header.Set(name, value.String())
	}
}
|
package main
import (
"fmt"
)
// sortedSquares returns the squares of the elements of A in non-decreasing
// order, assuming A itself is sorted ascending (LeetCode 977).
//
// Two pointers walk inward from both ends; the larger square is written to
// the back of the result. O(n) time, O(n) extra space.
//
// Fixed: no longer depends on the misleadingly named abs helper (which
// actually returns the square); the squaring is done inline, and the
// redundant continue statements are gone.
func sortedSquares(A []int) []int {
	ret := make([]int, len(A))
	i, j := 0, len(A)-1
	for n := len(A) - 1; i <= j; n-- {
		left, right := A[i]*A[i], A[j]*A[j]
		if left >= right {
			ret[n] = left
			i++
		} else {
			ret[n] = right
			j--
		}
	}
	return ret
}
// abs returns a*a — the SQUARE of a, not the absolute value, despite the
// name. NOTE(review): renaming to square would be clearer but must be
// coordinated with any remaining callers.
func abs(a int) int {
	return a * a
}
// main runs the LeetCode-977 sample input and prints the sorted squares.
func main() {
	A := []int{-7, -3, 2, 3, 11}
	fmt.Println(sortedSquares(A))
}
|
package mq
import (
"encoding/json"
"fmt"
"github.com/streadway/amqp"
"mq-go/config"
"testing"
"time"
)
// TestAdmin publishes a single AdminMsg on the admin queue (smoke test; the
// publish result is not asserted).
func TestAdmin(t *testing.T) {
	adminMq := Admin()
	msg := AdminMsg{
		Name:  "ybdx",
		Value: "hello",
	}
	adminMq.Publish(msg)
}

// TestQueue_Consume drains the admin queue using the ack handler.
func TestQueue_Consume(t *testing.T) {
	adminMq := Admin()
	adminMq.Consume(ack, nil)
}
// nack is a consume handler that rejects the delivery, requeues it, and
// backs off for five seconds.
func nack(delivery amqp.Delivery) {
	data := &body{}
	// Fixed: the unmarshal error was silently discarded, so a corrupt
	// payload printed garbage; report it and still requeue.
	if err := json.Unmarshal(delivery.Body, data); err != nil {
		fmt.Printf("unmarshal delivery body: %v\n", err)
	} else {
		fmt.Printf("%s\n", data.Body)
	}
	delivery.Nack(false, true)
	time.Sleep(time.Second * 5)
}
// ack is a consume handler that decodes the enveloped AdminMsg, logs it,
// and acknowledges the delivery.
func ack(delivery amqp.Delivery) {
	// Renamed from "body", which shadowed the package's body type.
	envelope := struct {
		Body    AdminMsg `json:"body"`
		Headers Headers  `json:"header"`
	}{}
	// Fixed: the unmarshal error was silently discarded.
	if err := json.Unmarshal(delivery.Body, &envelope); err != nil {
		fmt.Printf("unmarshal delivery body: %v\n", err)
	} else {
		fmt.Printf("%+v\n", envelope)
	}
	delivery.Ack(false)
}
// TestNew wires a full config against a local RabbitMQ instance and
// consumes with the ack handler. NOTE(review): requires a broker listening
// on 127.0.0.1:5672 — this is an integration test, not a unit test.
func TestNew(t *testing.T) {
	ybdxConfig := config.Config{
		PublishUrl:    "amqp://admin:admin@127.0.0.1:5672/test?heartbeat=15",
		ConsumeUrl:    "amqp://admin:admin@127.0.0.1:5672/test?heartbeat=15",
		Queue:         "ybdx",
		Key:           "rk.admin.qa",
		Exchange:      "ex-admin-direct.qa",
		PrefetchCount: 100,
	}
	MqInstance := New("ybdx", ybdxConfig)
	//MqInstance.Publish("this is the first")
	//MqInstance.Publish("this is the Second")
	time.Sleep(1 * time.Second)
	MqInstance.Consume(ack, nil)
}
|
package main
// 优美的解法 (根本在于消除长度差)
// 迭代解法
// getIntersectionNode returns the node where lists headA and headB
// intersect, or nil when they are disjoint. Each pointer walks its own list
// and then restarts on the other; both travel lenA+lenB steps, cancelling
// the length difference, so they meet at the intersection (or both reach
// nil together).
func getIntersectionNode(headA, headB *ListNode) *ListNode {
	a, b := headA, headB
	for a != b {
		if a != nil {
			a = a.Next
		} else {
			a = headB
		}
		if b != nil {
			b = b.Next
		} else {
			b = headA
		}
	}
	return a
}
// main is intentionally empty; this file only hosts the solution above.
func main() {
}
/*
题目链接: https://leetcode-cn.com/problems/intersection-of-two-linked-lists/
*/ |
package trello
// Checklist models a Trello checklist as returned by the REST API.
type Checklist struct {
	Id      string  `json:"id"`
	Name    string  `json:"name"`
	IdBoard string  `json:"idBoard"` // board owning the checklist
	IdCard  string  `json:"idCard"`  // card the checklist is attached to
	Pos     float32 `json:"pos"`     // ordering position within the card
	// CheckItems are the individual entries of the checklist.
	CheckItems []struct {
		State    string      `json:"state"`
		Id       string      `json:"id"`
		Name     string      `json:"name"`
		NameData interface{} `json:"nameData"`
		Pos      float32     `json:"pos"`
	} `json:"checkItems"`
}
|
package main
import (
"github.com/colinmarc/hdfs/v2"
"os"
"time"
)
// touch implements an HDFS `touch`: for each path it refreshes the access
// and/or modification time, creating an empty file when the path does not
// exist (unless noCreate is set). Glob patterns are rejected.
func touch(paths []string, noCreate bool, accessTime bool, modifyTime bool) {
	paths, nn, err := normalizePaths(paths)
	if err != nil {
		fatal(err)
	}
	if len(paths) == 0 {
		printHelp()
	}
	client, err := getClient(nn)
	if err != nil {
		fatal(err)
	}
	for _, p := range paths {
		if hasGlob(p) {
			// Fixed: use keyed fields (go vet clean) and the correct
			// operation name — this command is touch, not mkdir.
			fatal(&os.PathError{Op: "touch", Path: p, Err: os.ErrNotExist})
		}
		finfo, err := client.Stat(p)
		exists := !os.IsNotExist(err)
		if (err != nil && exists) || (!exists && noCreate) {
			// Either a real stat failure, or the file is missing and we were
			// told not to create it.
			fatal(err)
		}
		if exists {
			if accessTime {
				// Refresh atime, preserving the current mtime.
				err = client.Chtimes(p, time.Now(), finfo.ModTime())
			}
			if modifyTime {
				// Refresh mtime, preserving the current atime.
				hdfsFileInfo := finfo.(*hdfs.FileInfo)
				err = client.Chtimes(p, hdfsFileInfo.AccessTime(), time.Now())
			}
		} else {
			err = client.CreateEmptyFile(p)
		}
		if err != nil {
			fatal(err)
		}
	}
}
|
package storage
import (
"encoding/json"
"path/filepath"
"sync"
"time"
)
const (
	// MaxRecordsPerFile caps how many records a single batch file may hold
	// before it is flushed and a new file is started.
	MaxRecordsPerFile   = 10
	ResultsSubdirectory = "results"
	ProductSubdirectory = "results_by_product"
)

var FlushInterval = time.Second // FIXME make into a parameter. turned var from const to make tests more responsive

// batchWriter is a recordwriter that writes records in batches.
// it is not safe for concurrent use
// init with a file seq #s
// implement recordWriter interface
type batchWriter struct {
	fs
	fileSeq         int64            // sequence number of the most recently created batch file
	entrySeq        int64            // global sequence number of the last written entry
	productEntrySeq map[string]int64 // per-product entry sequence numbers, created lazily
	*batch                           // currently open batch; nil when none is open
}
// writeRecord appends r to the current batch, opening a new batch when
// needed and flushing once the batch reaches MaxRecordsPerFile. It also
// maintains per-product entry sequence numbers, seeding them from existing
// product-index files the first time a product id is seen.
func (w *batchWriter) writeRecord(r *record) (err error) {
	err = w.startBatchIfNeeded(r.entry.Time)
	if err != nil {
		return
	}
	// ensure batch is flushed if it's full
	if w.batch.nRecords+1 == MaxRecordsPerFile {
		defer w.closeBatch()
	}
	// Bump the global entry sequence only after a successful write.
	defer func() {
		if err == nil {
			w.entrySeq++
		}
	}()
	// FIXME
	// entrySeq needs to be the sequence number for that specific product id
	// this duplicates work performed by the fs based priceReader interface
	// this lives here because batchWriter sees records to be written in
	// order, and therefore can maintain a consistent view of these sequence
	// numbers
	if w.productEntrySeq == nil {
		// create lazily to avoid polluting other code with this
		// workaround during construction time
		w.productEntrySeq = make(map[string]int64)
	}
	productEntrySeq, exists := w.productEntrySeq[r.ProductId]
	if !exists {
		// First time seeing this product: derive the next sequence number
		// from the most recent file in the product's index directory.
		files, err := w.fs.Sub(filepath.Join(ProductSubdirectory, ProductIdHash(r.ProductId))).Files()
		if err != nil {
			return err
		}
		if len(files) > 0 {
			// calculate next entryseq based on previous file entries
			var f filename
			err = f.FromString(files[len(files)-1])
			if err != nil {
				return err // TODO wrap
			}
			productEntrySeq = f.entrySeq + f.nRecords - 1
		}
	}
	productEntrySeq++
	w.productEntrySeq[r.ProductId] = productEntrySeq
	return w.batch.writeRecord(r, productEntrySeq)
}
// startBatchIfNeeded ensures w.batch is an open batch that may still accept
// an event at time now: a batch older than FlushInterval is closed, and a
// fresh file with incremented file/entry sequence numbers is created.
func (w *batchWriter) startBatchIfNeeded(now time.Time) (err error) {
	if w.batch != nil {
		if now.Sub(w.batch.start) < FlushInterval {
			// batch is set, and OK to use
			return nil
		} else {
			// next event is too late for batch
			w.closeBatch()
		}
	}
	b := &batch{
		fs: w.fs,
		filename: filename{
			fileSeq:  w.fileSeq + 1,
			entrySeq: w.entrySeq + 1,
			start:    now,
		},
		end: now,
	}
	// FIXME refactor filepath logic into some abstraction
	b.file, err = w.fs.New(filepath.Join(ResultsSubdirectory, b.filename.String()))
	if err != nil {
		return err
	}
	w.fileSeq++
	err = b.initialize()
	if err != nil {
		return err
	}
	w.batch = b
	return nil
}

// closeBatch detaches the current batch from the writer and flushes it
// (asynchronously; see batch.flush).
func (w *batchWriter) closeBatch() {
	batch := w.batch
	w.batch = nil
	batch.flush()
}
// batch is a single results file in progress: a JSON array of records plus
// bookkeeping needed to maintain the per-product index on flush.
type batch struct {
	fs writeFS
	filename
	end           time.Time                  // time of the most recently written record
	productFields map[string]*perProductInfo // per-product counters for the index links
	file          appendFile
	synced        chan struct{} // closes when synced
	flushOnce     sync.Once     // flush may be triggered by timer or by closeBatch
	sync.Mutex                  // FIXME needed because of outstanding data race
}

// perProductInfo tracks, per product id, how many records this batch holds
// and the first entry sequence number for that product in this file.
type perProductInfo struct {
	nRecords int64
	entrySeq int64
}

// initialize opens the JSON array on disk and arms the flush timer so the
// buffer is always flushed once it can no longer accept records.
func (b *batch) initialize() error {
	b.productFields = make(map[string]*perProductInfo, MaxRecordsPerFile)
	b.synced = make(chan struct{})
	// FIXME redo with ndjson for robustness
	_, err := b.file.Write([]byte("[\n\t")) // start a JSON array as per spec
	if err != nil {
		return err
	}
	// ensure buffer is always flushed after it can no longer be filled
	time.AfterFunc(FlushInterval, func() {
		b.flush()
	})
	return nil
}
// writeRecord appends r as a JSON element, updates the per-product counters,
// and renames the on-disk file so its name always reflects the current
// record/product counts. Records must arrive in non-decreasing time order.
func (b *batch) writeRecord(r *record, hackyProductEntrySeq int64) (err error) {
	if r.entry.Time.Before(b.end) {
		panic("time went backwards")
	}
	// FIXME due to data races on internal fields, should not be necessary in principle
	b.Lock()
	defer b.Unlock()
	// FIXME ndjson to remove this hack while retaining durability of early writes
	if b.nRecords > 0 && err == nil {
		// Separator between JSON array elements.
		_, err = b.file.Write([]byte(",\n\t"))
		if err != nil {
			return
		}
	}
	by, err := json.MarshalIndent(r, "\t", "\t")
	if err != nil {
		return
	}
	_, err = b.file.Write(by)
	if err != nil {
		return
	}
	// Remember the old filename so the rename below can find it.
	old := b.filename
	b.nRecords++
	b.end = r.entry.Time
	if perProduct, exists := b.productFields[r.ProductId]; !exists {
		b.nProductIds++
		b.productFields[r.ProductId] = &perProductInfo{
			nRecords: 1,
			entrySeq: hackyProductEntrySeq, // FIXME see above
		}
	} else {
		// only assign nRecords, since we want to know the first
		// entrySeq in the file
		perProduct.nRecords++
	}
	// keep update nRecords and nProducts fields up to date in the filename
	err = b.fs.Rename(
		// TODO abstract ResultsSubdirectory logic
		filepath.Join(ResultsSubdirectory, old.String()),
		filepath.Join(ResultsSubdirectory, b.filename.String()),
	)
	// TODO how to track entrySeq per product?
	// could keep track and independently load from snapshot
	return
}

// flush terminates the JSON array, syncs and closes the file, then links it
// into each product's index directory. It runs at most once (guarded by
// flushOnce) and signals completion by closing b.synced.
func (b *batch) flush() {
	b.flushOnce.Do(func() {
		go func() {
			b.Lock() // FIXME still needed due to data race on f.filename, in principle should not be necessary
			defer b.Unlock()
			_, _ = b.file.Write([]byte("\n]\n")) // TODO error
			_ = b.file.Sync()                    // TODO error
			_ = b.file.Close()                   // TODO error
			// TODO abstract filepath logic
			finalName := filepath.Join(ResultsSubdirectory, b.filename.String())
			// link to product index directories
			for productId, v := range b.productFields {
				productFilename := b.filename
				productFilename.nRecords = v.nRecords
				productFilename.entrySeq = v.entrySeq
				_ = b.fs.Link(finalName, filepath.Join(ProductSubdirectory, ProductIdHash(productId), productFilename.String())) // TODO error
			}
			close(b.synced)
		}()
	})
}
|
package main
import (
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql" // 选择驱动
)
// Product is the demo model; gorm.Model contributes ID, CreatedAt,
// UpdatedAt and DeletedAt.
type Product struct {
	gorm.Model
	Code  string
	Price uint
}

// product is package-level scratch state shared by R and U below.
// NOTE(review): shared mutable state makes these helpers unsafe for
// concurrent use — confirm single-threaded callers.
var product Product

// R prints the product with the given id, or a not-found message. (Query by
// ID.)
// NOTE(review): res.(Product) assumes First(...).Value carries a Product
// value; a failed assertion would panic — verify against the gorm version
// in use.
func R(id int, db *gorm.DB) {
	res := db.First(&product, id).Value // fetch the product with the given id
	if res.(Product).Model.ID == 0 {
		fmt.Println("查询的数据不存在")
		return
	}
	fmt.Printf("%+v", res)
}
// C inserts a new product row and prints whether the create succeeded.
func C(data *Product, db *gorm.DB) {
	err := db.Create(&data).Error
	if err != nil {
		fmt.Println("创建失败")
		return
	}
	fmt.Println("创建成功")
}
// U updates the Price of the product with the given id, printing the
// outcome. The row's existence is verified first.
func U(id int, data Product, db *gorm.DB) {
	// Make sure the record to update exists.
	res := db.First(&product, id).Value
	if res.(Product).Model.ID == 0 {
		fmt.Println("更新的数据不存在")
		return
	}
	// Fixed: the Where clause was chained AFTER Update, where it had no
	// effect — in gorm v1, Update executes the statement immediately, so
	// conditions must be applied before it.
	if db.Model(&product).Where("id = ?", id).Update("Price", data.Price).Error != nil {
		fmt.Println("更新失败")
	} else {
		fmt.Println("更新成功")
	}
}
// D deletes the product with the given id and prints the outcome.
func D(id int, db *gorm.DB) {
	err := db.Delete(&product, id).Error
	if err != nil {
		fmt.Println("删除失败")
		return
	}
	fmt.Println("删除成功")
}
// main opens the MySQL connection, runs auto-migration, and performs a demo
// price update. NOTE(review): credentials and a public IP are hardcoded in
// the DSN — move them to configuration/secrets.
func main() {
	db, err := gorm.Open("mysql", "root:123456@tcp(203.195.196.216:3306)/test?charset=utf8&parseTime=True&loc=Local")
	if err != nil {
		panic("连接数据库失败")
	}
	//db.SingularTable(true) // when true, table names are not pluralized with a trailing "s"
	defer db.Close()
	// Auto-migration: create/alter the products table to match the model.
	db.AutoMigrate(&Product{})
	//C(&Product{Code:"123456", Price:123455}, db)
	db.Model(&product).Where("id=?", 8).Update("Price", 20000)
	// Create (C)
	//db.Create(&Product{Code: "L12121212", Price: 1000})
	//var product Product
	// Read (R)
	//res := db.First(&product, 1000).Value // fetch the product with the given id
	//fmt.Printf("%+v", res)
	//fmt.Println(res)
	//db.First(&product, "code = ?", "L1212") // fetch the product whose code is l1212
	//
	//// Update (U) - set the product's price to 2000
	//db.Model(&product).Update("Price", 4000).Where("id", 100)
	// Delete (D)
	//db.Delete(&product, 2)
}
|
package swarm
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/binary"
"encoding/pem"
"errors"
"io"
"log"
"math/big"
"net"
"github.com/pion/stun"
)
// addrFromStun asks Google's public STUN server for this socket's public
// "ip:port" as seen from the internet, writing directly on conn so the
// local port mapping is preserved.
func addrFromStun(conn *net.UDPConn) (string, error) {
	raddr, err := net.ResolveUDPAddr("udp4", "stun.l.google.com:19302")
	if err != nil {
		return "", err
	}
	// building binding request with random transaction id.
	message := stun.MustBuild(stun.TransactionID, stun.BindingRequest)
	// must use manual WriteToUDP instead of client to preserve port multiplex (client must use DialUDP)
	_, err = conn.WriteToUDP(message.Raw, raddr)
	if err != nil {
		return "", err
	}
	// read in the first response and assume it is correct
	// ideally you would check this and retry until you got a good response
	buf := make([]byte, 1024)
	n, _, err := conn.ReadFromUDP(buf)
	if err != nil {
		return "", err
	}
	buf = buf[:n]
	if !stun.IsMessage(buf) {
		return "", errors.New("got bogus message from STUN server")
	}
	resp := new(stun.Message)
	resp.Raw = buf
	err = resp.Decode()
	if err != nil {
		return "", err
	}
	// The XOR-mapped address attribute carries our public address.
	var xorAddr stun.XORMappedAddress
	err = xorAddr.GetFrom(resp)
	if err != nil {
		return "", err
	}
	return xorAddr.String(), nil
}
// prefixStringWithLen returns s preceded by its length as a 4-byte
// big-endian prefix — the wire format consumed by readLenPrefixedString.
// It aborts the process when s is empty.
func prefixStringWithLen(s string) []byte {
	if len(s) == 0 {
		log.Fatal("Cannot prefix a string with length 0")
	}
	out := make([]byte, 4+len(s))
	binary.BigEndian.PutUint32(out, uint32(len(s)))
	copy(out[4:], s)
	return out
}
// readLenPrefixedString: reads in a string from the reader assuming that the first 4 bytes
// are the length of the string
func readLenPrefixedString(r io.Reader) (string, error) {
buf := make([]byte, 4)
_, err := io.ReadFull(r, buf)
if err != nil {
return "", err
}
len := binary.BigEndian.Uint32(buf)
ret := make([]byte, len)
_, err = io.ReadFull(r, ret)
if err != nil {
return "", err
}
return string(ret), nil
}
// GenerateTLSConfig : Setup a bare-bones TLS config for the server.
// It builds a throwaway self-signed certificate advertising the
// "quic-holepunch" ALPN protocol; the key is never persisted.
//
// Fixed: key size raised from 1024 to 2048 bits — 1024-bit RSA is
// considered broken and is rejected by modern TLS stacks.
func GenerateTLSConfig() *tls.Config {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	template := x509.Certificate{SerialNumber: big.NewInt(1)}
	certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
	tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		panic(err)
	}
	return &tls.Config{
		Certificates: []tls.Certificate{tlsCert},
		NextProtos:   []string{"quic-holepunch"},
	}
}
|
// kggseq project doc.go
/*
kggseq document
*/
package main
|
package swarm
import (
"errors"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/swarm"
"github.com/gaia-docker/tugbot-leader/mockclient"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"testing"
)
// TestIsValidNode_Error: an Info() failure must propagate as an error.
func TestIsValidNode_Error(t *testing.T) {
	client := mockclient.NewMockClient()
	client.On("Info", mock.Anything).Return(types.Info{}, errors.New("Expected :)")).Once()
	assert.Error(t, IsValidNode(client))
	client.AssertExpectations(t)
}

// TestIsValidNode_NotSwarmMaster: an active node without manager control
// (ControlAvailable=false) is rejected.
func TestIsValidNode_NotSwarmMaster(t *testing.T) {
	client := mockclient.NewMockClient()
	client.On("Info", mock.Anything).Return(types.Info{
		Swarm: swarm.Info{
			LocalNodeState:   swarm.LocalNodeStateActive,
			ControlAvailable: false}},
		nil).Once()
	assert.Error(t, IsValidNode(client))
	client.AssertExpectations(t)
}

// TestIsValidNode_NotActive: a manager whose swarm state is inactive is
// rejected.
func TestIsValidNode_NotActive(t *testing.T) {
	client := mockclient.NewMockClient()
	client.On("Info", mock.Anything).Return(types.Info{
		Swarm: swarm.Info{
			LocalNodeState:   swarm.LocalNodeStateInactive,
			ControlAvailable: true}},
		nil).Once()
	assert.Error(t, IsValidNode(client))
	client.AssertExpectations(t)
}

// TestIsValidNode: an active swarm manager passes validation.
func TestIsValidNode(t *testing.T) {
	client := mockclient.NewMockClient()
	client.On("Info", mock.Anything).Return(types.Info{
		Swarm: swarm.Info{
			LocalNodeState:   swarm.LocalNodeStateActive,
			ControlAvailable: true}},
		nil).Once()
	assert.NoError(t, IsValidNode(client))
	client.AssertExpectations(t)
}
|
package linked_lists
import (
"testing"
)
// TestListFromSlice round-trips a slice through listFromSlice/sliceFromList
// and checks length and element equality.
//
// Fixed: dropped the unused `expected` table field and replaced bare
// t.Fail() with t.Errorf so failures explain what went wrong.
func TestListFromSlice(t *testing.T) {
	tests := []struct {
		in []int
	}{
		{[]int{1, 2, 3, 4, 5}},
	}
	for _, test := range tests {
		actualList := listFromSlice(test.in...)
		actualSlice := sliceFromList(actualList)
		if len(actualSlice) != len(test.in) {
			t.Errorf("length mismatch: expected %d, got %d", len(test.in), len(actualSlice))
			continue
		}
		for i := range test.in {
			if actualSlice[i] != test.in[i] {
				t.Errorf("index %d: expected %d, got %d", i, test.in[i], actualSlice[i])
			}
		}
	}
}
// TestRemoveDuplicates verifies that removeDuplicates keeps the first
// occurrence of each value and preserves order.
func TestRemoveDuplicates(t *testing.T) {
	tests := []struct {
		in       []int
		expected []int
	}{
		{[]int{1, 3, 2, 3}, []int{1, 3, 2}},
		{[]int{1, 3, 2, 3, 2, 4}, []int{1, 3, 2, 4}},
		{[]int{1, 1, 3, 2, 3, 3, 4, 5, 4}, []int{1, 3, 2, 4, 5}},
	}
	for _, test := range tests {
		list := listFromSlice(test.in...)
		removeDuplicates(list)
		actualSlice := sliceFromList(list)
		if len(test.expected) != len(actualSlice) {
			t.Error()
		}
		for i := range test.expected {
			if actualSlice[i] != test.expected[i] {
				t.Error()
			}
		}
	}
}

// TestRemoveDuplicatesNoBuffer checks the O(1)-space variant against the
// same expectations as TestRemoveDuplicates.
func TestRemoveDuplicatesNoBuffer(t *testing.T) {
	tests := []struct {
		in       []int
		expected []int
	}{
		{[]int{1, 3, 2, 3}, []int{1, 3, 2}},
		{[]int{1, 3, 2, 3, 2, 4}, []int{1, 3, 2, 4}},
		{[]int{1, 1, 3, 2, 3, 3, 4, 5, 4}, []int{1, 3, 2, 4, 5}},
	}
	for _, test := range tests {
		list := listFromSlice(test.in...)
		removeDuplicatesNoBuffer(list)
		actualSlice := sliceFromList(list)
		if len(test.expected) != len(actualSlice) {
			t.Error()
		}
		for i := range test.expected {
			if actualSlice[i] != test.expected[i] {
				t.Error()
			}
		}
	}
}

// TestFindNthFromLast verifies findNthFromLast, including the
// out-of-range case which must yield nil.
func TestFindNthFromLast(t *testing.T) {
	tests := []struct {
		in1      []int
		in2      int
		expected interface{}
	}{
		{[]int{1, 2, 3, 4, 5}, 2, 4},
		{[]int{1, 2, 3, 4, 5}, 3, 3},
		{[]int{1, 2, 3, 4, 5}, 4, 2},
		{[]int{1, 2, 3, 4, 5}, 6, nil},
	}
	for _, test := range tests {
		list := listFromSlice(test.in1...)
		actual := findNthFromLast(list, test.in2)
		// test nil pointers
		if actual == nil {
			if test.expected != nil {
				t.Errorf("Expected: %v, got: %v", test.expected, actual)
			}
			continue
		}
		if test.expected != actual.data {
			t.Errorf("Expected: %v, got: %v", test.expected, actual.data)
		}
	}
}
|
package skel
import (
"github.com/globalsign/mgo"
)
// Get 定义获取操作
func (skel *Skel) Get() (skelRet *Skel, err error) {
c := skel.GetC()
defer c.Database.Session.Close()
err = c.Find(skel).One(&skelRet)
if err != nil {
if err != mgo.ErrNotFound {
return
}
err = nil
return
}
return
}
|
package mos6502
import (
"testing"
"github.com/KaiWalter/go6502/pkg/addressbus"
"github.com/KaiWalter/go6502/pkg/memory"
)
const (
	// endOfDecimalTest is the PC value at which the test ROM signals
	// completion; resultDecimalTest is the RAM address holding the result
	// flag (0 = success).
	endOfDecimalTest    = 0x024b
	resultDecimalTest   = 0x0b
)

// TestDecimal runs Bruce Clark's 6502 decimal-mode test ROM to completion
// and asserts that the result byte is zero.
func TestDecimal(t *testing.T) {
	// arrange
	ramContent, err := RetrieveROM("6502_decimal_test.bin")
	if err != nil {
		t.Errorf("could not retrieve ROM: %v", err)
	}
	// Relocate the ROM image from 0x0000 to 0x0200 and clear the old range.
	// NOTE(review): this copies 0x1FF (511) bytes — if a full 512-byte page
	// is intended the bound should be 0x200; confirm against the ROM layout.
	for i := 0; i < 0x1FF; i++ {
		ramContent[i+0x200] = ramContent[i]
		ramContent[i] = 0
	}
	ram := memory.Memory{AddressOffset: 0, AddressSpace: ramContent[:]}
	bus := addressbus.SimpleBus{}
	bus.InitBus(&ram)
	Init(&bus)
	WaitForSystemResetCycles()
	PC = 0x200
	// prevPC := uint16(0xFFFF)
	newInstruction := true
	// act
	for int(PC) != endOfDecimalTest {
		err := Cycle()
		if err != nil {
			t.Errorf("CPU processing failed %v", err)
			break
		}
		if newInstruction {
			// fmt.Printf("%s %04x %04x SP:%02x A:%02x X:%02x Y:%02x abs:%04x fetched:%02x Status:%02x %08b\n",
			// opDef.memnonic, CurrentPC, prevPC, SP, A, X, Y,
			// absoluteAddress, fetched, Status, Status,
			// )
			// prevPC = CurrentPC
			newInstruction = false
		}
		if CyclesCompleted() {
			newInstruction = true
		}
	}
	// assert
	if ramContent[resultDecimalTest] != 0 {
		t.Errorf("failed - value actual %x / 0 expected", ramContent[resultDecimalTest])
	}
}
|
package executor_runner
import (
"fmt"
"os/exec"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
type ExecutorRunner struct {
executorBin string
listenAddr string
wardenNetwork string
wardenAddr string
etcdCluster []string
loggregatorServer string
loggregatorSecret string
Session *gexec.Session
Config Config
}
type Config struct {
MemoryMB string
DiskMB string
ConvergenceInterval time.Duration
HeartbeatInterval time.Duration
TempDir string
TimeToClaim time.Duration
ContainerOwnerName string
ContainerMaxCpuShares int
DrainTimeout time.Duration
}
var defaultConfig = Config{
MemoryMB: "1024",
DiskMB: "1024",
ConvergenceInterval: 30 * time.Second,
HeartbeatInterval: 60 * time.Second,
TempDir: "/tmp",
TimeToClaim: 30 * 60 * time.Second,
ContainerOwnerName: "",
ContainerMaxCpuShares: 1024,
DrainTimeout: 5 * time.Second,
}
func New(executorBin, listenAddr, wardenNetwork, wardenAddr string, etcdCluster []string, loggregatorServer string, loggregatorSecret string) *ExecutorRunner {
return &ExecutorRunner{
executorBin: executorBin,
listenAddr: listenAddr,
wardenNetwork: wardenNetwork,
wardenAddr: wardenAddr,
etcdCluster: etcdCluster,
loggregatorServer: loggregatorServer,
loggregatorSecret: loggregatorSecret,
Config: defaultConfig,
}
}
func (r *ExecutorRunner) Start(config ...Config) {
r.StartWithoutCheck(config...)
Eventually(r.Session, 5*time.Second).Should(gbytes.Say("executor.started"))
}
// StartWithoutCheck launches the executor binary with flags derived from the
// merged config but does not wait for any readiness output (use Start for
// that). It panics if a session is already running, since each runner
// manages exactly one process. On success it records the effective Config
// and the gexec Session on the runner.
//
// NOTE(review): ConvergenceInterval and TimeToClaim exist in Config but are
// never passed as flags here — confirm whether that is intentional.
func (r *ExecutorRunner) StartWithoutCheck(config ...Config) {
	if r.Session != nil {
		panic("starting more than one executor!!!")
	}

	configToUse := r.generateConfig(config...)

	executorSession, err := gexec.Start(
		exec.Command(
			r.executorBin,
			"-listenAddr", r.listenAddr,
			"-wardenNetwork", r.wardenNetwork,
			"-wardenAddr", r.wardenAddr,
			"-etcdCluster", strings.Join(r.etcdCluster, ","),
			"-memoryMB", configToUse.MemoryMB,
			"-diskMB", configToUse.DiskMB,
			// time.Duration formats itself (e.g. "1m0s"); no Sprintf needed.
			"-heartbeatInterval", configToUse.HeartbeatInterval.String(),
			"-loggregatorServer", r.loggregatorServer,
			"-loggregatorSecret", r.loggregatorSecret,
			"-tempDir", configToUse.TempDir,
			"-containerOwnerName", configToUse.ContainerOwnerName,
			"-containerMaxCpuShares", strconv.Itoa(configToUse.ContainerMaxCpuShares),
			"-drainTimeout", configToUse.DrainTimeout.String(),
		),
		ginkgo.GinkgoWriter,
		ginkgo.GinkgoWriter,
	)
	Ω(err).ShouldNot(HaveOccurred())

	r.Config = configToUse
	r.Session = executorSession
}
// Stop resets the runner's Config to the package defaults and, if an
// executor is running, terminates it gracefully and waits up to five
// seconds for it to exit.
func (r *ExecutorRunner) Stop() {
	r.Config = defaultConfig
	if r.Session == nil {
		return
	}
	r.Session.Terminate().Wait(5 * time.Second)
	r.Session = nil
}
// KillWithFire forcefully kills the running executor (if any) without a
// graceful shutdown, waiting up to five seconds for the process to die.
// Unlike Stop, it leaves r.Config untouched.
func (r *ExecutorRunner) KillWithFire() {
	if r.Session == nil {
		return
	}
	r.Session.Kill().Wait(5 * time.Second)
	r.Session = nil
}
// generateConfig merges an optional caller-supplied Config over the package
// defaults. Only the first element of configs is consulted; zero-valued
// fields in it keep the corresponding default. The default
// ContainerOwnerName is made unique per ginkgo parallel node so concurrent
// test nodes do not collide on container ownership.
func (r *ExecutorRunner) generateConfig(configs ...Config) Config {
	configToReturn := defaultConfig
	configToReturn.ContainerOwnerName = fmt.Sprintf("executor-on-node-%d", config.GinkgoConfig.ParallelNode)

	if len(configs) == 0 {
		return configToReturn
	}

	givenConfig := configs[0]
	if givenConfig.MemoryMB != "" {
		configToReturn.MemoryMB = givenConfig.MemoryMB
	}
	if givenConfig.DiskMB != "" {
		configToReturn.DiskMB = givenConfig.DiskMB
	}
	if givenConfig.ConvergenceInterval != 0 {
		configToReturn.ConvergenceInterval = givenConfig.ConvergenceInterval
	}
	if givenConfig.HeartbeatInterval != 0 {
		configToReturn.HeartbeatInterval = givenConfig.HeartbeatInterval
	}
	if givenConfig.TempDir != "" {
		configToReturn.TempDir = givenConfig.TempDir
	}
	if givenConfig.TimeToClaim != 0 {
		configToReturn.TimeToClaim = givenConfig.TimeToClaim
	}
	if givenConfig.ContainerOwnerName != "" {
		configToReturn.ContainerOwnerName = givenConfig.ContainerOwnerName
	}
	// Fix: these two overrides were previously ignored, so caller-supplied
	// values silently fell back to the defaults.
	if givenConfig.ContainerMaxCpuShares != 0 {
		configToReturn.ContainerMaxCpuShares = givenConfig.ContainerMaxCpuShares
	}
	if givenConfig.DrainTimeout != 0 {
		configToReturn.DrainTimeout = givenConfig.DrainTimeout
	}

	return configToReturn
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.