text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"log"
"net/http"
"io/ioutil"
"encoding/json"
)
// Data mirrors a Facebook Messenger webhook payload: a list of page
// entries, each carrying the messaging events delivered in this callback.
// Numeric IDs arrive as JSON strings, hence the `,string` tag option.
type Data struct {
	Entry []struct {
		ID        int64 `json:"id,string"` // page ID (sent as a string)
		Messaging []struct {
			Message struct {
				Mid  string `json:"mid"`  // message ID
				Seq  int64  `json:"seq"`  // message sequence number
				Text string `json:"text"` // message text body
			} `json:"message"`
			Recipient struct {
				ID int64 `json:"id,string"`
			} `json:"recipient"`
			Sender struct {
				ID int64 `json:"id,string"`
			} `json:"sender"`
			Timestamp int64 `json:"timestamp"` // event time (epoch millis, presumably — confirm against API docs)
		} `json:"messaging"`
		Time int64 `json:"time"` // entry time
	} `json:"entry"`
	Object string `json:"object"` // webhook object type, e.g. "page"
}
// route handles the /webhook endpoint. Subscription verification requests
// (hub.mode=subscribe) are answered with the hub.challenge when the verify
// token matches; all other requests are treated as webhook events whose
// JSON body is decoded into Data and dispatched to message_handler.
func route(w http.ResponseWriter, req *http.Request) {
	if req.FormValue("hub.mode") == "subscribe" {
		token := req.FormValue("hub.verify_token")
		challenge := req.FormValue("hub.challenge")
		if token == "YOUR_TOKEN" {
			// Verified: echo the challenge back.
			w.Header().Set("Server", "A Go Web Server")
			w.WriteHeader(http.StatusOK)
			w.Write([]byte(challenge))
			return
		}
		// Fix: reject an invalid token with 403 instead of falling through
		// to the unconditional 200 at the end (and fix the message typo).
		fmt.Println("verify token is not valid")
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// Webhook event: decode the JSON body and hand it off.
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		log.Println(err)
		return
	}
	var data Data
	if err = json.Unmarshal(body, &data); err != nil {
		log.Println(err)
		return
	}
	message_handler(data)
	w.WriteHeader(http.StatusOK)
}
// main registers the webhook route and starts the HTTP server on :8080.
func main() {
	http.HandleFunc("/webhook", route)
	// Fix: ListenAndServe only returns on failure; previously its error
	// was silently discarded, hiding bind/startup problems.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package models
// TodoItem is a persisted todo entry.
type TodoItem struct {
	Id    int    // unique identifier
	Title string // human-readable description
	Done  bool   // completion flag
}
// NewTodoItem is the creation payload for a todo entry; only the title
// is supplied by the client.
type NewTodoItem struct {
	Title string `json:"title"`
}
// TodoItemModel converts the creation payload into a fresh TodoItem.
// The new item always starts in the not-done state; Id is left at its
// zero value (assigned elsewhere).
func (n NewTodoItem) TodoItemModel() *TodoItem {
	return &TodoItem{
		Title: n.Title,
		Done:  false,
	}
}
// SetAsCompleted marks the item as done.
// Fix: the receiver must be a pointer — with the original value receiver
// the assignment mutated a copy and the caller's item stayed unchanged.
func (item *TodoItem) SetAsCompleted() {
	item.Done = true
}
|
package gominin
import (
"io"
"testing"
)
// TestNewCharTokenizer verifies Init populates the tokenizer's internal
// state (textBytes, pos, length) from the input.
func TestNewCharTokenizer(t *testing.T) {
	tokenizer := newCharTokenizer()
	tokenizer.Init([]byte("foo"))
	if string(tokenizer.textBytes) != "foo" {
		t.Error("Init should initialize tokenizer fields.")
	}
	if tokenizer.pos != 0 {
		t.Error("Init should initialize pos field.")
	}
	if tokenizer.length != len("foo") {
		t.Error("Init should initialize length.")
	}
}
// TestNext verifies the first call to Next yields the first character,
// offset 0, and no error.
func TestNext(t *testing.T) {
	tokenizer := newCharTokenizer()
	tokenizer.Init([]byte("foo"))
	token, err := tokenizer.Next()
	if token.Text() != "f" {
		t.Error("Failed to return a token.")
	}
	if token.Offset() != 0 {
		t.Error("Failed to return an offset.")
	}
	if err != nil {
		t.Error("Failed to return no error.")
	}
}
// TestSomeNext drains the tokenizer and checks that concatenating every
// token reproduces the original input.
func TestSomeNext(t *testing.T) {
	tokenizer := newCharTokenizer()
	tokenizer.Init([]byte("foo"))
	var collected string
	for {
		token, err := tokenizer.Next()
		if err == io.EOF {
			break
		}
		collected += token.Text()
	}
	if collected != "foo" {
		t.Error("Failed to return all tokens.")
	}
}
// TestNextReturnCode ensures a newline character is tokenized like any
// other character.
func TestNextReturnCode(t *testing.T) {
	tokenizer := newCharTokenizer()
	tokenizer.Init([]byte("\n"))
	var collected string
	for {
		token, err := tokenizer.Next()
		if err == io.EOF {
			break
		}
		collected += token.Text()
	}
	if collected != "\n" {
		t.Error("Failed to return all tokens.")
	}
}
// TestNextOffset checks that successive tokens report sequential offsets.
func TestNextOffset(t *testing.T) {
	tokenizer := newCharTokenizer()
	tokenizer.Init([]byte("foo"))
	for want := 0; want < len("foo"); want++ {
		token, _ := tokenizer.Next()
		if token.Offset() != want {
			t.Error("Offset should return a correct position.")
		}
	}
}
// TestNextForMultiBytes verifies multi-byte (UTF-8) characters are returned
// as whole tokens and that offsets count characters, not bytes.
func TestNextForMultiBytes(t *testing.T) {
	tokenizer := newCharTokenizer()
	// "f" followed by two 3-byte Japanese characters.
	tokenizer.Init([]byte("f\u3042\u3044"))
	token, err := tokenizer.Next()
	if token.Text() != "f" {
		t.Error("Check a single byte returns correctly.")
	}
	token, err = tokenizer.Next()
	if token.Text() != "\u3042" {
		t.Error("Multibyte does not return correctly.")
	}
	if token.Offset() != 1 {
		t.Error("1st multibyte char pos should be 1.")
	}
	token, err = tokenizer.Next()
	if token.Text() != "\u3044" {
		t.Error("Multibyte should be able to handle.")
	}
	// Offset 2, not 4: offsets are character positions, not byte positions.
	if token.Offset() != 2 {
		t.Error("Token offset should be the location of char instead of bytes.")
	}
	token, err = tokenizer.Next()
	if err != io.EOF {
		t.Error("Last error should be io.EOF")
	}
}
|
package main
import "fmt"
// main emits a fixed marker line on stdout.
func main() {
	fmt.Printf("%s\n", "test drone -c -n ")
}
|
package domain
import (
"time"
uuid "github.com/satori/go.uuid"
)
// Auth is an issued access token for a user, persisted with db tags and
// serialized with json tags.
type Auth struct {
	ID          uuid.UUID `db:"id" json:"id"`
	UserID      uuid.UUID `db:"user_id" json:"user_id"`
	AccessToken string    `db:"access_token" json:"access_token"`
	ExpiredAt   time.Time `db:"expired_at" json:"expired_at"`
	CreatedAt   time.Time `db:"created_at" json:"created_at"`
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
}
// NewAuth builds an Auth for the given user with a freshly generated ID.
// CreatedAt/UpdatedAt are left at their zero values — presumably the
// datastore fills them in; TODO confirm against the repository layer.
// The error result is currently always nil; it is kept so the signature
// stays stable for callers expecting (value, error).
func NewAuth(userID uuid.UUID, accessToken string, expiredAt time.Time) (*Auth, error) {
	return &Auth{
		ID:          uuid.NewV4(),
		UserID:      userID,
		AccessToken: accessToken,
		ExpiredAt:   expiredAt,
	}, nil
}
|
package channels
import (
"fmt"
"testing"
)
// sum adds every element of a and delivers the total on channel c.
func sum(a []int, c chan int) {
	total := 0
	for i := 0; i < len(a); i++ {
		total += a[i]
	}
	c <- total // hand the result back over the channel
}
// TestChannels demonstrates basic channel send/receive semantics.
func TestChannels(t *testing.T) {
	/*
		Channels are typed pipes through which values can be sent and
		received using the channel operator <-.
		ch <- v    // send v to channel ch.
		v := <-ch  // receive from ch and assign the value to v.
		(Data flows in the direction of the arrow.)
		Like maps and slices, channels must be created before use:
	*/
	a := []int{7, 2, 8, -9, 4, 0}
	c := make(chan int)
	// Split the slice across two goroutines; each sends its partial sum to c.
	go sum(a[:len(a)/2], c)
	go sum(a[len(a)/2:], c)
	x, y := <-c, <-c // receive both partial sums from c
	fmt.Println(x, y, x+y)
}
|
package main
import "fmt"
var a []int
// main demonstrates an unbuffered-channel hand-off between the main
// goroutine and a receiver goroutine.
func main() {
	b := make(chan int)
	done := make(chan struct{})
	fmt.Println(b)
	go func() {
		x := <-b
		fmt.Println(x)
		close(done) // signal that the value was received and printed
	}()
	b <- 10
	// Fix: wait for the goroutine. Previously main could return right
	// after the send, terminating the program before fmt.Println(x) ran.
	<-done
}
|
package routes
import (
"github.com/go-chi/chi"
"github.com/jmc-quetzal/api/config"
"github.com/jmc-quetzal/api/handlers"
"github.com/jmc-quetzal/api/postgres"
"github.com/jmc-quetzal/api/redis"
)
// userRoutes mounts the /users endpoints on the router, wiring them to the
// Postgres user store and the Redis session store taken from cfg.
func userRoutes(router *chi.Mux, cfg *config.Config) {
	pgStore := postgres.UserStore{DB: cfg.DB}
	sessionStore := redis.RedisSessions{Pool: cfg.Sessions}
	router.Route("/users", func(router chi.Router) {
		// gofmt fix: the original argument lists were missing spaces after commas.
		router.Post("/", handlers.CreateUserHandler(pgStore, sessionStore))
		router.Post("/login", handlers.LoginHandler(pgStore, sessionStore))
		router.Delete("/logout", handlers.LogoutHandler(sessionStore))
	})
}
|
package math
import "testing"
// TestAverage verifies the average returned by GetStats.
func TestAverage(t *testing.T) {
	_, _, avg, _ := GetStats([]float32{1, 2, 3})
	if avg == 2 {
		return
	}
	t.Error("Expected 2, got ", avg)
}
// TestMin verifies the minimum returned by GetStats.
func TestMin(t *testing.T) {
	_, min, _, _ := GetStats([]float32{1, 2, 3})
	if min == 1 {
		return
	}
	t.Error("Expected 1, got ", min)
}
// TestMax verifies the maximum returned by GetStats.
func TestMax(t *testing.T) {
	max, _, _, _ := GetStats([]float32{1, 2, 3})
	if max == 3 {
		return
	}
	t.Error("Expected 3, got ", max)
}
// TestErr checks the error result for nil input.
// NOTE(review): err is compared with an integer, so GetStats apparently
// returns a numeric error code rather than a Go error — confirm in math.go.
func TestErr(t *testing.T) {
	_, _, _, err := GetStats(nil)
	if err != 1 {
		t.Error("Expected 1, got ", err)
	}
}
// TestErr2 checks the error result for an empty (but non-nil) slice,
// which should behave the same as nil input.
func TestErr2(t *testing.T) {
	_, _, _, err := GetStats([]float32{})
	if err != 1 {
		t.Error("Expected 1, got ", err)
	}
}
// BenchmarkStats measures GetStats over a fixed 9-element sample.
func BenchmarkStats(b *testing.B) {
	for i := 0; i < b.N; i++ {
		GetStats([]float32{1, 2, 3, 34, 13, 35, 42534.2, 3, 3423})
	}
}
|
package problems
// Node Represents a node in a tree.
type Node struct {
	Val   string // payload stored at this node
	Left  *Node  // left child; nil when absent
	Right *Node  // right child; nil when absent
}
|
package piscine
import "fmt"
// PrintWordsTables prints each word of table on its own line.
func PrintWordsTables(table []string) {
	// Fix: ranging over a []string with a single variable yields the
	// index (int); the original passed indices to Printstr instead of
	// the words themselves.
	for _, str := range table {
		Printstr(str)
		z01.PrintRune('\n')
	}
}
|
package main
import (
"log"
"sync"
"github.com/PumpkinSeed/concurrent-mysql-benchmark/backend"
"github.com/PumpkinSeed/concurrent-mysql-benchmark/database"
"github.com/PumpkinSeed/concurrent-mysql-benchmark/database/models"
)
const connection = "ec_user:password@tcp(127.0.0.1:3306)/experiment_company"
var wg = &sync.WaitGroup{}
// main connects to the benchmark database, wires the model handlers into a
// backend handler, and runs a single transactional benchmark action.
func main() {
	db, err := database.Connect(connection)
	if err != nil {
		log.Fatal(err) // log.Fatal exits; the return below is unreachable but harmless
		return
	}
	handler := backend.NewHandler(&backend.DatabaseHandlers{
		User:     models.NewUserHandler(db),
		Item:     models.NewItemHandler(db),
		Purchase: models.NewPurchaseHandler(db),
	}, db)
	err = handler.ActionTx()
	if err != nil {
		log.Fatal(err)
		return
	}
	// Earlier load-test loop (100x100 concurrent actions), kept for reference:
	/*var innerWg = &sync.WaitGroup{}
	for j := 0; j < 100; j++ {
		for i := 0; i < 100; i++ {
			innerWg.Add(1)
			go func() {
				action(db)
				innerWg.Done()
			}()
		}
	}
	innerWg.Wait()*/
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package cloudprovider
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/Tencent/bk-bcs/bcs-common/common/blog"
"github.com/Tencent/bk-bcs/bcs-common/common/modules"
"github.com/Tencent/bk-bcs/bcs-common/pkg/odm/drivers"
"github.com/Tencent/bk-bcs/bcs-common/pkg/odm/operator"
proto "github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/api/clustermanager"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/actions"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/clusterops"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/common"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/options"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/remote/alarm"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/remote/alarm/bkmonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/remote/alarm/tmp"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/remote/cmdb"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/remote/nodeman"
storeopt "github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/store/options"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/types"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/utils"
k8scorev1 "k8s.io/api/core/v1"
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
)
// Identifiers for the task/step actions dispatched by the cluster manager.
const (
	// BKSOPTask bk-sops common job
	BKSOPTask = "bksopsjob"
	// UnCordonNodesAction marks nodes schedulable again
	UnCordonNodesAction = "unCordonNodes"
	// CordonNodesAction marks nodes unschedulable
	CordonNodesAction = "cordonNodes"
	// WatchTask watch component common job
	WatchTask = "watchjob"
	// RemoveHostFromCmdbAction remove host action
	RemoveHostFromCmdbAction = "removeHostFromCmdb"
	// InstallGseAgentAction install gseAgent action
	InstallGseAgentAction = "installGseAgent"
	// TransferHostModuleAction transfer module action
	TransferHostModuleAction = "transferHostModule"
	// EnsureAutoScalerAction install/update ca component
	EnsureAutoScalerAction = "ensureAutoScaler"
	// JobFastExecuteScriptAction execute script by job
	JobFastExecuteScriptAction = "jobFastExecuteScript"
	// InstallVclusterAction install vcluster
	InstallVclusterAction = "installVcluster"
	// DeleteVclusterAction uninstall vcluster
	DeleteVclusterAction = "deleteVcluster"
	// UpgradeVclusterAction upgrade vcluster
	UpgradeVclusterAction = "upgradeVcluster"
	// CreateNamespaceAction create a namespace
	CreateNamespaceAction = "createNamespace"
	// DeleteNamespaceAction delete a namespace
	DeleteNamespaceAction = "deleteNamespace"
	// SetNodeLabelsAction set labels on nodes
	SetNodeLabelsAction = "nodeSetLabels"
	// SetNodeAnnotationsAction set annotations on nodes
	SetNodeAnnotationsAction = "nodeSetAnnotations"
	// CheckKubeAgentStatusAction check kube-agent component status
	CheckKubeAgentStatusAction = "checkAgentStatus"
	// CreateResourceQuotaAction create a resource quota
	CreateResourceQuotaAction = "createResourceQuota"
	// DeleteResourceQuotaAction delete a resource quota
	DeleteResourceQuotaAction = "deleteResourceQuota"
	// ResourcePoolLabelAction set resource-pool labels
	ResourcePoolLabelAction = "resourcePoolLabel"
	// LadderResourcePoolLabelAction yunti resource-pool label task
	LadderResourcePoolLabelAction = "yunti-ResourcePoolLabelTask"
)
// Task-context keys and fallbacks.
var (
	// defaultTaskID is returned when a context carries no task ID.
	defaultTaskID = "qwertyuiop123456"
	// TaskID inject taskID into ctx
	TaskID = "taskID"
)
// GetTaskIDFromContext get taskID from context
// Falls back to defaultTaskID when the context carries none.
func GetTaskIDFromContext(ctx context.Context) string {
	id, ok := ctx.Value(TaskID).(string)
	if !ok {
		return defaultTaskID
	}
	return id
}
// WithTaskIDForContext will return a new context wrapped taskID flag around the original ctx
func WithTaskIDForContext(ctx context.Context, taskID string) context.Context {
	// NOCC:golint/type(设计如此)
	// (lint suppression: a plain string key is used here by design)
	wrapped := context.WithValue(ctx, TaskID, taskID)
	return wrapped
}
// CredentialData dependency data
type CredentialData struct {
	// Cloud cloud
	Cloud *proto.Cloud
	// AccountID cluster cloud-account ID (original comment said "Cluster
	// cluster", which did not match this field)
	AccountID string
}
// GetCredential get specified credential information according Cloud configuration, if Cloud conf is nil, try Cluster Account.
// @return CommonOption: option can be nil if no credential conf in cloud or cluster account or when cloudprovider don't support authentication
// GetCredential get cloud credential by cloud or cluster
func GetCredential(data *CredentialData) (*CommonOption, error) {
	// Fix: data.Cloud is dereferenced unconditionally below (CloudID,
	// CloudCredential, ConfInfo), so a nil Cloud must be rejected even when
	// an AccountID is supplied — previously that combination panicked.
	if data == nil || data.Cloud == nil {
		return nil, fmt.Errorf("lost cloud/account information")
	}
	option := &CommonOption{}
	// if credential not exist account, get from common cloud
	if data.AccountID != "" {
		// try to get credential in cluster
		account, err := GetStorageModel().GetCloudAccount(context.Background(),
			data.Cloud.CloudID, data.AccountID, false)
		if err != nil {
			return nil, fmt.Errorf("GetCloudAccount failed: %v", err)
		}
		option.Account = account.Account
	}
	// get credential from cloud
	if option.Account == nil && data.Cloud.CloudCredential != nil {
		option.Account = &proto.Account{
			SecretID:             data.Cloud.CloudCredential.Key,
			SecretKey:            data.Cloud.CloudCredential.Secret,
			SubscriptionID:       data.Cloud.CloudCredential.SubscriptionID,
			TenantID:             data.Cloud.CloudCredential.TenantID,
			ResourceGroupName:    data.Cloud.CloudCredential.ResourceGroupName,
			ClientID:             data.Cloud.CloudCredential.ClientID,
			ClientSecret:         data.Cloud.CloudCredential.ClientSecret,
			ServiceAccountSecret: data.Cloud.CloudCredential.ServiceAccountSecret,
			GkeProjectID:         data.Cloud.CloudCredential.GkeProjectID,
		}
	}
	// set cloud basic confInfo
	// NOTE(review): ConfInfo is assumed non-nil here, as in the original — confirm upstream.
	option.CommonConf = CloudConf{
		CloudInternalEnable: data.Cloud.ConfInfo.CloudInternalEnable,
		CloudDomain:         data.Cloud.ConfInfo.CloudDomain,
		MachineDomain:       data.Cloud.ConfInfo.MachineDomain,
		VpcDomain:           data.Cloud.ConfInfo.VpcDomain,
	}
	// check cloud credential info
	err := checkCloudCredentialValidate(data.Cloud, option)
	if err != nil {
		return nil, fmt.Errorf("checkCloudCredentialValidate %s failed: %v", data.Cloud.CloudProvider, err)
	}
	return option, nil
}
// checkCloudCredentialValidate runs the provider-specific validator over
// the account held in option.
func checkCloudCredentialValidate(cloud *proto.Cloud, option *CommonOption) error {
	validate, err := GetCloudValidateMgr(cloud.CloudProvider)
	if err != nil {
		return err
	}
	return validate.ImportCloudAccountValidate(option.Account)
}
// CloudDependBasicInfo cloud depend cluster info
// Bundles the metadata a cloudprovider task needs: cluster, cloud,
// optional nodeGroup/nodeTemplate, and the resolved credential option.
type CloudDependBasicInfo struct {
	// Cluster info
	Cluster *proto.Cluster
	// Cloud info
	Cloud *proto.Cloud
	// NodeGroup info
	NodeGroup *proto.NodeGroup
	// NodeTemplate info
	NodeTemplate *proto.NodeTemplate
	// CmOption option
	CmOption *CommonOption
}
// GetBasicInfoReq getDependBasicInfo, clusterID and cloudID must be not empty
type GetBasicInfoReq struct {
	ClusterID      string // required
	CloudID        string // required
	NodeGroupID    string // optional; resolved only when non-empty
	NodeTemplateID string // optional; resolved only when non-empty
}
// GetClusterDependBasicInfo get cluster/cloud/nodeGroup depend info, nodeGroup may be nil.
// only get metadata, try not to change it
func GetClusterDependBasicInfo(request GetBasicInfoReq) (*CloudDependBasicInfo, error) {
	var (
		cluster      *proto.Cluster
		cloud        *proto.Cloud
		nodeGroup    *proto.NodeGroup
		nodeTemplate *proto.NodeTemplate
		err          error
	)
	// resolve cloud and cluster together; both IDs are required
	cloud, cluster, err = actions.GetCloudAndCluster(GetStorageModel(), request.CloudID, request.ClusterID)
	if err != nil {
		return nil, err
	}
	// cloud credential info
	cmOption, err := GetCredential(&CredentialData{
		Cloud:     cloud,
		AccountID: cluster.CloudAccountID,
	})
	if err != nil {
		return nil, err
	}
	cmOption.Region = cluster.Region
	// nodeGroup and nodeTemplate are optional lookups
	if len(request.NodeGroupID) > 0 {
		nodeGroup, err = actions.GetNodeGroupByGroupID(GetStorageModel(), request.NodeGroupID)
		if err != nil {
			return nil, err
		}
	}
	if len(request.NodeTemplateID) > 0 {
		nodeTemplate, err = actions.GetNodeTemplateByTemplateID(GetStorageModel(), request.NodeTemplateID)
		if err != nil {
			return nil, err
		}
	}
	return &CloudDependBasicInfo{cluster, cloud, nodeGroup,
		nodeTemplate, cmOption}, nil
}
// UpdateClusterStatus set cluster status
// Loads the cluster, applies the new status, persists it, and returns
// the updated record.
func UpdateClusterStatus(clusterID string, status string) (*proto.Cluster, error) {
	cls, err := GetStorageModel().GetCluster(context.Background(), clusterID)
	if err != nil {
		return nil, err
	}
	cls.Status = status
	if err = GetStorageModel().UpdateCluster(context.Background(), cls); err != nil {
		return nil, err
	}
	return cls, nil
}
// GetClusterByID get cluster by clusterID
// Thin read-through to the storage model.
func GetClusterByID(clusterID string) (*proto.Cluster, error) {
	return GetStorageModel().GetCluster(context.Background(), clusterID)
}
// UpdateCluster set cluster status
// Persists the given cluster object via the storage model.
func UpdateCluster(cluster *proto.Cluster) error {
	return GetStorageModel().UpdateCluster(context.Background(), cluster)
}
// GetClusterCredentialByClusterID get cluster credential what agent report
// Returns whether a credential record exists for the cluster; the record
// itself is discarded.
func GetClusterCredentialByClusterID(ctx context.Context, clusterID string) (bool, error) {
	_, exist, err := GetStorageModel().GetClusterCredential(ctx, clusterID)
	if err != nil {
		blog.Errorf("GetClusterCredentialByClusterID[%s] failed: %v", clusterID, err)
		return false, err
	}
	return exist, nil
}
// UpdateClusterCredentialByConfig update clusterCredential by kubeConfig
func UpdateClusterCredentialByConfig(clusterID string, config *types.Config) error {
	// first import cluster need to auto generate clusterCredential info, subsequently kube-agent report to update
	// currently, bcs only support token auth, kubeConfigList length greater 0, get zeroth kubeConfig
	var (
		server     = ""
		caCertData = ""
		token      = ""
		clientCert = ""
		clientKey  = ""
	)
	if len(config.Clusters) > 0 {
		server = config.Clusters[0].Cluster.Server
		caCertData = string(config.Clusters[0].Cluster.CertificateAuthorityData)
	}
	if len(config.AuthInfos) > 0 {
		token = config.AuthInfos[0].AuthInfo.Token
		clientCert = string(config.AuthInfos[0].AuthInfo.ClientCertificateData)
		clientKey = string(config.AuthInfos[0].AuthInfo.ClientKeyData)
	}
	// server + CA are mandatory; auth needs either a token or a full cert/key pair
	if server == "" || caCertData == "" || (token == "" && (clientCert == "" || clientKey == "")) {
		return fmt.Errorf("importClusterCredential parse kubeConfig failed: %v", "[server|caCertData|token] null")
	}
	// need to handle crypt
	now := time.Now().Format(time.RFC3339)
	err := GetStorageModel().PutClusterCredential(context.Background(), &proto.ClusterCredential{
		ServerKey:     clusterID,
		ClusterID:     clusterID,
		ClientModule:  modules.BCSModuleKubeagent,
		ServerAddress: server,
		CaCertData:    caCertData,
		UserToken:     token,
		ConnectMode:   modules.BCSConnectModeDirect,
		CreateTime:    now,
		UpdateTime:    now,
		ClientKey:     clientKey,
		ClientCert:    clientCert,
	})
	if err != nil {
		return err
	}
	return nil
}
// ListNodesInClusterNodePool list nodeGroup nodes
// Returns only nodes whose status indicates they are (or will be) serving
// workload; other statuses are filtered out.
func ListNodesInClusterNodePool(clusterID, nodePoolID string) ([]*proto.Node, error) {
	condM := make(operator.M)
	condM["nodegroupid"] = nodePoolID
	condM["clusterid"] = clusterID
	cond := operator.NewLeafCondition(operator.Eq, condM)
	nodes, err := GetStorageModel().ListNode(context.Background(), cond, &storeopt.ListOption{})
	if err != nil {
		blog.Errorf("ListNodesInClusterNodePool NodeGroup %s all Nodes failed, %s", nodePoolID, err.Error())
		return nil, err
	}
	// sum running & creating nodes, these status are ready to serve workload
	var (
		goodNodes []*proto.Node
	)
	for _, node := range nodes {
		if node.Status == common.StatusRunning || node.Status == common.StatusInitialization ||
			node.Status == common.StatusAddNodesFailed || node.Status == common.StatusResourceApplyFailed ||
			node.Status == common.StatusDeleting {
			goodNodes = append(goodNodes, node)
		}
	}
	return goodNodes, nil
}
// GetNodesNumWhenApplyInstanceTask get nodeNum
// Sums the requested node counts across matching tasks whose current step
// is one of the given steps; tasks with an unparsable count are skipped.
func GetNodesNumWhenApplyInstanceTask(clusterID, nodeGroupID, taskType, status string, steps []string) (int, error) {
	cond := operator.NewLeafCondition(operator.Eq, operator.M{
		"clusterid":   clusterID,
		"tasktype":    taskType,
		"nodegroupid": nodeGroupID,
		"status":      status,
	})
	taskList, err := GetStorageModel().ListTask(context.Background(), cond, &storeopt.ListOption{})
	if err != nil {
		blog.Errorf("GetNodesNumWhenApplyInstanceTask failed: %v", err)
		return 0, err
	}
	currentScalingNodes := 0
	for i := range taskList {
		if utils.StringInSlice(taskList[i].CurrentStep, steps) {
			desiredNodes := taskList[i].CommonParams[ScalingNodesNumKey.String()]
			nodeNum, err := strconv.Atoi(desiredNodes)
			if err != nil {
				// best-effort: skip tasks whose node count is missing/garbled
				blog.Errorf("GetNodesNumWhenApplyInstanceTask strconv desiredNodes failed: %v", err)
				continue
			}
			currentScalingNodes += nodeNum
		}
	}
	return currentScalingNodes, nil
}
// UpdateNodeGroupDesiredSize when scaleOutNodes failed
// Rolls back the node group's desired size by nodeNum: a failed scale-out
// shrinks it (clamped at 0), a failed scale-in grows it back.
func UpdateNodeGroupDesiredSize(groupID string, nodeNum int, scaleOut bool) error {
	group, err := GetStorageModel().GetNodeGroup(context.Background(), groupID)
	if err != nil {
		blog.Errorf("updateNodeGroupDesiredSize failed when CA scale nodes: %v", err)
		return err
	}
	if scaleOut {
		if group.AutoScaling.DesiredSize >= uint32(nodeNum) {
			group.AutoScaling.DesiredSize = group.AutoScaling.DesiredSize - uint32(nodeNum)
		} else {
			// Fix: capture the original value for the warning; previously the
			// log ran after the field was zeroed, so it always printed 0.
			original := group.AutoScaling.DesiredSize
			group.AutoScaling.DesiredSize = 0
			blog.Warnf("updateNodeGroupDesiredSize abnormal, desiredSize[%v] scaleNodesNum[%v]",
				original, nodeNum)
		}
	} else {
		group.AutoScaling.DesiredSize = group.AutoScaling.DesiredSize + uint32(nodeNum)
	}
	err = GetStorageModel().UpdateNodeGroup(context.Background(), group)
	if err != nil {
		blog.Errorf("updateNodeGroupDesiredSize failed when CA scale nodes: %v", err)
		return err
	}
	return nil
}
// SaveNodeInfoToDB save node to DB
// Upserts a node record, looking it up by IP (isIP=true) or node ID.
// A stale record for the same IP that is no longer in the cluster is
// deleted before the create.
func SaveNodeInfoToDB(ctx context.Context, node *proto.Node, isIP bool) error {
	var (
		oldNode *proto.Node
		err     error
	)
	taskID := GetTaskIDFromContext(ctx)
	if isIP {
		oldNode, err = GetStorageModel().GetNodeByIP(context.Background(), node.InnerIP)
	} else {
		oldNode, err = GetStorageModel().GetNode(context.Background(), node.NodeID)
	}
	blog.Infof("SaveNodeInfoToDB[%s] node[%s:%s] node[%+v] err: %v", taskID, node.InnerIP, node.NodeID, oldNode, err)
	// "record not found" is expected for new nodes; only other errors abort
	if err != nil && !errors.Is(err, drivers.ErrTableRecordNotFound) {
		return fmt.Errorf("saveNodeInfoToDB[%s] getNode[%s] failed: %v", taskID, node.NodeID, err)
	}
	if oldNode == nil {
		// check repeated cluster ips
		inDb, inCluster := checkRepeatedNodes(ctx, node)
		blog.Infof("SaveNodeInfoToDB[%s] cluster[%s] nodeGroup[%s] checkRepeatedNodes[%+v:%+v]",
			taskID, node.ClusterID, node.NodeGroupID, inDb, inCluster)
		if inDb && !inCluster {
			// NOTE(review): delete error is silently ignored; a failed delete
			// could make the CreateNode below fail — confirm intended.
			GetStorageModel().DeleteNodeByIP(context.Background(), node.InnerIP)
		}
		err = GetStorageModel().CreateNode(context.Background(), node)
		if err != nil {
			return fmt.Errorf("saveNodeInfoToDB[%s] createNode[%s] failed: %v", taskID, node.InnerIP, err)
		}
		blog.Infof("saveNodeInfoToDB[%s] createNode[%s:%s] success", taskID, node.InnerIP, node.NodeID)
		return nil
	}
	blog.Infof("saveNodeInfoToDB[%s] exist node[%s:%s]", taskID, node.InnerIP, node.NodeID)
	err = GetStorageModel().UpdateNode(context.Background(), node)
	if err != nil {
		return fmt.Errorf("saveNodeInfoToDB updateNode[%s] failed: %v", node.InnerIP, err)
	}
	return nil
}
// checkRepeatedNodes check ip repeated: ip in db, ip in cluster
// Returns (inDb, inCluster). Lookup failures are treated as "not present".
func checkRepeatedNodes(ctx context.Context, n *proto.Node) (bool, bool) {
	var (
		ip = n.InnerIP
	)
	taskID := GetTaskIDFromContext(ctx)
	if ip != "" {
		existNode, err := GetStorageModel().GetNodeByIP(context.Background(), ip)
		if err != nil && !errors.Is(err, drivers.ErrTableRecordNotFound) {
			blog.Infof("checkRepeatedNodes[%s] GetNodeByIP[%s] failed: %v", taskID, ip, err)
			return false, false
		}
		// db not exist ip
		if existNode == nil {
			blog.Infof("checkRepeatedNodes[%s] IP[%s] not exist db", taskID, ip)
			return false, false
		}
		// check ip exist in cluster
		clusterID := existNode.ClusterID
		if clusterID == "" {
			blog.Infof("checkRepeatedNodes[%s] IP[%s] clusterID empty", taskID, ip)
			return true, false
		}
		found := checkNodeExistInCluster(clusterID, ip)
		if found {
			blog.Infof("checkRepeatedNodes[%s] IP[%s] exist in cluster[%s]", taskID, ip, clusterID)
			return true, true
		}
		blog.Infof("checkRepeatedNodes[%s] IP[%s] not exist in cluster[%s]", taskID, ip, clusterID)
		return true, false
	}
	return false, false
}
// checkNodeExistInCluster reports whether a node with the given IP is
// currently present in the live cluster.
func checkNodeExistInCluster(clusterID, ip string) bool {
	if clusterID == "" || ip == "" {
		return false
	}
	k8sClient := clusterops.NewK8SOperator(options.GetGlobalCMOptions(), GetStorageModel())
	// the lookup error is deliberately ignored: any failure counts as "not found"
	node, _ := k8sClient.GetClusterNode(context.Background(), clusterops.QueryNodeOption{
		ClusterID: clusterID,
		NodeIP:    ip,
	})
	return node != nil
}
// GetInstanceIPsByID get InstanceIP by NodeID
// Nodes that cannot be fetched are logged and skipped (best-effort).
func GetInstanceIPsByID(ctx context.Context, nodeIDs []string) []string {
	taskID := GetTaskIDFromContext(ctx)
	nodeIPs := make([]string, 0, len(nodeIDs))
	for _, id := range nodeIDs {
		node, err := GetStorageModel().GetNode(context.Background(), id)
		if err != nil {
			blog.Errorf("GetInstanceIPsByID[%s] nodeID[%s] failed: %v", taskID, id, err)
			continue
		}
		nodeIPs = append(nodeIPs, node.InnerIP)
	}
	return nodeIPs
}
// GetNodesByInstanceIDs get nodes by instanceIDs
// Lookup failures are intentionally skipped (best-effort).
func GetNodesByInstanceIDs(instanceIDs []string) []*proto.Node {
	nodes := make([]*proto.Node, 0, len(instanceIDs))
	for _, id := range instanceIDs {
		if node, err := GetStorageModel().GetNode(context.Background(), id); err == nil {
			nodes = append(nodes, node)
		}
	}
	return nodes
}
// UpdateNodeStatusByInstanceID update node status
// Loads the node by instance ID, applies the status, and persists it.
func UpdateNodeStatusByInstanceID(instanceID, status string) error {
	node, err := GetStorageModel().GetNode(context.Background(), instanceID)
	if err != nil {
		return err
	}
	node.Status = status
	return GetStorageModel().UpdateNode(context.Background(), node)
}
// UpdateClusterSystemID set cluster systemID
// Loads the cluster, stamps the system ID, and persists the change.
func UpdateClusterSystemID(clusterID string, systemID string) error {
	cluster, err := GetStorageModel().GetCluster(context.Background(), clusterID)
	if err != nil {
		return err
	}
	cluster.SystemID = systemID
	return GetStorageModel().UpdateCluster(context.Background(), cluster)
}
// UpdateNodeListStatus update nodeList status
// Per-node failures are deliberately skipped so one bad record does not
// fail the whole batch; consequently this always returns nil.
func UpdateNodeListStatus(isInstanceIP bool, instances []string, status string) error {
	for i := range instances {
		err := UpdateNodeStatus(isInstanceIP, instances[i], status)
		if err != nil {
			// batch update if one failed need to handle, other than task failed
			continue
		}
	}
	return nil
}
// UpdateNodeStatus update node status; isInstanceIP true → instance is an
// InstanceIP, false → instance is an InstanceID (the original comment said
// "true" for both cases).
func UpdateNodeStatus(isInstanceIP bool, instance, status string) error {
	var (
		node *proto.Node
		err  error
	)
	if isInstanceIP {
		node, err = GetStorageModel().GetNodeByIP(context.Background(), instance)
	} else {
		node, err = GetStorageModel().GetNode(context.Background(), instance)
	}
	if err != nil && !errors.Is(err, drivers.ErrTableRecordNotFound) {
		return err
	}
	// a missing record is not an error for a status update
	if errors.Is(err, drivers.ErrTableRecordNotFound) {
		return nil
	}
	node.Status = status
	err = GetStorageModel().UpdateNode(context.Background(), node)
	if err != nil {
		return err
	}
	return nil
}
// GetClusterMasterIPList get cluster masterIPs
// Collects the keys of the cluster's Master map (keyed by IP).
func GetClusterMasterIPList(cluster *proto.Cluster) []string {
	ips := make([]string, 0, len(cluster.Master))
	for ip := range cluster.Master {
		ips = append(ips, ip)
	}
	return ips
}
// StepOptions holds optional per-step settings.
type StepOptions struct {
	Retry      uint32 // number of retries allowed for the step
	SkipFailed bool   // continue the task even if this step fails
}
// StepOption mutates StepOptions in the functional-options style.
type StepOption func(opt *StepOptions)
// WithStepRetry sets the retry count for a step.
func WithStepRetry(retry uint32) StepOption {
	return func(opt *StepOptions) {
		opt.Retry = retry
	}
}
// WithStepSkipFailed sets whether a failed step should be skipped.
func WithStepSkipFailed(skip bool) StepOption {
	return func(opt *StepOptions) {
		opt.SkipFailed = skip
	}
}
// InitTaskStep init task step
// Builds a proto.Step from the step info, applying any StepOption
// overrides (retry count, skip-on-failure).
func InitTaskStep(stepInfo StepInfo, opts ...StepOption) *proto.Step {
	defaultOptions := &StepOptions{Retry: 0, SkipFailed: false}
	for _, opt := range opts {
		opt(defaultOptions)
	}
	nowStr := time.Now().Format(time.RFC3339)
	return &proto.Step{
		Name:   stepInfo.StepMethod,
		System: "api",
		Params: make(map[string]string),
		// Fix: honor WithStepRetry — Retry was hard-coded to 0, silently
		// discarding the option collected above.
		Retry:        defaultOptions.Retry,
		SkipOnFailed: defaultOptions.SkipFailed,
		Start:        nowStr,
		Status:       TaskStatusNotStarted,
		TaskMethod:   stepInfo.StepMethod,
		TaskName:     stepInfo.StepName,
	}
}
// GetIDToIPMap get instanceID to instanceIP map
// Pairs nodeIDs[i] with nodeIPs[i]; IDs beyond len(nodeIPs) are dropped.
func GetIDToIPMap(nodeIDs, nodeIPs []string) map[string]string {
	idToIPMap := make(map[string]string, len(nodeIDs))
	for i, id := range nodeIDs {
		if i >= len(nodeIPs) {
			break
		}
		idToIPMap[id] = nodeIPs[i]
	}
	return idToIPMap
}
// IsExternalNodePool check group external nodePool
// Only the explicit External group type counts; Normal, empty, and any
// other value are treated as internal.
func IsExternalNodePool(group *proto.NodeGroup) bool {
	if group == nil {
		return false
	}
	return group.GetNodeGroupType() == common.External.String()
}
// ParseNodeIpOrIdFromCommonMap parse nodeIDs or nodeIPs by chart
// Looks up key in taskCommonMap and splits its value on the chart
// separator; returns nil when the key is missing or the value is empty.
func ParseNodeIpOrIdFromCommonMap(taskCommonMap map[string]string, key string, chart string) []string {
	if v := taskCommonMap[key]; v != "" {
		return strings.Split(v, chart)
	}
	return nil
}
// ParseMapFromStepParas from step parse k1=v1;k2=v2; to map
// Returns nil when the key is missing or the value is empty.
func ParseMapFromStepParas(stepMap map[string]string, key string) map[string]string {
	if v := stepMap[key]; v != "" {
		return utils.StringsToMap(v)
	}
	return nil
}
// GetScaleOutModuleID get scaleOut module ID
// Priority: node template > cluster module (group scaling) > autoscaling option.
func GetScaleOutModuleID(cls *proto.Cluster, asOption *proto.ClusterAutoScalingOption,
	template *proto.NodeTemplate, isGroup bool) string {
	switch {
	case template != nil && template.Module != nil && template.Module.ScaleOutModuleID != "":
		return template.Module.ScaleOutModuleID
	case isGroup && len(cls.GetModuleID()) > 0:
		return cls.GetModuleID()
	case asOption != nil && asOption.Module != nil && asOption.Module.ScaleOutModuleID != "":
		return asOption.Module.ScaleOutModuleID
	}
	return ""
}
// GetScaleInModuleID get scaleIn module ID only from template
// Priority: node template > autoscaling option.
func GetScaleInModuleID(asOption *proto.ClusterAutoScalingOption, template *proto.NodeTemplate) string {
	switch {
	case template != nil && template.Module != nil && template.Module.ScaleInModuleID != "":
		return template.Module.ScaleInModuleID
	case asOption != nil && asOption.Module != nil && asOption.Module.ScaleInModuleID != "":
		return asOption.Module.ScaleInModuleID
	}
	return ""
}
// GetBusinessID get business id, default cluster business id
// scale=true selects the scale-out biz ID, scale=false the scale-in one;
// node template settings win over the autoscaling option.
func GetBusinessID(asOption *proto.ClusterAutoScalingOption, template *proto.NodeTemplate, scale bool) string {
	pick := func(scaleOut, scaleIn string) string {
		if scale {
			return scaleOut
		}
		return scaleIn
	}
	if template != nil && template.Module != nil {
		return pick(template.Module.ScaleOutBizID, template.Module.ScaleInBizID)
	}
	if asOption != nil && asOption.Module != nil {
		return pick(asOption.Module.ScaleOutBizID, asOption.Module.ScaleInBizID)
	}
	return ""
}
// GetBKCloudName get bk cloud name by id
// Returns "" when the client is unavailable, the listing fails, or no
// entry matches.
func GetBKCloudName(bkCloudID int) string {
	cli := nodeman.GetNodeManClient()
	if cli == nil {
		return ""
	}
	list, err := cli.CloudList()
	if err != nil {
		blog.Errorf("get cloud list failed, err %s", err.Error())
		return ""
	}
	for i := range list {
		if list[i].BKCloudID == bkCloudID {
			return list[i].BKCloudName
		}
	}
	return ""
}
// GetModuleName get module name
// Builds a "biz / set / module" path for the module matching bkModuleID.
// NOTE(review): when no module matches, the last visited "biz / set"
// prefix is returned rather than "" — presumably an intentional fallback;
// confirm with callers.
func GetModuleName(bkBizID, bkModuleID int) string {
	cli := cmdb.GetCmdbClient()
	if cli == nil {
		return ""
	}
	list, err := cli.ListTopology(bkBizID, false, true)
	if err != nil {
		blog.Errorf("list topology failed, err %s", err.Error())
		return ""
	}
	if list == nil {
		return ""
	}
	name := ""
	for _, v := range list.Child {
		name = list.BKInstName + " / " + v.BKInstName
		for _, c := range v.Child {
			if c.BKInstID == bkModuleID {
				name += " / " + c.BKInstName
				return name
			}
		}
	}
	return name
}
// UpdateNodeGroupCloudAndModuleInfo update cloudID && moduleInfo
// Stores cloudGroupID as either the consumer ID or the cloud node-group ID,
// backfills empty scale-out/in biz IDs from clusterBiz, and refreshes the
// human-readable module names.
func UpdateNodeGroupCloudAndModuleInfo(nodeGroupID string, cloudGroupID string,
	consumer bool, clusterBiz string) error {
	group, err := GetStorageModel().GetNodeGroup(context.Background(), nodeGroupID)
	if err != nil {
		return err
	}
	if consumer {
		group.ConsumerID = cloudGroupID
	} else {
		group.CloudNodeGroupID = cloudGroupID
	}
	// update group module info
	if group.NodeTemplate != nil && group.NodeTemplate.Module != nil {
		if group.NodeTemplate.Module.ScaleOutBizID == "" {
			group.NodeTemplate.Module.ScaleOutBizID = clusterBiz
		}
		if group.NodeTemplate.Module.ScaleInBizID == "" {
			group.NodeTemplate.Module.ScaleInBizID = clusterBiz
		}
		if group.NodeTemplate.Module.ScaleOutModuleID != "" {
			// NOTE(review): Atoi errors are ignored — unparsable IDs become 0
			// and the resulting module name lookup degrades silently.
			scaleOutBiz, _ := strconv.Atoi(group.NodeTemplate.Module.ScaleOutBizID)
			scaleOutModule, _ := strconv.Atoi(group.NodeTemplate.Module.ScaleOutModuleID)
			group.NodeTemplate.Module.ScaleOutModuleName = GetModuleName(scaleOutBiz, scaleOutModule)
		}
		if group.NodeTemplate.Module.ScaleInModuleID != "" {
			scaleInBiz, _ := strconv.Atoi(group.NodeTemplate.Module.ScaleInBizID)
			scaleInModule, _ := strconv.Atoi(group.NodeTemplate.Module.ScaleInModuleID)
			group.NodeTemplate.Module.ScaleInModuleName = GetModuleName(scaleInBiz, scaleInModule)
		}
	}
	err = GetStorageModel().UpdateNodeGroup(context.Background(), group)
	if err != nil {
		return err
	}
	return nil
}
// ShieldHostAlarm shield host alarm for user
//
// Resolves the business maintainers and the cloud IDs for the given host IPs,
// then asks each configured alarm backend to shield alarms for those hosts.
// Per-backend failures are logged and skipped rather than returned.
func ShieldHostAlarm(ctx context.Context, bizID string, ips []string) error {
	taskID := GetTaskIDFromContext(ctx)
	if len(ips) == 0 {
		return fmt.Errorf("ShieldHostAlarm[%s] ips empty", taskID)
	}
	biz, _ := strconv.Atoi(bizID)
	bizData, err := cmdb.GetCmdbClient().GetBusinessMaintainer(biz)
	if err != nil {
		blog.Errorf("ShieldHostAlarm[%s] GetBusinessMaintainer[%s] failed: %v", taskID, bizID, err)
		return err
	}
	maintainers := strings.Split(bizData.BKBizMaintainer, ",")
	// strings.Split never returns an empty slice, so the original
	// len(maintainers) == 0 guard could never fire; an empty maintainer
	// string yields [""], which must be rejected explicitly.
	if len(maintainers) == 0 || maintainers[0] == "" {
		return fmt.Errorf("ShieldHostAlarm[%s] BKBizMaintainer[%s] empty", taskID, bizID)
	}
	hostData, err := cmdb.GetCmdbClient().QueryAllHostInfoWithoutBiz(ips)
	if err != nil {
		blog.Errorf("ShieldHostAlarm[%s] QueryAllHostInfoWithoutBiz[%+v] failed: %v", taskID, ips, err)
		return err
	}
	hosts := make([]alarm.HostInfo, 0)
	for i := range hostData {
		hosts = append(hosts, alarm.HostInfo{
			IP:      hostData[i].BKHostInnerIP,
			CloudID: uint64(hostData[i].BkCloudID),
		})
	}
	blog.Infof("ShieldHostAlarm[%s] bizID[%s] hostInfo[%+v]", taskID, bizID, hosts)
	var alarms = []alarm.AlarmInterface{tmp.GetBKAlarmClient(), bkmonitor.GetBkMonitorClient()}
	for i := range alarms {
		err = alarms[i].ShieldHostAlarmConfig(maintainers[0], &alarm.ShieldHost{
			BizID:    bizID,
			HostList: hosts,
		})
		if err != nil {
			blog.Errorf("ShieldHostAlarm[%s][%s] ShieldHostAlarmConfig failed: %v", taskID, alarms[i].Name(), err)
			continue
		}
		blog.Infof("ShieldHostAlarm[%s][%s] ShieldHostAlarmConfig success", taskID, alarms[i].Name())
	}
	return nil
}
// UpdateAutoScalingOptionModuleInfo update cluster ca moduleInfo
//
// Loads the cluster and its autoscaling option, backfills empty scale-out/in
// business IDs with the cluster's business ID, resolves module names, and
// persists the updated option.
func UpdateAutoScalingOptionModuleInfo(clusterID string) error {
	cls, err := GetStorageModel().GetCluster(context.Background(), clusterID)
	if err != nil {
		return err
	}
	asOption, err := GetStorageModel().GetAutoScalingOption(context.Background(), clusterID)
	if err != nil {
		return err
	}
	// update asOption module info
	if asOption.Module != nil {
		if asOption.Module.ScaleOutBizID == "" {
			asOption.Module.ScaleOutBizID = cls.BusinessID
		}
		if asOption.Module.ScaleInBizID == "" {
			asOption.Module.ScaleInBizID = cls.BusinessID
		}
		if asOption.Module.ScaleOutModuleID != "" {
			// NOTE(review): Atoi errors are ignored — assumes numeric ID
			// strings; confirm upstream validation.
			scaleOutBiz, _ := strconv.Atoi(asOption.Module.ScaleOutBizID)
			scaleOutModule, _ := strconv.Atoi(asOption.Module.ScaleOutModuleID)
			asOption.Module.ScaleOutModuleName = GetModuleName(scaleOutBiz, scaleOutModule)
		}
		if asOption.Module.ScaleInModuleID != "" {
			scaleInBiz, _ := strconv.Atoi(asOption.Module.ScaleInBizID)
			scaleInModule, _ := strconv.Atoi(asOption.Module.ScaleInModuleID)
			asOption.Module.ScaleInModuleName = GetModuleName(scaleInBiz, scaleInModule)
		}
	}
	err = GetStorageModel().UpdateAutoScalingOption(context.Background(), asOption)
	if err != nil {
		return err
	}
	return nil
}
// ImportClusterNodesToCM writes cluster nodes to DB
//
// Each node's IPv4/IPv6 addresses are extracted and stored as a running node
// record; individual create failures are logged but do not abort the import.
func ImportClusterNodesToCM(ctx context.Context, nodes []k8scorev1.Node, clusterID string) error {
	for idx := range nodes {
		v4, v6 := utils.GetNodeIPAddress(&nodes[idx])
		record := &proto.Node{
			InnerIP:   utils.SliceToString(v4),
			InnerIPv6: utils.SliceToString(v6),
			Status:    common.StatusRunning,
			NodeName:  nodes[idx].Name,
			ClusterID: clusterID,
		}
		if err := GetStorageModel().CreateNode(ctx, record); err != nil {
			blog.Errorf("ImportClusterNodesToCM CreateNode[%s] failed: %v", nodes[idx].Name, err)
		}
	}
	return nil
}
// IsInDependentCluster reports whether the cluster's manage type equals
// common.ClusterManageTypeIndependent.
func IsInDependentCluster(cluster *proto.Cluster) bool {
	return cluster.ManageType == common.ClusterManageTypeIndependent
}
// IsManagedCluster reports whether the cluster's manage type equals
// common.ClusterManageTypeManaged.
func IsManagedCluster(cluster *proto.Cluster) bool {
	return cluster.ManageType == common.ClusterManageTypeManaged
}
// GetCRDByKubeConfig get crd by kubeConfig
//
// Validates the kubeConfig YAML, builds an apiextensions clientset from it,
// and lists all CustomResourceDefinitions with a 120-second timeout.
func GetCRDByKubeConfig(kubeConfig string) (*v1.CustomResourceDefinitionList, error) {
	// validate the kubeConfig YAML; the parsed result itself is discarded
	_, err := types.GetKubeConfigFromYAMLBody(false, types.YamlInput{
		FileName:    "",
		YamlContent: kubeConfig,
	})
	if err != nil {
		return nil, fmt.Errorf("checkKubeConfig get kubeConfig from YAML body failed: %v", err)
	}
	// parse the kubeConfig string into a client config
	cfg, err := clientcmd.NewClientConfigFromBytes([]byte(kubeConfig))
	if err != nil {
		return nil, err
	}
	// build the Kubernetes REST config
	config, err := cfg.ClientConfig()
	if err != nil {
		return nil, err
	}
	// create an apiextensions clientset from the config
	cli, err := clientset.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	// list CRDs, bounded by a 120s timeout
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
	defer cancel()
	return cli.ApiextensionsV1().CustomResourceDefinitions().List(ctx, metav1.ListOptions{})
}
// UpdateVirtualNodeStatus update virtual nodes status
//
// Marks every node belonging to the (cluster, nodegroup, task) tuple as
// resource-apply-failed. Missing identifiers are treated as a no-op.
func UpdateVirtualNodeStatus(clusterId, nodeGroupId, taskID string) error {
	if clusterId == "" || nodeGroupId == "" || taskID == "" {
		blog.Infof("UpdateVirtualNodeStatus[%s] validate data", taskID)
		return nil
	}
	condM := make(operator.M)
	condM["nodegroupid"] = nodeGroupId
	condM["clusterid"] = clusterId
	condM["taskid"] = taskID
	cond := operator.NewLeafCondition(operator.Eq, condM)
	nodes, err := GetStorageModel().ListNode(context.Background(), cond, &storeopt.ListOption{})
	if err != nil {
		blog.Errorf("UpdateVirtualNodeStatus[%s] NodeGroup %s all Nodes failed, %s",
			taskID, nodeGroupId, err.Error())
		return err
	}
	blog.Infof("UpdateVirtualNodeStatus[%s] ListNodes[%+v] success", taskID, nodes)
	for i := range nodes {
		// log both taskID and the node ID, matching DeleteVirtualNodes; the
		// original passed the node ID into the taskID slot
		blog.Infof("UpdateVirtualNodeStatus[%s] node[%s] status", taskID, nodes[i].NodeID)
		nodes[i].Status = common.StatusResourceApplyFailed
		// surface update failures instead of silently discarding them
		if err := GetStorageModel().UpdateNode(context.Background(), nodes[i]); err != nil {
			blog.Errorf("UpdateVirtualNodeStatus[%s] UpdateNode[%s] failed: %v", taskID, nodes[i].NodeID, err)
		}
	}
	return nil
}
// DeleteVirtualNodes delete virtual nodes
//
// Deletes every virtual node (NodeID prefixed "bcs") belonging to the
// (cluster, nodegroup, task) tuple. Missing identifiers are a no-op.
func DeleteVirtualNodes(clusterId, nodeGroupId, taskID string) error {
	if clusterId == "" || nodeGroupId == "" || taskID == "" {
		blog.Infof("DeleteVirtualNodes[%s] validate data", taskID)
		return nil
	}
	condM := make(operator.M)
	condM["nodegroupid"] = nodeGroupId
	condM["clusterid"] = clusterId
	condM["taskid"] = taskID
	cond := operator.NewLeafCondition(operator.Eq, condM)
	nodes, err := GetStorageModel().ListNode(context.Background(), cond, &storeopt.ListOption{})
	if err != nil {
		// the original log line named the wrong function ("ListNodesInClusterNodePool")
		blog.Errorf("DeleteVirtualNodes[%s] NodeGroup %s all Nodes failed, %s",
			taskID, nodeGroupId, err.Error())
		return err
	}
	blog.Infof("DeleteVirtualNodes[%s] ListNodes[%+v] success", taskID, nodes)
	for i := range nodes {
		blog.Infof("DeleteVirtualNodes[%s] node[%s] status", taskID, nodes[i].NodeID)
		// only virtual nodes carry the "bcs" NodeID prefix
		if !strings.HasPrefix(nodes[i].GetNodeID(), "bcs") {
			continue
		}
		// surface delete failures instead of silently discarding them
		if err := GetStorageModel().DeleteNode(context.Background(), nodes[i].GetNodeID()); err != nil {
			blog.Errorf("DeleteVirtualNodes[%s] DeleteNode[%s] failed: %v", taskID, nodes[i].GetNodeID(), err)
		}
	}
	return nil
}
|
package generate
// generate returns the first numRows rows of Pascal's triangle.
// Row r (0-based) has r+1 entries; each interior entry is the sum of the
// two entries above it.
func generate(numRows int) [][]int {
	triangle := make([][]int, numRows)
	for row := 0; row < numRows; row++ {
		cells := make([]int, row+1)
		cells[0] = 1
		cells[row] = 1
		for col := 1; col < row; col++ {
			cells[col] = triangle[row-1][col-1] + triangle[row-1][col]
		}
		triangle[row] = cells
	}
	return triangle
}
|
// Author: xinbing
// Created: 2018/8/30 21:04
package utilities
|
package api
import (
"InkaTry/warehouse-storage-be/internal/http/admin/dtos"
"InkaTry/warehouse-storage-be/internal/pkg/errs"
"InkaTry/warehouse-storage-be/internal/pkg/http/responder"
"InkaTry/warehouse-storage-be/internal/pkg/stores"
"context"
"encoding/json"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
)
// TestListInventories exercises the ListInventories HTTP handler with a table
// of cases: a valid request, a request missing warehouse_id, and a handler
// that reports no results.
func TestListInventories(t *testing.T) {
	tts := []struct {
		caseName    string
		handlerFunc func(ctx context.Context, p *dtos.ListInventoriesRequest) (*dtos.ListInventoriesResponse, error)
		request     func() *http.Request
		result      func(resp *http.Response)
	}{
		{
			caseName: "when all param is ok",
			request: func() *http.Request {
				req, _ := http.NewRequest(http.MethodGet, "/list/inventories?warehouse_id=1", nil)
				return req
			},
			handlerFunc: func(ctx context.Context, p *dtos.ListInventoriesRequest) (*dtos.ListInventoriesResponse, error) {
				return &dtos.ListInventoriesResponse{
					Inventories: stores.Inventories{
						{
							ID: 1,
						},
					},
					HasNext: false,
					Page:    1,
				}, nil
			},
			result: func(resp *http.Response) {
				var responseBody *responder.AdvanceCommonResponse
				// check the decode error instead of discarding it
				if err := json.NewDecoder(resp.Body).Decode(&responseBody); err != nil {
					t.Fatalf("decode response: %v", err)
				}
				// testify convention: expected value first, actual second
				assert.Equal(t, http.StatusOK, resp.StatusCode)
			},
		},
		{
			caseName: "when warehouse id is empty",
			request: func() *http.Request {
				req, _ := http.NewRequest(http.MethodGet, "/list/inventories", nil)
				return req
			},
			handlerFunc: func(ctx context.Context, p *dtos.ListInventoriesRequest) (*dtos.ListInventoriesResponse, error) {
				return nil, nil
			},
			result: func(resp *http.Response) {
				var responseBody *responder.AdvanceCommonResponse
				if err := json.NewDecoder(resp.Body).Decode(&responseBody); err != nil {
					t.Fatalf("decode response: %v", err)
				}
				assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
				respByte, _ := json.Marshal(responseBody.Data)
				expectedByte, _ := json.Marshal([]string{errs.ErrInvalidWarehouseID.Error()})
				assert.JSONEq(t, string(expectedByte), string(respByte))
			},
		},
		{
			caseName: "when no result found",
			request: func() *http.Request {
				req, _ := http.NewRequest(http.MethodGet, "/list/inventories?warehouse_id=123", nil)
				return req
			},
			handlerFunc: func(ctx context.Context, p *dtos.ListInventoriesRequest) (*dtos.ListInventoriesResponse, error) {
				return nil, errs.ErrNoResultFound
			},
			result: func(resp *http.Response) {
				var responseBody *responder.CommonResponse
				if err := json.NewDecoder(resp.Body).Decode(&responseBody); err != nil {
					t.Fatalf("decode response: %v", err)
				}
				assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
			},
		},
	}
	for _, tt := range tts {
		t.Log(tt.caseName)
		router := mux.NewRouter()
		router.Handle("/list/inventories", ListInventories(tt.handlerFunc))
		rr := httptest.NewRecorder()
		req := tt.request()
		router.ServeHTTP(rr, req)
		tt.result(rr.Result())
	}
}
|
package app
import (
"github.com/ebar-go/ego/component/log"
"github.com/ebar-go/ego/config"
"github.com/ebar-go/ego/errors"
"github.com/ebar-go/ego/utils"
"github.com/ebar-go/event"
"github.com/go-redis/redis"
"github.com/jinzhu/gorm"
"time"
)
const (
	// ConfigInitEvent is the event name for configuration initialization.
	ConfigInitEvent = "CONFIG_INIT_EVENT"
	// LogManagerInitEvent is the event name for log-manager initialization.
	LogManagerInitEvent = "LOG_MANAGER_INIT_EVENT"
	// MySqlConnectEvent is the event name for establishing the MySQL connection.
	MySqlConnectEvent = "MYSQL_CONNECT_EVENT"
	// RedisConnectEvent is the event name for establishing the Redis connection.
	RedisConnectEvent = "REDIS_CONNECT_EVENT"
)
// init wires the application bootstrap: it registers the event dispatcher in
// the DI container and subscribes handlers that lazily initialize the log
// manager, the MySQL connection and the Redis connection. Note that no
// handler is registered here for ConfigInitEvent.
func init() {
	// init event dispatcher
	utils.FatalError("InitEventDispatcher", Container.Provide(event.NewDispatcher))
	// use eventDispatcher manage global service initialize
	eventDispatcher := EventDispatcher()
	eventDispatcher.Register(LogManagerInitEvent, func(ev event.Event) {
		utils.FatalError("InitLogManager", initLogManager())
	})
	eventDispatcher.Register(MySqlConnectEvent, func(ev event.Event) {
		utils.FatalError("ConnectDatabase", connectDatabase())
	})
	eventDispatcher.Register(RedisConnectEvent, func(ev event.Event) {
		utils.FatalError("ConnectRedis", connectRedis())
	})
}
// initLogManager registers a log.Manager provider in the container, built
// from the server configuration (system name, port, log path).
func initLogManager() error {
	return Container.Provide(func() log.Manager {
		return log.NewManager(log.ManagerConf{
			SystemName: config.Server().Name,
			SystemPort: config.Server().Port,
			LogPath:    config.Server().LogPath,
		})
	})
}
// connectRedis registers a *redis.Client provider in the container. The
// provider opens a client from the Redis configuration and verifies
// connectivity with a PING before returning it.
func connectRedis() error {
	return Container.Provide(func() (*redis.Client, error) {
		connection := redis.NewClient(config.Redis().Options())
		_, err := connection.Ping().Result()
		if err != nil {
			return nil, errors.RedisConnectFailed("%s", err.Error())
		}
		return connection, nil
	})
}
// connectDatabase registers a *gorm.DB provider in the container. The
// provider opens a MySQL connection from configuration and applies log mode
// and connection-pool settings (idle/open limits, max lifetime).
func connectDatabase() error {
	return Container.Provide(func() (*gorm.DB, error) {
		options := config.Mysql()
		connection, err := gorm.Open("mysql", options.Dsn())
		if err != nil {
			return nil, errors.MysqlConnectFailed("%s", err.Error())
		}
		// set log mod
		connection.LogMode(options.LogMode)
		// set pool config
		connection.DB().SetMaxIdleConns(options.MaxIdleConnections)
		connection.DB().SetMaxOpenConns(options.MaxOpenConnections)
		connection.DB().SetConnMaxLifetime(time.Duration(options.MaxLifeTime) * time.Second)
		return connection, nil
	})
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package partition
import (
"context"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/testkit/testsetup"
"go.uber.org/goleak"
)
const (
	// waitForCleanDataRound indicates how many times we should check whether data has been cleaned.
	waitForCleanDataRound = 150
	// waitForCleanDataInterval is the minimum duration between two data-clean checks.
	waitForCleanDataInterval = time.Millisecond * 100
)
// TestMain configures the partition test environment: common test setup,
// async-commit clock knobs zeroed, a tiny DDL error-retry interval, and
// goroutine-leak verification with known-benign background goroutines
// ignored.
func TestMain(m *testing.M) {
	testsetup.SetupForCommonTest()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.TiKVClient.AsyncCommit.SafeWindow = 0
		conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0
	})
	ddl.SetWaitTimeWhenErrorOccurred(time.Microsecond)
	// long-lived library goroutines that goleak should not report
	opts := []goleak.Option{
		goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"),
		goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"),
		goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
	}
	goleak.VerifyTestMain(m, opts...)
}
// backgroundExec runs sql against the given schema in a fresh test session
// and reports the (traced) result on done. Intended to be launched as a
// goroutine; it always sends exactly one value on done.
func backgroundExec(s kv.Storage, schema, sql string, done chan error) {
	se, err := session.CreateSession4Test(s)
	if err != nil {
		done <- errors.Trace(err)
		return
	}
	defer se.Close()
	_, err = se.Execute(context.Background(), "use "+schema)
	if err != nil {
		done <- errors.Trace(err)
		return
	}
	_, err = se.Execute(context.Background(), sql)
	done <- errors.Trace(err)
}
|
package main
import (
"fmt"
)
// main demonstrates slice length/capacity growth under append and the
// aliasing effects of re-slicing and appending to sub-slices.
func main() {
	a := []int{}
	fmt.Println(a)
	fmt.Printf("Length: %v\n", len(a))
	fmt.Printf("Capacity: %v\n", cap(a))
	// append grows the backing array as needed
	a = append(a,1)
	fmt.Println(a)
	fmt.Printf("Length: %v\n", len(a))
	fmt.Printf("Capacity: %v\n", cap(a))
	a = append(a, []int{2, 3, 4, 5}...)
	fmt.Println(a)
	fmt.Printf("Length: %v\n", len(a))
	fmt.Printf("Capacity: %v\n", cap(a))
	//array is fixed size
	//item should max size all time
	aa := []int{1, 2, 3, 4, 5}
	// b shares aa's backing array (a view of the first four elements)
	b := aa[:len(aa)-1]
	// aa[:2] has spare capacity, so this append writes into aa's backing
	// array in place: aa becomes [1 2 4 5 5], and b observes the change too
	c := append(aa[:2],aa[3:]...)
	fmt.Println(b)
	fmt.Println(c)
	fmt.Println(aa[2:3])
}
package handlers
import (
"bytes"
"encoding/json"
"github.com/bpross/password-as-a-service/stats"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestStatsHandlerInitial verifies that a fresh stats store serves a 200
// response with zero total requests and zero average.
func TestStatsHandlerInitial(t *testing.T) {
	req := httptest.NewRequest("GET", "/stats", nil)
	rr := httptest.NewRecorder()
	st := stats.New()
	StatsHandler(rr, req, st)
	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}
	expected := `{"Total":0,"Average":0}`
	actual := rr.Body.String()
	if actual != expected {
		t.Fatalf("%v", actual)
	}
}
// TestStatsHandlerOneRequest verifies the response after recording a single
// request. The first recorded request sleeps 0s (see addStats), so the
// reported average stays 0.
func TestStatsHandlerOneRequest(t *testing.T) {
	req := httptest.NewRequest("GET", "/stats", nil)
	rr := httptest.NewRecorder()
	st := stats.New()
	st = addStats(st, 1)
	StatsHandler(rr, req, st)
	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}
	expected := `{"Total":1,"Average":0}`
	actual := rr.Body.String()
	if actual != expected {
		t.Fatalf("%v", actual)
	}
}
// TestStatsHandlerMultipleRequest records three requests and checks the
// decoded JSON response: the total must match, and the average response time
// must be at least 1ms (the recorded requests include multi-second sleeps).
func TestStatsHandlerMultipleRequest(t *testing.T) {
	req := httptest.NewRequest("GET", "/stats", nil)
	rr := httptest.NewRecorder()
	st := stats.New()
	numberRequests := 3
	st = addStats(st, numberRequests)
	StatsHandler(rr, req, st)
	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}
	readBuf, _ := ioutil.ReadAll(rr.Body)
	r := bytes.NewReader(readBuf)
	decoder := json.NewDecoder(r)
	resp := &StatsResponse{}
	err := decoder.Decode(resp)
	if err != nil {
		t.Fatalf("Failed to decode json")
	}
	if resp.Total != int64(numberRequests) {
		t.Fatalf("Incorrect number of requests %v", resp.Total)
	}
	// 1ms in nanoseconds
	expectedAverageResponseTime := int64(1000000)
	if resp.Average < expectedAverageResponseTime {
		t.Fatalf("Average Response Time too fast %v", resp.Average)
	}
}
// addStats records numReq synthetic request timings on st: request i sleeps
// i seconds before registering the elapsed time via st.End.
// NOTE(review): sleeping whole seconds makes the calling tests take several
// seconds to run; milliseconds would likely suffice — confirm against the
// assertions in TestStatsHandlerMultipleRequest before changing.
func addStats(st *stats.Stats, numReq int) *stats.Stats {
	for i := 0; i < numReq; i++ {
		start := time.Now()
		wait := time.Duration(i)
		time.Sleep(wait * time.Second)
		st.End(start)
	}
	return st
}
|
package scene
import (
"github.com/mikee385/GolangRayTracer/color"
"github.com/mikee385/GolangRayTracer/geometry"
"math"
)
// Bias is the small offset applied when spawning secondary and shadow rays
// from an intersection point, so they do not immediately re-hit the surface
// they originate from.
const Bias = 1.0E-4
// Scene holds everything needed to trace rays: the renderable items, the
// light sources, and global rendering parameters.
type Scene struct {
	backgroundColor color.ColorRGB   // color returned when a ray hits nothing
	refractiveIndex float32          // refractive index of the surrounding medium
	maxRayDepth     uint             // maximum recursion depth for reflection/refraction
	items           []internalObject // all objects, including light sources
	lights          []internalLight  // light sources only, for shading loops
}
// NewScene constructs a Scene with the given background color, ambient
// refractive index and maximum ray recursion depth. Item and light slices
// are pre-sized for a handful of entries.
func NewScene(backgroundColor color.ColorRGB, refractiveIndex float32, maxRayDepth uint) Scene {
	scene := Scene{
		backgroundColor: backgroundColor,
		refractiveIndex: refractiveIndex,
		maxRayDepth:     maxRayDepth,
	}
	scene.items = make([]internalObject, 0, 4)
	scene.lights = make([]internalLight, 0, 4)
	return scene
}
// AddLightSource registers light with the scene twice: as an item (so rays
// can intersect it) and as a light (so shading loops can iterate over it),
// sharing the same index.
func (scene *Scene) AddLightSource(light *SceneLight) {
	idx := len(scene.items)
	item := internalObject{
		index:   idx,
		object:  light,
		isLight: true,
	}
	entry := internalLight{
		index: idx,
		light: light,
	}
	scene.items = append(scene.items, item)
	scene.lights = append(scene.lights, entry)
}
// AddObject registers a non-light object with the scene.
func (scene *Scene) AddObject(object SceneObject) {
	item := internalObject{
		index:   len(scene.items),
		object:  object,
		isLight: false,
	}
	scene.items = append(scene.items, item)
}
// Trace follows ray through the scene and returns the resulting color and
// the distance to the nearest hit. depth is the current recursion level;
// reflection and refraction rays are only spawned while depth is below the
// scene's maxRayDepth. Shadow, diffuse and specular contributions from each
// light are always accumulated.
func (scene *Scene) Trace(ray geometry.Ray3D, depth uint) TraceResult {
	var nearestDistance float32 = 0.0
	var nearestItem internalObject
	var nearestIntersection = false
	// Find the nearest object that the ray intersects.
	for _, item := range scene.items {
		var currentDistance, hasIntersection = item.object.Intersect(ray)
		if hasIntersection {
			if !nearestIntersection || currentDistance < nearestDistance {
				nearestDistance = currentDistance
				nearestItem = item
				nearestIntersection = true
			}
		}
	}
	// If the ray doesn't hit any objects, return the background color.
	if !nearestIntersection {
		return TraceResult{
			Color:    scene.backgroundColor,
			Distance: 0.0,
		}
	}
	// Get the point where the ray intersects the object.
	var point = ray.Point(nearestDistance)
	// If the ray intersects a light source, simply return the color of the light.
	if nearestItem.isLight {
		return TraceResult{
			Color:    nearestItem.object.Material(point).Color,
			Distance: nearestDistance,
		}
	}
	// Get the surface normal and color at the intersection point.
	var normal = nearestItem.object.Normal(point)
	var surfaceMaterial = nearestItem.object.Material(point)
	var rayVector = ray.Direction.ToVector()
	var normalVector = normal.ToVector()
	// Calculate the color at the intersection point.
	var totalRayColor = color.Black()
	if depth < scene.maxRayDepth {
		// TODO: Add Fresnel effects (?)
		// Calculate the color from the reflected ray.
		var reflection = surfaceMaterial.Reflection
		if reflection > 0.0 {
			// mirror the incoming direction about the surface normal
			var reflectedDirection = rayVector.Sub(normalVector.Scale(2.0 * geometry.Dot(rayVector, normalVector))).ToUnit()
			// offset the origin by Bias to avoid re-hitting this surface
			var nearbyPoint = point.Translate_Dist(reflectedDirection, Bias)
			var reflectedResult = scene.Trace(geometry.NewRay(nearbyPoint, reflectedDirection), depth+1)
			totalRayColor = totalRayColor.Add(reflectedResult.Color.Scale(reflection).Mul(surfaceMaterial.Color))
		}
		// Calculate the color from the refracted ray.
		var refraction = surfaceMaterial.Refraction
		if refraction > 0.0 {
			var n, cosI float32
			if geometry.Dot(rayVector, normalVector) > 0.0 {
				// Internal refraction
				n = surfaceMaterial.RefractiveIndex / scene.refractiveIndex
				cosI = -geometry.Dot(rayVector, normalVector.Neg())
			} else {
				// External refraction
				n = scene.refractiveIndex / surfaceMaterial.RefractiveIndex
				cosI = -geometry.Dot(rayVector, normalVector)
			}
			// cos2T <= 0 means total internal reflection: no refracted ray
			var cos2T = 1 - n*n*(1-cosI*cosI)
			if cos2T > 0.0 {
				var refractedDirection = rayVector.Scale(n).Add(normalVector.Scale(n*cosI - float32(math.Sqrt(float64(cos2T))))).ToUnit()
				var nearbyPoint = point.Translate_Dist(refractedDirection, Bias)
				var refractedResult = scene.Trace(geometry.NewRay(nearbyPoint, refractedDirection), depth+1)
				// Beer's Law
				var absorbance = surfaceMaterial.Color.Scale(0.15 * -refractedResult.Distance)
				var transparency = color.New(
					float32(math.Exp(float64(absorbance.Red))),
					float32(math.Exp(float64(absorbance.Green))),
					float32(math.Exp(float64(absorbance.Blue))))
				totalRayColor = totalRayColor.Add(refractedResult.Color.Mul(transparency))
			}
		}
	}
	// Calculate the color from each light in the scene.
	for _, lightItem := range scene.lights {
		var light = lightItem.light
		var lightColor = light.Material(point).Color
		var vectorToLight = geometry.NewVector_BetweenPoints(point, light.Center())
		var distanceToLight = vectorToLight.Magnitude()
		var directionToLight = vectorToLight.ToUnit()
		var directionToLightVector = directionToLight.ToVector()
		// Calculate the shading from the light.
		var shade float32 = 1.0
		var nearbyPoint = point.Translate_Dist(directionToLight, Bias)
		var shadowRay = geometry.NewRay(nearbyPoint, directionToLight)
		// any non-light item between the point and the light casts a hard shadow
		for _, shadowItem := range scene.items {
			if shadowItem.index != lightItem.index {
				var shadowDistance, hasIntersection = shadowItem.object.Intersect(shadowRay)
				if hasIntersection && shadowDistance < distanceToLight {
					shade = 0.0
					break
				}
			}
		}
		if shade != 0.0 {
			// Calculate the diffusive lighting from the light.
			var diffuse = surfaceMaterial.Diffuse
			if diffuse > 0.0 {
				var percentageOfLight = geometry.Dot(normalVector, directionToLightVector)
				if percentageOfLight > 0.0 {
					totalRayColor = totalRayColor.Add(lightColor.Scale(shade * diffuse * percentageOfLight).Mul(surfaceMaterial.Color))
				}
			}
			// Calculate the specular lighting from the light.
			var specular = surfaceMaterial.Specular
			var shininess = surfaceMaterial.Shininess
			if specular > 0.0 && shininess > 0 {
				var reflectedDirection = directionToLightVector.Sub(normalVector.Scale(2.0 * geometry.Dot(directionToLightVector, normalVector))).ToUnit()
				var percentageOfLight = geometry.Dot(rayVector, reflectedDirection.ToVector())
				if percentageOfLight > 0.0 {
					totalRayColor = totalRayColor.Add(lightColor.Scale(shade * specular * float32(math.Pow(float64(percentageOfLight), float64(shininess)))))
				}
			}
		}
	}
	return TraceResult{
		Color:    totalRayColor,
		Distance: nearestDistance,
	}
}
// internalObject is a scene entry: any renderable object, flagged when it is
// also a light source.
type internalObject struct {
	index   int         // position within Scene.items; shared with internalLight.index
	object  SceneObject // the renderable object
	isLight bool        // true when the object is a light source
}

// internalLight is a light-only view of a scene entry, sharing its index
// with the corresponding internalObject.
type internalLight struct {
	index int
	light *SceneLight
}

// TraceResult is the outcome of tracing one ray: the computed color and the
// distance to the nearest intersection (0 when nothing was hit).
type TraceResult struct {
	Color    color.ColorRGB
	Distance float32
}
|
package data
import (
"github.com/gorilla/websocket"
"github.com/op/go-logging"
"time"
)
// log is the package-wide logger.
var log = logging.MustGetLogger("main-logger")

// WsError is the error payload embedded in a websocket event.
type WsError struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

// WsEvent is a message exchanged over the Slack RTM websocket; fields are
// populated depending on the event type.
type WsEvent struct {
	Id      int     `json:"id"`
	Type    string  `json:"type"`
	Channel string  `json:"channel"`
	Text    string  `json:"text"`
	Ok      bool    `json:"ok"`
	ReplyTo int     `json:"reply_to"`
	Ts      string  `json:"ts"`
	Error   WsError `json:"error"`
	Url     string  `json:"url"`
	User    string  `json:"user"`
}

// RtmResponse is the reply to an RTM connect call, carrying the websocket URL.
type RtmResponse struct {
	Url string `json:"url"`
}

// WsMessage wraps a raw websocket message body.
type WsMessage struct {
	Msg string
}

// SlackUser pairs a user with the channel they are interacting from.
type SlackUser struct {
	User    string
	Channel string
}

// Checker bundles the channels and connection used by the keep-alive
// machinery started in Context.RunChecker.
type Checker struct {
	StopPinger      chan bool // stops the periodic ping goroutine
	StopChecker     chan bool // stops the liveness watchdog goroutine
	Alive           chan bool // receives a signal whenever the connection proves alive
	StopHandlerChan chan bool // notified when the watchdog decides to restart
	Ws              *websocket.Conn
}

// Context ties a websocket connection to its checker and current user.
type Context struct {
	Checker *Checker
	Ws      *websocket.Conn
	User    string
}
// RunChecker starts two goroutines: a pinger that writes a ping event
// (Id 911) every 10 seconds until StopPinger fires, and a watchdog that
// signals StopHandlerChan and exits if no Alive signal arrives within
// 30 seconds (or exits quietly when StopChecker fires).
func (ctx *Context) RunChecker() {
	// pinger: keep the websocket alive with a ping every 10s
	go func() {
		for {
			select {
			case <-ctx.Checker.StopPinger:
				return
			case <-time.After(10 * time.Second):
				ctx.Ws.WriteJSON(&WsEvent{
					Id:   911,
					Type: "ping",
				})
			}
		}
	}()
	// watchdog: restart the handler when the connection goes silent for 30s
	go func() {
		for {
			select {
			case <-ctx.Checker.StopChecker:
				return
			case <-ctx.Checker.Alive:
				// activity observed; reset the 30s window on the next loop
			case <-time.After(30 * time.Second):
				log.Error("Connection timeout after 30 seconds; trying to restart...")
				ctx.Checker.StopHandlerChan <- true
				return
			}
		}
	}()
}
// StopChecker signals both RunChecker goroutines (pinger and watchdog) to
// exit. NOTE(review): these are unbuffered sends and will block if the
// goroutines have already returned — confirm callers only stop a running
// checker.
func (ctx *Context) StopChecker() {
	ctx.Checker.StopPinger <- true
	ctx.Checker.StopChecker <- true
}
// Stop shuts down the checker goroutines and closes the websocket connection.
func (ctx *Context) Stop() {
	ctx.StopChecker()
	ctx.Ws.Close()
}
|
// +build !mysql
package main
import (
"database/sql"
"fmt"
"os"
"strings"
"time"
)
// SCSDB wraps a database/sql connection pool.
type SCSDB struct {
	conn *sql.DB
}
// CreateDBConnection constructs an SCSDB and initializes its connection via
// db.init(), which is defined elsewhere (presumably in a build-tag-specific
// file, given the "!mysql" constraint on this one — confirm).
func CreateDBConnection() *SCSDB {
	db := SCSDB{}
	db.init()
	log.Infof("Starting connection....")
	return &db
}
// ForcedStatement executes a write statement inside a transaction, retrying
// while the server reports "Error 1040" (too many connections).
//
// Fixes over the original: the transaction is now committed (it was never
// committed or rolled back), the prepared statement is closed, the shadowed
// `err` from Begin/Prepare is handled instead of being lost, and the dead
// post-exec retry check is removed (Exec errors terminate the process, as
// before).
func (db *SCSDB) ForcedStatement(query string, args ...interface{}) error {
	log.Debug(query, args)
	log.Infof("Query %s", query)
	log.Infof("Args %s", args)
	for {
		tx, err := db.conn.Begin()
		if err != nil {
			if strings.Contains(err.Error(), "Error 1040") {
				time.Sleep(10 * time.Millisecond)
				continue
			}
			return err
		}
		stmt, err := tx.Prepare(query)
		if err != nil {
			tx.Rollback()
			if strings.Contains(err.Error(), "Error 1040") {
				time.Sleep(10 * time.Millisecond)
				continue
			}
			return err
		}
		_, err = stmt.Exec(args...)
		stmt.Close()
		if err != nil {
			// preserve the original fail-fast behavior on execution errors
			tx.Rollback()
			fmt.Printf("exec failed: %v\n", err)
			os.Exit(1)
		}
		// commit so the statement's effects become durable
		if err := tx.Commit(); err != nil {
			return err
		}
		return nil
	}
}
// ForcedQueryRowWithOneParameter runs a single-parameter query inside a
// transaction, scanning the single result row into args, and retries while
// the server reports "Error 1040" (too many connections).
//
// Fixes over the original: `tx, err := db.conn.Begin()` shadowed the outer
// `err`, so the function always logged and returned nil even when the scan
// failed; the transaction was also never released. The scan error is now
// returned and the read-only transaction is rolled back.
func (db *SCSDB) ForcedQueryRowWithOneParameter(query string, param1 interface{}, args ...interface{}) error {
	var err error
	log.Infof("Query %s", query)
	log.Infof("Args %s", param1)
	log.Infof("Args %s", args)
	for ok := false; !ok; {
		var tx *sql.Tx
		tx, err = db.conn.Begin()
		if err != nil {
			if strings.Contains(err.Error(), "Error 1040") {
				time.Sleep(10 * time.Millisecond)
				continue
			}
			return err
		}
		err = tx.QueryRow(
			query,
			param1).
			Scan(args...)
		// read-only transaction; release it either way
		tx.Rollback()
		ok = true
		if err != nil && strings.Contains(err.Error(), "Error 1040") {
			ok = false
			time.Sleep(10 * time.Millisecond)
		}
	}
	log.Infof("Error %s ", err)
	return err
}
// GetOpenConnections returns the number of currently open connections in the
// underlying sql.DB pool.
func (db *SCSDB) GetOpenConnections() int {
	return db.conn.Stats().OpenConnections
}
|
package main
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/pkg/errors"
)
const (
	// url is the backend endpoint to call.
	// NOTE(review): empty here, so requests will fail until it is set.
	url string = ""
)
// This sketches a server that, on receiving a request, calls a backend
// server; the request is given a timeout, and on expiry the backend
// connection resources are released.
// NOTE(review): the comment in the original mentioned a 3-second timeout,
// but handler uses 1000 microseconds (1ms) — confirm which is intended.
func main() {
	handler()
}
// handler fires the backend request in a goroutine with a 1000-microsecond
// deadline and reports success or failure. The goroutine always sends
// exactly one value on errChan, so a plain receive replaces the original
// single-case select; the "faild" typo in the failure message is fixed.
func handler() {
	ctx, cancel := context.WithTimeout(context.Background(), 1000*time.Microsecond)
	defer cancel() // not forget!
	errChan := make(chan error, 1)
	go func() {
		errChan <- requestWithContext(ctx)
	}()
	if err := <-errChan; err != nil {
		fmt.Printf("failed: %+v\n", err)
		return
	}
	fmt.Println("success!")
}
// requestWithContext issues a GET to the backend in a goroutine and waits on
// either the request finishing or ctx being done; on cancellation it cancels
// the in-flight connection and returns ctx's error.
func requestWithContext(ctx context.Context) error {
	tr := &http.Transport{}
	client := &http.Client{Transport: tr}
	// issue the backend request from a goroutine
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	errCh := make(chan error, 1)
	go func() {
		res, err := client.Do(req)
		if err == nil {
			bytes, _ := ioutil.ReadAll(res.Body)
			fmt.Printf("%s\n", string(bytes))
		}
		errCh <- errors.Wrap(err, "wraped")
	}()
	// watch both the request channel and ctx.Done(); handle whichever fires first
	// on cancellation, cancel the underlying connection
	var ret error
	select {
	case err := <-errCh:
		ret = err
	case <-ctx.Done():
		// NOTE(review): Transport.CancelRequest is deprecated; requests
		// built with http.NewRequestWithContext cancel themselves — confirm
		// before modernizing.
		tr.CancelRequest(req) // cancel the in-flight request
		fmt.Printf("context done: %+v\n", <-errCh)
		ret = ctx.Err()
	}
	return ret
}
|
// Copyright 2020, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awsemfexporter
import (
"reflect"
"strings"
"testing"
"time"
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/stretchr/testify/assert"
"go.opentelemetry.io/collector/consumer/pdata"
"go.opentelemetry.io/collector/translator/internaldata"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zaptest/observer"
)
// generateTestIntGauge returns an OpenCensus int64 gauge metric named name,
// with unit "Count", one label (label1=value1), and a single point of value 1.
func generateTestIntGauge(name string) *metricspb.Metric {
	return &metricspb.Metric{
		MetricDescriptor: &metricspb.MetricDescriptor{
			Name: name,
			Type: metricspb.MetricDescriptor_GAUGE_INT64,
			Unit: "Count",
			LabelKeys: []*metricspb.LabelKey{
				{Key: "label1"},
			},
		},
		Timeseries: []*metricspb.TimeSeries{
			{
				LabelValues: []*metricspb.LabelValue{
					{Value: "value1", HasValue: true},
				},
				Points: []*metricspb.Point{
					{
						Value: &metricspb.Point_Int64Value{
							Int64Value: 1,
						},
					},
				},
			},
		},
	}
}
// generateTestDoubleGauge returns an OpenCensus double gauge metric named
// name, with unit "Count", one label (label1=value1), and a single point of
// value 0.1.
func generateTestDoubleGauge(name string) *metricspb.Metric {
	return &metricspb.Metric{
		MetricDescriptor: &metricspb.MetricDescriptor{
			Name: name,
			Type: metricspb.MetricDescriptor_GAUGE_DOUBLE,
			Unit: "Count",
			LabelKeys: []*metricspb.LabelKey{
				{Key: "label1"},
			},
		},
		Timeseries: []*metricspb.TimeSeries{
			{
				LabelValues: []*metricspb.LabelValue{
					{Value: "value1", HasValue: true},
				},
				Points: []*metricspb.Point{
					{
						Value: &metricspb.Point_DoubleValue{
							DoubleValue: 0.1,
						},
					},
				},
			},
		},
	}
}
// generateTestIntSum returns an OpenCensus cumulative int64 metric named
// name, with unit "Count", one declared label key but two label values
// (value1, value2), and a single point of value 1.
func generateTestIntSum(name string) *metricspb.Metric {
	return &metricspb.Metric{
		MetricDescriptor: &metricspb.MetricDescriptor{
			Name: name,
			Type: metricspb.MetricDescriptor_CUMULATIVE_INT64,
			Unit: "Count",
			LabelKeys: []*metricspb.LabelKey{
				{Key: "label1"},
			},
		},
		Timeseries: []*metricspb.TimeSeries{
			{
				LabelValues: []*metricspb.LabelValue{
					{Value: "value1", HasValue: true},
					{Value: "value2", HasValue: true},
				},
				Points: []*metricspb.Point{
					{
						Value: &metricspb.Point_Int64Value{
							Int64Value: 1,
						},
					},
				},
			},
		},
	}
}
// generateTestDoubleSum returns an OpenCensus cumulative double metric named
// name, with unit "Count", one declared label key but two label values
// (value1, value2), and a single point of value 0.1.
func generateTestDoubleSum(name string) *metricspb.Metric {
	return &metricspb.Metric{
		MetricDescriptor: &metricspb.MetricDescriptor{
			Name: name,
			Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE,
			Unit: "Count",
			LabelKeys: []*metricspb.LabelKey{
				{Key: "label1"},
			},
		},
		Timeseries: []*metricspb.TimeSeries{
			{
				LabelValues: []*metricspb.LabelValue{
					{Value: "value1", HasValue: true},
					{Value: "value2", HasValue: true},
				},
				Points: []*metricspb.Point{
					{
						Value: &metricspb.Point_DoubleValue{
							DoubleValue: 0.1,
						},
					},
				},
			},
		},
	}
}
// generateTestDoubleHistogram returns an OpenCensus cumulative distribution
// metric named name, with unit "Seconds": sum 35, count 18, explicit bucket
// bounds [0, 10] and bucket counts [5, 6, 7].
func generateTestDoubleHistogram(name string) *metricspb.Metric {
	return &metricspb.Metric{
		MetricDescriptor: &metricspb.MetricDescriptor{
			Name: name,
			Type: metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION,
			Unit: "Seconds",
			LabelKeys: []*metricspb.LabelKey{
				{Key: "label1"},
			},
		},
		Timeseries: []*metricspb.TimeSeries{
			{
				LabelValues: []*metricspb.LabelValue{
					{Value: "value1", HasValue: true},
					{Value: "value2", HasValue: true},
				},
				Points: []*metricspb.Point{
					{
						Value: &metricspb.Point_DistributionValue{
							DistributionValue: &metricspb.DistributionValue{
								Sum:   35.0,
								Count: 18,
								BucketOptions: &metricspb.DistributionValue_BucketOptions{
									Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
										Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
											Bounds: []float64{0, 10},
										},
									},
								},
								Buckets: []*metricspb.DistributionValue_Bucket{
									{
										Count: 5,
									},
									{
										Count: 6,
									},
									{
										Count: 7,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}
// generateTestSummary builds an OC summary metric named name: sum 15.0,
// count 5, with percentile snapshot values at the 0th (1) and 100th (5)
// percentiles.
func generateTestSummary(name string) *metricspb.Metric {
	descriptor := &metricspb.MetricDescriptor{
		Name:      name,
		Type:      metricspb.MetricDescriptor_SUMMARY,
		Unit:      "Seconds",
		LabelKeys: []*metricspb.LabelKey{{Key: "label1"}},
	}
	snapshot := &metricspb.SummaryValue_Snapshot{
		Count: &wrappers.Int64Value{Value: 5},
		Sum:   &wrappers.DoubleValue{Value: 15.0},
		PercentileValues: []*metricspb.SummaryValue_Snapshot_ValueAtPercentile{
			{Percentile: 0.0, Value: 1},
			{Percentile: 100.0, Value: 5},
		},
	}
	summary := &metricspb.SummaryValue{
		Sum:      &wrappers.DoubleValue{Value: 15.0},
		Count:    &wrappers.Int64Value{Value: 5},
		Snapshot: snapshot,
	}
	series := &metricspb.TimeSeries{
		LabelValues: []*metricspb.LabelValue{
			{Value: "value1", HasValue: true},
		},
		Points: []*metricspb.Point{
			{Value: &metricspb.Point_SummaryValue{SummaryValue: summary}},
		},
	}
	return &metricspb.Metric{
		MetricDescriptor: descriptor,
		Timeseries:       []*metricspb.TimeSeries{series},
	}
}
// TestIntDataPointSliceAt verifies that IntDataPointSlice.At converts pdata
// int data points into DataPoints, both with and without cumulative-to-rate
// conversion.
func TestIntDataPointSliceAt(t *testing.T) {
	instrLibName := "cloudwatch-otel"
	labels := map[string]string{"label1": "value1"}
	// Key identifying this timeseries in the shared rate-calculator state.
	rateKeys := rateKeyParams{
		namespaceKey:  "namespace",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
	}
	testCases := []struct {
		testName           string
		needsCalculateRate bool
		value              interface{}
		// Expected DataPoint value after (optional) rate conversion.
		calculatedValue interface{}
	}{
		{
			"no rate calculation",
			false,
			int64(-17),
			float64(-17),
		},
		{
			// First observation of a cumulative series yields rate 0.
			"w/ 1st rate calculation",
			true,
			int64(1),
			float64(0),
		},
		{
			"w/ 2nd rate calculation",
			true,
			int64(2),
			float64(1),
		},
	}
	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			timestamp := time.Now().UnixNano() / int64(time.Millisecond)
			testDPS := pdata.NewIntDataPointSlice()
			testDPS.Resize(1)
			testDP := testDPS.At(0)
			testDP.SetValue(tc.value.(int64))
			testDP.LabelsMap().InitFromMap(labels)
			dps := IntDataPointSlice{
				instrLibName,
				rateCalculationMetadata{
					tc.needsCalculateRate,
					rateKeys,
					timestamp,
				},
				testDPS,
			}
			expectedDP := DataPoint{
				Value: tc.calculatedValue,
				Labels: map[string]string{
					oTellibDimensionKey: instrLibName,
					"label1":            "value1",
				},
			}
			assert.Equal(t, 1, dps.Len())
			dp := dps.At(0)
			// The second rate calculation depends on real elapsed wall-clock
			// time, so it is only compared within a tolerance.
			if strings.Contains(tc.testName, "2nd rate") {
				assert.InDelta(t, expectedDP.Value.(float64), dp.Value.(float64), 0.01)
			} else {
				assert.Equal(t, expectedDP, dp)
			}
			// sleep 1s for verifying the cumulative metric delta rate
			time.Sleep(1000 * time.Millisecond)
		})
	}
}
// TestDoubleDataPointSliceAt verifies that DoubleDataPointSlice.At converts
// pdata double data points into DataPoints, both with and without
// cumulative-to-rate conversion.
func TestDoubleDataPointSliceAt(t *testing.T) {
	instrLibName := "cloudwatch-otel"
	labels := map[string]string{"label1": "value1"}
	// Key identifying this timeseries in the shared rate-calculator state.
	rateKeys := rateKeyParams{
		namespaceKey:  "namespace",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
	}
	testCases := []struct {
		testName           string
		needsCalculateRate bool
		value              interface{}
		// Expected DataPoint value after (optional) rate conversion.
		calculatedValue interface{}
	}{
		{
			"no rate calculation",
			false,
			float64(0.3),
			float64(0.3),
		},
		{
			// First observation of a cumulative series yields rate 0.
			"w/ 1st rate calculation",
			true,
			float64(0.4),
			float64(0.0),
		},
		{
			"w/ 2nd rate calculation",
			true,
			float64(0.5),
			float64(0.1),
		},
	}
	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			timestamp := time.Now().UnixNano() / int64(time.Millisecond)
			testDPS := pdata.NewDoubleDataPointSlice()
			testDPS.Resize(1)
			testDP := testDPS.At(0)
			testDP.SetValue(tc.value.(float64))
			testDP.LabelsMap().InitFromMap(labels)
			dps := DoubleDataPointSlice{
				instrLibName,
				rateCalculationMetadata{
					tc.needsCalculateRate,
					rateKeys,
					timestamp,
				},
				testDPS,
			}
			expectedDP := DataPoint{
				Value: tc.calculatedValue,
				Labels: map[string]string{
					oTellibDimensionKey: instrLibName,
					"label1":            "value1",
				},
			}
			assert.Equal(t, 1, dps.Len())
			dp := dps.At(0)
			// The second rate calculation depends on real elapsed wall-clock
			// time, so it is only compared within a tolerance.
			if strings.Contains(tc.testName, "2nd rate") {
				assert.InDelta(t, expectedDP.Value.(float64), dp.Value.(float64), 0.002)
			} else {
				assert.Equal(t, expectedDP, dp)
			}
			// sleep 1s for verifying the cumulative metric delta rate
			time.Sleep(1000 * time.Millisecond)
		})
	}
}
// TestDoubleHistogramDataPointSliceAt verifies that a histogram data point is
// converted into a DataPoint carrying CWMetricStats (sum/count) plus the
// instrumentation-library and metric labels.
func TestDoubleHistogramDataPointSliceAt(t *testing.T) {
	instrLibName := "cloudwatch-otel"
	labels := map[string]string{"label1": "value1"}
	histogramDPS := pdata.NewDoubleHistogramDataPointSlice()
	histogramDPS.Resize(1)
	histogramDP := histogramDPS.At(0)
	histogramDP.SetCount(uint64(17))
	histogramDP.SetSum(float64(17.13))
	histogramDP.SetBucketCounts([]uint64{1, 2, 3})
	histogramDP.SetExplicitBounds([]float64{1, 2, 3})
	histogramDP.LabelsMap().InitFromMap(labels)
	dps := DoubleHistogramDataPointSlice{
		instrLibName,
		histogramDPS,
	}
	assert.Equal(t, 1, dps.Len())
	want := DataPoint{
		Value: &CWMetricStats{
			Sum:   17.13,
			Count: 17,
		},
		Labels: map[string]string{
			oTellibDimensionKey: instrLibName,
			"label1":            "value1",
		},
	}
	got := dps.At(0)
	assert.Equal(t, want, got)
}
// TestDoubleSummaryDataPointSliceAt verifies that a summary data point is
// converted into a DataPoint carrying CWMetricStats (max/min/count/sum) plus
// the instrumentation-library and metric labels.
func TestDoubleSummaryDataPointSliceAt(t *testing.T) {
	instrLibName := "cloudwatch-otel"
	labels := map[string]string{"label1": "value1"}
	summaryDPS := pdata.NewDoubleSummaryDataPointSlice()
	summaryDPS.Resize(1)
	summaryDP := summaryDPS.At(0)
	summaryDP.SetCount(uint64(17))
	summaryDP.SetSum(float64(17.13))
	// Quantile 0 -> min (1), quantile 100 -> max (5).
	summaryDP.QuantileValues().Resize(2)
	minQuantile := summaryDP.QuantileValues().At(0)
	minQuantile.SetQuantile(0)
	minQuantile.SetValue(float64(1))
	maxQuantile := summaryDP.QuantileValues().At(1)
	maxQuantile.SetQuantile(100)
	maxQuantile.SetValue(float64(5))
	summaryDP.LabelsMap().InitFromMap(labels)
	dps := DoubleSummaryDataPointSlice{
		instrLibName,
		summaryDPS,
	}
	assert.Equal(t, 1, dps.Len())
	want := DataPoint{
		Value: &CWMetricStats{
			Max:   5,
			Min:   1,
			Count: 17,
			Sum:   17.13,
		},
		Labels: map[string]string{
			oTellibDimensionKey: instrLibName,
			"label1":            "value1",
		},
	}
	got := dps.At(0)
	assert.Equal(t, want, got)
}
// TestCreateLabels verifies createLabels copies a pdata string map into a
// plain map, and appends the OTel instrumentation-library dimension when an
// instrumentation library name is supplied.
func TestCreateLabels(t *testing.T) {
	expectedLabels := map[string]string{
		"a": "A",
		"b": "B",
		"c": "C",
	}
	labelsMap := pdata.NewStringMap().InitFromMap(expectedLabels)
	labels := createLabels(labelsMap, noInstrumentationLibraryName)
	assert.Equal(t, expectedLabels, labels)
	// With instrumentation library name
	labels = createLabels(labelsMap, "cloudwatch-otel")
	expectedLabels[oTellibDimensionKey] = "cloudwatch-otel"
	assert.Equal(t, expectedLabels, labels)
}
// TestCalculateRate exercises the shared rate calculator with two independent
// keys across three timestamps 10s apart. The calculator is stateful, so the
// order of the calls below matters: the first observation of each key returns
// 0, and subsequent observations return (delta value)/(delta time).
func TestCalculateRate(t *testing.T) {
	intRateKey := "foo"
	doubleRateKey := "bar"
	time1 := time.Now().UnixNano() / int64(time.Millisecond)
	time2 := time.Unix(0, time1*int64(time.Millisecond)).Add(time.Second*10).UnixNano() / int64(time.Millisecond)
	time3 := time.Unix(0, time2*int64(time.Millisecond)).Add(time.Second*10).UnixNano() / int64(time.Millisecond)
	intVal1 := float64(0)
	intVal2 := float64(10)
	intVal3 := float64(200)
	doubleVal1 := 0.0
	doubleVal2 := 5.0
	doubleVal3 := 15.1
	// First observation of each key: rate is 0 by definition.
	rate := calculateRate(intRateKey, intVal1, time1)
	assert.Equal(t, float64(0), rate)
	rate = calculateRate(doubleRateKey, doubleVal1, time1)
	assert.Equal(t, float64(0), rate)
	// Second observation: (10-0)/10s = 1 and (5-0)/10s = 0.5.
	rate = calculateRate(intRateKey, intVal2, time2)
	assert.InDelta(t, float64(1), rate, 0.1)
	rate = calculateRate(doubleRateKey, doubleVal2, time2)
	assert.InDelta(t, 0.5, rate, 0.1)
	// Test change of data type
	rate = calculateRate(intRateKey, doubleVal3, time3)
	assert.InDelta(t, float64(0.51), rate, 0.1)
	rate = calculateRate(doubleRateKey, intVal3, time3)
	assert.InDelta(t, float64(19.5), rate, 0.1)
}
// calculateRate feeds value observed at timestampMs (Unix epoch milliseconds)
// into the shared rateMetricCalculator under metricName and returns the
// computed rate. The error from Calculate is intentionally discarded; this is
// a test helper where only the resulting value is asserted on.
func calculateRate(metricName string, value float64, timestampMs int64) interface{} {
	// Use ts, not "time", to avoid shadowing the time package.
	ts := time.Unix(0, timestampMs*int64(time.Millisecond))
	val, _ := rateMetricCalculator.Calculate(metricName, nil, value, ts)
	return val
}
// TestGetDataPoints verifies that getDataPoints wraps each pdata metric type
// in the matching DataPoints implementation (Int/Double/Histogram/Summary
// slices), that sums request rate calculation while gauges do not, and that
// unhandled and nil metrics return nil (with a warning log for the former).
func TestGetDataPoints(t *testing.T) {
	metadata := CWMetricMetadata{
		Namespace:                  "Namespace",
		TimestampMs:                time.Now().UnixNano() / int64(time.Millisecond),
		LogGroup:                   "log-group",
		LogStream:                  "log-stream",
		InstrumentationLibraryName: "cloudwatch-otel",
	}
	testCases := []struct {
		testName           string
		metric             *metricspb.Metric
		expectedDataPoints DataPoints
	}{
		{
			// Gauges do not need rate calculation (needsCalculateRate=false).
			"Int gauge",
			generateTestIntGauge("foo"),
			IntDataPointSlice{
				metadata.InstrumentationLibraryName,
				rateCalculationMetadata{
					false,
					rateKeyParams{
						namespaceKey:  metadata.Namespace,
						metricNameKey: "foo",
						logGroupKey:   metadata.LogGroup,
						logStreamKey:  metadata.LogStream,
					},
					metadata.TimestampMs,
				},
				pdata.IntDataPointSlice{},
			},
		},
		{
			"Double gauge",
			generateTestDoubleGauge("foo"),
			DoubleDataPointSlice{
				metadata.InstrumentationLibraryName,
				rateCalculationMetadata{
					false,
					rateKeyParams{
						namespaceKey:  metadata.Namespace,
						metricNameKey: "foo",
						logGroupKey:   metadata.LogGroup,
						logStreamKey:  metadata.LogStream,
					},
					metadata.TimestampMs,
				},
				pdata.DoubleDataPointSlice{},
			},
		},
		{
			// Cumulative sums request rate calculation (needsCalculateRate=true).
			"Int sum",
			generateTestIntSum("foo"),
			IntDataPointSlice{
				metadata.InstrumentationLibraryName,
				rateCalculationMetadata{
					true,
					rateKeyParams{
						namespaceKey:  metadata.Namespace,
						metricNameKey: "foo",
						logGroupKey:   metadata.LogGroup,
						logStreamKey:  metadata.LogStream,
					},
					metadata.TimestampMs,
				},
				pdata.IntDataPointSlice{},
			},
		},
		{
			"Double sum",
			generateTestDoubleSum("foo"),
			DoubleDataPointSlice{
				metadata.InstrumentationLibraryName,
				rateCalculationMetadata{
					true,
					rateKeyParams{
						namespaceKey:  metadata.Namespace,
						metricNameKey: "foo",
						logGroupKey:   metadata.LogGroup,
						logStreamKey:  metadata.LogStream,
					},
					metadata.TimestampMs,
				},
				pdata.DoubleDataPointSlice{},
			},
		},
		{
			"Double histogram",
			generateTestDoubleHistogram("foo"),
			DoubleHistogramDataPointSlice{
				metadata.InstrumentationLibraryName,
				pdata.DoubleHistogramDataPointSlice{},
			},
		},
		{
			"Summary",
			generateTestSummary("foo"),
			DoubleSummaryDataPointSlice{
				metadata.InstrumentationLibraryName,
				pdata.DoubleSummaryDataPointSlice{},
			},
		},
	}
	for _, tc := range testCases {
		oc := internaldata.MetricsData{
			Metrics: []*metricspb.Metric{tc.metric},
		}
		// Retrieve *pdata.Metric
		rm := internaldata.OCToMetrics(oc).ResourceMetrics().At(0)
		metric := rm.InstrumentationLibraryMetrics().At(0).Metrics().At(0)
		logger := zap.NewNop()
		expectedLabels := pdata.NewStringMap().InitFromMap(map[string]string{"label1": "value1"})
		t.Run(tc.testName, func(t *testing.T) {
			dps := getDataPoints(&metric, metadata, logger)
			assert.NotNil(t, dps)
			assert.Equal(t, reflect.TypeOf(tc.expectedDataPoints), reflect.TypeOf(dps))
			// Per-type assertions: compare metadata and the single converted
			// data point against the values baked into the generators above.
			switch convertedDPS := dps.(type) {
			case IntDataPointSlice:
				expectedDPS := tc.expectedDataPoints.(IntDataPointSlice)
				assert.Equal(t, metadata.InstrumentationLibraryName, convertedDPS.instrumentationLibraryName)
				assert.Equal(t, expectedDPS.rateCalculationMetadata, convertedDPS.rateCalculationMetadata)
				assert.Equal(t, 1, convertedDPS.Len())
				dp := convertedDPS.IntDataPointSlice.At(0)
				assert.Equal(t, int64(1), dp.Value())
				assert.Equal(t, expectedLabels, dp.LabelsMap())
			case DoubleDataPointSlice:
				expectedDPS := tc.expectedDataPoints.(DoubleDataPointSlice)
				assert.Equal(t, metadata.InstrumentationLibraryName, convertedDPS.instrumentationLibraryName)
				assert.Equal(t, expectedDPS.rateCalculationMetadata, convertedDPS.rateCalculationMetadata)
				assert.Equal(t, 1, convertedDPS.Len())
				dp := convertedDPS.DoubleDataPointSlice.At(0)
				assert.Equal(t, 0.1, dp.Value())
				assert.Equal(t, expectedLabels, dp.LabelsMap())
			case DoubleHistogramDataPointSlice:
				assert.Equal(t, metadata.InstrumentationLibraryName, convertedDPS.instrumentationLibraryName)
				assert.Equal(t, 1, convertedDPS.Len())
				dp := convertedDPS.DoubleHistogramDataPointSlice.At(0)
				assert.Equal(t, 35.0, dp.Sum())
				assert.Equal(t, uint64(18), dp.Count())
				assert.Equal(t, []float64{0, 10}, dp.ExplicitBounds())
				assert.Equal(t, expectedLabels, dp.LabelsMap())
			case DoubleSummaryDataPointSlice:
				assert.Equal(t, metadata.InstrumentationLibraryName, convertedDPS.instrumentationLibraryName)
				assert.Equal(t, 1, convertedDPS.Len())
				dp := convertedDPS.DoubleSummaryDataPointSlice.At(0)
				assert.Equal(t, 15.0, dp.Sum())
				assert.Equal(t, uint64(5), dp.Count())
				assert.Equal(t, 2, dp.QuantileValues().Len())
				assert.Equal(t, float64(1), dp.QuantileValues().At(0).Value())
				assert.Equal(t, float64(5), dp.QuantileValues().At(1).Value())
			}
		})
	}
	t.Run("Unhandled metric type", func(t *testing.T) {
		// IntHistogram has no DataPoints implementation; expect nil plus a
		// single structured warning describing the metric.
		metric := pdata.NewMetric()
		metric.SetName("foo")
		metric.SetUnit("Count")
		metric.SetDataType(pdata.MetricDataTypeIntHistogram)
		obs, logs := observer.New(zap.WarnLevel)
		logger := zap.New(obs)
		dps := getDataPoints(&metric, metadata, logger)
		assert.Nil(t, dps)
		// Test output warning logs
		expectedLogs := []observer.LoggedEntry{
			{
				Entry: zapcore.Entry{Level: zap.WarnLevel, Message: "Unhandled metric data type."},
				Context: []zapcore.Field{
					zap.String("DataType", "IntHistogram"),
					zap.String("Name", "foo"),
					zap.String("Unit", "Count"),
				},
			},
		}
		assert.Equal(t, 1, logs.Len())
		assert.Equal(t, expectedLogs, logs.AllUntimed())
	})
	t.Run("Nil metric", func(t *testing.T) {
		dps := getDataPoints(nil, metadata, zap.NewNop())
		assert.Nil(t, dps)
	})
}
// BenchmarkGetDataPoints measures getDataPoints over one metric of each
// supported type. Conversion from OC is done once up front; only the
// getDataPoints calls are inside the timed region (b.ResetTimer).
func BenchmarkGetDataPoints(b *testing.B) {
	oc := internaldata.MetricsData{
		Metrics: []*metricspb.Metric{
			generateTestIntGauge("int-gauge"),
			generateTestDoubleGauge("double-gauge"),
			generateTestIntSum("int-sum"),
			generateTestDoubleSum("double-sum"),
			generateTestDoubleHistogram("double-histogram"),
			generateTestSummary("summary"),
		},
	}
	rms := internaldata.OCToMetrics(oc).ResourceMetrics()
	metrics := rms.At(0).InstrumentationLibraryMetrics().At(0).Metrics()
	numMetrics := metrics.Len()
	// Fixed timestamp keeps rate-calculation behavior stable across runs.
	metadata := CWMetricMetadata{
		Namespace:                  "Namespace",
		TimestampMs:                int64(1596151098037),
		LogGroup:                   "log-group",
		LogStream:                  "log-stream",
		InstrumentationLibraryName: "cloudwatch-otel",
	}
	logger := zap.NewNop()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		for i := 0; i < numMetrics; i++ {
			metric := metrics.At(i)
			getDataPoints(&metric, metadata, logger)
		}
	}
}
// TestGetSortedLabelsEquals verifies that getSortedLabels normalizes map
// iteration order: two maps with the same entries inserted in different
// orders produce equal rateKeyParams.
func TestGetSortedLabelsEquals(t *testing.T) {
	sortedLabels1 := getSortedLabels(map[string]string{"k1": "v1", "k2": "v2"})
	sortedLabels2 := getSortedLabels(map[string]string{"k2": "v2", "k1": "v1"})
	params1 := rateKeyParams{
		namespaceKey:  "namespace",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
		labels:        sortedLabels1,
	}
	params2 := rateKeyParams{
		namespaceKey:  "namespace",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
		labels:        sortedLabels2,
	}
	assert.Equal(t, params1, params2)
}
// TestGetSortedLabelsNotEqual verifies that differing label values ("v1" vs
// "v3" for k1) yield unequal rateKeyParams even after sorting.
func TestGetSortedLabelsNotEqual(t *testing.T) {
	sortedLabels1 := getSortedLabels(map[string]string{"k1": "v1", "k2": "v2"})
	sortedLabels2 := getSortedLabels(map[string]string{"k2": "v2", "k1": "v3"})
	params1 := rateKeyParams{
		namespaceKey:  "namespace",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
		labels:        sortedLabels1,
	}
	params2 := rateKeyParams{
		namespaceKey:  "namespace",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
		labels:        sortedLabels2,
	}
	assert.NotEqual(t, params1, params2)
}
// TestGetSortedLabelsNotEqualOnPram verifies that identical sorted labels do
// not make rateKeyParams equal when another field (the namespace) differs.
// (Name typo "Pram" kept to preserve the test's external identifier.)
func TestGetSortedLabelsNotEqualOnPram(t *testing.T) {
	sortedLabels1 := getSortedLabels(map[string]string{"k1": "v1", "k2": "v2"})
	sortedLabels2 := getSortedLabels(map[string]string{"k2": "v2", "k1": "v1"})
	params1 := rateKeyParams{
		namespaceKey:  "namespaceA",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
		labels:        sortedLabels1,
	}
	params2 := rateKeyParams{
		namespaceKey:  "namespaceB",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
		labels:        sortedLabels2,
	}
	assert.NotEqual(t, params1, params2)
}
// TestGetSortedLabelsNotEqualOnEmptyLabel checks rateKeyParams comparison when
// the labels field is left at its zero value.
// NOTE(review): the function name says "NotEqual" but the assertion below is
// Equal (two identical params with empty labels compare equal) — consider
// renaming the test to match its actual assertion.
func TestGetSortedLabelsNotEqualOnEmptyLabel(t *testing.T) {
	rateKeyParams1 := rateKeyParams{
		namespaceKey:  "namespaceA",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
	}
	rateKeyParams2 := rateKeyParams{
		namespaceKey:  "namespaceA",
		metricNameKey: "foo",
		logGroupKey:   "log-group",
		logStreamKey:  "log-stream",
	}
	assert.Equal(t, rateKeyParams1, rateKeyParams2)
}
|
package main
// isSubtree reports whether tree2 occurs as a subtree of tree1: either the
// two trees match at the root, or tree2 is a subtree of one of tree1's
// children. Two empty trees count as a match; one empty tree does not.
func isSubtree(tree1 *TreeNode, tree2 *TreeNode) bool {
	switch {
	case isBothEmptyTree(tree1, tree2):
		return true
	case isEitherEmptyTree(tree1, tree2):
		return false
	case isSameTree(tree1, tree2):
		return true
	default:
		return isSubtree(tree1.Left, tree2) || isSubtree(tree1.Right, tree2)
	}
}
// isSameTree reports whether tree1 and tree2 are structurally identical with
// equal values at every node.
func isSameTree(tree1 *TreeNode, tree2 *TreeNode) bool {
	switch {
	case isBothEmptyTree(tree1, tree2):
		return true
	case isEitherEmptyTree(tree1, tree2):
		return false
	case !isSameRootVal(tree1, tree2):
		return false
	default:
		return isSameTree(tree1.Left, tree2.Left) && isSameTree(tree1.Right, tree2.Right)
	}
}
// isBothEmptyTree reports whether both trees are nil.
func isBothEmptyTree(tree1 *TreeNode, tree2 *TreeNode) bool {
	if tree1 != nil {
		return false
	}
	return tree2 == nil
}
// isEitherEmptyTree reports whether at least one of the trees is nil.
func isEitherEmptyTree(tree1 *TreeNode, tree2 *TreeNode) bool {
	if tree1 == nil {
		return true
	}
	return tree2 == nil
}
// isSameRootVal reports whether the two root nodes hold equal values.
// Callers must ensure both nodes are non-nil (guarded by the emptiness
// checks above).
func isSameRootVal(tree1 *TreeNode, tree2 *TreeNode) bool {
	return tree1.Val == tree2.Val
}
|
package sockguard
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"regexp"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
)
// Credit: http://hassansin.github.io/Unit-Testing-http-client-in-Go
// roundTripFunc adapts a plain function into an http.RoundTripper so tests
// can stub http.Client transport behavior without a real network.
type roundTripFunc func(req *http.Request) *http.Response
// RoundTrip implements http.RoundTripper by delegating to f. It never
// returns an error; failures are modeled via the response status code.
func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req), nil
}
// mockRulesDirector returns a reusable RulesDirector for tests: a plain HTTP
// client, owner "test-owner", and host-mode networking disallowed.
func mockRulesDirector() *RulesDirector {
	director := &RulesDirector{}
	director.Client = &http.Client{}
	director.Owner = "test-owner"
	director.AllowHostModeNetworking = false
	return director
}
// mockRulesDirectorWithUpstreamState returns the standard mock RulesDirector
// whose HTTP client is backed by a stateful mock of the upstream Docker
// daemon (containers/networks etc. tracked in us).
func mockRulesDirectorWithUpstreamState(us *upstreamState) *RulesDirector {
	director := mockRulesDirector()
	director.Client = mockRulesDirectorHttpClientWithUpstreamState(us)
	return director
}
// mockRulesDirectorHttpClientWithUpstreamState returns an http.Client whose
// transport emulates a subset of the Docker daemon API (container/image/
// network/volume inspect, network connect/disconnect/delete), backed by the
// mutable upstreamState us. Unrecognized paths/methods return 501.
func mockRulesDirectorHttpClientWithUpstreamState(us *upstreamState) *http.Client {
	return &http.Client{
		Transport: roundTripFunc(func(req *http.Request) *http.Response {
			resp := http.Response{
				// Must be set to non-nil value or it panics
				Header: make(http.Header),
			}
			re1 := regexp.MustCompile("^/v(.*)/containers/(.*)/json$")
			// TODOLATER: adjust re2 to make /json suffix optional, for non-GET?
			re2 := regexp.MustCompile("^/v(.*)/images/(.*)/json$")
			// NOTE: this regex may not cover all name variations, but will cover enough to fulfil tests
			re3 := regexp.MustCompile("^/v(.*)/networks/([A-Za-z0-9]+)(/connect|/disconnect)?$")
			re4 := regexp.MustCompile("^/v(.*)/volumes/(.*)$")
			switch {
			case re1.MatchString(req.URL.Path):
				if req.Method == "GET" {
					// inspect container - /containers/{id}/json
					parsePath := re1.FindStringSubmatch(req.URL.Path)
					if len(parsePath) == 3 {
						// Vary the response based on container ID (easiest option)
						// Partial JSON result, enough to satisfy the inspectLabels() struct
						if !us.doesContainerExist(parsePath[2]) {
							resp.StatusCode = 404
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"No such container: %s\"}", parsePath[2])))
						} else {
							containerOwnerLabel := us.ownerLabelContent(us.getContainerOwner(parsePath[2]))
							resp.StatusCode = 200
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"Id\":\"%s\",\"Config\":{\"Labels\":{%s}}}", parsePath[2], containerOwnerLabel)))
						}
					} else {
						resp.StatusCode = 501
						resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Failure parsing container ID from path - %s\n", req.URL.Path)))
					}
				} else {
					resp.StatusCode = 501
					resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Unsupported HTTP method %s for %s\n", req.Method, req.URL.Path)))
				}
			case re2.MatchString(req.URL.Path):
				switch req.Method {
				case "GET":
					// inspect image - /images/{id}/json
					parsePath := re2.FindStringSubmatch(req.URL.Path)
					if len(parsePath) == 3 {
						// Vary the response based on image ID (easiest option)
						// Partial JSON result, enough to satisfy the inspectLabels() struct
						if !us.doesImageExist(parsePath[2]) {
							resp.StatusCode = 404
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"no such image: %s: No such image: %s:latest\"}", parsePath[2], parsePath[2])))
						} else {
							imageOwnerLabel := us.ownerLabelContent(us.getImageOwner(parsePath[2]))
							resp.StatusCode = 200
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"Id\":\"%s\",\"Config\":{\"Labels\":{%s}}}", parsePath[2], imageOwnerLabel)))
						}
					} else {
						resp.StatusCode = 501
						resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Failure parsing image ID from path - %s\n", req.URL.Path)))
					}
				default:
					resp.StatusCode = 501
					resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Unsupported HTTP method %s for %s\n", req.Method, req.URL.Path)))
				}
			case re3.MatchString(req.URL.Path):
				parsePath := re3.FindStringSubmatch(req.URL.Path)
				if len(parsePath) != 4 {
					resp.StatusCode = 501
					resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Failure parsing network ID/target from path - %s\n", req.URL.Path)))
					return &resp
				}
				switch req.Method {
				case "GET":
					// inspect network - /networks/{id}
					// Vary the response based on network ID (easiest option)
					// Partial JSON result, enough to satisfy the inspectLabels() struct
					if !us.doesNetworkExist(parsePath[2]) {
						resp.StatusCode = 404
						resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"network %s not found\"}", parsePath[2])))
					} else {
						networkOwnerLabel := us.ownerLabelContent(us.getNetworkOwner(parsePath[2]))
						resp.StatusCode = 200
						resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"Id\":\"%s\",\"Labels\":{%s}}", parsePath[2], networkOwnerLabel)))
					}
				case "POST":
					switch parsePath[3] {
					case "/connect", "/disconnect":
						// connect container to network - /networks/{id}/connect
						// disconnect container to network - /networks/{id}/disconnect
						// Verify the Content-Type = application/json, will 400 without it on Docker daemon
						contentType := req.Header.Get("Content-Type")
						if contentType != "application/json" {
							resp.StatusCode = 400
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"Content-Type specified (%s) must be 'application/json'\"}", contentType)))
							return &resp
						}
						// Parse out the Container from request body
						var decoded map[string]interface{}
						if err := json.NewDecoder(req.Body).Decode(&decoded); err != nil {
							resp.StatusCode = 500
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(err.Error()))
							return &resp
						}
						// Guard the type assertion: a missing/non-string
						// "Container" field previously panicked the transport.
						useContainer, ok := decoded["Container"].(string)
						if !ok {
							resp.StatusCode = 400
							resp.Body = ioutil.NopCloser(bytes.NewBufferString("{\"message\":\"Container missing or not a string in request body\"}"))
							return &resp
						}
						// Bare minimum response format here, mostly response code
						if !us.doesNetworkExist(parsePath[2]) {
							resp.StatusCode = 404
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"network %s not found\"}", parsePath[2])))
						} else {
							var err error
							if parsePath[3] == "/connect" {
								useContainerAliases := []string{}
								// If there are Aliases specified, pass them in.
								parseContainerEndpointConfig, ok := decoded["EndpointConfig"]
								if ok {
									parseContainerAliases, ok2 := parseContainerEndpointConfig.(map[string]interface{})["Aliases"].([]interface{})
									if ok2 {
										for _, parseContainerAlias := range parseContainerAliases {
											parsedContainerAlias := parseContainerAlias.(string)
											if parsedContainerAlias != "" {
												useContainerAliases = append(useContainerAliases, parsedContainerAlias)
											}
										}
									}
								}
								err = us.connectContainerToNetwork(useContainer, parsePath[2], useContainerAliases)
							} else if parsePath[3] == "/disconnect" {
								err = us.disconnectContainerToNetwork(useContainer, parsePath[2])
							}
							if err != nil {
								resp.StatusCode = 500
								resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"error %sing container '%s' to/from network '%s': %s\"}", parsePath[3], useContainer, parsePath[2], err.Error())))
								return &resp
							}
							resp.StatusCode = 200
							resp.Body = ioutil.NopCloser(bytes.NewBufferString("OK"))
						}
					default:
						// unknown
						resp.StatusCode = 501
						resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("POST not supported for %s\n", req.URL.Path)))
					}
				case "DELETE":
					// delete network - /networks/{id}
					// Bare minimum response format here, mostly response code
					if !us.doesNetworkExist(parsePath[2]) {
						resp.StatusCode = 404
						resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"network %s not found\"}", parsePath[2])))
					} else {
						us.deleteNetwork(parsePath[2])
						resp.StatusCode = 200
						resp.Body = ioutil.NopCloser(bytes.NewBufferString("OK"))
					}
				default:
					resp.StatusCode = 501
					resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Unsupported HTTP method %s for %s\n", req.Method, req.URL.Path)))
				}
			case re4.MatchString(req.URL.Path):
				switch req.Method {
				case "GET":
					// inspect volume - /volume/{name}
					parsePath := re4.FindStringSubmatch(req.URL.Path)
					if len(parsePath) == 3 {
						// Vary the response based on volume name (easiest option)
						// Partial JSON result, enough to satisfy the inspectLabels() struct
						if !us.doesVolumeExist(parsePath[2]) {
							resp.StatusCode = 404
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"message\":\"get %s: no such volume\"}", parsePath[2])))
						} else {
							volumeOwnerLabel := us.ownerLabelContent(us.getVolumeOwner(parsePath[2]))
							resp.StatusCode = 200
							resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("{\"Name\":\"%s\",\"Labels\":{%s}}", parsePath[2], volumeOwnerLabel)))
						}
					} else {
						resp.StatusCode = 501
						resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Failure parsing volume name from path - %s\n", req.URL.Path)))
					}
				default:
					resp.StatusCode = 501
					resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Unsupported HTTP method %s for %s\n", req.Method, req.URL.Path)))
				}
			default:
				resp.StatusCode = 501
				resp.Body = ioutil.NopCloser(bytes.NewBufferString(fmt.Sprintf("Path %s not implemented\n", req.URL.Path)))
			}
			return &resp
		}),
	}
}
// Reusable mock log.Logger instance
func mockLogger() *log.Logger {
return log.New(os.Stderr, "MOCK: ", log.Ltime|log.Lmicroseconds)
}
// TestAddLabelsToQueryStringFilters verifies that the director injects the
// sockguard owner label into the "filters" query parameter of container-list
// requests, preserving any filters the client already supplied.
func TestAddLabelsToQueryStringFilters(t *testing.T) {
	l := mockLogger()
	r := mockRulesDirector()
	// key = client side URL (inc query params)
	// value = expected request URL on upstream side (inc query params)
	// TODOLATER: would it be more elegant to write these as URL decoded for readability? will need to change the map[string]string to still send the full docker-compose ps URLs
	tests := map[string]string{
		// docker ps - without any filters
		"/v1.32/containers/json": "/v1.32/containers/json?filters=%7B%22label%22%3A%5B%22com.buildkite.sockguard.owner%3Dtest-owner%22%5D%7D",
		// docker ps - with a key=value: true filter
		"/v1.32/containers/json?filters=%7B%22label%22%3A%7B%22test%3Dblah%22%3Atrue%7D%7D": "/v1.32/containers/json?filters=%7B%22label%22%3A%5B%22test%3Dblah%22%2C%22com.buildkite.sockguard.owner%3Dtest-owner%22%5D%7D",
		// docker-compose ps - first list API call
		"/v1.32/containers/json?limit=-1&all=1&size=0&trunc_cmd=0&filters=%7B%22label%22%3A+%5B%22com.docker.compose.project%3Dblah%22%2C+%22com.docker.compose.oneoff%3DFalse%22%5D%7D": "/v1.32/containers/json?all=1&filters=%7B%22label%22%3A%5B%22com.docker.compose.project%3Dblah%22%2C%22com.docker.compose.oneoff%3DFalse%22%2C%22com.buildkite.sockguard.owner%3Dtest-owner%22%5D%7D&limit=-1&size=0&trunc_cmd=0",
		// docker-compose ps - second list API call
		"/v1.32/containers/json?limit=-1&all=0&size=0&trunc_cmd=0&filters=%7B%22label%22%3A+%5B%22com.docker.compose.project%3Dblah%22%2C+%22com.docker.compose.oneoff%3DTrue%22%5D%7D": "/v1.32/containers/json?all=0&filters=%7B%22label%22%3A%5B%22com.docker.compose.project%3Dblah%22%2C%22com.docker.compose.oneoff%3DTrue%22%2C%22com.buildkite.sockguard.owner%3Dtest-owner%22%5D%7D&limit=-1&size=0&trunc_cmd=0",
	}
	for cReqUrl, uReqUrl := range tests {
		// The "upstream" handler only inspects the rewritten request URL;
		// its response content is irrelevant to this test.
		upstream := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			if req.Method != "GET" {
				t.Errorf("%s : Expected HTTP method GET got %s", uReqUrl, req.Method)
			}
			// log.Printf("%s %s", req.Method, req.URL.String())
			// Validate the request URL against expected.
			if req.URL.String() != uReqUrl {
				// Also print URL-decoded forms to make diffs readable.
				decodeUReqUrl, err1 := url.QueryUnescape(uReqUrl)
				decodeInReqUrl, err2 := url.QueryUnescape(req.URL.String())
				if err1 == nil && err2 == nil {
					t.Errorf("Expected:\n%s\ngot:\n%s\n\n(URL decoded) Expected:\n%s\ngot:\n%s\n", uReqUrl, req.URL.String(), decodeUReqUrl, decodeInReqUrl)
				} else {
					t.Errorf("Expected:\n%s\ngot:\n%s\n\n(errors trying to URL decode)\n", uReqUrl, req.URL.String())
				}
			}
			// Return empty JSON, the request is whats important not the response
			fmt.Fprintf(w, `{}`)
		})
		// Credit: https://blog.questionable.services/article/testing-http-handlers-go/
		// Create a request to pass to our handler
		req, err := http.NewRequest("GET", cReqUrl, nil)
		if err != nil {
			t.Fatal(err)
		}
		// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
		rr := httptest.NewRecorder()
		handler := r.addLabelsToQueryStringFilters(l, req, upstream)
		// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
		// directly and pass in our Request and ResponseRecorder.
		handler.ServeHTTP(rr, req)
		// Check the status code is what we expect.
		if status := rr.Code; status != http.StatusOK {
			t.Errorf("%s : handler returned wrong status code: got %v want %v", cReqUrl, status, http.StatusOK)
		}
		// Don't bother checking the response, it's not relevant in mocked context. The request side is more important here.
	}
}
// loadFixtureFile reads ./fixtures/<filenamePart>.json and returns its
// contents with leading/trailing whitespace trimmed, or the read error.
func loadFixtureFile(filenamePart string) (string, error) {
	// Go naming: camelCase parameter (was snake_case filename_part).
	data, err := ioutil.ReadFile(fmt.Sprintf("./fixtures/%s.json", filenamePart))
	if err != nil {
		return "", err
	}
	// Remove any whitespace/newlines from the start/end of the file
	return strings.TrimSpace(string(data)), nil
}
// Used for handleContainerCreate(), handleNetworkCreate(), and friends
type handleCreateTests struct {
	// rd is the RulesDirector configuration under test for this scenario.
	rd *RulesDirector
	// Expected StatusCode
	esc int
}
func TestHandleContainerCreate(t *testing.T) {
l := mockLogger()
// For each of the tests below, there will be 2 files in the fixtures/ dir:
// - <key>_in.json - the client request sent to the director
// - <key>_expected.json - the expected request sent to the upstream
tests := map[string]handleCreateTests{
// Defaults
"containers_create_1": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
},
esc: 200,
},
// Defaults + custom Owner
"containers_create_2": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
Owner: "test-owner",
},
esc: 200,
},
// Defaults with Binds disabled, and a bind sent (should fail)
"containers_create_3": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
AllowBinds: []string{},
},
esc: 401,
},
// Defaults + Binds enabled + a matching bind (should pass)
"containers_create_4": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
AllowBinds: []string{"/tmp"},
},
esc: 200,
},
// Defaults + Binds enabled + a non-matching bind (should fail)
"containers_create_5": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
AllowBinds: []string{"/tmp"},
},
esc: 401,
},
// Defaults + Host Mode Networking + request with NetworkMode=host (should pass)
"containers_create_6": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
AllowHostModeNetworking: true,
},
esc: 200,
},
// Defaults + Host Mode Networking disabled + request with NetworkMode=host (should fail)
"containers_create_7": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
AllowHostModeNetworking: false,
},
esc: 401,
},
// Defaults + Cgroup Parent
"containers_create_8": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
ContainerCgroupParent: "some-cgroup",
},
esc: 200,
},
// Defaults + Force User
"containers_create_9": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
User: "someuser",
},
esc: 200,
},
// Defaults + a custom label on request
"containers_create_10": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
},
esc: 200,
},
// Defaults + -docker-link sockguard + requesting default bridge network
"containers_create_11": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
ContainerDockerLink: "asdf:zzzz",
},
esc: 200,
},
// Defaults + -docker-link sockguard flag + requesting a user defined bridge network
"containers_create_12": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
ContainerDockerLink: "asdf:zzzz",
},
esc: 200,
},
// Defaults + try set a CgroupParent (should fail, only permitted if sockguard started with -cgroup-parent)
"containers_create_13": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
},
esc: 401,
},
// Defaults + -docker-link sockguard flag + requesting default bridge network + another arbitrary --link from client
"containers_create_14": handleCreateTests{
rd: &RulesDirector{
Client: &http.Client{},
// This is what's set in main() as the default, assuming running in a container so PID 1
Owner: "sockguard-pid-1",
ContainerDockerLink: "cccc:dddd",
},
esc: 200,
},
}
reqUrl := "/v1.37/containers/create"
expectedUrl := "/v1.37/containers/create"
// TODOLATER: consolidate/DRY this with TestHandleNetworkCreate()?
for k, v := range tests {
expectedReqJson, err := loadFixtureFile(fmt.Sprintf("%s_expected", k))
if err != nil {
t.Fatal(err)
}
upstream := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if req.Method != "POST" {
t.Errorf("%s : Expected HTTP method POST got %s", k, req.Method)
}
// log.Printf("%s %s", req.Method, req.URL.String())
// Validate the request URL against expected.
if req.URL.String() != expectedUrl {
t.Errorf("%s : Expected URL %s got %s", k, expectedUrl, req.URL.String())
}
// Validate the body has been modified as expected
body, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatal(err)
}
if string(body) != string(expectedReqJson) {
t.Errorf("%s : Expected request body JSON:\n%s\nGot request body JSON:\n%s\n", k, string(expectedReqJson), string(body))
}
// TODOLATER: append to "us" (upstream state) the new container, and any connected networks? we only check the ciagentcontainer
// when verifying state further down right now, which is the key consideration.
// Return empty JSON, the request is whats important not the response
fmt.Fprintf(w, `{}`)
})
// Credit: https://blog.questionable.services/article/testing-http-handlers-go/
// Create a request to pass to our handler
containerCreateJson, err := loadFixtureFile(fmt.Sprintf("%s_in", k))
if err != nil {
t.Fatal(err)
}
req, err := http.NewRequest("POST", reqUrl, strings.NewReader(containerCreateJson))
if err != nil {
t.Fatal(err)
}
// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
rr := httptest.NewRecorder()
handler := v.rd.handleContainerCreate(l, req, upstream)
// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
// directly and pass in our Request and ResponseRecorder.
handler.ServeHTTP(rr, req)
// Check the status code is what we expect.
//fmt.Printf("%s : SC %d ESC %d\n", k, rr.Code, v.esc)
if status := rr.Code; status != v.esc {
// Get the body out of the response to return with the error
respBody, err := ioutil.ReadAll(rr.Body)
if err == nil {
t.Errorf("%s : handler returned wrong status code: got %v want %v. Response body: %s", k, status, v.esc, string(respBody))
} else {
t.Errorf("%s : handler returned wrong status code: got %v want %v. Error reading response body: %s", k, status, v.esc, err.Error())
}
}
// State of ciagentcontainer network attachments is not relevant for a general container creation call,
// only matters for network create/delete.
// Don't bother checking the response, it's not relevant in mocked context. The request side is more important here.
}
}
// TestSplitContainerDockerLink verifies splitContainerDockerLink for both
// valid inputs ("container" or "container:alias" — with no alias the
// container name doubles as the alias) and invalid inputs (empty string,
// too many colon-separated fields).
func TestSplitContainerDockerLink(t *testing.T) {
	// Inputs that must parse; the value is the expected container/alias pair.
	valid := map[string]containerDockerLink{
		"38e5c22c7120":      {Container: "38e5c22c7120", Alias: "38e5c22c7120"},
		"38e5c22c7120:asdf": {Container: "38e5c22c7120", Alias: "asdf"},
		"somename":          {Container: "somename", Alias: "somename"},
		"somename:zzzz":     {Container: "somename", Alias: "zzzz"},
	}
	// Inputs that must be rejected with an error.
	invalid := []string{
		"",
		"somename:zzzz:aaaa",
	}
	for input, expected := range valid {
		got, err := splitContainerDockerLink(input)
		if err != nil {
			t.Errorf("%s : %s", input, err.Error())
		}
		if !cmp.Equal(*got, expected) {
			t.Errorf("'%s' : Expected %+v, got %+v\n", input, expected, got)
		}
	}
	for _, input := range invalid {
		if _, err := splitContainerDockerLink(input); err == nil {
			t.Errorf("'%s' : Expected error, got nil", input)
		}
	}
}
// TestHandleNetworkCreate exercises RulesDirector.handleNetworkCreate against
// a mocked upstream. For each fixture-driven case it checks that:
//   - the request body forwarded upstream matches the <key>_expected.json fixture,
//   - the handler returns the expected status code,
//   - the network is recorded in the mocked upstream state on success,
//   - when -docker-link / -container-join-network is configured, the CI agent
//     container ends up attached to the new network (with the alias, if set).
func TestHandleNetworkCreate(t *testing.T) {
	l := mockLogger()
	// Pre-populated simplified upstream state that "exists" before tests execute.
	us := upstreamState{
		containers: map[string]upstreamStateContainer{
			"ciagentcontainer": upstreamStateContainer{
				// No ownership checking at this level (intentionally), due to chicken-and-egg situation
				// (CI container is a sibling/sidecar of sockguard itself, not a child)
				owner:            "foreign",
				attachedNetworks: []upstreamStateContainerAttachedNetwork{},
			},
		},
		networks: map[string]upstreamStateNetwork{},
	}
	// For each of the tests below, there will be 2 files in the fixtures/ dir:
	// - <key>_in.json - the client request sent to the director
	// - <key>_expected.json - the expected request sent to the upstream
	tests := map[string]handleCreateTests{
		// Defaults
		"networks_create_1": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner: "sockguard-pid-1",
			},
			esc: 200,
		},
		// Defaults + -docker-link enabled
		"networks_create_2": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner:               "sockguard-pid-1",
				ContainerDockerLink: "ciagentcontainer:cccc",
			},
			esc: 200,
		},
		// Defaults + -container-join-network enabled
		"networks_create_3": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner:                "sockguard-pid-1",
				ContainerJoinNetwork: "ciagentcontainer",
			},
			esc: 200,
		},
		// Defaults + -container-join-network + -container-join-network-alias enabled
		"networks_create_4": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner:                     "sockguard-pid-1",
				ContainerJoinNetwork:      "ciagentcontainer",
				ContainerJoinNetworkAlias: "ciagentalias",
			},
			esc: 200,
		},
	}
	reqUrl := "/v1.37/networks/create"
	expectedUrl := "/v1.37/networks/create"
	// TODOLATER: consolidate/DRY this with TestHandleContainerCreate()?
	for k, v := range tests {
		// The upstream request body the director is expected to produce for this case.
		expectedReqJson, err := loadFixtureFile(fmt.Sprintf("%s_expected", k))
		if err != nil {
			t.Fatal(err)
		}
		// Mock upstream: validates the (possibly rewritten) request from the
		// director, then records the new network in the mocked upstream state.
		upstream := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			if req.Method != "POST" {
				t.Errorf("%s : Expected HTTP method POST got %s", k, req.Method)
			}
			// log.Printf("%s %s", req.Method, req.URL.String())
			// Validate the request URL against expected.
			if req.URL.String() != expectedUrl {
				t.Errorf("%s : Expected URL %s got %s", k, expectedUrl, req.URL.String())
			}
			// Validate the body has been modified as expected
			body, err := ioutil.ReadAll(req.Body)
			if err != nil {
				t.Fatal(err)
			}
			if string(body) != string(expectedReqJson) {
				t.Errorf("%s : Expected request body JSON:\n%s\nGot request body JSON:\n%s\n", k, string(expectedReqJson), string(body))
			}
			// Pull the network name and owner label out of the forwarded request
			// so the mocked upstream state can be updated to match.
			var decoded map[string]interface{}
			if err := json.Unmarshal(body, &decoded); err != nil {
				t.Fatal(err)
			}
			newNetworkName := decoded["Name"].(string)
			newNetworkOwner := ""
			switch lab := decoded["Labels"].(type) {
			case map[string]interface{}:
				newNetworkOwner = lab["com.buildkite.sockguard.owner"].(string)
			default:
				t.Fatal("Error: Cannot parse Labels from request JSON on network create")
			}
			if us.doesNetworkExist(newNetworkName) == true {
				t.Fatalf("Network '%s' already exists", newNetworkName)
			}
			us.createNetwork(newNetworkName, newNetworkOwner)
			// Return empty JSON, the request is whats important not the response
			fmt.Fprintf(w, `{}`)
		})
		// Credit: https://blog.questionable.services/article/testing-http-handlers-go/
		// Create a request to pass to our handler
		containerCreateJson, err := loadFixtureFile(fmt.Sprintf("%s_in", k))
		if err != nil {
			t.Fatal(err)
		}
		// Parse out the new network name from containerCreateJson, for use in further checks below
		var decodedIn map[string]interface{}
		if err := json.Unmarshal([]byte(containerCreateJson), &decodedIn); err != nil {
			t.Fatal(err)
		}
		inNewNetworkName := decodedIn["Name"].(string)
		req, err := http.NewRequest("POST", reqUrl, strings.NewReader(containerCreateJson))
		if err != nil {
			t.Fatal(err)
		}
		// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
		rr := httptest.NewRecorder()
		handler := v.rd.handleNetworkCreate(l, req, upstream)
		// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
		// directly and pass in our Request and ResponseRecorder.
		handler.ServeHTTP(rr, req)
		// Check the status code is what we expect.
		//fmt.Printf("%s : SC %d ESC %d\n", k, rr.Code, v.esc)
		if status := rr.Code; status != v.esc {
			// Get the body out of the response to return with the error
			respBody, err := ioutil.ReadAll(rr.Body)
			if err == nil {
				t.Errorf("%s : handler returned wrong status code: got %v want %v. Response body: %s", k, status, v.esc, string(respBody))
			} else {
				t.Errorf("%s : handler returned wrong status code: got %v want %v. Error reading response body: %s", k, status, v.esc, err.Error())
			}
		}
		// Verify the network was added to upstreamState
		if rr.Code == 200 && us.doesNetworkExist(inNewNetworkName) == false {
			t.Errorf("%s : %d response code, but network '%s' does not exist, should have been created in mock upstream state", k, rr.Code, inNewNetworkName)
		} else if rr.Code != 200 && us.doesNetworkExist(inNewNetworkName) == true {
			t.Errorf("%s : %d response code, but network '%s' exists, should not have been created", k, rr.Code, inNewNetworkName)
		}
		// Verify the ciagentcontainer was connected to the new network (if applicable)
		if v.rd.ContainerDockerLink != "" || v.rd.ContainerJoinNetwork != "" {
			ciAgentAttachedNetworks := us.getContainerAttachedNetworks("ciagentcontainer")
			ciAgentAttachedToNetwork := false
			ciAgentAttachedToNetworkWithAlias := false
			for _, vn := range ciAgentAttachedNetworks {
				if vn.name == inNewNetworkName {
					ciAgentAttachedToNetwork = true
					if v.rd.ContainerJoinNetworkAlias == "" {
						// No alias set, consider this a success
						ciAgentAttachedToNetworkWithAlias = true
					} else if cmp.Equal(vn.aliases, []string{v.rd.ContainerJoinNetworkAlias}) == true {
						// Should also have the correct alias set
						ciAgentAttachedToNetworkWithAlias = true
					}
					break
				}
			}
			if ciAgentAttachedToNetwork == false {
				t.Errorf("%s : network '%s' exists (or should exist), but ciagentcontainer is not attached", k, inNewNetworkName)
			}
			if ciAgentAttachedToNetworkWithAlias == false {
				t.Errorf("%s : network '%s' exists (or should exist), but ciagentcontainer does not have the alias '%s'", k, inNewNetworkName, v.rd.ContainerJoinNetworkAlias)
			}
		}
		// Don't bother checking the response, it's not relevant in mocked context. The request side is more important here.
	}
}
// TestHandleNetworkDelete exercises RulesDirector.handleNetworkDelete against
// a mocked upstream. The map key of each case is the network name to delete;
// for each case it checks the expected status code, and that the network is
// removed from (or, on a denied request, retained in) the mocked upstream state.
func TestHandleNetworkDelete(t *testing.T) {
	l := mockLogger()
	// Pre-populated simplified upstream state that "exists" before tests execute.
	us := upstreamState{
		containers: map[string]upstreamStateContainer{
			"ciagentcontainer": upstreamStateContainer{
				// No ownership checking at this level (intentionally), due to chicken-and-egg situation
				// (CI container is a sibling/sidecar of sockguard itself, not a child)
				owner: "foreign",
				attachedNetworks: []upstreamStateContainerAttachedNetwork{
					upstreamStateContainerAttachedNetwork{
						name: "whatevernetwork",
					},
					upstreamStateContainerAttachedNetwork{
						name: "alwaysjoinnetwork",
					},
					upstreamStateContainerAttachedNetwork{
						name:    "alwaysjoinnetworkwithalias",
						aliases: []string{"ciagentalias"},
					},
				},
			},
		},
		networks: map[string]upstreamStateNetwork{
			"somenetwork": upstreamStateNetwork{
				owner: "sockguard-pid-1",
			},
			"anothernetwork": upstreamStateNetwork{
				owner: "adifferentowner",
			},
			"whatevernetwork": upstreamStateNetwork{
				owner: "sockguard-pid-1",
			},
			"alwaysjoinnetwork": upstreamStateNetwork{
				owner: "sockguard-pid-1",
			},
			"alwaysjoinnetworkwithalias": upstreamStateNetwork{
				owner: "sockguard-pid-1",
			},
		},
	}
	// Key = the network name that will be deleted (or attempted)
	tests := map[string]handleCreateTests{
		// Defaults (owner label matches, should pass)
		"somenetwork": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner: "sockguard-pid-1",
			},
			esc: 200,
		},
		// Defaults (owner label does not match, should fail)
		"anothernetwork": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner: "sockguard-pid-1",
			},
			esc: 401,
		},
		// Defaults + -docker-link enabled
		"whatevernetwork": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner:               "sockguard-pid-1",
				ContainerDockerLink: "ciagentcontainer:ffff",
			},
			esc: 200,
		},
		// Defaults + -container-join-network enabled
		"alwaysjoinnetwork": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner:                "sockguard-pid-1",
				ContainerJoinNetwork: "ciagentcontainer",
			},
			esc: 200,
		},
		// Defaults + -container-join-network + -container-join-network-alias enabled
		// Technically we don't do anything different to the prior here, but added for completeness
		"alwaysjoinnetworkwithalias": handleCreateTests{
			rd: &RulesDirector{
				Client: mockRulesDirectorHttpClientWithUpstreamState(&us),
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner:                     "sockguard-pid-1",
				ContainerJoinNetwork:      "ciagentcontainer",
				ContainerJoinNetworkAlias: "ciagentalias",
			},
			esc: 200,
		},
	}
	// Used by the mock upstream to extract the network name/id from the request path.
	pathIdRegex := regexp.MustCompile("^/v(.*)/networks/(.*)$")
	// TODOLATER: consolidate/DRY this with TestHandleContainerCreate()?
	for k, v := range tests {
		reqUrl := fmt.Sprintf("/v1.37/networks/%s", k)
		expectedUrl := fmt.Sprintf("/v1.37/networks/%s", k)
		// Mock upstream: validates the forwarded DELETE, then removes the
		// network from the mocked upstream state.
		upstream := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			if req.Method != "DELETE" {
				t.Errorf("%s : Expected HTTP method DELETE got %s", k, req.Method)
			}
			// log.Printf("%s %s", req.Method, req.URL.String())
			// Validate the request URL against expected.
			if req.URL.String() != expectedUrl {
				t.Errorf("%s : Expected URL %s got %s", k, expectedUrl, req.URL.String())
			}
			// No request body for these DELETE calls
			// Parse out request URI
			if pathIdRegex.MatchString(req.URL.Path) == false {
				t.Fatalf("%s : URL path did not match expected /vx.xx/networks/{id|name}", k)
			}
			parsePath := pathIdRegex.FindStringSubmatch(req.URL.Path)
			if len(parsePath) != 3 {
				t.Fatalf("%s : URL path regex split mismatch, expected 3 got %d", k, len(parsePath))
			}
			// "Delete" the network (from mocked upstream state)
			err := us.deleteNetwork(parsePath[2])
			if err != nil {
				t.Fatal(err)
			}
			// Return empty JSON, the request is whats important not the response
			fmt.Fprintf(w, `{}`)
		})
		// Credit: https://blog.questionable.services/article/testing-http-handlers-go/
		// Create a request to pass to our handler
		req, err := http.NewRequest("DELETE", reqUrl, nil)
		if err != nil {
			t.Fatal(err)
		}
		// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
		rr := httptest.NewRecorder()
		handler := v.rd.handleNetworkDelete(l, req, upstream)
		// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
		// directly and pass in our Request and ResponseRecorder.
		handler.ServeHTTP(rr, req)
		// Check the status code is what we expect.
		//fmt.Printf("%s : SC %d ESC %d\n", k, rr.Code, v.esc)
		if status := rr.Code; status != v.esc {
			// Get the body out of the response to return with the error
			respBody, err := ioutil.ReadAll(rr.Body)
			if err == nil {
				t.Errorf("%s : handler returned wrong status code: got %v want %v. Response body: %s", k, status, v.esc, string(respBody))
			} else {
				t.Errorf("%s : handler returned wrong status code: got %v want %v. Error reading response body: %s", k, status, v.esc, err.Error())
			}
		}
		// Verify the network was deleted from mock upstream state (or not deleted on error)
		if rr.Code == 200 && us.doesNetworkExist(k) == true {
			t.Errorf("%s : %d response code, but network still exists, should have been deleted from mock upstream state", k, rr.Code)
		} else if rr.Code != 200 && us.doesNetworkExist(k) == false {
			t.Errorf("%s : %d response code, but network does not exist, should not have been deleted", k, rr.Code)
		}
		// Don't bother checking the response, it's not relevant in mocked context. The request side is more important here.
	}
}
// TODOLATER: would it make more sense to implement a TestDirect, or TestDirect* (break it into variations by path or method)?
// Since that would also cover Direct() + CheckOwner(). Or do we do both...?
// TestCheckOwner verifies RulesDirector.checkOwner across all four resource
// types (containers, images, networks, volumes): a resource whose owner label
// matches should return true, one with no owner label should return false.
func TestCheckOwner(t *testing.T) {
	l := mockLogger()
	// Mocked upstream state that "exists" before the test runs. An empty owner
	// string represents a resource with no owner label at all.
	us := upstreamState{
		containers: map[string]upstreamStateContainer{
			"idwithnolabel": {owner: ""},
			"idwithlabel1":  {owner: "test-owner"},
		},
		images: map[string]upstreamStateImage{
			"idwithnolabel": {owner: ""},
			"idwithlabel1":  {owner: "test-owner"},
		},
		networks: map[string]upstreamStateNetwork{
			"idwithnolabel": {owner: ""},
			"idwithlabel1":  {owner: "test-owner"},
		},
		volumes: map[string]upstreamStateVolume{
			"namewithnolabel":  {owner: ""},
			"namewithlabel1":   {owner: "test-owner"},
			"name-with-label2": {owner: "test-owner"},
		},
	}
	r := mockRulesDirectorWithUpstreamState(&us)
	// Map key is the request path; value holds the resource type passed to
	// checkOwner and the expected ownership result.
	cases := map[string]struct {
		Type      string
		ExpResult bool
	}{
		// A container that will match
		"/v1.37/containers/idwithlabel1/logs": {"containers", true},
		// A container that won't match
		"/v1.37/containers/idwithnolabel/logs": {"containers", false},
		// An image that will match
		"/v1.37/images/idwithlabel1/json": {"images", true},
		// An image that won't match
		"/v1.37/images/idwithnolabel/json": {"images", false},
		// A network that will match
		"/v1.37/networks/idwithlabel1": {"networks", true},
		// A network that won't match
		"/v1.37/networks/idwithnolabel": {"networks", false},
		// A volume that will match
		"/v1.37/volumes/namewithlabel1": {"volumes", true},
		// A volume that will match
		"/v1.37/volumes/name-with-label2": {"volumes", true},
		// A volume that won't match
		"/v1.37/volumes/namewithnolabel": {"volumes", false},
	}
	for path, tc := range cases {
		req, err := http.NewRequest("GET", path, nil)
		if err != nil {
			t.Fatal(err)
		}
		got, err := r.checkOwner(l, tc.Type, false, req)
		if err != nil {
			t.Errorf("%s : Error - %s", req.URL.String(), err.Error())
		}
		if got != tc.ExpResult {
			t.Errorf("%s : Expected %t, got %t", req.URL.String(), tc.ExpResult, got)
		}
	}
}
// handleBuildTest describes a single TestHandleBuild case: the director under
// test, the query string the client sends, and what should reach the upstream.
type handleBuildTest struct {
	// rd is the RulesDirector configuration under test.
	rd *RulesDirector
	// Expected StatusCode
	esc int
	// These are short enough, store inline rather than in fixtures files
	// inQueryString is the raw query string the client sends to /build;
	// expectedQueryString is what the director should forward upstream.
	inQueryString       string
	expectedQueryString string
}
// TestHandleBuild exercises RulesDirector.handleBuild against a mocked
// upstream, verifying that the /build query string is rewritten as expected
// (owner label injected into labels, cgroupparent applied from config, and a
// client-supplied cgroupparent rejected with 401).
func TestHandleBuild(t *testing.T) {
	l := mockLogger()
	tests := []handleBuildTest{
		// Defaults
		handleBuildTest{
			rd: &RulesDirector{
				Client: &http.Client{},
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner: "sockguard-pid-1",
			},
			esc:                 200,
			inQueryString:       `buildargs={}&cachefrom=[]&cgroupparent=&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels={}&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&target=&ulimits=null&version=1`,
			expectedQueryString: `buildargs={}&cachefrom=[]&cgroupparent=&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels={"com.buildkite.sockguard.owner":"sockguard-pid-1"}&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&target=&ulimits=null&version=1`,
		},
		// Defaults + custom label
		handleBuildTest{
			rd: &RulesDirector{
				Client: &http.Client{},
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner: "sockguard-pid-1",
			},
			esc:                 200,
			inQueryString:       `buildargs={}&cachefrom=[]&cgroupparent=&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels={"somelabel":"somevalue"}&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&target=&ulimits=null&version=1`,
			expectedQueryString: `buildargs={}&cachefrom=[]&cgroupparent=&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels={"com.buildkite.sockguard.owner":"sockguard-pid-1","somelabel":"somevalue"}&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&target=&ulimits=null&version=1`,
		},
		// Defaults + CgroupParent in config (should pass)
		handleBuildTest{
			rd: &RulesDirector{
				Client: &http.Client{},
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner:                 "sockguard-pid-1",
				ContainerCgroupParent: "somecgroup",
			},
			esc:                 200,
			inQueryString:       `buildargs={}&cachefrom=[]&cgroupparent=&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels={}&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&target=&ulimits=null&version=1`,
			expectedQueryString: `buildargs={}&cachefrom=[]&cgroupparent=somecgroup&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels={"com.buildkite.sockguard.owner":"sockguard-pid-1"}&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&target=&ulimits=null&version=1`,
		},
		// Defaults + CgroupParent in API request (should fail)
		handleBuildTest{
			rd: &RulesDirector{
				Client: &http.Client{},
				// This is what's set in main() as the default, assuming running in a container so PID 1
				Owner: "sockguard-pid-1",
			},
			esc:                 401,
			inQueryString:       `buildargs={}&cachefrom=[]&cgroupparent=anothercgroup&cpuperiod=0&cpuquota=0&cpusetcpus=&cpusetmems=&cpushares=0&dockerfile=Dockerfile&labels={}&memory=0&memswap=0&networkmode=default&rm=1&shmsize=0&target=&ulimits=null&version=1`,
			expectedQueryString: `<should fail and never get here>`,
		},
	}
	reqUrlPath := "/v1.37/build"
	expectedUrlPath := "/v1.37/build"
	for _, v := range tests {
		// Mock upstream: validates the forwarded URL path and the (rewritten)
		// query string against expectations.
		upstream := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			// log.Printf("%s %s", req.Method, req.URL.Path)
			// Validate the request URL path against expected.
			if req.URL.Path != expectedUrlPath {
				t.Error("Expected URL path", expectedUrlPath, "got", req.URL.Path)
			}
			// Validate the query string matches expected
			unescapeQueryString, err := url.QueryUnescape(req.URL.RawQuery)
			if err != nil {
				t.Fatal(err)
			}
			if unescapeQueryString != v.expectedQueryString {
				t.Errorf("Expected URL query string:\n%s\nGot:\n%s\n\n", v.expectedQueryString, unescapeQueryString)
			}
			// We don't validate the request body here, as it is a build context tar (which isn't modified), not relevant
			// Return empty JSON, the request is whats important not the response
			fmt.Fprintf(w, `{}`)
		})
		// Credit: https://blog.questionable.services/article/testing-http-handlers-go/
		// Create a request to pass to our handler, using an empty request body for now (not relevant)
		r, err := http.NewRequest("POST", fmt.Sprintf("%s?%s", reqUrlPath, v.inQueryString), nil)
		if err != nil {
			t.Fatal(err)
		}
		// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
		rr := httptest.NewRecorder()
		handler := v.rd.handleBuild(l, r, upstream)
		// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
		// directly and pass in our Request and ResponseRecorder.
		handler.ServeHTTP(rr, r)
		// Check the status code is what we expect.
		//fmt.Printf("%s : SC %d ESC %d\n", k, rr.Code, v.esc)
		if status := rr.Code; status != v.esc {
			// Get the body out of the response to return with the error
			respBody, err := ioutil.ReadAll(rr.Body)
			if err == nil {
				t.Errorf("%s : handler returned wrong status code: got %v want %v. Response body: %s", v.inQueryString, status, v.esc, string(respBody))
			} else {
				t.Errorf("%s : handler returned wrong status code: got %v want %v. Error reading response body: %s", v.inQueryString, status, v.esc, err.Error())
			}
		}
		// Don't bother checking the response, it's not relevant in mocked context. The request side is more important here.
	}
}
|
package carriage
import (
"TruckMonitor-Backend/model"
"fmt"
)
// convertEmployeeName formats an employee's full name as
// "Surname Name[ Patronymic]", appending the patronymic only when present.
func convertEmployeeName(employee *model.Employee) (result string) {
	result = employee.Surname + " " + employee.Name
	if len(employee.Patronymic) > 0 {
		result += " " + employee.Patronymic
	}
	return
}
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"strings"
"unicode"
"go.opentelemetry.io/collector/consumer/pdata"
)
// constants for tags
const (
	// MaxTagLength is the maximum allowed length (in bytes) of a normalized
	// tag string; longer output is truncated by NormalizeSpanName.
	MaxTagLength = 200
	// DefaultServiceName is the default name we assign a service if it's missing and we have no reasonable fallback
	// From: https://github.com/DataDog/datadog-agent/blob/eab0dde41fe3a069a65c33d82a81b1ef1cf6b3bc/pkg/trace/traceutil/normalize.go#L15
	DefaultServiceName string = "unnamed-otel-service"
)
// NormalizeSpanName returns a cleaned up, normalized span name. Span names are used to formulate tags,
// and they also are used throughout the UI to connect metrics and traces. This helper function will:
//
// 1. Convert to all lowercase unicode string
// 2. Convert bad characters to underscores
// 3. Dedupe contiguous underscores
// 4. Remove leading non-alpha chars
// 5. Truncate to MaxTagLength (200) characters
// 6. Strip trailing underscores
//
// isService relaxes the rules slightly: '-' is kept in service names but
// converted to '_' in span operation names.
func NormalizeSpanName(tag string, isService bool) string {
	// unless you just throw out unicode, this is already as fast as it gets
	bufSize := len(tag)
	if bufSize > MaxTagLength {
		bufSize = MaxTagLength // Limit size of allocation
	}
	var buf strings.Builder
	buf.Grow(bufSize)
	lastWasUnderscore := false
	for i, c := range tag {
		// Bail early if the tag contains a lot of non-letter/digit characters.
		// Let us assume if a tag is test🍣🍣[.,...], it's unlikely to be properly formated tag.
		// Max tag length matches backend constraint.
		// NOTE: i is a byte offset (range over a string), so this bails after
		// 2*MaxTagLength bytes, not runes.
		if i > 2*MaxTagLength {
			break
		}
		// fast path for len check
		if buf.Len() >= MaxTagLength {
			break
		}
		// fast path for ascii alphabetic chars
		switch {
		case c >= 'a' && c <= 'z':
			buf.WriteRune(c)
			lastWasUnderscore = false
			continue
		case c >= 'A' && c <= 'Z':
			// ASCII uppercase -> lowercase without a unicode table lookup
			c -= 'A' - 'a'
			buf.WriteRune(c)
			lastWasUnderscore = false
			continue
		}
		c = unicode.ToLower(c)
		switch {
		// handle always valid cases
		case unicode.IsLetter(c):
			buf.WriteRune(c)
			lastWasUnderscore = false
		// skip any characters that can't start the string
		case buf.Len() == 0:
			continue
		// handle valid characters that can't start the string.
		// '-' creates issues in the UI so we skip it
		case unicode.IsDigit(c) || c == '.':
			buf.WriteRune(c)
			lastWasUnderscore = false
		// '-' only creates issues for span operation names not service names
		case c == '-' && isService:
			buf.WriteRune(c)
			lastWasUnderscore = false
		// convert anything else to underscores (including underscores), but only allow one in a row.
		case !lastWasUnderscore:
			buf.WriteRune('_')
			lastWasUnderscore = true
		}
	}
	s := buf.String()
	// strip trailing underscores
	// (safe byte slicing: '_' is a single-byte rune, so dropping one byte
	// removes exactly the trailing underscore)
	if lastWasUnderscore {
		return s[:len(s)-1]
	}
	return s
}
// NormalizeSpanKind returns a span kind with the SPAN_KIND prefix trimmed off.
// Kinds without the prefix are returned unchanged.
func NormalizeSpanKind(kind pdata.SpanKind) string {
	const prefix = "SPAN_KIND_"
	s := kind.String()
	if strings.HasPrefix(s, prefix) {
		return s[len(prefix):]
	}
	return s
}
// NormalizeServiceName returns a span service name normalized to remove invalid characters
// TODO: we'd like to move to the datadog-agent traceutil version of this once it's available in the exportable package
// https://github.com/DataDog/datadog-agent/blob/eab0dde41fe3a069a65c33d82a81b1ef1cf6b3bc/pkg/trace/traceutil/normalize.go#L52
func NormalizeServiceName(service string) string {
if service == "" {
return DefaultServiceName
}
s := NormalizeSpanName(service, true)
if s == "" {
return DefaultServiceName
}
return s
}
|
package main
import (
"fmt"
"github.com/PuerkitoBio/goquery"
"strconv"
)
// getSelectedCategory returns the first node matching the selector for the
// currently selected category in the page's category tree.
func getSelectedCategory(document *goquery.Document) (selectedCategoryNode *goquery.Selection) {
	const selectedCategorySelector = "ul ul li span"
	selectedCategoryNode = document.Find(selectedCategorySelector).First()
	return
}
// getAllCategories collects the child categories of the currently selected
// category. It returns nil when the selected category has no child list;
// otherwise it returns one *Category per "li a" entry that carries an href.
// Titles are stored quoted (strconv.Quote) as elsewhere expected by callers.
func getAllCategories(document *goquery.Document) (categoriesList []*Category) {
	selectedCategoryNode := getSelectedCategory(document)
	// The child category list is the first <ul> under the selected node's grandparent.
	childCategoriesContainer := selectedCategoryNode.Parent().Parent().Find("ul").First()
	if len(childCategoriesContainer.Nodes) > 0 {
		categoriesList = make([]*Category, 0)
		childCategoriesContainer.Find("li a").Each(func(_ int, selection *goquery.Selection) {
			categoryLink, exists := selection.Attr("href")
			if exists {
				newCategory := &Category{
					link:  categoryLink,
					title: strconv.Quote(selection.Text()),
				}
				categoriesList = append(categoriesList, newCategory)
			} else {
				// Anchor without an href is skipped; log it for diagnostics.
				fmt.Println("Href for category doesn't exist")
			}
		})
	}
	return
}
|
package mal
import (
"drdgvhbh/discordbot/internal/cli/anime/mal"
"fmt"
"time"
"github.com/bwmarrin/discordgo"
)
// AnimeStockQuoteEmbeddedOptions carries the inputs for CreateAnimeStockQuoteEmbedded.
type AnimeStockQuoteEmbeddedOptions struct {
	// AnimeStock supplies the market price and image URL shown in the embed.
	AnimeStock mal.AnimeStock
}
// CreateAnimeStockQuoteEmbedded builds a Discord embed presenting an anime
// stock quote: the market price (4 decimal places) as an inline field, the
// stock's image, and an RFC 3339 timestamp of when the embed was created.
func CreateAnimeStockQuoteEmbedded(
	options AnimeStockQuoteEmbeddedOptions,
) *discordgo.MessageEmbed {
	stock := options.AnimeStock
	priceField := &discordgo.MessageEmbedField{
		Name:   "Price",
		Value:  fmt.Sprintf("%.4f", stock.MarketPrice()),
		Inline: true,
	}
	return &discordgo.MessageEmbed{
		Title:     "Anime Stock Quote",
		Author:    &discordgo.MessageEmbedAuthor{},
		Color:     0xFFFFCC,
		Fields:    []*discordgo.MessageEmbedField{priceField},
		Image:     &discordgo.MessageEmbedImage{URL: stock.ImageURL()},
		Timestamp: time.Now().Format(time.RFC3339),
	}
}
|
package cmd
import (
"errors"
"time"
boshblob "github.com/cloudfoundry/bosh-agent/blobstore"
bosherr "github.com/cloudfoundry/bosh-agent/errors"
boshlog "github.com/cloudfoundry/bosh-agent/logger"
boshcmd "github.com/cloudfoundry/bosh-agent/platform/commands"
boshsys "github.com/cloudfoundry/bosh-agent/system"
boshtime "github.com/cloudfoundry/bosh-agent/time"
boshuuid "github.com/cloudfoundry/bosh-agent/uuid"
bmcloud "github.com/cloudfoundry/bosh-micro-cli/cloud"
bmcomp "github.com/cloudfoundry/bosh-micro-cli/compile"
bmconfig "github.com/cloudfoundry/bosh-micro-cli/config"
bmcpideploy "github.com/cloudfoundry/bosh-micro-cli/cpideployer"
bmdeployer "github.com/cloudfoundry/bosh-micro-cli/deployer"
bmagentclient "github.com/cloudfoundry/bosh-micro-cli/deployer/agentclient"
bmas "github.com/cloudfoundry/bosh-micro-cli/deployer/applyspec"
bmblobstore "github.com/cloudfoundry/bosh-micro-cli/deployer/blobstore"
bmins "github.com/cloudfoundry/bosh-micro-cli/deployer/instance"
bmregistry "github.com/cloudfoundry/bosh-micro-cli/deployer/registry"
bmsshtunnel "github.com/cloudfoundry/bosh-micro-cli/deployer/sshtunnel"
bmdepl "github.com/cloudfoundry/bosh-micro-cli/deployment"
bmeventlog "github.com/cloudfoundry/bosh-micro-cli/eventlogging"
bmindex "github.com/cloudfoundry/bosh-micro-cli/index"
bminstall "github.com/cloudfoundry/bosh-micro-cli/install"
bmpkgs "github.com/cloudfoundry/bosh-micro-cli/packages"
bmrelvalidation "github.com/cloudfoundry/bosh-micro-cli/release/validation"
bmstemcell "github.com/cloudfoundry/bosh-micro-cli/stemcell"
bmtempcomp "github.com/cloudfoundry/bosh-micro-cli/templatescompiler"
bmerbrenderer "github.com/cloudfoundry/bosh-micro-cli/templatescompiler/erbrenderer"
bmui "github.com/cloudfoundry/bosh-micro-cli/ui"
bmvm "github.com/cloudfoundry/bosh-micro-cli/vm"
)
// Factory constructs CLI commands by name.
type Factory interface {
	// CreateCommand returns the command registered under name, or an error
	// when the name is unknown.
	CreateCommand(name string) (Cmd, error)
}
// factory is the concrete Factory; it holds the shared dependencies every
// command constructor needs, plus a registry of named constructors.
type factory struct {
	commands                map[string](func() (Cmd, error)) // name -> lazy command constructor
	userConfig              bmconfig.UserConfig
	userConfigService       bmconfig.UserConfigService
	deploymentConfig        bmconfig.DeploymentConfig
	deploymentConfigService bmconfig.DeploymentConfigService
	fs                      boshsys.FileSystem
	ui                      bmui.UI
	logger                  boshlog.Logger
	uuidGenerator           boshuuid.Generator
	workspace               string // directory that anchors the deployment config
}
// NewFactory builds the command factory with its shared dependencies, loads
// the deployment config, and registers the available commands
// ("deployment" and "deploy").
func NewFactory(
	userConfig bmconfig.UserConfig,
	userConfigService bmconfig.UserConfigService,
	fs boshsys.FileSystem,
	ui bmui.UI,
	logger boshlog.Logger,
	uuidGenerator boshuuid.Generator,
	workspace string,
) Factory {
	f := &factory{
		userConfig:        userConfig,
		userConfigService: userConfigService,
		fs:                fs,
		ui:                ui,
		logger:            logger,
		uuidGenerator:     uuidGenerator,
		workspace:         workspace,
	}
	// NOTE(review): the error from loadDeploymentConfig is discarded here, so
	// a failed config load surfaces only later (if at all) — confirm intended.
	f.loadDeploymentConfig()
	f.commands = map[string](func() (Cmd, error)){
		"deployment": f.createDeploymentCmd,
		"deploy":     f.createDeployCmd,
	}
	return f
}
// CreateCommand looks up and constructs the command registered under name,
// returning an error when the name is not registered.
func (f *factory) CreateCommand(name string) (Cmd, error) {
	makeCmd, found := f.commands[name]
	if !found || makeCmd == nil {
		return nil, errors.New("Invalid command name")
	}
	return makeCmd()
}
// createDeploymentCmd builds the "deployment" command from the factory's
// shared dependencies. It never fails.
func (f *factory) createDeploymentCmd() (Cmd, error) {
	return NewDeploymentCmd(
		f.ui,
		f.userConfig,
		f.userConfigService,
		f.deploymentConfig,
		f.fs,
		f.uuidGenerator,
		f.logger,
	), nil
}
// createDeployCmd wires together the full dependency graph for the "deploy"
// command: release validation, package/template compilation, CPI deployment,
// stemcell management, and the deployer that provisions instances.
// Construction order matters — later components consume earlier ones.
// It never fails.
func (f *factory) createDeployCmd() (Cmd, error) {
	runner := boshsys.NewExecCmdRunner(f.logger)
	extractor := boshcmd.NewTarballCompressor(runner, f.fs)
	// Release validation: generic BOSH checks plus CPI-specific checks.
	boshValidator := bmrelvalidation.NewBoshValidator(f.fs)
	cpiReleaseValidator := bmrelvalidation.NewCpiValidator()
	releaseValidator := bmrelvalidation.NewValidator(
		boshValidator,
		cpiReleaseValidator,
		f.ui,
	)
	// Package compilation: compiled packages are indexed on disk and their
	// blobs kept in a local, SHA1-verified blobstore.
	compressor := boshcmd.NewTarballCompressor(runner, f.fs)
	indexFilePath := f.deploymentConfig.CompiledPackagedIndexPath()
	compiledPackageIndex := bmindex.NewFileIndex(indexFilePath, f.fs)
	compiledPackageRepo := bmpkgs.NewCompiledPackageRepo(compiledPackageIndex)
	options := map[string]interface{}{"blobstore_path": f.deploymentConfig.BlobstorePath()}
	blobstore := boshblob.NewSHA1VerifiableBlobstore(
		boshblob.NewLocalBlobstore(f.fs, f.uuidGenerator, options),
	)
	blobExtractor := bminstall.NewBlobExtractor(f.fs, compressor, blobstore, f.logger)
	packageInstaller := bminstall.NewPackageInstaller(compiledPackageRepo, blobExtractor)
	packageCompiler := bmcomp.NewPackageCompiler(
		runner,
		f.deploymentConfig.PackagesPath(),
		f.fs,
		compressor,
		blobstore,
		compiledPackageRepo,
		packageInstaller,
	)
	// Event logging with time-stamped filtering, shared by all stages.
	timeService := boshtime.NewConcreteService()
	eventFilters := []bmeventlog.EventFilter{
		bmeventlog.NewTimeFilter(timeService),
	}
	eventLogger := bmeventlog.NewEventLoggerWithFilters(f.ui, eventFilters)
	da := bmcomp.NewDependencyAnalysis()
	releasePackagesCompiler := bmcomp.NewReleasePackagesCompiler(
		da,
		packageCompiler,
		eventLogger,
		timeService,
	)
	// Manifest parsing and ERB job template rendering/compilation.
	cpiManifestParser := bmdepl.NewCpiDeploymentParser(f.fs)
	boshManifestParser := bmdepl.NewBoshDeploymentParser(f.fs)
	erbRenderer := bmerbrenderer.NewERBRenderer(f.fs, runner, f.logger)
	jobRenderer := bmtempcomp.NewJobRenderer(erbRenderer, f.fs, f.logger)
	templatesIndex := bmindex.NewFileIndex(f.deploymentConfig.TemplatesIndexPath(), f.fs)
	templatesRepo := bmtempcomp.NewTemplatesRepo(templatesIndex)
	templatesCompiler := bmtempcomp.NewTemplatesCompiler(jobRenderer, compressor, blobstore, templatesRepo, f.fs, f.logger)
	releaseCompiler := bmcomp.NewReleaseCompiler(releasePackagesCompiler, templatesCompiler)
	jobInstaller := bminstall.NewJobInstaller(
		f.fs,
		packageInstaller,
		blobExtractor,
		templatesRepo,
		f.deploymentConfig.JobsPath(),
		f.deploymentConfig.PackagesPath(),
		eventLogger,
		timeService,
	)
	// CPI deployment: extract, validate, compile and install the CPI release.
	cloudFactory := bmcloud.NewFactory(f.fs, runner, f.deploymentConfig, f.logger)
	cpiDeployer := bmcpideploy.NewCpiDeployer(
		f.ui,
		f.fs,
		extractor,
		releaseValidator,
		releaseCompiler,
		jobInstaller,
		cloudFactory,
		f.logger,
	)
	// Stemcell and VM management plus instance provisioning over SSH/registry.
	stemcellReader := bmstemcell.NewReader(compressor, f.fs)
	repo := bmstemcell.NewRepo(f.deploymentConfigService)
	stemcellManagerFactory := bmstemcell.NewManagerFactory(f.fs, stemcellReader, repo, eventLogger)
	vmManagerFactory := bmvm.NewManagerFactory(eventLogger, f.deploymentConfigService, f.logger)
	registryServer := bmregistry.NewServer(f.logger)
	sshTunnelFactory := bmsshtunnel.NewFactory(f.logger)
	agentClientFactory := bmagentclient.NewAgentClientFactory(f.deploymentConfig.DeploymentUUID, 1*time.Second, f.logger)
	blobstoreFactory := bmblobstore.NewBlobstoreFactory(f.fs, f.logger)
	sha1Calculator := bmins.NewSha1Calculator(f.fs)
	applySpecFactory := bmas.NewFactory()
	templatesSpecGenerator := bmins.NewTemplatesSpecGenerator(
		blobstoreFactory,
		compressor,
		jobRenderer,
		f.uuidGenerator,
		sha1Calculator,
		f.fs,
		f.logger,
	)
	instanceFactory := bmins.NewInstanceFactory(
		agentClientFactory,
		templatesSpecGenerator,
		applySpecFactory,
		f.fs,
		f.logger,
	)
	deployer := bmdeployer.NewDeployer(
		vmManagerFactory,
		sshTunnelFactory,
		registryServer,
		instanceFactory,
		eventLogger,
		f.logger,
	)
	return NewDeployCmd(
		f.ui,
		f.userConfig,
		f.fs,
		cpiManifestParser,
		boshManifestParser,
		cpiDeployer,
		stemcellManagerFactory,
		deployer,
		f.logger,
	), nil
}
// loadDeploymentConfig creates the file-system deployment config service,
// loads the deployment config through it, and anchors the config's
// ContainingDir at the factory workspace.
func (f *factory) loadDeploymentConfig() error {
	f.deploymentConfigService = bmconfig.NewFileSystemDeploymentConfigService(
		f.userConfig.DeploymentConfigFilePath(),
		f.fs,
		f.logger,
	)
	config, err := f.deploymentConfigService.Load()
	f.deploymentConfig = config
	if err != nil {
		return bosherr.WrapError(err, "Loading deployment config")
	}
	f.deploymentConfig.ContainingDir = f.workspace
	return nil
}
|
package data
import "testing"
// TestPlantStructValidation verifies that a fully populated Plant passes
// validation without error.
func TestPlantStructValidation(t *testing.T) {
	plant := &Plant{Name: "apple", Price: 200.00}
	if err := plant.Validate(); err != nil {
		t.Fatal(err)
	}
}
|
package micro
import (
"fmt"
"github.com/micro/go-micro/v2"
"github.com/micro/go-micro/v2/server"
)
// InitServer builds a go-micro service named name at version, optionally
// wiring an etcd registry and an MQTT broker, then hands the underlying
// server to fn for handler registration. The service runs in a background
// goroutine and panics if it exits with an error.
func InitServer(name, version string, registry *EtcdRegistry, broker *MqttBroker, fn func(s server.Server)) {
	opts := []micro.Option{
		micro.Name(name),
		micro.WrapHandler(LogWrapper),
		micro.Version(version),
	}
	if registry != nil {
		opts = append(opts, micro.Registry(etcdRegistry(registry)))
	}
	if broker != nil {
		opts = append(opts, micro.Broker(mqttBroker(broker)))
	}
	service := micro.NewService(opts...)
	//s.Init()
	fn(service.Server())
	go func() {
		if err := service.Run(); err != nil {
			panic(fmt.Sprintf("[%s] micro server run error(%+v).", name, err))
		}
	}()
}
|
package controllers
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/validation"
)
// BaseController is the shared controller that the controllers in this
// package embed; it extends beego.Controller with request parsing and JSON
// reply helpers.
type BaseController struct {
	beego.Controller
}
// ReturnData describes the shape of a JSON response envelope.
// NOTE(review): all fields are unexported, so encoding/json would not
// serialize them, and the struct is not referenced by the visible code —
// confirm whether it is still needed.
type ReturnData struct {
	code    int
	message string
	data    map[string]interface{}
}
/**
 * RequestData parses the request form into param (a pointer to a struct) and
 * validates it with beego validation. On parse or validation failure it
 * replies immediately with code 1 and the first error message; ReturnData
 * calls StopRun, so the request ends there.
 * @param param interface{} pointer to the target struct
 */
func (this *BaseController) RequestData(param interface{}) {
	// Previously the ParseForm error was silently discarded.
	if err := this.ParseForm(param); err != nil {
		var data map[string]interface{}
		this.ReturnData(1, err.Error(), data)
		return
	}
	valida := validation.Validation{}
	valida.Valid(param)
	message := ""
	if valida.HasErrors() {
		// Report only the first validation error.
		for _, e := range valida.Errors {
			message = e.Key + " " + e.Message
			break
		}
		var data map[string]interface{}
		this.ReturnData(1, message, data)
	}
}
/**
 * ReturnData writes the standard {code, message, data} JSON envelope and
 * stops further request processing.
 */
func (this *BaseController) ReturnData(code int, message string, data interface{}) {
	payload := map[string]interface{}{
		"code":    code,
		"message": message,
		"data":    data,
	}
	this.Data["json"] = payload
	this.ServeJSON()
	this.StopRun()
}
/**
 * ReturnJson writes ret verbatim as the JSON response body and stops further
 * request processing (StopRun never returns to the caller).
 */
func (this *BaseController) ReturnJson(ret map[string]interface{}) {
	this.Data["json"] = ret
	this.ServeJSON()
	this.StopRun()
}
|
package s3
import (
"context"
"crypto/sha256"
"fmt"
"io"
"net/url"
"path"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// S3AO wraps a minio client bound to a single bucket; expiry is the lifetime
// used for presigned download URLs.
type S3AO struct {
	client *minio.Client
	bucket string
	expiry time.Duration
}
// BucketInfo holds bucket usage statistics, each with a human-readable
// companion string (see GetBucketInfo, which currently fills only the
// Incomplete* fields).
type BucketInfo struct {
	Objects                       uint64
	ObjectsReadable               string
	ObjectsSize                   uint64
	ObjectsSizeReadable           string
	IncompleteObjects             uint64
	IncompleteObjectsReadable     string
	IncompleteObjectsSize         uint64
	IncompleteObjectsSizeReadable string
}
// Initialize S3AO
//
// Init connects to the S3-compatible endpoint, verifies that the target
// bucket exists (creating it when missing), and returns an S3AO that uses
// presignExpiry as the lifetime for presigned URLs.
func Init(endpoint, bucket, region, accessKey, secretKey string, secure bool, presignExpiry time.Duration) (S3AO, error) {
	var s3ao S3AO
	// Set up client for S3AO
	minioClient, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure: secure,
	})
	if err != nil {
		return s3ao, err
	}
	minioClient.SetAppInfo("filebin", "2.0.1")
	s3ao.client = minioClient
	s3ao.bucket = bucket
	s3ao.expiry = presignExpiry
	fmt.Printf("Established session to S3AO at %s\n", endpoint)
	// Ensure that the bucket exists
	found, err := s3ao.client.BucketExists(context.Background(), bucket)
	if err != nil {
		fmt.Printf("Unable to check if S3AO bucket exists: %s\n", err.Error())
		return s3ao, err
	}
	if found {
		fmt.Printf("Found S3AO bucket: %s\n", bucket)
	} else {
		t0 := time.Now()
		if err := s3ao.client.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{Region: region}); err != nil {
			fmt.Printf("%s\n", err.Error())
			// Previously the error was only printed and Init reported success
			// even though the bucket was never created.
			return s3ao, err
		}
		fmt.Printf("Created S3AO bucket: %s in %.3fs\n", bucket, time.Since(t0).Seconds())
	}
	return s3ao, nil
}
// Status reports whether the configured bucket is reachable and exists,
// logging the reason when it is not.
func (s S3AO) Status() bool {
	found, err := s.client.BucketExists(context.Background(), s.bucket)
	if err != nil {
		fmt.Printf("Error from S3 when checking if bucket %s exists: %s\n", s.bucket, err.Error())
		return false
	}
	// Idiom fix: compare booleans directly instead of "== false".
	if !found {
		fmt.Printf("S3 bucket %s does not exist\n", s.bucket)
		return false
	}
	return true
}
// SetTrace toggles minio client HTTP tracing; when enabled, the trace is
// written to the default output (TraceOn(nil)).
func (s S3AO) SetTrace(trace bool) {
	if trace {
		s.client.TraceOn(nil)
	} else {
		s.client.TraceOff()
	}
}
// PutObject uploads size bytes from data under the hashed object key derived
// from bin and filename, storing the content unencrypted so presigned
// downloads keep working.
func (s S3AO) PutObject(bin string, filename string, data io.Reader, size int64) (err error) {
	t0 := time.Now()
	// Hash the path in S3
	objectKey := s.GetObjectKey(bin, filename)
	// Do not encrypt the content during upload. This allows for presigned downloads.
	objectSize := uint64(size)
	_, err = s.client.PutObject(context.Background(), s.bucket, objectKey, data, int64(objectSize), minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		fmt.Printf("Unable to put object: %s\n", err.Error())
		return err
	}
	fmt.Printf("Stored object: %s (%d bytes) in %.3fs\n", objectKey, objectSize, time.Since(t0).Seconds())
	return nil
}
// RemoveObject deletes the object stored for bin/filename.
func (s S3AO) RemoveObject(bin string, filename string) error {
	return s.RemoveKey(s.GetObjectKey(bin, filename))
}
// RemoveKey deletes the object stored under key and logs the elapsed time.
func (s S3AO) RemoveKey(key string) error {
	t0 := time.Now()
	opts := minio.RemoveObjectOptions{
		// The following is used in the Minio SDK documentation,
		// but it seems not all S3 server side implementations
		// support this. One example is DigitalOcean Spaces.
		//GovernanceBypass: true,
	}
	err := s.client.RemoveObject(context.Background(), s.bucket, key, opts)
	if err != nil {
		fmt.Printf("Unable to remove object: %s\n", err.Error())
		return err
	}
	fmt.Printf("Removed object: %s in %.3fs\n", key, time.Since(t0).Seconds())
	return nil
}
// ListObjects returns the keys of every object in the bucket, walking it
// recursively. Listing stops at the first error.
func (s S3AO) ListObjects() (objects []string, err error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	listing := s.client.ListObjects(ctx, s.bucket, minio.ListObjectsOptions{
		Prefix:    "",
		Recursive: true,
	})
	for entry := range listing {
		if entry.Err != nil {
			return objects, entry.Err
		}
		objects = append(objects, entry.Key)
	}
	return objects, nil
}
// RemoveBucket deletes every object in the bucket and then the bucket
// itself.
func (s S3AO) RemoveBucket() error {
	t0 := time.Now()
	objects, err := s.ListObjects()
	if err != nil {
		fmt.Printf("Unable to list objects: %s\n", err.Error())
		// Previously the error was only printed and the deletion continued
		// with an unknown object list, so the bucket removal failed anyway.
		return err
	}
	// RemoveKey on all objects
	for _, object := range objects {
		if err := s.RemoveKey(object); err != nil {
			return err
		}
	}
	// RemoveBucket
	if err := s.client.RemoveBucket(context.Background(), s.bucket); err != nil {
		return err
	}
	fmt.Printf("Removed bucket in %.3fs\n", time.Since(t0).Seconds())
	return nil
}
// GetObject fetches the object for bin/filename, optionally restricted to
// the byte range [start, end] when end > 0.
func (s S3AO) GetObject(bin string, filename string, start int64, end int64) (*minio.Object, error) {
	t0 := time.Now()
	// Consistency: reuse GetObjectKey instead of duplicating the SHA-256
	// key derivation inline.
	objectKey := s.GetObjectKey(bin, filename)
	opts := minio.GetObjectOptions{}
	if end > 0 {
		opts.SetRange(start, end)
	}
	object, err := s.client.GetObject(context.Background(), s.bucket, objectKey, opts)
	if err != nil {
		return object, err
	}
	fmt.Printf("Fetched object: %s in %.3fs\n", objectKey, time.Since(t0).Seconds())
	return object, err
}
// This only works with objects that are not encrypted
//
// PresignedGetObject returns a time-limited download URL for bin/filename,
// forcing the given mime type and a content disposition that downgrades
// risky types (text/html, application/pdf) to attachments.
func (s S3AO) PresignedGetObject(bin string, filename string, mime string) (presignedURL *url.URL, err error) {
	// Consistency: reuse GetObjectKey instead of duplicating the SHA-256
	// key derivation inline.
	objectKey := s.GetObjectKey(bin, filename)
	reqParams := make(url.Values)
	reqParams.Set("response-content-type", mime)
	switch {
	case strings.HasPrefix(mime, "text/html"), strings.HasPrefix(mime, "application/pdf"):
		// Tell browser to handle this as an attachment. For text/html, this
		// is a small barrier to reduce phishing.
		reqParams.Set("response-content-disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
	default:
		// Browser to decide how to handle the rest of the content-types
		reqParams.Set("response-content-disposition", fmt.Sprintf("filename=\"%s\"", filename))
	}
	reqParams.Set("response-cache-control", fmt.Sprintf("max-age=%.0f", s.expiry.Seconds()))
	presignedURL, err = s.client.PresignedGetObject(context.Background(), s.bucket, objectKey, s.expiry, reqParams)
	if err != nil {
		return presignedURL, err
	}
	return presignedURL, nil
}
// GetBucketInfo reports statistics about incomplete multipart uploads in the
// bucket. The scan of completed objects is commented out (presumably because
// it is expensive on large buckets — confirm), so the Objects* fields stay
// at their zero values.
func (s S3AO) GetBucketInfo() (info BucketInfo) {
	//opts := minio.ListObjectsOptions{
	//	Prefix: "",
	//	Recursive: true,
	//}
	//objectCh := s.client.ListObjects(context.Background(), s.bucket, opts)
	var size int64
	var numObjects uint64
	//for object := range objectCh {
	//	if object.Err != nil {
	//		fmt.Println(object.Err)
	//		return info
	//	}
	//	size = size + object.Size
	//	numObjects = numObjects + 1
	//}
	//info.Objects = numObjects
	//info.ObjectsReadable = humanize.Comma(int64(numObjects))
	//info.ObjectsSize = uint64(size)
	//info.ObjectsSizeReadable = humanize.Bytes(info.ObjectsSize)
	// Count incomplete multipart uploads; on a listing error the partial
	// totals gathered so far are returned.
	multiPartObjectCh := s.client.ListIncompleteUploads(context.Background(), s.bucket, "", true)
	for multiPartObject := range multiPartObjectCh {
		if multiPartObject.Err != nil {
			fmt.Println(multiPartObject.Err)
			return info
		}
		size = size + multiPartObject.Size
		numObjects = numObjects + 1
	}
	info.IncompleteObjects = numObjects
	info.IncompleteObjectsReadable = humanize.Comma(int64(numObjects))
	info.IncompleteObjectsSize = uint64(size)
	info.IncompleteObjectsSizeReadable = humanize.Bytes(info.IncompleteObjectsSize)
	return info
}
// GetObjectKey derives the S3 object key for bin/filename by joining the
// hex-encoded SHA-256 of each component, hiding the original names in the
// backing store.
func (s S3AO) GetObjectKey(bin string, filename string) (key string) {
	binHash := sha256.Sum256([]byte(bin))
	fileHash := sha256.Sum256([]byte(filename))
	return path.Join(fmt.Sprintf("%x", binHash), fmt.Sprintf("%x", fileHash))
}
|
// Copyright 2019 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by the Blue Oak Model License
// that can be found in the LICENSE file.
package gc
import (
"context"
"time"
docker "github.com/docker/docker/client"
)
// FilterFunc filters the Docker resource based
// on its labels. If the function returns false,
// the resource is ignored.
type FilterFunc func(map[string]string) bool

// default timeout for the collection cycle.
var timeout = time.Hour

// Collector defines a Docker container garbage collector.
type Collector interface {
	Collect(context.Context) error
}

// collector is the concrete Collector; whitelist/reserved protect named
// containers and images from collection, and threshold is the disk-usage
// target in bytes.
type collector struct {
	client    docker.APIClient
	whitelist []string // reserved containers
	reserved  []string // reserved images
	threshold int64 // target threshold in bytes
}
// New returns a garbage collector for the given Docker client, with the
// options applied in order on top of the defaults.
func New(client docker.APIClient, opt ...Option) Collector {
	c := &collector{client: client}
	for _, o := range opt {
		o(c)
	}
	return c
}
// Collect runs one garbage-collection cycle, bounded by the package-level
// timeout. Each phase's error is deliberately discarded so one failing phase
// does not stop the others; Collect therefore always returns nil.
func (c *collector) Collect(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	_ = c.collectContainers(ctx)
	_ = c.collectDanglingImages(ctx)
	_ = c.collectImages(ctx)
	_ = c.collectNetworks(ctx)
	_ = c.collectVolumes(ctx)
	return nil
}
// Schedule schedules the garbage collector to execute at the
// specified interval duration. It blocks until ctx is cancelled and then
// returns ctx.Err(). The timer is re-armed after each collection finishes,
// so cycles never overlap and the gap between cycles is at least interval.
func Schedule(ctx context.Context, collector Collector, interval time.Duration) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
			// Collection errors are ignored; the next cycle tries again.
			_ = collector.Collect(ctx)
		}
	}
}
|
package main
import (
"log"
"os"
"strconv"
"github.com/grayzone/godcm/core"
"github.com/grayzone/godcm/dcmimage"
)
// folder is the directory holding the sample DICOM files used below.
var folder = "./test/data/"
// readdicmfile parses a DICOM file from the test data folder and logs every
// meta and dataset element. isReadValue controls whether element values are
// loaded in addition to headers. Read errors are logged, then whatever was
// parsed is still printed.
func readdicmfile(filename string, isReadValue bool) {
	var reader core.DcmReader
	reader.IsReadValue = isReadValue
	if err := reader.ReadFile(folder + filename); err != nil {
		log.Println(err.Error())
	}
	for _, elem := range reader.Meta.Elements {
		log.Println(elem.String())
	}
	for _, elem := range reader.Dataset.Elements {
		log.Println(elem.String())
	}
}
// getimageinfo reads a DICOM file (including pixel data) and maps the
// dataset attributes onto a dcmimage.DcmImage. Numeric attribute parse
// errors are deliberately ignored, leaving the zero value, since many
// attributes are optional. Exits the process if the file cannot be read.
func getimageinfo(filename string) dcmimage.DcmImage {
	var reader core.DcmReader
	reader.IsReadPixel = true
	reader.IsReadValue = true
	err := reader.ReadFile(filename)
	if err != nil {
		log.Fatal(err.Error())
	}
	isCompressed, err := reader.IsCompressed()
	if err != nil {
		log.Println(err.Error())
	}
	isBigEndian, err := reader.IsBigEndian()
	if err != nil {
		log.Println(err.Error())
	}
	// Typed helpers replace the previous interface{} + type-assertion round
	// trips; parse errors are still ignored (zero value wins).
	parseUint := func(s string, bits int) uint64 {
		v, _ := strconv.ParseUint(s, 10, bits)
		return v
	}
	parseFloat := func(s string) float64 {
		v, _ := strconv.ParseFloat(s, 64)
		return v
	}
	var img dcmimage.DcmImage
	img.IsCompressed = isCompressed
	img.IsBigEndian = isBigEndian
	img.BitsAllocated = uint16(parseUint(reader.Dataset.BitsAllocated(), 16))
	img.BitsStored = uint16(parseUint(reader.Dataset.BitsStored(), 16))
	img.Columns = uint32(parseUint(reader.Dataset.Columns(), 32))
	img.Rows = uint32(parseUint(reader.Dataset.Rows(), 32))
	img.HighBit = uint16(parseUint(reader.Dataset.HighBit(), 16))
	img.WindowCenter = parseFloat(reader.Dataset.WindowCenter())
	img.WindowWidth = parseFloat(reader.Dataset.WindowWidth())
	img.RescaleIntercept = parseFloat(reader.Dataset.RescaleIntercept())
	img.RescaleSlope = parseFloat(reader.Dataset.RescaleSlope())
	img.PixelRepresentation = uint16(parseUint(reader.Dataset.PixelRepresentation(), 16))
	img.PhotometricInterpretation = reader.Dataset.PhotometricInterpretation()
	img.NumberOfFrames = int(parseUint(reader.Dataset.NumberOfFrames(), 64))
	img.SamplesPerPixel = uint16(parseUint(reader.Dataset.SamplesPerPixel(), 16))
	img.PixelData = reader.Dataset.PixelData()
	return img
}
// convert2bmp converts every frame of a DICOM file from the test data folder
// into a numbered .bmp file at the requested bit depth, logging per-frame
// failures and continuing.
func convert2bmp(filename string, bits uint16) {
	img := getimageinfo(folder + filename)
	for frame := 0; frame < img.NumberOfFrames; frame++ {
		target := filename + "_" + strconv.FormatUint(uint64(frame), 10) + ".bmp"
		if err := img.WriteBMP(target, bits, frame); err != nil {
			log.Println(err.Error())
		}
	}
}
// convert2png converts every frame of a DICOM file from the test data folder
// into a numbered .png file, logging per-frame failures and continuing.
func convert2png(filename string) {
	img := getimageinfo(folder + filename)
	for frame := 0; frame < img.NumberOfFrames; frame++ {
		target := filename + "_" + strconv.FormatUint(uint64(frame), 10) + ".png"
		if err := img.ConvertToPNG(target, frame); err != nil {
			log.Println(err.Error())
		}
	}
}
// convert2jpg converts every frame of a DICOM file from the test data folder
// into a numbered .jpg file, logging per-frame failures and continuing.
func convert2jpg(filename string) {
	img := getimageinfo(folder + filename)
	for frame := 0; frame < img.NumberOfFrames; frame++ {
		target := filename + "_" + strconv.FormatUint(uint64(frame), 10) + ".jpg"
		if err := img.ConvertToJPG(target, frame); err != nil {
			log.Println(err.Error())
		}
	}
}
// testfile lists the sample DICOM files (relative to folder) exercised by
// the manual test helpers below; entries are selected by index from the
// command line.
var testfile = []string{
	"US-RGB-8-esopecho.dcm",
	"MR-MONO2-8-16x-heart.dcm",
	"xr_chest.dcm",
	"GH177_D_CLUNIE_CT1_IVRLE_BigEndian_undefined_length.dcm",
	"US-MONO2-8-8x-execho.dcm",
	"xr_tspine.dcm",
	"IM0.dcm",
	"image_09-12-2013_4.dcm",
	"CT-MONO2-16-ankle",
	"xr_chicken2.dcm",
	"T23/IM-0001-0001.dcm",
	"IM-0001-0010.dcm",
	"GH195.dcm",
	"GH064.dcm",
	"GH177_D_CLUNIE_CT1_IVRLE_BigEndian_ELE_undefinded_length.dcm",
}
// testParseDcm parses a sample chosen by the CLI arguments: no args parses
// the first sample with values; one arg selects the sample index (values
// skipped); two args add a bool controlling whether values are read.
// Argument parse errors fall back to index 0 / false.
func testParseDcm() {
	switch len(os.Args) {
	case 1:
		readdicmfile(testfile[0], true)
	case 2:
		index, _ := strconv.Atoi(os.Args[1])
		readdicmfile(testfile[index], false)
	case 3:
		index, _ := strconv.Atoi(os.Args[1])
		isReadValue, _ := strconv.ParseBool(os.Args[2])
		readdicmfile(testfile[index], isReadValue)
	}
}
// testdcm2bmp converts a sample to BMP: no args uses the first sample at 32
// bits; one arg selects the sample index at 8 bits; two args add the bit
// depth.
func testdcm2bmp() {
	switch len(os.Args) {
	case 1:
		convert2bmp(testfile[0], 32)
	case 2:
		index, _ := strconv.Atoi(os.Args[1])
		convert2bmp(testfile[index], 8)
	case 3:
		index, _ := strconv.Atoi(os.Args[1])
		bits, _ := strconv.Atoi(os.Args[2])
		convert2bmp(testfile[index], uint16(bits))
	}
}
// testdcm2png converts a sample to PNG: no args uses the first sample, one
// arg selects the sample index.
func testdcm2png() {
	switch len(os.Args) {
	case 1:
		convert2png(testfile[0])
	case 2:
		index, _ := strconv.Atoi(os.Args[1])
		convert2png(testfile[index])
	}
}
// testdcm2jpg converts a sample to JPG: no args uses the first sample, one
// arg selects the sample index.
func testdcm2jpg() {
	switch len(os.Args) {
	case 1:
		convert2jpg(testfile[0])
	case 2:
		index, _ := strconv.Atoi(os.Args[1])
		convert2jpg(testfile[index])
	}
}
/*
func convert2png16(filename string) {
img := getimageinfo(filename)
err := img.ConvertToPNG16("test16.png")
if err != nil {
log.Println(err.Error())
}
}
func convert2jpg16(filename string) {
img := getimageinfo(filename)
err := img.ConvertToJPG16("test16.jpg")
if err != nil {
log.Println(err.Error())
}
}
func testdcm2png16() {
var index int
switch len(os.Args) {
case 1:
convert2png16(folder + testfile[0])
case 2:
index, _ = strconv.Atoi(os.Args[1])
convert2png16(folder + testfile[index])
}
}
func testdcm2jpg16() {
var index int
switch len(os.Args) {
case 1:
convert2jpg16(folder + testfile[0])
case 2:
index, _ = strconv.Atoi(os.Args[1])
convert2jpg16(folder + testfile[index])
}
}
*/
// main runs one of the manual smoke tests; the alternatives are kept
// commented out for convenience when testing locally.
func main() {
	// testParseDcm()
	// testdcm2bmp()
	// testdcm2png()
	testdcm2jpg()
}
|
package utils
import (
"bufio"
"bytes"
"encoding/gob"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"time"
"crypto/sha1"
)
// Character sets and bit-masking parameters for the Rand*String helpers
// below, which draw 6-bit letter indices out of each 63-bit rand.Int63.
// The empty "var ()" block that followed was dead weight and is removed.
const (
	letterAlphabets           = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	letterNumbers             = "0123456789"
	letterAlphabetsAndNumbers = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	letterIdxBits             = 6                    // 6 bits to represent a letter index
	letterIdxMask             = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax              = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)
// Sysexec runs cmd with args through a login bash shell ("/bin/bash -l -c"),
// so the login environment is loaded and shell features such as pipes and
// redirection work. It returns the combined stdout+stderr output with
// surrounding whitespace trimmed, plus the command's error, if any.
func Sysexec(cmd string, args ...string) (result string, err error) {
	arg := append([]string{cmd}, args...)
	arg_str := fmt.Sprintf("%s", strings.Join(arg, " "))
	ori_output, err := exec.Command("/bin/bash", "-l", "-c", arg_str).CombinedOutput()
	return strings.TrimSpace(string(ori_output)), err
}
// IsDirExist reports whether path exists and is a directory. When Stat
// fails, os.IsExist preserves the original semantics: a "does not exist"
// error (and most other errors) yields false.
func IsDirExist(path string) (exist bool) {
	fi, statErr := os.Stat(path)
	if statErr != nil {
		return os.IsExist(statErr)
	}
	return fi.IsDir()
}
// IsFileExist reports whether path exists and is a regular (non-directory)
// entry. When Stat fails, os.IsExist preserves the original semantics: a
// "does not exist" error (and most other errors) yields false.
func IsFileExist(path string) (exist bool) {
	fi, statErr := os.Stat(path)
	if statErr != nil {
		return os.IsExist(statErr)
	}
	return !fi.IsDir()
}
// Mkdirp creates dir and any missing parents (like `mkdir -p`).
// NOTE(review): the MkdirAll error is silently discarded, so callers cannot
// detect failure — confirm this is intentional.
func Mkdirp(dir string) {
	os.MkdirAll(dir, os.ModePerm)
}
// ListDir returns the names (not paths) of the files directly inside dirPth
// whose names end with suffix; subdirectories are skipped.
func ListDir(dirPth string, suffix string) (files []string, err error) {
	entries, err := ioutil.ReadDir(dirPth)
	if err != nil {
		return nil, err
	}
	files = make([]string, 0, len(entries))
	for _, entry := range entries {
		// Skip directories and names with the wrong suffix.
		if entry.IsDir() || !strings.HasSuffix(entry.Name(), suffix) {
			continue
		}
		files = append(files, entry.Name())
	}
	return
}
// Include reports whether str is present in list; a nil slice yields false.
// The slice parameter was renamed from "strings", which shadowed the
// imported strings package inside this function.
func Include(str string, list []string) bool {
	if list == nil {
		return false
	}
	for _, element := range list {
		if element == str {
			return true
		}
	}
	return false
}
// AtoUint64 parses s as a base-10 unsigned 64-bit integer.
func AtoUint64(s string) (i uint64, err error) {
	return strconv.ParseUint(s, 10, 64)
}
// IsDiskFS reports whether fs names an on-disk filesystem type (as opposed
// to virtual filesystems such as proc or tmpfs); used by diamond.
func IsDiskFS(fs string) bool {
	switch fs {
	case "btrfs", "ext2", "ext3", "ext4", "jfs", "reiser", "xfs", "ffs", "ufs", "jfs2", "vxfs", "hfs", "ntfs", "fat32", "zfs", "fuse.mfs":
		return true
	}
	return false
}
// CloneMap returns an independent copy of src. With string keys and values a
// shallow copy is a full copy.
func CloneMap(src map[string]string) (dst map[string]string) {
	dst = make(map[string]string, len(src))
	for key, value := range src {
		dst[key] = value
	}
	return dst
}
// UnitConvert converts the quantity i from oldunit to newunit using the
// bit-based magnification table in GetMagnificationFromUnit. An unknown
// newunit has magnification 0 and therefore divides by zero (+Inf/NaN),
// matching the original behavior.
func UnitConvert(i float64, oldunit, newunit string) (j float64) {
	return i * GetMagnificationFromUnit(oldunit) / GetMagnificationFromUnit(newunit)
}

// GetMagnificationFromUnit returns how many bits one of the given unit
// represents; unknown units yield 0.
func GetMagnificationFromUnit(unit string) (magnification float64) {
	switch unit {
	case "bit":
		return 1
	case "byte":
		return 8
	case "sector":
		return 512
	case "kb":
		return 1000
	case "kib":
		return 1024
	case "mb":
		return 1000000
	case "mib":
		return 1048576
	case "gb":
		return 1000000000
	case "gib":
		return 1073741824
	}
	return 0
}
// GobEncode serializes data into Go's gob wire format for storage.
func GobEncode(data interface{}) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(data); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// GobDecode deserializes gob-encoded bytes into the value pointed to by to.
func GobDecode(data []byte, to interface{}) error {
	return gob.NewDecoder(bytes.NewReader(data)).Decode(to)
}
// GetMemUsage returns the resident set size in bytes of the current process
// by reading field 24 (rss, in pages) of /proc/<pid>/stat. Linux only.
// NOTE(review): the page size is assumed to be 4096 bytes — os.Getpagesize
// would be exact; confirm before relying on the absolute value.
func GetMemUsage() (int, error) {
	pageSize := 4096
	pid := os.Getpid()
	f, err := os.Open(fmt.Sprintf("/proc/%d/stat", pid))
	if err != nil {
		return 0, fmt.Errorf("failed to get memory usage by pid: %d", pid)
	}
	defer f.Close()
	buff := bufio.NewReader(f)
	line, err := buff.ReadString('\n')
	// Previously the read error, the field count, and the Atoi error were
	// all unchecked, which could panic on a short line or silently report 0.
	if err != nil && line == "" {
		return 0, fmt.Errorf("failed to read stat for pid: %d", pid)
	}
	statFields := strings.Fields(line)
	if len(statFields) < 24 {
		return 0, fmt.Errorf("unexpected stat format for pid: %d", pid)
	}
	rss, err := strconv.Atoi(statFields[23])
	if err != nil {
		return 0, fmt.Errorf("unexpected rss value for pid: %d", pid)
	}
	return rss * pageSize, nil
}
// RandString returns a random string of length n drawn from ASCII letters
// and digits. It reseeds the global math/rand source from the wall clock on
// every call (kept for compatibility); not cryptographically secure.
func RandString(n int) string {
	out := make([]byte, n)
	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
	rand.Seed(time.Now().UnixNano())
	cache, remain := rand.Int63(), letterIdxMax
	for pos := n - 1; pos >= 0; {
		if remain == 0 {
			cache, remain = rand.Int63(), letterIdxMax
		}
		// Reject 6-bit indices beyond the charset to keep a uniform draw.
		if idx := int(cache & letterIdxMask); idx < len(letterAlphabetsAndNumbers) {
			out[pos] = letterAlphabetsAndNumbers[idx]
			pos--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(out)
}
// RandNumberString returns a random string of length n drawn from the digits
// 0-9. It reseeds the global math/rand source from the wall clock on every
// call (kept for compatibility); not cryptographically secure.
func RandNumberString(n int) string {
	out := make([]byte, n)
	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
	rand.Seed(time.Now().UnixNano())
	cache, remain := rand.Int63(), letterIdxMax
	for pos := n - 1; pos >= 0; {
		if remain == 0 {
			cache, remain = rand.Int63(), letterIdxMax
		}
		// Reject 6-bit indices beyond the charset to keep a uniform draw.
		if idx := int(cache & letterIdxMask); idx < len(letterNumbers) {
			out[pos] = letterNumbers[idx]
			pos--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(out)
}
// RandAlphabetString returns a random string of length n drawn from ASCII
// letters only. It reseeds the global math/rand source from the wall clock
// on every call (kept for compatibility); not cryptographically secure.
func RandAlphabetString(n int) string {
	out := make([]byte, n)
	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
	rand.Seed(time.Now().UnixNano())
	cache, remain := rand.Int63(), letterIdxMax
	for pos := n - 1; pos >= 0; {
		if remain == 0 {
			cache, remain = rand.Int63(), letterIdxMax
		}
		// Reject 6-bit indices beyond the charset to keep a uniform draw.
		if idx := int(cache & letterIdxMask); idx < len(letterAlphabets) {
			out[pos] = letterAlphabets[idx]
			pos--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(out)
}
// Sha1Sum returns the hex-encoded SHA-1 digest of data, in the lowercase
// form commonly shown by tools such as git.
func Sha1Sum(data string) (sum string) {
	digest := sha1.Sum([]byte(data))
	return fmt.Sprintf("%x", digest)
}
// SplitToIntSlice parses a comma-separated list of integers, trimming
// whitespace around each entry and silently skipping entries that fail to
// parse.
func SplitToIntSlice(str string) (slice []int) {
	parts := strings.Split(strings.TrimSpace(str), ",")
	slice = make([]int, 0, len(parts))
	for _, part := range parts {
		if n, err := strconv.Atoi(strings.TrimSpace(part)); err == nil {
			slice = append(slice, n)
		}
	}
	return
}
// UniqDup returns a new slice with duplicate elements of data (a slice or
// array) removed, preserving first-seen order. If the element type has a
// UniqId() method returning int, elements are deduplicated by that id;
// otherwise by the element value itself (which must be hashable as a map
// key, or this panics).
// NOTE(review): on a bad UniqId signature the error return carries
// struct{}{} instead of data — confirm callers tolerate that.
func UniqDup(data interface{}) (interface{}, error) {
	slice := reflect.ValueOf(data)
	if slice.Kind() != reflect.Slice && slice.Kind() != reflect.Array {
		return data, errors.New("data is not a slice or array")
	}
	// Tracks ids (or values) already emitted.
	_map := make(map[interface{}]struct{})
	uniq_slice := reflect.MakeSlice(slice.Type(), 0, slice.Len())
	in := make([]reflect.Value, 0)
	for i := 0; i < slice.Len(); i++ {
		item := slice.Index(i)
		_, ok := reflect.TypeOf(item.Interface()).MethodByName("UniqId")
		if ok {
			// Deduplicate by the element's UniqId() int.
			retval := item.MethodByName("UniqId").Call(in)
			if len(retval) == 1 && retval[0].Kind() == reflect.Int {
				id := retval[0].Int()
				if _, ok := _map[id]; !ok {
					uniq_slice = reflect.Append(uniq_slice, slice.Index(i))
					_map[id] = struct{}{}
				}
			} else {
				return struct{}{}, errors.New("func (UniqId) return value not int")
			}
		} else {
			// Deduplicate by the element value itself.
			if _, ok := _map[item.Interface()]; !ok {
				uniq_slice = reflect.Append(uniq_slice, slice.Index(i))
				_map[item.Interface()] = struct{}{}
			}
		}
	}
	return uniq_slice.Interface(), nil
}
// Uniq deduplicates the slice pointed to by data in place (see UniqDup for
// the deduplication rules). data must be a pointer to a slice or array;
// otherwise an error is returned and the target is left untouched.
func Uniq(data interface{}) (err error) {
	value := reflect.ValueOf(data)
	if value.Kind() != reflect.Ptr {
		err = errors.New("not a pointer")
		return
	}
	uniq_slice, err := UniqDup(value.Elem().Interface())
	if err != nil {
		return
	}
	// Write the deduplicated slice back through the pointer.
	ptr := reflect.ValueOf(uniq_slice)
	value.Elem().Set(ptr)
	return nil
}
// Unused silences "declared but not used" compiler errors while debugging by
// printing each argument's value and dynamic type.
func Unused(args ...interface{}) {
	for _, arg := range args {
		// %T (dynamic type) replaces the original %t, which is the boolean
		// verb and printed "%!t(...)" for every non-bool argument.
		fmt.Printf("unused var: %v(%T)\n", arg, arg)
	}
}
|
package fluent
import (
"fmt"
"reflect"
"strconv"
"strings"
)
// scannerTag is the struct tag key used to map result columns onto fields.
const scannerTag = "sql"

// scanner captures a single column value delivered by the SQL driver.
type scanner struct {
	value interface{}
}

// one and all select the scanning strategy: a single struct vs. a slice.
type one struct{}
type all struct{}

// scannerType abstracts scanning a column-name -> value map into a
// destination.
type scannerType interface {
	scan(s interface{}, vals map[string]interface{}) error
}
// scan fills a single struct s from vals.
func (o *one) scan(s interface{}, vals map[string]interface{}) error {
	return scanStruct(s, vals)
}

// scan appends one row built from vals to the slice pointed to by s.
func (a *all) scan(s interface{}, vals map[string]interface{}) error {
	return scanStructSlice(s, vals)
}
// Scan set the value and check if we need to convert it
//
// Scan stores a driver value: []byte values (how drivers return strings and
// floats) are converted to string, and further to float64 when the string
// parses as a float. Scan never fails.
func (sc *scanner) Scan(val interface{}) error {
	// Idiom: a single assertion binds the []byte directly; the previous code
	// repeated the same check with a second assertion to []uint8, which is
	// the identical type.
	if b, ok := val.([]byte); ok {
		str := string(b)
		val = str
		if f, err := strconv.ParseFloat(str, 64); err == nil {
			val = f
		}
	}
	sc.value = val
	return nil
}
// scanStruct copies values from the column-name -> value map vals into the
// fields of the struct pointed to by s, matching on the "sql" struct tag.
// Fields without a tag are skipped; pointer fields are recursively scanned
// into a freshly allocated value.
// NOTE(review): a pointer field is allocated and set even when none of its
// nested tags match, and a pointer field that also has a matching tag then
// falls through to setFieldValue — confirm both are intended.
func scanStruct(s interface{}, vals map[string]interface{}) error {
	valsLen := len(vals)
	if valsLen == 0 {
		return fmt.Errorf("The values map shouldn't be empty")
	}
	valOf := reflect.Indirect(reflect.ValueOf(s))
	if valOf.Kind() != reflect.Struct {
		return fmt.Errorf("The provided interface is not a struct")
	}
	for i := 0; i < valOf.Type().NumField(); i++ {
		field := valOf.Field(i)
		tag := valOf.Type().Field(i).Tag.Get(scannerTag)
		// Skip empty tags
		if len(strings.TrimSpace(tag)) == 0 {
			continue
		}
		fieldName := valOf.Type().Field(i).Name
		if !field.CanSet() {
			return fmt.Errorf("Can't set the value for field: %s", fieldName)
		}
		// Pointer fields: allocate a new value and scan into it recursively.
		if field.Kind() == reflect.Ptr {
			ptr := reflect.New(field.Type().Elem()).Interface()
			if err := scanStruct(ptr, vals); err != nil {
				return err
			}
			field.Set(reflect.ValueOf(ptr))
		}
		// Check if the tag exists
		if val := vals[tag]; val != nil {
			if err := setFieldValue(field, val); err != nil {
				return fmt.Errorf("Field %s: %s", fieldName, err)
			}
		}
	}
	return nil
}
// setFieldValue assigns v to the struct field, converting according to
// the field's kind. Integer fields accept int64 or int sources; float,
// string and bool fields require the exact Go type. Any other kind is
// set directly via reflection and assumes the types already match.
func setFieldValue(field reflect.Value, v interface{}) error {
	switch field.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch n := v.(type) {
		case int64:
			field.SetInt(n)
		case int:
			field.SetInt(int64(n))
		default:
			return fmt.Errorf("unable to set the integer value")
		}
	case reflect.Float32, reflect.Float64:
		f, ok := v.(float64)
		if !ok {
			return fmt.Errorf("unable to set the float value")
		}
		field.SetFloat(f)
	case reflect.String:
		s, ok := v.(string)
		if !ok {
			return fmt.Errorf("unable to set the string value")
		}
		field.SetString(s)
	case reflect.Bool:
		b, ok := v.(bool)
		if !ok {
			return fmt.Errorf("unable to set the bool value")
		}
		field.SetBool(b)
	default:
		field.Set(reflect.ValueOf(v))
	}
	return nil
}
// scanStructSlice decodes one row (vals) into a freshly allocated
// element and appends it to the slice that s points at.
func scanStructSlice(s interface{}, vals map[string]interface{}) error {
	if s == nil {
		return fmt.Errorf("The slice shouldn't be empty")
	}
	if len(vals) == 0 {
		return fmt.Errorf("The values map shouldn't be empty")
	}
	if reflect.TypeOf(s).Kind() != reflect.Ptr {
		return fmt.Errorf("The provided type is not a pointer")
	}
	dst := reflect.ValueOf(s).Elem()
	if dst.Kind() != reflect.Slice {
		return fmt.Errorf("The provided value is not a slice")
	}
	// Decode into a new element of the slice's element type, then
	// append it by value.
	elem := reflect.New(dst.Type().Elem())
	if err := scanStruct(elem.Interface(), vals); err != nil {
		return err
	}
	dst.Set(reflect.Append(dst, elem.Elem()))
	return nil
}
// getStructValues walks the fields of the struct s (pointers are
// dereferenced first) and collects, for every field with a non-blank
// sql tag and a non-zero value, the tag as a column name and the field
// value as an argument. It errors when s is not a struct.
func getStructValues(s interface{}) ([]string, []interface{}, error) {
	sv := reflect.Indirect(reflect.ValueOf(s))
	if sv.Kind() != reflect.Struct {
		return nil, nil, fmt.Errorf("The provided interface is not a struct")
	}
	var (
		cols []string
		args []interface{}
	)
	st := sv.Type()
	for i := 0; i < st.NumField(); i++ {
		tag := st.Field(i).Tag.Get(scannerTag)
		// Untagged fields never participate in queries.
		if strings.TrimSpace(tag) == "" {
			continue
		}
		fieldVal := sv.Field(i).Interface()
		// Zero values are treated as "unset" and omitted.
		if isZero(fieldVal) {
			continue
		}
		args = append(args, fieldVal)
		cols = append(cols, tag)
	}
	return cols, args, nil
}
// isZero reports whether v equals the zero value of its dynamic type.
// Note: v must be non-nil (reflect.TypeOf(nil) would be nil).
func isZero(v interface{}) bool {
	zero := reflect.Zero(reflect.TypeOf(v)).Interface()
	return reflect.DeepEqual(v, zero)
}
|
package InformaCast
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
)
// RestDialCastDialingConfig represents the JSON the API expects to receive.
// The bad naming convention is provided to you by the official InformaCast REST API documentation.
//
// NOTE(review): every field here is unexported, and encoding/json
// ignores unexported fields, so json.Marshal of this type produces
// "{}". Confirm whether these were meant to be exported with json tags.
type RestDialCastDialingConfig struct {
	id                  int64    // record identifier, used in URL paths
	dialingPatternRegEx string   // pattern matched against dialed digits
	authType            int64    // authentication type code
	messageId           int64    // message to broadcast
	messageDescription  string   // human-readable message description
	recipientGroups     []int64  // target recipient group IDs
	recipientGroupName  string   // target recipient group name
	dialcodes           []string // dial codes mapped to this config
}

// DialingConfigurationResponses is the json object returned during get requests.
//
// NOTE(review): only Index and Id are exported; the remaining fields
// cannot be populated by json.Unmarshal — confirm intended.
type DialingConfigurationResponses struct {
	Index               int64
	Id                  int64
	dialingPatternRegEx string
	message             string
	messageDescription  string
	link                string
}

// DialingConfigrations bundles the query and HTTP (server/credential)
// parameters used by the DialCast endpoints below.
type DialingConfigrations struct {
	QueryParameters
	HttpParameters
}
// Get retrieves all DialCast dialing configurations from the server
// and returns only the Data portion of the paged response. The API
// allows batching, but this module just gets everything.
func (d DialingConfigrations) Get() (Data []DialingConfigurationResponses, err error) {
	url := "https://" + d.Server + ":8444/InformaCast/RESTServices/V1/Admin/DialCast/dialingConfigurations" + d.QueryParameters.Compile()
	// make the get Request
	response, err := request("Get", url, nil, d.Username, d.Password)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	// parse the response body; a read failure is a normal runtime error
	// and is returned to the caller (this previously panicked).
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}
	// we are only interested in the data part of this object.
	var fullJsonResponse struct {
		Total    uint64
		Previous uint64
		Next     string
		Data     []DialingConfigurationResponses
	}
	// BUG FIX: the unmarshal error used to be silently discarded.
	if err = json.Unmarshal(body, &fullJsonResponse); err != nil {
		return nil, err
	}
	return fullJsonResponse.Data, nil
}
// GetOne retrieves a single DialCast record identified by c.id.
// It returns an error when the server responds with no matching record.
func (d DialingConfigrations) GetOne(c RestDialCastDialingConfig) (Data DialingConfigurationResponses, err error) {
	url := "https://" + d.Server + ":8444/InformaCast/RESTServices/V1/Admin/DialCast/dialingConfigurations/" + fmt.Sprint(c.id) + d.QueryParameters.Compile()
	// create json byte array
	// NOTE(review): sending a body with a GET request is unusual —
	// confirm the API actually requires it.
	Json, err := json.Marshal(c)
	if err != nil {
		return Data, err
	}
	// make the get Request
	response, err := request("Get", url, Json, d.Username, d.Password)
	if err != nil {
		return Data, err
	}
	defer response.Body.Close()
	// parse the response body; return the error instead of panicking.
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return Data, err
	}
	// we are only interested in the data part of this object.
	var fullJsonResponse struct {
		Total    uint64
		Previous uint64
		Next     string
		Data     []DialingConfigurationResponses
	}
	// BUG FIX: the unmarshal error used to be silently discarded.
	if err = json.Unmarshal(body, &fullJsonResponse); err != nil {
		return Data, err
	}
	// BUG FIX: indexing Data[0] on an empty response used to panic.
	if len(fullJsonResponse.Data) == 0 {
		return Data, errors.New("Could not find DialCast record.")
	}
	return fullJsonResponse.Data[0], nil
}
// New creates a new DialCast record and returns the record echoed back
// by the server.
func (d DialingConfigrations) New(c RestDialCastDialingConfig) (Data DialingConfigurationResponses, err error) {
	url := "https://" + d.Server + ":8444/InformaCast/RESTServices/V1/Admin/DialCast/dialingConfigurations" + d.QueryParameters.Compile()
	// create json byte array
	Json, err := json.Marshal(c)
	if err != nil {
		return Data, err
	}
	// make the create request
	response, err := request("POST", url, Json, d.Username, d.Password)
	if err != nil {
		return Data, err
	}
	defer response.Body.Close()
	// parse the response body; return the error instead of panicking.
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return Data, err
	}
	// we are only interested in the data part of this object.
	var fullJsonResponse struct {
		Total    uint64
		Previous uint64
		Next     string
		Data     []DialingConfigurationResponses
	}
	// BUG FIX: the unmarshal error used to be silently discarded.
	if err = json.Unmarshal(body, &fullJsonResponse); err != nil {
		return Data, err
	}
	// BUG FIX: indexing Data[0] on an empty response used to panic.
	if len(fullJsonResponse.Data) == 0 {
		return Data, errors.New("empty response from server")
	}
	return fullJsonResponse.Data[0], nil
}
// Update updates a single DialCast record identified by c.id and
// returns the record echoed back by the server.
func (d DialingConfigrations) Update(c RestDialCastDialingConfig) (Data DialingConfigurationResponses, err error) {
	url := "https://" + d.Server + ":8444/InformaCast/RESTServices/V1/Admin/DialCast/dialingConfigurations/" + fmt.Sprint(c.id) + d.QueryParameters.Compile()
	// create json byte array
	Json, err := json.Marshal(c)
	if err != nil {
		return Data, err
	}
	// BUG FIX: an update must use the PUT verb; this previously sent
	// "Get" (copy/paste from GetOne), so the record was never updated.
	response, err := request("PUT", url, Json, d.Username, d.Password)
	if err != nil {
		return Data, err
	}
	defer response.Body.Close()
	// parse the response body; return the error instead of panicking.
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return Data, err
	}
	// we are only interested in the data part of this object.
	var fullJsonResponse struct {
		Total    uint64
		Previous uint64
		Next     string
		Data     []DialingConfigurationResponses
	}
	// BUG FIX: the unmarshal error used to be silently discarded.
	if err = json.Unmarshal(body, &fullJsonResponse); err != nil {
		return Data, err
	}
	// BUG FIX: indexing Data[0] on an empty response used to panic.
	if len(fullJsonResponse.Data) == 0 {
		return Data, errors.New("empty response from server")
	}
	return fullJsonResponse.Data[0], nil
}
// Delete removes a single DialCast record identified by c.id.
// The record is looked up first so a missing record yields a clear
// error instead of a server-side failure.
func (d DialingConfigrations) Delete(c RestDialCastDialingConfig) (err error) {
	// find the record
	data, err := d.GetOne(c)
	// BUG FIX: the lookup error used to be ignored entirely.
	if err != nil {
		return err
	}
	if data.Id == 0 {
		return errors.New("Could not find DialCast record.")
	}
	url := "https://" + d.Server + ":8444/InformaCast/RESTServices/V1/Admin/DialCast/dialingConfigurations/" + fmt.Sprint(c.id)
	// create json byte array
	Json, err := json.Marshal(c)
	if err != nil {
		return err
	}
	// BUG FIX: a delete must use the DELETE verb; this previously sent "Get".
	response, err := request("DELETE", url, Json, d.Username, d.Password)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	return nil
}
|
package receiver
// ExposableError is an error whose message is considered safe to
// expose outside the package (e.g. to API clients).
type ExposableError struct {
	err string
}

// NewExposableError wraps the given message in an ExposableError.
func NewExposableError(err string) ExposableError {
	return ExposableError{err: err}
}

// Error implements the error interface and returns the wrapped message.
func (e ExposableError) Error() string {
	return e.err
}
|
package wso2
import (
"bytes"
"crypto/rsa"
"encoding/base64"
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/golang-jwt/jwt/v4"
)
// errCantIdentifyKey is the sentinel returned by validationFunc when a
// JWT carries no x5t header; ValidateJWT matches on its message to
// trigger a brute-force try of every cached key.
var errCantIdentifyKey = fmt.Errorf("Unable to identify key used for signing")

// Client represents an instance of a WSO2 client and contains all the
// configuration values necessary to run
type Client struct {
	gatewayURL   string // base URL of the WSO2 gateway, with trailing slash (paths are appended directly)
	clientID     string // OAuth client ID
	clientSecret string // OAuth client secret
	callbackURL  string // OAuth redirect URI

	// keyCache maps the x5t certificate thumbprint from the JWKS
	// document to the corresponding RSA public key; guarded by cacheMux.
	keyCache    map[string]*rsa.PublicKey
	keyCacheExp time.Time
	cacheMux    sync.RWMutex

	// token/tokenExp hold a cached client-credentials token; guarded by
	// tokenMux. (Not used in the code visible here.)
	token    string
	tokenExp time.Time
	tokenMux sync.RWMutex
}
// New returns a WSO2 client configured with the given credentials,
// gateway URL, and OAuth callback URL.
func New(clientID, clientSecret, gatewayURL, callbackURL string) *Client {
	return &Client{
		clientID:     clientID,
		clientSecret: clientSecret,
		gatewayURL:   gatewayURL,
		callbackURL:  callbackURL,
	}
}
// AuthCodeResponse represents the response given by WSO2 when exchanging an
// authorization code for a token.
type AuthCodeResponse struct {
	ExpiresIn    int    `json:"expires_in"`    // access-token lifetime in seconds
	RefreshToken string `json:"refresh_token"` // token used to obtain a new access token
	AccessToken  string `json:"access_token"`  // bearer token for API calls
	IDToken      string `json:"id_token"`      // OpenID Connect ID token, if the openid scope was granted
}
// CallbackURL returns the callback url set for the client.
func (c *Client) CallbackURL() string {
	return c.callbackURL
}
// ValidateAuthorizationCode validates the given authorization code and returns
// the response including the token, refresh token, and ID token if it exists.
// It POSTs the standard authorization_code grant form to the gateway's
// token endpoint using HTTP basic auth with the client credentials.
func (c *Client) ValidateAuthorizationCode(ac string) (AuthCodeResponse, error) {
	data := url.Values{}
	data.Set("grant_type", "authorization_code")
	data.Set("code", ac)
	data.Set("redirect_uri", c.callbackURL)

	// Send back the auth code in exchange for a token and refresh token.
	// gatewayURL is assumed to end with "/" so "%stoken" forms a valid URL.
	req, err := http.NewRequest("POST", fmt.Sprintf("%stoken", c.gatewayURL), strings.NewReader(data.Encode()))
	if err != nil {
		return AuthCodeResponse{}, fmt.Errorf("Error while trying to create authorization code request: %w", err)
	}

	req.SetBasicAuth(c.clientID, c.clientSecret)
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")

	// NOTE(review): http.DefaultClient has no timeout, so this call can
	// block indefinitely if the gateway hangs — consider a client with
	// an explicit Timeout.
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return AuthCodeResponse{}, fmt.Errorf("Error while making Authorization Code request: %w", err)
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		// Include the response body in the error to aid debugging;
		// the read error is deliberately ignored (best effort).
		body, _ := ioutil.ReadAll(res.Body)
		return AuthCodeResponse{}, fmt.Errorf("Got non 200 response back from the token endpoint: %d %s", res.StatusCode, body)
	}

	// Read the body and parse it
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return AuthCodeResponse{}, fmt.Errorf("Error while trying to read the Auth Code Response body: %w", err)
	}

	codeRes := AuthCodeResponse{}
	err = json.Unmarshal(b, &codeRes)
	if err != nil {
		return AuthCodeResponse{}, fmt.Errorf("Error while trying to unmarshal the auth code response: %w", err)
	}

	return codeRes, nil
}
// GetAuthCodeURL returns the authorize URL to redirect users to for the
// Authorization Code OAuth2.0 Grant type. It expects a state identifier to be
// passed in, this identifier helps identify state when the response comes back.
//
// The client ID, redirect URI and state are query-escaped so values
// containing reserved characters (e.g. "://", "&", "=") survive the
// round trip intact; previously they were interpolated raw.
func (c *Client) GetAuthCodeURL(state string) string {
	return fmt.Sprintf("%sauthorize?response_type=code&client_id=%s&redirect_uri=%s&scope=openid&state=%s",
		c.gatewayURL, url.QueryEscape(c.clientID), url.QueryEscape(c.callbackURL), url.QueryEscape(state))
}
// ValidateJWT validates the given JWT and if it is valid returns the claims.
// It first tries automatic key selection via the token's x5t header; when
// the header is absent it falls back to trying every cached public key.
func (c *Client) ValidateJWT(j string) (map[string]interface{}, error) {
	// Refresh the cache if it is not set or expired
	if c.keyCacheExp.IsZero() || time.Now().After(c.keyCacheExp) {
		// Ignore an error from refresh key cache and try to validate anyways
		_ = c.refreshKeyCache()
	}

	// Try to validate using automatic key selection
	token, err := jwt.Parse(j, c.validationFunc(nil))
	if err != nil {
		// If we weren't able to figure out which key to use
		if strings.Contains(err.Error(), errCantIdentifyKey.Error()) {
			c.cacheMux.RLock()
			defer c.cacheMux.RUnlock()
			for _, k := range c.keyCache {
				// NOTE(review): this token/newErr pair shadows the outer
				// token and err; the returns inside the loop use the
				// shadowed token, while the fall-through error handling at
				// the bottom still sees the outer (failed) parse unless
				// err is reassigned below — confirm this is intended.
				token, newErr := jwt.Parse(j, c.validationFunc(k))
				if ve, ok := newErr.(*jwt.ValidationError); ok {
					// if the error is due to invalid signature then continue
					if ve.Errors&jwt.ValidationErrorSignatureInvalid != 0 {
						continue
					}
					// Ignore issued at errors due to WSO2
					if ve.Errors&jwt.ValidationErrorIssuedAt != 0 {
						return token.Claims.(jwt.MapClaims), nil
					}
				}
				// for any other error break because something else is wrong
				if newErr != nil {
					err = newErr
					break
				}
				// If we found the right signature then we're good
				if token.Valid {
					return token.Claims.(jwt.MapClaims), nil
				}
			}
			// The last error will fall through the normal processing
		}
		if ve, ok := err.(*jwt.ValidationError); ok {
			if ve.Errors&jwt.ValidationErrorExpired != 0 {
				return nil, fmt.Errorf("JWT is expired")
			}
			if ve.Errors&jwt.ValidationErrorSignatureInvalid != 0 {
				return nil, fmt.Errorf("JWT signature is invalid")
			}
		}
		// for all other errors just return a generic error
		return nil, fmt.Errorf("Failed to validate token: %w", err)
	}

	return token.Claims.(jwt.MapClaims), nil
}
// validationFunc returns a jwt.Keyfunc. When k is non-nil it is used
// unconditionally; otherwise the key is looked up in the cache by the
// token's x5t header, and errCantIdentifyKey is returned when the
// header is missing (triggering the brute-force path in ValidateJWT).
func (c *Client) validationFunc(k *rsa.PublicKey) jwt.Keyfunc {
	return func(token *jwt.Token) (interface{}, error) {
		// Check that the signing method is RSA (this check is required due to a security
		// vulnerability in the JWT standard)
		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}

		// Parse the claims
		claims, ok := token.Claims.(jwt.MapClaims)
		if !ok {
			return nil, fmt.Errorf("Unable to parse claims")
		}

		// Check that the issuer is who we expect
		if iss, ok := claims["iss"].(string); ok {
			if iss != c.gatewayURL && !strings.HasPrefix(iss, "https://wso2-is.byu.edu") {
				return nil, fmt.Errorf("Unexpected issuer: %s", iss)
			}
		}

		// If a key has been passed in use that
		if k != nil {
			return k, nil
		}

		// Otherwise, try to determine the key.
		// NOTE(review): c.keyCache is read here without holding cacheMux;
		// the initial jwt.Parse in ValidateJWT reaches this path unlocked
		// and could race with refreshKeyCache — confirm and consider
		// taking an RLock here.
		if x5t, ok := token.Header["x5t"].(string); ok {
			if key, ok := c.keyCache[x5t]; ok {
				return key, nil
			}
			return nil, fmt.Errorf("Unrecognized key used for JWT signature: %s", x5t)
		}

		return nil, errCantIdentifyKey
	}
}
// refreshKeyCache downloads the gateway's openid-configuration, follows
// its jwks_uri, converts every advertised key to an *rsa.PublicKey, and
// replaces the x5t-keyed cache. The cache expiry is taken from the JWKS
// response's cache-control max-age, defaulting to one hour.
func (c *Client) refreshKeyCache() error {
	// Get openid-configuration document
	res, err := http.Get(fmt.Sprintf("%s.well-known/openid-configuration", c.gatewayURL))
	if err != nil {
		return fmt.Errorf("Error while trying to get openid configuration: %w", err)
	}
	if res.StatusCode != http.StatusOK {
		// BUG FIX: the body was previously leaked on this early return.
		res.Body.Close()
		return fmt.Errorf("Got non 200 response from openid configuration endpoint")
	}

	oidConfig := struct {
		JWKSURI string `json:"jwks_uri"`
	}{}
	err = json.NewDecoder(res.Body).Decode(&oidConfig)
	res.Body.Close()
	if err != nil {
		return fmt.Errorf("Error while trying to unmarshal openid config: %w", err)
	}

	// Get JWKS document
	res, err = http.Get(oidConfig.JWKSURI)
	if err != nil {
		return fmt.Errorf("Error while trying to get JWKS document: %w", err)
	}
	if res.StatusCode != http.StatusOK {
		// BUG FIX: the body was previously leaked on this early return.
		res.Body.Close()
		return fmt.Errorf("Got non 200 status code from JWKS endpoint")
	}

	jwks := struct {
		Keys []struct {
			X5T string `json:"x5t"`
			E   string `json:"e"`
			N   string `json:"n"`
		} `json:"keys"`
	}{}
	err = json.NewDecoder(res.Body).Decode(&jwks)
	res.Body.Close()
	if err != nil {
		return fmt.Errorf("Error while trying to unmarshal JWKS doc: %w", err)
	}

	// Calculate new cache expiry time to add.
	// NOTE(review): this assumes the header is exactly
	// "public max-age=<seconds>"; other orderings fall back to 3600s.
	cacheExp := res.Header.Get("cache-control")
	cacheExp = strings.TrimPrefix(cacheExp, "public max-age=")
	expSeconds, err := strconv.Atoi(cacheExp)
	if err != nil || expSeconds == 0 {
		expSeconds = 3600
	}

	// Add keys to cache
	c.cacheMux.Lock()
	defer c.cacheMux.Unlock()
	c.keyCache = make(map[string]*rsa.PublicKey)
	for _, k := range jwks.Keys {
		cert, err := eNToPubKey(k.E, k.N)
		if err != nil {
			return fmt.Errorf("Failed to parse key: %w", err)
		}
		c.keyCache[k.X5T] = cert
	}

	// Update cache expiry timestamp
	c.keyCacheExp = time.Now().Add(time.Second * time.Duration(expSeconds))

	return nil
}
// eNToPubKey converts the JWK "e" (public exponent) and "n" (modulus)
// values — base64url-encoded big-endian integers per RFC 7518 — into
// an *rsa.PublicKey.
func eNToPubKey(e, n string) (*rsa.PublicKey, error) {
	// BUG FIX: "e" was decoded with StdEncoding, which rejects the
	// unpadded base64url values JWKS actually uses (e.g. "Aw" for 3;
	// only the 4-char "AQAB" happened to work). Decode as unpadded
	// base64url first, falling back to standard encoding for any
	// legacy padded values.
	ebytes, err := base64.RawURLEncoding.DecodeString(e)
	if err != nil {
		ebytes, err = base64.StdEncoding.DecodeString(e)
		if err != nil {
			return nil, fmt.Errorf("Error while decoding e: %w", err)
		}
	}
	// Left-pad to 8 bytes so the exponent can be read as a uint64.
	if len(ebytes) < 8 {
		padding := make([]byte, 8-len(ebytes), 8)
		ebytes = append(padding, ebytes...)
	}
	var eInt uint64
	err = binary.Read(bytes.NewReader(ebytes), binary.BigEndian, &eInt)
	if err != nil {
		return nil, fmt.Errorf("Error while reading e: %w", err)
	}

	nBytes, err := base64.RawURLEncoding.DecodeString(n)
	if err != nil {
		return nil, fmt.Errorf("Error while decoding n: %w", err)
	}
	nInt := big.NewInt(0)
	nInt.SetBytes(nBytes)

	return &rsa.PublicKey{
		N: nInt,
		E: int(eInt),
	}, nil
}
|
package operands
import (
"context"
"fmt"
hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1"
"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common"
"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils"
hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
. "github.com/onsi/ginkgo"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/reference"
)
// Ginkgo specs for the monitoring operand. Each Context exercises one
// monitoring resource (metrics Service, ServiceMonitor, PrometheusRule)
// through the generic operand's ensure() flow: create the resource when
// absent, adopt it (registering it in RelatedObjects) when present, and
// overwrite out-of-band user modifications back to the operator
// defaults when the reconcile was not HCO-triggered.
var _ = Describe("Monitoring Operand", func() {
	Context("Metrics Service", func() {
		var hco *hcov1beta1.HyperConverged
		var req *common.HcoRequest

		BeforeEach(func() {
			hco = commonTestUtils.NewHco()
			req = commonTestUtils.NewReq(hco)
		})

		It("should create if not present", func() {
			expectedResource := NewMetricsService(hco, commonTestUtils.Namespace)
			// Empty fake client: ensure() must take the create path.
			cl := commonTestUtils.InitClient([]runtime.Object{})
			handler := (*genericOperand)(newMetricsServiceHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeTrue())
			Expect(res.Updated).To(BeFalse())
			Expect(res.Overwritten).To(BeFalse())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			foundResource := &corev1.Service{}
			Expect(
				cl.Get(context.TODO(),
					types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace},
					foundResource),
			).To(BeNil())
			Expect(foundResource.Name).To(Equal(expectedResource.Name))
			Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name))
			Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace))
		})

		It("should find if present", func() {
			expectedResource := NewMetricsService(hco, commonTestUtils.Namespace)
			cl := commonTestUtils.InitClient([]runtime.Object{expectedResource})
			handler := (*genericOperand)(newMetricsServiceHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeFalse())
			Expect(res.Updated).To(BeFalse())
			Expect(res.Overwritten).To(BeFalse())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			// An adopted resource must be recorded in RelatedObjects.
			objectRef, err := reference.GetReference(handler.Scheme, expectedResource)
			Expect(err).To(BeNil())
			Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef))
		})

		It("should reconcile to default", func() {
			existingResource := NewMetricsService(hco, commonTestUtils.Namespace)
			existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name)
			// Simulate an out-of-band user edit of the service port.
			existingResource.Spec.Ports[0].Name = "Non default value"
			existingResource.Spec.Ports[0].Port = 0
			// Non-HCO-triggered reconcile: changes must be overwritten.
			req.HCOTriggered = false
			cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource})
			handler := (*genericOperand)(newMetricsServiceHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeFalse())
			Expect(res.Updated).To(BeTrue())
			Expect(res.Overwritten).To(BeTrue())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			foundResource := &corev1.Service{}
			Expect(
				cl.Get(context.TODO(),
					types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace},
					foundResource),
			).To(BeNil())
			Expect(foundResource.Spec.Ports[0].Name).To(BeIdenticalTo(operatorPortName))
			Expect(foundResource.Spec.Ports[0].Port).To(BeIdenticalTo(hcoutil.MetricsPort))
		})
	})

	Context("Service Monitor", func() {
		var hco *hcov1beta1.HyperConverged
		var req *common.HcoRequest

		BeforeEach(func() {
			hco = commonTestUtils.NewHco()
			req = commonTestUtils.NewReq(hco)
		})

		It("should create if not present", func() {
			expectedResource := NewServiceMonitor(hco, commonTestUtils.Namespace)
			cl := commonTestUtils.InitClient([]runtime.Object{})
			handler := (*genericOperand)(newMetricsServiceMonitorHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeTrue())
			Expect(res.Updated).To(BeFalse())
			Expect(res.Overwritten).To(BeFalse())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			foundResource := &monitoringv1.ServiceMonitor{}
			Expect(
				cl.Get(context.TODO(),
					types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace},
					foundResource),
			).To(BeNil())
			Expect(foundResource.Name).To(Equal(expectedResource.Name))
			Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name))
			Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace))
		})

		It("should find if present", func() {
			expectedResource := NewServiceMonitor(hco, commonTestUtils.Namespace)
			cl := commonTestUtils.InitClient([]runtime.Object{expectedResource})
			handler := (*genericOperand)(newMetricsServiceMonitorHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeFalse())
			Expect(res.Updated).To(BeFalse())
			Expect(res.Overwritten).To(BeFalse())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			objectRef, err := reference.GetReference(handler.Scheme, expectedResource)
			Expect(err).To(BeNil())
			Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef))
		})

		It("should reconcile to default", func() {
			existingResource := NewServiceMonitor(hco, commonTestUtils.Namespace)
			existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name)
			// Simulate an out-of-band user edit of the endpoint port.
			existingResource.Spec.Endpoints[0].Port = "Non default value"
			req.HCOTriggered = false
			cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource})
			handler := (*genericOperand)(newMetricsServiceMonitorHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeFalse())
			Expect(res.Updated).To(BeTrue())
			Expect(res.Overwritten).To(BeTrue())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			foundResource := &monitoringv1.ServiceMonitor{}
			Expect(
				cl.Get(context.TODO(),
					types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace},
					foundResource),
			).To(BeNil())
			Expect(foundResource.Spec.Endpoints[0].Port).To(BeIdenticalTo(operatorPortName))
		})
	})

	Context("Prometheus rule", func() {
		var hco *hcov1beta1.HyperConverged
		var req *common.HcoRequest

		BeforeEach(func() {
			hco = commonTestUtils.NewHco()
			req = commonTestUtils.NewReq(hco)
		})

		It("should create if not present", func() {
			expectedResource := NewPrometheusRule(hco, commonTestUtils.Namespace)
			cl := commonTestUtils.InitClient([]runtime.Object{})
			handler := (*genericOperand)(newMonitoringPrometheusRuleHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeTrue())
			Expect(res.Updated).To(BeFalse())
			Expect(res.Overwritten).To(BeFalse())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			foundResource := &monitoringv1.PrometheusRule{}
			Expect(
				cl.Get(context.TODO(),
					types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace},
					foundResource),
			).To(BeNil())
			Expect(foundResource.Name).To(Equal(expectedResource.Name))
			Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name))
			Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace))
		})

		It("should find if present", func() {
			expectedResource := NewPrometheusRule(hco, commonTestUtils.Namespace)
			cl := commonTestUtils.InitClient([]runtime.Object{expectedResource})
			handler := (*genericOperand)(newMonitoringPrometheusRuleHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeFalse())
			Expect(res.Updated).To(BeFalse())
			Expect(res.Overwritten).To(BeFalse())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			objectRef, err := reference.GetReference(handler.Scheme, expectedResource)
			Expect(err).To(BeNil())
			Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef))
		})

		It("should reconcile to default", func() {
			existingResource := NewPrometheusRule(hco, commonTestUtils.Namespace)
			existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name)
			// Simulate out-of-band user edits of the rule group and alert.
			existingResource.Spec.Groups[0].Name = "Non default value"
			existingResource.Spec.Groups[0].Rules[0].Alert = "Non default value"
			req.HCOTriggered = false
			cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource})
			handler := (*genericOperand)(newMonitoringPrometheusRuleHandler(cl, commonTestUtils.GetScheme()))
			res := handler.ensure(req)
			Expect(res.Created).To(BeFalse())
			Expect(res.Updated).To(BeTrue())
			Expect(res.Overwritten).To(BeTrue())
			Expect(res.UpgradeDone).To(BeFalse())
			Expect(res.Err).To(BeNil())

			foundResource := &monitoringv1.PrometheusRule{}
			Expect(
				cl.Get(context.TODO(),
					types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace},
					foundResource),
			).To(BeNil())
			Expect(foundResource.Spec.Groups[0].Name).To(BeIdenticalTo(alertRuleGroup))
			Expect(foundResource.Spec.Groups[0].Rules[0].Alert).To(BeIdenticalTo(outOfBandUpdateAlert))
		})
	})
})
|
package boltrepo
import (
"bytes"
"encoding/binary"
"encoding/json"
"github.com/boltdb/bolt"
"github.com/scjalliance/drivestream/binpath"
"github.com/scjalliance/drivestream/collection"
"github.com/scjalliance/drivestream/page"
"github.com/scjalliance/drivestream/resource"
)
// Compile-time check that Pages implements page.Sequence.
var _ page.Sequence = (*Pages)(nil)

// Pages accesses a sequence of pages in a bolt repository.
type Pages struct {
	db         *bolt.DB       // underlying bolt database
	drive      resource.ID    // drive the collection belongs to
	collection collection.SeqNum // collection whose pages are accessed
}

// Path returns the path of the pages.
func (ref Pages) Path() binpath.Text {
	return binpath.Text{RootBucket, DriveBucket, ref.drive.String(), CollectionBucket, ref.collection.String(), PageBucket}
}
// Next returns the sequence number to use for the next page of the
// collection. It is the last stored page key plus one, or zero when
// no pages exist yet. An error is returned when the collection is
// missing or a stored key is malformed.
func (ref Pages) Next() (n page.SeqNum, err error) {
	err = ref.db.View(func(tx *bolt.Tx) error {
		col := collectionBucket(tx, ref.drive, ref.collection)
		if col == nil {
			return collection.NotFound{Drive: ref.drive, Collection: ref.collection}
		}
		pages := col.Bucket([]byte(PageBucket))
		if pages == nil {
			// No page bucket yet: the next sequence number is zero.
			return nil
		}
		cursor := pages.Cursor()
		// Keys are big-endian, so the last key is the highest number.
		k, _ := cursor.Last()
		if k == nil {
			return nil
		}
		if len(k) != 8 {
			key := append(k[:0:0], k...) // Copy key bytes
			return BadPageKey{Drive: ref.drive, Collection: ref.collection, BadKey: key}
		}
		n = page.SeqNum(binary.BigEndian.Uint64(k)) + 1
		return nil
	})
	return n, err
}
// Read reads the requested pages from a collection, starting at
// sequence number start, into p. It returns the number of pages read;
// reading stops early (without error) at the end of the bucket or at
// the first gap in the sequence.
func (ref Pages) Read(start page.SeqNum, p []page.Data) (n int, err error) {
	err = ref.db.View(func(tx *bolt.Tx) error {
		col := collectionBucket(tx, ref.drive, ref.collection)
		if col == nil {
			return collection.NotFound{Drive: ref.drive, Collection: ref.collection}
		}
		pages := col.Bucket([]byte(PageBucket))
		if pages == nil {
			return collection.PageNotFound{Drive: ref.drive, Collection: ref.collection, Page: start}
		}
		cursor := pages.Cursor()
		pos := start
		key := makePageKey(pos)
		// Seek to the first requested page; it must exist exactly.
		k, v := cursor.Seek(key[:])
		if k == nil || !bytes.Equal(key[:], k) {
			return collection.PageNotFound{Drive: ref.drive, Collection: ref.collection, Page: start}
		}
		for n < len(p) {
			if v == nil {
				return collection.PageDataInvalid{Drive: ref.drive, Collection: ref.collection, Page: pos} // All pages must be non-nil
			}
			if err := json.Unmarshal(v, &p[n]); err != nil {
				// TODO: Wrap the error in PageDataInvalid?
				return err
			}
			n++
			k, v = cursor.Next()
			if k == nil {
				// End of bucket: stop with what we have.
				break
			}
			if len(k) != 8 {
				key := append(k[:0:0], k...) // Copy key bytes
				return BadPageKey{Drive: ref.drive, Collection: ref.collection, BadKey: key}
			}
			pos = start + page.SeqNum(n)
			key = makePageKey(pos)
			if !bytes.Equal(key[:], k) {
				// The next key doesn't match the expected sequence number
				// TODO: Consider returning an error here?
				break
			}
		}
		return nil
	})
	return n, err
}
// Ref returns a page reference for the sequence number. The reference
// shares this sequence's database handle, drive, and collection.
func (ref Pages) Ref(seqNum page.SeqNum) page.Reference {
	return Page{
		db:         ref.db,
		drive:      ref.drive,
		collection: ref.collection,
		page:       seqNum,
	}
}
// Clear deletes the page bucket of the collection, removing every
// stored page. It is a no-op when no page bucket exists and fails
// when the collection itself is missing.
func (ref Pages) Clear() error {
	return ref.db.Update(func(tx *bolt.Tx) error {
		col := collectionBucket(tx, ref.drive, ref.collection)
		if col == nil {
			return collection.NotFound{Drive: ref.drive, Collection: ref.collection}
		}
		name := []byte(PageBucket)
		if col.Bucket(name) == nil {
			return nil // nothing to clear
		}
		return col.DeleteBucket(name)
	})
}
|
/*
* @lc app=leetcode id=87 lang=golang
*
* [87] Scramble String
*/
// checkScramble reports whether s2 is a scramble of s1, memoising
// results in cache under the key "s1 s2". Callers must pass strings of
// equal length.
func checkScramble(s1, s2 string, cache map[string]bool) bool {
	n := len(s1)
	if n == 1 {
		return s1 == s2
	}
	key := s1 + " " + s2
	if cached, ok := cache[key]; ok {
		return cached
	}
	// Pruning: a scramble is always an anagram, so substrings with
	// different character multisets can be rejected immediately. This
	// cuts the otherwise exponential recursion dramatically while
	// never changing the result.
	if !sameLetters(s1, s2) {
		cache[key] = false
		return false
	}
	for i := 1; i < n; i++ {
		// Either the two halves stay in order, or they were swapped.
		keepOrder := checkScramble(s1[:i], s2[:i], cache) && checkScramble(s1[i:], s2[i:], cache)
		swapped := checkScramble(s1[:i], s2[n-i:], cache) && checkScramble(s1[i:], s2[:n-i], cache)
		if keepOrder || swapped {
			cache[key] = true
			return true
		}
	}
	cache[key] = false
	return false
}

// sameLetters reports whether a and b contain the same bytes with the
// same multiplicities (i.e. they are anagrams of each other).
func sameLetters(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	var counts [256]int
	for i := 0; i < len(a); i++ {
		counts[a[i]]++
		counts[b[i]]--
	}
	for _, c := range counts {
		if c != 0 {
			return false
		}
	}
	return true
}

// isScramble reports whether s2 can be obtained from s1 by recursively
// splitting s1 into two substrings and optionally swapping them
// (LeetCode 87, Scramble String).
func isScramble(s1 string, s2 string) bool {
	return checkScramble(s1, s2, make(map[string]bool))
}
|
package main
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/rightscale/rsc/gen"
)
// Ginkgo specs for ParseRoute: each Context feeds a Rails-style route
// dump line and checks the gen.PathPattern(s) produced (HTTP method,
// sprintf-style pattern, and extracted path variables).
//
// NOTE(review): moniker is declared but never assigned in any
// BeforeEach, so ParseRoute always receives "" — confirm intended.
var _ = Describe("APIAnalyzer ParseRoute", func() {
	var (
		moniker      string
		routes       []string
		pathPatterns []*gen.PathPattern
	)

	// Runs after each Context's BeforeEach has set up routes.
	JustBeforeEach(func() {
		pathPatterns = ParseRoute(moniker, routes)
	})

	Context("given a simple route", func() {
		BeforeEach(func() {
			routes = []string{"GET /api/servers(.:format)? {:action=>\"index\", :controller=>\"servers\"}"}
		})

		It("computes the path pattern", func() {
			Ω(len(pathPatterns)).Should(Equal(1))
			Ω(pathPatterns[0].HTTPMethod).Should(Equal("GET"))
			Ω(pathPatterns[0].Pattern).Should(Equal("/api/servers"))
			Ω(pathPatterns[0].Variables).Should(BeEmpty())
		})
	})

	Context("given an obsolete route", func() {
		BeforeEach(func() {
			routes = []string{"GET /api/session(.:format)? {:action=>\"index\", :controller=>\"servers\"}"}
		})

		It("does not produce a path pattern", func() {
			Ω(len(pathPatterns)).Should(Equal(0))
		})
	})

	Context("given a route with a variable", func() {
		BeforeEach(func() {
			routes = []string{"PUT /api/servers/:id(.:format)? {:action=>\"index\", :controller=>\"servers\"}"}
		})

		It("computes the path pattern", func() {
			// ":id" becomes a "%s" placeholder plus a named variable.
			Ω(len(pathPatterns)).Should(Equal(1))
			Ω(pathPatterns[0].HTTPMethod).Should(Equal("PUT"))
			Ω(pathPatterns[0].Pattern).Should(Equal("/api/servers/%s"))
			Ω(len(pathPatterns[0].Variables)).Should(Equal(1))
			Ω(pathPatterns[0].Variables[0]).Should(Equal("id"))
		})
	})

	Context("given a route with multiple variables", func() {
		BeforeEach(func() {
			routes = []string{"PUT /api/clouds/:cloud_id/instances/:instance_id/security_groups/:id(.:format)? {:action=>\"index\", :controller=>\"security_groups\"}"}
		})

		It("computes the path pattern", func() {
			// Variables are listed in path order.
			Ω(len(pathPatterns)).Should(Equal(1))
			Ω(pathPatterns[0].HTTPMethod).Should(Equal("PUT"))
			Ω(pathPatterns[0].Pattern).Should(Equal("/api/clouds/%s/instances/%s/security_groups/%s"))
			Ω(len(pathPatterns[0].Variables)).Should(Equal(3))
			Ω(pathPatterns[0].Variables[0]).Should(Equal("cloud_id"))
			Ω(pathPatterns[0].Variables[1]).Should(Equal("instance_id"))
			Ω(pathPatterns[0].Variables[2]).Should(Equal("id"))
		})
	})

	Context("given multiple routes with multiple ", func() {
		BeforeEach(func() {
			routes = []string{
				"GET /api/security_groups/:id(.:format)? {:action=>\"index\", :controller=>\"security_groups\"}",
				"GET /api/instances/:instance_id/security_groups/:id(.:format)? {:action=>\"index\", :controller=>\"security_groups\"}",
				"GET /api/clouds/:cloud_id/instances/:instance_id/security_groups/:id(.:format)? {:action=>\"index\", :controller=>\"security_groups\"}",
			}
		})

		It("computes the path patterns", func() {
			// One pattern per input route, in the same order.
			Ω(len(pathPatterns)).Should(Equal(3))
			Ω(pathPatterns[0].HTTPMethod).Should(Equal("GET"))
			Ω(pathPatterns[0].Pattern).Should(Equal("/api/security_groups/%s"))
			Ω(len(pathPatterns[0].Variables)).Should(Equal(1))
			Ω(pathPatterns[0].Variables[0]).Should(Equal("id"))
			Ω(pathPatterns[0].Pattern).Should(Equal("/api/security_groups/%s"))
			Ω(len(pathPatterns[1].Variables)).Should(Equal(2))
			Ω(pathPatterns[1].HTTPMethod).Should(Equal("GET"))
			Ω(pathPatterns[1].Variables[0]).Should(Equal("instance_id"))
			Ω(pathPatterns[1].Variables[1]).Should(Equal("id"))
			Ω(pathPatterns[1].Pattern).Should(Equal("/api/instances/%s/security_groups/%s"))
			Ω(len(pathPatterns[2].Variables)).Should(Equal(3))
			Ω(pathPatterns[2].HTTPMethod).Should(Equal("GET"))
			Ω(pathPatterns[2].Variables[0]).Should(Equal("cloud_id"))
			Ω(pathPatterns[2].Variables[1]).Should(Equal("instance_id"))
			Ω(pathPatterns[2].Variables[2]).Should(Equal("id"))
			Ω(pathPatterns[2].Pattern).Should(Equal("/api/clouds/%s/instances/%s/security_groups/%s"))
		})
	})
})
|
package main
import (
"fmt"
)
// Data is an empty demo type used to show the fmt.Stringer interface.
type Data struct {
}

// String implements fmt.Stringer; fmt's %v verb dispatches to it.
func (d Data) String() string {
	return "this is string: data"
}
// main prints a Data value; %v-style printing invokes Data.String.
func main() {
	fmt.Println(Data{})
}
|
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"strconv"
"strings"
"md2cflc/confluence"
"md2cflc/render"
)
// Command-line flags controlling authentication, target page and
// rendering behaviour. Parsed in optionParse.
var (
	username      = flag.String("u", "", "Confluence username")
	passwd        = flag.String("p", "", "Confluence password")
	pageId        = flag.String("pageid", "", "Confluence page ID to update")
	confluenceURL = flag.String("wiki", "", "Confluence wiki http URL")
	parentID      = flag.Int("parentid", 0, "parent id of a page")
	title         = flag.String("title", "", "title of a new page")
	space         = flag.String("space", "", "page Space in the wiki")
	verbose       = flag.Bool("verbose", false, "enable debug mode")
	noEscape      = flag.Bool("no-escape", false, "not escape curly brackets")
)
// Debug prints data when err is nil, otherwise the error itself.
func Debug(data []byte, err error) {
	if err != nil {
		fmt.Printf("%s\n\n", err)
		return
	}
	fmt.Printf("%s\n\n", data)
}
// optionParse parses the package-level command-line flags.
func optionParse() {
	flag.Parse()
}
// markdownFile returns the path of the markdown file to convert, taken
// from the first positional argument. The process exits when no file is
// given or the file does not exist. The returned path is the temp copy
// produced by escapeCurlyBrackets, not the original file.
func markdownFile() string {
	files := flag.Args()
	if len(files) == 0 {
		fmt.Printf("Please specify markdown file.\n")
		os.Exit(1)
	}
	f := files[0]
	if _, err := os.Stat(f); err != nil {
		log.Fatalf("markdownFile: %v", err)
	}
	return escapeCurlyBrackets(f)
}
// escapeCurlyBrackets copies filename to a temp file under /tmp and,
// unless -no-escape is set, escapes every "{" and "}" so Confluence does
// not interpret them as macro markup. The temp path (suffix ".tmp") is
// returned; Markdown2ConfluenceWiki removes it after conversion.
func escapeCurlyBrackets(filename string) string {
	// Read errors were previously discarded, silently producing an empty
	// temp file; fail loudly instead.
	ctt, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatalf("escapeCurlyBrackets: read %s: %v", filename, err)
	}
	content := string(ctt)
	if !*noEscape {
		// Bug fix: the old code used ":=" here, declaring a new "content"
		// that shadowed the outer one — the escaped text was thrown away
		// and escaping never took effect.
		content = strings.Replace(content, "{", `\\{`, -1)
		content = strings.Replace(content, "}", `\\}`, -1)
	}
	bn := path.Base(filename)
	tmpFileName := "/tmp/" + bn + ".tmp"
	// O_TRUNC added: without it, a shorter rewrite would leave stale bytes
	// from a previous, longer run at the end of the temp file.
	out, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	w := bufio.NewWriter(out)
	w.WriteString(content)
	w.Flush()
	return tmpFileName
}
// Markdown2ConfluenceWiki renders the markdown file at path file into
// Confluence wiki markup. Temp files created by escapeCurlyBrackets
// (suffix ".tmp") are deleted once the function returns.
func Markdown2ConfluenceWiki(file string) (string, error) {
	if strings.HasSuffix(file, ".tmp") {
		defer os.Remove(file)
	}
	markdownContents, err := ioutil.ReadFile(file)
	if err != nil {
		return "", fmt.Errorf("read file %s error: %v", file, err)
	}
	output := render.Run(markdownContents)
	return string(output), nil
}
// updateCheck validates the flag combination needed to talk to the wiki:
// credentials are mandatory; exactly one of -pageid (update) or
// -parentid (create, which also needs -space) must be given; and the
// wiki URL must be set. Returns a descriptive error on the first
// violation found.
func updateCheck() (err error) {
	if *username == "" || *passwd == "" {
		err = fmt.Errorf("'username' and 'password' must provided")
		return
	}
	if *pageId == "" && *parentID == 0 {
		err = fmt.Errorf("'pageId' and 'parentID' can not be both empty")
		return
	}
	if *pageId != "" && *parentID > 0 {
		err = fmt.Errorf("please provide pageId OR parentID. Do not provid BOTH")
		return
	}
	if *parentID > 0 && *space == "" {
		err = fmt.Errorf("please provide SPACE key when you provided parentID")
		return
	}
	if *confluenceURL == "" {
		err = fmt.Errorf("confluence wiki URL not input")
		return
	}
	return
}
// newPageByOldPage clones oldPage, increments its version number and
// installs content as the new body in "wiki" representation.
func newPageByOldPage(oldPage *confluence.Content, content string) *confluence.Content {
	// Shallow copy: nested value fields are copied, but any reference
	// fields still alias oldPage.
	newPage := *oldPage
	newPage.Version.Number = oldPage.Version.Number + 1
	newPage.Body.Storage.Value = content
	newPage.Body.Storage.Representation = "wiki"
	return &newPage
}
// newPage4Create wraps oldPage in a ContentCreate envelope with an empty
// space key and no ancestors; doCreate fills both in afterwards.
func newPage4Create(oldPage *confluence.Content) *confluence.ContentCreate {
	newPage := &confluence.ContentCreate{
		Space:     confluence.Space{""},
		Ancestors: make([]confluence.Ancestor, 0),
		Content:   *oldPage,
	}
	return newPage
}
// doUpdate replaces the body of the existing page pageId with content,
// bumping the page version by one.
func doUpdate(url, username, passwd, pageId, content string) (err error) {
	auth := confluence.BasicAuth(username, passwd)
	wiki, err := confluence.NewWiki(url, auth)
	if err != nil {
		return
	}
	// Fetch only the version field; the update must increment it.
	oldPage, err := wiki.GetContent(pageId, []string{"version"})
	if err != nil {
		return
	}
	newPage := newPageByOldPage(oldPage, content)
	_, err = wiki.UpdateContent(newPage, *verbose)
	if err != nil {
		return
	}
	return
}
// doCreate creates a new page titled title under parent page parentID in
// the given space, with content as its wiki-markup body.
func doCreate(url, username, passwd, title, content, space string, parentID int) (err error) {
	auth := confluence.BasicAuth(username, passwd)
	wiki, err := confluence.NewWiki(url, auth)
	if err != nil {
		return
	}
	// The parent page is fetched and used as a template for the new page.
	oldPage, err := wiki.GetContent(strconv.Itoa(parentID), []string{"version"})
	if err != nil {
		return
	}
	_newPage := newPageByOldPage(oldPage, content)
	newPage := newPage4Create(_newPage)
	// A freshly created page always starts at version 1.
	newPage.Version.Number = 1
	newPage.Title = title
	// newPage.Ancestors = append(newPage.Ancestors, confluence.Ancestor{parentID})
	ans := confluence.Ancestor{parentID}
	newPage.Ancestors = append(newPage.Ancestors, ans)
	newPage.Space.Key = space
	_, err = wiki.CreateContent(newPage, *verbose)
	if err != nil {
		return
	}
	return
}
// UpdateContent validates the flag combination and then either updates
// an existing page (-pageid) or creates a new child page
// (-parentid + -space) with the given wiki-markup content.
func UpdateContent(content string) error {
	err := updateCheck()
	if err != nil {
		return err
	}
	if *pageId != "" {
		fmt.Println("do update")
		return doUpdate(*confluenceURL, *username, *passwd, *pageId, content)
	} else if *parentID > 0 && *space != "" {
		fmt.Println("do create")
		return doCreate(*confluenceURL, *username, *passwd, *title, content, *space, *parentID)
	}
	return nil
}
// main converts the markdown file given on the command line to Confluence
// wiki markup; when no wiki URL is supplied the markup is printed to
// stdout, otherwise it is pushed to the wiki.
func main() {
	optionParse()
	wikiContent, err := Markdown2ConfluenceWiki(markdownFile())
	if err != nil {
		log.Fatalf("Convert markdown to wiki: %v", err)
	}
	if len(*confluenceURL) == 0 {
		// Dry-run mode: just emit the converted markup.
		fmt.Printf("%s", wikiContent)
		os.Exit(0)
	}
	if err := UpdateContent(wikiContent); err != nil {
		log.Fatalf("Update err: %v", err)
	}
}
|
//First published go package.
package main
import (
"fmt"
"github.com/andy1341/lets-go-chat/pkg/hasher"
)
// main demonstrates the hasher package: hash a fixed password, then check
// it against an unrelated literal.
func main() {
	fmt.Println(hasher.HashPassword("asd"))
	// NOTE(review): "passhash" is not a hash of "asd", so this presumably
	// prints false — confirm against hasher.CheckPasswordHash semantics.
	fmt.Println(hasher.CheckPasswordHash("asd", "passhash"))
}
|
package peer
import (
"fmt"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/message/bean"
"sync"
)
// NbrPeers is the registry of neighbor peers, keyed by peer id.
// The embedded RWMutex guards List in the methods below.
type NbrPeers struct {
	sync.RWMutex
	List map[uint64]*Peer
}
// Broadcast sends msg to every established neighbor that relays traffic.
// Send failures are logged and do not abort the broadcast.
func (np *NbrPeers) Broadcast(msg bean.Message, isConsensus bool) {
	np.RLock()
	defer np.RUnlock()
	for _, node := range np.List {
		if node.syncState == common.ESTABLISH && node.GetRelay() == true {
			err := node.Send(msg, isConsensus)
			if err != nil {
				P2PLog.Info(LOGTABLE_NETWORK, "send msg err: "+err.Error())
			}
		}
	}
}
// NodeExisted reports whether uid is already registered.
// NOTE(review): this reads List without taking the mutex. That is safe
// when the caller already holds the lock (AddNbrNode does), but racy if
// called from anywhere else — confirm intended call sites before adding
// locking here, since locking would deadlock AddNbrNode.
func (np *NbrPeers) NodeExisted(uid uint64) bool {
	_, ok := np.List[uid]
	return ok
}
// GetPeer returns the neighbor registered under id, or nil when unknown.
// This is a pure read, so a read lock suffices (the original took the
// write lock, needlessly serializing concurrent readers).
func (np *NbrPeers) GetPeer(id uint64) *Peer {
	np.RLock()
	defer np.RUnlock()
	if n, ok := np.List[id]; ok {
		return n
	}
	return nil
}
// AddNbrNode registers p under its id; a duplicate id is logged and the
// existing entry is kept.
func (np *NbrPeers) AddNbrNode(p *Peer) {
	np.Lock()
	defer np.Unlock()
	// NodeExisted deliberately takes no lock, so calling it while holding
	// the write lock here is deadlock-free.
	if np.NodeExisted(p.GetID()) {
		fmt.Printf("insert an existed node\n")
	} else {
		np.List[p.GetID()] = p
	}
}
// DelNbrNode removes the neighbor registered under id and returns it;
// the boolean reports whether an entry was actually removed.
func (np *NbrPeers) DelNbrNode(id uint64) (*Peer, bool) {
	np.Lock()
	defer np.Unlock()
	if n, ok := np.List[id]; ok {
		delete(np.List, id)
		return n, true
	}
	return nil, false
}
// Init allocates the peer map; must run before any other NbrPeers method,
// otherwise AddNbrNode would write to a nil map and panic.
func (np *NbrPeers) Init() {
	np.List = make(map[uint64]*Peer)
}
// NodeEstablished reports whether the neighbor id exists and has reached
// the ESTABLISH sync state.
func (np *NbrPeers) NodeEstablished(id uint64) bool {
	np.RLock()
	defer np.RUnlock()
	n, ok := np.List[id]
	return ok && n.syncState == common.ESTABLISH
}
// GetNeighborAddrs returns the address records of all established
// neighbors. Peers that are not yet established are skipped.
func (np *NbrPeers) GetNeighborAddrs() []common.PeerAddr {
	np.RLock()
	defer np.RUnlock()
	var addrs []common.PeerAddr
	for _, p := range np.List {
		if p.GetSyncState() != common.ESTABLISH {
			continue
		}
		var addr common.PeerAddr
		// GetAddr16 errors are ignored; addr.IpAddr keeps its zero value
		// in that case.
		addr.IpAddr, _ = p.GetAddr16()
		addr.Time = p.GetTimeStamp()
		addr.Services = p.GetServices()
		addr.ConsensusPort = p.GetConsPort()
		addr.SyncPort = p.GetSyncPort()
		addr.ID = p.GetID()
		addrs = append(addrs, addr)
	}
	return addrs
}
// GetNeighborHeights returns a map from peer id to chain height for all
// established neighbors.
func (np *NbrPeers) GetNeighborHeights() map[uint64]uint64 {
	np.RLock()
	defer np.RUnlock()
	hm := make(map[uint64]uint64)
	for _, n := range np.List {
		if n.GetSyncState() != common.ESTABLISH {
			continue
		}
		hm[n.GetID()] = n.GetHeight()
	}
	return hm
}
// GetNeighbors returns all established neighbors. The result is always
// non-nil, matching the original behaviour.
func (np *NbrPeers) GetNeighbors() []*Peer {
	np.RLock()
	defer np.RUnlock()
	peers := make([]*Peer, 0, len(np.List))
	for _, n := range np.List {
		if n.GetSyncState() != common.ESTABLISH {
			continue
		}
		peers = append(peers, n)
	}
	return peers
}
// GetNbrNodeCnt counts the neighbors currently in the ESTABLISH state.
func (np *NbrPeers) GetNbrNodeCnt() uint32 {
	np.RLock()
	defer np.RUnlock()
	count := uint32(0)
	for _, n := range np.List {
		if n.GetSyncState() != common.ESTABLISH {
			continue
		}
		count++
	}
	return count
}
|
package camt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document03600103 is the XML document wrapper for a camt.036.001.03
// Debit Authorisation Response message.
type Document03600103 struct {
	XMLName xml.Name                       `xml:"urn:iso:std:iso:20022:tech:xsd:camt.036.001.03 Document"`
	Message *DebitAuthorisationResponseV03 `xml:"DbtAuthstnRspn"`
}
// AddMessage initialises the document's message payload and returns it so
// the caller can populate it.
func (d *Document03600103) AddMessage() *DebitAuthorisationResponseV03 {
	msg := &DebitAuthorisationResponseV03{}
	d.Message = msg
	return msg
}
// Scope
// The Debit Authorisation Response message is sent by an account owner to its account servicing institution. This message is used to approve or reject a debit authorisation request.
// Usage
// The Debit Authorisation Response message is used to reply to a Debit Authorisation Request message.
// The Debit Authorisation Response message covers one and only one payment instruction at a time. If an account owner needs to reply to several Debit Authorisation Request messages, then multiple Debit Authorisation Response messages must be sent.
// The Debit Authorisation Response message indicates whether the account owner agrees with the request by means of a code. It also allows further details to be given about the debit authorisation, such as acceptable amount and value date for the debit.
// The Debit Authorisation Response message must be used exclusively between the account owner and the account servicing institution. It must not be used in place of a Resolution Of Investigation message between subsequent agents.
//
// DebitAuthorisationResponseV03 is the message body carried in a
// Document03600103.
type DebitAuthorisationResponseV03 struct {
	// Identifies the assignment of an investigation case from an assigner to an assignee.
	// Usage: The Assigner must be the sender of this confirmation and the Assignee must be the receiver.
	Assignment *iso20022.CaseAssignment3 `xml:"Assgnmt"`
	// Identifies the investigation case.
	Case *iso20022.Case3 `xml:"Case"`
	// Indicates if the debit authorisation is granted or not.
	Confirmation *iso20022.DebitAuthorisationConfirmation2 `xml:"Conf"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddAssignment initialises the Assignment block and returns it.
func (d *DebitAuthorisationResponseV03) AddAssignment() *iso20022.CaseAssignment3 {
	a := &iso20022.CaseAssignment3{}
	d.Assignment = a
	return a
}

// AddCase initialises the Case block and returns it.
func (d *DebitAuthorisationResponseV03) AddCase() *iso20022.Case3 {
	c := &iso20022.Case3{}
	d.Case = c
	return c
}

// AddConfirmation initialises the Confirmation block and returns it.
func (d *DebitAuthorisationResponseV03) AddConfirmation() *iso20022.DebitAuthorisationConfirmation2 {
	c := &iso20022.DebitAuthorisationConfirmation2{}
	d.Confirmation = c
	return c
}
// AddSupplementaryData appends a fresh SupplementaryData1 entry and
// returns it so the caller can populate it.
func (d *DebitAuthorisationResponseV03) AddSupplementaryData() *iso20022.SupplementaryData1 {
	entry := &iso20022.SupplementaryData1{}
	d.SupplementaryData = append(d.SupplementaryData, entry)
	return entry
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
"time"
"github.com/codeformuenster/dkan-newest-dataset-notifier/datasets"
"github.com/codeformuenster/dkan-newest-dataset-notifier/externalservices"
"github.com/codeformuenster/dkan-newest-dataset-notifier/s3"
"github.com/codeformuenster/dkan-newest-dataset-notifier/tooter"
"github.com/codeformuenster/dkan-newest-dataset-notifier/util"
)
// defaultDKANInstance is the DKAN portal queried when no -url flag is given.
const defaultDKANInstance = "https://opendata.stadt-muenster.de"

// Command-line flag targets, registered and parsed in main.
var (
	dkanInstanceURL, localPath, externalServicesConfigPath string
	// NOTE(review): the -enable-tooter flag is stored in enableTweeter —
	// the name looks like a leftover from an earlier Twitter integration.
	enableTweeter, allowEmpty, tootUnlisted, reverseDkanResponse bool
)
// How this works (at least in my head)
//
// - load local (previous data.json)
// - Download data.json
// - Compare with previous data.json
// - Toot each newly appeared dataset, then persist the current snapshot.
func main() {
	flag.BoolVar(&enableTweeter, "enable-tooter", false, "enable the creation of toots")
	flag.BoolVar(&allowEmpty, "allow-empty", false, "allow empty previous dataset, for initialization")
	flag.BoolVar(&tootUnlisted, "toot-unlisted", false, "toot unlisted instead of public, for initialization")
	flag.BoolVar(&reverseDkanResponse, "reverse-dkan-response", false, "reverse dataset list from DKAN, for initialization")
	flag.StringVar(&dkanInstanceURL, "url", defaultDKANInstance, "base url of the dkan instance (https://...)")
	flag.StringVar(&localPath, "local-path", "", "path to local json file for comparison")
	flag.StringVar(&externalServicesConfigPath, "config-path", "", "path to local json external services configuration")
	flag.Parse()
	log.SetFlags(log.LstdFlags | log.Lmicroseconds)
	var err error
	// validate + construct dkan urls
	datasetsURL, err := util.MakeURL(fmt.Sprintf("%s/%s", dkanInstanceURL, "data.json"))
	if err != nil {
		log.Printf("Could not create valid datasets URL")
		log.Panicln(err)
	}
	// A broken/missing config only disables external services; it is not fatal.
	cfg, err := externalservices.FromFile(externalServicesConfigPath)
	if err != nil {
		log.Println(err)
		log.Println("Disabling external services")
	}
	s3Instance, s3Available := setupS3(cfg.S3Config)
	t, tooterAvailable, err := setupMastodon(cfg.MastodonConfig)
	if err != nil {
		log.Panicln(err)
	}
	if !tooterAvailable {
		log.Println("disabling tooter, no toots will be created")
	}
	// Previous snapshot: prefer S3, fall back to a local file (defaulting
	// to yesterday's data-YYYY-MM-DD.json in the working directory).
	var prevDatasets datasets.Datasets
	if s3Available {
		prevDatasets, err = datasets.FromS3(s3Instance)
	} else {
		if localPath == "" {
			localPath = makeDataPath(time.Now().Add(-24 * time.Hour))
			log.Printf("empty local-path flag, assuming path %s\n", localPath)
		}
		prevDatasets, err = datasets.FromPath(localPath)
	}
	// handle error of prev dataset fetch
	if err != nil {
		log.Println(err)
		log.Println("Reading previous datasets failed, assuming empty")
	}
	// Guard against tooting the entire catalog when there is no baseline.
	if !allowEmpty && len(prevDatasets.Dataset) == 0 {
		log.Println("Empty previous datasets not allowed")
		return
	}
	currDatasets, err := datasets.FromURL(datasetsURL)
	if err != nil {
		log.Panicln(err)
	}
	if reverseDkanResponse {
		prevDatasets.Reverse()
		currDatasets.Reverse()
	}
	// Datasets present now but absent from the previous snapshot.
	missing := currDatasets.Compare(&prevDatasets)
	for _, m := range missing {
		tootText, err := m.ToTootText(dkanInstanceURL)
		if err != nil {
			log.Println(err)
			continue
		}
		log.Printf("%d %s\n", len(tootText), tootText)
		if tooterAvailable {
			visibility := "public"
			if tootUnlisted {
				visibility = "unlisted"
			}
			err = t.SendToot(tootText, visibility)
			if err != nil {
				log.Println(err)
				continue
			}
			// Throttle successive toots.
			time.Sleep(10 * time.Second)
		}
	}
	// Persist the current snapshot (to S3 only when something changed).
	{
		var err error
		if s3Available {
			if len(missing) != 0 {
				err = currDatasets.SaveToS3(fmt.Sprintf("data-%s.json", time.Now().Format("2006-01-02")), s3Instance)
			}
		} else {
			err = currDatasets.Save(makeDataPath(time.Now()))
		}
		if err != nil {
			log.Panicln(err)
		}
	}
}
// setupMastodon builds a Tooter from cfg. The boolean reports whether
// tooting is available; it is false when the -enable-tooter flag is off
// or the config does not validate (both non-fatal).
func setupMastodon(cfg externalservices.MastodonConfig) (tooter.Tooter, bool, error) {
	if !enableTweeter || !cfg.Validate() {
		return tooter.Tooter{}, false, nil
	}
	t, err := tooter.NewTooter(cfg)
	if err != nil {
		return t, false, err
	}
	return t, true, nil
}
// setupS3 builds the S3 client from cfg; the boolean reports whether the
// configuration validated and S3 should be used.
func setupS3(cfg externalservices.S3Config) (s3.S3, bool) {
	return s3.NewS3(cfg), cfg.Validate()
}
// makeDataPath returns the snapshot filename for date, data-YYYY-MM-DD.json,
// placed in the current working directory (or relative to "." when the
// working directory cannot be determined).
func makeDataPath(date time.Time) string {
	filename := fmt.Sprintf("data-%s.json", date.Format("2006-01-02"))
	dir, err := os.Getwd()
	if err != nil {
		return "./" + filename
	}
	// NOTE(review): this joins an OS path with the slash-only "path"
	// package; path/filepath would be the portable choice — confirm the
	// tool is Unix-only before relying on this.
	return path.Join(dir, filename)
}
|
package dbft
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/types"
dposStruct "github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/dbft/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/db"
"github.com/HNB-ECO/HNB-Blockchain/HNB/ledger"
"github.com/HNB-ECO/HNB-Blockchain/HNB/util"
"github.com/json-iterator/go"
"sort"
"strconv"
)
// HandleEpochChange processes an EpochChange vote from peer senderID:
// it validates the epoch number and block hash, records the vote and —
// once more than one third of the validators voted — joins in by
// broadcasting its own EpochChange. Finally it checks whether a majority
// has been reached so the new epoch can be entered.
func (dm *DBFTManager) HandleEpochChange(dposMsg *dposStruct.DPoSMessage, senderID uint64) error {
	epochChange := &dposStruct.EpochChange{}
	err := jsoniter.Unmarshal(dposMsg.Payload, epochChange)
	if err != nil {
		return err
	}
	epochNum := epochChange.EpochNo
	ConsLog.Infof(LOGTABLE_DBFT, "(epochChange) receive epochNum %d epochChange <- %v", epochNum, util.HexBytes(epochChange.DigestAddr))
	// Stale votes (more than one epoch behind) are rejected outright.
	if epochNum < dm.candidateNo.GetCurrentNo()-1 {
		return fmt.Errorf("invalid epochChange epochNum %d < %d", epochNum, dm.candidateNo.GetCurrentNo())
	}
	// The sender is ahead of us: trigger block sync instead of voting.
	if epochChange.Height > dm.CurHeight {
		ConsLog.Infof(LOGTABLE_DBFT, "(epochChange) receive h %d > %d sync", epochChange.Height, dm.CurHeight)
		dm.bftHandler.SyncEntrance(epochChange.Height, senderID)
		return nil
	}
	// The vote must reference the hash of the previous block we hold.
	hash, err := ledger.GetBlockHash(epochChange.Height - 1)
	if err != nil {
		ConsLog.Errorf(LOGTABLE_DBFT, "(epochChange) receive h %d < %d sync error: %v", epochChange.Height, dm.CurHeight, err)
		return err
	}
	if !bytes.Equal(hash, epochChange.Hash) {
		ConsLog.Errorf(LOGTABLE_DBFT, "invalid epochChange h %d hash %X != %X", epochChange.Height, epochChange.Hash, hash)
		return fmt.Errorf("invalid epochChange h %d hash %X != %X", epochChange.Height, epochChange.Hash, hash)
	}
	// Already finalized: a NewEpoch for this number exists.
	if dm.NewEpoch.IsExist(epochNum) {
		return fmt.Errorf("epochChange already +2/3")
	}
	dm.EpochChange.SetEpochChange(epochChange)
	if dm.EpochChange.IsMoreThanOneThridToCandidates(epochChange.EpochNo, len(dm.TotalValidators.Validators)) {
		// More than 1/3 voted: follow the epoch change ourselves.
		ConsLog.Infof(LOGTABLE_DBFT, "(epochChange) epochNum %d receive num > %d follow", epochNum, len(dm.TotalValidators.Validators)/3)
		epochChangeMsg, err := dm.BuildEpochChangeMsg(dm.CurHeight, epochNum)
		if err != nil {
			return err
		}
		if epochNum > dm.candidateNo.GetCurrentNo() {
			dm.candidateNo.SetNo(epochNum)
		}
		ok, err := dm.EpochChange.IsExist(epochChange)
		if err != nil {
			return err
		}
		if !ok {
			// We have not voted for this epoch yet: broadcast our vote and
			// loop it back through the internal queue (non-blocking).
			ConsLog.Infof(LOGTABLE_DBFT, "(epochChange) broadcast epochNum %d ", epochNum)
			dm.BroadcastMsgToAllVP(epochChangeMsg)
			select {
			case dm.InternalMsgQueue <- epochChangeMsg:
			default:
				ConsLog.Warningf(LOGTABLE_DBFT, "tdm recvMsgChan full")
			}
		} else {
			ConsLog.Infof(LOGTABLE_DBFT, "(epochChange) has broadcast epochChange epochNum %d ", epochNum)
		}
	}
	err = dm.CheckEnterNewEpoch(epochChange)
	if err != nil {
		ConsLog.Infof(LOGTABLE_DBFT, "(epochChange) CheckEnterNewEpoch err %s", err.Error())
		return err
	}
	return nil
}
// CheckEnterNewEpoch broadcasts a NewEpoch message once a majority of
// validators voted for the epoch change AND this node is the designated
// NewEpoch sender for that epoch. Otherwise it is a no-op.
func (dm *DBFTManager) CheckEnterNewEpoch(epochChange *dposStruct.EpochChange) error {
	if dm.EpochChange.IsMajorityToCandidates(epochChange.EpochNo, len(dm.TotalValidators.Validators)) {
		if dm.CheckNewEpochSender(epochChange.EpochNo) {
			newEpochMsg, err := dm.BuildNewEpochMsg(epochChange.EpochNo)
			if err != nil {
				return err
			}
			dm.BroadcastMsgToAllVP(newEpochMsg)
			ConsLog.Infof(LOGTABLE_DBFT, "(newEpoch) send newEpoch bgNum %d", epochChange.EpochNo)
			// Loop the message back to ourselves without blocking.
			select {
			case dm.InternalMsgQueue <- newEpochMsg:
			default:
				ConsLog.Warningf(LOGTABLE_DBFT, "dpos recvMsgChan full")
			}
		}
	}
	return nil
}
// CheckNewEpochSender reports whether this node is the designated sender
// of the NewEpoch message for epoch bgNum.
func (dm *DBFTManager) CheckNewEpochSender(bgNum uint64) bool {
	senderAddr, err := dm.GetNewEpochSenderAddr(bgNum)
	if err != nil {
		return false
	}
	ConsLog.Debugf(LOGTABLE_DBFT, "epochNo %d newEpochSender addr %v, my addr %v", bgNum, senderAddr, dm.bftHandler.GetDigestAddr())
	return bytes.Equal(dm.bftHandler.GetDigestAddr(), senderAddr)
}
// GetNewEpochSenderAddr deterministically selects the validator that must
// announce epoch epochNo: validators are sorted by address and the one at
// index epochNo mod len(validators) is chosen.
func (dm *DBFTManager) GetNewEpochSenderAddr(epochNo uint64) (util.HexBytes, error) {
	totalValsLen := len(dm.TotalValidators.Validators)
	if totalValsLen == 0 {
		return nil, errors.New("total validators are empty")
	}
	proposerIndex := epochNo % uint64(totalValsLen)
	// Work on copies so sorting does not disturb the canonical order.
	validators := make([]*types.Validator, len(dm.TotalValidators.Validators))
	for i, val := range dm.TotalValidators.Validators {
		validators[i] = val.Copy()
	}
	sort.Sort(types.ValidatorsByAddress(validators))
	ConsLog.Infof(LOGTABLE_DBFT, "(dm) sorted validators %v", validators)
	return util.HexBytes(validators[proposerIndex].Address), nil
}
// HandleNewEpoch validates an incoming NewEpoch announcement: the epoch
// number must not be stale, the sender must be the designated announcer,
// enough EpochChange votes must exist, and the announced epoch must match
// the epoch this node would build itself. On success the local epoch is
// reset to the announced one and the message is returned.
func (dm *DBFTManager) HandleNewEpoch(dposMsg *dposStruct.DPoSMessage) (*dposStruct.NewEpoch, error) {
	newEpoch := new(dposStruct.NewEpoch)
	err := jsoniter.Unmarshal(dposMsg.Payload, newEpoch)
	if err != nil {
		return nil, err
	}
	ConsLog.Infof(LOGTABLE_DBFT, "(epochChange) receive epochNo %d NewEpoch <- %v", newEpoch.EpochNo, util.HexBytes(newEpoch.DigestAddr))
	newEpochNo := newEpoch.EpochNo
	if newEpochNo < dm.candidateNo.GetCurrentNo() {
		return nil, fmt.Errorf("(newEpoch) net epoch %d != my %d",
			newEpochNo, dm.candidateNo.GetCurrentNo())
	}
	// Only the deterministically chosen validator may announce the epoch.
	newEpochSenderAddr, err := dm.GetNewEpochSenderAddr(newEpochNo)
	if err != nil {
		return nil, err
	}
	if !bytes.Equal(newEpoch.DigestAddr, newEpochSenderAddr) {
		return nil, fmt.Errorf("(newEpoch) send not Expect, send %s Expect %s",
			hex.EncodeToString(newEpoch.DigestAddr), hex.EncodeToString(newEpochSenderAddr))
	}
	// The announcement must be backed by > 1/3 EpochChange votes.
	if !dm.EpochChange.IsMoreThanOneThridToCandidates(newEpochNo, len(dm.TotalValidators.Validators)) {
		return nil, fmt.Errorf("(newEpoch) recv epoch %d epochChange not enough", newEpochNo)
	}
	// Build our own view of the epoch to compare against the network's.
	selfEpoch, err := dm.BuildNewEpoch(newEpochNo)
	if err != nil {
		return nil, err
	}
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	witnessesList := []*types.Validator{}
	err = json.Unmarshal(newEpoch.Witnesss, &witnessesList)
	if err != nil {
		return nil, err
	}
	netEpoch := &Epoch{
		EpochNo:       newEpochNo,
		DependEpochNo: newEpoch.DependEpochNo,
		WitnessList:   witnessesList,
		BeginNum:      newEpoch.Begin,
		EndNum:        newEpoch.End,
	}
	selfEpoch.ReSetBeginBlk(dm.bftHandler.Height)
	if !dm.checkNewEpochMatch(netEpoch, selfEpoch) {
		return nil, fmt.Errorf("newEpochNo %d not match with self", newEpochNo)
	}
	err = dm.ResetEpoch(netEpoch)
	if err != nil {
		return nil, fmt.Errorf("newEpochNo %d ResetEpoch err %s", netEpoch.EpochNo, err.Error())
	}
	return newEpoch, nil
}
// checkNewEpochMatch reports whether the epoch announced by the network
// agrees with the locally built one: begin/end block numbers, dependent
// epoch number, and the witness set (compared by hash, order-insensitive)
// must all match. Each mismatch is logged.
func (dm *DBFTManager) checkNewEpochMatch(net, self *Epoch) bool {
	if net.BeginNum != self.BeginNum {
		ConsLog.Warningf(LOGTABLE_DBFT, "(newEpoch) the BeginNum net %d self %d",
			net.BeginNum, self.BeginNum)
		return false
	}
	if net.EndNum != self.EndNum {
		ConsLog.Warningf(LOGTABLE_DBFT, "(newEpoch) the EndNum net %d self %d",
			net.EndNum, self.EndNum)
		return false
	}
	if net.DependEpochNo != self.DependEpochNo {
		ConsLog.Warningf(LOGTABLE_DBFT, "(newEpoch) the dependEpochNo net %d self %d",
			net.DependEpochNo, self.DependEpochNo)
		return false
	}
	if len(net.WitnessList) != len(self.WitnessList) {
		ConsLog.Warningf(LOGTABLE_DBFT, "(newEpoch) the witnesses len net %d self %d",
			len(net.WitnessList), len(self.WitnessList))
		return false
	}
	// O(n^2) set comparison; witness lists are small (validator count).
	var match bool
	for _, netWitness := range net.WitnessList {
		match = false
		for _, selfWitness := range self.WitnessList {
			if bytes.Equal(netWitness.Hash(), selfWitness.Hash()) {
				match = true
				break
			}
		}
		if !match {
			ConsLog.Warningf(LOGTABLE_DBFT, "(newEpoch) the witness not match net %s self %s",
				net.WitnessList, self.WitnessList)
			return false
		}
	}
	return true
}
// GetKey builds the KV-store key for an epoch: the chain id followed by
// the decimal epoch number.
func GetKey(chainId string, epochNo uint64) string {
	return fmt.Sprintf("%s%d", chainId, epochNo)
}
// RecordEpochInfo persists epochInfo as JSON in the key-value store under
// GetKey(dm.ChainID, epochInfo.EpochNo).
func (dm *DBFTManager) RecordEpochInfo(epochInfo *Epoch) error {
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	epochData, err := json.Marshal(epochInfo)
	if err != nil {
		ConsLog.Errorf(LOGTABLE_DBFT, "RecordEpochInfo Marshal err %s", err.Error())
		return err
	}
	key := GetKey(dm.ChainID, epochInfo.EpochNo)
	err = db.KVDB.Put([]byte(key), epochData)
	if err != nil {
		ConsLog.Errorf(LOGTABLE_DBFT, "RecordEpochInfo Put err %s", err.Error())
		return err
	}
	return nil
}
// LoadEpochInfo loads and unmarshals the epoch persisted under
// GetKey(chainid, epochNo) from the key-value store.
func LoadEpochInfo(chainid string, epochNo uint64) (*Epoch, error) {
	key := GetKey(chainid, epochNo)
	epochData, err := db.KVDB.Get([]byte(key))
	if err != nil {
		// Bug fix: the Get error was previously ignored and silently
		// overwritten by the Unmarshal error, masking store failures.
		ConsLog.Errorf(LOGTABLE_DBFT, "LoadEpochInfo Get err %s", err.Error())
		return nil, err
	}
	var epoch *Epoch
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(epochData, &epoch); err != nil {
		ConsLog.Errorf(LOGTABLE_DBFT, "LoadEpochInfo Unmarshal err %s", err.Error())
		return nil, err
	}
	return epoch, nil
}
|
package main
import (
"log"
"os"
"fmt"
"github.com/olekukonko/tablewriter"
)
// QueueAction prints the issues assigned to the configured user as a
// two-column table (key, summary), truncated to pageSize rows with an
// ellipsis row when more issues exist. Exits the process when the issue
// lookup fails.
func (app *Application) QueueAction(args []string) {
	log.Printf("username = %#v", app.Username)
	issues, err := app.Client.GetIssuesByAsignee(app.Username)
	if err != nil {
		fmt.Printf("Unable to get user issues: %v\n", err)
		os.Exit(1)
	}
	// Page size
	pageSize := 10
	// Render output
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Key", "Summary"})
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetAutoWrapText(false)
	for idx, i := range issues {
		table.Append([]string{i.PKey, i.Summary})
		// Bug fix: the old condition (idx > pageSize) printed 12 rows for a
		// page size of 10. Stop after pageSize rows, adding the ellipsis
		// row only when issues were actually cut off.
		if idx+1 == pageSize && len(issues) > pageSize {
			table.Append([]string{"...", "..."})
			break
		}
	}
	table.Render()
}
|
package routers
import (
"github.com/gorilla/mux"
"github.com/gotodos/handlers"
"github.com/gotodos/common"
)
// InitRouters builds the application router: every /tasks route is
// wrapped with JWT authentication, while /token issues tokens.
func InitRouters() *mux.Router {
	router := mux.NewRouter().StrictSlash(false)
	taskRoutes := GetTaskRoutes()
	// All task endpoints require a valid JWT.
	taskRouter := common.JwtWrapper(taskRoutes)
	router.PathPrefix("/tasks").Handler(taskRouter)
	router.HandleFunc("/token", handlers.TokenHandler).Name("GetToken")
	return router
}
|
// Main canibus server
package main
import (
"flag"
"os"
"github.com/ghetzel/canibus/core"
"github.com/ghetzel/canibus/server"
"github.com/ghetzel/canibus/webserver"
)
// Defaults for the CANiBUS server; each is overridable via a flag below.
const (
	DEFAULT_IP          = "0.0.0.0"
	DEFAULT_PORT        = "1234"
	DEFAULT_WEBPORT     = "2515"
	DEFAULT_WWW_ROOT    = "www"
	DEFAULT_CONFIG_FILE = "config.json"
)

// ServerConfig holds the server settings loaded in main via core.LoadConfig.
var ServerConfig server.Config

var bindIP = flag.String("ip", DEFAULT_IP, "IP to bind to")
var tcpPort = flag.String("port", DEFAULT_PORT, "TCP port")
var wwwPort = flag.String("www", DEFAULT_WEBPORT, "port for web server")
var wwwRoot = flag.String("root", DEFAULT_WWW_ROOT, "file path for web server")
var configFile = flag.String("config", DEFAULT_CONFIG_FILE, "Settings config file")
// launchTCPServer starts the TCP listener and exits the process on failure.
func launchTCPServer() {
	if err := server.StartListener(*bindIP, *tcpPort); err != nil {
		println(err.Error())
		os.Exit(1)
	}
}
// launchSPAWebServer starts the single-page-app web listener and exits
// the process on failure.
func launchSPAWebServer() {
	if err := webserver.StartSPAWebListener(*wwwRoot, *bindIP, *wwwPort); err != nil {
		println(err.Error())
		os.Exit(1)
	}
}
// launchWebServer starts the non-SPA web listener and exits the process
// on failure.
// NOTE(review): not called from main in this file — possibly kept as an
// alternative to launchSPAWebServer; confirm before removing.
func launchWebServer() {
	err := webserver.StartWebListener(*wwwRoot, *bindIP, *wwwPort)
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}
}
// main loads configuration and drivers, starts the TCP server in a
// goroutine, then blocks serving the SPA web UI.
func main() {
	flag.Parse()
	core.SetConfig(&ServerConfig)
	core.LoadConfig(*configFile)
	server.InitDrivers()
	go launchTCPServer()
	// Blocks until the web listener stops.
	launchSPAWebServer()
}
|
package commands
// Signup is a command requesting a new customer signup be performed.
type Signup struct {
	CustomerID string // identifier for the customer being created
	Name       string // customer's legal name
	Nickname   string // customer's display name
}

// ChangeNickname is a command requesting an existing customer's nickname be
// changed.
type ChangeNickname struct {
	CustomerID  string // identifier of the customer to update
	NewNickname string // nickname to replace the current one
}
|
package main
import "syscall"
// syscallType classifies syscalls by how they reference files:
// SyscallPath for plain path-string arguments, SyscallFile for
// file-descriptor arguments, and SyscallFilePath for the *at family
// (dirfd + relative path). The Syscall* constants are defined elsewhere
// in this package.
var syscallType = map[int]int{
	syscall.SYS_ACCESS:   SyscallPath,
	syscall.SYS_CHDIR:    SyscallPath,
	syscall.SYS_CREAT:    SyscallPath,
	syscall.SYS_EXECVE:   SyscallPath,
	syscall.SYS_LCHOWN:   SyscallPath,
	syscall.SYS_LINK:     SyscallPath,
	syscall.SYS_LSTAT:    SyscallPath,
	syscall.SYS_MKDIR:    SyscallPath,
	syscall.SYS_OPEN:     SyscallPath,
	syscall.SYS_READLINK: SyscallPath,
	syscall.SYS_RMDIR:    SyscallPath,
	syscall.SYS_STAT:     SyscallPath,
	syscall.SYS_STATFS:   SyscallPath,
	syscall.SYS_SYMLINK:  SyscallPath,
	syscall.SYS_TRUNCATE: SyscallPath,
	syscall.SYS_UNLINK:   SyscallPath,
	syscall.SYS_UTIMES:   SyscallPath,
	syscall.SYS_FCHDIR:   SyscallFile,
	syscall.SYS_FCNTL:    SyscallFile,
	syscall.SYS_FACCESSAT:  SyscallFilePath,
	syscall.SYS_FCHMODAT:   SyscallFilePath,
	syscall.SYS_FCHOWNAT:   SyscallFilePath,
	syscall.SYS_LINKAT:     SyscallFilePath,
	syscall.SYS_MKDIRAT:    SyscallFilePath,
	syscall.SYS_MKNODAT:    SyscallFilePath,
	syscall.SYS_OPENAT:     SyscallFilePath,
	syscall.SYS_READLINKAT: SyscallFilePath,
	syscall.SYS_UNLINKAT:   SyscallFilePath,
}

// syscallCwdChange marks the syscalls that change the process's current
// working directory.
var syscallCwdChange = map[int]bool{
	syscall.SYS_CHDIR:  true,
	syscall.SYS_FCHDIR: true,
}
|
package main
import (
"fmt"
"strings"
"strconv"
)
// pa_test reports whether n is a palindrome in base 10: the decimal
// digits read the same forwards and backwards.
func pa_test(n int) bool {
	s := strconv.Itoa(n)
	// Build the reversed digit string byte by byte.
	var b strings.Builder
	for i := len(s) - 1; i >= 0; i-- {
		b.WriteByte(s[i])
	}
	// Parse the reversal back to an int and compare with the original.
	rn, err := strconv.Atoi(b.String())
	if err != nil {
		fmt.Println("Error converting string to int.")
	}
	return n == rn
}
// pbin_test reports whether n is a palindrome in base 2 (no leading
// zeros, as produced by %b).
func pbin_test(n int) bool {
	b := fmt.Sprintf("%b", n)
	r := make([]byte, len(b))
	for i := range r {
		r[i] = b[len(b)-1-i]
	}
	return b == string(r)
}
// main sums every number below one million that is palindromic in both
// base 10 and base 2, then prints the sum (this matches the statement of
// Project Euler problem 36).
func main() {
	sum := 0
	for x := 1; x < 1000000; x++ {
		if pa_test(x) && pbin_test(x) {
			sum += x
		}
	}
	fmt.Println(sum)
}
|
// Copyright 2021 Kuei-chun Chen. All rights reserved.
package keyhole
import (
"github.com/simagix/keyhole/sim"
)
// StartSimulation kicks off simulation: it starts the runner and then
// blocks collecting status until the run completes, returning the first
// error encountered.
func StartSimulation(runner *sim.Runner) error {
	var err error
	if err = runner.Start(); err != nil {
		return err
	}
	return runner.CollectAllStatus()
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"path/filepath"
"strconv"
"time"
"github.com/urfave/cli"
"github.com/CS-SI/SafeScale/broker/client"
brokerutils "github.com/CS-SI/SafeScale/broker/utils"
"github.com/CS-SI/SafeScale/utils"
clitools "github.com/CS-SI/SafeScale/utils"
)
// SSHCmd is the top-level "ssh" CLI command grouping the run, copy,
// connect, tunnel and close subcommands.
var SSHCmd = cli.Command{
	Name:  "ssh",
	Usage: "ssh COMMAND",
	Subcommands: []cli.Command{
		sshRun,
		sshCopy,
		sshConnect,
		sshTunnel,
		sshClose,
	},
}
// sshRun executes a shell command (-c) on the target host over SSH and
// relays its stdout/stderr and exit code.
var sshRun = cli.Command{
	Name:      "run",
	Usage:     "Run a command on the host",
	ArgsUsage: "<Host_name|Host_ID>",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "c",
			Usage: "Command to execute",
		},
		cli.StringFlag{
			Name:  "timeout",
			Value: "5",
			Usage: "timeout in minutes",
		}},
	Action: func(c *cli.Context) error {
		if c.NArg() != 1 {
			fmt.Println("Missing mandatory argument <Host_name>")
			_ = cli.ShowSubcommandHelp(c)
			return clitools.ExitOnInvalidArgument()
		}
		timeout := brokerutils.TimeoutCtxHost
		if c.IsSet("timeout") {
			timeout = time.Duration(c.Float64("timeout")) * time.Minute
		}
		retcode, stdout, stderr, err := client.New().Ssh.Run(c.Args().Get(0), c.String("c"), client.DefaultConnectionTimeout, timeout)
		if err != nil {
			return clitools.ExitOnRPC(utils.TitleFirst(client.DecorateError(err, "ssh run", false).Error()))
		}
		fmt.Println(stdout)
		fmt.Fprintln(os.Stderr, stderr)
		// Propagate the remote command's exit code to this process.
		if retcode != 0 {
			return cli.NewExitError("", retcode)
		}
		return nil
	},
}
func normalizeFileName(fileName string) string {
absPath, _ := filepath.Abs(fileName)
if _, err := os.Stat(absPath); err != nil {
return fileName
}
return absPath
}
// sshCopy copies a file or directory between the local machine and a
// host; either side of the pair may use the "host:/path" form.
var sshCopy = cli.Command{
	Name:      "copy",
	Usage:     "Copy a local file/directory to an host or copy from host to local",
	ArgsUsage: "from to Ex: /my/local/file.txt host1:/remote/path/",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "timeout",
			Value: "5",
			Usage: "timeout in minutes",
		}},
	Action: func(c *cli.Context) error {
		if c.NArg() != 2 {
			fmt.Println("2 arguments (from and to) are required")
			_ = cli.ShowSubcommandHelp(c)
			return clitools.ExitOnInvalidArgument()
		}
		timeout := brokerutils.TimeoutCtxHost
		if c.IsSet("timeout") {
			timeout = time.Duration(c.Float64("timeout")) * time.Minute
		}
		// Local-side arguments are made absolute; remote specs pass through.
		_, _, _, err := client.New().Ssh.Copy(normalizeFileName(c.Args().Get(0)), normalizeFileName(c.Args().Get(1)), client.DefaultConnectionTimeout, timeout)
		if err != nil {
			return clitools.ExitOnRPC(utils.TitleFirst(client.DecorateError(err, "ssh copy", true).Error()))
		}
		fmt.Printf("Copy of '%s' to '%s' done\n", c.Args().Get(0), c.Args().Get(1))
		return nil
	},
}
// sshConnect opens an interactive shell on the target host.
var sshConnect = cli.Command{
	Name:      "connect",
	Usage:     "Connect to the host with interactive shell",
	ArgsUsage: "<Host_name|Host_ID>",
	Action: func(c *cli.Context) error {
		if c.NArg() != 1 {
			fmt.Println("Missing mandatory argument <Host_name>")
			_ = cli.ShowSubcommandHelp(c)
			return clitools.ExitOnInvalidArgument()
		}
		err := client.New().Ssh.Connect(c.Args().Get(0), 0)
		if err != nil {
			err = clitools.ExitOnRPC(utils.TitleFirst(client.DecorateError(err, "ssh connect", false).Error()))
		}
		return err
	},
}
// sshTunnel creates an SSH tunnel between the admin host and a cloud
// host, forwarding -local to -remote.
var sshTunnel = cli.Command{
	Name:      "tunnel",
	Usage:     "Create a ssh tunnel between admin host and a host in the cloud",
	ArgsUsage: "<Host_name|Host_ID --local local_port --remote remote_port>",
	Flags: []cli.Flag{
		cli.IntFlag{
			Name:  "local",
			Value: 8080,
			Usage: "local tunnel's port, if not set all",
		},
		cli.IntFlag{
			Name:  "remote",
			Value: 8080,
			Usage: "remote tunnel's port, if not set all",
		},
		cli.StringFlag{
			Name:  "timeout",
			Value: "1",
			Usage: "timeout in minutes",
		},
	},
	Action: func(c *cli.Context) error {
		if c.NArg() != 1 {
			fmt.Println("Missing mandatory argument")
			_ = cli.ShowSubcommandHelp(c)
			return fmt.Errorf("Missing arguments")
		}
		localPort := c.Int("local")
		if 0 > localPort || localPort > 65535 {
			fmt.Printf("%d is not a valid port\n", localPort)
			_ = cli.ShowSubcommandHelp(c)
			return fmt.Errorf("wrong value of localport")
		}
		remotePort := c.Int("remote")
		// Bug fix: this guard previously re-validated localPort (copy-paste
		// error), letting an out-of-range remote port through.
		if 0 > remotePort || remotePort > 65535 {
			fmt.Printf("%d is not a valid port\n", remotePort)
			_ = cli.ShowSubcommandHelp(c)
			return fmt.Errorf("wrong value of remoteport")
		}
		timeout := time.Duration(c.Float64("timeout")) * time.Minute
		//c.GlobalInt("port") is the grpc port aka. 50051
		err := client.New().Ssh.CreateTunnel(c.Args().Get(0), localPort, remotePort, timeout)
		if err != nil {
			err = client.DecorateError(err, "ssh tunnel", false)
		}
		return err
	},
}
// sshClose closes one or several ssh tunnels matching the given local/remote
// port patterns (regular expressions; default ".*" matches everything).
var sshClose = cli.Command{
	Name:      "close",
	Usage:     "Close one or several ssh tunnel",
	ArgsUsage: "<Host_name|Host_ID> --local local_port --remote remote_port",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "local",
			Value: ".*",
			Usage: "local tunnel's port, if not set all",
		},
		cli.StringFlag{
			Name:  "remote",
			Value: ".*",
			Usage: "remote tunnel's port, if not set all",
		},
		cli.StringFlag{
			Name:  "timeout",
			Value: "1",
			Usage: "timeout in minutes",
		},
	},
	Action: func(c *cli.Context) error {
		if c.NArg() != 1 {
			fmt.Println("Missing mandatory argument")
			_ = cli.ShowSubcommandHelp(c)
			return fmt.Errorf("Missing arguments")
		}
		strLocalPort := c.String("local")
		if c.IsSet("local") {
			localPort, err := strconv.Atoi(strLocalPort)
			if err != nil || 0 > localPort || localPort > 65535 {
				// BUG FIX: print the raw flag value; when Atoi failed, the old
				// "%d" message blamed port 0 instead of showing the bad input.
				fmt.Printf("'%s' is not a valid port\n", strLocalPort)
				_ = cli.ShowSubcommandHelp(c)
				return fmt.Errorf("wrong value of localport")
			}
		}
		strRemotePort := c.String("remote")
		if c.IsSet("remote") {
			remotePort, err := strconv.Atoi(strRemotePort)
			if err != nil || 0 > remotePort || remotePort > 65535 {
				// Same fix as above for the remote port.
				fmt.Printf("'%s' is not a valid port\n", strRemotePort)
				_ = cli.ShowSubcommandHelp(c)
				return fmt.Errorf("wrong value of remoteport")
			}
		}
		timeout := time.Duration(c.Float64("timeout")) * time.Minute
		//c.GlobalInt("port") is the grpc port aka. 50051
		err := client.New().Ssh.CloseTunnels(c.Args().Get(0), strLocalPort, strRemotePort, timeout)
		if err != nil {
			err = client.DecorateError(err, "ssh close", false)
		}
		return err
	},
}
|
/*
Package sqle is a general purpose, transparent, non-magical helper package
for sql.DB that simplifies and reduces error checking for various SQL
operations.
*/
package sqle
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-09-08 08:30
# @File : lt_113_Path_Sum_II.go
# @Description :
# @Attention :
*/
package v0
/*
找到路径的同时,收集路径
解题思路: dfs解决
*/
// pathSum returns every root-to-leaf path whose node values add up to sum.
// A non-nil (possibly empty) slice is always returned.
func pathSum(root *TreeNode, sum int) [][]int {
	collected := make([][]int, 0)
	dfs(root, sum, []int{}, &collected)
	return collected
}
// dfs walks the tree depth-first, carrying the values on the current path and
// the remaining target sum; every root-to-leaf path that exactly consumes the
// sum is copied into result.
//
// path is passed by value, so each recursion level owns its own length; the
// explicit copy at a matching leaf is what makes the stored slice safe from
// later appends that reuse the shared backing array.
func dfs(root *TreeNode, sum int, path []int, result *[][]int) {
	if root == nil {
		return
	}
	path = append(path, root.Val)
	// Leaf reached with the exact remaining sum: snapshot the path.
	if root.Left == nil && root.Right == nil && root.Val == sum {
		newPath := make([]int, len(path))
		copy(newPath, path)
		*result = append(*result, newPath)
		return
	}
	dfs(root.Left, sum-root.Val, path, result)
	dfs(root.Right, sum-root.Val, path, result)
}
|
package server
import (
"chlorine/apierror"
"chlorine/storage"
"chlorine/ws"
"encoding/gob"
"fmt"
"log"
"net/http"
"os"
"time"
)
var (
	// dbStorage is the shared database handle, set by StartChlorineServer.
	dbStorage *storage.DBStorage
	// dbConfig is read from POSTGRES_* environment variables at package
	// initialization time, before main runs — changing the environment
	// afterwards has no effect.
	dbConfig = storage.DatabaseConfig{
		Host:     os.Getenv("POSTGRES_HOST"),
		Port:     os.Getenv("POSTGRES_PORT"),
		User:     os.Getenv("POSTGRES_USER"),
		Password: os.Getenv("POSTGRES_PASSWORD"),
		Name:     os.Getenv("POSTGRES_DATABASE")}
	// webSocketHub fans WebSocket messages out to connected clients.
	webSocketHub = ws.CreateHub()
)
// StartChlorineServer starts Chlorine to listen to HTTP connections on the given port.
// It connects to the database, starts the WebSocket hub goroutine, wires up
// repositories/services/handlers, then blocks in ListenAndServe; any serve
// error is fatal. The port is passed to http.ListenAndServe verbatim, so it
// must include the leading colon (e.g. ":8080").
func StartChlorineServer(port string) {
	dbStorage = storage.ConnectDatabase(dbConfig)
	initWebSocketActions(webSocketHub)
	go webSocketHub.Run()
	initRepositories()
	initServices()
	initHandlers()
	handler := GetApplicationHandler()
	err := http.ListenAndServe(port, handler)
	if err != nil {
		log.Fatal(err)
	}
}
// panicIfErr sends a generic 500 API error to the client and panics with the
// annotated error when err is non-nil; it is a no-op otherwise. The panic is
// expected to be caught by recovery middleware further up the handler chain.
func panicIfErr(jsonWriter JSONResponseWriter, err error, pretext string) {
	if err != nil {
		jsonWriter.Error(apierror.APIServerError, http.StatusInternalServerError)
		panic(fmt.Sprintf("%s: %s", pretext, err.Error()))
	}
}
// init registers time types with gob so they can be stored in (session)
// payloads that are gob-encoded.
func init() {
	gob.Register(&time.Time{})
	gob.Register(&time.Location{})
}
|
package primitives_test
import (
"encoding/xml"
"fmt"
"github.com/plandem/xlsx/format"
"github.com/plandem/xlsx/internal/ml/primitives"
"github.com/stretchr/testify/require"
"testing"
)
// TestTimePeriod round-trips every TimePeriodType value through XML
// marshal/unmarshal and checks both the serialized attribute and the
// String() representation.
func TestTimePeriod(t *testing.T) {
	type Entity struct {
		Attribute primitives.TimePeriodType `xml:"attribute,attr"`
	}
	list := map[string]primitives.TimePeriodType{
		"":          primitives.TimePeriodType(0),
		"today":     format.TimePeriodToday,
		"yesterday": format.TimePeriodYesterday,
		"tomorrow":  format.TimePeriodTomorrow,
		"last7Days": format.TimePeriodLast7Days,
		"thisMonth": format.TimePeriodThisMonth,
		"lastMonth": format.TimePeriodLastMonth,
		"nextMonth": format.TimePeriodNextMonth,
		"thisWeek":  format.TimePeriodThisWeek,
		"lastWeek":  format.TimePeriodLastWeek,
		"nextWeek":  format.TimePeriodNextWeek,
	}
	for s, v := range list {
		t.Run(s, func(tt *testing.T) {
			entity := Entity{Attribute: v}
			encoded, err := xml.Marshal(&entity)
			// Idiom fix: require.NoError states the intent (err must be nil)
			// precisely; require.Empty merely checked for an "empty" value.
			require.NoError(tt, err)
			if s == "" {
				// Zero value omits the attribute entirely.
				require.Equal(tt, `<Entity></Entity>`, string(encoded))
			} else {
				require.Equal(tt, fmt.Sprintf(`<Entity attribute="%s"></Entity>`, s), string(encoded))
			}
			var decoded Entity
			err = xml.Unmarshal(encoded, &decoded)
			require.NoError(tt, err)
			require.Equal(tt, entity, decoded)
			require.Equal(tt, s, decoded.Attribute.String())
		})
	}
}
|
/*
Copyright 2018-2020 The Nori Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logger
import "time"
// Field is a single structured-logging key/value pair attached to an entry.
type Field struct {
	Key   string
	Value string
}
// Entry is one log record: its severity, timestamp, message and the
// formatter used to serialize it.
type Entry struct {
	Formatter Formatter
	Level     Level
	Time      time.Time
	Message   string
}
//go:generate mockgen -destination=../mocks/logger/formatter.go -package=mocks github.com/nori-io/common/v5/pkg/domain/logger Formatter

// Formatter serializes a log entry (plus optional extra fields) into bytes.
type Formatter interface {
	Format(e Entry, field ...Field) ([]byte, error)
}
//go:generate mockgen -destination=../mocks/logger/logger.go -package=mocks github.com/nori-io/common/v5/pkg/domain/logger Logger

// Logger extends FieldLogger with hook registration and the ability to
// derive a child logger carrying additional fields.
type Logger interface {
	FieldLogger
	// AddHook registers a hook invoked for matching log entries.
	AddHook(hook Hook)
	// With returns a logger that attaches the given fields to every entry.
	With(fields ...Field) Logger
}
//go:generate mockgen -destination=../mocks/logger/field_logger.go -package=mocks github.com/nori-io/common/v5/pkg/domain/logger FieldLogger

// FieldLogger is the leveled logging surface; every method accepts a
// printf-style format string and arguments.
type FieldLogger interface {
	// Critical logs a message at critical level.
	Critical(format string, opts ...interface{})
	// Debug logs a message at debug level.
	Debug(format string, opts ...interface{})
	// Fatal logs a message at fatal level and exits with status 1.
	Fatal(format string, opts ...interface{})
	// Error logs a message at error level.
	Error(format string, opts ...interface{})
	// Info logs a message at info level.
	Info(format string, opts ...interface{})
	// Log logs a message at the specified level.
	Log(level Level, format string, opts ...interface{})
	// Notice logs a message at notice level.
	Notice(format string, opts ...interface{})
	// Panic logs a message at panic level and then panics.
	Panic(format string, opts ...interface{})
	// Warning logs a message at warning level.
	Warning(format string, opts ...interface{})
}
|
package licenses
const (
	// LicensesGetLicenses is a string representation of the current endpoint for getting licenses.
	// It is a relative API path, joined with the service base URL by callers.
	LicensesGetLicenses = "v1/metadata/getLicenses"
)
|
// Copyright 2012 Derek A. Rhodes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lorem
import (
"math/rand"
"strings"
)
// genWordLen draws a word length in [1, 13] following an empirical table of
// English word-length frequencies (cumulative percentages).
func genWordLen() int {
	r := rand.Float32() * 100
	// Cumulative frequency thresholds; index i maps to word length i+1.
	cumulative := [...]float32{
		1.939, 19.01, 38.00, 50.41, 61.00, 70.09, 78.97,
		85.65, 90.87, 95.05, 97.27, 98.67, 100.0,
	}
	for i, limit := range cumulative {
		if r < limit {
			return i + 1
		}
	}
	return 2 // unreachable: rand.Float32() < 1, so r < 100 always matches
}
// intRange returns a pseudo-random integer in the half-open range [min, max).
// Equal bounds are widened by one, and swapped bounds are reordered.
func intRange(min, max int) int {
	switch {
	case min == max:
		return intRange(min, min+1)
	case min > max:
		return intRange(max, min)
	}
	return min + rand.Int()%(max-min)
}
// word returns a dictionary word of exactly wordLen letters, clamping the
// requested length to the supported range [1, 13]. The scan starts at a
// random position in wordlist and wraps around.
func word(wordLen int) string {
	if wordLen < 1 {
		wordLen = 1
	}
	if wordLen > 13 {
		wordLen = 13
	}
	n := rand.Int() % len(wordlist)
	// BUG FIX: the original reset n to 0 whenever n >= len(wordlist)-1,
	// which made the last wordlist entry unreachable and left the trailing
	// `return ""` dead code; it could also spin forever if no word of the
	// requested length existed. Scan at most one full cycle instead.
	for i := 0; i < len(wordlist); i++ {
		if len(wordlist[n]) == wordLen {
			return wordlist[n]
		}
		n++
		if n >= len(wordlist) {
			n = 0
		}
	}
	// No word of the requested length exists; fall back to an arbitrary word
	// rather than looping forever.
	return wordlist[n]
}
// Generate a word in a specfied range of letters.
func Word(min, max int) string {
n := intRange(min, max)
return word(n)
}
// Generate a sentence with a specified range of words.
// Sentence draws a word count from [min, max), joins random words with
// spaces, may sprinkle in up to two commas, capitalizes the first letter and
// ends with a full stop.
func Sentence(min, max int) string {
	n := intRange(min, max)
	// grab some words
	ws := []string{}
	maxcommas := 2
	numcomma := 0
	for i := 0; i < n; i++ {
		ws = append(ws, (word(genWordLen())))
		// maybe insert a comma, if there are currently < 2 commas, and
		// the current word is not the last or first
		// NOTE(review): the comma is actually appended to the PREVIOUS word
		// (ws[i-1]), and `i > 2` skips the first three words, not just the
		// first — confirm intent before changing.
		if (rand.Int()%n == 0) && numcomma < maxcommas && i < n-1 && i > 2 {
			ws[i-1] += ","
			numcomma += 1
		}
	}
	sentence := strings.Join(ws, " ") + "."
	// Uppercase the first byte; safe because generated words are ASCII.
	sentence = strings.ToUpper(sentence[:1]) + sentence[1:]
	return sentence
}
// Sentence-size bounds used when building paragraphs.
const (
	minwords = 5
	maxwords = 22
)

// Paragraph generates a paragraph containing a number of sentences drawn
// from [min, max), joined by single spaces.
func Paragraph(min, max int) string {
	count := intRange(min, max)
	sentences := make([]string, 0, count)
	for i := 0; i < count; i++ {
		sentences = append(sentences, Sentence(minwords, maxwords))
	}
	return strings.Join(sentences, " ")
}
// Url generates a random http URL: bare host, host plus one path segment, or
// host plus a two-segment .html path.
func Url() string {
	n := intRange(0, 3)
	u := `http://www.` + Host()
	switch n {
	case 1:
		u += "/" + Word(2, 8)
	case 2:
		u += "/" + Word(2, 8) + "/" + Word(2, 8) + ".html"
	}
	return u
}
// Host generates a random host name made of two words and one of the
// .com/.net/.org top-level domains.
func Host() string {
	var tld string
	switch intRange(0, 3) {
	case 0:
		tld = ".com"
	case 1:
		tld = ".net"
	case 2:
		tld = ".org"
	}
	return Word(2, 8) + Word(2, 8) + tld
}
// Email generates a random email address of the form word@host.
func Email() string {
	user := Word(4, 10)
	return user + `@` + Host()
}
|
package handlers
import (
"coffeebeans-people-backend/models"
"coffeebeans-people-backend/utility"
"context"
"encoding/json"
"net/http"
)
// CreateProject returns an HTTP handler that decodes a Project from the JSON
// request body and persists it through apiSvc.
//
// Responses: 400 with the decode error when the body is invalid JSON; 400
// when persisting fails (e.g. the project id already exists); 200 on success.
func CreateProject(apiSvc models.ApiSvc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var project models.Project
		if err := json.NewDecoder(r.Body).Decode(&project); err != nil {
			utility.NewJSONWriter(w).Write(models.Response{
				Error:   err.Error(),
				Message: "Error decoding request body",
			}, http.StatusBadRequest)
			return
		}
		// Use the request context (was context.TODO()) so the store call is
		// cancelled when the client disconnects.
		if err := apiSvc.CreateProjectByAdmin(r.Context(), project); err != nil {
			utility.NewJSONWriter(w).Write(models.Response{
				Error:   "Mongo error",
				Message: "Project Id already exists. Check your project id", // typo fix: "you" -> "your"
			}, http.StatusBadRequest)
			return
		}
		utility.NewJSONWriter(w).Write(models.Response{
			Error:   "",
			Message: "Project Created Successfully", // typo fix: "Successsfully"
		}, http.StatusOK)
	}
}
|
package 搜索
// -------------------------------- 搜索 --------------------------------
// numWays counts the distinct ways to pass the ball from player 0 to player
// n-1 in exactly k passes, using DFS over an adjacency matrix.
func numWays(n int, relation [][]int, k int) int {
	canReach := get2DSlice(n, n)
	for _, edge := range relation {
		canReach[edge[0]][edge[1]] = true
	}
	return getNumWays(n, canReach, 0, k)
}
// getNumWays returns how many ways the ball can travel from curNum to player
// n-1 in exactly k more passes, following the canReach adjacency matrix.
func getNumWays(n int, canReach [][]bool, curNum, k int) int {
	if k == 0 {
		// Out of passes: count 1 only if we ended on the target player.
		if curNum == n-1 {
			return 1
		}
		return 0
	}
	total := 0
	for next := 0; next < n; next++ {
		// Self-passes are never followed.
		if next != curNum && canReach[curNum][next] {
			total += getNumWays(n, canReach, next, k-1)
		}
	}
	return total
}
// get2DSlice allocates a rows x column boolean matrix, all false.
func get2DSlice(rows, column int) [][]bool {
	matrix := make([][]bool, rows)
	for r := range matrix {
		matrix[r] = make([]bool, column)
	}
	return matrix
}
// -------------------------------- 动态规划 --------------------------------
// numWays counts the passing schemes with dynamic programming over rounds.
func numWays(n int, relation [][]int, k int) int {
	// ways[i][t]: number of ways the ball reaches player t after i passes.
	ways := get2DIntSlice(k+1, n)
	ways[0][0] = 1
	for round := 1; round <= k; round++ {
		for _, edge := range relation {
			ways[round][edge[1]] += ways[round-1][edge[0]]
		}
	}
	return ways[k][n-1]
}
// get2DIntSlice allocates a rows x column integer matrix, all zero.
func get2DIntSlice(rows, column int) [][]int {
	matrix := make([][]int, rows)
	for r := range matrix {
		matrix[r] = make([]int, column)
	}
	return matrix
}
// -------------------------------- 动态规划状态压缩 --------------------------------
// 正确的状态压缩
// numWays counts the passing schemes with a rolling one-dimensional DP:
// only the previous round's counts are kept, and each round is written into
// a fresh slice so current-round updates never feed back into themselves.
func numWays(n int, relation [][]int, k int) int {
	current := make([]int, n)
	current[0] = 1
	for round := 1; round <= k; round++ {
		next := make([]int, n)
		for _, edge := range relation {
			next[edge[1]] += current[edge[0]]
		}
		current = next
	}
	return current[n-1]
}
// 错误的状态压缩
// numWays — deliberately WRONG state compression, kept as a counter-example.
// Updating ways in place lets a round read counts that were already updated
// in the same round (an edge a->b followed by b->c within one loop pass
// leaks round-i results into round-i computations), so the answer
// over-counts. The correct version above writes each round into a fresh
// slice.
func numWays(n int, relation [][]int, k int) int {
	ways := make([]int, n)
	ways[0] = 1
	for i := 1; i <= k; i++ {
		for t := 0; t < len(relation); t++ {
			ways[relation[t][1]] += ways[relation[t][0]]
		}
	}
	return ways[n-1]
}
/*
题目链接: https://leetcode-cn.com/problems/chuan-di-xin-xi/
总结:
1. 这题我采用邻接矩阵存储边的信息。
2. 这题官方还给出了动态规划的解法!
3. 状态压缩时,要记得保留上层的状态,而且不要让从当层获取当层的结果。 (看上面错误的状态压缩)
*/
|
package jqka
import (
"bytes"
"fmt"
"time"
. "../"
. "../../base"
"github.com/golang/glog"
)
// tout is the HTTP timeout applied to every 10jqka quote request.
const tout time.Duration = time.Second * 10
// JQKARobot downloads daily K-line quote data from 10jqka (d.10jqka.com.cn).
type JQKARobot struct {
	RobotBase
}
// init registers DefaultRobotConcurrent JQKARobot instances so downloads can
// run concurrently.
func init() {
	for i := DefaultRobotConcurrent; i > 0; i-- {
		robot := &JQKARobot{}
		Registry(robot)
	}
}
// Can reports whether this robot handles the given task for id; only
// daily-quote tasks (TaskDay) are supported.
// The original switch ended in an unreachable `return false`; the whole body
// reduces to a single comparison.
func (p *JQKARobot) Can(id string, task int32) bool {
	return task == TaskDay
}
// Day_url builds the daily K-line URL for the year of t. The first two
// characters of id (the exchange prefix, e.g. "sh"/"sz") are stripped.
func (p *JQKARobot) Day_url(id string, t time.Time) string {
	return fmt.Sprintf("http://d.10jqka.com.cn/v2/line/hs_%s/01/%s.js", id[2:], t.Format("2006"))
}
// Day_latest_url builds the URL of the most recent daily K-line data for id
// (exchange prefix stripped, as in Day_url).
func (p *JQKARobot) Day_latest_url(id string) string {
	return fmt.Sprintf("http://d.10jqka.com.cn/v2/line/hs_%s/01/last.js", id[2:])
}
// tdata_from_line parses one comma-separated quote line into td, returning
// false when the line does not have exactly 8 fields.
//
// Input field order is date,open,high,low,close,volume,... but Tdata.FromBytes
// takes (time, open, high, close, low, volume) — note the close/low swap
// below is intentional to bridge the two orders.
func (p *JQKARobot) tdata_from_line(td *Tdata, line []byte) bool {
	infos := bytes.Split(line, []byte(","))
	if len(infos) != 8 {
		return false
	}
	//timestr, open, high, low, close, volume
	//20160217,2829.76,2868.70,2824.36,2867.34,21690992000,225964250000.00,
	//timestr, open, high, cloze, low, volume
	timestr := infos[0]
	open := infos[1]
	high := infos[2]
	close := infos[4]
	low := infos[3]
	volume := infos[5]
	// Drop the last two digits of volume — presumably converting the unit
	// (e.g. shares to hundreds); TODO confirm against Tdata.FromBytes.
	if l := len(volume); l > 2 {
		volume = volume[:l-2]
	}
	td.FromBytes(timestr, open, high, close, low, volume)
	return true
}
// parse_tdatas extracts the `"data":"..."` payload from body, splits it on
// ';' into per-day quote lines, and appends every parseable line to res.
// Unparseable lines are skipped silently.
func (p *JQKARobot) parse_tdatas(res []Tdata, body []byte) []Tdata {
	data := ParseParamBeginEnd(body, []byte(`"data":"`), []byte(`"`))
	if data == nil {
		return res
	}
	// 20160104,18.28,18.28,17.55,17.80,42240610,754425780.00,0.226;
	lines := bytes.Split(data, []byte(";"))
	for i, count := 0, len(lines); i < count; i++ {
		td := Tdata{}
		if !p.tdata_from_line(&td, lines[i]) {
			continue
		}
		res = append(res, td)
	}
	return res
}
// Days_download fetches daily quote data for id beginning at start.
//
// It first tries the "last.js" endpoint (most recent data). If the requested
// start date is not covered by that payload, it falls back to downloading
// full per-year files via years_download. The Shanghai composite index id is
// rewritten to 10jqka's internal code. HTTP errors are ignored and surface
// as an empty result.
func (p *JQKARobot) Days_download(id string, start time.Time) (res []Tdata, err error) {
	if id == "sh000001" {
		id = "sh1A0001"
	}
	url := p.Day_latest_url(id)
	body, _ := Http_get(url, nil, tout)
	// The endpoint returns JSONP: quotebridge_v2_line_hs_...(<json>).
	if !bytes.HasPrefix(body, []byte(`quotebridge_v2_line_hs_`)) {
		return
	}
	body = ParseParamBeginEnd(body, []byte(`(`), nil)
	if body == nil {
		return
	}
	body = bytes.TrimRight(body, ")")
	// "start":"19901219" — first date covered by this payload.
	start_str := string(ParseParamBeginEnd(body, []byte(`"start":"`), []byte(`"`)))
	var start_date time.Time
	if len(start_str) > 0 {
		start_date, _ = time.Parse("20060102", start_str)
	} else {
		// No start marker: assume the payload only covers roughly yesterday.
		start_date = time.Now().AddDate(0, 0, -1)
	}
	if start.Before(start_date) {
		start = start_date
	}
	res = p.parse_tdatas(res, body)
	// Locate the first record at/after start; if absent, the "last" payload
	// is too short and the per-year history must be downloaded instead.
	i, ok := (TdataSlice(res)).Search(start.Truncate(time.Hour * 24))
	if !ok {
		return p.years_download(id, start)
	}
	if i >= len(res) {
		res = []Tdata{}
	} else {
		res = res[i:]
	}
	glog.Infoln("10jqka get item", len(res))
	return
}
// years_download downloads one per-year quote file for every year from
// start's year through the current year (inclusive), concatenates the parsed
// records, and trims the result to records at/after start. Years that fail
// to download or parse are skipped.
func (p *JQKARobot) years_download(id string, start time.Time) (res []Tdata, err error) {
	for t, ys, ye := start, start.Year(), time.Now().Year()+1; ys < ye; ys++ {
		url := p.Day_url(id, t)
		t = t.AddDate(1, 0, 0)
		body, _ := Http_get(url, nil, tout)
		// JSONP envelope check, as in Days_download.
		if !bytes.HasPrefix(body, []byte(`quotebridge_v2_line_hs_`)) {
			continue
		}
		body = ParseParamBeginEnd(body, []byte(`(`), nil)
		if body == nil {
			continue
		}
		res = p.parse_tdatas(res, body)
	}
	if len(res) < 1 {
		return
	}
	// Trim to records at/after the requested start day.
	i, _ := (TdataSlice(res)).Search(start.Truncate(time.Hour * 24))
	if i >= len(res) {
		res = []Tdata{}
	} else {
		res = res[i:]
	}
	glog.Infoln("10jqka get item", len(res))
	return
}
|
package compute
const (
	// AssetTypeServer is an asset type representing a server.
	AssetTypeServer = "SERVER"

	// AssetTypeNetworkDomain is an asset type representing a network domain.
	AssetTypeNetworkDomain = "NETWORK_DOMAIN"

	// AssetTypeVLAN is an asset type representing a virtual LAN (VLAN).
	AssetTypeVLAN = "VLAN"

	// AssetTypePublicIPBlock is an asset type representing a public IP block.
	AssetTypePublicIPBlock = "PUBLIC_IP_BLOCK"

	// AssetTypeCustomerImage is an asset type representing a customer image.
	AssetTypeCustomerImage = "CUSTOMER_IMAGE"

	// AssetTypeAccount is an asset type representing an account.
	// (Comment fixed: it previously named AssetTypeCustomerImage.)
	AssetTypeAccount = "ACCOUNT"

	// AssetTypeUser is an asset type representing a user.
	// BUG FIX: the value was "PUBLIC_IP_BLOCK", a copy-paste duplicate of
	// AssetTypePublicIPBlock — verify "USER" against the CloudControl API.
	AssetTypeUser = "USER"
)
|
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"errors"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sacloud/iaas-api-go"
"github.com/sacloud/iaas-api-go/types"
"github.com/sacloud/packages-go/newsfeed"
"github.com/sacloud/sakuracloud_exporter/platform"
"github.com/stretchr/testify/require"
)
// dummyVPCRouterClient is a stub platform.VPCRouterClient: each method
// returns the corresponding canned value/error pair configured per test case.
type dummyVPCRouterClient struct {
	find            []*platform.VPCRouter
	findErr         error
	status          *iaas.VPCRouterStatus
	statusErr       error
	monitor         *iaas.MonitorInterfaceValue
	monitorErr      error
	monitorCPU      *iaas.MonitorCPUTimeValue
	monitorCPUErr   error
	maintenance     *newsfeed.FeedItem
	maintenanceErr  error
}
// Find returns the canned router list and error.
func (d *dummyVPCRouterClient) Find(ctx context.Context) ([]*platform.VPCRouter, error) {
	return d.find, d.findErr
}
// Status returns the canned router status and error, ignoring zone/id.
func (d *dummyVPCRouterClient) Status(ctx context.Context, zone string, id types.ID) (*iaas.VPCRouterStatus, error) {
	return d.status, d.statusErr
}
// MonitorNIC returns the canned NIC activity sample and error, regardless of
// which interface index is requested.
func (d *dummyVPCRouterClient) MonitorNIC(ctx context.Context, zone string, id types.ID, index int, end time.Time) (*iaas.MonitorInterfaceValue, error) {
	return d.monitor, d.monitorErr
}
// MonitorCPU returns the canned CPU-time sample and error.
func (d *dummyVPCRouterClient) MonitorCPU(ctx context.Context, zone string, id types.ID, end time.Time) (*iaas.MonitorCPUTimeValue, error) {
	return d.monitorCPU, d.monitorCPUErr
}
// MaintenanceInfo returns the canned maintenance feed item and error.
func (d *dummyVPCRouterClient) MaintenanceInfo(infoURL string) (*newsfeed.FeedItem, error) {
	return d.maintenance, d.maintenanceErr
}
// TestVPCRouterCollector_Describe checks that Describe advertises exactly
// the collector's full set of metric descriptors — no more, no fewer.
func TestVPCRouterCollector_Describe(t *testing.T) {
	initLoggerAndErrors()
	c := NewVPCRouterCollector(context.Background(), testLogger, testErrors, &dummyVPCRouterClient{})
	descs := collectDescs(c)
	require.Len(t, descs, len([]*prometheus.Desc{
		c.Up,
		c.VPCRouterInfo,
		c.CPUTime,
		c.SessionCount,
		c.DHCPLeaseCount,
		c.L2TPSessionCount,
		c.PPTPSessionCount,
		c.SiteToSitePeerStatus,
		c.Receive,
		c.Send,
		c.SessionAnalysis,
		c.MaintenanceScheduled,
		c.MaintenanceInfo,
		c.MaintenanceStartTime,
		c.MaintenanceEndTime,
	}))
}
// TestVPCRouterCollector_Collect drives the collector with a stub client and
// compares emitted logs, the error counter, and all collected metrics
// against per-case expectations. Cases cover: a failing Find call, an empty
// listing, a fully populated router with NIC activity, partial API failures,
// and a router with scheduled maintenance info.
func TestVPCRouterCollector_Collect(t *testing.T) {
	initLoggerAndErrors()
	c := NewVPCRouterCollector(context.Background(), testLogger, testErrors, nil)
	monitorTime := time.Unix(1, 0)
	cases := []struct {
		name           string
		in             platform.VPCRouterClient
		wantLogs       []string
		wantErrCounter float64
		wantMetrics    []*collectedMetric
	}{
		{
			// Find fails: one warning log, one error count, no metrics.
			name: "collector returns error",
			in: &dummyVPCRouterClient{
				findErr: errors.New("dummy"),
			},
			wantLogs:       []string{`level=WARN msg="can't list vpc routers" err=dummy`},
			wantErrCounter: 1,
			wantMetrics:    nil,
		},
		{
			name:        "empty result",
			in:          &dummyVPCRouterClient{},
			wantMetrics: nil,
		},
		{
			// Happy path: status + per-NIC traffic metrics are all emitted.
			name: "a VPCRouter with activity monitor",
			in: &dummyVPCRouterClient{
				find: []*platform.VPCRouter{
					{
						ZoneName: "is1a",
						VPCRouter: &iaas.VPCRouter{
							ID:             101,
							Name:           "router",
							Description:    "desc",
							Tags:           types.Tags{"tag1", "tag2"},
							PlanID:         types.VPCRouterPlans.Premium,
							InstanceStatus: types.ServerInstanceStatuses.Up,
							Availability:   types.Availabilities.Available,
							Interfaces: []*iaas.VPCRouterInterface{
								{
									Index: 0,
									ID:    200,
								},
								{
									Index: 1,
									ID:    201,
								},
							},
							Settings: &iaas.VPCRouterSetting{
								VRID:                      1,
								InternetConnectionEnabled: true,
								Interfaces: []*iaas.VPCRouterInterfaceSetting{
									{
										VirtualIPAddress: "192.168.0.1",
										IPAddress:        []string{"192.168.0.11", "192.168.0.12"},
										NetworkMaskLen:   24,
										Index:            0,
									},
									{
										VirtualIPAddress: "192.168.1.1",
										IPAddress:        []string{"192.168.1.11", "192.168.1.12"},
										NetworkMaskLen:   24,
										Index:            1,
									},
								},
							},
						},
					},
				},
				status: &iaas.VPCRouterStatus{
					SessionCount: 100,
					DHCPServerLeases: []*iaas.VPCRouterDHCPServerLease{
						{
							IPAddress:  "172.16.0.1",
							MACAddress: "aa:bb:cc:dd:ee:ff",
						},
					},
					L2TPIPsecServerSessions: []*iaas.VPCRouterL2TPIPsecServerSession{
						{
							User:      "user1",
							IPAddress: "172.16.1.1",
							TimeSec:   10,
						},
					},
					PPTPServerSessions: []*iaas.VPCRouterPPTPServerSession{
						{
							User:      "user2",
							IPAddress: "172.16.2.1",
							TimeSec:   20,
						},
					},
					SiteToSiteIPsecVPNPeers: []*iaas.VPCRouterSiteToSiteIPsecVPNPeer{
						{
							Status: "UP",
							Peer:   "172.16.3.1",
						},
					},
					SessionAnalysis: &iaas.VPCRouterSessionAnalysis{
						SourceAddress: []*iaas.VPCRouterStatisticsValue{
							{Name: "localhost", Count: 4},
						},
					},
				},
				monitor: &iaas.MonitorInterfaceValue{
					Time:    monitorTime,
					Receive: 100,
					Send:    200,
				},
			},
			wantMetrics: []*collectedMetric{
				{
					desc: c.Up,
					metric: createGaugeMetric(1, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.VPCRouterInfo,
					metric: createGaugeMetric(1, map[string]string{
						"id":                  "101",
						"name":                "router",
						"zone":                "is1a",
						"plan":                "premium",
						"ha":                  "1",
						"vrid":                "1",
						"vip":                 "192.168.0.1",
						"ipaddress1":          "192.168.0.11",
						"ipaddress2":          "192.168.0.12",
						"nw_mask_len":         "24",
						"internet_connection": "1",
						"tags":                ",tag1,tag2,",
						"description":         "desc",
					}),
				},
				{
					desc: c.SessionCount,
					metric: createGaugeMetric(100, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.DHCPLeaseCount,
					metric: createGaugeMetric(1, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.L2TPSessionCount,
					metric: createGaugeMetric(1, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.PPTPSessionCount,
					metric: createGaugeMetric(1, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.SiteToSitePeerStatus,
					metric: createGaugeMetric(1, map[string]string{
						"id":           "101",
						"name":         "router",
						"zone":         "is1a",
						"peer_index":   "0",
						"peer_address": "172.16.3.1",
					}),
				},
				{
					desc: c.SessionAnalysis,
					metric: createGaugeMetric(4, map[string]string{
						"id":    "101",
						"name":  "router",
						"zone":  "is1a",
						"type":  "SourceAddress",
						"label": "localhost",
					}),
				},
				// NIC metrics are reported in kilobits/sec: raw bytes * 8 / 1000.
				{
					desc: c.Receive,
					metric: createGaugeWithTimestamp(float64(100)*8/1000, map[string]string{
						"id":          "101",
						"name":        "router",
						"zone":        "is1a",
						"nic_index":   "0",
						"vip":         "192.168.0.1",
						"ipaddress1":  "192.168.0.11",
						"ipaddress2":  "192.168.0.12",
						"nw_mask_len": "24",
					}, monitorTime),
				},
				{
					desc: c.Receive,
					metric: createGaugeWithTimestamp(float64(100)*8/1000, map[string]string{
						"id":          "101",
						"name":        "router",
						"zone":        "is1a",
						"nic_index":   "1",
						"vip":         "192.168.1.1",
						"ipaddress1":  "192.168.1.11",
						"ipaddress2":  "192.168.1.12",
						"nw_mask_len": "24",
					}, monitorTime),
				},
				{
					desc: c.Send,
					metric: createGaugeWithTimestamp(float64(200)*8/1000, map[string]string{
						"id":          "101",
						"name":        "router",
						"zone":        "is1a",
						"nic_index":   "0",
						"vip":         "192.168.0.1",
						"ipaddress1":  "192.168.0.11",
						"ipaddress2":  "192.168.0.12",
						"nw_mask_len": "24",
					}, monitorTime),
				},
				{
					desc: c.Send,
					metric: createGaugeWithTimestamp(float64(200)*8/1000, map[string]string{
						"id":          "101",
						"name":        "router",
						"zone":        "is1a",
						"nic_index":   "1",
						"vip":         "192.168.1.1",
						"ipaddress1":  "192.168.1.11",
						"ipaddress2":  "192.168.1.12",
						"nw_mask_len": "24",
					}, monitorTime),
				},
				{
					desc: c.MaintenanceScheduled,
					metric: createGaugeMetric(0, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
			},
		},
		{
			// Status/monitor APIs fail: base metrics still emitted, two
			// warnings logged, error counter incremented twice.
			name: "APIs return error",
			in: &dummyVPCRouterClient{
				find: []*platform.VPCRouter{
					{
						ZoneName: "is1a",
						VPCRouter: &iaas.VPCRouter{
							ID:             101,
							Name:           "router",
							Description:    "desc",
							Tags:           types.Tags{"tag1", "tag2"},
							PlanID:         types.VPCRouterPlans.Premium,
							InstanceStatus: types.ServerInstanceStatuses.Up,
							Availability:   types.Availabilities.Available,
							Interfaces: []*iaas.VPCRouterInterface{
								{Index: 0, ID: 200},
							},
							Settings: &iaas.VPCRouterSetting{
								VRID:                      1,
								InternetConnectionEnabled: true,
								Interfaces: []*iaas.VPCRouterInterfaceSetting{
									{
										VirtualIPAddress: "192.168.0.1",
										IPAddress:        []string{"192.168.0.11", "192.168.0.12"},
										NetworkMaskLen:   24,
										Index:            0,
									},
								},
							},
						},
					},
				},
				statusErr:  errors.New("dummy1"),
				monitorErr: errors.New("dummy2"),
			},
			wantMetrics: []*collectedMetric{
				{
					desc: c.Up,
					metric: createGaugeMetric(1, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.VPCRouterInfo,
					metric: createGaugeMetric(1, map[string]string{
						"id":                  "101",
						"name":                "router",
						"zone":                "is1a",
						"plan":                "premium",
						"ha":                  "1",
						"vrid":                "1",
						"vip":                 "192.168.0.1",
						"ipaddress1":          "192.168.0.11",
						"ipaddress2":          "192.168.0.12",
						"nw_mask_len":         "24",
						"internet_connection": "1",
						"tags":                ",tag1,tag2,",
						"description":         "desc",
					}),
				},
				{
					desc: c.MaintenanceScheduled,
					metric: createGaugeMetric(0, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
			},
			wantLogs: []string{
				`level=WARN msg="can't fetch vpc_router's status" err=dummy1`,
				`level=WARN msg="can't get vpc_router's receive bytes: ID=101, NICIndex=0" err=dummy2`,
			},
			wantErrCounter: 2,
		},
		{
			// Maintenance feed present: scheduled flag, info and start/end
			// timestamps are exported.
			name: "a VPCRouter with maintenance info",
			in: &dummyVPCRouterClient{
				find: []*platform.VPCRouter{
					{
						ZoneName: "is1a",
						VPCRouter: &iaas.VPCRouter{
							ID:                  101,
							Name:                "router",
							Description:         "desc",
							Tags:                types.Tags{"tag1", "tag2"},
							PlanID:              types.VPCRouterPlans.Premium,
							InstanceStatus:      types.ServerInstanceStatuses.Up,
							InstanceHostInfoURL: "http://example.com/maintenance-info-dummy-url",
							Availability:        types.Availabilities.Available,
							Interfaces: []*iaas.VPCRouterInterface{
								{
									Index: 0,
									ID:    200,
								},
								{
									Index: 1,
									ID:    201,
								},
							},
							Settings: &iaas.VPCRouterSetting{
								VRID:                      1,
								InternetConnectionEnabled: true,
								Interfaces: []*iaas.VPCRouterInterfaceSetting{
									{
										VirtualIPAddress: "192.168.0.1",
										IPAddress:        []string{"192.168.0.11", "192.168.0.12"},
										NetworkMaskLen:   24,
										Index:            0,
									},
									{
										VirtualIPAddress: "192.168.1.1",
										IPAddress:        []string{"192.168.1.11", "192.168.1.12"},
										NetworkMaskLen:   24,
										Index:            1,
									},
								},
							},
						},
					},
				},
				maintenance: &newsfeed.FeedItem{
					StrDate:       "947430000", // 2000-01-10
					Description:   "desc",
					StrEventStart: "946652400", // 2000-01-01
					StrEventEnd:   "949244400", // 2000-01-31
					Title:         "dummy-title",
					URL:           "http://example.com/maintenance",
				},
			},
			wantMetrics: []*collectedMetric{
				{
					desc: c.Up,
					metric: createGaugeMetric(1, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.VPCRouterInfo,
					metric: createGaugeMetric(1, map[string]string{
						"id":                  "101",
						"name":                "router",
						"zone":                "is1a",
						"plan":                "premium",
						"ha":                  "1",
						"vrid":                "1",
						"vip":                 "192.168.0.1",
						"ipaddress1":          "192.168.0.11",
						"ipaddress2":          "192.168.0.12",
						"nw_mask_len":         "24",
						"internet_connection": "1",
						"tags":                ",tag1,tag2,",
						"description":         "desc",
					}),
				},
				{
					desc: c.MaintenanceScheduled,
					metric: createGaugeMetric(1, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.MaintenanceInfo,
					metric: createGaugeMetric(1, map[string]string{
						"id":          "101",
						"name":        "router",
						"zone":        "is1a",
						"info_url":    "http://example.com/maintenance",
						"info_title":  "dummy-title",
						"description": "desc",
						"start_date":  "946652400",
						"end_date":    "949244400",
					}),
				},
				{
					desc: c.MaintenanceStartTime,
					metric: createGaugeMetric(946652400, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
				{
					desc: c.MaintenanceEndTime,
					metric: createGaugeMetric(949244400, map[string]string{
						"id":   "101",
						"name": "router",
						"zone": "is1a",
					}),
				},
			},
		},
	}
	// Each case reuses the same collector; fresh logger/error counters and
	// the case's stub client are swapped in before collecting.
	for _, tc := range cases {
		initLoggerAndErrors()
		c.logger = testLogger
		c.errors = testErrors
		c.client = tc.in
		collected, err := collectMetrics(c, "vpc_router")
		require.NoError(t, err)
		require.Equal(t, tc.wantLogs, collected.logged)
		require.Equal(t, tc.wantErrCounter, *collected.errors.Counter.Value)
		requireMetricsEqual(t, tc.wantMetrics, collected.collected)
	}
}
|
package acme
import (
"bytes"
"context"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"fmt"
"log"
"golang.org/x/crypto/acme"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/jetstack-experimental/cert-manager/pkg/apis/certmanager/v1alpha1"
"github.com/jetstack-experimental/cert-manager/pkg/util/kube"
"github.com/jetstack-experimental/cert-manager/pkg/util/pki"
)
const (
errorIssueCert = "ErrIssueCert"
successCertIssued = "CertIssueSuccess"
messageErrorIssueCert = "Error issuing TLS certificate: "
messageCertIssued = "Certificate issued successfully"
)
// obtainCertificate requests a TLS certificate from the issuer's ACME server
// for the domains listed on crt.
//
// It reuses the certificate's existing private key from the named secret
// when present, generating a fresh 2048-bit RSA key only when that secret is
// absent. Returns (PKCS#1 private key PEM, certificate chain PEM, error).
func (a *Acme) obtainCertificate(ctx context.Context, crt *v1alpha1.Certificate) ([]byte, []byte, error) {
	if crt.Spec.ACME == nil {
		return nil, nil, fmt.Errorf("acme config must be specified")
	}
	domains := crt.Spec.Domains
	if len(domains) == 0 {
		return nil, nil, fmt.Errorf("no domains specified")
	}
	// The ACME account key lives in the issuer's namespace.
	acmePrivKey, err := kube.SecretTLSKey(a.secretsLister, a.issuer.Namespace, a.issuer.Spec.ACME.PrivateKey)
	if err != nil {
		return nil, nil, fmt.Errorf("error getting acme account private key: %s", err.Error())
	}
	cl := &acme.Client{
		Key:          acmePrivKey,
		DirectoryURL: a.issuer.Spec.ACME.Server,
	}
	key, err := kube.SecretTLSKey(a.secretsLister, crt.Namespace, crt.Spec.SecretName)
	if k8sErrors.IsNotFound(err) {
		key, err = pki.GenerateRSAPrivateKey(2048)
		if err != nil {
			return nil, nil, fmt.Errorf("error generating private key: %s", err.Error())
		}
	}
	// Any remaining error here is a lookup failure other than NotFound.
	if err != nil {
		return nil, nil, fmt.Errorf("error getting certificate private key: %s", err.Error())
	}
	template := pki.GenerateCSR(domains)
	csr, err := x509.CreateCertificateRequest(rand.Reader, template, key)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating certificate request: %s", err)
	}
	certSlice, certURL, err := cl.CreateCert(
		ctx,
		csr,
		0,
		true,
	)
	if err != nil {
		return nil, nil, fmt.Errorf("error getting certificate for acme server: %s", err)
	}
	certBuffer := bytes.NewBuffer([]byte{})
	for _, cert := range certSlice {
		// NOTE(review): pem.Encode's error is ignored; writes to a
		// bytes.Buffer cannot fail in practice, but an explicit check would
		// be safer.
		pem.Encode(certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: cert})
	}
	log.Printf("successfully got certificate: domains=%+v url=%s", domains, certURL)
	return pki.EncodePKCS1PrivateKey(key), certBuffer.Bytes(), nil
}
// Issue obtains a certificate for crt and records the outcome on a copy of
// the certificate's status (the input object is not mutated). It returns the
// updated status, the private key PEM, the certificate PEM, and any error
// from issuance.
func (a *Acme) Issue(ctx context.Context, crt *v1alpha1.Certificate) (v1alpha1.CertificateStatus, []byte, []byte, error) {
	update := crt.DeepCopy()
	key, cert, err := a.obtainCertificate(ctx, crt)
	if err != nil {
		s := messageErrorIssueCert + err.Error()
		update.UpdateStatusCondition(v1alpha1.CertificateConditionReady, v1alpha1.ConditionFalse, errorIssueCert, s)
		return update.Status, nil, nil, err
	}
	update.UpdateStatusCondition(v1alpha1.CertificateConditionReady, v1alpha1.ConditionTrue, successCertIssued, messageCertIssued)
	return update.Status, key, cert, err
}
|
package grid
// GridRepository abstracts a cellular-automaton grid: its size, rendering,
// and generation advancement.
type GridRepository interface {
	// GetDimensions returns the grid's two dimensions (presumably
	// width/height — confirm order against implementations).
	GetDimensions() (int, int)
	// Draw renders the current generation.
	Draw()
	// CalculateNexGeneration advances the grid by one generation.
	// NOTE(review): the name looks like a typo for "CalculateNextGeneration";
	// renaming would break every implementer, so confirm before fixing.
	CalculateNexGeneration() error
}
|
package main
import (
"flag"
"fmt"
log "gopkg.in/Sirupsen/logrus.v0"
"github.com/mackee/kuiperbelt"
)
// main parses command-line flags, optionally prints the version or adjusts
// the log level, then launches the kuiperbelt server.
func main() {
	var configFilename, logLevel, port, sock string
	var showVersion bool
	flag.StringVar(&configFilename, "config", "config.yml", "config path")
	flag.StringVar(&logLevel, "log-level", "", "log level")
	flag.StringVar(&port, "port", "", "launch port")
	flag.StringVar(&sock, "sock", "", "unix domain socket path")
	flag.BoolVar(&showVersion, "version", false, "show version")
	flag.Parse()
	if showVersion {
		fmt.Printf("ekbo version: %s\n", kuiperbelt.Version)
		return
	}
	if logLevel != "" {
		lvl, err := log.ParseLevel(logLevel)
		if err != nil {
			// BUG FIX: the structured field key was misspelled "log_evel".
			log.WithFields(log.Fields{
				"log_level": logLevel,
			}).Fatal("cannot parse log level")
		}
		log.SetLevel(lvl)
	}
	kuiperbelt.Run(port, sock, configFilename)
}
|
package criteria
import (
"fmt"
"github.com/open-policy-agent/opa/ast"
"github.com/pomerium/pomerium/pkg/policy/generator"
"github.com/pomerium/pomerium/pkg/policy/parser"
"github.com/pomerium/pomerium/pkg/policy/rules"
"github.com/pomerium/pomerium/pkg/webauthnutil"
)
// Operator keys accepted inside a "device" policy criterion object.
const (
	deviceOperatorApproved = "approved"
	deviceOperatorIs = "is"
	deviceOperatorType = "type"
)
// deviceOperatorLookup is the set of valid operator keys; GenerateRule
// rejects any key not present here.
var deviceOperatorLookup = map[string]struct{}{
	deviceOperatorApproved: {},
	deviceOperatorIs: {},
	deviceOperatorType: {},
}
// deviceCriterion implements the "device" policy criterion.
type deviceCriterion struct {
	g *Generator
}
// DataType reports that this criterion takes free-form input.
func (deviceCriterion) DataType() CriterionDataType {
	return generator.CriterionDataTypeUnknown
}
// Name returns the criterion keyword used in policy source.
func (deviceCriterion) Name() string {
	return "device"
}
// GenerateRule builds the Rego rule for a "device" criterion. data must be
// an object whose keys are drawn from deviceOperatorLookup ("approved",
// "is", "type"); any other key is an error. It returns the main rule plus
// the shared helper rules it depends on.
func (c deviceCriterion) GenerateRule(_ string, data parser.Value) (*ast.Rule, []*ast.Rule, error) {
	obj, ok := data.(parser.Object)
	if !ok {
		return nil, nil, fmt.Errorf("expected object for device criterion, got: %T", data)
	}
	// Reject unknown operator keys up front.
	for k := range obj {
		_, ok := deviceOperatorLookup[k]
		if !ok {
			return nil, nil, fmt.Errorf("unexpected field in device criterion: %s", k)
		}
	}
	var body ast.Body
	switch {
	case obj.Truthy(deviceOperatorApproved):
		// must be approved
		body = append(body, ast.Body{
			ast.MustParseExpr(`count([x|x:=device_enrollment.approved_by]) > 0`),
		}...)
	case obj.Falsy(deviceOperatorApproved):
		// must *not* be approved
		body = append(body, ast.Body{
			ast.MustParseExpr(`count([x|x:=device_enrollment.approved_by]) == 0`),
		}...)
	}
	// "is": require a specific device credential id.
	if v, ok := obj[deviceOperatorIs]; ok {
		s, ok := v.(parser.String)
		if !ok {
			return nil, nil, fmt.Errorf("expected string for device criterion is operator, got %T", v)
		}
		body = append(body, ast.Body{
			ast.Assign.Expr(ast.VarTerm("is_expect"), ast.StringTerm(string(s))),
			ast.MustParseExpr(`is_expect == device_credential.id`),
		}...)
	}
	// "type": overrides the default device type. Note the type itself is
	// passed to NewCriterionDeviceRule below; the body only asserts that a
	// credential exists.
	deviceType := webauthnutil.DefaultDeviceType
	if v, ok := obj[deviceOperatorType]; ok {
		s, ok := v.(parser.String)
		if !ok {
			return nil, nil, fmt.Errorf("expected string for device criterion type operator, got %T", v)
		}
		deviceType = string(s)
		body = append(body, ast.Body{
			ast.MustParseExpr(`device_credential.id != ""`),
		}...)
	}
	rule := NewCriterionDeviceRule(c.g, c.Name(),
		ReasonDeviceOK, ReasonDeviceUnauthorized,
		body, deviceType)
	return rule, []*ast.Rule{
		rules.GetDeviceCredential(),
		rules.GetDeviceEnrollment(),
		rules.GetSession(),
		rules.ObjectGet(),
	}, nil
}
// Device returns a Criterion based on the User's device state.
func Device(g *Generator) Criterion {
	// The parameter was previously named "generator", which shadowed the
	// imported generator package inside this function.
	return deviceCriterion{g: g}
}
// init registers the device criterion in the global criterion registry.
func init() {
	Register(Device)
}
|
package spotbot
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/CloudCom/firego"
)
// Track is a single playable track as stored in Firebase.
type Track struct {
	duration float64 // playback duration (units not established in this file)
	uri      string  // Spotify URI of the track
	title    string
	artist   string
}

// String renders the track as "<title> - <artist>".
func (t Track) String() string {
	return fmt.Sprintf("%s - %s", t.title, t.artist)
}
// Player mirrors the player-control flags.
// NOTE(review): neither field is referenced anywhere in this file — confirm
// the type is still needed.
type Player struct {
	next bool
	playing bool
}
// Playlist is an ordered collection of Tracks.
type Playlist struct {
	tracks []Track
}
// Spotbot is the Firebase-backed client for the shared playlist.
type Spotbot struct {
	rootUrl string // never assigned in this file
	fb *firego.Firebase
}
// New returns a Spotbot backed by the Firebase instance at firebaseUrl.
// Note that the rootUrl field is left unset here.
func New(firebaseUrl string) *Spotbot {
	return &Spotbot{fb: firego.New(firebaseUrl)}
}
// CurrentTrack reads the "current_track" node from Firebase and converts it
// into a Track. A Firebase read error aborts the process (see logError).
func (sp *Spotbot) CurrentTrack() Track {
	var val map[string]interface{}
	ref := sp.fb.Child("current_track")
	logError(ref.Value(&val))
	return toTrack(val)
}
// toTrack converts a raw Firebase value map into a Track.
// NOTE(review): this assumes val["artists"] is a []interface{} of plain
// strings, while Search treats artist entries as objects with a "name" key —
// confirm which shape the database actually stores; a mismatch panics here.
func toTrack(val map[string]interface{}) Track {
	artist := val["artists"].([]interface{})[0].(string)
	return Track{val["duration"].(float64), val["uri"].(string), val["title"].(string), artist}
}
// Playing fetches the "playlist" node from Firebase and returns it as a
// Playlist of Tracks, in stored order.
func (sp *Spotbot) Playing() Playlist {
	var val []map[string]interface{}
	ref := sp.fb.Child("playlist")
	logError(ref.Value(&val))
	// Bug fix: this previously used make([]Track, len(val)) and then
	// appended, which left len(val) zero-value Tracks at the front of the
	// playlist. Allocate zero length with capacity instead.
	tracks := make([]Track, 0, len(val))
	for _, trackData := range val {
		tracks = append(tracks, toTrack(trackData))
	}
	return Playlist{tracks}
}
// Shuffle toggles the "playlist/shuffle" flag in Firebase.
// NOTE(review): the read-then-write is not atomic; concurrent callers race.
func (sp *Spotbot) Shuffle() {
	ref := sp.fb.Child("playlist/shuffle")
	shuffle := !sp.IsShuffled()
	logError(ref.Set(shuffle))
}
// IsShuffled reports the current value of the "playlist/shuffle" flag.
func (sp *Spotbot) IsShuffled() bool {
	var val bool
	ref := sp.fb.Child("playlist/shuffle")
	logError(ref.Value(&val))
	return val
}
// NextSong asks the player server to skip to the next track.
func (sp *Spotbot) NextSong() {
	requestServer("next")
}
// Pause stops playback via the player server.
func (sp *Spotbot) Pause() {
	requestServer("stop")
}
// Play starts playback via the player server.
func (sp *Spotbot) Play() {
	requestServer("start")
}
// requestServer issues a PUT to the player endpoint of the server named by
// the SPOTBOT_SERVER environment variable for the given action ("next",
// "stop", "start", ...). The response body is fully read so the connection
// can be reused. Any failure aborts the process, matching the rest of this
// file's error style.
func requestServer(action string) {
	url := os.Getenv("SPOTBOT_SERVER")
	client := &http.Client{}
	request, err := http.NewRequest("PUT", url+"/player/"+action, nil)
	if err != nil {
		// Bug fix: this error was silently overwritten by client.Do's
		// return, and Do would have been invoked with a nil request.
		log.Fatal(err)
	}
	response, err := client.Do(request)
	if err != nil {
		log.Fatal(err)
	}
	defer response.Body.Close()
	if _, err := ioutil.ReadAll(response.Body); err != nil {
		log.Fatal(err)
	}
}
// Search queries the public Spotify search API and returns up to 20 matching
// tracks. It returns nil for an empty query or on any request/decoding
// failure.
func (sp *Spotbot) Search(query string) []Track {
	if query == "" {
		return nil
	}
	url := fmt.Sprintf("http://api.spotify.com/v1/search?limit=20&type=track&market=se&q='%s'", query)
	fmt.Println(url)
	res, err := http.Get(url)
	if err != nil {
		// Bug fix: the error was discarded and res (nil on error) was
		// dereferenced below, panicking on any network failure.
		log.Println(err)
		return nil
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Println(err)
		return nil
	}
	var data map[string]map[string]interface{}
	if err := json.Unmarshal(body, &data); err != nil || data == nil {
		return nil
	}
	// NOTE(review): these type assertions still panic if the API response
	// shape changes; confirm that is acceptable for this bot. The query is
	// also not URL-escaped — spaces or '&' in it will corrupt the request.
	rawItems := data["tracks"]["items"].([]interface{})
	tracks := make([]Track, 0, len(rawItems))
	for _, rawItem := range rawItems {
		item := rawItem.(map[string]interface{})
		artist := item["artists"].([]interface{})[0].(map[string]interface{})["name"].(string)
		tracks = append(tracks, Track{title: item["name"].(string), uri: item["uri"].(string), artist: artist})
	}
	return tracks
}
// logError aborts the process via log.Fatal when err is non-nil; a nil err
// is a no-op.
func logError(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
|
package main
import (
"bufio"
//"bytes"
"encoding/json"
"errors"
"flag"
"github.com/nsqio/go-nsq"
"io/ioutil"
"log"
"net/http"
"sync"
)
// Shared state for the router process.
var route = make(map[string]bool) // (nodeAddr+topic) -> subscribed; guarded by mutex3
var exitchan = make(chan bool) // never written; used only to block main forever
var msgchan = make(chan *nsq.Message, 10000) // inbound messages awaiting routing
var producers = make(map[string]*nsq.Producer) // nodeAddr -> producer; guarded by mutex2
var consumers = make(map[string]*nsq.Consumer) // nsqd addr -> consumer; guarded by mutex
var mutex = &sync.Mutex{}
var mutex2 = &sync.Mutex{}
var mutex3 = &sync.Mutex{}
var ccfg = nsq.NewConfig() // consumer config
var pcfg = nsq.NewConfig() // producer config
//var bufscan = bufio.NewScanner(r)
var mainnode = flag.String("mainnode", "127.0.0.1:4152", "The Main Node")
// HandleMessage is the nsq.Handler hook for every consumer: it queues the
// incoming message onto msgchan for RouteMsg to forward.
func HandleMessage(m *nsq.Message) error {
	log.Println("New Msg")
	msgchan <- m
	return nil
}
// AddSource creates an NSQ consumer for (topic, channel) on the nsqd at addr
// and routes its messages into msgchan via HandleMessage. Registering the
// same addr twice returns an error.
func AddSource(addr string, topic string, channel string) error {
	mutex.Lock()
	// Idiom fix: unlock once via defer instead of on every exit path; the
	// numbered debug prints (log.Println(1/2/3)) were also removed.
	defer mutex.Unlock()
	if consumers[addr] != nil {
		log.Println("The Consumer has existed")
		return errors.New("The Consumer has existed")
	}
	c, err := nsq.NewConsumer(topic, channel, ccfg)
	if err != nil {
		log.Println(err)
		return err
	}
	c.AddHandler(nsq.HandlerFunc(HandleMessage))
	if err := c.ConnectToNSQD(addr); err != nil {
		return err
	}
	consumers[addr] = c
	return nil
}
// AddNode registers an NSQ producer for the node at addr; a duplicate
// registration is logged and ignored.
func AddNode(addr string) {
	mutex2.Lock()
	defer mutex2.Unlock() // was unlocked manually on each exit path
	if producers[addr] != nil {
		log.Println("The Producer has existed")
		return
	}
	p, err := nsq.NewProducer(addr, pcfg)
	if err != nil {
		log.Println(err)
		return
	}
	producers[addr] = p
}
// DelNode removes the producer registered for addr, if any.
func DelNode(addr string) {
	mutex2.Lock()
	defer mutex2.Unlock()
	// Bug fix: the entry was previously set to nil instead of deleted.
	// RouteMsg ranges over the map and calls Publish on every value, so a
	// leftover nil *nsq.Producer would panic; the nil entries also leaked
	// map keys forever.
	delete(producers, addr)
}
// GetTopic strips the first line from the message body and returns it; the
// remainder becomes the new body. The first line is used as the topic to
// republish under.
// NOTE(review): bufio.ScanLines with atEOF=false returns advance=0 when the
// body holds no newline, in which case the topic is "" and the body is left
// untouched — confirm every routed message is newline-terminated.
func GetTopic(m *nsq.Message) string {
	body := m.Body
	a, b, _ := bufio.ScanLines(body, false)
	//log.Println(a, string(b), c)
	m.Body = m.Body[a:]
	//bytes.TrimPrefix(m.Body, b)
	return string(b)
}
// RouteMsg forwards every message from msgchan to the registered producers.
// The main node always receives the message; every other node receives only
// topics it has subscribed to via AddTopic. Runs forever; start it as a
// goroutine.
func RouteMsg() {
	for m := range msgchan {
		t := GetTopic(m)
		// Race fix: AddNode/DelNode mutate producers concurrently, but this
		// loop previously read the map without holding mutex2.
		mutex2.Lock()
		for k, p := range producers {
			if k == *mainnode {
				p.Publish(t, m.Body)
				continue
			}
			mutex3.Lock()
			subscribed := route[k+t]
			mutex3.Unlock()
			if subscribed {
				p.Publish(t, m.Body)
			}
		}
		mutex2.Unlock()
	}
}
// DelTopic unsubscribes node from topic. The main node cannot be
// unsubscribed.
func DelTopic(node string, topic string) {
	if node == *mainnode {
		return
	}
	mutex3.Lock()
	defer mutex3.Unlock()
	// Fix: delete the key instead of storing false — a missing key already
	// reads as false in RouteMsg, and the tombstones grew the map forever.
	delete(route, node+topic)
}
// AddTopic subscribes the node at addr to topic so RouteMsg forwards
// matching messages to it. The main node is implicitly subscribed to all
// topics and is skipped here.
func AddTopic(node string, topic string) {
	if node == *mainnode {
		return
	}
	mutex3.Lock()
	defer mutex3.Unlock()
	route[node+topic] = true
}
// handleAddSource handles /api/addsource. It expects a JSON body of the
// form {"addr": ..., "topic": ..., "channel": ...} and registers a consumer.
func handleAddSource(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	var v = struct {
		// Fix: tags were malformed (`json:addr` — go vet flags them);
		// decoding only worked via case-insensitive field matching.
		Addr    string `json:"addr"`
		Topic   string `json:"topic"`
		Channel string `json:"channel"`
	}{}
	err = json.Unmarshal(body, &v)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	log.Println("Adding Consumer", v)
	err = AddSource(v.Addr, v.Topic, v.Channel)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	w.Write([]byte("Success"))
}
// handleAddNode handles /api/addnode with JSON body {"addr": ...}.
func handleAddNode(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	var v = struct {
		// Fix: malformed tag `json:addr` -> `json:"addr"`.
		Addr string `json:"addr"`
	}{}
	err = json.Unmarshal(body, &v)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	AddNode(v.Addr)
	w.Write([]byte("Success"))
}
// handleDelNode handles /api/delnode with JSON body {"addr": ...}.
func handleDelNode(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	var v = struct {
		// Fix: malformed tag `json:addr` -> `json:"addr"`.
		Addr string `json:"addr"`
	}{}
	err = json.Unmarshal(body, &v)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	DelNode(v.Addr)
	w.Write([]byte("Success"))
}
// handleDelTopic handles /api/deltopic with JSON body
// {"addr": ..., "topic": ...}.
func handleDelTopic(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	var v = struct {
		// Fix: tags were malformed, including the `josn:topic` typo which
		// silently produced an untagged field.
		Addr  string `json:"addr"`
		Topic string `json:"topic"`
	}{}
	err = json.Unmarshal(body, &v)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	DelTopic(v.Addr, v.Topic)
	w.Write([]byte("Success"))
}
// handleAddTopic handles /api/addtopic with JSON body
// {"addr": ..., "topic": ...}.
func handleAddTopic(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	var v = struct {
		// Fix: tags were malformed, including the `josn:topic` typo which
		// silently produced an untagged field.
		Addr  string `json:"addr"`
		Topic string `json:"topic"`
	}{}
	err = json.Unmarshal(body, &v)
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	AddTopic(v.Addr, v.Topic)
	w.Write([]byte("Success"))
}
func ping(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Hello world"))
}
// main starts the routing loop and the HTTP control API, then blocks
// forever on exitchan (which nothing ever writes to).
func main() {
	flag.Parse()
	go RouteMsg()
	go func() {
		mux := http.NewServeMux()
		mux.HandleFunc("/api/addsource", handleAddSource)
		mux.HandleFunc("/api/addnode", handleAddNode)
		mux.HandleFunc("/api/delnode", handleDelNode)
		mux.HandleFunc("/api/deltopic", handleDelTopic)
		mux.HandleFunc("/api/addtopic", handleAddTopic)
		mux.HandleFunc("/api/ping", ping)
		// NOTE(review): ListenAndServe's error is discarded — if the port
		// is taken, the API silently never comes up.
		http.ListenAndServe("0.0.0.0:3003", mux)
	}()
	<-exitchan
}
|
package ds
/**
*
*
*
* Given an array arr of integers, check if there exists two integers N and M such that N is the double of M ( i.e. N = 2 * M).
More formally check if there exists two indices i and j such that :
i != j
0 <= i, j < arr.length
arr[i] == 2 * arr[j]
Example 1:
Input: arr = [10,2,5,3]
Output: true
Explanation: N = 10 is the double of M = 5,that is, 10 = 2 * 5.
Example 2:
Input: arr = [7,1,14,11]
Output: true
Explanation: N = 14 is the double of M = 7,that is, 14 = 2 * 7.
Example 3:
Input: arr = [3,1,7,11]
Output: false
Explanation: In this case does not exist N and M, such that N = 2 * M.
Constraints:
2 <= arr.length <= 500
-10^3 <= arr[i] <= 10^3
Accepted
75,221
Submissions
206,024
*
*
*/
// checkIfExist reports whether there exist two distinct indices i and j
// such that arr[i] == 2*arr[j].
func checkIfExist(arr []int) bool {
	seen := make(map[int]bool, len(arr))
	for _, v := range arr {
		// A previously seen element equal to 2*v (v is the half), or equal
		// to v/2 when v is even (v is the double), completes a pair at two
		// distinct indices.
		if seen[2*v] {
			return true
		}
		if v%2 == 0 && seen[v/2] {
			return true
		}
		seen[v] = true
	}
	return false
}
|
package main
import (
"BackendGo/router"
"BackendGo/server"
"log"
)
// main wires the application together: build the server, attach the routes,
// then run the router. The first failure aborts the process via log.Fatal.
func main() {
	app, err := server.NewServer()
	if err == nil {
		err = router.ApplyRoutes(app)
	}
	if err == nil {
		err = app.Router.Run()
	}
	if err != nil {
		log.Fatal(err)
	}
}
|
package cloudinit
import (
"fmt"
)
// ErrDataNotSupplied is the error returned when a Secret document is
// missing an expected user-data or network-configuration key.
type ErrDataNotSupplied struct {
	DocName string // name of the Secret document that was inspected
	Key     string // key that was expected but not found
}

// Error implements the error interface.
func (e ErrDataNotSupplied) Error() string {
	return fmt.Sprintf("Document %s has no key %s", e.DocName, e.Key)
}
|
package sessionhub
import (
"github.com/game-explorer/animal-chess-server/internal/pkg/log"
"testing"
"time"
)
// TestSetChanWrite is an experiment: reassign a channel variable while
// another goroutine has a send pending on it.
// Observed result: a send already blocked inside select keeps targeting the
// old channel; the newly assigned channel only takes effect on the next
// send attempt.
func TestSetChanWrite(t *testing.T) {
	var c chan int
	go func() {
		for range time.Tick(1 * time.Second) {
			select {
			case c <- 1:
				log.Infof("c")
			case <-time.After(5 * time.Second):
				log.Infof("timeout")
			}
		}
	}()
	// Expected log order: "timeout" first, then "c", then "timeout" again.
	go func() {
		time.Sleep(2 * time.Second)
		c = make(chan int, 10)
		time.Sleep(7 * time.Second)
		c = nil
	}()
	// Bug fix: this previously ended with `select {}`, which blocks forever
	// and hangs `go test` until the binary's panic timeout. Sleep just long
	// enough to observe all three phases, then let the test finish.
	time.Sleep(12 * time.Second)
}
|
package models
// CommentWrap mirrors the top-level envelope of a Reddit comment API
// response.
type CommentWrap struct {
	Json CommentJson `json:"json"`
}
// CommentJson carries the response-level errors plus the returned data.
type CommentJson struct {
	Errors []string `json:"errors"`
	Data CommentJsonData `json:"data"`
}
// CommentJsonData wraps the list of returned "things".
type CommentJsonData struct {
	Things []CommentJsonDataThing `json:"things"`
}
// CommentJsonDataThing is a single typed item ("kind") with its payload.
type CommentJsonDataThing struct {
	Kind string `json:"kind"`
	Data CommentJsonDataThingData `json:"data"`
}
// CommentJsonDataThingData is the payload of a single returned comment;
// fields map 1:1 onto Reddit's JSON keys via the struct tags.
// NOTE(review): several fields are typed string/bool where Reddit may
// return null, numbers, or objects (e.g. ApprovedAtUtc, Replies, Edited) —
// confirm against real responses before relying on them.
type CommentJsonDataThingData struct {
	AuthorFlairBackgroundColor string `json:"author_flair_background_color"`
	TotalAwardsReceived float64 `json:"total_awards_received"`
	ApprovedAtUtc string `json:"approved_at_utc"`
	Distinguished string `json:"distinguished"`
	ModReasonBy string `json:"mod_reason_by"`
	BannedBy string `json:"banned_by"`
	AuthorFlairType string `json:"author_flair_type"`
	RemovalReason string `json:"removal_reason"`
	LinkId string `json:"link_id"`
	AuthorFlairTemplateId string `json:"author_flair_template_id"`
	Likes bool `json:"likes"`
	Replies string `json:"replies"`
	UserReports []string `json:"user_reports"`
	Saved bool `json:"saved"`
	Id string `json:"id"`
	BannedAtUtc string `json:"banned_at_utc"`
	ModReasonTitle string `json:"mod_reason_title"`
	Gilded float64 `json:"gilded"`
	Archived bool `json:"archived"`
	NoFollow bool `json:"no_follow"`
	Author string `json:"author"`
	RteMode string `json:"rte_mode"`
	CanModPost bool `json:"can_mod_post"`
	CreatedUtc float64 `json:"created_utc"`
	SendReplies bool `json:"send_replies"`
	ParentId string `json:"parent_id"`
	Score float64 `json:"score"`
	AuthorFullname string `json:"author_fullname"`
	ApprovedBy string `json:"approved_by"`
	// NOTE(review): idiomatic Go would name this ModNote; renaming is a
	// breaking change for callers, so it is left as-is.
	Mod_note string `json:"mod_note"`
	AllAwardings []string `json:"all_awardings"`
	SubredditId string `json:"subreddit_id"`
	Body string `json:"body"`
	Edited bool `json:"edited"`
	Gildings Gilding `json:"gildings"`
	AuthorFlairCssClass string `json:"author_flair_css_class"`
	Name string `json:"name"`
	AuthorPatreonFlair bool `json:"author_patreon_flair"`
	Downs float64 `json:"downs"`
	AuthorFlairRichtext []string `json:"author_flair_richtext"`
	IsSubmitter bool `json:"is_submitter"`
	CollapsedReason string `json:"collapsed_reason"`
	BodyHtml string `json:"body_html"`
	Stickied bool `json:"stickied"`
	CanGild bool `json:"can_gild"`
	Removed bool `json:"removed"`
	Approved bool `json:"approved"`
	AuthorFlairTextColor string `json:"author_flair_text_color"`
	ScoreHidden bool `json:"score_hidden"`
	Permalink string `json:"permalink"`
	NumReports float64 `json:"num_reports"`
	Locked bool `json:"locked"`
	ReportReasons []string `json:"report_reasons"`
	Created float64 `json:"created"`
	Subreddit string `json:"subreddit"`
	AuthorFlairText string `json:"author_flair_text"`
	Spam bool `json:"spam"`
	Collapsed bool `json:"collapsed"`
	SubredditNamePrefixed string `json:"subreddit_name_prefixed"`
	Controversiality float64 `json:"controversiality"`
	IgnoreReports bool `json:"ignore_reports"`
	ModReports []string `json:"mod_reports"`
	SubredditType string `json:"subreddit_type"`
	Ups float64 `json:"ups"`
}
// Gilding maps award ids ("gid") to the number of times each was given.
type Gilding struct {
	Gid map[string]int `json:"gid"`
}
|
package main
import "fmt"
// main demonstrates Go's arithmetic and bitwise operators on two ints
// (labels are in Portuguese).
func main() {
	a, b := 4, 3
	fmt.Println("soma = ", a+b)
	fmt.Println("subtração = ", a-b)
	fmt.Println("multiplicação = ", a*b)
	fmt.Println("divisão = ", a/b)
	fmt.Println("módulo = ", a%b)
	// bitwise
	fmt.Println("AND = ", a&b)
	fmt.Println("OR = ", a|b)
	fmt.Println("XOR = ", a^b)
}
|
package utils
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
)
// DEBUG gates the package's Println helper; off by default.
var DEBUG bool = false
// UserOptions carries run-time display options.
// NOTE(review): neither field is referenced in this file — confirm usage.
type UserOptions struct {
	Print bool
	// NOTE(review): "Miliseconds" is misspelled; renaming would break users.
	PrintSleepMiliseconds int
}
// Println forwards to fmt.Println, but only when the package-level DEBUG
// flag is set; otherwise it returns 0, nil without printing anything.
func Println(a ...interface{}) (n int, err error) {
	if DEBUG {
		return fmt.Println(a...)
	}
	return 0, nil
}
// Max returns the larger of the two ints.
func Max(v1 int, v2 int) int {
	if v2 > v1 {
		return v2
	}
	return v1
}
func ReadIntegersFromFile(filePath string) (result []int64){
file, err := os.Open(filePath)
if err != nil {
log.Fatal(err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
v,e := strconv.ParseInt(scanner.Text(),10,64)
if e != nil {
result = append(result, v)
} else{
panic(e)
}
}
if err := scanner.Err(); err != nil {
panic(err)
}
return
}
// ToInt converts s to an int, yielding 0 when s is not a valid integer
// (the conversion error is deliberately ignored).
func ToInt(s string) int {
	n, _ := strconv.Atoi(s)
	return n
}
|
package linkedlist
import (
"testing"
)
// TestHasCycleWithUnCycledList: hasCycle must be false for a 10-node list
// built without a cycle.
func TestHasCycleWithUnCycledList(t *testing.T) {
	l := newListNodes([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, false)
	if hasCycle(l) {
		t.Fail()
	}
}
// TestHasCycleWithCycledList: hasCycle must be true for the same list built
// with a cycle.
func TestHasCycleWithCycledList(t *testing.T) {
	l := newListNodes([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, true)
	if !hasCycle(l) {
		t.Fail()
	}
}
// TestInCircleWithCycledList: inCircle must return the cycle's entry node —
// here the head itself, given how newListNodes links the cycle.
func TestInCircleWithCycledList(t *testing.T) {
	l := newListNodes([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, true)
	if inCircle(l) != l {
		t.Fail()
	}
}
// TestInCircleWithOutCycledList: inCircle must return nil when no cycle
// exists.
func TestInCircleWithOutCycledList(t *testing.T) {
	l := newListNodes([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, false)
	if inCircle(l) != nil {
		t.Fail()
	}
}
|
package main
import "fmt"
// Cx describes one partition step of a quicksort: the pivot plus the two
// partitions. (The original comment on Num, "current run count", was wrong —
// it is assigned the slice's first element, i.e. the pivot value.)
type Cx struct {
	Num int // pivot value for this step
	Left []int // values <= pivot
	Right []int // values > pivot
}
// main sorts a fixed sample slice in descending order via compare and
// prints the result.
func main() {
	sample := []int{1, 4, 2, 3, 5, 9, 10, 11, 24, 14, 34, 13, 45, 17, 19, 40}
	fmt.Println(compare(sample))
}
// compare sorts the given ints into descending order with a simple
// quicksort: the first element is the pivot, strictly larger values recurse
// on the right partition (emitted first), the rest on the left.
// Improvement: the per-call scratch struct Cx (whose field comments did not
// match its use) is replaced with plain locals; output is unchanged.
func compare(value []int) []int {
	if len(value) == 0 {
		return nil
	}
	if len(value) == 1 {
		return value
	}
	pivot := value[0]
	var larger, rest []int
	for _, v := range value[1:] {
		if v > pivot {
			larger = append(larger, v)
		} else {
			rest = append(rest, v)
		}
	}
	sorted := append(compare(larger), pivot)
	return append(sorted, compare(rest)...)
}
|
package main
import (
"./protocol"
"bufio"
"log"
"os"
"strconv"
)
// main connects to every server address given on the command line, then
// reads integers from stdin and sends each one through the protocol
// package; non-integer lines are logged and skipped.
func main() {
	log.Printf("Started Client")
	for _, addr := range os.Args[1:] {
		log.Printf("Registering Server: " + addr)
		protocol.Connect(addr)
	}
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		if val, err := strconv.Atoi(scanner.Text()); err != nil {
			log.Printf("Invalid provided value")
		} else {
			protocol.Send(val)
		}
	}
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tests
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"database/sql"
"encoding/binary"
"encoding/pem"
"fmt"
"io"
"math/big"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/config"
ddlutil "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/extension"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/auth"
tmysql "github.com/pingcap/tidb/parser/mysql"
server2 "github.com/pingcap/tidb/server"
"github.com/pingcap/tidb/server/internal/column"
"github.com/pingcap/tidb/server/internal/resultset"
"github.com/pingcap/tidb/server/internal/testserverclient"
"github.com/pingcap/tidb/server/internal/testutil"
util2 "github.com/pingcap/tidb/server/internal/util"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/cpuprofile"
"github.com/pingcap/tidb/util/plancodec"
"github.com/pingcap/tidb/util/resourcegrouptag"
"github.com/pingcap/tidb/util/topsql"
"github.com/pingcap/tidb/util/topsql/collector"
mockTopSQLTraceCPU "github.com/pingcap/tidb/util/topsql/collector/mock"
topsqlstate "github.com/pingcap/tidb/util/topsql/state"
"github.com/pingcap/tidb/util/topsql/stmtstats"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/tikvrpc"
"go.opencensus.io/stats/view"
)
// tidbTestSuite bundles everything needed to run SQL against an in-process
// TiDB server during a test: the client helper, the driver, the server
// itself, its domain, and the backing KV store.
type tidbTestSuite struct {
	*testserverclient.TestServerClient
	tidbdrv *server2.TiDBDriver
	server *server2.Server
	domain *domain.Domain
	store kv.Storage
}
// createTidbTestSuite boots an in-process TiDB test server with the default
// test configuration: random SQL/status ports, status server enabled, DB
// label recording on, TCP keep-alive on.
func createTidbTestSuite(t *testing.T) *tidbTestSuite {
	cfg := util2.NewTestConfig()
	cfg.Port = 0
	cfg.Status.ReportStatus = true
	cfg.Status.StatusPort = 0
	cfg.Status.RecordDBLabel = true
	cfg.Performance.TCPKeepAlive = true
	return createTidbTestSuiteWithCfg(t, cfg)
}
// createTidbTestSuiteWithCfg boots an in-process TiDB server (mock KV
// store, bootstrapped session/domain) using cfg, waits until it accepts
// connections, and registers full teardown on t.
func createTidbTestSuiteWithCfg(t *testing.T, cfg *config.Config) *tidbTestSuite {
	ts := &tidbTestSuite{TestServerClient: testserverclient.NewTestServerClient()}
	// setup tidbTestSuite
	var err error
	ts.store, err = mockstore.NewMockStore()
	// Fix: check the error immediately. DisableStats4Test previously ran
	// between the call and the require, obscuring the failure point.
	require.NoError(t, err)
	session.DisableStats4Test()
	ts.domain, err = session.BootstrapSession(ts.store)
	require.NoError(t, err)
	ts.tidbdrv = server2.NewTiDBDriver(ts.store)
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	ts.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	ts.StatusPort = testutil.GetPortFromTCPAddr(server.StatusListenerAddr())
	ts.server = server
	ts.server.SetDomain(ts.domain)
	ts.domain.InfoSyncer().SetSessionManager(ts.server)
	go func() {
		// NOTE(review): require inside a goroutine may fire after the test
		// has already finished; kept as-is to preserve existing behaviour.
		err := ts.server.Run()
		require.NoError(t, err)
	}()
	ts.WaitUntilServerOnline()
	t.Cleanup(func() {
		if ts.domain != nil {
			ts.domain.Close()
		}
		if ts.server != nil {
			ts.server.Close()
		}
		if ts.store != nil {
			require.NoError(t, ts.store.Close())
		}
		view.Stop()
	})
	return ts
}
// tidbTestTopSQLSuite extends tidbTestSuite with Top SQL specific setup.
type tidbTestTopSQLSuite struct {
	*tidbTestSuite
}
// createTidbTestTopSQLSuite boots the base suite, tunes the global Top SQL
// state for fast test turnaround (1s precision, 2s report interval, 5 time
// series max), starts the CPU profiler, and registers restoration of the
// defaults on cleanup.
func createTidbTestTopSQLSuite(t *testing.T) *tidbTestTopSQLSuite {
	base := createTidbTestSuite(t)
	ts := &tidbTestTopSQLSuite{base}
	// Initialize global variable for top-sql test.
	db, err := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, err)
	defer func() {
		err := db.Close()
		require.NoError(t, err)
	}()
	dbt := testkit.NewDBTestKit(t, db)
	topsqlstate.GlobalState.PrecisionSeconds.Store(1)
	topsqlstate.GlobalState.ReportIntervalSeconds.Store(2)
	dbt.MustExec("set @@global.tidb_top_sql_max_time_series_count=5;")
	require.NoError(t, cpuprofile.StartCPUProfiler())
	t.Cleanup(func() {
		cpuprofile.StopCPUProfiler()
		topsqlstate.GlobalState.PrecisionSeconds.Store(topsqlstate.DefTiDBTopSQLPrecisionSeconds)
		topsqlstate.GlobalState.ReportIntervalSeconds.Store(topsqlstate.DefTiDBTopSQLReportIntervalSeconds)
		view.Stop()
	})
	return ts
}
// The tests below are thin wrappers: each boots a fresh in-process TiDB via
// createTidbTestSuite and delegates to the matching RunTest* helper in
// testserverclient.
func TestRegression(t *testing.T) {
	ts := createTidbTestSuite(t)
	if testserverclient.Regression {
		ts.RunTestRegression(t, nil, "Regression")
	}
}
func TestUint64(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestPrepareResultFieldType(t)
}
func TestSpecialType(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestSpecialType(t)
}
func TestPreparedString(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestPreparedString(t)
}
func TestPreparedTimestamp(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestPreparedTimestamp(t)
}
func TestConcurrentUpdate(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestConcurrentUpdate(t)
}
func TestErrorCode(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestErrorCode(t)
}
func TestAuth(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestAuth(t)
	ts.RunTestIssue3682(t)
	ts.RunTestAccountLock(t)
}
func TestIssues(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestIssue3662(t)
	ts.RunTestIssue3680(t)
	ts.RunTestIssue22646(t)
}
func TestDBNameEscape(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestDBNameEscape(t)
}
func TestResultFieldTableIsNull(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestResultFieldTableIsNull(t)
}
func TestStatusAPI(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunTestStatusAPI(t)
}
// TestStatusPort verifies that creating a second server whose status port
// is already taken by the first suite fails and yields a nil server.
func TestStatusPort(t *testing.T) {
	ts := createTidbTestSuite(t)
	cfg := util2.NewTestConfig()
	cfg.Port = 0
	cfg.Status.ReportStatus = true
	cfg.Status.StatusPort = ts.StatusPort // deliberately conflicts
	cfg.Performance.TCPKeepAlive = true
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.Error(t, err)
	require.Nil(t, server)
}
// TestStatusAPIWithTLS starts a server whose status endpoint is TLS-only
// (fresh CA + server cert in a temp dir) and checks that HTTPS requests
// succeed while plain HTTP requests fail.
func TestStatusAPIWithTLS(t *testing.T) {
	ts := createTidbTestSuite(t)
	dir := t.TempDir()
	fileName := func(file string) string {
		return filepath.Join(dir, file)
	}
	caCert, caKey, err := generateCert(0, "TiDB CA 2", nil, nil, fileName("ca-key-2.pem"), fileName("ca-cert-2.pem"))
	require.NoError(t, err)
	_, _, err = generateCert(1, "tidb-server-2", caCert, caKey, fileName("server-key-2.pem"), fileName("server-cert-2.pem"))
	require.NoError(t, err)
	cli := testserverclient.NewTestServerClient()
	cli.StatusScheme = "https"
	cfg := util2.NewTestConfig()
	cfg.Port = cli.Port
	cfg.Status.StatusPort = cli.StatusPort
	cfg.Security.ClusterSSLCA = fileName("ca-cert-2.pem")
	cfg.Security.ClusterSSLCert = fileName("server-cert-2.pem")
	cfg.Security.ClusterSSLKey = fileName("server-key-2.pem")
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	cli.StatusPort = testutil.GetPortFromTCPAddr(server.StatusListenerAddr())
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	// NOTE(review): fixed 100ms startup wait — flaky on slow machines.
	time.Sleep(time.Millisecond * 100)
	// https connection should work.
	ts.RunTestStatusAPI(t)
	// but plain http connection should fail.
	cli.StatusScheme = "http"
	//nolint:bodyclose
	_, err = cli.FetchStatus("/status")
	require.Error(t, err)
	server.Close()
}
// TestStatusAPIWithTLSCNCheck verifies client-certificate CN verification
// on the status endpoint: with ClusterVerifyCN set to "tidb-client-2", a
// client certificate with CN "tidb-client-1" must be rejected while the one
// with CN "tidb-client-2" must be accepted.
func TestStatusAPIWithTLSCNCheck(t *testing.T) {
	ts := createTidbTestSuite(t)
	dir := t.TempDir()
	caPath := filepath.Join(dir, "ca-cert-cn.pem")
	serverKeyPath := filepath.Join(dir, "server-key-cn.pem")
	serverCertPath := filepath.Join(dir, "server-cert-cn.pem")
	client1KeyPath := filepath.Join(dir, "client-key-cn-check-a.pem")
	client1CertPath := filepath.Join(dir, "client-cert-cn-check-a.pem")
	client2KeyPath := filepath.Join(dir, "client-key-cn-check-b.pem")
	client2CertPath := filepath.Join(dir, "client-cert-cn-check-b.pem")
	caCert, caKey, err := generateCert(0, "TiDB CA CN CHECK", nil, nil, filepath.Join(dir, "ca-key-cn.pem"), caPath)
	require.NoError(t, err)
	_, _, err = generateCert(1, "tidb-server-cn-check", caCert, caKey, serverKeyPath, serverCertPath)
	require.NoError(t, err)
	_, _, err = generateCert(2, "tidb-client-cn-check-a", caCert, caKey, client1KeyPath, client1CertPath, func(c *x509.Certificate) {
		c.Subject.CommonName = "tidb-client-1"
	})
	require.NoError(t, err)
	_, _, err = generateCert(3, "tidb-client-cn-check-b", caCert, caKey, client2KeyPath, client2CertPath, func(c *x509.Certificate) {
		c.Subject.CommonName = "tidb-client-2"
	})
	require.NoError(t, err)
	cli := testserverclient.NewTestServerClient()
	cli.StatusScheme = "https"
	cfg := util2.NewTestConfig()
	cfg.Port = cli.Port
	cfg.Status.StatusPort = cli.StatusPort
	cfg.Security.ClusterSSLCA = caPath
	cfg.Security.ClusterSSLCert = serverCertPath
	cfg.Security.ClusterSSLKey = serverKeyPath
	cfg.Security.ClusterVerifyCN = []string{"tidb-client-2"}
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	cli.StatusPort = testutil.GetPortFromTCPAddr(server.StatusListenerAddr())
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	defer server.Close()
	// NOTE(review): fixed 100ms startup wait — flaky on slow machines.
	time.Sleep(time.Millisecond * 100)
	// Wrong CN: the request must be refused.
	hc := newTLSHttpClient(t, caPath,
		client1CertPath,
		client1KeyPath,
	)
	//nolint:bodyclose
	_, err = hc.Get(cli.StatusURL("/status"))
	require.Error(t, err)
	// Allowed CN: the request must succeed.
	hc = newTLSHttpClient(t, caPath,
		client2CertPath,
		client2KeyPath,
	)
	resp, err := hc.Get(cli.StatusURL("/status"))
	require.NoError(t, err)
	require.Nil(t, resp.Body.Close())
}
// newTLSHttpClient builds an *http.Client that presents the given client
// certificate and trusts caFile for the server chain. InsecureSkipVerify is
// set because the test server's certificate is not issued for the dialed
// host name.
func newTLSHttpClient(t *testing.T, caFile, certFile, keyFile string) *http.Client {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	require.NoError(t, err)
	caCert, err := os.ReadFile(caFile)
	require.NoError(t, err)
	caCertPool := x509.NewCertPool()
	caCertPool.AppendCertsFromPEM(caCert)
	tlsConfig := &tls.Config{
		Certificates:       []tls.Certificate{cert},
		RootCAs:            caCertPool,
		InsecureSkipVerify: true,
	}
	// Dropped tlsConfig.BuildNameToCertificate(): deprecated since Go 1.14,
	// where crypto/tls started selecting certificates automatically.
	return &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}}
}
// TestMultiStatements exercises multi-statement handling, both the
// known-failing cases and the supported ones.
func TestMultiStatements(t *testing.T) {
	ts := createTidbTestSuite(t)
	ts.RunFailedTestMultiStatements(t)
	ts.RunTestMultiStatements(t)
}
// TestSocketForwarding starts a second server listening on both a unix
// socket and TCP, then runs the regression suite over the unix socket.
func TestSocketForwarding(t *testing.T) {
	tempDir := t.TempDir()
	socketFile := tempDir + "/tidbtest.sock" // Unix Socket does not work on Windows, so '/' should be OK
	ts := createTidbTestSuite(t)
	cli := testserverclient.NewTestServerClient()
	cfg := util2.NewTestConfig()
	cfg.Socket = socketFile
	cfg.Port = cli.Port
	os.Remove(cfg.Socket)
	cfg.Status.ReportStatus = false
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	server.SetDomain(ts.domain)
	cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	// NOTE(review): fixed 100ms startup wait — flaky on slow machines.
	time.Sleep(time.Millisecond * 100)
	defer server.Close()
	cli.RunTestRegression(t, func(config *mysql.Config) {
		config.User = "root"
		config.Net = "unix"
		config.Addr = socketFile
		config.DBName = "test"
		config.Params = map[string]string{"sql_mode": "'STRICT_ALL_TABLES'"}
	}, "SocketRegression")
}
// TestSocket starts a server on a unix socket only (no TCP host) and runs
// the regression suite through that socket.
func TestSocket(t *testing.T) {
	tempDir := t.TempDir()
	socketFile := tempDir + "/tidbtest.sock" // Unix Socket does not work on Windows, so '/' should be OK
	cfg := util2.NewTestConfig()
	cfg.Socket = socketFile
	cfg.Port = 0
	os.Remove(cfg.Socket)
	cfg.Host = ""
	cfg.Status.ReportStatus = false
	ts := createTidbTestSuite(t)
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	server.SetDomain(ts.domain)
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	// NOTE(review): fixed 100ms startup wait — flaky on slow machines.
	time.Sleep(time.Millisecond * 100)
	defer server.Close()
	confFunc := func(config *mysql.Config) {
		config.User = "root"
		config.Net = "unix"
		config.Addr = socketFile
		config.DBName = "test"
		config.Params = map[string]string{"sql_mode": "STRICT_ALL_TABLES"}
	}
	// a fake server client, config is override, just used to run tests
	cli := testserverclient.NewTestServerClient()
	cli.WaitUntilCustomServerCanConnect(confFunc)
	cli.RunTestRegression(t, confFunc, "SocketRegression")
}
func TestSocketAndIp(t *testing.T) {
tempDir := t.TempDir()
socketFile := tempDir + "/tidbtest.sock" // Unix Socket does not work on Windows, so '/' should be OK
cli := testserverclient.NewTestServerClient()
cfg := util2.NewTestConfig()
cfg.Socket = socketFile
cfg.Port = cli.Port
cfg.Status.ReportStatus = false
ts := createTidbTestSuite(t)
server, err := server2.NewServer(cfg, ts.tidbdrv)
require.NoError(t, err)
server.SetDomain(ts.domain)
cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
go func() {
err := server.Run()
require.NoError(t, err)
}()
cli.WaitUntilServerCanConnect()
defer server.Close()
// Test with Socket connection + Setup user1@% for all host access
cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
defer func() {
cli.RunTests(t, func(config *mysql.Config) {
config.User = "root"
},
func(dbt *testkit.DBTestKit) {
dbt.MustExec("DROP USER IF EXISTS 'user1'@'%'")
dbt.MustExec("DROP USER IF EXISTS 'user1'@'localhost'")
dbt.MustExec("DROP USER IF EXISTS 'user1'@'127.0.0.1'")
})
}()
cli.RunTests(t, func(config *mysql.Config) {
config.User = "root"
config.Net = "unix"
config.Addr = socketFile
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
cli.CheckRows(t, rows, "root@localhost")
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION")
dbt.MustQuery("CREATE USER user1@'%'")
dbt.MustQuery("GRANT SELECT ON test.* TO user1@'%'")
})
// Test with Network interface connection with all hosts
cli.RunTests(t, func(config *mysql.Config) {
config.User = "user1"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
// NOTICE: this is not compatible with MySQL! (MySQL would report user1@localhost also for 127.0.0.1)
cli.CheckRows(t, rows, "user1@127.0.0.1")
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT SELECT ON test.* TO 'user1'@'%'")
rows = dbt.MustQuery("select host from information_schema.processlist where user = 'user1'")
records := cli.Rows(t, rows)
require.Contains(t, records[0], ":", "Missing :<port> in is.processlist")
})
// Test with unix domain socket file connection with all hosts
cli.RunTests(t, func(config *mysql.Config) {
config.Net = "unix"
config.Addr = socketFile
config.User = "user1"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
cli.CheckRows(t, rows, "user1@localhost")
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT SELECT ON test.* TO 'user1'@'%'")
})
// Setup user1@127.0.0.1 for loop back network interface access
cli.RunTests(t, func(config *mysql.Config) {
config.User = "root"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
// NOTICE: this is not compatible with MySQL! (MySQL would report user1@localhost also for 127.0.0.1)
cli.CheckRows(t, rows, "root@127.0.0.1")
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION")
dbt.MustQuery("CREATE USER user1@127.0.0.1")
dbt.MustQuery("GRANT SELECT,INSERT ON test.* TO user1@'127.0.0.1'")
})
// Test with Network interface connection with all hosts
cli.RunTests(t, func(config *mysql.Config) {
config.User = "user1"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
// NOTICE: this is not compatible with MySQL! (MySQL would report user1@localhost also for 127.0.0.1)
cli.CheckRows(t, rows, "user1@127.0.0.1")
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'127.0.0.1'\nGRANT SELECT,INSERT ON test.* TO 'user1'@'127.0.0.1'")
})
// Test with unix domain socket file connection with all hosts
cli.RunTests(t, func(config *mysql.Config) {
config.Net = "unix"
config.Addr = socketFile
config.User = "user1"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
cli.CheckRows(t, rows, "user1@localhost")
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT SELECT ON test.* TO 'user1'@'%'")
})
// Setup user1@localhost for socket (and if MySQL compatible; loop back network interface access)
cli.RunTests(t, func(config *mysql.Config) {
config.Net = "unix"
config.Addr = socketFile
config.User = "root"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
cli.CheckRows(t, rows, "root@localhost")
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION")
dbt.MustExec("CREATE USER user1@localhost")
dbt.MustExec("GRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO user1@localhost")
})
// Test with Network interface connection with all hosts
cli.RunTests(t, func(config *mysql.Config) {
config.User = "user1"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
// NOTICE: this is not compatible with MySQL! (MySQL would report user1@localhost also for 127.0.0.1)
cli.CheckRows(t, rows, "user1@127.0.0.1")
require.NoError(t, rows.Close())
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'127.0.0.1'\nGRANT SELECT,INSERT ON test.* TO 'user1'@'127.0.0.1'")
require.NoError(t, rows.Close())
})
// Test with unix domain socket file connection with all hosts
cli.RunTests(t, func(config *mysql.Config) {
config.Net = "unix"
config.Addr = socketFile
config.User = "user1"
config.DBName = "test"
},
func(dbt *testkit.DBTestKit) {
rows := dbt.MustQuery("select user()")
cli.CheckRows(t, rows, "user1@localhost")
require.NoError(t, rows.Close())
rows = dbt.MustQuery("show grants")
cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'localhost'\nGRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO 'user1'@'localhost'")
require.NoError(t, rows.Close())
})
}
// TestOnlySocket for server configuration without network interface for mysql clients.
// It verifies that with cfg.Host == "" only the unix domain socket accepts connections
// (TCP must fail), and checks which user@host account each socket login resolves to as
// user1@'%', user1@'127.0.0.1' and user1@'localhost' are created in turn.
func TestOnlySocket(t *testing.T) {
	tempDir := t.TempDir()
	socketFile := tempDir + "/tidbtest.sock" // Unix Socket does not work on Windows, so '/' should be OK
	cli := testserverclient.NewTestServerClient()
	cfg := util2.NewTestConfig()
	cfg.Socket = socketFile
	cfg.Host = "" // No network interface listening for mysql traffic
	cfg.Status.ReportStatus = false
	ts := createTidbTestSuite(t)
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	server.SetDomain(ts.domain)
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	// There is no TCP port to poll for readiness (socket-only server),
	// so give Run a short moment to bind the unix socket.
	time.Sleep(time.Millisecond * 100)
	defer server.Close()
	// Socket-only configuration: no TCP listener, but the socket listener must exist.
	require.Nil(t, server.Listener())
	require.NotNil(t, server.Socket())
	// Test with Socket connection + Setup user1@% for all host access
	// Deferred cleanup removes every user1 account variant created below.
	defer func() {
		cli.RunTests(t, func(config *mysql.Config) {
			config.User = "root"
			config.Net = "unix"
			config.Addr = socketFile
		},
			func(dbt *testkit.DBTestKit) {
				dbt.MustExec("DROP USER IF EXISTS 'user1'@'%'")
				dbt.MustExec("DROP USER IF EXISTS 'user1'@'localhost'")
				dbt.MustExec("DROP USER IF EXISTS 'user1'@'127.0.0.1'")
			})
	}()
	cli.RunTests(t, func(config *mysql.Config) {
		config.User = "root"
		config.Net = "unix"
		config.Addr = socketFile
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			cli.CheckRows(t, rows, "root@localhost")
			require.NoError(t, rows.Close())
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION")
			require.NoError(t, rows.Close())
			dbt.MustExec("CREATE USER user1@'%'")
			dbt.MustExec("GRANT SELECT ON test.* TO user1@'%'")
		})
	// Test with Network interface connection with all hosts, should fail since server not configured
	db, err := sql.Open("mysql", cli.GetDSN(func(config *mysql.Config) {
		config.User = "root"
		config.DBName = "test"
	}))
	require.NoErrorf(t, err, "Open failed")
	err = db.Ping()
	require.Errorf(t, err, "Connect succeeded when not configured!?!")
	db.Close()
	db, err = sql.Open("mysql", cli.GetDSN(func(config *mysql.Config) {
		config.User = "user1"
		config.DBName = "test"
	}))
	require.NoErrorf(t, err, "Open failed")
	err = db.Ping()
	require.Errorf(t, err, "Connect succeeded when not configured!?!")
	db.Close()
	// Test with unix domain socket file connection with all hosts
	cli.RunTests(t, func(config *mysql.Config) {
		config.Net = "unix"
		config.Addr = socketFile
		config.User = "user1"
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			cli.CheckRows(t, rows, "user1@localhost")
			require.NoError(t, rows.Close())
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT SELECT ON test.* TO 'user1'@'%'")
			require.NoError(t, rows.Close())
		})
	// Setup user1@127.0.0.1 for loop back network interface access
	cli.RunTests(t, func(config *mysql.Config) {
		config.Net = "unix"
		config.Addr = socketFile
		config.User = "root"
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			// Connection is via the socket here, so root resolves to root@localhost.
			cli.CheckRows(t, rows, "root@localhost")
			require.NoError(t, rows.Close())
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION")
			require.NoError(t, rows.Close())
			dbt.MustExec("CREATE USER user1@127.0.0.1")
			dbt.MustExec("GRANT SELECT,INSERT ON test.* TO user1@'127.0.0.1'")
		})
	// Test with unix domain socket file connection with all hosts
	// NOTE(review): even though user1@127.0.0.1 now exists, the socket login still
	// shows the user1@'%' grants (asserted below) — 127.0.0.1 does not match a socket login.
	cli.RunTests(t, func(config *mysql.Config) {
		config.Net = "unix"
		config.Addr = socketFile
		config.User = "user1"
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			cli.CheckRows(t, rows, "user1@localhost")
			require.NoError(t, rows.Close())
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'%'\nGRANT SELECT ON test.* TO 'user1'@'%'")
			require.NoError(t, rows.Close())
		})
	// Setup user1@localhost for socket (and if MySQL compatible; loop back network interface access)
	cli.RunTests(t, func(config *mysql.Config) {
		config.Net = "unix"
		config.Addr = socketFile
		config.User = "root"
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			cli.CheckRows(t, rows, "root@localhost")
			require.NoError(t, rows.Close())
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION")
			require.NoError(t, rows.Close())
			dbt.MustExec("CREATE USER user1@localhost")
			dbt.MustExec("GRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO user1@localhost")
		})
	// Test with unix domain socket file connection with all hosts
	// Now user1@localhost exists and its grants are the ones reported for the socket login.
	cli.RunTests(t, func(config *mysql.Config) {
		config.Net = "unix"
		config.Addr = socketFile
		config.User = "user1"
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			cli.CheckRows(t, rows, "user1@localhost")
			require.NoError(t, rows.Close())
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'localhost'\nGRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO 'user1'@'localhost'")
			require.NoError(t, rows.Close())
		})
}
// generateCert generates a private key and a certificate in PEM format based on parameters.
// If parentCert and parentCertKey is specified, the new certificate will be signed by the parentCert.
// Otherwise, the new certificate will be self-signed and is a CA.
// The key is written to outKeyFile (mode 0600) and the certificate to outCertFile;
// opts callbacks may mutate the template before signing.
func generateCert(sn int, commonName string, parentCert *x509.Certificate, parentCertKey *rsa.PrivateKey, outKeyFile string, outCertFile string, opts ...func(c *x509.Certificate)) (*x509.Certificate, *rsa.PrivateKey, error) {
	// NOTE: deliberately tiny RSA key to keep test runtime low; never use in production.
	privateKey, err := rsa.GenerateKey(rand.Reader, 528)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	// Backdate NotBefore slightly so minor clock skew cannot invalidate the cert.
	notBefore := time.Now().Add(-10 * time.Minute).UTC()
	notAfter := notBefore.Add(1 * time.Hour).UTC()
	template := x509.Certificate{
		SerialNumber:          big.NewInt(int64(sn)),
		Subject:               pkix.Name{CommonName: commonName, Names: []pkix.AttributeTypeAndValue{util.MockPkixAttribute(util.CommonName, commonName)}},
		DNSNames:              []string{commonName},
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
		BasicConstraintsValid: true,
	}
	for _, opt := range opts {
		opt(&template)
	}
	var parent *x509.Certificate
	var priv *rsa.PrivateKey
	if parentCert == nil || parentCertKey == nil {
		// No parent: self-signed CA certificate.
		template.IsCA = true
		template.KeyUsage |= x509.KeyUsageCertSign
		parent = &template
		priv = privateKey
	} else {
		parent = parentCert
		priv = parentCertKey
	}
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, parent, &privateKey.PublicKey, priv)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	cert, err := x509.ParseCertificate(derBytes)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	certOut, err := os.Create(outCertFile)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	if err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
		// Fix: close the handle on the encode-error path; it was previously leaked.
		_ = certOut.Close()
		return nil, nil, errors.Trace(err)
	}
	if err = certOut.Close(); err != nil {
		return nil, nil, errors.Trace(err)
	}
	keyOut, err := os.OpenFile(outKeyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	if err = pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}); err != nil {
		// Fix: close the handle on the encode-error path; it was previously leaked.
		_ = keyOut.Close()
		return nil, nil, errors.Trace(err)
	}
	if err = keyOut.Close(); err != nil {
		return nil, nil, errors.Trace(err)
	}
	return cert, privateKey, nil
}
// registerTLSConfig registers a mysql client TLS config under configName.
// caCertPath supplies the trusted root, clientCertPath/clientKeyPath the client
// key pair, serverName the expected server identity; verifyServer=false skips
// server certificate verification.
// See https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig for details.
func registerTLSConfig(configName string, caCertPath string, clientCertPath string, clientKeyPath string, serverName string, verifyServer bool) error {
	caPEM, err := os.ReadFile(caCertPath)
	if err != nil {
		return err
	}
	caPool := x509.NewCertPool()
	if !caPool.AppendCertsFromPEM(caPEM) {
		return errors.New("Failed to append PEM")
	}
	keyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
	if err != nil {
		return err
	}
	return mysql.RegisterTLSConfig(configName, &tls.Config{
		RootCAs:            caPool,
		Certificates:       []tls.Certificate{keyPair},
		ServerName:         serverName,
		InsecureSkipVerify: !verifyServer,
	})
}
// TestSystemTimeZone checks that @@system_time_zone mirrors the system_tz
// value persisted in mysql.tidb.
func TestSystemTimeZone(t *testing.T) {
	suite := createTidbTestSuite(t)
	tk := testkit.NewTestKit(t, suite.store)

	cfg := util2.NewTestConfig()
	cfg.Port, cfg.Status.StatusPort = 0, 0
	cfg.Status.ReportStatus = false
	srv, err := server2.NewServer(cfg, suite.tidbdrv)
	require.NoError(t, err)
	defer srv.Close()

	stored := tk.MustQuery("select variable_value from mysql.tidb where variable_name = 'system_tz'").Rows()
	tk.MustQuery("select @@system_time_zone").Check(stored)
}
// TestInternalSessionTxnStartTS runs many restricted statements concurrently on
// one session to reproduce an issue where sysSessionPool never called the
// session's Close, leaking asyncGetTSWorker goroutines.
func TestInternalSessionTxnStartTS(t *testing.T) {
	ts := createTidbTestSuite(t)
	se, err := session.CreateSession4Test(ts.store)
	require.NoError(t, err)
	_, err = se.Execute(context.Background(), "set global tidb_enable_metadata_lock=0")
	require.NoError(t, err)

	const stmtCount = 10
	parsed := make([]ast.StmtNode, 0, stmtCount)
	for i := 0; i < stmtCount; i++ {
		stmt, err := session.ParseWithParams4Test(context.Background(), se, "select * from mysql.user limit 1")
		require.NoError(t, err)
		parsed = append(parsed, stmt)
	}

	var wg util.WaitGroupWrapper
	for _, stmt := range parsed {
		stmt := stmt // per-iteration copy so each goroutine sees its own statement
		wg.Run(func() {
			_, _, err := session.ExecRestrictedStmt4Test(context.Background(), se, stmt)
			require.NoError(t, err)
		})
	}
	wg.Wait()
}
// TestClientWithCollation delegates to the suite's shared collation client test.
func TestClientWithCollation(t *testing.T) {
	createTidbTestSuite(t).RunTestClientWithCollation(t)
}
// TestCreateTableFlen checks the column-length metadata reported to clients:
// SHOW CREATE TABLE output width for a wide table (issue #4540) and the
// reported lengths of decimal columns (issue #5246).
func TestCreateTableFlen(t *testing.T) {
	ts := createTidbTestSuite(t)
	// issue #4540
	qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
	require.NoError(t, err)
	_, err = Execute(context.Background(), qctx, "use test;")
	require.NoError(t, err)

	ctx := context.Background()
	testSQL := "CREATE TABLE `t1` (" +
		"`a` char(36) NOT NULL," +
		"`b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP," +
		"`c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP," +
		"`d` varchar(50) DEFAULT ''," +
		"`e` char(36) NOT NULL DEFAULT ''," +
		"`f` char(36) NOT NULL DEFAULT ''," +
		"`g` char(1) NOT NULL DEFAULT 'N'," +
		"`h` varchar(100) NOT NULL," +
		"`i` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP," +
		"`j` varchar(10) DEFAULT ''," +
		"`k` varchar(10) DEFAULT ''," +
		"`l` varchar(20) DEFAULT ''," +
		"`m` varchar(20) DEFAULT ''," +
		"`n` varchar(30) DEFAULT ''," +
		"`o` varchar(100) DEFAULT ''," +
		"`p` varchar(50) DEFAULT ''," +
		"`q` varchar(50) DEFAULT ''," +
		"`r` varchar(100) DEFAULT ''," +
		"`s` varchar(20) DEFAULT ''," +
		"`t` varchar(50) DEFAULT ''," +
		"`u` varchar(100) DEFAULT ''," +
		"`v` varchar(50) DEFAULT ''," +
		"`w` varchar(300) NOT NULL," +
		"`x` varchar(250) DEFAULT ''," +
		"`y` decimal(20)," +
		"`z` decimal(20, 4)," +
		"PRIMARY KEY (`a`)" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin"
	_, err = Execute(ctx, qctx, testSQL)
	require.NoError(t, err)

	rs, err := Execute(ctx, qctx, "show create table t1")
	require.NoError(t, err)
	req := rs.NewChunk(nil)
	err = rs.Next(ctx, req)
	require.NoError(t, err)
	cols := rs.Columns()
	require.Len(t, cols, 2)
	require.Equal(t, 5*tmysql.MaxBytesOfCharacter, int(cols[0].ColumnLength))
	require.Equal(t, len(req.GetRow(0).GetString(1))*tmysql.MaxBytesOfCharacter, int(cols[1].ColumnLength))
	// Fix: close the first result set before reusing rs; it was previously leaked.
	rs.Close()

	// for issue#5246
	rs, err = Execute(ctx, qctx, "select y, z from t1")
	require.NoError(t, err)
	cols = rs.Columns()
	require.Len(t, cols, 2)
	// decimal(20): 20 digits + sign = 21; decimal(20,4): + decimal point = 22.
	require.Equal(t, 21, int(cols[0].ColumnLength))
	require.Equal(t, 22, int(cols[1].ColumnLength))
	rs.Close()
}
// Execute parses sql, which must contain exactly one statement, and runs it on qc.
// It panics if the input parses to more or fewer than one statement.
func Execute(ctx context.Context, qc *server2.TiDBContext, sql string) (resultset.ResultSet, error) {
	parsed, err := qc.Parse(ctx, sql)
	if err != nil {
		return nil, err
	}
	if len(parsed) != 1 {
		panic("wrong input for Execute: " + sql)
	}
	return qc.ExecuteStmt(ctx, parsed[0])
}
// TestShowTablesFlen checks the column length reported for SHOW TABLES:
// the 26-rune table name times the maximum bytes per character.
func TestShowTablesFlen(t *testing.T) {
	ts := createTidbTestSuite(t)
	qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
	require.NoError(t, err)
	ctx := context.Background()
	_, err = Execute(ctx, qctx, "use test;")
	require.NoError(t, err)

	testSQL := "create table abcdefghijklmnopqrstuvwxyz (i int)"
	_, err = Execute(ctx, qctx, testSQL)
	require.NoError(t, err)

	rs, err := Execute(ctx, qctx, "show tables")
	require.NoError(t, err)
	// Fix: close the result set; it was previously leaked.
	defer rs.Close()
	req := rs.NewChunk(nil)
	err = rs.Next(ctx, req)
	require.NoError(t, err)
	cols := rs.Columns()
	require.Len(t, cols, 1)
	require.Equal(t, 26*tmysql.MaxBytesOfCharacter, int(cols[0].ColumnLength))
}
// checkColNames asserts that the first len(names) columns carry the expected
// Name and OrgName, position by position.
func checkColNames(t *testing.T, columns []*column.Info, names ...string) {
	for i := range names {
		expected := names[i]
		require.Equal(t, expected, columns[i].Name)
		require.Equal(t, expected, columns[i].OrgName)
	}
}
// TestFieldList checks qctx.FieldList metadata for a table covering most column
// types: names, schema/table attribution, per-column charset, decimal scale,
// and the truncation of over-long column aliases (issue #10513).
func TestFieldList(t *testing.T) {
	ts := createTidbTestSuite(t)
	qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
	require.NoError(t, err)
	_, err = Execute(context.Background(), qctx, "use test;")
	require.NoError(t, err)
	ctx := context.Background()
	testSQL := `create table t (
		c_bit bit(10),
		c_int_d int,
		c_bigint_d bigint,
		c_float_d float,
		c_double_d double,
		c_decimal decimal(6, 3),
		c_datetime datetime(2),
		c_time time(3),
		c_date date,
		c_timestamp timestamp(4) DEFAULT CURRENT_TIMESTAMP(4),
		c_char char(20),
		c_varchar varchar(20),
		c_text_d text,
		c_binary binary(20),
		c_blob_d blob,
		c_set set('a', 'b', 'c'),
		c_enum enum('a', 'b', 'c'),
		c_json JSON,
		c_year year
	)`
	_, err = Execute(ctx, qctx, testSQL)
	require.NoError(t, err)
	colInfos, err := qctx.FieldList("t")
	require.NoError(t, err)
	require.Len(t, colInfos, 19)
	checkColNames(t, colInfos, "c_bit", "c_int_d", "c_bigint_d", "c_float_d",
		"c_double_d", "c_decimal", "c_datetime", "c_time", "c_date", "c_timestamp",
		"c_char", "c_varchar", "c_text_d", "c_binary", "c_blob_d", "c_set", "c_enum",
		"c_json", "c_year")
	// Every column belongs to schema "test" ...
	for _, cols := range colInfos {
		require.Equal(t, "test", cols.Schema)
	}
	// ... and to table "t".
	for _, cols := range colInfos {
		require.Equal(t, "t", cols.Table)
	}
	// Text-like columns report the default charset; everything else is binary.
	for i, col := range colInfos {
		switch i {
		case 10, 11, 12, 15, 16:
			// c_char char(20), c_varchar varchar(20), c_text_d text,
			// c_set set('a', 'b', 'c'), c_enum enum('a', 'b', 'c')
			require.Equalf(t, uint16(tmysql.CharsetNameToID(tmysql.DefaultCharset)), col.Charset, "index %d", i)
			continue
		}
		require.Equalf(t, uint16(tmysql.CharsetNameToID("binary")), col.Charset, "index %d", i)
	}
	// c_decimal decimal(6, 3)
	require.Equal(t, uint8(3), colInfos[5].Decimal)
	// for issue#10513: aliases longer than MaxAliasIdentifierLen are truncated.
	tooLongColumnAsName := "COALESCE(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)"
	columnAsName := tooLongColumnAsName[:tmysql.MaxAliasIdentifierLen]
	rs, err := Execute(ctx, qctx, "select "+tooLongColumnAsName)
	require.NoError(t, err)
	cols := rs.Columns()
	require.Equal(t, "", cols[0].OrgName)
	require.Equal(t, columnAsName, cols[0].Name)
	rs.Close()
	rs, err = Execute(ctx, qctx, "select c_bit as '"+tooLongColumnAsName+"' from t")
	require.NoError(t, err)
	cols = rs.Columns()
	require.Equal(t, "c_bit", cols[0].OrgName)
	require.Equal(t, columnAsName, cols[0].Name)
	rs.Close()
}
// TestClientErrors delegates to the suite's shared information_schema client-errors test.
func TestClientErrors(t *testing.T) {
	createTidbTestSuite(t).RunTestInfoschemaClientErrors(t)
}
// TestInitConnect delegates to the suite's shared init_connect test.
func TestInitConnect(t *testing.T) {
	createTidbTestSuite(t).RunTestInitConnect(t)
}
// TestSumAvg delegates to the suite's shared SUM/AVG aggregate test.
func TestSumAvg(t *testing.T) {
	createTidbTestSuite(t).RunTestSumAvg(t)
}
// TestNullFlag checks the dumped column flags for single-column results of
// various nullable / non-nullable expressions
// (regression coverage for issues #9689, #19025 and #18488).
func TestNullFlag(t *testing.T) {
	ts := createTidbTestSuite(t)
	qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
	require.NoError(t, err)
	ctx := context.Background()

	// checkFlag runs query, asserts a single result column, and compares its
	// dumped flag against expectFlag. Extracted to remove six copy-pasted stanzas.
	checkFlag := func(query string, expectFlag uint16) {
		rs, err := Execute(ctx, qctx, query)
		require.NoError(t, err)
		cols := rs.Columns()
		require.Len(t, cols, 1)
		require.Equal(t, expectFlag, column.DumpFlag(cols[0].Type, cols[0].Flag))
		rs.Close()
	}

	// issue #9689
	checkFlag("select 1", uint16(tmysql.NotNullFlag|tmysql.BinaryFlag))
	// issue #19025
	checkFlag("select convert('{}', JSON)", uint16(tmysql.BinaryFlag))
	// issue #18488
	_, err = Execute(ctx, qctx, "use test")
	require.NoError(t, err)
	_, err = Execute(ctx, qctx, "CREATE TABLE `test` (`iD` bigint(20) NOT NULL, `INT_TEST` int(11) DEFAULT NULL);")
	require.NoError(t, err)
	checkFlag(`SELECT id + int_test as res FROM test GROUP BY res ORDER BY res;`, uint16(tmysql.BinaryFlag))
	checkFlag("select if(1, null, 1) ;", uint16(tmysql.BinaryFlag))
	checkFlag("select CASE 1 WHEN 2 THEN 1 END ;", uint16(tmysql.BinaryFlag))
	checkFlag("select NULL;", uint16(tmysql.BinaryFlag))
}
// TestNO_DEFAULT_VALUEFlag checks that an integer primary key without an
// explicit default reports NOT NULL + PRI KEY + NO_DEFAULT_VALUE flags (issue #21465).
func TestNO_DEFAULT_VALUEFlag(t *testing.T) {
	ts := createTidbTestSuite(t)
	// issue #21465
	qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
	require.NoError(t, err)
	ctx := context.Background()
	for _, setup := range []string{
		"use test",
		"drop table if exists t",
		"create table t(c1 int key, c2 int);",
	} {
		_, err = Execute(ctx, qctx, setup)
		require.NoError(t, err)
	}
	rs, err := Execute(ctx, qctx, "select c1 from t;")
	require.NoError(t, err)
	defer rs.Close()
	columns := rs.Columns()
	require.Len(t, columns, 1)
	want := uint16(tmysql.NotNullFlag | tmysql.PriKeyFlag | tmysql.NoDefaultValueFlag)
	require.Equal(t, want, column.DumpFlag(columns[0].Type, columns[0].Flag))
}
// TestGracefulShutdown checks the shutdown sequence of the status endpoint:
// healthy before Close, answering 5xx during the graceful-wait window (so load
// balancers can drain), and refusing connections after shutdown completes.
func TestGracefulShutdown(t *testing.T) {
	ts := createTidbTestSuite(t)
	cli := testserverclient.NewTestServerClient()
	cfg := util2.NewTestConfig()
	cfg.GracefulWaitBeforeShutdown = 2 // wait before shutdown
	cfg.Port = 0
	cfg.Status.StatusPort = 0
	cfg.Status.ReportStatus = true
	cfg.Performance.TCPKeepAlive = true
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	require.NotNil(t, server)
	cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	cli.StatusPort = testutil.GetPortFromTCPAddr(server.StatusListenerAddr())
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	time.Sleep(time.Millisecond * 100)
	resp, err := cli.FetchStatus("/status") // server is up
	require.NoError(t, err)
	require.Nil(t, resp.Body.Close())
	go server.Close()
	time.Sleep(time.Millisecond * 500)
	// Fix: the fetch error was previously discarded; on failure the StatusCode
	// dereference below would nil-panic instead of reporting a clean test failure.
	resp, err = cli.FetchStatus("/status") // should return 5xx code
	require.NoError(t, err)
	require.Equal(t, 500, resp.StatusCode)
	require.Nil(t, resp.Body.Close())
	time.Sleep(time.Second * 2)
	//nolint:bodyclose
	_, err = cli.FetchStatus("/status") // Status is gone
	require.Error(t, err)
	require.Regexp(t, "connect: connection refused$", err.Error())
}
// TestPessimisticInsertSelectForUpdate checks that INSERT ... SELECT ... FOR UPDATE
// inside a pessimistic transaction returns a nil result set, i.e. the statement
// completes without a row stream the client would have to drain.
func TestPessimisticInsertSelectForUpdate(t *testing.T) {
	ts := createTidbTestSuite(t)
	qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
	require.NoError(t, err)
	// The pessimistic txn opened below is never committed; the deferred Close tears it down.
	defer qctx.Close()
	ctx := context.Background()
	_, err = Execute(ctx, qctx, "use test;")
	require.NoError(t, err)
	_, err = Execute(ctx, qctx, "drop table if exists t1, t2")
	require.NoError(t, err)
	_, err = Execute(ctx, qctx, "create table t1 (id int)")
	require.NoError(t, err)
	_, err = Execute(ctx, qctx, "create table t2 (id int)")
	require.NoError(t, err)
	_, err = Execute(ctx, qctx, "insert into t1 select 1")
	require.NoError(t, err)
	_, err = Execute(ctx, qctx, "begin pessimistic")
	require.NoError(t, err)
	rs, err := Execute(ctx, qctx, "INSERT INTO t2 (id) select id from t1 where id = 1 for update")
	require.NoError(t, err)
	require.Nil(t, rs) // should be no delay
}
// TestTopSQLCatchRunningSQL verifies that Top SQL captures a statement that is
// already executing when Top SQL is switched on mid-flight: a background
// goroutine enables Top SQL shortly after the query starts, and the collector
// must still record stats for that query.
func TestTopSQLCatchRunningSQL(t *testing.T) {
	ts := createTidbTestTopSQLSuite(t)
	db, err := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db.Close())
	}()
	dbt := testkit.NewDBTestKit(t, db)
	dbt.MustExec("drop database if exists topsql")
	dbt.MustExec("create database topsql")
	dbt.MustExec("use topsql;")
	dbt.MustExec("create table t (a int, b int);")
	// Enough rows that the self-join below runs long enough to be sampled.
	for i := 0; i < 5000; i++ {
		dbt.MustExec(fmt.Sprintf("insert into t values (%v, %v)", i, i))
	}
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/util/topsql/mockHighLoadForEachPlan", `return(true)`))
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/util/topsql/mockHighLoadForEachPlan"))
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop"))
	}()
	mc := mockTopSQLTraceCPU.NewTopSQLCollector()
	topsql.SetupTopSQLForTest(mc)
	sqlCPUCollector := collector.NewSQLCPUCollector(mc)
	sqlCPUCollector.Start()
	defer sqlCPUCollector.Stop()
	query := "select count(*) from t as t0 join t as t1 on t0.a != t1.a;"
	// needEnableTopSQL is a flag set by execFn and consumed by the goroutine
	// below, which enables Top SQL slightly after the query has started.
	needEnableTopSQL := int64(0)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			default:
			}
			if atomic.LoadInt64(&needEnableTopSQL) == 1 {
				// Small delay so the query is already running when Top SQL turns on.
				time.Sleep(2 * time.Millisecond)
				topsqlstate.EnableTopSQL()
				atomic.StoreInt64(&needEnableTopSQL, 0)
			}
			time.Sleep(time.Millisecond)
		}
	}()
	execFn := func(db *sql.DB) {
		dbt := testkit.NewDBTestKit(t, db)
		atomic.StoreInt64(&needEnableTopSQL, 1)
		mustQuery(t, dbt, query)
		topsqlstate.DisableTopSQL()
	}
	check := func() {
		// The 10s context must not have expired before the check runs.
		require.NoError(t, ctx.Err())
		stats := mc.GetSQLStatsBySQLWithRetry(query, true)
		require.Greaterf(t, len(stats), 0, query)
	}
	ts.testCase(t, mc, execFn, check)
	cancel()
	wg.Wait()
}
func TestTopSQLCPUProfile(t *testing.T) {
ts := createTidbTestTopSQLSuite(t)
db, err := sql.Open("mysql", ts.GetDSN())
require.NoError(t, err)
defer func() {
require.NoError(t, db.Close())
}()
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/util/topsql/mockHighLoadForEachSQL", `return(true)`))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/util/topsql/mockHighLoadForEachPlan", `return(true)`))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop", `return(true)`))
defer func() {
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/util/topsql/mockHighLoadForEachSQL"))
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/util/topsql/mockHighLoadForEachPlan"))
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop"))
}()
topsqlstate.EnableTopSQL()
defer topsqlstate.DisableTopSQL()
mc := mockTopSQLTraceCPU.NewTopSQLCollector()
topsql.SetupTopSQLForTest(mc)
sqlCPUCollector := collector.NewSQLCPUCollector(mc)
sqlCPUCollector.Start()
defer sqlCPUCollector.Stop()
dbt := testkit.NewDBTestKit(t, db)
dbt.MustExec("drop database if exists topsql")
dbt.MustExec("create database topsql")
dbt.MustExec("use topsql;")
dbt.MustExec("create table t (a int auto_increment, b int, unique index idx(a));")
dbt.MustExec("create table t1 (a int auto_increment, b int, unique index idx(a));")
dbt.MustExec("create table t2 (a int auto_increment, b int, unique index idx(a));")
dbt.MustExec("set @@global.tidb_txn_mode = 'pessimistic'")
checkFn := func(sql, planRegexp string) {
stats := mc.GetSQLStatsBySQLWithRetry(sql, len(planRegexp) > 0)
// since 1 sql may has many plan, check `len(stats) > 0` instead of `len(stats) == 1`.
require.Greaterf(t, len(stats), 0, "sql: "+sql)
for _, s := range stats {
sqlStr := mc.GetSQL(s.SQLDigest)
encodedPlan := mc.GetPlan(s.PlanDigest)
// Normalize the user SQL before check.
normalizedSQL := parser.Normalize(sql)
require.Equalf(t, normalizedSQL, sqlStr, "sql: %v", sql)
// decode plan before check.
normalizedPlan, err := plancodec.DecodeNormalizedPlan(encodedPlan)
require.NoError(t, err)
// remove '\n' '\t' before do regexp match.
normalizedPlan = strings.Replace(normalizedPlan, "\n", " ", -1)
normalizedPlan = strings.Replace(normalizedPlan, "\t", " ", -1)
require.Regexpf(t, planRegexp, normalizedPlan, "sql: %v", sql)
}
}
// Test case 1: DML query: insert/update/replace/delete/select
cases1 := []struct {
sql string
planRegexp string
}{
{sql: "insert into t () values (),(),(),(),(),(),();", planRegexp: ""},
{sql: "insert into t (b) values (1),(1),(1),(1),(1),(1),(1),(1);", planRegexp: ""},
{sql: "update t set b=a where b is null limit 1;", planRegexp: ".*Limit.*TableReader.*"},
{sql: "delete from t where b = a limit 2;", planRegexp: ".*Limit.*TableReader.*"},
{sql: "replace into t (b) values (1),(1),(1),(1),(1),(1),(1),(1);", planRegexp: ""},
{sql: "select * from t use index(idx) where a<10;", planRegexp: ".*IndexLookUp.*"},
{sql: "select * from t ignore index(idx) where a>1000000000;", planRegexp: ".*TableReader.*"},
{sql: "select /*+ HASH_JOIN(t1, t2) */ * from t t1 join t t2 on t1.a=t2.a where t1.b is not null;", planRegexp: ".*HashJoin.*"},
{sql: "select /*+ INL_HASH_JOIN(t1, t2) */ * from t t1 join t t2 on t2.a=t1.a where t1.b is not null;", planRegexp: ".*IndexHashJoin.*"},
{sql: "select * from t where a=1;", planRegexp: ".*Point_Get.*"},
{sql: "select * from t where a in (1,2,3,4)", planRegexp: ".*Batch_Point_Get.*"},
}
execFn := func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
for _, ca := range cases1 {
sqlStr := ca.sql
if strings.HasPrefix(sqlStr, "select") {
mustQuery(t, dbt, sqlStr)
} else {
dbt.MustExec(sqlStr)
}
}
}
check := func() {
for _, ca := range cases1 {
checkFn(ca.sql, ca.planRegexp)
}
}
ts.testCase(t, mc, execFn, check)
// Test case 2: prepare/execute sql
cases2 := []struct {
prepare string
args []interface{}
planRegexp string
}{
{prepare: "insert into t1 (b) values (?);", args: []interface{}{1}, planRegexp: ""},
{prepare: "replace into t1 (b) values (?);", args: []interface{}{1}, planRegexp: ""},
{prepare: "update t1 set b=a where b is null limit ?;", args: []interface{}{1}, planRegexp: ".*Limit.*TableReader.*"},
{prepare: "delete from t1 where b = a limit ?;", args: []interface{}{1}, planRegexp: ".*Limit.*TableReader.*"},
{prepare: "replace into t1 (b) values (?);", args: []interface{}{1}, planRegexp: ""},
{prepare: "select * from t1 use index(idx) where a<?;", args: []interface{}{10}, planRegexp: ".*IndexLookUp.*"},
{prepare: "select * from t1 ignore index(idx) where a>?;", args: []interface{}{1000000000}, planRegexp: ".*TableReader.*"},
{prepare: "select /*+ HASH_JOIN(t1, t2) */ * from t1 t1 join t1 t2 on t1.a=t2.a where t1.b is not null;", args: nil, planRegexp: ".*HashJoin.*"},
{prepare: "select /*+ INL_HASH_JOIN(t1, t2) */ * from t1 t1 join t1 t2 on t2.a=t1.a where t1.b is not null;", args: nil, planRegexp: ".*IndexHashJoin.*"},
{prepare: "select * from t1 where a=?;", args: []interface{}{1}, planRegexp: ".*Point_Get.*"},
{prepare: "select * from t1 where a in (?,?,?,?)", args: []interface{}{1, 2, 3, 4}, planRegexp: ".*Batch_Point_Get.*"},
}
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
for _, ca := range cases2 {
prepare, args := ca.prepare, ca.args
stmt := dbt.MustPrepare(prepare)
if strings.HasPrefix(prepare, "select") {
rows, err := stmt.Query(args...)
require.NoError(t, err)
for rows.Next() {
}
require.NoError(t, rows.Close())
} else {
_, err = stmt.Exec(args...)
require.NoError(t, err)
}
}
}
check = func() {
for _, ca := range cases2 {
checkFn(ca.prepare, ca.planRegexp)
}
}
ts.testCase(t, mc, execFn, check)
// Test case 3: prepare, execute stmt using @val...
cases3 := []struct {
prepare string
args []interface{}
planRegexp string
}{
{prepare: "insert into t2 (b) values (?);", args: []interface{}{1}, planRegexp: ""},
{prepare: "update t2 set b=a where b is null limit ?;", args: []interface{}{1}, planRegexp: ".*Limit.*TableReader.*"},
{prepare: "delete from t2 where b = a limit ?;", args: []interface{}{1}, planRegexp: ".*Limit.*TableReader.*"},
{prepare: "replace into t2 (b) values (?);", args: []interface{}{1}, planRegexp: ""},
{prepare: "select * from t2 use index(idx) where a<?;", args: []interface{}{10}, planRegexp: ".*IndexLookUp.*"},
{prepare: "select * from t2 ignore index(idx) where a>?;", args: []interface{}{1000000000}, planRegexp: ".*TableReader.*"},
{prepare: "select /*+ HASH_JOIN(t1, t2) */ * from t2 t1 join t2 t2 on t1.a=t2.a where t1.b is not null;", args: nil, planRegexp: ".*HashJoin.*"},
{prepare: "select /*+ INL_HASH_JOIN(t1, t2) */ * from t2 t1 join t2 t2 on t2.a=t1.a where t1.b is not null;", args: nil, planRegexp: ".*IndexHashJoin.*"},
{prepare: "select * from t2 where a=?;", args: []interface{}{1}, planRegexp: ".*Point_Get.*"},
{prepare: "select * from t2 where a in (?,?,?,?)", args: []interface{}{1, 2, 3, 4}, planRegexp: ".*Batch_Point_Get.*"},
}
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
for _, ca := range cases3 {
prepare, args := ca.prepare, ca.args
dbt.MustExec(fmt.Sprintf("prepare stmt from '%v'", prepare))
var params []string
for i := range args {
param := 'a' + i
dbt.MustExec(fmt.Sprintf("set @%c=%v", param, args[i]))
params = append(params, fmt.Sprintf("@%c", param))
}
sqlStr := "execute stmt"
if len(params) > 0 {
sqlStr += " using "
sqlStr += strings.Join(params, ",")
}
if strings.HasPrefix(prepare, "select") {
mustQuery(t, dbt, sqlStr)
} else {
dbt.MustExec(sqlStr)
}
}
}
check = func() {
for _, ca := range cases3 {
checkFn(ca.prepare, ca.planRegexp)
}
}
ts.testCase(t, mc, execFn, check)
// Test case for other statements
cases4 := []struct {
sql string
plan string
isQuery bool
}{
{"begin", "", false},
{"insert into t () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),()", "", false},
{"commit", "", false},
{"analyze table t", "", false},
{"explain analyze select sum(a+b) from t", ".*TableReader.*", true},
{"trace select sum(b*a), sum(a+b) from t", "", true},
{"set global tidb_stmt_summary_history_size=5;", "", false},
}
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
for _, ca := range cases4 {
if ca.isQuery {
mustQuery(t, dbt, ca.sql)
} else {
dbt.MustExec(ca.sql)
}
}
}
check = func() {
for _, ca := range cases4 {
checkFn(ca.sql, ca.plan)
}
// check for internal SQL.
checkFn("replace into mysql.global_variables (variable_name,variable_value) values ('tidb_stmt_summary_history_size', '5')", "")
}
ts.testCase(t, mc, execFn, check)
// Test case for multi-statement.
cases5 := []string{
"delete from t limit 1;",
"update t set b=1 where b is null limit 1;",
"select sum(a+b*2) from t;",
}
multiStatement5 := strings.Join(cases5, "")
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
dbt.MustExec("SET tidb_multi_statement_mode='ON'")
dbt.MustExec(multiStatement5)
}
check = func() {
for _, sqlStr := range cases5 {
checkFn(sqlStr, ".*TableReader.*")
}
}
ts.testCase(t, mc, execFn, check)
// Test case for multi-statement, but first statements execute failed
cases6 := []string{
"delete from t_not_exist;",
"update t set a=1 where a is null limit 1;",
}
multiStatement6 := strings.Join(cases6, "")
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
dbt.MustExec("SET tidb_multi_statement_mode='ON'")
_, err := db.Exec(multiStatement6)
require.NotNil(t, err)
require.Equal(t, "Error 1146: Table 'topsql.t_not_exist' doesn't exist", err.Error())
}
check = func() {
for i := 1; i < len(cases6); i++ {
sqlStr := cases6[i]
stats := mc.GetSQLStatsBySQL(sqlStr, false)
require.Equal(t, 0, len(stats), sqlStr)
}
}
ts.testCase(t, mc, execFn, check)
// Test case for multi-statement, the first statements execute success but the second statement execute failed.
cases7 := []string{
"update t set a=1 where a <0 limit 1;",
"delete from t_not_exist;",
}
multiStatement7 := strings.Join(cases7, "")
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
dbt.MustExec("SET tidb_multi_statement_mode='ON'")
_, err = db.Exec(multiStatement7)
require.NotNil(t, err)
require.Equal(t, "Error 1146 (42S02): Table 'topsql.t_not_exist' doesn't exist", err.Error())
}
check = func() {
checkFn(cases7[0], "") // the first statement execute success, should have topsql data.
}
ts.testCase(t, mc, execFn, check)
// Test case for statement with wrong syntax.
wrongSyntaxSQL := "select * froms t"
execFn = func(db *sql.DB) {
_, err = db.Exec(wrongSyntaxSQL)
require.NotNil(t, err)
require.Regexp(t, "Error 1064: You have an error in your SQL syntax...", err.Error())
}
check = func() {
stats := mc.GetSQLStatsBySQL(wrongSyntaxSQL, false)
require.Equal(t, 0, len(stats), wrongSyntaxSQL)
}
ts.testCase(t, mc, execFn, check)
// Test case for high cost of plan optimize.
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/planner/mockHighLoadForOptimize", "return"))
selectSQL := "select sum(a+b), count(distinct b) from t where a+b >0"
updateSQL := "update t set a=a+100 where a > 10000000"
selectInPlanSQL := "select * from t where exists (select 1 from t1 where t1.a = 1);"
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
mustQuery(t, dbt, selectSQL)
dbt.MustExec(updateSQL)
mustQuery(t, dbt, selectInPlanSQL)
}
check = func() {
checkFn(selectSQL, "")
checkFn(updateSQL, "")
selectCPUTime := mc.GetSQLCPUTimeBySQL(selectSQL)
updateCPUTime := mc.GetSQLCPUTimeBySQL(updateSQL)
require.Less(t, updateCPUTime, selectCPUTime)
checkFn(selectInPlanSQL, "")
}
ts.testCase(t, mc, execFn, check)
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/planner/mockHighLoadForOptimize"))
// Test case for DDL execute failed but should still have CPU data.
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockHighLoadForAddIndex", "return"))
dbt.MustExec(fmt.Sprintf("insert into t values (%v,%v), (%v, %v);", 2000, 1, 2001, 1))
addIndexStr := "alter table t add unique index idx_b (b)"
execFn = func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
dbt.MustExec("alter table t drop index if exists idx_b")
_, err := db.Exec(addIndexStr)
require.NotNil(t, err)
require.Equal(t, "Error 1062 (23000): Duplicate entry '1' for key 't.idx_b'", err.Error())
}
check = func() {
checkFn(addIndexStr, "")
}
ts.testCase(t, mc, execFn, check)
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockHighLoadForAddIndex"))
// Test case for execute failed cause by storage error.
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/copr/handleTaskOnceError", `return(true)`))
execFailedQuery := "select * from t where a*b < 1000"
execFn = func(db *sql.DB) {
_, err = db.Query(execFailedQuery)
require.NotNil(t, err)
require.Equal(t, "Error 1105 (HY000): mock handleTaskOnce error", err.Error())
}
check = func() {
checkFn(execFailedQuery, "")
}
ts.testCase(t, mc, execFn, check)
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/copr/handleTaskOnceError"))
}
// testCase runs execFn repeatedly in a background goroutine (via loopExec)
// while checkFn performs its verification, then stops the workload, waits for
// the goroutine to exit, and clears the mock collector for the next case.
func (ts *tidbTestTopSQLSuite) testCase(t *testing.T, mc *mockTopSQLTraceCPU.TopSQLCollector, execFn func(db *sql.DB), checkFn func()) {
	ctx, cancel := context.WithCancel(context.Background())
	var execWG sync.WaitGroup
	execWG.Add(1)
	go func() {
		defer execWG.Done()
		ts.loopExec(ctx, t, execFn)
	}()
	// Verify while the workload keeps running in the background.
	checkFn()
	// Stop the executor and make sure it has fully exited before resetting,
	// so no stale samples leak into the next test case.
	cancel()
	execWG.Wait()
	mc.Reset()
}
// mustQuery executes query, drains the full result set, and asserts that both
// iteration and close succeed.
func mustQuery(t *testing.T, dbt *testkit.DBTestKit, query string) {
	rows := dbt.MustQuery(query)
	for rows.Next() {
	}
	// rows.Next may return false because of an iteration error rather than
	// end-of-rows; rows.Err surfaces that error (database/sql contract).
	require.NoError(t, rows.Err())
	err := rows.Close()
	require.NoError(t, err)
}
// mockCollector is a stmtstats.Collector implementation that forwards every
// collected StatementStatsMap to a user-supplied callback.
type mockCollector struct {
	f func(data stmtstats.StatementStatsMap) // invoked once per collected stats map
}
// newMockCollector builds a stmtstats.Collector that invokes f for every
// collected statement-stats map.
func newMockCollector(f func(data stmtstats.StatementStatsMap)) stmtstats.Collector {
	collector := &mockCollector{f: f}
	return collector
}
// CollectStmtStatsMap implements stmtstats.Collector by delegating the
// collected data to the callback supplied at construction time.
func (c *mockCollector) CollectStmtStatsMap(data stmtstats.StatementStatsMap) {
	c.f(data)
}
// waitCollected blocks until a collection notification arrives on ch, or
// gives up silently after a 3-second timeout.
func waitCollected(ch chan struct{}) {
	deadline := time.After(3 * time.Second)
	select {
	case <-ch:
		// A collection round completed.
	case <-deadline:
		// Best effort: proceed even if no notification was observed in time.
	}
}
// TestTopSQLStatementStats verifies that statement-level stats (exec count,
// duration count/sum, and per-statement KV request count) are collected for
// three execution paths: plain SQL text, server-side prepared statements
// (prepare ... / execute ... using @a, @b), and client-side prepared
// statements (database/sql Prepare + Query/Exec). Each statement contains a
// sleep(0.1) so its duration is bounded below by 100ms per execution.
func TestTopSQLStatementStats(t *testing.T) {
	ts, total, tagChecker, collectedNotifyCh := setupForTestTopSQLStatementStats(t)
	const ExecCountPerSQL = 2
	// Test for CRUD.
	cases1 := []string{
		"insert into t values (%d, sleep(0.1))",
		"update t set a = %[1]d + 1000 where a = %[1]d and sleep(0.1);",
		"select a from t where b = %d and sleep(0.1);",
		"select a from t where a = %d and sleep(0.1);", // test for point-get
		"delete from t where a = %d and sleep(0.1);",
		"insert into t values (%d, sleep(0.1)) on duplicate key update b = b+1",
	}
	var wg sync.WaitGroup
	// sqlDigests maps each statement's normalized digest to its SQL text, so
	// collected stats can be matched back to the statement that produced them.
	sqlDigests := map[stmtstats.BinaryDigest]string{}
	for i, ca := range cases1 {
		sqlStr := fmt.Sprintf(ca, i)
		_, digest := parser.NormalizeDigest(sqlStr)
		sqlDigests[stmtstats.BinaryDigest(digest.Bytes())] = sqlStr
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for _, ca := range cases1 {
			db, err := sql.Open("mysql", ts.GetDSN())
			require.NoError(t, err)
			dbt := testkit.NewDBTestKit(t, db)
			dbt.MustExec("use stmtstats;")
			for n := 0; n < ExecCountPerSQL; n++ {
				sqlStr := fmt.Sprintf(ca, n)
				if strings.HasPrefix(strings.ToLower(sqlStr), "select") {
					mustQuery(t, dbt, sqlStr)
				} else {
					dbt.MustExec(sqlStr)
				}
			}
			err = db.Close()
			require.NoError(t, err)
		}
	}()
	// Test for prepare stmt/execute stmt
	cases2 := []struct {
		prepare    string
		execStmt   string // statement text whose digest the stats are attributed to
		setSQLsGen func(idx int) []string
		execSQL    string
	}{
		{
			prepare:  "prepare stmt from 'insert into t2 values (?, sleep(?))';",
			execStmt: "insert into t2 values (1, sleep(0.1))",
			setSQLsGen: func(idx int) []string {
				return []string{fmt.Sprintf("set @a=%v", idx), "set @b=0.1"}
			},
			execSQL: "execute stmt using @a, @b;",
		},
		{
			prepare:  "prepare stmt from 'update t2 set a = a + 1000 where a = ? and sleep(?);';",
			execStmt: "update t2 set a = a + 1000 where a = 1 and sleep(0.1);",
			setSQLsGen: func(idx int) []string {
				return []string{fmt.Sprintf("set @a=%v", idx), "set @b=0.1"}
			},
			execSQL: "execute stmt using @a, @b;",
		},
		{
			// test for point-get
			prepare:  "prepare stmt from 'select a, sleep(?) from t2 where a = ?';",
			execStmt: "select a, sleep(?) from t2 where a = ?",
			setSQLsGen: func(idx int) []string {
				return []string{"set @a=0.1", fmt.Sprintf("set @b=%v", idx)}
			},
			execSQL: "execute stmt using @a, @b;",
		},
		{
			prepare:  "prepare stmt from 'select a, sleep(?) from t2 where b = ?';",
			execStmt: "select a, sleep(?) from t2 where b = ?",
			setSQLsGen: func(idx int) []string {
				return []string{"set @a=0.1", fmt.Sprintf("set @b=%v", idx)}
			},
			execSQL: "execute stmt using @a, @b;",
		},
		{
			prepare:  "prepare stmt from 'delete from t2 where sleep(?) and a = ?';",
			execStmt: "delete from t2 where sleep(0.1) and a = 1",
			setSQLsGen: func(idx int) []string {
				return []string{"set @a=0.1", fmt.Sprintf("set @b=%v", idx)}
			},
			execSQL: "execute stmt using @a, @b;",
		},
		{
			prepare:  "prepare stmt from 'insert into t2 values (?, sleep(?)) on duplicate key update b = b+1';",
			execStmt: "insert into t2 values (1, sleep(0.1)) on duplicate key update b = b+1",
			setSQLsGen: func(idx int) []string {
				return []string{fmt.Sprintf("set @a=%v", idx), "set @b=0.1"}
			},
			execSQL: "execute stmt using @a, @b;",
		},
		{
			prepare:  "prepare stmt from 'set global tidb_enable_top_sql = (? = sleep(?))';",
			execStmt: "set global tidb_enable_top_sql = (0 = sleep(0.1))",
			setSQLsGen: func(idx int) []string {
				return []string{"set @a=0", "set @b=0.1"}
			},
			execSQL: "execute stmt using @a, @b;",
		},
	}
	for _, ca := range cases2 {
		_, digest := parser.NormalizeDigest(ca.execStmt)
		sqlDigests[stmtstats.BinaryDigest(digest.Bytes())] = ca.execStmt
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for _, ca := range cases2 {
			db, err := sql.Open("mysql", ts.GetDSN())
			require.NoError(t, err)
			dbt := testkit.NewDBTestKit(t, db)
			dbt.MustExec("use stmtstats;")
			// prepare stmt
			dbt.MustExec(ca.prepare)
			for n := 0; n < ExecCountPerSQL; n++ {
				setSQLs := ca.setSQLsGen(n)
				for _, setSQL := range setSQLs {
					dbt.MustExec(setSQL)
				}
				if strings.HasPrefix(strings.ToLower(ca.execStmt), "select") {
					mustQuery(t, dbt, ca.execSQL)
				} else {
					dbt.MustExec(ca.execSQL)
				}
			}
			err = db.Close()
			require.NoError(t, err)
		}
	}()
	// Test for prepare by db client prepare/exec interface.
	cases3 := []struct {
		prepare  string
		execStmt string
		argsGen  func(idx int) []interface{}
	}{
		{
			prepare: "insert into t3 values (?, sleep(?))",
			argsGen: func(idx int) []interface{} {
				return []interface{}{idx, 0.1}
			},
		},
		{
			prepare: "update t3 set a = a + 1000 where a = ? and sleep(?)",
			argsGen: func(idx int) []interface{} {
				return []interface{}{idx, 0.1}
			},
		},
		{
			// test for point-get
			prepare: "select a, sleep(?) from t3 where a = ?",
			argsGen: func(idx int) []interface{} {
				return []interface{}{0.1, idx}
			},
		},
		{
			prepare: "select a, sleep(?) from t3 where b = ?",
			argsGen: func(idx int) []interface{} {
				return []interface{}{0.1, idx}
			},
		},
		{
			prepare: "delete from t3 where sleep(?) and a = ?",
			argsGen: func(idx int) []interface{} {
				return []interface{}{0.1, idx}
			},
		},
		{
			prepare: "insert into t3 values (?, sleep(?)) on duplicate key update b = b+1",
			argsGen: func(idx int) []interface{} {
				return []interface{}{idx, 0.1}
			},
		},
		{
			prepare: "set global tidb_enable_1pc = (? = sleep(?))",
			argsGen: func(idx int) []interface{} {
				return []interface{}{0, 0.1}
			},
		},
	}
	for _, ca := range cases3 {
		_, digest := parser.NormalizeDigest(ca.prepare)
		sqlDigests[stmtstats.BinaryDigest(digest.Bytes())] = ca.prepare
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		for _, ca := range cases3 {
			db, err := sql.Open("mysql", ts.GetDSN())
			require.NoError(t, err)
			dbt := testkit.NewDBTestKit(t, db)
			dbt.MustExec("use stmtstats;")
			// prepare stmt
			stmt, err := db.Prepare(ca.prepare)
			require.NoError(t, err)
			for n := 0; n < ExecCountPerSQL; n++ {
				args := ca.argsGen(n)
				if strings.HasPrefix(strings.ToLower(ca.prepare), "select") {
					row, err := stmt.Query(args...)
					require.NoError(t, err)
					err = row.Close()
					require.NoError(t, err)
				} else {
					_, err := stmt.Exec(args...)
					require.NoError(t, err)
				}
			}
			err = db.Close()
			require.NoError(t, err)
		}
	}()
	wg.Wait()
	// Wait for collect.
	waitCollected(collectedNotifyCh)
	// Match collected stats back to the statements above and verify counters.
	found := 0
	for digest, item := range total {
		if sqlStr, ok := sqlDigests[digest.SQLDigest]; ok {
			found++
			require.Equal(t, uint64(ExecCountPerSQL), item.ExecCount, sqlStr)
			require.Equal(t, uint64(ExecCountPerSQL), item.DurationCount, sqlStr)
			// sleep(0.1) bounds each execution between 100ms and (loosely) 300ms.
			require.True(t, item.SumDurationNs > uint64(time.Millisecond*100*ExecCountPerSQL), sqlStr)
			require.True(t, item.SumDurationNs < uint64(time.Millisecond*300*ExecCountPerSQL), sqlStr)
			if strings.HasPrefix(sqlStr, "set global") {
				// set global statement use internal SQL to change global variable, so itself doesn't have KV request.
				continue
			}
			var kvSum uint64
			for _, kvCount := range item.KvStatsItem.KvExecCount {
				kvSum += kvCount
			}
			require.Equal(t, uint64(ExecCountPerSQL), kvSum)
			tagChecker.checkExist(t, digest.SQLDigest, sqlStr)
		}
	}
	// Every registered digest must be found: 6 (cases1) + 7 (cases2) + 7 (cases3) = 20.
	require.Equal(t, len(sqlDigests), found)
	require.Equal(t, 20, found)
}
// resourceTagChecker records, per SQL digest, the set of TiKV RPC command
// types observed carrying that digest in their resource-group tag. The
// embedded mutex guards sqlDigest2Reqs, which is written concurrently from
// the unistore RPC send hook installed in setupForTestTopSQLStatementStats.
type resourceTagChecker struct {
	sync.Mutex
	sqlDigest2Reqs map[stmtstats.BinaryDigest]map[tikvrpc.CmdType]struct{}
}
// checkExist asserts that at least one RPC request tagged with the given SQL
// digest was observed. Statements that never issue tagged RPCs directly are
// special-cased.
func (c *resourceTagChecker) checkExist(t *testing.T, digest stmtstats.BinaryDigest, sqlStr string) {
	// `set global` statements run through a separate internal SQL, so the
	// original statement itself sends no tagged RPC request; skip the check.
	if strings.HasPrefix(sqlStr, "set global") {
		return
	}
	// `trace` statements likewise execute through an internal SQL; strip the
	// prefix and check the digest of the traced statement instead.
	if strings.HasPrefix(sqlStr, "trace") {
		_, tracedDigest := parser.NormalizeDigest(strings.TrimPrefix(sqlStr, "trace"))
		digest = stmtstats.BinaryDigest(tracedDigest.Bytes())
	}
	c.Lock()
	defer c.Unlock()
	_, seen := c.sqlDigest2Reqs[digest]
	require.True(t, seen, sqlStr)
}
// checkReqExist asserts that every RPC command type in reqs was observed for
// the given SQL digest. Calling it with no expected request types is a no-op.
func (c *resourceTagChecker) checkReqExist(t *testing.T, digest stmtstats.BinaryDigest, sqlStr string, reqs ...tikvrpc.CmdType) {
	if len(reqs) == 0 {
		return
	}
	c.Lock()
	defer c.Unlock()
	observed, found := c.sqlDigest2Reqs[digest]
	require.True(t, found, sqlStr)
	for _, want := range reqs {
		_, seen := observed[want]
		require.True(t, seen, fmt.Sprintf("sql: %v, expect: %v, got: %v", sqlStr, reqs, observed))
	}
}
// setupForTestTopSQLStatementStats boots a TiDB test suite with Top SQL and
// the statement-stats aggregator enabled. It registers a collector that
// merges every collected batch into the returned map, and installs a unistore
// RPC send hook that records which RPC command types carried each SQL digest.
// It returns the suite, the merged stats map, the resource-tag checker, and a
// channel that receives a (best-effort) notification after each collection.
func setupForTestTopSQLStatementStats(t *testing.T) (*tidbTestSuite, stmtstats.StatementStatsMap, *resourceTagChecker, chan struct{}) {
	// Prepare stmt stats.
	stmtstats.SetupAggregator()
	// Register stmt stats collector.
	var mu sync.Mutex // guards `total`, which tests read after waitCollected
	collectedNotifyCh := make(chan struct{})
	total := stmtstats.StatementStatsMap{}
	mockCollector := newMockCollector(func(data stmtstats.StatementStatsMap) {
		mu.Lock()
		defer mu.Unlock()
		total.Merge(data)
		// Non-blocking notify: drop the signal if no test is waiting.
		select {
		case collectedNotifyCh <- struct{}{}:
		default:
		}
	})
	stmtstats.RegisterCollector(mockCollector)
	ts := createTidbTestSuite(t)
	db, err := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, err)
	defer func() {
		err := db.Close()
		require.NoError(t, err)
	}()
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop", `return(true)`))
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/unistoreRPCClientSendHook", `return(true)`))
	dbt := testkit.NewDBTestKit(t, db)
	dbt.MustExec("drop database if exists stmtstats")
	dbt.MustExec("create database stmtstats")
	dbt.MustExec("use stmtstats;")
	dbt.MustExec("create table t (a int, b int, unique index idx(a));")
	dbt.MustExec("create table t2 (a int, b int, unique index idx(a));")
	dbt.MustExec("create table t3 (a int, b int, unique index idx(a));")
	// Enable TopSQL
	topsqlstate.EnableTopSQL()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.TopSQL.ReceiverAddress = "mock-agent"
	})
	tagChecker := &resourceTagChecker{
		sqlDigest2Reqs: make(map[stmtstats.BinaryDigest]map[tikvrpc.CmdType]struct{}),
	}
	// Hook every outgoing unistore RPC and record, per SQL digest decoded
	// from its resource-group tag, the RPC command types observed.
	unistoreRPCClientSendHook := func(req *tikvrpc.Request) {
		tag := req.GetResourceGroupTag()
		if len(tag) == 0 || ddlutil.IsInternalResourceGroupTaggerForTopSQL(tag) {
			// Ignore for internal background request.
			return
		}
		sqlDigest, err := resourcegrouptag.DecodeResourceGroupTag(tag)
		require.NoError(t, err)
		tagChecker.Lock()
		defer tagChecker.Unlock()
		reqMap, ok := tagChecker.sqlDigest2Reqs[stmtstats.BinaryDigest(sqlDigest)]
		if !ok {
			reqMap = make(map[tikvrpc.CmdType]struct{})
		}
		reqMap[req.Type] = struct{}{}
		tagChecker.sqlDigest2Reqs[stmtstats.BinaryDigest(sqlDigest)] = reqMap
	}
	unistore.UnistoreRPCClientSendHook.Store(&unistoreRPCClientSendHook)
	// Undo global state (collector, failpoints, aggregator) when the test ends.
	t.Cleanup(func() {
		stmtstats.UnregisterCollector(mockCollector)
		err = failpoint.Disable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop")
		require.NoError(t, err)
		err = failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/unistoreRPCClientSendHook")
		require.NoError(t, err)
		stmtstats.CloseAggregator()
		view.Stop()
	})
	return ts, total, tagChecker, collectedNotifyCh
}
// TestTopSQLStatementStats2 verifies statement-stats collection for statement
// kinds not covered by TestTopSQLStatementStats: analyze/explain/trace/set,
// multi-statement batches (including partially-failed ones), and DDL. For a
// failed multi-statement batch, only the statements that actually executed
// must be counted.
func TestTopSQLStatementStats2(t *testing.T) {
	ts, total, tagChecker, collectedNotifyCh := setupForTestTopSQLStatementStats(t)
	const ExecCountPerSQL = 3
	sqlDigests := map[stmtstats.BinaryDigest]string{}
	// Test case for other statements
	cases4 := []struct {
		sql     string
		plan    string
		isQuery bool
	}{
		{"insert into t () values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),()", "", false},
		{"analyze table t", "", false},
		{"explain analyze select sum(a+b) from t", ".*TableReader.*", true},
		{"trace select sum(b*a), sum(a+b) from t", "", true},
		{"set global tidb_stmt_summary_history_size=5;", "", false},
		{"select * from stmtstats.t where exists (select 1 from stmtstats.t2 where t2.a = 1);", ".*TableReader.*", true},
	}
	// executeCaseFn opens a fresh connection and runs execFn ExecCountPerSQL
	// times, so each statement's ExecCount is exactly ExecCountPerSQL.
	executeCaseFn := func(execFn func(db *sql.DB)) {
		db, err := sql.Open("mysql", ts.GetDSN())
		require.NoError(t, err)
		dbt := testkit.NewDBTestKit(t, db)
		dbt.MustExec("use stmtstats;")
		require.NoError(t, err)
		for n := 0; n < ExecCountPerSQL; n++ {
			execFn(db)
		}
		err = db.Close()
		require.NoError(t, err)
	}
	execFn := func(db *sql.DB) {
		dbt := testkit.NewDBTestKit(t, db)
		for _, ca := range cases4 {
			if ca.isQuery {
				mustQuery(t, dbt, ca.sql)
			} else {
				dbt.MustExec(ca.sql)
			}
		}
	}
	for _, ca := range cases4 {
		_, digest := parser.NormalizeDigest(ca.sql)
		sqlDigests[stmtstats.BinaryDigest(digest.Bytes())] = ca.sql
	}
	executeCaseFn(execFn)
	// Test case for multi-statement.
	cases5 := []string{
		"delete from t limit 1;",
		"update t set b=1 where b is null limit 1;",
		"select sum(a+b*2) from t;",
	}
	multiStatement5 := strings.Join(cases5, "")
	// Test case for multi-statement, but first statements execute failed
	cases6 := []string{
		"delete from t6_not_exist;",
		"update t set a=1 where a is null limit 1;",
	}
	multiStatement6 := strings.Join(cases6, "")
	// Test case for multi-statement, the first statements execute success but the second statement execute failed.
	cases7 := []string{
		"update t set a=1 where a <0 limit 1;",
		"delete from t7_not_exist;",
	}
	// Test case for DDL.
	cases8 := []string{
		"create table if not exists t10 (a int, b int)",
		"alter table t drop index if exists idx_b",
		"alter table t add index idx_b (b)",
	}
	multiStatement7 := strings.Join(cases7, "")
	execFn = func(db *sql.DB) {
		dbt := testkit.NewDBTestKit(t, db)
		dbt.MustExec("SET tidb_multi_statement_mode='ON'")
		dbt.MustExec(multiStatement5)
		_, err := db.Exec(multiStatement6)
		require.NotNil(t, err)
		require.Equal(t, "Error 1146 (42S02): Table 'stmtstats.t6_not_exist' doesn't exist", err.Error())
		_, err = db.Exec(multiStatement7)
		require.NotNil(t, err)
		require.Equal(t, "Error 1146 (42S02): Table 'stmtstats.t7_not_exist' doesn't exist", err.Error())
		for _, ca := range cases8 {
			dbt.MustExec(ca)
		}
	}
	executeCaseFn(execFn)
	// Only the statements that actually executed are expected in the stats:
	// all of cases5, the first statement of cases7, and the DDLs in cases8.
	// cases6 fails on its first statement, so nothing from it is expected.
	sqlStrs := append([]string{}, cases5...)
	sqlStrs = append(sqlStrs, cases7[0])
	sqlStrs = append(sqlStrs, cases8...)
	for _, sqlStr := range sqlStrs {
		_, digest := parser.NormalizeDigest(sqlStr)
		sqlDigests[stmtstats.BinaryDigest(digest.Bytes())] = sqlStr
	}
	// Wait for collect.
	waitCollected(collectedNotifyCh)
	foundMap := map[stmtstats.BinaryDigest]string{}
	for digest, item := range total {
		if sqlStr, ok := sqlDigests[digest.SQLDigest]; ok {
			require.Equal(t, uint64(ExecCountPerSQL), item.ExecCount, sqlStr)
			require.True(t, item.SumDurationNs > 1, sqlStr)
			foundMap[digest.SQLDigest] = sqlStr
			tagChecker.checkExist(t, digest.SQLDigest, sqlStr)
			// The special check uses to test the issue #33202.
			if strings.Contains(strings.ToLower(sqlStr), "add index") {
				tagChecker.checkReqExist(t, digest.SQLDigest, sqlStr, tikvrpc.CmdScan)
			}
		}
	}
	require.Equal(t, len(sqlDigests), len(foundMap), fmt.Sprintf("%v !=\n %v", sqlDigests, foundMap))
}
// TestTopSQLStatementStats3 checks the stats of statements that are still
// running: a failpoint makes TableReader's Next sleep, so while the
// statements are in flight ExecCount is already recorded but DurationCount
// and SumDurationNs remain zero; after the statements finish, the duration
// fields must be populated as well.
func TestTopSQLStatementStats3(t *testing.T) {
	ts, total, tagChecker, collectedNotifyCh := setupForTestTopSQLStatementStats(t)
	err := failpoint.Enable("github.com/pingcap/tidb/executor/mockSleepInTableReaderNext", "return(2000)")
	require.NoError(t, err)
	defer func() {
		_ = failpoint.Disable("github.com/pingcap/tidb/executor/mockSleepInTableReaderNext")
	}()
	cases := []string{
		"select count(a+b) from stmtstats.t",
		"select * from stmtstats.t where b is null",
		"update stmtstats.t set b = 1 limit 10",
		"delete from stmtstats.t limit 1",
	}
	var wg sync.WaitGroup
	sqlDigests := map[stmtstats.BinaryDigest]string{}
	// Run every statement concurrently; each is slowed down by the failpoint.
	for _, ca := range cases {
		wg.Add(1)
		go func(sqlStr string) {
			defer wg.Done()
			db, err := sql.Open("mysql", ts.GetDSN())
			require.NoError(t, err)
			dbt := testkit.NewDBTestKit(t, db)
			require.NoError(t, err)
			if strings.HasPrefix(sqlStr, "select") {
				mustQuery(t, dbt, sqlStr)
			} else {
				dbt.MustExec(sqlStr)
			}
			err = db.Close()
			require.NoError(t, err)
		}(ca)
		_, digest := parser.NormalizeDigest(ca)
		sqlDigests[stmtstats.BinaryDigest(digest.Bytes())] = ca
	}
	// Wait for collect.
	waitCollected(collectedNotifyCh)
	foundMap := map[stmtstats.BinaryDigest]string{}
	for digest, item := range total {
		if sqlStr, ok := sqlDigests[digest.SQLDigest]; ok {
			// since the SQL doesn't execute finish, the ExecCount should be recorded,
			// but the DurationCount and SumDurationNs should be 0.
			require.Equal(t, uint64(1), item.ExecCount, sqlStr)
			require.Equal(t, uint64(0), item.DurationCount, sqlStr)
			require.Equal(t, uint64(0), item.SumDurationNs, sqlStr)
			foundMap[digest.SQLDigest] = sqlStr
		}
	}
	// wait sql execute finish.
	wg.Wait()
	// Wait for collect.
	waitCollected(collectedNotifyCh)
	// After completion, exactly one duration sample must have been recorded.
	for digest, item := range total {
		if sqlStr, ok := sqlDigests[digest.SQLDigest]; ok {
			require.Equal(t, uint64(1), item.ExecCount, sqlStr)
			require.Equal(t, uint64(1), item.DurationCount, sqlStr)
			require.Less(t, uint64(0), item.SumDurationNs, sqlStr)
			foundMap[digest.SQLDigest] = sqlStr
			tagChecker.checkExist(t, digest.SQLDigest, sqlStr)
		}
	}
}
// TestTopSQLStatementStats4 is the prepared-statement variant of
// TestTopSQLStatementStats3: statements are issued through database/sql
// Prepare + Query/Exec while a failpoint slows TableReader, so in-flight
// statements have ExecCount recorded with zero DurationCount/SumDurationNs,
// and the duration fields are populated once the statements complete.
func TestTopSQLStatementStats4(t *testing.T) {
	ts, total, tagChecker, collectedNotifyCh := setupForTestTopSQLStatementStats(t)
	err := failpoint.Enable("github.com/pingcap/tidb/executor/mockSleepInTableReaderNext", "return(2000)")
	require.NoError(t, err)
	defer func() {
		_ = failpoint.Disable("github.com/pingcap/tidb/executor/mockSleepInTableReaderNext")
	}()
	cases := []struct {
		prepare string
		sql     string // statement text whose digest the stats are attributed to
		args    []interface{}
	}{
		{prepare: "select count(a+b) from stmtstats.t", sql: "select count(a+b) from stmtstats.t"},
		{prepare: "select * from stmtstats.t where b is null", sql: "select * from stmtstats.t where b is null"},
		{prepare: "update stmtstats.t set b = ? limit ?", sql: "update stmtstats.t set b = 1 limit 10", args: []interface{}{1, 10}},
		{prepare: "delete from stmtstats.t limit ?", sql: "delete from stmtstats.t limit 1", args: []interface{}{1}},
	}
	var wg sync.WaitGroup
	sqlDigests := map[stmtstats.BinaryDigest]string{}
	// Run every prepared statement concurrently; the failpoint slows them down.
	for _, ca := range cases {
		wg.Add(1)
		go func(prepare string, args []interface{}) {
			defer wg.Done()
			db, err := sql.Open("mysql", ts.GetDSN())
			require.NoError(t, err)
			stmt, err := db.Prepare(prepare)
			require.NoError(t, err)
			if strings.HasPrefix(prepare, "select") {
				rows, err := stmt.Query(args...)
				require.NoError(t, err)
				for rows.Next() {
				}
				err = rows.Close()
				require.NoError(t, err)
			} else {
				_, err := stmt.Exec(args...)
				require.NoError(t, err)
			}
			err = db.Close()
			require.NoError(t, err)
		}(ca.prepare, ca.args)
		_, digest := parser.NormalizeDigest(ca.sql)
		sqlDigests[stmtstats.BinaryDigest(digest.Bytes())] = ca.sql
	}
	// Wait for collect.
	waitCollected(collectedNotifyCh)
	foundMap := map[stmtstats.BinaryDigest]string{}
	for digest, item := range total {
		if sqlStr, ok := sqlDigests[digest.SQLDigest]; ok {
			// since the SQL doesn't execute finish, the ExecCount should be recorded,
			// but the DurationCount and SumDurationNs should be 0.
			require.Equal(t, uint64(1), item.ExecCount, sqlStr)
			require.Equal(t, uint64(0), item.DurationCount, sqlStr)
			require.Equal(t, uint64(0), item.SumDurationNs, sqlStr)
			foundMap[digest.SQLDigest] = sqlStr
		}
	}
	// wait sql execute finish.
	wg.Wait()
	// Wait for collect.
	waitCollected(collectedNotifyCh)
	// After completion, exactly one duration sample must have been recorded.
	for digest, item := range total {
		if sqlStr, ok := sqlDigests[digest.SQLDigest]; ok {
			require.Equal(t, uint64(1), item.ExecCount, sqlStr)
			require.Equal(t, uint64(1), item.DurationCount, sqlStr)
			require.Less(t, uint64(0), item.SumDurationNs, sqlStr)
			foundMap[digest.SQLDigest] = sqlStr
			tagChecker.checkExist(t, digest.SQLDigest, sqlStr)
		}
	}
}
func TestTopSQLResourceTag(t *testing.T) {
ts, _, tagChecker, _ := setupForTestTopSQLStatementStats(t)
defer func() {
topsqlstate.DisableTopSQL()
}()
loadDataFile, err := os.CreateTemp("", "load_data_test0.csv")
require.NoError(t, err)
defer func() {
path := loadDataFile.Name()
err = loadDataFile.Close()
require.NoError(t, err)
err = os.Remove(path)
require.NoError(t, err)
}()
_, err = loadDataFile.WriteString(
"31 31\n" +
"32 32\n" +
"33 33\n")
require.NoError(t, err)
// Test case for other statements
cases := []struct {
sql string
isQuery bool
reqs []tikvrpc.CmdType
}{
// Test for curd.
{"insert into t values (1,1), (3,3)", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
{"insert into t values (1,2) on duplicate key update a = 2", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdBatchGet}},
{"update t set b=b+1 where a=3", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdGet}},
{"update t set b=b+1 where a>1", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdCop}},
{"delete from t where a=3", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdGet}},
{"delete from t where a>1", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdCop}},
{"insert ignore into t values (2,2), (3,3)", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdBatchGet}},
{"select * from t where a in (1,2,3,4)", true, []tikvrpc.CmdType{tikvrpc.CmdBatchGet}},
{"select * from t where a = 1", true, []tikvrpc.CmdType{tikvrpc.CmdGet}},
{"select * from t where b > 0", true, []tikvrpc.CmdType{tikvrpc.CmdCop}},
{"replace into t values (2,2), (4,4)", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdBatchGet}},
// Test for DDL
{"create database test_db0", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
{"create table test_db0.test_t0 (a int, b int, index idx(a))", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
{"create table test_db0.test_t1 (a int, b int, index idx(a))", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
{"alter table test_db0.test_t0 add column c int", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
{"drop table test_db0.test_t0", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
{"drop database test_db0", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
{"alter table t modify column b double", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdScan, tikvrpc.CmdCop}},
{"alter table t add index idx2 (b,a)", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdScan, tikvrpc.CmdCop}},
{"alter table t drop index idx2", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
// Test for transaction
{"begin", false, nil},
{"insert into t2 values (10,10), (11,11)", false, nil},
{"insert ignore into t2 values (20,20), (21,21)", false, []tikvrpc.CmdType{tikvrpc.CmdBatchGet}},
{"commit", false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit}},
// Test for other statements.
{"set @@global.tidb_enable_1pc = 1", false, nil},
{fmt.Sprintf("load data local infile %q into table t2", loadDataFile.Name()), false, []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdBatchGet}},
{"admin check table t", false, nil},
{"admin check index t idx", false, nil},
{"admin recover index t idx", false, []tikvrpc.CmdType{tikvrpc.CmdBatchGet}},
{"admin cleanup index t idx", false, []tikvrpc.CmdType{tikvrpc.CmdBatchGet}},
}
internalCases := []struct {
sql string
reqs []tikvrpc.CmdType
}{
{"replace into mysql.global_variables (variable_name,variable_value) values ('tidb_enable_1pc', '1')", []tikvrpc.CmdType{tikvrpc.CmdPrewrite, tikvrpc.CmdCommit, tikvrpc.CmdBatchGet}},
{"select /*+ read_from_storage(tikv[`stmtstats`.`t`]) */ bit_xor(crc32(md5(concat_ws(0x2, `_tidb_rowid`, `a`)))), ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024), count(*) from `stmtstats`.`t` use index() where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024)", []tikvrpc.CmdType{tikvrpc.CmdCop}},
{"select bit_xor(crc32(md5(concat_ws(0x2, `_tidb_rowid`, `a`)))), ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024), count(*) from `stmtstats`.`t` use index(`idx`) where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024)", []tikvrpc.CmdType{tikvrpc.CmdCop}},
{"select /*+ read_from_storage(tikv[`stmtstats`.`t`]) */ bit_xor(crc32(md5(concat_ws(0x2, `_tidb_rowid`, `a`)))), ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024), count(*) from `stmtstats`.`t` use index() where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024)", []tikvrpc.CmdType{tikvrpc.CmdCop}},
{"select bit_xor(crc32(md5(concat_ws(0x2, `_tidb_rowid`, `a`)))), ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024), count(*) from `stmtstats`.`t` use index(`idx`) where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `_tidb_rowid`))) as signed) - 0) div 1 % 1024)", []tikvrpc.CmdType{tikvrpc.CmdCop}},
}
executeCaseFn := func(execFn func(db *sql.DB)) {
dsn := ts.GetDSN(func(config *mysql.Config) {
config.AllowAllFiles = true
config.Params["sql_mode"] = "''"
})
db, err := sql.Open("mysql", dsn)
require.NoError(t, err)
dbt := testkit.NewDBTestKit(t, db)
dbt.MustExec("use stmtstats;")
require.NoError(t, err)
execFn(db)
err = db.Close()
require.NoError(t, err)
}
execFn := func(db *sql.DB) {
dbt := testkit.NewDBTestKit(t, db)
for _, ca := range cases {
if ca.isQuery {
mustQuery(t, dbt, ca.sql)
} else {
dbt.MustExec(ca.sql)
}
}
}
executeCaseFn(execFn)
for _, ca := range cases {
_, digest := parser.NormalizeDigest(ca.sql)
tagChecker.checkReqExist(t, stmtstats.BinaryDigest(digest.Bytes()), ca.sql, ca.reqs...)
}
for _, ca := range internalCases {
_, digest := parser.NormalizeDigest(ca.sql)
tagChecker.checkReqExist(t, stmtstats.BinaryDigest(digest.Bytes()), ca.sql, ca.reqs...)
}
}
// loopExec opens a dedicated connection to the "topsql" database and keeps
// invoking fn with it until ctx is cancelled. The connection is closed on
// return.
func (ts *tidbTestTopSQLSuite) loopExec(ctx context.Context, t *testing.T, fn func(db *sql.DB)) {
	conn, openErr := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, openErr, "Error connecting")
	defer func() {
		require.NoError(t, conn.Close())
	}()
	kit := testkit.NewDBTestKit(t, conn)
	kit.MustExec("use topsql;")
	for {
		// Non-blocking cancellation check between iterations.
		select {
		case <-ctx.Done():
			return
		default:
		}
		fn(conn)
	}
}
// TestLocalhostClientMapping checks how the server maps client identities for
// unix-socket vs loopback-TCP connections: socket connections authenticate as
// user@localhost, while (per the NOTICE below) TCP loopback connections match
// the '127.0.0.1' / '%' host entries rather than 'localhost'.
func TestLocalhostClientMapping(t *testing.T) {
	tempDir := t.TempDir()
	socketFile := tempDir + "/tidbtest.sock" // Unix Socket does not work on Windows, so '/' should be OK
	cli := testserverclient.NewTestServerClient()
	cfg := util2.NewTestConfig()
	cfg.Socket = socketFile
	cfg.Port = cli.Port
	cfg.Status.ReportStatus = false
	ts := createTidbTestSuite(t)
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	server.SetDomain(ts.domain)
	cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	defer server.Close()
	cli.WaitUntilServerCanConnect()
	cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	// Create a db connection for root
	db, err := sql.Open("mysql", cli.GetDSN(func(config *mysql.Config) {
		config.User = "root"
		config.Net = "unix"
		config.DBName = "test"
		config.Addr = socketFile
	}))
	require.NoErrorf(t, err, "Open failed")
	err = db.Ping()
	require.NoErrorf(t, err, "Ping failed")
	defer db.Close()
	dbt := testkit.NewDBTestKit(t, db)
	// root over the unix socket is identified as root@localhost.
	rows := dbt.MustQuery("select user()")
	cli.CheckRows(t, rows, "root@localhost")
	require.NoError(t, rows.Close())
	rows = dbt.MustQuery("show grants")
	cli.CheckRows(t, rows, "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION")
	require.NoError(t, rows.Close())
	// Create both a 'localhost' and a '%' entry with different privileges so
	// we can tell which one a connection matched.
	dbt.MustExec("CREATE USER 'localhostuser'@'localhost'")
	dbt.MustExec("CREATE USER 'localhostuser'@'%'")
	defer func() {
		dbt.MustExec("DROP USER IF EXISTS 'localhostuser'@'%'")
		dbt.MustExec("DROP USER IF EXISTS 'localhostuser'@'localhost'")
		dbt.MustExec("DROP USER IF EXISTS 'localhostuser'@'127.0.0.1'")
	}()
	dbt.MustExec("GRANT SELECT ON test.* TO 'localhostuser'@'%'")
	dbt.MustExec("GRANT SELECT,UPDATE ON test.* TO 'localhostuser'@'localhost'")
	// Test with loopback interface - Should get access to localhostuser@localhost!
	cli.RunTests(t, func(config *mysql.Config) {
		config.User = "localhostuser"
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			// NOTICE: this is not compatible with MySQL! (MySQL would report localhostuser@localhost also for 127.0.0.1)
			cli.CheckRows(t, rows, "localhostuser@127.0.0.1")
			require.NoError(t, rows.Close())
			// The grants are the 'localhost' entry's grants (SELECT,UPDATE).
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'localhostuser'@'localhost'\nGRANT SELECT,UPDATE ON test.* TO 'localhostuser'@'localhost'")
			require.NoError(t, rows.Close())
		})
	// Replace the 'localhost' entry with an explicit '127.0.0.1' one.
	dbt.MustExec("DROP USER IF EXISTS 'localhostuser'@'localhost'")
	dbt.MustExec("CREATE USER 'localhostuser'@'127.0.0.1'")
	dbt.MustExec("GRANT SELECT,UPDATE ON test.* TO 'localhostuser'@'127.0.0.1'")
	// Test with unix domain socket file connection - Should get access to '%'
	cli.RunTests(t, func(config *mysql.Config) {
		config.Net = "unix"
		config.Addr = socketFile
		config.User = "localhostuser"
		config.DBName = "test"
	},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("select user()")
			cli.CheckRows(t, rows, "localhostuser@localhost")
			require.NoError(t, rows.Close())
			rows = dbt.MustQuery("show grants")
			cli.CheckRows(t, rows, "GRANT USAGE ON *.* TO 'localhostuser'@'%'\nGRANT SELECT ON test.* TO 'localhostuser'@'%'")
			require.NoError(t, rows.Close())
		})
	// Test if only localhost exists
	dbt.MustExec("DROP USER 'localhostuser'@'%'")
	dbSocket, err := sql.Open("mysql", cli.GetDSN(func(config *mysql.Config) {
		config.User = "localhostuser"
		config.Net = "unix"
		config.DBName = "test"
		config.Addr = socketFile
	}))
	require.NoErrorf(t, err, "Open failed")
	defer dbSocket.Close()
	// With no '%' entry and no 'localhost' entry left, the socket login must fail.
	err = dbSocket.Ping()
	require.Errorf(t, err, "Connection successful without matching host for unix domain socket!")
}
// TestRcReadCheckTS exercises tidb_rc_read_check_ts under READ-COMMITTED
// isolation: inside tk's pessimistic transaction, a concurrent writer (tk2)
// updates rows between reads, and each read kind (point get, batch point
// get, scan, reverse scan) must observe the newly committed values.
func TestRcReadCheckTS(t *testing.T) {
	ts := createTidbTestSuite(t)
	db, err := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, err)
	defer func() {
		err := db.Close()
		require.NoError(t, err)
	}()
	db2, err := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, err)
	defer func() {
		err := db2.Close()
		require.NoError(t, err)
	}()
	// tk2 is the conflicting writer; async commit and 1PC are disabled on it.
	tk2 := testkit.NewDBTestKit(t, db2)
	tk2.MustExec("set @@tidb_enable_async_commit = 0")
	tk2.MustExec("set @@tidb_enable_1pc = 0")
	cli := testserverclient.NewTestServerClient()
	tk := testkit.NewDBTestKit(t, db)
	tk.MustExec("drop table if exists t1")
	tk.MustExec("create table t1(c1 int key, c2 int)")
	tk.MustExec("insert into t1 values(1, 10), (2, 20), (3, 30)")
	tk.MustExec(`set tidb_rc_read_check_ts = 'on';`)
	tk.MustExec(`set tx_isolation = 'READ-COMMITTED';`)
	tk.MustExec("begin pessimistic")
	// Test point get retry.
	rows := tk.MustQuery("select * from t1 where c1 = 1")
	cli.CheckRows(t, rows, "1 10")
	tk2.MustExec("update t1 set c2 = c2 + 1")
	rows = tk.MustQuery("select * from t1 where c1 = 1")
	cli.CheckRows(t, rows, "1 11")
	// Test batch point get retry.
	rows = tk.MustQuery("select * from t1 where c1 in (1, 3)")
	cli.CheckRows(t, rows, "1 11", "3 31")
	tk2.MustExec("update t1 set c2 = c2 + 1")
	rows = tk.MustQuery("select * from t1 where c1 in (1, 3)")
	cli.CheckRows(t, rows, "1 12", "3 32")
	// Test scan retry.
	rows = tk.MustQuery("select * from t1")
	cli.CheckRows(t, rows, "1 12", "2 22", "3 32")
	tk2.MustExec("update t1 set c2 = c2 + 1")
	rows = tk.MustQuery("select * from t1")
	cli.CheckRows(t, rows, "1 13", "2 23", "3 33")
	// Test reverse scan retry.
	rows = tk.MustQuery("select * from t1 order by c1 desc")
	cli.CheckRows(t, rows, "3 33", "2 23", "1 13")
	tk2.MustExec("update t1 set c2 = c2 + 1")
	rows = tk.MustQuery("select * from t1 order by c1 desc")
	cli.CheckRows(t, rows, "3 34", "2 24", "1 14")
	// Test retry caused by ongoing prewrite lock.
	// As the `defaultLockTTL` is 3s and it's difficult to change it here, the lock
	// test is implemented in the uft test cases.
}
// connEventLogs accumulates connection events delivered to the test
// extension handler. The embedded mutex guards both slices; types[i]
// and infos[i] describe the same event.
type connEventLogs struct {
	sync.Mutex
	types []extension.ConnEventTp
	infos []extension.ConnEventInfo
}
// add records one connection event: its type and a copy of its info.
func (l *connEventLogs) add(tp extension.ConnEventTp, info *extension.ConnEventInfo) {
	l.Lock()
	defer l.Unlock()
	l.types, l.infos = append(l.types, tp), append(l.infos, *info)
}
// reset truncates both logs in place, keeping the backing storage.
func (l *connEventLogs) reset() {
	l.Lock()
	defer l.Unlock()
	l.types, l.infos = l.types[:0], l.infos[:0]
}
// check runs fn while holding the mutex so the callback can inspect
// types/infos without racing concurrent add/reset calls.
func (l *connEventLogs) check(fn func()) {
	l.Lock()
	defer l.Unlock()
	fn()
}
// waitEvent polls every 100ms until the most recently logged event has type
// tp, returning nil on success or a "timeout" error after ~10 seconds.
func (l *connEventLogs) waitEvent(tp extension.ConnEventTp) error {
	totalSleep := 0
	for {
		l.Lock()
		// Guard against an empty log (e.g. right after reset): indexing
		// types[len-1] on an empty slice would panic.
		if n := len(l.types); n > 0 && l.types[n-1] == tp {
			l.Unlock()
			return nil
		}
		l.Unlock()
		if totalSleep >= 10000 {
			break
		}
		time.Sleep(time.Millisecond * 100)
		totalSleep += 100
	}
	return errors.New("timeout")
}
// TestExtensionConnEvent verifies that a registered extension session handler
// receives connection lifecycle events — connected, handshake accepted or
// rejected, disconnected — carrying the expected connection metadata for both
// a successful login and a failed one.
func TestExtensionConnEvent(t *testing.T) {
	defer extension.Reset()
	extension.Reset()
	logs := &connEventLogs{}
	require.NoError(t, extension.Register("test", extension.WithSessionHandlerFactory(func() *extension.SessionHandler {
		return &extension.SessionHandler{
			OnConnectionEvent: logs.add,
		}
	})))
	require.NoError(t, extension.Setup())
	ts := createTidbTestSuite(t)
	// createTidbTestSuite create an inner connection, so wait the previous connection closed
	require.NoError(t, logs.waitEvent(extension.ConnDisconnected))
	// test for login success
	logs.reset()
	db, err := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db.Close())
	}()
	conn, err := db.Conn(context.Background())
	require.NoError(t, err)
	defer func() {
		_ = conn.Close()
	}()
	var expectedConn2 variable.ConnectionInfo
	require.NoError(t, logs.waitEvent(extension.ConnHandshakeAccepted))
	logs.check(func() {
		require.Equal(t, []extension.ConnEventTp{
			extension.ConnConnected,
			extension.ConnHandshakeAccepted,
		}, logs.types)
		// The "connected" event fires before authentication: no user/DB yet.
		conn1 := logs.infos[0]
		require.Equal(t, "127.0.0.1", conn1.ClientIP)
		require.Equal(t, "127.0.0.1", conn1.ServerIP)
		require.Empty(t, conn1.User)
		require.Empty(t, conn1.DB)
		require.Equal(t, int(ts.Port), conn1.ServerPort)
		require.NotEqual(t, conn1.ServerPort, conn1.ClientPort)
		require.NotEmpty(t, conn1.ConnectionID)
		require.Nil(t, conn1.ActiveRoles)
		require.NoError(t, conn1.Error)
		require.Empty(t, conn1.SessionAlias)
		// The handshake-accepted event carries the same connection info plus
		// the authenticated user and database.
		expectedConn2 = *(conn1.ConnectionInfo)
		expectedConn2.User = "root"
		expectedConn2.DB = "test"
		require.Equal(t, []*auth.RoleIdentity{}, logs.infos[1].ActiveRoles)
		require.Nil(t, logs.infos[1].Error)
		require.Equal(t, expectedConn2, *(logs.infos[1].ConnectionInfo))
		require.Empty(t, logs.infos[1].SessionAlias)
	})
	// Mutate session state so the disconnect event can report roles and alias.
	_, err = conn.ExecContext(context.TODO(), "create role r1@'%'")
	require.NoError(t, err)
	_, err = conn.ExecContext(context.TODO(), "grant r1 TO root")
	require.NoError(t, err)
	_, err = conn.ExecContext(context.TODO(), "set role all")
	require.NoError(t, err)
	_, err = conn.ExecContext(context.TODO(), "set @@tidb_session_alias='alias123'")
	require.NoError(t, err)
	require.NoError(t, conn.Close())
	require.NoError(t, db.Close())
	require.NoError(t, logs.waitEvent(extension.ConnDisconnected))
	logs.check(func() {
		require.Equal(t, 3, len(logs.infos))
		require.Equal(t, 1, len(logs.infos[2].ActiveRoles))
		require.Equal(t, auth.RoleIdentity{
			Username: "r1",
			Hostname: "%",
		}, *logs.infos[2].ActiveRoles[0])
		require.Nil(t, logs.infos[2].Error)
		require.Equal(t, expectedConn2, *(logs.infos[2].ConnectionInfo))
		require.Equal(t, "alias123", logs.infos[2].SessionAlias)
	})
	// test for login failed
	logs.reset()
	cfg := mysql.NewConfig()
	cfg.User = "noexist"
	cfg.Net = "tcp"
	cfg.Addr = fmt.Sprintf("127.0.0.1:%d", ts.Port)
	cfg.DBName = "test"
	db, err = sql.Open("mysql", cfg.FormatDSN())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db.Close())
	}()
	_, err = db.Conn(context.Background())
	require.Error(t, err)
	require.NoError(t, logs.waitEvent(extension.ConnDisconnected))
	logs.check(func() {
		require.Equal(t, []extension.ConnEventTp{
			extension.ConnConnected,
			extension.ConnHandshakeRejected,
			extension.ConnDisconnected,
		}, logs.types)
		conn1 := logs.infos[0]
		require.Equal(t, "127.0.0.1", conn1.ClientIP)
		require.Equal(t, "127.0.0.1", conn1.ServerIP)
		require.Empty(t, conn1.User)
		require.Empty(t, conn1.DB)
		require.Equal(t, int(ts.Port), conn1.ServerPort)
		require.NotEqual(t, conn1.ServerPort, conn1.ClientPort)
		require.NotEmpty(t, conn1.ConnectionID)
		require.Nil(t, conn1.ActiveRoles)
		require.NoError(t, conn1.Error)
		require.Empty(t, conn1.SessionAlias)
		// The rejected handshake reports the attempted user/DB and the auth error.
		expectedConn2 = *(conn1.ConnectionInfo)
		expectedConn2.User = "noexist"
		expectedConn2.DB = "test"
		require.Equal(t, []*auth.RoleIdentity{}, logs.infos[1].ActiveRoles)
		require.EqualError(t, logs.infos[1].Error, "[server:1045]Access denied for user 'noexist'@'127.0.0.1' (using password: NO)")
		require.Equal(t, expectedConn2, *(logs.infos[1].ConnectionInfo))
		require.Empty(t, logs.infos[2].SessionAlias)
	})
}
// TestSandBoxMode verifies that while a session is in sandbox mode ordinary
// statements are rejected and only a password change (via each of the three
// ALTER/SET PASSWORD forms) exits the mode and restores normal execution.
func TestSandBoxMode(t *testing.T) {
	ts := createTidbTestSuite(t)
	qctx, err := ts.tidbdrv.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "test", nil, nil)
	require.NoError(t, err)
	_, err = Execute(context.Background(), qctx, "create user testuser;")
	require.NoError(t, err)
	// Act as testuser so "set password" / "current_user()" target it.
	qctx.Session.GetSessionVars().User = &auth.UserIdentity{Username: "testuser", AuthUsername: "testuser", AuthHostname: "%"}
	alterPwdStmts := []string{
		"set password = '1234';",
		"alter user testuser identified by '1234';",
		"alter user current_user() identified by '1234';",
	}
	for _, alterPwdStmt := range alterPwdStmts {
		require.False(t, qctx.Session.InSandBoxMode())
		_, err = Execute(context.Background(), qctx, "select 1;")
		require.NoError(t, err)
		qctx.Session.EnableSandBoxMode()
		require.True(t, qctx.Session.InSandBoxMode())
		// Ordinary statements must fail inside sandbox mode.
		_, err = Execute(context.Background(), qctx, "select 1;")
		require.Error(t, err)
		// Changing only the auth plugin does not count as a password change.
		_, err = Execute(context.Background(), qctx, "alter user testuser identified with 'mysql_native_password';")
		require.Error(t, err)
		// A real password change exits sandbox mode...
		_, err = Execute(context.Background(), qctx, alterPwdStmt)
		require.NoError(t, err)
		// ...after which normal statements work again.
		_, err = Execute(context.Background(), qctx, "select 1;")
		require.NoError(t, err)
	}
}
// See: https://github.com/pingcap/tidb/issues/40979
// Reusing memory of `chunk.Chunk` may cause some systems variable's memory value to be modified unexpectedly.
func TestChunkReuseCorruptSysVarString(t *testing.T) {
	ts := createTidbTestSuite(t)
	db, err := sql.Open("mysql", ts.GetDSN())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db.Close())
	}()
	conn, err := db.Conn(context.Background())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, conn.Close())
	}()
	// Check the query error BEFORE consuming the result set: on error rs may
	// be nil, and the original order (ts.Rows first) would dereference it.
	rs, err := conn.QueryContext(context.Background(), "show tables in test")
	require.NoError(t, err)
	ts.Rows(t, rs)
	// Set @@time_zone from a subquery result so its string value lives in a
	// reusable chunk (the condition the referenced issue is about).
	_, err = conn.ExecContext(context.Background(), "set @@time_zone=(select 'Asia/Shanghai')")
	require.NoError(t, err)
	// Run another query to recycle chunk memory.
	rs, err = conn.QueryContext(context.Background(), "select TIDB_TABLE_ID from information_schema.tables where TABLE_SCHEMA='aaaa'")
	require.NoError(t, err)
	ts.Rows(t, rs)
	rs, err = conn.QueryContext(context.Background(), "select @@time_zone")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, rs.Close())
	}()
	// The variable's value must be intact despite the chunk reuse above.
	rows := ts.Rows(t, rs)
	require.Equal(t, 1, len(rows))
	require.Equal(t, "Asia/Shanghai", rows[0])
}
// mockProxyProtocolProxy is a test TCP proxy that prepends a PROXY protocol
// v2 header (advertising clientAddr as the originating client) before piping
// traffic between its frontend listener and the backend.
type mockProxyProtocolProxy struct {
	frontend      string // address the proxy listens on
	backend       string // upstream address (tcp, or a unix socket when backendIsSock)
	clientAddr    string // client address announced in the PROXY header
	backendIsSock bool   // dial the backend over a unix socket instead of tcp
	ln            net.Listener
	run           atomic.Bool // accept-loop running flag
}
// newMockProxyProtocolProxy builds an unstarted proxy that will listen on
// frontend, forward to backend, and advertise clientAddr via PROXY protocol.
// The listener is created later by Run.
func newMockProxyProtocolProxy(frontend, backend, clientAddr string, backendIsSock bool) *mockProxyProtocolProxy {
	p := &mockProxyProtocolProxy{
		frontend:      frontend,
		backend:       backend,
		clientAddr:    clientAddr,
		backendIsSock: backendIsSock,
	}
	return p
}
// ListenAddr returns the address the frontend listener is bound to.
// It is only valid after Run has created the listener.
func (p *mockProxyProtocolProxy) ListenAddr() net.Addr {
	return p.ln.Addr()
}
// Run starts the frontend listener and accepts connections until Close is
// called or Accept fails; each accepted connection is served by onConn in
// its own goroutine.
func (p *mockProxyProtocolProxy) Run() (err error) {
	p.run.Store(true)
	p.ln, err = net.Listen("tcp", p.frontend)
	if err != nil {
		return err
	}
	for p.run.Load() {
		conn, err := p.ln.Accept()
		if err != nil {
			// Listener closed (or failed): stop serving.
			break
		}
		go p.onConn(conn)
	}
	return nil
}
// Close stops the accept loop and closes the listener if one was created.
func (p *mockProxyProtocolProxy) Close() error {
	p.run.Store(false)
	if p.ln == nil {
		return nil
	}
	return p.ln.Close()
}
// connectToBackend dials the configured backend, over a unix socket when
// backendIsSock is set and tcp otherwise.
func (p *mockProxyProtocolProxy) connectToBackend() (net.Conn, error) {
	network := "tcp"
	if p.backendIsSock {
		network = "unix"
	}
	return net.Dial(network, p.backend)
}
// onConn bridges one accepted frontend connection to the backend, sending a
// PROXY protocol v2 header before piping data in both directions.
func (p *mockProxyProtocolProxy) onConn(conn net.Conn) {
	bconn, err := p.connectToBackend()
	if err != nil {
		conn.Close()
		fmt.Println(err)
		// BUG FIX: must stop here — the original fell through with a nil
		// bconn, panicking on the deferred Close and the Write below.
		return
	}
	defer bconn.Close()
	ppHeader := p.generateProxyProtocolHeaderV2("tcp4", p.clientAddr, p.frontend)
	bconn.Write(ppHeader)
	p.proxyPipe(conn, bconn)
}
// proxyPipe copies data bidirectionally between p1 and p2 and returns —
// closing both ends — as soon as either direction terminates.
func (p *mockProxyProtocolProxy) proxyPipe(p1, p2 io.ReadWriteCloser) {
	defer p1.Close()
	defer p2.Close()
	// One signal channel per copy direction.
	done1 := make(chan struct{})
	done2 := make(chan struct{})
	go func() {
		io.Copy(p1, p2)
		close(done1)
	}()
	go func() {
		io.Copy(p2, p1)
		close(done2)
	}()
	// Wait for whichever direction finishes first.
	select {
	case <-done1:
	case <-done2:
	}
}
// generateProxyProtocolHeaderV2 builds a PROXY protocol v2 header announcing
// srcAddr/dstAddr for network "tcp4" or "tcp6". For any other network the
// zero-padded buffer containing only the signature is returned.
func (p *mockProxyProtocolProxy) generateProxyProtocolHeaderV2(network, srcAddr, dstAddr string) []byte {
	var (
		proxyProtocolV2Sig = []byte{0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A}
		v2CmdPos           = 12
		v2FamlyPos         = 13
	)
	saddr, _ := net.ResolveTCPAddr(network, srcAddr)
	daddr, _ := net.ResolveTCPAddr(network, dstAddr)
	buffer := make([]byte, 1024)
	copy(buffer, proxyProtocolV2Sig)
	// Version 2, command PROXY.
	buffer[v2CmdPos] = 0x21
	// Address family + transport, then length, src/dst address, src/dst port.
	if network == "tcp4" {
		buffer[v2FamlyPos] = 0x11 // AF_INET, STREAM
		binary.BigEndian.PutUint16(buffer[14:14+2], 12)
		copy(buffer[16:16+4], []byte(saddr.IP.To4()))
		copy(buffer[20:20+4], []byte(daddr.IP.To4()))
		binary.BigEndian.PutUint16(buffer[24:24+2], uint16(saddr.Port))
		// BUG FIX: the destination port must come from daddr, not saddr.
		binary.BigEndian.PutUint16(buffer[26:26+2], uint16(daddr.Port))
		return buffer[0:28]
	} else if network == "tcp6" {
		buffer[v2FamlyPos] = 0x21 // AF_INET6, STREAM
		binary.BigEndian.PutUint16(buffer[14:14+2], 36)
		copy(buffer[16:16+16], []byte(saddr.IP.To16()))
		copy(buffer[32:32+16], []byte(daddr.IP.To16()))
		binary.BigEndian.PutUint16(buffer[48:48+2], uint16(saddr.Port))
		// BUG FIX: destination port from daddr here as well.
		binary.BigEndian.PutUint16(buffer[50:50+2], uint16(daddr.Port))
		return buffer[0:52]
	}
	return buffer
}
// TestProxyProtocolWithIpFallbackable runs the server with PROXY protocol
// enabled and Fallbackable=true: a connection through the mock proxy reports
// the spoofed client address in PROCESSLIST, while a direct connection
// (no PROXY header) is still accepted and reports its real address.
func TestProxyProtocolWithIpFallbackable(t *testing.T) {
	cfg := util2.NewTestConfig()
	cfg.Port = 4999
	cfg.Status.ReportStatus = false
	// Setup proxy protocol config
	cfg.ProxyProtocol.Networks = "*"
	cfg.ProxyProtocol.Fallbackable = true
	ts := createTidbTestSuite(t)
	// Prepare Server
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	server.SetDomain(ts.domain)
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	time.Sleep(time.Millisecond * 100)
	defer func() {
		server.Close()
	}()
	require.NotNil(t, server.Listener())
	require.Nil(t, server.Socket())
	// Prepare Proxy
	ppProxy := newMockProxyProtocolProxy("127.0.0.1:5000", "127.0.0.1:4999", "192.168.1.2:60055", false)
	go func() {
		ppProxy.Run()
	}()
	time.Sleep(time.Millisecond * 100)
	defer func() {
		ppProxy.Close()
	}()
	cli := testserverclient.NewTestServerClient()
	cli.Port = testutil.GetPortFromTCPAddr(ppProxy.ListenAddr())
	cli.WaitUntilServerCanConnect()
	// Through the proxy: the advertised client address must show up.
	cli.RunTests(t,
		func(config *mysql.Config) {
			config.User = "root"
		},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("SHOW PROCESSLIST;")
			records := cli.Rows(t, rows)
			require.Contains(t, records[0], "192.168.1.2:60055")
		},
	)
	// Directly to the server port: the fallback accepts the plain connection.
	cli2 := testserverclient.NewTestServerClient()
	cli2.Port = 4999
	cli2.RunTests(t,
		func(config *mysql.Config) {
			config.User = "root"
		},
		func(dbt *testkit.DBTestKit) {
			rows := dbt.MustQuery("SHOW PROCESSLIST;")
			// NOTE(review): this uses cli.Rows rather than cli2.Rows — looks
			// like a copy-paste slip; confirm Rows is client-state independent.
			records := cli.Rows(t, rows)
			require.Contains(t, records[0], "127.0.0.1:")
		},
	)
}
// TestProxyProtocolWithIpNoFallbackable runs the server with PROXY protocol
// required (Fallbackable=false) and checks that a direct MySQL connection
// without a PROXY header fails to connect.
func TestProxyProtocolWithIpNoFallbackable(t *testing.T) {
	cfg := util2.NewTestConfig()
	cfg.Port = 0
	cfg.Status.ReportStatus = false
	// Setup proxy protocol config
	cfg.ProxyProtocol.Networks = "*"
	cfg.ProxyProtocol.Fallbackable = false
	ts := createTidbTestSuite(t)
	// Prepare Server
	server, err := server2.NewServer(cfg, ts.tidbdrv)
	require.NoError(t, err)
	server.SetDomain(ts.domain)
	go func() {
		err := server.Run()
		require.NoError(t, err)
	}()
	time.Sleep(time.Millisecond * 1000)
	defer func() {
		server.Close()
	}()
	require.NotNil(t, server.Listener())
	require.Nil(t, server.Socket())
	cli := testserverclient.NewTestServerClient()
	cli.Port = testutil.GetPortFromTCPAddr(server.ListenAddr())
	dsn := cli.GetDSN(func(config *mysql.Config) {
		config.User = "root"
		config.DBName = "test"
	})
	db, err := sql.Open("mysql", dsn)
	require.Nil(t, err)
	// Without a PROXY header the handshake must be rejected.
	err = db.Ping()
	require.NotNil(t, err)
	db.Close()
}
|
package httpexpect
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"github.com/valyala/fasthttp"
)
// Binder implements networkless http.RoundTripper attached directly to
// http.Handler.
//
// Binder emulates network communication by invoking given http.Handler
// directly. It passes httptest.ResponseRecorder as http.ResponseWriter
// to the handler, and then constructs http.Response from recorded data.
type Binder struct {
handler http.Handler
}
// NewBinder returns a new Binder given a http.Handler.
//
// Example:
// client := &http.Client{
// Transport: NewBinder(handler),
// }
func NewBinder(handler http.Handler) Binder {
return Binder{handler}
}
// RoundTrip implements http.RoundTripper.RoundTrip.
func (binder Binder) RoundTrip(req *http.Request) (*http.Response, error) {
if req.Proto == "" {
req.Proto = fmt.Sprintf("HTTP/%d.%d", req.ProtoMajor, req.ProtoMinor)
}
if req.Body != nil {
if req.ContentLength == -1 {
req.TransferEncoding = []string{"chunked"}
}
} else {
req.Body = ioutil.NopCloser(bytes.NewReader(nil))
}
recorder := httptest.NewRecorder()
binder.handler.ServeHTTP(recorder, req)
resp := http.Response{
Request: req,
StatusCode: recorder.Code,
Status: http.StatusText(recorder.Code),
Header: recorder.HeaderMap,
}
if recorder.Flushed {
resp.TransferEncoding = []string{"chunked"}
}
if recorder.Body != nil {
resp.Body = ioutil.NopCloser(recorder.Body)
}
return &resp, nil
}
// FastBinder implements networkless http.RoundTripper attached directly
// to fasthttp.RequestHandler.
//
// FastBinder emulates network communication by invoking the given handler
// directly. It converts http.Request to fasthttp.Request, invokes the
// handler, and then converts fasthttp.Response to http.Response.
type FastBinder struct {
	handler fasthttp.RequestHandler
}

// NewFastBinder returns a new FastBinder given a fasthttp.RequestHandler.
//
// Example:
//  client := &http.Client{
//      Transport: NewFastBinder(fasthandler),
//  }
func NewFastBinder(handler fasthttp.RequestHandler) FastBinder {
	return FastBinder{handler}
}
// RoundTrip implements http.RoundTripper.RoundTrip.
func (binder FastBinder) RoundTrip(stdreq *http.Request) (*http.Response, error) {
	var fastreq fasthttp.Request
	convertRequest(stdreq, &fastreq)
	var ctx fasthttp.RequestCtx
	ctx.Init(&fastreq, nil, nil)
	// Propagate the body length; -1 (unknown) is sent as chunked.
	if stdreq.ContentLength >= 0 {
		ctx.Request.Header.SetContentLength(int(stdreq.ContentLength))
	} else {
		ctx.Request.Header.Add("Transfer-Encoding", "chunked")
	}
	if stdreq.Body != nil {
		b, err := ioutil.ReadAll(stdreq.Body)
		if err == nil {
			ctx.Request.SetBody(b)
		}
		// NOTE(review): a body read error is silently ignored and the request
		// proceeds with an empty body — confirm this is intentional.
	}
	binder.handler(&ctx)
	return convertResponse(stdreq, &ctx.Response), nil
}
// convertRequest copies method, URL, and headers from a net/http request
// into a fasthttp request. For each header key, the first value replaces
// any default (Set) and subsequent values are appended (Add).
func convertRequest(stdreq *http.Request, fastreq *fasthttp.Request) {
	fastreq.SetRequestURI(stdreq.URL.String())
	fastreq.Header.SetMethod(stdreq.Method)
	for k, a := range stdreq.Header {
		for n, v := range a {
			if n == 0 {
				fastreq.Header.Set(k, v)
			} else {
				fastreq.Header.Add(k, v)
			}
		}
	}
}
// convertResponse builds a net/http response from a fasthttp response,
// copying status, headers, and body. A fasthttp content length of -1 maps
// to chunked transfer encoding.
func convertResponse(stdreq *http.Request, fastresp *fasthttp.Response) *http.Response {
	status := fastresp.Header.StatusCode()
	body := fastresp.Body()
	stdresp := &http.Response{
		Request:    stdreq,
		StatusCode: status,
		Status:     http.StatusText(status),
	}
	fastresp.Header.VisitAll(func(k, v []byte) {
		sk := string(k)
		sv := string(v)
		// Lazily allocate the header map on the first header seen.
		if stdresp.Header == nil {
			stdresp.Header = make(http.Header)
		}
		stdresp.Header.Add(sk, sv)
	})
	if fastresp.Header.ContentLength() == -1 {
		stdresp.TransferEncoding = []string{"chunked"}
	}
	// Always return a non-nil body so callers can read and close it safely.
	if body != nil {
		stdresp.Body = ioutil.NopCloser(bytes.NewReader(body))
	} else {
		stdresp.Body = ioutil.NopCloser(bytes.NewReader(nil))
	}
	return stdresp
}
|
// Copyright © 2018 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/FScoward/paper-sync/cmd"
)
// main delegates to the cmd package's command dispatcher. The commented-out
// lines are leftover manual experiments against the drobox client and are
// kept for reference.
func main() {
	//client := new(http.Client)
	//fmt.Println(string(drobox.GetDocIdList(client)))
	cmd.Execute()
	//response, _ := drobox.DownloadDoc(client, "TTriQUccwfBTxlqEqSOlX", "markdown")
	//response.Save()
	//fmt.Println(response.Body)
	//_, err := drobox.UpdateDoc(client, "./README.md", "TTriQUccwfBTxlqEqSOlX", "overwrite_all", 48, "markdown")
	//fmt.Println(err)
}
|
/*
* @lc app=leetcode.cn id=1323 lang=golang
*
* [1323] 6 和 9 组成的最大数字
*/
package main
// @lc code=start
// maximum69Number returns the largest number obtainable from num (whose
// digits are assumed to be only 6s and 9s) by changing at most one digit:
// flip the most significant 6 to a 9, which adds 3*10^position.
//
// Generalized: the starting divider is computed from num instead of being
// hard-coded to 1000, so inputs with more than four digits (e.g. 66666)
// are handled correctly too; behavior for 1..9999 is unchanged.
func maximum69Number(num int) int {
	// Find the highest power of ten not exceeding num.
	divider := 1
	for divider*10 <= num {
		divider *= 10
	}
	// Scan digits from most to least significant; flip the first 6 found.
	for divider > 0 {
		if num/divider%10 == 6 {
			return num + 3*divider
		}
		divider /= 10
	}
	return num
}
// func main() {
// fmt.Println(maximum69Number(9669))
// fmt.Println(maximum69Number(9996))
// fmt.Println(maximum69Number(9999))
// }
// @lc code=end
|
package entity
import "time"
// User is a registered account.
type User struct {
	Id         int32
	Name       string
	Password   string
	Email      string
	CreateTime time.Time
	Sign       string `orm:"type(text)"` // personal signature
	Role       int32  // role: 1 = author, 2 = guest
	UpdateTime time.Time
}

// UserSetting stores per-user profile settings; Id equals the user's Id.
type UserSetting struct {
	Id         int32  // userId
	Photo      string `orm:"type(text)"` // avatar, stored base64-encoded
	SelfInfo   string `orm:"type(text)"` // self introduction
	HomePage   string `orm:"type(text)"` // personal home page
	CreateTime time.Time
	UpdateTime time.Time
	Contract   string // NOTE(review): possibly meant "Contact" — confirm
}

// Blog is an article.
type Blog struct {
	Id         int32
	Title      string
	CreateTime time.Time
	VisitCount int64
	Context    string `orm:"type(text)"` // article body
	Tag        int32
	Words      int32
	CommentNum int32
	UserId     int32
	CoverPic   string
	Summary    string `orm:"type(text)"`
	Category   int32 // 1: article, 2: status post — reserved, currently unused
	Recommend  int32 // recommendation score
	UpdateTime time.Time
}

// SimpleBlog is a condensed article view for listings.
type SimpleBlog struct {
	Id         int32
	Title      string
	CreateTime time.Time
	VisitCount int64
	Desc       string
	UserId     int32
}

// Comment is a user comment attached to some object.
type Comment struct {
	Id           int32
	FCId         int32 // parent (father) comment Id
	OriginalId   int32 // Id of the commented object (an article or another type)
	OriginalType int32 // type of the commented object: 1 = article
	User         int32
	ToUser       int32
	Remark       string `orm:"type(text)"` // comment text
	CreateTime   time.Time
}

// Image is an uploaded picture.
type Image struct {
	Id         int32
	Path       string
	UId        string
	CreateTime time.Time
}

// Tag is a content tag.
type Tag struct {
	Id         int32
	Name       string
	CreateTime time.Time
}

// UpdateDiary is a developer changelog entry.
type UpdateDiary struct {
	Id         int32
	CreateTime time.Time
	Content    string `orm:"type(text)"`
}
|
package office
// login is the JSON request body for the login call.
type login struct {
	UserName string `json:"userName"`
	Password string `json:"password"`
}

// loginRes is the JSON response envelope of the login call.
type loginRes struct {
	Count      int `json:"count"`
	Status     int `json:"status"`
	StatusCode int `json:"statuscode"`
	Response   struct {
		Token   string `json:"token"`
		Expires string `json:"expires"`
		Sms     bool   `json:"sms"`
	} `json:"response"`
}

// uploadRes is the JSON response envelope of the upload call.
type uploadRes struct {
	Count      int `json:"count"`
	Status     int `json:"status"`
	StatusCode int `json:"statuscode"`
	Response   struct {
		Id    int    `json:"id"`
		Title string `json:"title"`
	} `json:"response"`
}

// shareRes is the JSON response envelope of the share call; Response
// carries the payload as a plain string.
type shareRes struct {
	Count      int    `json:"count"`
	Status     int    `json:"status"`
	StatusCode int    `json:"statuscode"`
	Response   string `json:"response"`
}
|
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package raftstore
import (
"sync"
"sync/atomic"
"time"
"github.com/pingcap/tidb/store/mockstore/unistore/metrics"
)
// peerState contains the peer states that needs to run raft command and apply command.
// It binds to a worker to make sure the commands are always executed on a same goroutine.
type peerState struct {
	closed uint32 // NOTE(review): appears to be an atomic closed flag — usage not visible here
	peer   *peerFsm
	apply  *applier
}
// applyBatch carries one raft-loop iteration's output to the apply worker:
// the apply messages to process, the peers touched in this round, and the
// proposals collected from them.
type applyBatch struct {
	msgs      []Msg
	peers     map[uint64]*peerState
	proposals []*regionProposal
}
// iterCallbacks invokes f on every non-nil proposal callback in the batch.
func (b *applyBatch) iterCallbacks(f func(cb *Callback)) {
	for _, regionProps := range b.proposals {
		for _, prop := range regionProps.Props {
			if cb := prop.cb; cb != nil {
				f(cb)
			}
		}
	}
}
// raftWorker is responsible for run raft commands and apply raft logs.
type raftWorker struct {
	pr                *router
	raftCh            chan Msg         // inbound raft messages
	raftCtx           *RaftContext     // shared per-iteration raft state
	raftStartTime     time.Time        // start of the current batch, used for callback timing
	applyCh           chan *applyBatch // batches handed to the apply side (nil signals shutdown)
	applyResCh        chan Msg         // apply results fed back into the raft loop
	applyCtx          *applyContext
	msgCnt            uint64 // total processed messages, updated atomically
	movePeerCandidate uint64 // candidate peer for rebalancing to another worker, stored atomically
}
// newRaftWorker creates a raftWorker that consumes raft messages from ch and
// routes peers through pm. The apply-result channel is sized like ch so apply
// results can buffer back into the raft loop without blocking.
func newRaftWorker(ctx *GlobalContext, ch chan Msg, pm *router) *raftWorker {
	raftCtx := &RaftContext{
		GlobalContext: ctx,
		applyMsgs:     new(applyMsgs),
		queuedSnaps:   make(map[uint64]struct{}),
		kvWB:          new(WriteBatch),
		raftWB:        new(WriteBatch),
		localStats:    new(storeStats),
	}
	applyResCh := make(chan Msg, cap(ch))
	return &raftWorker{
		raftCh:     ch,
		applyResCh: applyResCh,
		raftCtx:    raftCtx,
		pr:         pm,
		applyCh:    make(chan *applyBatch, 1),
		applyCtx:   newApplyContext("", ctx.regionTaskSender, ctx.engine, applyResCh, ctx.cfg),
	}
}
// run runs raft commands.
// On each loop, raft commands are batched by channel buffer.
// After commands are handled, we collect apply messages by peers, make a applyBatch, send it to apply channel.
func (rw *raftWorker) run(closeCh <-chan struct{}, wg *sync.WaitGroup) {
	defer wg.Done()
	timeTicker := time.NewTicker(rw.raftCtx.cfg.RaftBaseTickInterval)
	var msgs []Msg
	for {
		// Zero out last iteration's entries before reslicing so referenced
		// data can be garbage collected, then reuse the backing array.
		for i := range msgs {
			msgs[i] = Msg{}
		}
		msgs = msgs[:0]
		select {
		case <-closeCh:
			// Propagate shutdown to the apply worker via a nil batch.
			rw.applyCh <- nil
			return
		case msg := <-rw.raftCh:
			msgs = append(msgs, msg)
		case msg := <-rw.applyResCh:
			msgs = append(msgs, msg)
		case <-timeTicker.C:
			// Periodic tick: enqueue a tick message for every known peer.
			rw.pr.peers.Range(func(key, value interface{}) bool {
				msgs = append(msgs, NewPeerMsg(MsgTypeTick, key.(uint64), nil))
				return true
			})
		}
		// Drain whatever else is already buffered so one iteration handles
		// as large a batch as possible.
		pending := len(rw.raftCh)
		for i := 0; i < pending; i++ {
			msgs = append(msgs, <-rw.raftCh)
		}
		resLen := len(rw.applyResCh)
		for i := 0; i < resLen; i++ {
			msgs = append(msgs, <-rw.applyResCh)
		}
		metrics.RaftBatchSize.Observe(float64(len(msgs)))
		atomic.AddUint64(&rw.msgCnt, uint64(len(msgs)))
		peerStateMap := make(map[uint64]*peerState)
		rw.raftCtx.pendingCount = 0
		rw.raftCtx.hasReady = false
		rw.raftStartTime = time.Now()
		batch := &applyBatch{
			peers: peerStateMap,
		}
		// Dispatch each message to its region's peer handler.
		for _, msg := range msgs {
			peerState := rw.getPeerState(peerStateMap, msg.RegionID)
			newRaftMsgHandler(peerState.peer, rw.raftCtx).HandleMsgs(msg)
		}
		// Collect proposals from every peer touched in this round.
		var movePeer uint64
		for id, peerState := range peerStateMap {
			movePeer = id
			batch.proposals = newRaftMsgHandler(peerState.peer, rw.raftCtx).HandleRaftReadyAppend(batch.proposals)
		}
		// Pick one peer as the candidate to be moved to other workers.
		atomic.StoreUint64(&rw.movePeerCandidate, movePeer)
		if rw.raftCtx.hasReady {
			rw.handleRaftReady(peerStateMap, batch)
		}
		rw.raftCtx.flushLocalStats()
		// Stamp raft begin/done times on every proposal callback.
		doneRaftTime := time.Now()
		batch.iterCallbacks(func(cb *Callback) {
			cb.raftBeginTime = rw.raftStartTime
			cb.raftDoneTime = doneRaftTime
		})
		// Move the accumulated apply messages into the batch, zeroing the
		// source entries (for GC) before reslicing for reuse.
		applyMsgs := rw.raftCtx.applyMsgs
		batch.msgs = append(batch.msgs, applyMsgs.msgs...)
		for i := range applyMsgs.msgs {
			applyMsgs.msgs[i] = Msg{}
		}
		applyMsgs.msgs = applyMsgs.msgs[:0]
		rw.removeQueuedSnapshots()
		rw.applyCh <- batch
	}
}
// getPeerState returns the peerState for regionID, consulting the per-batch
// cache first and falling back to the router (caching the result).
func (rw *raftWorker) getPeerState(peersMap map[uint64]*peerState, regionID uint64) *peerState {
	if cached, ok := peersMap[regionID]; ok {
		return cached
	}
	ps := rw.pr.get(regionID)
	peersMap[regionID] = ps
	return ps
}
// handleRaftReady forwards batched proposals to the apply side, persists the
// accumulated KV and raft write batches, and runs post-persistence handling
// for every peer that produced a ready state.
func (rw *raftWorker) handleRaftReady(peers map[uint64]*peerState, batch *applyBatch) {
	// Queue each proposal as an apply message keyed by its region.
	for _, proposal := range batch.proposals {
		msg := Msg{Type: MsgTypeApplyProposal, Data: proposal}
		rw.raftCtx.applyMsgs.appendMsg(proposal.RegionID, msg)
	}
	// Persist the KV write batch first, then the raft write batch; a write
	// failure here is unrecoverable, hence the panic.
	kvWB := rw.raftCtx.kvWB
	if len(kvWB.entries) > 0 {
		err := kvWB.WriteToKV(rw.raftCtx.engine.kv)
		if err != nil {
			panic(err)
		}
		kvWB.Reset()
	}
	raftWB := rw.raftCtx.raftWB
	if len(raftWB.entries) > 0 {
		err := raftWB.WriteToRaft(rw.raftCtx.engine.raft)
		if err != nil {
			panic(err)
		}
		raftWB.Reset()
	}
	// Take ownership of the ready results and clear the shared slice before
	// running per-peer post-persistence callbacks.
	readyRes := rw.raftCtx.ReadyRes
	rw.raftCtx.ReadyRes = nil
	if len(readyRes) > 0 {
		for _, pair := range readyRes {
			regionID := pair.IC.RegionID
			newRaftMsgHandler(peers[regionID].peer, rw.raftCtx).PostRaftReadyPersistent(&pair.Ready, pair.IC)
		}
	}
	// Mark the context busy when one raft pass took longer than an election
	// timeout; the flag is sticky until reset elsewhere.
	dur := time.Since(rw.raftStartTime)
	if !rw.raftCtx.isBusy {
		electionTimeout := rw.raftCtx.cfg.RaftBaseTickInterval * time.Duration(rw.raftCtx.cfg.RaftElectionTimeoutTicks)
		if dur > electionTimeout {
			rw.raftCtx.isBusy = true
		}
	}
}
// removeQueuedSnapshots drops regions whose snapshots were queued this pass
// from the store's pending-snapshot list, then clears the queue.
func (rw *raftWorker) removeQueuedSnapshots() {
	queued := rw.raftCtx.queuedSnaps
	if len(queued) == 0 {
		return
	}
	rw.raftCtx.storeMetaLock.Lock()
	meta := rw.raftCtx.storeMeta
	// Filter in place, reusing the slice's backing array.
	kept := meta.pendingSnapshotRegions[:0]
	for _, region := range meta.pendingSnapshotRegions {
		if _, isQueued := queued[region.Id]; !isQueued {
			kept = append(kept, region)
		}
	}
	meta.pendingSnapshotRegions = kept
	rw.raftCtx.storeMetaLock.Unlock()
	rw.raftCtx.queuedSnaps = map[uint64]struct{}{}
}
// applyWorker consumes applyBatch values produced by the raft worker and
// applies the messages they carry.
type applyWorker struct {
	r   *router          // fallback lookup for peers missing from a batch's peer map
	ch  chan *applyBatch // batches from the raft worker; a nil batch signals shutdown
	ctx *applyContext    // shared apply state, flushed after every batch
}
// newApplyWorker builds an applyWorker over the given router, batch channel,
// and apply context.
func newApplyWorker(r *router, ch chan *applyBatch, ctx *applyContext) *applyWorker {
	w := new(applyWorker)
	w.r = r
	w.ch = ch
	w.ctx = ctx
	return w
}
// run runs apply tasks, since it is already batched by raftCh, we don't need
// to batch it here. A nil batch received on the channel terminates the loop.
func (aw *applyWorker) run(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		b := <-aw.ch
		if b == nil {
			return
		}
		// Stamp every callback with the moment apply processing started.
		start := time.Now()
		b.iterCallbacks(func(cb *Callback) {
			cb.applyBeginTime = start
		})
		// Reset each peer's redo index to just past its applied index.
		for _, ps := range b.peers {
			ps.apply.redoIndex = ps.apply.applyState.appliedIndex + 1
		}
		for _, m := range b.msgs {
			target := b.peers[m.RegionID]
			if target == nil {
				// Peer not in the batch map yet; fetch it from the router
				// and cache it for the remaining messages.
				target = aw.r.get(m.RegionID)
				b.peers[m.RegionID] = target
			}
			target.apply.handleTask(aw.ctx, m)
		}
		aw.ctx.flush()
	}
}
// storeWorker runs store commands.
type storeWorker struct {
	// store dispatches store-level messages and periodic store ticks.
	store *storeMsgHandler
}
// newStoreWorker builds the store worker around the store FSM held by r.
func newStoreWorker(ctx *GlobalContext, r *router) *storeWorker {
	snapCount := new(uint64)
	storeCtx := &StoreContext{
		GlobalContext:     ctx,
		applyingSnapCount: snapCount,
	}
	return &storeWorker{store: newStoreFsmDelegate(r.storeFsm, storeCtx)}
}
// run dispatches store-level messages and periodic store ticks until closeCh
// is closed.
func (sw *storeWorker) run(closeCh <-chan struct{}, wg *sync.WaitGroup) {
	defer wg.Done()
	timeTicker := time.NewTicker(sw.store.ctx.cfg.RaftBaseTickInterval)
	// Fix: stop the ticker on exit; previously it was never released and kept
	// firing after the worker returned.
	defer timeTicker.Stop()
	storeTicker := sw.store.ticker
	for {
		var msg Msg
		select {
		case <-closeCh:
			return
		case <-timeTicker.C:
			storeTicker.tickClock()
			for i := range storeTicker.schedules {
				if storeTicker.isOnStoreTick(StoreTick(i)) {
					sw.store.handleMsg(NewMsg(MsgTypeStoreTick, StoreTick(i)))
				}
			}
		case msg = <-sw.store.receiver:
		}
		// NOTE(review): after the ticker branch, msg is still the zero Msg and
		// is passed to handleMsg below — presumably the zero MsgType is a
		// no-op; confirm before changing this flow.
		sw.store.handleMsg(msg)
	}
}
|
package main
import (
"github.com/felipeagger/go-redis/cache"
)
// init eagerly connects one single-node Redis client (port 6379) and one
// cluster client (port 7005), both without a password.
// NOTE(review): init() performs network side effects at import time —
// presumably acceptable for this demo binary; errors from the Init calls
// (if any are reported) are not visible here.
func init() {
	cache.InitCacheClientSvc("0.0.0.0", "6379", "")
	cache.InitCacheClusterClientSvc("0.0.0.0", "7005", "")
}
func main() {
cache.GetCacheClient().HSet("test", "key", "value")
cache.GetCacheClusterClient().HSet("test", "key", "value")
cache.GetCacheClient().HGet("test", "key")
cache.GetCacheClusterClient().HGet("test", "key")
} |
package orm
import (
"laravel-go/pkg/orm/config"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// NewMysqlConn opens a gorm MySQL connection built from the given connection
// parameters. It panics if the connection cannot be established.
func NewMysqlConn(conn config.ConnParam) *gorm.DB {
	credentials := conn.Username + ":" + conn.Password
	address := "tcp(" + conn.Host + ":" + conn.Port + ")"
	options := "?charset=utf8mb4&parseTime=True&loc=Local"
	dsn := credentials + "@" + address + "/" + conn.Database + options
	database, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	return database
}
|
package chip
import (
"fmt"
"log"
"os"
"time"
"github.com/veandco/go-sdl2/sdl"
)
// Interpreter state: CHIP-8 has 4 KiB of memory and 16 8-bit registers.
var memory [4096]byte
var register [16]byte
var opCode uint16         // currently fetched 16-bit opcode
var index uint16          // index register (I)
var delayTimer byte       // counts down at timerFrequency
var soundTimer byte       // counts down at timerFrequency; "beeps" when it hits 0
var programCounter uint16 // address of the next opcode to fetch
var stackPointer uint8
var stack [16]uint16 // return-address stack
var pixel [][]bool   // 64x32 monochrome display buffer (allocated in Init)
var running = true
var redraw = false // set when the display needs repainting
var ticker *time.Ticker
// Wall-clock bookkeeping used to convert elapsed time into timer decrements
// and opcode executions.
var lastTimeTimer time.Time
var lastTimeOperation time.Time
var timePassedTimer = int64(0)     // accumulated ns not yet consumed by timerCycle
var timePassedOperation = int64(0) // accumulated ns not yet consumed by operationCycle
var timerFrequency = int64(60)        // timer ticks per second
var operationFrequency = int64(2000)  // opcodes executed per second
// startPointer is the conventional CHIP-8 program load address.
const startPointer = 0x200
// fontData holds the 16 built-in hex-digit sprites, 5 bytes (rows) per glyph,
// loaded at address 0 by Init.
var fontData = [80]byte{
	0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
	0x20, 0x60, 0x20, 0x20, 0x70, // 1
	0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
	0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
	0x90, 0x90, 0xF0, 0x10, 0x10, // 4
	0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
	0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
	0xF0, 0x10, 0x20, 0x40, 0x40, // 7
	0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
	0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
	0xF0, 0x90, 0xF0, 0x90, 0x90, // A
	0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
	0xF0, 0x80, 0x80, 0x80, 0xF0, // C
	0xE0, 0x90, 0x90, 0x90, 0xE0, // D
	0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
	0xF0, 0x80, 0xF0, 0x80, 0x80, // F
}
// Init resets the full interpreter state (memory, registers, timers, stack,
// display), installs the font sprites at address 0, and records the key
// mapping supplied by the host.
func Init(codes [16]sdl.Scancode) {
	// Zero all of memory, then lay the font sprites down at the start.
	memory = [4096]byte{}
	copy(memory[:], fontData[:])
	register = [16]byte{}
	stack = [16]uint16{}
	opCode = 0
	index = 0
	delayTimer = 0
	soundTimer = 0
	programCounter = startPointer
	stackPointer = 0
	// Fresh 64x32 display buffer, all pixels off.
	pixel = make([][]bool, 64)
	for col := range pixel {
		pixel[col] = make([]bool, 32)
	}
	for i, code := range codes {
		keyCodes[i] = code
	}
	lastTimeTimer = time.Now()
	lastTimeOperation = time.Now()
}
// ReadRom loads the ROM at path into interpreter memory starting at
// startPointer. It aborts the process on an I/O error or if the ROM does not
// fit into the address space.
func ReadRom(path string) {
	// os.ReadFile (Go 1.16+) reads the whole file; the previous single
	// file.Read call could legally return a short read.
	data, err := os.ReadFile(path)
	if err != nil {
		log.Fatal("file read error", err)
	}
	// Memory spans indices 0..4095, so startPointer..4095 holds exactly
	// 4096-startPointer bytes. The previous capacity of 4095-startPointer was
	// off by one and also truncated oversized ROMs silently.
	const capacity = 4096 - startPointer
	if len(data) > capacity {
		log.Fatal("rom too large: ", len(data), " bytes, capacity ", capacity)
	}
	copy(memory[startPointer:], data)
	fmt.Println("Rom loaded ", len(data), " bytes")
}
// CycleEmulation runs one emulation step: it executes any due opcodes, then
// advances the delay/sound timers. It reports whether the display changed and
// needs repainting.
func CycleEmulation() bool {
	operationCycle()
	timerCycle()
	return redraw
}
// timerCycle decrements the delay and sound timers at timerFrequency Hz,
// catching up on however much wall-clock time elapsed since the last call.
func timerCycle() {
	// Idiom fix: time.Since(t) replaces time.Now().Sub(t).
	timePassedTimer += time.Since(lastTimeTimer).Nanoseconds()
	lastTimeTimer = time.Now()
	// Nanoseconds per timer tick, hoisted out of the loop.
	tick := int64(time.Second) / timerFrequency
	for timePassedTimer > tick {
		timePassedTimer -= tick
		if delayTimer > 0 {
			delayTimer--
		}
		if soundTimer > 0 {
			soundTimer--
			// No audio backend: announce the beep on the console instead.
			if soundTimer == 0 {
				fmt.Println("Beep Boop !!")
			}
		}
	}
}
// operationCycle fetches and executes opcodes at operationFrequency Hz,
// catching up on wall-clock time elapsed since the previous call.
func operationCycle() {
	// Idiom fix: time.Since(t) replaces time.Now().Sub(t).
	timePassedOperation += time.Since(lastTimeOperation).Nanoseconds()
	lastTimeOperation = time.Now()
	// Nanoseconds per opcode, hoisted out of the loop.
	step := int64(time.Second) / operationFrequency
	for timePassedOperation > step {
		timePassedOperation -= step
		// CHIP-8 opcodes are big-endian 16-bit words.
		opCode = (uint16(memory[programCounter]) << 8) | uint16(memory[programCounter+1])
		programCounter += 2
		startOperation()
	}
}
// GetPixel returns the 64x32 display buffer. The returned slice aliases the
// emulator's internal state (no copy is made), so callers should treat it as
// read-only.
func GetPixel() [][]bool {
	return pixel
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.