text stringlengths 11 4.05M |
|---|
package goSolution
import "testing"
func TestSubsetsWithDup(t *testing.T) {
nums := []int {1, 2, 2}
AssertEqual(t, 6, len(subsetsWithDup(nums)))
}
|
package message_adder
import (
"github.com/golang/protobuf/proto"
"ms/sun_old/base"
"ms/sun/shared/helper"
"ms/sun/servises/log_service"
"ms/sun/servises/sun_utils"
"ms/sun/shared/config"
"ms/sun/shared/x"
)
//todo save MessageFiles to its tables for later retrival
// chatLogger is the package-wide logger for chat-related diagnostics.
var chatLogger = log_service.NewSimpleLogger("chat")
// chatDirect manages a one-to-one chat room between two users: it holds each
// user's private chat row/key and a channel through which new messages are
// queued for persistence by a background goroutine.
type chatDirect struct {
	User1Id        int    // first peer's user id (decoded from the room key)
	User2Id        int    // second peer's user id
	User1ChatKey   string // chat key owned by user 1 (user1 -> user2 direction)
	User2ChatKey   string // chat key owned by user 2 (user2 -> user1 direction)
	User1Chat      *x.Chat
	User2Chat      *x.Chat
	AddChan        chan *x.PB_MessageView // inbound messages, drained by listenForAdding
	Err            error                  // sticky error from room loading; blocks further writes
	lastActiveTime int                    // time of the last processed message (helper.TimeNow units)
}
// newChaatDirectByRoomKey builds a chatDirect for the two users encoded in
// RoomKey, loads (or creates) their per-user chat rows, and starts the
// background goroutine that drains AddChan.
func newChaatDirectByRoomKey(RoomKey string) *chatDirect {
	userA, userB := sun_utils.RoomKeyToUsers(RoomKey)
	cd := &chatDirect{
		User1Id:        userA,
		User2Id:        userB,
		User1ChatKey:   sun_utils.UsersToChatKey(userA, userB),
		User2ChatKey:   sun_utils.UsersToChatKey(userB, userA),
		AddChan:        make(chan *x.PB_MessageView, 100),
		lastActiveTime: helper.TimeNow(),
	}
	cd.loadOrCreateRooms()
	go cd.listenForAdding()
	return cd
}
// loadOrCreateRooms makes sure both per-user chat rows exist, fetching or
// creating each one. If either lookup fails the last error encountered is
// recorded in s.Err and returned.
func (s *chatDirect) loadOrCreateRooms() error {
	// Both rows already loaded — nothing to do.
	if s.User1Chat != nil && s.User2Chat != nil {
		return nil
	}
	var errFirst, errSecond error
	s.User1Chat, errFirst = getOrCreateDirectChatForPeers(s.User1Id, s.User2Id)
	s.User2Chat, errSecond = getOrCreateDirectChatForPeers(s.User2Id, s.User1Id)
	if errFirst != nil {
		s.Err = errFirst
	}
	if errSecond != nil {
		s.Err = errSecond
	}
	if config.IS_DEBUG && s.Err != nil {
		chatLogger.Printf("%s - Err: %s - %v", "loadOrCreateRooms() has error: ", s.Err, s)
	}
	return s.Err
}
// listenForAdding drains AddChan on a dedicated goroutine (started by
// newChaatDirectByRoomKey): each incoming message is written once per peer's
// chat table, the chat sequence counters are advanced, and push records are
// queued for both the sender and the receiving peer.
func (s *chatDirect) listenForAdding() {
	// Recover from any panic so one bad message cannot kill the worker.
	defer helper.JustRecover()
	for msgPb := range s.AddChan {
		// If room loading failed earlier, drop messages instead of
		// writing inconsistent rows.
		if s.Err != nil {
			continue
		}
		s.lastActiveTime = helper.TimeNow()
		adderUserId := int(msgPb.UserId)
		// The peer is whichever of the two users did not send the message.
		peerUserId := s.User1Id
		if adderUserId == s.User1Id {
			peerUserId = s.User2Id
		}
		extraPb := &x.PB_MessageTableExtra{
			MessageFileView: msgPb.MessageFileView,
		}
		// Marshal error deliberately ignored: extraPb is an in-memory proto.
		extraBlob, _ := proto.Marshal(extraPb)
		delivery := 4 //int(x.RoomMessageDeliviryStatusEnum_SENT)
		m1 := x.DirectMessage{
			ChatKey:            s.User1ChatKey,
			MessageId:          int(msgPb.MessageId),
			RoomKey:            msgPb.RoomKey,
			UserId:             int(msgPb.UserId),
			MessageFileId:      int(msgPb.MessageFileId),
			MessageTypeEnum:    int(msgPb.MessageTypeEnum),
			Text:               msgPb.Text,
			CreatedTime:        int(msgPb.CreatedTime),
			Seq:                s.User1Chat.Seq + 1,
			DeliviryStatusEnum: int(delivery),
			ExtraPB:            extraBlob,
		}
		m1.Save(base.DB)
		// NOTE(review): m2 takes its Seq from msgPb.Seq while m1 derives it
		// from User1Chat.Seq+1 — confirm this asymmetry is intentional.
		m2 := x.DirectMessage{
			ChatKey:            s.User2ChatKey,
			MessageId:          int(msgPb.MessageId),
			RoomKey:            msgPb.RoomKey,
			UserId:             int(msgPb.UserId),
			MessageFileId:      int(msgPb.MessageFileId),
			MessageTypeEnum:    int(msgPb.MessageTypeEnum),
			Text:               msgPb.Text,
			CreatedTime:        int(msgPb.CreatedTime),
			Seq:                int(msgPb.Seq),
			DeliviryStatusEnum: int(delivery),
			ExtraPB:            extraBlob,
		}
		m2.Save(base.DB)
		s.User1Chat.Seq += 1
		// BUG FIX: UpdatedMs used "+=", accumulating timestamps on every
		// message; it must record the time of the latest update.
		s.User1Chat.UpdatedMs = helper.TimeNowMs()
		s.User2Chat.Seq += 1
		s.User2Chat.UpdatedMs = helper.TimeNowMs()
		s.User1Chat.Update(base.DB)
		s.User2Chat.Update(base.DB)
		// Ack push back to the sender (no payload attached).
		upToMe := &x.PushChat{
			PushId:      helper.NextRowsSeqId(),
			ToUserId:    adderUserId,
			PushTypeId:  3, //chat_service.CHAT_SYNC_MSG_RECIVED_TO_SERVER,
			RoomKey:     msgPb.RoomKey,
			ChatKey:     "",
			MessageId:   int(msgPb.MessageId),
			MessagePb:   []byte{},
			MessageJson: "",
			CreatedTime: helper.TimeNow(),
		}
		msgBlob, _ := proto.Marshal(msgPb)
		// Full-message push to the receiving peer.
		upToPeer := &x.PushChat{
			PushId:      helper.NextRowsSeqId(),
			ToUserId:    peerUserId,
			PushTypeId:  2, //chat_service.CHAT_SYNC_NEW_MESSAGE,
			RoomKey:     msgPb.RoomKey,
			ChatKey:     "",
			MessageId:   int(msgPb.MessageId),
			MessagePb:   msgBlob,
			MessageJson: "",
			CreatedTime: helper.TimeNow(),
		}
		upToMe.Save(base.DB)
		upToPeer.Save(base.DB)
	}
}
// getOrCreateDirectChatForPeers returns the direct-chat row owned by `me`
// toward `peer`, building and persisting a fresh one when the selector does
// not return an existing row.
func getOrCreateDirectChatForPeers(me int, peer int) (*x.Chat, error) {
	chatMe, err := x.NewChat_Selector().UserId_Eq(me).PeerUserId_Eq(peer).GetRow(base.DB)
	if err == nil {
		return chatMe, nil
	}
	// No row came back: create a brand-new chat row for this direction.
	fresh := &x.Chat{
		ChatKey:      sun_utils.UsersToChatKey(me, peer),
		RoomKey:      sun_utils.UsersToRoomKey(me, peer),
		RoomTypeEnum: 1,
		UserId:       me,
		PeerUserId:   peer,
		GroupId:      0,
		CreatedTime:  helper.TimeNow(),
		Seq:          0,
		SeenSeq:      0,
		UpdatedMs:    helper.TimeNowMs(),
	}
	if err := fresh.Save(base.DB); err != nil {
		return nil, err
	}
	return fresh, nil
}
|
// Copyright (c) 2022 Target Brands, Inc. All rights reserved.
//
// Use of this source code is governed by the LICENSE file in this repository.
//nolint:dupl // ignore dupl linter false positive
package vela
import (
"fmt"
"github.com/go-vela/types/library"
)
// StepService handles retrieving steps for builds
// from the server methods of the Vela API.
//
// (The underlying `service` type is declared elsewhere in this package.)
type StepService service
// Get returns the provided step.
func (svc *StepService) Get(org, repo string, build, step int) (*library.Step, *Response, error) {
	// Endpoint addressing one specific step of the build.
	endpoint := fmt.Sprintf("/api/v1/repos/%s/%s/builds/%d/steps/%d", org, repo, build, step)
	// Decode target for the response body.
	out := &library.Step{}
	resp, err := svc.client.Call("GET", endpoint, nil, out)
	return out, resp, err
}
// GetAll returns a list of all steps.
func (svc *StepService) GetAll(org, repo string, build int, opt *ListOptions) (*[]library.Step, *Response, error) {
	// Endpoint listing every step of the build.
	endpoint := fmt.Sprintf("/api/v1/repos/%s/%s/builds/%d/steps", org, repo, build)
	// Fold any optional query parameters into the URL.
	endpoint, err := addOptions(endpoint, opt)
	if err != nil {
		return nil, nil, err
	}
	// Decode target: a slice of steps.
	steps := new([]library.Step)
	resp, err := svc.client.Call("GET", endpoint, nil, steps)
	return steps, resp, err
}
// Add constructs a step with the provided details.
func (svc *StepService) Add(org, repo string, build int, s *library.Step) (*library.Step, *Response, error) {
	// Endpoint for creating a step under the build.
	endpoint := fmt.Sprintf("/api/v1/repos/%s/%s/builds/%d/steps", org, repo, build)
	// Decode target for the server's echo of the created step.
	created := &library.Step{}
	resp, err := svc.client.Call("POST", endpoint, s, created)
	return created, resp, err
}
// Update modifies a step with the provided details.
func (svc *StepService) Update(org, repo string, build int, s *library.Step) (*library.Step, *Response, error) {
	// Endpoint addresses the step by its own number.
	endpoint := fmt.Sprintf("/api/v1/repos/%s/%s/builds/%d/steps/%d", org, repo, build, s.GetNumber())
	// Decode target for the updated step returned by the server.
	updated := &library.Step{}
	resp, err := svc.client.Call("PUT", endpoint, s, updated)
	return updated, resp, err
}
// Remove deletes the provided step.
func (svc *StepService) Remove(org, repo string, build, step int) (*string, *Response, error) {
	// Endpoint addresses the step to delete.
	endpoint := fmt.Sprintf("/api/v1/repos/%s/%s/builds/%d/steps/%d", org, repo, build, step)
	// The API answers with a plain string message.
	var msg string
	resp, err := svc.client.Call("DELETE", endpoint, nil, &msg)
	return &msg, resp, err
}
|
package cmd
import (
"github.com/spf13/cobra"
"cloudfreexiao/ant-graphql/backend-go/server"
)
// Command-line flag values, populated by init below.
var (
	debug       bool // enable debug mode ("-d")
	disableAuth bool // disable the auth middleware
	port        int  // TCP port the server listens on ("-p")
)
// RootCmd is the top-level cobra command: it builds the GraphQL server from
// the parsed flag values and runs it on the configured port.
var RootCmd = &cobra.Command{
	Use:   "graphql-server",
	Short: "GraphQL API server in golang to get linux system info",
	RunE: func(cmd *cobra.Command, args []string) error {
		s, err := server.NewServer(&server.Config{
			Debug:       debug,
			DisableAuth: disableAuth,
		})
		if err != nil {
			return err
		}
		// NOTE(review): any result of s.Run is discarded here — if Run
		// reports errors, consider propagating them; confirm its signature.
		s.Run(port)
		return nil
	},
}
// init wires the command-line flags to the package-level configuration vars.
func init() {
	// NOTE(review): debug defaults to true — confirm this is intended for
	// production builds.
	RootCmd.Flags().BoolVarP(&debug, "debug", "d", true, "debug mode")
	RootCmd.Flags().BoolVarP(&disableAuth, "disable-auth", "", false, "disable auth middleware")
	RootCmd.Flags().IntVarP(&port, "port", "p", 9527, "port number")
}
package sw
import (
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/secp256k1"
"crypto/elliptic"
"errors"
//"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/utils"
//"crypto/ecdsa"
"fmt"
)
// ecdsa256K1Signer signs digests with a secp256k1 private key.
type ecdsa256K1Signer struct{}

// Sign produces a secp256k1 signature over digest using the private key
// material held by k. The key bytes are left-padded to the 32-byte scalar
// length expected by the secp256k1 package.
func (s *ecdsa256K1Signer) Sign(k bccsp.Key, digest []byte, opts bccsp.SignerOpts) (signature []byte, err error) {
	if k == nil || digest == nil {
		return nil, errors.New("k or digest is nil")
	}
	blob, err := k.Bytes()
	if err != nil {
		return nil, err
	}
	// BUG FIX: the padded buffer was computed but the raw (possibly short)
	// blob was passed to Sign; use the zero-padded 32-byte scalar instead.
	// NOTE(review): assumes len(blob) <= 32 — confirm key serialization.
	privkey := make([]byte, 32)
	copy(privkey[32-len(blob):], blob)
	return secp256k1.Sign(digest, privkey)
}
// ecdsaPublicKey256K1Verifier verifies secp256k1 signatures against a
// public key.
type ecdsaPublicKey256K1Verifier struct{}

// Verify checks signature over digest using the secp256k1 public key in k.
func (v *ecdsaPublicKey256K1Verifier) Verify(k bccsp.Key, signature, digest []byte, opts bccsp.SignerOpts) (valid bool, err error) {
	key, err := k.PublicKey()
	if err != nil {
		// BUG FIX: the error was swallowed (returned as nil), hiding key
		// extraction failures from callers.
		return false, err
	}
	key1, ok := key.(*Ecdsa256K1PublicKey)
	if !ok {
		return false, errors.New("key type invalid")
	}
	// Uncompressed point encoding expected by VerifySignature.
	pubkey := elliptic.Marshal(secp256k1.S256(), key1.PubKey.X, key1.PubKey.Y)
	return secp256k1.VerifySignature(pubkey, digest, signature), nil
}
// ecdsaPrivKey256K1Verifier verifies secp256k1 signatures using the public
// half derived from a private key.
type ecdsaPrivKey256K1Verifier struct{}

// Verify checks signature over digest using the public key derived from k.
func (v *ecdsaPrivKey256K1Verifier) Verify(k bccsp.Key, signature, digest []byte, opts bccsp.SignerOpts) (valid bool, err error) {
	key, err := k.PublicKey()
	if err != nil {
		// BUG FIX: the error was swallowed (returned as nil), hiding key
		// derivation failures from callers.
		return false, err
	}
	key1, ok := key.(*Ecdsa256K1PublicKey)
	if !ok {
		return false, errors.New("key type invalid")
	}
	// Uncompressed point encoding expected by VerifySignature.
	pubkey := elliptic.Marshal(secp256k1.S256(), key1.PubKey.X, key1.PubKey.Y)
	// NOTE(review): debug print kept to preserve behavior; consider removing
	// or routing through a logger.
	fmt.Printf("sign len:%v, hash len:%v\n", len(signature), len(digest))
	return secp256k1.VerifySignature(pubkey, digest, signature), nil
}
|
package home
import (
"os"
"io/ioutil"
"net/http"
"testing"
"github.com/gin-gonic/gin"
. "github.com/smartystreets/goconvey/convey"
"github.com/zeuxisoo/go-zenwords/pkg/tester"
)
// engine is the shared gin engine used by every test in this file; it is
// built once in init below.
var (
	engine *gin.Engine
)
// init builds the shared test engine and registers the two routes under test.
func init() {
	engine = tester.CreateWebEngine()
	engine.GET("/", IndexGet)
	engine.GET("/robots.txt", RobotsTxtGet)
}
// TestHomeIndexGetOK asserts that GET / answers 200 with the "ZenWords" body.
func TestHomeIndexGetOK(t *testing.T) {
	Convey("HomeIndexGet / should be OK", t, func() {
		rec := tester.PerformWebRequestGet(engine, "/")
		So(rec.Code, ShouldEqual, http.StatusOK)
		So(rec.Body.String(), ShouldEqual, "ZenWords")
	})
}
// TestHomeRobotsTxtGetOK checks /robots.txt both when no physical file
// exists (the handler serves a default User-agent/Disallow body) and when a
// robots.txt file is present on disk (its contents are served).
func TestHomeRobotsTxtGetOK(t *testing.T) {
	Convey("HomeRobotsTxtGet /robots.txt should be OK", t, func() {
		Convey("When robots.txt is not exists", func() {
			responseRecorder := tester.PerformWebRequestGet(engine, "/robots.txt")
			So(responseRecorder.Code, ShouldEqual, http.StatusOK)
			So(responseRecorder.Body.String(), ShouldContainSubstring, "User-agent")
			So(responseRecorder.Body.String(), ShouldContainSubstring, "Disallow")
		})
		Convey("When robots.txt is exists", func() {
			// BUG FIX: the WriteFile error was ignored and the cleanup was
			// skipped when an assertion failed; check the error and defer
			// the removal so the fixture file never leaks.
			err := ioutil.WriteFile("robots.txt", []byte("Hello World\nrobots.txt"), 0644)
			So(err, ShouldBeNil)
			defer os.Remove("robots.txt")
			responseRecorder := tester.PerformWebRequestGet(engine, "/robots.txt")
			So(responseRecorder.Code, ShouldEqual, http.StatusOK)
			So(responseRecorder.Body.String(), ShouldContainSubstring, "Hello World")
			So(responseRecorder.Body.String(), ShouldContainSubstring, "robots.txt")
		})
	})
}
|
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// Elm is a BFS queue entry for isValidBST: a node plus the exclusive
// (Min, Max) value bounds inherited from its ancestors.
type Elm struct {
	Node *TreeNode
	Max  int // exclusive upper bound for values in this subtree
	Min  int // exclusive lower bound for values in this subtree
}
// isValidBST reports whether the tree rooted at root is a valid binary
// search tree (every node strictly greater than all of its left subtree and
// strictly smaller than all of its right subtree). It runs an iterative BFS
// carrying, for each node, the exclusive (min, max) bounds inherited from
// its ancestors.
// time: O(n), space: O(n)
func isValidBST(root *TreeNode) bool {
	// BUG FIX: an empty tree previously seeded the queue with a nil node and
	// panicked on cur.Node.Left; an empty tree is trivially valid.
	if root == nil {
		return true
	}
	// Sentinels one step outside the 32-bit int range, so the boundary
	// values 2147483647 / -2147483648 are still accepted.
	queue := []*Elm{{root, 2147483647 + 1, -2147483648 - 1}}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if l := cur.Node.Left; l != nil {
			// Left child must lie strictly inside (Min, parent value).
			if l.Val <= cur.Min || l.Val >= cur.Node.Val {
				return false
			}
			queue = append(queue, &Elm{l, cur.Node.Val, cur.Min})
		}
		if r := cur.Node.Right; r != nil {
			// Right child must lie strictly inside (parent value, Max).
			if r.Val <= cur.Node.Val || r.Val >= cur.Max {
				return false
			}
			queue = append(queue, &Elm{r, cur.Max, cur.Node.Val})
		}
	}
	return true
}
|
package github
import (
"context"
"errors"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
// Config holds the GitHub connection settings, loaded from the environment
// via the struct tags below.
type Config struct {
	Token string `env:"GITHUB_TOKEN,required"` // OAuth token used for authentication
	Owner string `env:"GITHUB_OWNER,required"` // repository owner (user or org)
	Repo  string `env:"GITHUB_REPO,required"`  // repository name
	Ref   string `env:"GITHUB_REF" envDefault:"master"` // git ref to operate on
	Path  string `env:"GITHUB_PATH,required"` // path within the repository
}
// Client bundles the configuration with an authenticated go-github client.
type Client struct {
	cfg Config         // settings the client was built from
	git *github.Client // underlying go-github client (nil until set by NewClient)
}
// NewClient builds a Client whose underlying go-github client authenticates
// every request with the configured OAuth token.
func (c Config) NewClient() *Client {
	tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: c.Token})
	httpClient := oauth2.NewClient(context.Background(), tokenSource)
	return &Client{
		cfg: c,
		git: github.NewClient(httpClient),
	}
}
// Client returns the underlying go-github client, or an error when it has
// not been initialized (e.g. a zero-value Client was used instead of one
// built by Config.NewClient).
func (c *Client) Client() (*github.Client, error) {
	// BUG FIX: the nil check was inverted — it returned an error whenever
	// the client WAS initialized and (nil, nil) when it was not.
	if c.git == nil {
		return nil, errors.New("github Client is nil")
	}
	return c.git, nil
}
|
package mat
import (
"fmt"
"math"
"testing"
"github.com/stretchr/testify/assert"
)
// TestTuple4_IsVector checks that NewVector yields a tuple that reports
// itself as a vector (not a point) and stores x/y/z at indices 0-2.
func TestTuple4_IsVector(t *testing.T) {
	v := NewVector(4.3, -4.2, 3.1)
	assert.True(t, v.IsVector())
	assert.False(t, v.IsPoint())
	assert.Equal(t, 4.3, v.Get(0))
	assert.Equal(t, -4.2, v.Get(1))
	assert.Equal(t, 3.1, v.Get(2))
}

// TestTuple4_IsPoint checks the complementary case: NewPoint yields a tuple
// that reports itself as a point, with components stored in order.
func TestTuple4_IsPoint(t *testing.T) {
	p := NewPoint(4.3, -4.2, 3.1)
	assert.True(t, p.IsPoint())
	assert.False(t, p.IsVector())
	assert.Equal(t, 4.3, p.Get(0))
	assert.Equal(t, -4.2, p.Get(1))
	assert.Equal(t, 3.1, p.Get(2))
}

// TestTuple4Add: point + vector = translated point (index 3 stays 1.0).
func TestTuple4Add(t *testing.T) {
	t1 := NewPoint(3, -2, 5)
	t2 := NewVector(-2, 3, 1)
	t3 := Add(t1, t2)
	assert.Equal(t, 1.0, t3.Get(0))
	assert.Equal(t, 1.0, t3.Get(1))
	assert.Equal(t, 6.0, t3.Get(2))
	assert.Equal(t, 1.0, t3.Get(3))
}

// TestTuple4Sub: point - point = the vector between them (index 3 becomes 0.0).
func TestTuple4Sub(t *testing.T) {
	t1 := NewPoint(3, 2, 1)
	t2 := NewPoint(5, 6, 7)
	t3 := Sub(t1, t2)
	assert.Equal(t, -2.0, t3.Get(0))
	assert.Equal(t, -4.0, t3.Get(1))
	assert.Equal(t, -6.0, t3.Get(2))
	assert.Equal(t, 0.0, t3.Get(3))
}

// TestSubVectorFromPoint: point - vector = point (index 3 stays 1.0).
func TestSubVectorFromPoint(t *testing.T) {
	t1 := NewPoint(3, 2, 1)
	t2 := NewVector(5, 6, 7)
	t3 := Sub(t1, t2)
	assert.Equal(t, -2.0, t3.Get(0))
	assert.Equal(t, -4.0, t3.Get(1))
	assert.Equal(t, -6.0, t3.Get(2))
	assert.Equal(t, 1.0, t3.Get(3))
}

// TestSubVectorFromVector: vector - vector = vector (index 3 stays 0.0).
func TestSubVectorFromVector(t *testing.T) {
	t1 := NewVector(3, 2, 1)
	t2 := NewVector(5, 6, 7)
	t3 := Sub(t1, t2)
	assert.Equal(t, -2.0, t3.Get(0))
	assert.Equal(t, -4.0, t3.Get(1))
	assert.Equal(t, -6.0, t3.Get(2))
	assert.Equal(t, 0.0, t3.Get(3))
}

// TestSubtractVectorFromZeroVector: subtracting from the zero vector negates.
func TestSubtractVectorFromZeroVector(t *testing.T) {
	t1 := NewVector(0, 0, 0)
	t2 := NewVector(1, -2, 3)
	t3 := Sub(t1, t2)
	assert.Equal(t, -1.0, t3.Get(0))
	assert.Equal(t, 2.0, t3.Get(1))
	assert.Equal(t, -3.0, t3.Get(2))
	assert.Equal(t, 0.0, t3.Get(3))
}

// TestNegateTuple: Negate flips the sign of every component, w included.
func TestNegateTuple(t *testing.T) {
	t1 := NewTupleOf(1, -2, 3, -4)
	t3 := Negate(t1)
	assert.Equal(t, -1.0, t3.Get(0))
	assert.Equal(t, 2.0, t3.Get(1))
	assert.Equal(t, -3.0, t3.Get(2))
	assert.Equal(t, 4.0, t3.Get(3))
}

// TestMultiplyByScalar scales every component, w included.
func TestMultiplyByScalar(t *testing.T) {
	t1 := NewTupleOf(1, -2, 3, -4)
	t3 := MultiplyByScalar(t1, 3.5)
	assert.Equal(t, 3.5, t3.Get(0))
	assert.Equal(t, -7.0, t3.Get(1))
	assert.Equal(t, 10.5, t3.Get(2))
	assert.Equal(t, -14.0, t3.Get(3))
}

// TestMultiplyByScalarFraction: scaling by 0.5 halves each component.
func TestMultiplyByScalarFraction(t *testing.T) {
	t1 := NewTupleOf(1, -2, 3, -4)
	t3 := MultiplyByScalar(t1, 0.5)
	assert.Equal(t, 0.5, t3.Get(0))
	assert.Equal(t, -1.0, t3.Get(1))
	assert.Equal(t, 1.5, t3.Get(2))
	assert.Equal(t, -2.0, t3.Get(3))
}

// TestDivideByScalar: dividing by 2 matches multiplying by 0.5.
func TestDivideByScalar(t *testing.T) {
	t1 := NewTupleOf(1, -2, 3, -4)
	t3 := DivideByScalar(t1, 2)
	assert.Equal(t, 0.5, t3.Get(0))
	assert.Equal(t, -1.0, t3.Get(1))
	assert.Equal(t, 1.5, t3.Get(2))
	assert.Equal(t, -2.0, t3.Get(3))
}
// TestMagnitude table-drives Magnitude over the unit vectors and the classic
// (1,2,3) / (-1,-2,-3) cases whose length is sqrt(14).
func TestMagnitude(t *testing.T) {
	tc := []struct {
		tpl Tuple4
		out float64
	}{
		{NewVector(1, 0, 0), 1.0},
		{NewVector(0, 1, 0), 1.0},
		{NewVector(0, 0, 1), 1.0},
		{NewVector(1, 2, 3), math.Sqrt(14)},
		{NewVector(-1, -2, -3), math.Sqrt(14)},
	}
	for _, test := range tc {
		assert.Equal(t, test.out, Magnitude(test.tpl))
	}
}

// TestNormalizeXOnly: normalizing an axis-aligned vector yields a unit axis.
func TestNormalizeXOnly(t *testing.T) {
	t1 := NewVector(4, 0, 0)
	t3 := Normalize(t1)
	assert.Equal(t, 1.0, t3.Get(0))
	assert.Equal(t, 0.0, t3.Get(1))
	assert.Equal(t, 0.0, t3.Get(2))
}

// TestNormalizeXYZ checks components of normalize(1,2,3) against 1/sqrt(14)
// multiples, using the package's approximate-equality helper Eq.
func TestNormalizeXYZ(t *testing.T) {
	t1 := NewVector(1, 2, 3)
	t3 := Normalize(t1)
	assert.True(t, Eq(0.26726, t3.Get(0)))
	assert.True(t, Eq(0.53452, t3.Get(1)))
	assert.True(t, Eq(0.80178, t3.Get(2)))
}

// TestNormalizedMagnitudeIsOne: a normalized vector has magnitude exactly 1.
func TestNormalizedMagnitudeIsOne(t *testing.T) {
	t1 := NewVector(1, 2, 3)
	t3 := Normalize(t1)
	assert.Equal(t, 1.0, Magnitude(t3))
}

// TestDot: (1,2,3)·(2,3,4) = 2+6+12 = 20.
func TestDot(t *testing.T) {
	t1 := NewVector(1, 2, 3)
	t2 := NewVector(2, 3, 4)
	dotProduct := Dot(t1, t2)
	assert.Equal(t, 20.0, dotProduct)
}

// TestCross checks the cross product in both orders (anti-commutative).
func TestCross(t *testing.T) {
	t1 := NewVector(1, 2, 3)
	t2 := NewVector(2, 3, 4)
	crossT1 := Cross(t1, t2)
	crossT2 := Cross(t2, t1)
	assert.True(t, TupleEquals(crossT1, NewVector(-1, 2, -1)))
	assert.True(t, TupleEquals(crossT2, NewVector(1, -2, 1)))
}

// TestCrossProduct checks the out-parameter variant against the same input.
func TestCrossProduct(t *testing.T) {
	t1 := NewVector(1, 2, 3)
	t2 := NewVector(2, 3, 4)
	out := Tuple4{}
	CrossProduct(&t1, &t2, &out)
	assert.True(t, TupleEquals(out, NewVector(-1, 2, -1)))
}

// TestColorAdd: colors add component-wise.
func TestColorAdd(t *testing.T) {
	c1 := NewColor(0.9, 0.6, 0.75)
	c2 := NewColor(0.7, 0.1, 0.25)
	c3 := Add(c1, c2)
	assert.Equal(t, 1.6, c3.Get(0))
	assert.Equal(t, 0.7, c3.Get(1))
	assert.Equal(t, 1.0, c3.Get(2))
}

// TestColorSub: colors subtract component-wise (epsilon compare for floats).
func TestColorSub(t *testing.T) {
	c1 := NewColor(0.9, 0.6, 0.75)
	c2 := NewColor(0.7, 0.1, 0.25)
	c3 := Sub(c1, c2)
	assert.InEpsilon(t, 0.2, c3.Get(0), Epsilon)
	assert.InEpsilon(t, 0.5, c3.Get(1), Epsilon)
	assert.InEpsilon(t, 0.5, c3.Get(2), Epsilon)
}

// TestColorMultiplyByScalar: colors scale component-wise.
func TestColorMultiplyByScalar(t *testing.T) {
	c1 := NewColor(0.2, 0.3, 0.4)
	c3 := MultiplyByScalar(c1, 2)
	assert.Equal(t, 0.4, c3.Get(0))
	assert.Equal(t, 0.6, c3.Get(1))
	assert.Equal(t, 0.8, c3.Get(2))
}

// TestHadamard: element-wise (Schur) product of two colors.
func TestHadamard(t *testing.T) {
	c1 := NewColor(1, 0.2, 0.4)
	c2 := NewColor(0.9, 1, 0.1)
	c3 := Hadamard(c1, c2)
	assert.InEpsilon(t, 0.9, c3.Get(0), Epsilon)
	assert.InEpsilon(t, 0.2, c3.Get(1), Epsilon)
	assert.InEpsilon(t, 0.04, c3.Get(2), Epsilon)
}
// Benchmarks below all follow the same pattern: the result is written to a
// local and printed once after the loop so the compiler cannot dead-code
// eliminate the benchmarked call.

// BenchmarkDot measures the value-returning dot product.
func BenchmarkDot(b *testing.B) {
	var res = 0.0
	t1 := NewVector(1, 2, 3)
	t2 := NewVector(2, 3, 4)
	for i := 0; i < b.N; i++ {
		res = Dot(t1, t2)
	}
	fmt.Printf("%v\n", res)
}

// BenchmarkNormalizePtr measures the variant taking both operands by pointer.
func BenchmarkNormalizePtr(b *testing.B) {
	t1 := NewVector(1, 2, 3)
	out := Tuple4{}
	for i := 0; i < b.N; i++ {
		NormalizePtr(&t1, &out)
	}
	fmt.Printf("%v\n", out)
}

// BenchmarkNormalizePtr2 measures the variant taking the input by value.
func BenchmarkNormalizePtr2(b *testing.B) {
	t1 := NewVector(1, 2, 3)
	out := Tuple4{}
	for i := 0; i < b.N; i++ {
		NormalizePtr2(t1, &out)
	}
	fmt.Printf("%v\n", out)
}

// BenchmarkNormalize measures the value-returning variant.
func BenchmarkNormalize(b *testing.B) {
	t1 := NewVector(1, 2, 3)
	out := Tuple4{}
	for i := 0; i < b.N; i++ {
		out = Normalize(t1)
	}
	fmt.Printf("%v\n", out)
}

// BenchmarkCross measures the value-returning cross product.
func BenchmarkCross(b *testing.B) {
	t1 := NewVector(1.3243, 2.35456, 3.65464)
	t2 := NewVector(2.6563, 3.75672, 4.54654)
	out := Tuple4{}
	for i := 0; i < b.N; i++ {
		out = Cross(t1, t2)
	}
	fmt.Printf("%v\n", out)
}

// BenchmarkCross2 measures the pointer/out-parameter cross product.
func BenchmarkCross2(b *testing.B) {
	t1 := NewVector(1.3243, 2.35456, 3.65464)
	t2 := NewVector(2.6563, 3.75672, 4.54654)
	out := Tuple4{}
	for i := 0; i < b.N; i++ {
		Cross2(&t1, &t2, &out)
	}
	fmt.Printf("%v\n", out)
}

// BenchmarkCross2Parallell runs Cross2 under RunParallel, one out per goroutine.
func BenchmarkCross2Parallell(b *testing.B) {
	t1 := NewVector(1.3243, 2.35456, 3.65464)
	t2 := NewVector(2.6563, 3.75672, 4.54654)
	b.RunParallel(func(pb *testing.PB) {
		out := Tuple4{}
		for pb.Next() {
			Cross2(&t1, &t2, &out)
		}
		fmt.Printf("%v\n", out)
	})
}

// BenchmarkCrossProduct measures the CrossProduct out-parameter variant.
func BenchmarkCrossProduct(b *testing.B) {
	t1 := NewVector(1.3243, 2.35456, 3.65464)
	t2 := NewVector(2.6563, 3.75672, 4.54654)
	out := Tuple4{}
	for i := 0; i < b.N; i++ {
		CrossProduct(&t1, &t2, &out)
	}
	fmt.Printf("%v\n", out)
}

// BenchmarkCrossProductParallell runs CrossProduct under RunParallel.
func BenchmarkCrossProductParallell(b *testing.B) {
	t1 := NewVector(1.3243, 2.35456, 3.65464)
	t2 := NewVector(2.6563, 3.75672, 4.54654)
	b.RunParallel(func(pb *testing.PB) {
		out := Tuple4{}
		for pb.Next() {
			CrossProduct(&t1, &t2, &out)
		}
		fmt.Printf("%v\n", out)
	})
}

// BenchmarkAdd measures the value-returning Add.
func BenchmarkAdd(b *testing.B) {
	t1 := NewPoint(3, -2, 5)
	t2 := NewVector(-2, 3, 1)
	var t3 Tuple4
	for i := 0; i < b.N; i++ {
		t3 = Add(t1, t2)
	}
	fmt.Printf("%v\n", t3)
}

// BenchmarkAddPtr measures Add with an out parameter (inputs by value).
func BenchmarkAddPtr(b *testing.B) {
	t1 := NewPoint(3, -2, 5)
	t2 := NewVector(-2, 3, 1)
	var t3 Tuple4
	for i := 0; i < b.N; i++ {
		AddPtr(t1, t2, &t3)
	}
	fmt.Printf("%v\n", t3)
}

// BenchmarkAddPtr2 measures Add with all operands passed by pointer.
func BenchmarkAddPtr2(b *testing.B) {
	t1 := NewPoint(3, -2, 5)
	t2 := NewVector(-2, 3, 1)
	var t3 Tuple4
	for i := 0; i < b.N; i++ {
		AddPtr2(&t1, &t2, &t3)
	}
	fmt.Printf("%v\n", t3)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flashbacktest
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/ddl"
ddlutil "github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/tests/realtikvtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/oracle"
tikvutil "github.com/tikv/client-go/v2/util"
)
// MockGC is used to make GC work in the test environment.
// It disables the emulator GC and returns:
//   - a GC-formatted timestamp from 48h in the past (usable as a safe point),
//   - a GC-formatted timestamp 48h in the future,
//   - an INSERT ... ON DUPLICATE KEY UPDATE template for the mysql.tidb
//     safe-point row (one %[1]s placeholder for the timestamp),
//   - a closure restoring the emulator-GC setting that was active on entry.
func MockGC(tk *testkit.TestKit) (string, string, string, func()) {
	originGC := ddlutil.IsEmulatorGCEnable()
	resetGC := func() {
		if originGC {
			ddlutil.EmulatorGCEnable()
		} else {
			ddlutil.EmulatorGCDisable()
		}
	}
	// disable emulator GC.
	// Otherwise emulator GC will delete table record as soon as possible after execute drop table ddl.
	ddlutil.EmulatorGCDisable()
	timeBeforeDrop := time.Now().Add(0 - 48*60*60*time.Second).Format(tikvutil.GCTimeFormat)
	timeAfterDrop := time.Now().Add(48 * 60 * 60 * time.Second).Format(tikvutil.GCTimeFormat)
	safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
	// clear GC variables first.
	tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )")
	return timeBeforeDrop, timeAfterDrop, safePointSQL, resetGC
}
// TestFlashback exercises a basic FLASHBACK CLUSTER: rows inserted after the
// captured timestamp must disappear after the flashback. Runs only against a
// real TiKV store.
func TestFlashback(t *testing.T) {
	if *realtikvtest.WithRealTiKV {
		store := realtikvtest.CreateMockStoreAndSetup(t)
		tk := testkit.NewTestKit(t, store)
		timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
		defer resetGC()
		tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
		tk.MustExec("use test")
		tk.MustExec("drop table if exists t")
		tk.MustExec("create table t(a int, index i(a))")
		tk.MustExec("insert t values (1), (2), (3)")
		time.Sleep(1 * time.Second)
		// Capture the TSO that the flashback will rewind to.
		ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
		require.NoError(t, err)
		// Pretend the resolved-ts "safe" time is 100s ahead so the
		// flashback's safety check passes.
		injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
			fmt.Sprintf("return(%v)", injectSafeTS)))
		tk.MustExec("insert t values (4), (5), (6)")
		tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
		tk.MustExec("admin check table t")
		// Rows 4-6 were inserted after ts and must be gone.
		require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3")
		require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3")
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
	}
}

// TestPrepareFlashbackFailed forces the flashback's prepare phase to hit an
// epoch-not-match error (via failpoint) and verifies the job still succeeds
// with a zero error count recorded in the DDL history.
func TestPrepareFlashbackFailed(t *testing.T) {
	if *realtikvtest.WithRealTiKV {
		store := realtikvtest.CreateMockStoreAndSetup(t)
		tk := testkit.NewTestKit(t, store)
		timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
		defer resetGC()
		tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
		tk.MustExec("use test")
		tk.MustExec("drop table if exists t")
		tk.MustExec("create table t(a int, index i(a))")
		tk.MustExec("insert t values (1), (2), (3)")
		time.Sleep(1 * time.Second)
		ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
		require.NoError(t, err)
		injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
			fmt.Sprintf("return(%v)", injectSafeTS)))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockPrepareMeetsEpochNotMatch", `return(true)`))
		tk.MustExec("insert t values (4), (5), (6)")
		tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
		tk.MustExec("admin check table t")
		require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3")
		require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3")
		// The injected prepare failure must not surface as job errors.
		jobMeta := tk.MustQuery("select job_meta from mysql.tidb_ddl_history order by job_id desc limit 1").Rows()[0][0].(string)
		job := model.Job{}
		require.NoError(t, job.Decode([]byte(jobMeta)))
		require.Equal(t, job.ErrorCount, int64(0))
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockPrepareMeetsEpochNotMatch"))
	}
}
// TestFlashbackAddDropIndex verifies that index DDL performed after the
// captured timestamp (add index k, drop index i) is undone by the flashback,
// including the gc_delete_range bookkeeping.
func TestFlashbackAddDropIndex(t *testing.T) {
	if *realtikvtest.WithRealTiKV {
		store := realtikvtest.CreateMockStoreAndSetup(t)
		tk := testkit.NewTestKit(t, store)
		timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
		defer resetGC()
		tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
		tk.MustExec("use test")
		tk.MustExec("drop table if exists t")
		tk.MustExec("create table t(a int, index i(a))")
		tk.MustExec("insert t values (1), (2), (3)")
		prevGCCount := tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0]
		time.Sleep(1 * time.Second)
		ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
		require.NoError(t, err)
		// DDL after ts: add index k, drop index i — both must be reverted.
		tk.MustExec("alter table t add index k(a)")
		require.Equal(t, tk.MustQuery("select max(a) from t use index(k)").Rows()[0][0], "3")
		tk.MustExec("alter table t drop index i")
		tk.MustGetErrCode("select max(a) from t use index(i)", errno.ErrKeyDoesNotExist)
		require.Greater(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount)
		injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
			fmt.Sprintf("return(%v)", injectSafeTS)))
		tk.MustExec("insert t values (4), (5), (6)")
		tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
		tk.MustExec("admin check table t")
		// Original index i is back; the later index k is gone again.
		require.Equal(t, tk.MustQuery("select max(a) from t use index(i)").Rows()[0][0], "3")
		tk.MustGetErrCode("select max(a) from t use index(k)", errno.ErrKeyDoesNotExist)
		require.Equal(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount)
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
	}
}

// TestFlashbackAddDropModifyColumn verifies that column DDL after the
// captured timestamp (add column c, drop column b, modify a to tinyint) is
// fully reverted, restoring the original schema and data.
func TestFlashbackAddDropModifyColumn(t *testing.T) {
	if *realtikvtest.WithRealTiKV {
		store := realtikvtest.CreateMockStoreAndSetup(t)
		tk := testkit.NewTestKit(t, store)
		timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
		defer resetGC()
		tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
		tk.MustExec("use test")
		tk.MustExec("drop table if exists t")
		tk.MustExec("create table t(a int, b int, index i(a))")
		tk.MustExec("insert t values (1, 1), (2, 2), (3, 3)")
		time.Sleep(1 * time.Second)
		ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
		require.NoError(t, err)
		tk.MustExec("alter table t add column c int")
		tk.MustExec("alter table t drop column b")
		tk.MustExec("alter table t modify column a tinyint")
		require.Equal(t, tk.MustQuery("show create table t").Rows()[0][1], "CREATE TABLE `t` (\n"+
			"  `a` tinyint(4) DEFAULT NULL,\n"+
			"  `c` int(11) DEFAULT NULL,\n"+
			"  KEY `i` (`a`)\n"+
			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")
		injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
			fmt.Sprintf("return(%v)", injectSafeTS)))
		tk.MustExec("insert t values (4, 4), (5, 5), (6, 6)")
		tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
		tk.MustExec("admin check table t")
		// Schema and data must match the pre-DDL state.
		require.Equal(t, tk.MustQuery("show create table t").Rows()[0][1], "CREATE TABLE `t` (\n"+
			"  `a` int(11) DEFAULT NULL,\n"+
			"  `b` int(11) DEFAULT NULL,\n"+
			"  KEY `i` (`a`)\n"+
			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")
		require.Equal(t, tk.MustQuery("select max(b) from t").Rows()[0][0], "3")
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
	}
}
// TestFlashbackBasicRenameDropCreateTable verifies that rename/drop/create
// table operations performed after the captured timestamp are all reverted:
// the rename is undone, the dropped table returns, the new table vanishes.
func TestFlashbackBasicRenameDropCreateTable(t *testing.T) {
	if *realtikvtest.WithRealTiKV {
		store := realtikvtest.CreateMockStoreAndSetup(t)
		tk := testkit.NewTestKit(t, store)
		timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
		defer resetGC()
		tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
		tk.MustExec("use test")
		tk.MustExec("drop table if exists t, t1, t2, t3")
		tk.MustExec("create table t(a int, index i(a))")
		tk.MustExec("insert t values (1), (2), (3)")
		tk.MustExec("create table t1(a int, index i(a))")
		tk.MustExec("insert t1 values (4), (5), (6)")
		prevGCCount := tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0]
		time.Sleep(1 * time.Second)
		ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
		require.NoError(t, err)
		// Table DDL after ts — all to be reverted by the flashback.
		tk.MustExec("rename table t to t3")
		tk.MustExec("drop table t1")
		tk.MustExec("create table t2(a int, index i(a))")
		tk.MustExec("insert t2 values (7), (8), (9)")
		require.Equal(t, tk.MustQuery("select max(a) from t3").Rows()[0][0], "3")
		require.Equal(t, tk.MustQuery("select max(a) from t2").Rows()[0][0], "9")
		require.Greater(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount)
		injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
			fmt.Sprintf("return(%v)", injectSafeTS)))
		tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
		tk.MustExec("admin check table t")
		require.Equal(t, tk.MustQuery("select max(a) from t").Rows()[0][0], "3")
		tk.MustExec("admin check table t1")
		require.Equal(t, tk.MustQuery("select max(a) from t1").Rows()[0][0], "6")
		require.Equal(t, tk.MustQuery("select count(*) from mysql.gc_delete_range").Rows()[0][0], prevGCCount)
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
	}
}

// TestFlashbackCreateDropTableWithData verifies that a drop-then-recreate of
// the same table name after ts is reverted to the pre-ts (empty) table.
func TestFlashbackCreateDropTableWithData(t *testing.T) {
	if *realtikvtest.WithRealTiKV {
		store := realtikvtest.CreateMockStoreAndSetup(t)
		tk := testkit.NewTestKit(t, store)
		timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
		defer resetGC()
		tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
		tk.MustExec("use test")
		tk.MustExec("create table t(a int)")
		time.Sleep(1 * time.Second)
		ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
		require.NoError(t, err)
		// After ts: populate, drop, and recreate t with a different schema.
		tk.MustExec("insert into t values (1)")
		tk.MustExec("drop table t")
		tk.MustExec("create table t(b int)")
		tk.MustExec("insert into t(b) values (1)")
		injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
			fmt.Sprintf("return(%v)", injectSafeTS)))
		tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
		tk.MustExec("admin check table t")
		// Original column a is back and the table is empty again.
		require.Equal(t, tk.MustQuery("select count(a) from t").Rows()[0][0], "0")
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
	}
}

// TestFlashbackCreateDropSchema verifies that schema-level DDL after ts
// (drop test, create test1/test2, drop test2) is reverted: test returns with
// its data; test1 and test2 no longer exist.
func TestFlashbackCreateDropSchema(t *testing.T) {
	if *realtikvtest.WithRealTiKV {
		store := realtikvtest.CreateMockStoreAndSetup(t)
		tk := testkit.NewTestKit(t, store)
		timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
		defer resetGC()
		tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
		tk.MustExec("use test")
		tk.MustExec("create table t(a int, index k(a))")
		tk.MustExec("insert into t values (1),(2)")
		time.Sleep(1 * time.Second)
		ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
		require.NoError(t, err)
		tk.MustExec("drop schema test")
		tk.MustExec("create schema test1")
		tk.MustExec("create schema test2")
		tk.MustExec("use test1")
		tk.MustGetErrCode("use test", errno.ErrBadDB)
		tk.MustExec("use test2")
		tk.MustExec("drop schema test2")
		injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
			fmt.Sprintf("return(%v)", injectSafeTS)))
		tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
		tk.MustExec("admin check table test.t")
		res := tk.MustQuery("select max(a) from test.t").Rows()
		require.Equal(t, res[0][0], "2")
		tk.MustGetErrCode("use test1", errno.ErrBadDB)
		tk.MustGetErrCode("use test2", errno.ErrBadDB)
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
	}
}
// TestFlashbackAutoID checks auto-increment behavior across FLASHBACK
// CLUSTER: the dropped table returns with its pre-flashback rows, and the
// next insert allocates from the next auto_id_cache chunk (101) — the
// allocator itself is not rewound.
func TestFlashbackAutoID(t *testing.T) {
if *realtikvtest.WithRealTiKV {
store := realtikvtest.CreateMockStoreAndSetup(t)
tk := testkit.NewTestKit(t, store)
// Pin the GC safe point so the flashback target timestamp stays readable.
timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
defer resetGC()
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("use test")
tk.MustExec("create table t(a int auto_increment, primary key(a)) auto_id_cache 100")
tk.MustExec("insert into t values (),()")
time.Sleep(1 * time.Second)
ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
require.NoError(t, err)
// Rows 3 and 4 are inserted after ts and must disappear after flashback.
tk.MustExec("insert into t values (),()")
res := tk.MustQuery("select max(a) from test.t").Rows()
require.Equal(t, res[0][0], "4")
tk.MustExec("drop table t")
injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
fmt.Sprintf("return(%v)", injectSafeTS)))
tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
tk.MustExec("admin check table t")
res = tk.MustQuery("select max(a) from t").Rows()
require.Equal(t, res[0][0], "2")
// The auto-id allocator continues from the next cache chunk, not from 3.
tk.MustExec("insert into t values ()")
res = tk.MustQuery("select max(a) from t").Rows()
require.Equal(t, res[0][0], "101")
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
}
}
// TestFlashbackSequence checks that a sequence dropped after the flashback
// timestamp is restored by FLASHBACK CLUSTER, and that nextval resumes at
// the next cache chunk (101) rather than the pre-drop value.
func TestFlashbackSequence(t *testing.T) {
if *realtikvtest.WithRealTiKV {
store := realtikvtest.CreateMockStoreAndSetup(t)
tk := testkit.NewTestKit(t, store)
// Pin the GC safe point so the flashback target timestamp stays readable.
timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
defer resetGC()
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("use test")
tk.MustExec("create sequence seq cache 100")
res := tk.MustQuery("select nextval(seq)").Rows()
require.Equal(t, res[0][0], "1")
time.Sleep(1 * time.Second)
ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
require.NoError(t, err)
// Consume one more value after ts, then drop the sequence entirely.
res = tk.MustQuery("select nextval(seq)").Rows()
require.Equal(t, res[0][0], "2")
tk.MustExec("drop sequence seq")
injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
fmt.Sprintf("return(%v)", injectSafeTS)))
tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
// flashback schema and skip cached values
res = tk.MustQuery("select nextval(seq)").Rows()
require.Equal(t, res[0][0], "101")
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
}
}
// TestFlashbackPartitionTable checks that FLASHBACK CLUSTER restores a
// partitioned table (dropped after partition DDL changes) with its original
// partition layout and all 100 rows, and that the restored table accepts
// new inserts within the original partition ranges.
func TestFlashbackPartitionTable(t *testing.T) {
if *realtikvtest.WithRealTiKV {
store := realtikvtest.CreateMockStoreAndSetup(t)
tk := testkit.NewTestKit(t, store)
// Pin the GC safe point so the flashback target timestamp stays readable.
timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
defer resetGC()
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("use test")
tk.MustExec("create table t(a int) partition by range(`a`) " +
"(partition `a_1` values less than (25), " +
"partition `a_2` values less than (75), " +
"partition `a_3` values less than (200))")
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("insert into t values (%d)", i))
}
time.Sleep(1 * time.Second)
ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
require.NoError(t, err)
// After ts: drop/re-add the last partition (losing rows >= 75), then drop
// the whole table.
tk.MustExec("alter table t drop partition `a_3`")
tk.MustExec("alter table t add partition (partition `a_3` values less than (300))")
res := tk.MustQuery("select max(a) from t").Rows()
require.Equal(t, res[0][0], "74")
tk.MustExec("drop table t")
injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
fmt.Sprintf("return(%v)", injectSafeTS)))
tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
tk.MustExec("admin check table t")
// All original rows 0..99 are back.
res = tk.MustQuery("select max(a), min(a), count(*) from t").Rows()
require.Equal(t, res[0][0], "99")
require.Equal(t, res[0][1], "0")
require.Equal(t, res[0][2], "100")
// The restored table remains writable across its partition range.
tk.MustExec("insert into t values (100), (-1)")
res = tk.MustQuery("select max(a), min(a), count(*) from t").Rows()
require.Equal(t, res[0][0], "100")
require.Equal(t, res[0][1], "-1")
require.Equal(t, res[0][2], "102")
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
}
}
// TestFlashbackTmpTable checks FLASHBACK CLUSTER interaction with temporary
// tables: data inserted after ts survives the flashback (max stays 3), and a
// temporary table dropped after ts is NOT restored (ErrNoSuchTable).
func TestFlashbackTmpTable(t *testing.T) {
if *realtikvtest.WithRealTiKV {
store := realtikvtest.CreateMockStoreAndSetup(t)
tk := testkit.NewTestKit(t, store)
// Pin the GC safe point so the flashback target timestamp stays readable.
timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
defer resetGC()
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("use test")
tk.MustExec("create temporary table t(a int)")
// test flashback tmp table data
time.Sleep(1 * time.Second)
ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
require.NoError(t, err)
tk.MustExec("insert into t values (1), (2), (3)")
injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
fmt.Sprintf("return(%v)", injectSafeTS)))
tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
// Rows inserted after ts are still visible: temp table data is untouched.
res := tk.MustQuery("select max(a) from t").Rows()
require.Equal(t, res[0][0], "3")
// test flashback tmp table schema
time.Sleep(1 * time.Second)
ts, err = tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
require.NoError(t, err)
tk.MustExec("drop table t")
injectSafeTS = oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
fmt.Sprintf("return(%v)", injectSafeTS)))
tk.MustExec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
// The dropped temporary table is not brought back by flashback.
tk.MustGetErrCode("select * from t", errno.ErrNoSuchTable)
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
}
}
// TestFlashbackInProcessErrorMsg verifies that while a flashback job is in
// its write-reorganization state, meta reads fail with an error message that
// includes a non-zero FlashbackStartTS. The check is performed from a DDL
// hook that fires before each job step.
func TestFlashbackInProcessErrorMsg(t *testing.T) {
if *realtikvtest.WithRealTiKV {
store, dom := realtikvtest.CreateMockStoreAndDomainAndSetup(t)
originHook := dom.DDL().GetHook()
tk := testkit.NewTestKit(t, store)
// Pin the GC safe point so the flashback target timestamp stays readable.
timeBeforeDrop, _, safePointSQL, resetGC := MockGC(tk)
defer resetGC()
tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop))
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int)")
time.Sleep(1 * time.Second)
ts, err := tk.Session().GetStore().GetOracle().GetTimestamp(context.Background(), &oracle.Option{})
require.NoError(t, err)
// do some ddl and dml
tk.MustExec("alter table t add index k(a)")
tk.MustExec("insert into t values (1), (2), (3)")
injectSafeTS := oracle.GoTimeToTS(oracle.GetTimeFromTS(ts).Add(100 * time.Second))
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/injectSafeTS",
fmt.Sprintf("return(%v)", injectSafeTS)))
hook := newTestCallBack(t, dom)
hook.OnJobRunBeforeExported = func(job *model.Job) {
// Only probe during the flashback job's write-reorg phase.
if job.Type == model.ActionFlashbackCluster && job.SchemaState == model.StateWriteReorganization {
txn, err := store.Begin()
assert.NoError(t, err)
// Meta reads must be rejected with the in-progress message and a
// non-zero start TS appended.
_, err = meta.NewMeta(txn).ListDatabases()
errorMsg := err.Error()
assert.Contains(t, errorMsg, "is in flashback progress, FlashbackStartTS is ")
slices := strings.Split(errorMsg, "is in flashback progress, FlashbackStartTS is ")
assert.Equal(t, len(slices), 2)
assert.NotEqual(t, slices[1], "0")
txn.Rollback()
}
}
dom.DDL().SetHook(hook)
// Result deliberately ignored: the hook's assertions are the point here,
// not whether the flashback itself succeeds -- presumably intentional.
tk.Exec(fmt.Sprintf("flashback cluster to timestamp '%s'", oracle.GetTimeFromTS(ts)))
dom.DDL().SetHook(originHook)
require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/injectSafeTS"))
}
}
// testCallback wraps a ddl.Callback and lets a test inject a function that
// runs before each DDL job step.
type testCallback struct {
ddl.Callback
// OnJobRunBeforeExported, when non-nil, is invoked by OnJobRunBefore.
OnJobRunBeforeExported func(job *model.Job)
}
// newTestCallBack builds a testCallback wrapping the default DDL hook.
// OnJobRunBeforeExported is left nil; callers set it before installing the
// hook via dom.DDL().SetHook.
func newTestCallBack(t *testing.T, dom *domain.Domain) *testCallback {
defHookFactory, err := ddl.GetCustomizedHook("default_hook")
require.NoError(t, err)
return &testCallback{
Callback: defHookFactory(dom),
}
}
// OnJobRunBefore forwards the job to the injected test hook, if one is set.
func (c *testCallback) OnJobRunBefore(job *model.Job) {
	if fn := c.OnJobRunBeforeExported; fn != nil {
		fn(job)
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"math"
"os"
"sort"
"time"
)
// Command-line flags controlling parsing and report output.
var (
weekly = flag.Duration("weekly", 24*time.Hour, "Weekly working time")
dateInLayout = flag.String("date-layout", "06-1-2", "Layout of date input")
timeInLayout = flag.String("time-layout", "1504", "Layout of time input")
quiet = flag.Bool("quiet", false, "Do not print column names")
thisWeek = flag.Bool("this-week", false, "Print only items from the current week")
)
// Reference-time layouts used when printing dates and times in the table.
const (
dateOutLayout = "Mon 02.01."
timeOutLayout = "15:04"
)
// entry is one parsed line of the time log. The pointer fields are optional:
// an entry that is still open has no end time and no duration.
type entry struct {
date time.Time
from *time.Time // start time; presumably always set by readFile -- confirm
to *time.Time // end time; nil while the entry is still open
duration *time.Duration // to minus from; nil when to is nil
}
// main reads a time-log file, aggregates worked time per day and per week,
// and prints a running table including the carry against the configured
// weekly working time.
func main() {
	// Set Usage before Parse so parse errors print our usage text; the
	// original assigned it after Parse, too late for that path.
	flag.Usage = usage
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}
	// get local location
	location := time.Now().Location()
	filename := flag.Arg(0)
	entries, err := readFile(filename, location)
	if err != nil {
		log.Fatal(err)
	}
	// Sort by date, breaking ties by start time. The original AND-ed both
	// conditions, which is not a valid strict ordering and could leave
	// same-day entries unsorted.
	sort.Slice(entries, func(i, j int) bool {
		if !entries[i].date.Equal(entries[j].date) {
			return entries[i].date.Before(entries[j].date)
		}
		// NOTE(review): assumes from is non-nil on every entry; readFile
		// presumably guarantees this -- confirm.
		return entries[i].from.Before(*entries[j].from)
	})
	year, week, day := 0, 0, 0
	dayYear := 0 // year of the day currently being summed (see below)
	weekTotal := time.Duration(0)
	dayTotal := time.Duration(0)
	carry := time.Duration(0)
	currentYear, currentWeek := time.Now().ISOWeek()
	if !*quiet {
		fmt.Println("Date From To Dur. Day Week Total")
	}
	for i, e := range entries {
		y, w := e.date.ISOWeek()
		if year != y || week != w {
			year, week = y, w
			weekTotal = time.Duration(0)
			carry -= *weekly
		}
		if d := e.date.YearDay(); d != day || y != dayYear {
			// edge case: if you only logged once a year but at the
			// same day, we would not know it were different days
			// if we did not check the year. A dedicated dayYear is
			// used here because `year` is already overwritten by the
			// week check above, which made the original comparison
			// always false.
			dayYear = y
			day = d
			dayTotal = time.Duration(0)
		}
		// assume last entry without duration ends now, if it is from today
		if lastEntry := i == len(entries)-1; lastEntry && e.to == nil && today(e.date) {
			t := time.Now().Truncate(time.Minute)
			e.to = &t
			d := e.to.Sub(*e.from)
			e.duration = &d
		}
		if e.duration != nil {
			weekTotal += *e.duration
			dayTotal += *e.duration
		} else {
			fmt.Printf("Missing duration on %v\n", e.date)
			continue
		}
		carry += *e.duration
		if *thisWeek && (y != currentYear || w != currentWeek) {
			continue
		}
		date := e.date.Format(dateOutLayout)
		fmt.Printf("%v %v-%v %v %v %v %v\n",
			date,
			e.from.Format(timeOutLayout),
			e.to.Format(timeOutLayout),
			d2s(*e.duration, false),
			d2s(dayTotal, false),
			d2s(weekTotal, false),
			d2s(carry, true))
	}
}
func d2s(d time.Duration, negPossible bool) string {
n := int64(d) / int64(time.Minute)
min := int64(math.Abs(float64(n % 60)))
hours := int64(n / 60)
if negPossible {
return fmt.Sprintf("%3vh%02dm", hours, min)
} else {
return fmt.Sprintf("%2vh%02dm", hours, min)
}
}
func today(t time.Time) bool {
o := time.Now()
return t.Day() == o.Day() && t.Month() == o.Month() && t.Year() == o.Year()
}
|
package cmd
// OutputDir the output directory where the built version of Authelia is located.
var OutputDir = "dist"
// DockerImageName the official name of Authelia docker image.
var DockerImageName = "authelia/authelia"
// IntermediateDockerImageName local name of the docker image.
var IntermediateDockerImageName = "authelia:dist"
const dockerhub = "docker.io"
const ghcr = "ghcr.io"
const masterTag = "master"
const stringFalse = "false"
const webDirectory = "web"
const fmtLDFLAGSX = "-X 'github.com/authelia/authelia/v4/internal/utils.%s=%s'"
// Short/long/example help strings for every authelia-scripts command.
const (
	cmdRootShort = "A utility used in the Authelia development process."

	cmdRootLong = `The authelia-scripts utility is utilized by developers and the CI/CD pipeline for configuring
testing suites and various other aspects of the environment.
It can be used to automate or manually run unit testing, integration testing, etc.`

	cmdRootExample = `authelia-scripts help`

	cmdBootstrapShort = "Prepare environment for development and testing"

	cmdBootstrapLong = `Prepare environment for development and testing.`

	cmdBootstrapExample = `authelia-scripts bootstrap`

	cmdBuildShort = "Build Authelia binary and static assets"

	cmdBuildLong = `Build Authelia binary and static assets.`

	cmdBuildExample = `authelia-scripts build`

	cmdCleanShort = "Clean build artifacts"

	cmdCleanLong = `Clean build artifacts.`

	cmdCleanExample = `authelia-scripts clean`

	cmdCIShort = "Run the continuous integration script"

	cmdCILong = `Run the continuous integration script.`

	cmdCIExample = `authelia-scripts ci`

	cmdDockerShort = "Commands related to building and publishing docker image"

	cmdDockerLong = `Commands related to building and publishing docker image.`

	cmdDockerExample = `authelia-scripts docker`

	cmdDockerBuildShort = "Build the docker image of Authelia"

	cmdDockerBuildLong = `Build the docker image of Authelia.`

	cmdDockerBuildExample = `authelia-scripts docker build`

	cmdDockerPushManifestShort = "Push Authelia docker manifest to the Docker registries"

	cmdDockerPushManifestLong = `Push Authelia docker manifest to the Docker registries.`

	cmdDockerPushManifestExample = `authelia-scripts docker push-manifest`

	cmdServeShort = "Serve compiled version of Authelia"

	cmdServeLong = `Serve compiled version of Authelia.`

	cmdServeExample = `authelia-scripts serve test.yml`

	cmdSuitesShort = "Commands related to suites management"

	cmdSuitesLong = `Commands related to suites management.`

	cmdSuitesExample = `authelia-scripts suites`

	cmdSuitesListShort = "List available suites"

	cmdSuitesListLong = `List available suites.
Suites can be ran with the authelia-scripts suites test [suite] command.`

	cmdSuitesListExample = `authelia-scripts suites list`

	cmdSuitesTestShort = "Run a test suite"

	cmdSuitesTestLong = `Run a test suite.
Suites can be listed with the authelia-scripts suites list command.`

	cmdSuitesTestExample = `authelia-scripts suites test Standalone`

	cmdSuitesSetupShort = "Setup a test suite environment"

	cmdSuitesSetupLong = `Setup a test suite environment.
Suites can be listed with the authelia-scripts suites list command.`

	cmdSuitesSetupExample = `authelia-scripts suites setup Standalone`

	cmdSuitesTeardownShort = "Teardown a test suite environment"

	cmdSuitesTeardownLong = `Teardown a test suite environment.
Suites can be listed with the authelia-scripts suites list command.`

	// Fixed: the teardown example previously said "suites setup",
	// copy-pasted from the setup command.
	cmdSuitesTeardownExample = `authelia-scripts suites teardown Standalone`

	cmdUnitTestShort = "Run unit tests"

	cmdUnitTestLong = `Run unit tests.`

	cmdUnitTestExample = `authelia-scripts unittest`

	cmdXFlagsShort = "Generate X LDFlags for building Authelia"

	cmdXFlagsLong = `Generate X LDFlags for building Authelia.`

	cmdXFlagsExample = `authelia-scripts xflags`
)
|
package main
import (
"fmt"
"log"
"os"
"github.com/urfave/cli"
)
// init registers the "listen" command, which tails messages from Kafka and
// prints either the whole JSON payload (--all) or just the "series" field.
func init() {
	app.Commands = append(app.Commands,
		cli.Command{
			Name:  "listen",
			Usage: "tails messages from kafka",
			Action: func(c *cli.Context) {
				// An optional positional argument overrides the topic flag.
				if len(c.Args()) > 0 && c.Args()[0] != "" {
					globalFlags.Topic = c.Args()[0]
				}
				consumer, err := CreateConsumer(map[string]string{
					"consumer-type": "kafka",
					"brokers":       globalFlags.Brokers,
					"groupid":       globalFlags.Groupid,
					"topics":        globalFlags.Topic,
				})
				if err != nil {
					log.Printf("Failed to create consumer: %v", err)
					// Fixed: the original fell through after logging and
					// dereferenced the nil consumer below.
					return
				}
				go func(messages <-chan *Message) {
					count := 0
					for msg := range messages {
						count++
						if count%100 == 0 {
							// Warn when this display goroutine falls behind
							// the consumer.
							if len(messages) > 100 {
								fmt.Fprintf(os.Stderr, "Display goroutine is not fast enough: %d\n", len(messages))
							}
							fmt.Fprintf(os.Stdout, "%d\r", count)
						}
						if c.Bool("all") {
							fmt.Printf("%s\n", msg.Data)
						} else {
							msg.ParseJSON()
							series, _ := msg.GetString("series")
							fmt.Printf("%s", series)
						}
					}
				}(consumer.GetMsgChan())
				consumer.StartConsuming()
				consumer.Wait()
			},
			Flags: []cli.Flag{
				cli.BoolFlag{
					Name:  "all",
					Usage: "Print entire JSON, not just series metrics",
				},
			},
		})
}
|
package dp
// FibonacciRecursive computes the n-th Fibonacci number by plain recursion —
// no dynamic programming is used, so solved subproblems are recomputed.
// Time Complexity: O(2^n)
// Space Complexity: O(2^n) due to the call stack
func FibonacciRecursive(n int) int {
	if n > 2 {
		return FibonacciRecursive(n-1) + FibonacciRecursive(n-2)
	}
	return 1
}
// FibRecursionWithMemoization uses both recursion and memoization. The cache
// is shared across all recursive calls; the original allocated a fresh cache
// in every call, so no result was ever reused and the runtime stayed
// exponential.
// Time Complexity: O(n)
// Space Complexity: O(n)
func FibRecursionWithMemoization(n int) int {
	return fibMemo(n, map[int]int{})
}

// fibMemo computes the n-th Fibonacci number, recording every solved
// subproblem in cache so it is computed at most once.
func fibMemo(n int, cache map[int]int) int {
	if n <= 2 {
		return 1
	}
	if v, ok := cache[n]; ok {
		return v
	}
	cache[n] = fibMemo(n-1, cache) + fibMemo(n-2, cache)
	return cache[n]
}
// FibonacciBottomUp computes the n-th Fibonacci number bottom-up, memoizing
// every intermediate value.
// Time Complexity: O(n)
// Space Complexity: O(n) — extra space for the memo table
func FibonacciBottomUp(n int) int {
	memo := map[int]int{}
	for i := 1; i <= n; i++ {
		switch {
		case i <= 2:
			memo[i] = 1
		default:
			memo[i] = memo[i-1] + memo[i-2]
		}
	}
	return memo[n]
}
// FibonacciBottomUp2 computes the n-th Fibonacci number bottom-up.
// Time Complexity: O(n)
// Space Complexity: O(1) — only the last two answers are kept
func FibonacciBottomUp2(n int) int {
	prev, curr := 0, 0
	for i := 1; i <= n; i++ {
		if i <= 2 {
			prev, curr = 1, 1
			continue
		}
		prev, curr = curr, prev+curr
	}
	return curr
}
|
package collect
import (
"github.com/robertang/collector/cncf"
"github.com/robertang/collector/common"
"github.com/robertang/collector/metric"
"fmt"
"github.com/robertang/collector/output"
)
// Collector ties one parsed configuration to the metrics and outputs
// instantiated from it.
type Collector struct {
config cncf.Yml
metrics []common.Metric
outputs []common.Output
}
// _collectors accumulates every collector built by Collect; repeated calls
// append, they never reset. NOTE(review): the underscore prefix is
// unconventional Go naming -- renaming would touch other files, so it is
// kept.
var _collectors = make([]Collector, 0)
// Collect builds a collector for every configuration and starts each of its
// metrics against the collector's first output.
func Collect(ymls []cncf.Yml) {
	for _, cfg := range ymls {
		_collectors = append(_collectors, newCollector(cfg))
	}
	for _, collector := range _collectors {
		for _, m := range collector.metrics {
			// NOTE(review): only the first configured output is used;
			// any additional outputs are ignored -- confirm intended.
			if len(collector.outputs) > 0 {
				m.Start(collector.outputs[0])
			}
		}
	}
}
// newCollector instantiates the metrics and outputs described by yml and
// bundles them with the configuration.
func newCollector(yml cncf.Yml) Collector {
	return Collector{
		config:  yml,
		metrics: newMetrics(yml.Metric),
		outputs: newOutputs(yml.Output),
	}
}
// newMetrics instantiates one Metric per config entry, skipping (and
// logging) entries whose module cannot be resolved.
func newMetrics(mcs []cncf.MetricConfig) []common.Metric {
	metrics := make([]common.Metric, 0)
	for _, conf := range mcs {
		mod, err := metric.GetModule(conf.Module)
		if err != nil {
			fmt.Println(err)
			continue
		}
		metrics = append(metrics, mod.NewMetric(conf))
	}
	return metrics
}
// newOutputs instantiates one Output per config entry, skipping (and
// logging) entries whose module cannot be resolved.
func newOutputs(ocs []cncf.OutputConfig) []common.Output {
	outs := make([]common.Output, 0)
	for _, conf := range ocs {
		mod, err := output.GetModule(conf.Module)
		if err != nil {
			fmt.Println(err)
			continue
		}
		outs = append(outs, mod.NewOutput(conf))
	}
	return outs
}
|
package registry
import (
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
"github.com/iotaledger/wasp/packages/parameters"
flag "github.com/spf13/pflag"
)
const (
// CfgRewardAddress defines the config flag of the reward address for this
// Wasp node. (The previous comment described CfgBindAddress, copied from
// another file.)
CfgRewardAddress = "reward.address"
)
// InitFlags registers the registry package's command-line flags.
func InitFlags() {
flag.String(CfgRewardAddress, "", "reward address for this Wasp node. Empty (default) means no rewards are collected")
}
// GetFeeDestination returns the address to which fees should be sent.
// It currently ignores scaddr (see TODO) and uses the node-wide reward
// address flag; when that flag is empty or not valid base58, the zero
// address is returned, meaning no rewards are collected.
func GetFeeDestination(scaddr *address.Address) address.Address {
//TODO
ret, err := address.FromBase58(parameters.GetString(CfgRewardAddress))
if err != nil {
// Invalid or empty configuration: fall back to the zero address.
ret = address.Address{}
}
return ret
}
|
package main
import (
//"math"
"fmt"
)
// main reads two integers and prints YES when they differ by at most one
// (except 0 0, which prints NO).
func main() {
	var a, b int
	fmt.Scan(&a, &b)
	diff := a - b
	switch {
	case a == 0 && b == 0:
		fmt.Printf("NO")
	case diff >= -1 && diff <= 1:
		fmt.Printf("YES")
	default:
		fmt.Printf("NO")
	}
}
|
package concur_test
import (
"errors"
"fmt"
"strings"
"testing"
"github.com/stevenmatthewt/concur"
)
// TestConcurrentRunnerSimple runs nine independent jobs concurrently and
// verifies that each one was invoked exactly once.
func TestConcurrentRunnerSimple(t *testing.T) {
	jobs := make([]MockJob, 9)
	tasks := make([]concur.Task, len(jobs))
	for i := range jobs {
		tasks[i] = &jobs[i]
	}
	if err := concur.Concurrent().Run(tasks...); err != nil {
		t.Error(err)
	}
	for i, job := range jobs {
		if job.invoked != 1 {
			t.Errorf("jobs[%d] invoked incorrect number of times: %d", i, job.invoked)
		}
	}
}
// TestConcurrentRunnerUneven passes the same job multiple times and checks
// that invocation counts match how often each job appeared in the run.
func TestConcurrentRunnerUneven(t *testing.T) {
	var job1, job2, job3 MockJob
	if err := concur.Concurrent().Run(&job1, &job2, &job2, &job3, &job3, &job3); err != nil {
		t.Error(err)
	}
	if job1.invoked != 1 {
		t.Errorf("job1 invoked incorrect number of times: %d", job1.invoked)
	}
	if job2.invoked != 2 {
		t.Errorf("job2 invoked incorrect number of times: %d", job2.invoked)
	}
	if job3.invoked != 3 {
		t.Errorf("job3 invoked incorrect number of times: %d", job3.invoked)
	}
}
// At the moment, concur does nothing to avoid race conditions, so this
// test has been disabled. I don't believe it is possible for concur
// to avoid race conditions in any practical scenarios, so this will
// likely never be supported.
// func TestConcurrentRunnerRaceCondition(t *testing.T) {
// const numRuns = 100000
// mockJob := UnatomicMockJob{}
// mockJobs := make([]*UnatomicMockJob, numRuns)
// tasks := make([]concur.Task, len(mockJobs))
// for i := range mockJobs {
// // We want all of the jobs to be the same
// // That's how we'll test for a race condition
// mockJobs[i] = &mockJob
// tasks[i] = mockJobs[i]
// }
// err := concur.Concurrent().Run(tasks...)
// if err != nil {
// t.Error(err)
// }
// if mockJob.invoked != numRuns {
// t.Errorf("mockJob.invoked incorrect number of times: %d", mockJob.invoked)
// }
// }
// TestConcurrentRunnerReturnData verifies that each job is invoked once and
// receives/echoes its own distinct return value.
func TestConcurrentRunnerReturnData(t *testing.T) {
	const jobCount = 42
	mockJobs := make([]MockJob, jobCount)
	tasks := make([]concur.Task, jobCount)
	for i := 0; i < jobCount; i++ {
		mockJobs[i].returnValue = fmt.Sprintf("MockJobReturnValue%d", i)
		tasks[i] = &mockJobs[i]
	}
	if err := concur.Concurrent().Run(tasks...); err != nil {
		t.Error(err)
	}
	for i, job := range mockJobs {
		if job.invoked != 1 {
			t.Errorf("jobs[%d] invoked incorrect number of times: %d", i, job.invoked)
		}
		if job.actualReturnValue != fmt.Sprintf("MockJobReturnValue%d", i) {
			t.Errorf("jobs[%d] returned incorrect results: %s", i, job.actualReturnValue)
		}
	}
}
// TestConcurrentRunnerPanic verifies that one panicking task surfaces as a
// "panic" error from Run while the remaining 99 tasks (all pointing at the
// same shared MockJob) still execute.
func TestConcurrentRunnerPanic(t *testing.T) {
const numRuns = 100
mockJob := MockJob{}
panicJob := PanicJob{}
mockJobs := make([]*MockJob, numRuns)
tasks := make([]concur.Task, len(mockJobs))
for i := range mockJobs {
// Halfway through, we'll have it panic
if i == numRuns/2 {
tasks[i] = &panicJob
} else {
// Every other slot shares the same mockJob, so its invoked counter
// sums all non-panicking executions.
mockJobs[i] = &mockJob
tasks[i] = mockJobs[i]
}
}
err := concur.Concurrent().Run(tasks...)
if err == nil || !strings.Contains(err.Error(), "panic") {
t.Errorf("received incorrect error: %v", err)
}
if mockJob.invoked != numRuns-1 {
t.Errorf("mockJob.invoked incorrect number of times: %d", mockJob.invoked)
}
}
// TestConcurrentRunnerErrors verifies that when every job fails, the
// combined error from Run contains all three individual error messages and
// that each job still ran exactly once.
func TestConcurrentRunnerErrors(t *testing.T) {
	job1 := MockJob{returnError: errors.New("job1 error")}
	job2 := MockJob{returnError: errors.New("job2 error")}
	job3 := MockJob{returnError: errors.New("job3 error")}
	err := concur.Concurrent().Run(&job1, &job2, &job3)
	if err == nil {
		t.Error("should have received error")
	} else if !strings.Contains(err.Error(), "job1 error") ||
		!strings.Contains(err.Error(), "job2 error") ||
		// Fixed: the original checked "job2 error" twice and never
		// asserted that job3's error was present.
		!strings.Contains(err.Error(), "job3 error") {
		t.Errorf("error missing information: %v", err)
	}
	if job1.invoked != 1 {
		t.Errorf("job1 invoked incorrect number of times: %d", job1.invoked)
	}
	if job2.invoked != 1 {
		t.Errorf("job2 invoked incorrect number of times: %d", job2.invoked)
	}
	if job3.invoked != 1 {
		t.Errorf("job3 invoked incorrect number of times: %d", job3.invoked)
	}
}
|
/*
* @lc app=leetcode.cn id=239 lang=golang
*
* [239] 滑动窗口最大值
*/
// @lc code=start
package main
import "fmt"
import "math"
// maxSlidingWindow returns the maximum of every length-k window sliding over
// nums. It keeps the position of the current maximum: while it stays inside
// the window only the newly entered element is compared; once it slides out,
// the window is rescanned.
// Changes from the original: leftover debug fmt.Printf calls are removed,
// and the index returned by findMax (relative to the subslice) is now offset
// by the window start before being stored as an absolute position — the old
// code stored the relative index, which forced needless rescans (results
// were still correct, only slower).
func maxSlidingWindow(nums []int, k int) []int {
	if len(nums) <= 0 {
		return []int{}
	}
	max, maxPos := math.MinInt32, -1
	if k >= len(nums) {
		// A single window covers the whole slice.
		max, _ = findMax(nums)
		return []int{max}
	}
	res := []int{}
	for i := 0; i < len(nums)-k+1; i++ {
		low := i
		upper := i + k - 1
		if maxPos >= low {
			// Previous maximum is still inside the window: only the
			// element that just entered can beat it.
			if max < nums[upper] {
				max = nums[upper]
				maxPos = upper
			}
			res = append(res, max)
		} else {
			// Previous maximum slid out of the window: rescan it.
			max, maxPos = findMax(nums[low : upper+1])
			maxPos += low // findMax's index is relative to the subslice
			res = append(res, max)
		}
	}
	return res
}

// @lc code=end

// findMax returns the maximum value in nums and its index.
func findMax(nums []int) (max int, pos int) {
	max = math.MinInt64
	for k, v := range nums {
		if v > max {
			max = v
			pos = k
		}
	}
	return
}
// main exercises maxSlidingWindow on two sample inputs and prints both the
// input and the result.
func main() {
	nums := []int{1, 3, -1, -3, 5, 3, 6, 7}
	fmt.Printf("%v, %v\n", nums, maxSlidingWindow(nums, 5))
	nums = []int{1}
	fmt.Printf("%v, %v\n", nums, maxSlidingWindow(nums, 1))
}
|
package main
import (
//"fmt"
"sort"
)
// contains reports whether slice s holds the value e.
func contains(s []int, e int) bool {
	for _, v := range s {
		if v == e {
			return true
		}
	}
	return false
}
// Elevator models a single elevator car: its identity, movement state, the
// floors still to visit, and those already serviced.
type Elevator struct {
ID string
status string // "idle", "moving", or "stopped"
amountOfFloors int
direction string // "up", "down", or "" when idle
currentFloor int
door Door
floorRequestsList []int // pending floors, head is serviced next
completedRequestsList []int // floors already serviced, in completion order
}
// NewElevator constructs an Elevator on the given floor with no direction
// and a default door (Door{1, ""} — presumably id 1 with empty status).
func NewElevator(_id, _status string, _amountOfFloors, _currentFloor int) *Elevator {
	return &Elevator{
		ID:             _id,
		status:         _status,
		amountOfFloors: _amountOfFloors,
		currentFloor:   _currentFloor,
		direction:      "",
		door:           Door{1, ""},
	}
}
// move services pending floor requests until the queue drains. For each trip
// it targets the head request, sorts the remaining queue in travel order,
// simulates travel one floor at a time, opens the doors, and retires exactly
// the floor it traveled to. Ends idle.
func (e *Elevator) move() {
	for len(e.floorRequestsList) != 0 {
		destination := e.floorRequestsList[0]
		e.status = "moving"
		if e.currentFloor < destination {
			e.direction = "up"
			e.sortFloorList()
			for e.currentFloor < destination {
				e.currentFloor++
			}
		} else if e.currentFloor > destination {
			e.direction = "down"
			e.sortFloorList()
			for e.currentFloor > destination {
				e.currentFloor--
			}
		}
		e.status = "stopped"
		e.operateDoors()
		// Retire the floor that was actually visited. The original always
		// removed index 0, but sortFloorList may have moved a different
		// floor to the head — leaving the visited request queued and
		// marking an unvisited floor as completed.
		e.completedRequestsList = append(e.completedRequestsList, destination)
		for i, floor := range e.floorRequestsList {
			if floor == destination {
				e.floorRequestsList = append(e.floorRequestsList[:i], e.floorRequestsList[i+1:]...)
				break
			}
		}
	}
	e.status = "idle"
}
// sortFloorList orders pending requests in travel order: ascending when
// going up, descending when going down. The original "down" branch sorted
// ascending and then self-assigned the slice (a no-op), leaving downward
// requests in the wrong order.
func (e *Elevator) sortFloorList() {
	if e.direction == "up" {
		sort.Ints(e.floorRequestsList)
	} else if e.direction == "down" {
		sort.Sort(sort.Reverse(sort.IntSlice(e.floorRequestsList)))
	}
}
// operateDoors opens the door when the elevator is stopped or idle; if no
// requests remain it also clears the direction and marks the car idle.
func (elevator *Elevator) operateDoors() {
	if elevator.status != "stopped" && elevator.status != "idle" {
		return
	}
	elevator.door.status = "open"
	if len(elevator.floorRequestsList) < 1 {
		elevator.direction = ""
		elevator.status = "idle"
	}
}
// addNewRequest queues requestedFloor (ignoring duplicates) and points the
// elevator's direction toward it. Note the direction is updated even when
// the floor was already queued.
// NOTE(review): containsElement is defined elsewhere; this file also has a
// local contains(slice, value) helper -- presumably duplicates, confirm and
// consolidate.
func (e *Elevator) addNewRequest(requestedFloor int) {
if !containsElement(requestedFloor, e.floorRequestsList) {
e.floorRequestsList = append(e.floorRequestsList, requestedFloor)
}
if e.currentFloor < requestedFloor {
e.direction = "up"
}
if e.currentFloor > requestedFloor {
e.direction = "down"
}
}
|
// Package neunet provides a basic implementation of an artificial neural net.
package neunet
import (
)
// Parameters holds the global H and gain parameters. See NewParameters for
// details on each value.
type Parameters struct {
	H    float64 // Coefficient for the sigmoid function (passed to NewComputablePerceptron).
	Gain float64 // Learning gain (passed to NewComputablePerceptron).
}

// NewParameters creates and returns a new Parameters object. `h` is the
// coefficient for the sigmoid function and `gain` is the learning gain. If
// unsure what to use, 1.0 and 5.0 tend to be a good starting point.
func NewParameters(h float64, gain float64) (params *Parameters) {
	return &Parameters{H: h, Gain: gain}
}
// NeuralNet represents a neural network as an ordered list of layers plus
// direct references to the input and output layers.
type NeuralNet struct {
Layers []Layer // The layers in the network.
InputLayer Layer // The input layer (must be in Layers as well).
OutputLayer Layer // The output layer (must be in Layers as well).
Params *Parameters // The H and gain parameters.
prevLayer Layer // The last layer added; used to wire the next layer.
}
// NewNeuralNet creates and returns an empty NeuralNet using the given
// parameters; layers are added afterwards via the Add*Layer methods.
func NewNeuralNet(params *Parameters) (net *NeuralNet) {
	return &NeuralNet{
		Layers: make([]Layer, 0, 3),
		Params: params,
		// InputLayer, OutputLayer, and prevLayer start nil until layers
		// are added.
	}
}
// AddInputLayer creates an input layer of the given size, appends it to the
// net, and records it as both the input layer and the most recent layer.
func (net *NeuralNet) AddInputLayer(size int) (layer Layer) {
	layer = NewInputLayer(size)
	net.InputLayer = layer
	net.prevLayer = layer
	net.Layers = append(net.Layers, layer)
	return layer
}
// AddHiddenLayer creates a computable layer of the given size fed by the
// previously added layer and appends it to the net.
func (net *NeuralNet) AddHiddenLayer(size int) (layer Layer) {
	layer = NewComputableLayer(net.prevLayer, size, net.Params)
	net.prevLayer = layer
	net.Layers = append(net.Layers, layer)
	return layer
}
// AddOutputLayer creates a computable layer of the given size fed by the
// previously added layer, appends it to the net, and records it as the
// output layer.
func (net *NeuralNet) AddOutputLayer(size int) (layer Layer) {
	layer = NewComputableLayer(net.prevLayer, size, net.Params)
	net.OutputLayer = layer
	net.prevLayer = layer
	net.Layers = append(net.Layers, layer)
	return layer
}
// SetInput sets the value of the input perceptron numbered `num` to `value`.
func (net *NeuralNet) SetInput(num int, value float64) {
	net.InputLayer[num].(*InputPerceptron).SetValue(value)
}
// GetOutput returns the value of the output perceptron numbered `num`.
func (net *NeuralNet) GetOutput(num int) (value float64) {
	return net.OutputLayer[num].GetValue()
}
// Propagate calls Compute on every perceptron, layer by layer in the order
// the layers were added.
func (net *NeuralNet) Propagate() {
	for _, l := range net.Layers {
		for _, perceptron := range l {
			perceptron.Compute()
		}
	}
}
// CalculateOutputError returns the summed squared error, 0.5 * Σ(expected−actual)²,
// between expectedOutputs and the current output perceptron values.
// The result is named totalError instead of the original `error`, which
// shadowed the builtin error type.
func (net *NeuralNet) CalculateOutputError(expectedOutputs []float64) (totalError float64) {
	for i, p := range net.OutputLayer {
		thisError := expectedOutputs[i] - p.GetValue()
		totalError += 0.5 * thisError * thisError
	}
	return totalError
}
// BackPropagate updates weights from the output layer inward using the
// delta rule for a sigmoid activation: delta = v*(1-v)*errTerm. The output
// layer's error term is (expected - actual); hidden layers use the weighted
// sum of the next layer's deltas.
// NOTE(review): the hidden-layer pass reads output.Error, presumably stored
// by AdjustWeights when the next layer was processed -- confirm.
func (net *NeuralNet) BackPropagate(expectedOutputs []float64) {
for i, p_ := range net.OutputLayer {
p := p_.(*ComputablePerceptron)
v := p.GetValue()
error := v * (1.0 - v) * (expectedOutputs[i] - v)
p.AdjustWeights(error)
}
// Walk hidden layers from last to first; index 0 is the input layer, which
// has no incoming weights and is skipped.
for i := len(net.Layers) - 2; i > 0; i-- {
layer := net.Layers[i]
for _, p_ := range layer {
p := p_.(*ComputablePerceptron)
v := p.GetValue()
var sum float64 = 0.0
// Accumulate the next layer's deltas weighted by the connection to
// this perceptron.
for _, output_ := range net.Layers[i + 1] {
output := output_.(*ComputablePerceptron)
sum += output.GetIncomingWeight(p_) * output.Error
}
error := v * (1.0 - v) * sum
p.AdjustWeights(error)
}
}
}
// Train performs one training cycle: it loads inputs, propagates, measures
// the output error, then back-propagates. The returned error distance is
// the one measured before the weight update.
func (net *NeuralNet) Train(inputs []float64, expectedOutputs []float64) (errDistance float64) {
	for i, p := range net.InputLayer {
		p.(*InputPerceptron).SetValue(inputs[i])
	}
	net.Propagate()
	errDistance = net.CalculateOutputError(expectedOutputs)
	net.BackPropagate(expectedOutputs)
	return errDistance
}
|
package grpcclient
import (
//"net"
"time"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc"
pb "github.com/navinds25/grpcGoExpts/eaconnproto"
)
const (
// address is the publisher service endpoint the client dials.
address = "localhost:50051"
)
// GrpcClient dials the local publisher service, sends a single Config
// message with a one-second timeout, and logs the response's Valid field.
// Exits the process on connection or send failure.
func GrpcClient() {
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewPublisherClient(conn)
	name := "myfile"
	filename := "myfilename"
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	r, err := c.SendConfig(ctx, &pb.Config{Name: name, Filename: filename})
	if err != nil {
		// Fixed message: this call sends a config; the original said
		// "could not greet", copy-pasted from the gRPC helloworld example.
		log.Fatalf("could not send config: %v", err)
	}
	// Fixed spelling ("Recieved" -> "Received").
	// NOTE(review): %s assumes r.Valid is string-like -- confirm against
	// the generated proto type.
	log.Printf("Received %s", r.Valid)
}
|
// Package commands contains commands for the Kong library
package commands
|
package main
// 这个示例程序展示如何写基础单元测试
import (
"net/http"
"testing"
)
const checkMark = "\u2713" // check mark (√) printed for passing steps
const ballotX = "\u2717" // ballot X (×) printed for failing steps
// TestDownload confirms that the http package's Get function can download
// content, checking both the call's success and the returned status code.
func TestDownload(t *testing.T) {
//url := "http://2cifang.com"
url := "https://www.baidu.com/"
statusCode := 200
t.Log("Given the need to test downloading content.")
{
t.Logf("\tWhen checking \"%s\" for status code \"%d\"", url, statusCode)
{
resp, err := http.Get(url)
if err != nil {
t.Fatal("\t\tShould be able to make the Get call.", ballotX, err)
}
t.Log("\t\tShould be able to make the Get call.", checkMark)
// Close the body once the check below has run.
defer resp.Body.Close()
if resp.StatusCode == statusCode {
t.Logf("\t\tShould receive a \"%d\" status. %v", statusCode, checkMark)
} else {
t.Errorf("\t\tShould receive a \"%d\" status. %v %v", statusCode, ballotX, resp.StatusCode)
}
}
}
}
/*
1.测试对象
1.包
2.程序的一部分
3.函数
2.目的: 验证目标代码能否在给定场景达到预期效果
3.测试场景
1.正向测试: 正常执行情况下, 保证代码不产生错误
eg. 确认代码是否可以成功向数据库中插入一条记录
2.反向测试: 保证代码不仅产生错误, 且是预期的错误
eg. 可能原因: 对数据库查询时没有找到任何结果, 或对数据库做了无效更新
3.单元测试
1.基础测试: 只使用一组参数和预期结果来测试一段代码
2.表组测试: 多
*/
/*
1.Go语言测试工具
1.testing包对Go package的自动化测试提供了支持
1.提供从测试框架到报告测试的输出和状态的各种测试功能的支持
2.测试框架识别测试对象约定
1.测试文件
1.必须以 _test.go 结尾
2.测试函数
1.签名: 必须公开 ~ 导出
1.格式: TestXxx ~ 1.以Test开头, 自定义部分Xxx必须首字母大写 ~ 大驼峰
2.形参: 必须是test.T类型的指针
1.T类型管理测试状态和格式化的测试日志
3.返回值: 无
*/
|
package main
// maxArea solves the container-with-most-water problem with two pointers.
// The shorter of the two lines bounds the water height, so the pointer at the
// shorter line moves inward looking for a taller one; on ties both move.
func maxArea(height []int) int {
	best := 0
	for lo, hi := 0, len(height)-1; lo < hi; {
		depth := MinInt(height[lo], height[hi])
		best = MaxInt(best, depth*(hi-lo))
		if height[lo] > height[hi] {
			hi--
		} else if height[lo] < height[hi] {
			lo++
		} else {
			lo++
			hi--
		}
	}
	return best
}
// MaxInt returns the larger of a and b.
func MaxInt(a, b int) int {
	if b >= a {
		return b
	}
	return a
}
// MinInt returns the smaller of a and b.
func MinInt(a, b int) int {
	if b <= a {
		return b
	}
	return a
}
|
package client
import "os"
// reboot is a stub for dev machines: it logs a warning and exits instead of
// actually rebooting the host.
//
// NOTE(review): only "os" is imported in the visible import block, and the
// stdlib log package has no Warningf — presumably a project logger named
// `log` is imported elsewhere in this file; confirm this compiles.
func reboot() {
	log.Warningf("Not rebooting. As you're on a mac and probably don't want to actually boot your dev machine. You're welcome.")
	os.Exit(0)
}
|
package router
import (
"github.com/kataras/iris"
"github.com/kataras/iris/context"
"log"
)
// HTTP method identifiers used to route registered actions.
const ACTION_METHOD_TYPE_GET = "GET"
const ACTION_METHOD_TYPE_POST = "POST"
const ACTION_METHOD_TYPE_PUT = "PUT"
const ACTION_METHOD_TYPE_DELETE = "DELETE"
// ACTION_METHOD_TYPE_ANY registers a catch-all handler for every method.
const ACTION_METHOD_TYPE_ANY = "ANY"
// Controller groups a set of routed actions under a common base path, with
// optional hooks run around each request.
type Controller struct {
	Name     string    `json:"name"`      // display name used in registration logs
	BasePath string    `json:"base_path"` // prefix prepended to every action path
	Actions  []*Action `json:"actions"`   // registered actions, in insertion order
	// PreRequest runs before every action handler (registered as middleware).
	PreRequest func(ctx context.Context)
	// PostRequest is declared but not referenced by the registration code in
	// this file — NOTE(review): verify it is wired up elsewhere.
	PostRequest func(ctx context.Context)
}
// Action describes a single route: its path, HTTP method, required access
// level, and the handler to invoke.
type Action struct {
	Path        string `json:"path"`
	Method      string `json:"method"`
	AccessLevel string
	Process     func(ctx context.Context)
}
// Get registers a GET action at the given path.
func (c *Controller) Get(path string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_GET, Process: process}
	c.Actions = append(c.Actions, a)
}

// Post registers a POST action at the given path.
func (c *Controller) Post(path string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_POST, Process: process}
	c.Actions = append(c.Actions, a)
}

// Put registers a PUT action at the given path.
func (c *Controller) Put(path string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_PUT, Process: process}
	c.Actions = append(c.Actions, a)
}

// Delete registers a DELETE action at the given path.
func (c *Controller) Delete(path string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_DELETE, Process: process}
	c.Actions = append(c.Actions, a)
}

// Any registers a catch-all action at the given path.
func (c *Controller) Any(path string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_ANY, Process: process}
	c.Actions = append(c.Actions, a)
}
// GetWithRole registers a GET action guarded by the given access level.
func (c *Controller) GetWithRole(path string, accessLevel string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_GET, Process: process, AccessLevel: accessLevel}
	c.Actions = append(c.Actions, a)
}

// PostWithRole registers a POST action guarded by the given access level.
func (c *Controller) PostWithRole(path string, accessLevel string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_POST, Process: process, AccessLevel: accessLevel}
	c.Actions = append(c.Actions, a)
}

// PutWithRole registers a PUT action guarded by the given access level.
func (c *Controller) PutWithRole(path string, accessLevel string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_PUT, Process: process, AccessLevel: accessLevel}
	c.Actions = append(c.Actions, a)
}

// DeleteWithRole registers a DELETE action guarded by the given access level.
func (c *Controller) DeleteWithRole(path string, accessLevel string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_DELETE, Process: process, AccessLevel: accessLevel}
	c.Actions = append(c.Actions, a)
}

// AnyWithRole registers a catch-all action guarded by the given access level.
func (c *Controller) AnyWithRole(path string, accessLevel string, process func(ctx context.Context)) {
	a := &Action{Path: path, Method: ACTION_METHOD_TYPE_ANY, Process: process, AccessLevel: accessLevel}
	c.Actions = append(c.Actions, a)
}
var controllers []*Controller
// ProcessController registers every initialized controller's actions on the
// iris application, routing each action by its HTTP method; unrecognized
// methods are registered as catch-all Any handlers.
func ProcessController(app *iris.Application) {
	if controllers == nil {
		log.Println("there aren't any controllers initialized yet..")
		return
	}
	for _, c := range controllers {
		log.Println("Registering controller " + c.Name + " at " + c.BasePath)
		for _, a := range c.Actions {
			log.Println("----> Action " + a.Method + ":" + a.Path)
			// Go switch cases never fall through, so the explicit `break`
			// statements in the original were redundant and are removed.
			switch a.Method {
			case ACTION_METHOD_TYPE_GET:
				app.Get(c.BasePath+a.Path, c.PreRequest, a.Process)
			case ACTION_METHOD_TYPE_POST:
				app.Post(c.BasePath+a.Path, c.PreRequest, a.Process)
			case ACTION_METHOD_TYPE_PUT:
				app.Put(c.BasePath+a.Path, c.PreRequest, a.Process)
			case ACTION_METHOD_TYPE_DELETE:
				app.Delete(c.BasePath+a.Path, c.PreRequest, a.Process)
			default:
				app.Any(c.BasePath+a.Path, c.PreRequest, a.Process)
			}
		}
	}
}
// ProcessControllerWithJwt registers every initialized controller's actions
// on the iris application, prepending role-check and JWT-validation
// middleware according to each action's AccessLevel before its handler.
func ProcessControllerWithJwt(app *iris.Application) {
	if controllers == nil {
		log.Println("there aren't any controllers initialized yet..")
		return
	}
	for _, c := range controllers {
		log.Println("Registering controller " + c.Name + " at " + c.BasePath)
		for _, a := range c.Actions {
			log.Println("----> Action " + a.Method + ":" + a.Path)
			// Build the middleware chain: pre-request hook, optional
			// role/JWT checks, then the action handler itself.
			handlers := []context.Handler{c.PreRequest}
			switch a.AccessLevel {
			case RolePublic:
				handlers = append(handlers, CheckPublic, JwtValidator)
			case RoleMemberRegistered:
				handlers = append(handlers, CheckRegistered, JwtValidator)
			case RoleMemberApproved:
				handlers = append(handlers, CheckApproved, JwtValidator)
			}
			handlers = append(handlers, a.Process)
			// Go switch cases never fall through; the original `break`
			// statements were redundant and are removed.
			switch a.Method {
			case ACTION_METHOD_TYPE_GET:
				app.Get(c.BasePath+a.Path, handlers...)
			case ACTION_METHOD_TYPE_POST:
				app.Post(c.BasePath+a.Path, handlers...)
			case ACTION_METHOD_TYPE_PUT:
				app.Put(c.BasePath+a.Path, handlers...)
			case ACTION_METHOD_TYPE_DELETE:
				app.Delete(c.BasePath+a.Path, handlers...)
			default:
				app.Any(c.BasePath+a.Path, handlers...)
			}
		}
	}
}
// CreateNewControllerInstance builds a controller with default no-op hooks,
// registers it in the package registry, and returns it for action setup.
func CreateNewControllerInstance(name string, basePath string) *Controller {
	controller := &Controller{
		Name:        name,
		BasePath:    basePath,
		PreRequest:  func(ctx context.Context) { ctx.Next() },
		PostRequest: func(ctx context.Context) {},
		Actions:     make([]*Action, 0),
	}
	addController(controller)
	return controller
}
// addController appends controller to the package-level registry,
// lazily initializing the registry slice on first use.
func addController(controller *Controller) {
	// BUG FIX: the original tested `controller == nil` (the argument) where
	// it clearly meant to lazily initialize the package-level `controllers`
	// slice — as written it reset the registry whenever a nil controller was
	// passed and never initialized it otherwise.
	if controllers == nil {
		controllers = make([]*Controller, 0)
	}
	controllers = append(controllers, controller)
}
|
package main
import (
"time"
"fmt"
"math/rand"
g "github.com/vseledkin/gortex"
"log"
"bufio"
"os"
"strings"
)
// main trains a character-level RNN classifier (en vs ru) on train.txt.
// Every 50 steps it logs progress; once the moving-average loss drops below
// 1e-5 it evaluates F1 on test.txt, saves the model, and enters an
// interactive stdin classification loop.
func main() {
	// maintain random seed
	rand.Seed(time.Now().UnixNano())
	tokenizer := g.CharSplitter{}
	trainFile := "train.txt"
	dic, e := g.DictionaryFromFile(trainFile, tokenizer)
	if e != nil {
		log.Fatal(e)
	}
	g.SaveDictionary("dic.json", dic)
	hidden_size := 128
	fmt.Printf("Dictionary has %d tokens\n", dic.Len())
	fmt.Printf("%s\n", dic)
	optimizer := g.NewOptimizer(g.OpOp{Method: g.WINDOWGRAD, LearningRate: 0.001, Momentum: g.DefaultMomentum, Clip: 4})
	encoder := g.MakeOutputlessRNN(dic.Len(), hidden_size)
	// Projection from the final hidden state to the 2 class logits.
	Who := g.RandXavierMat(2, hidden_size)
	Bho := g.RandXavierMat(2, 1)
	// define model parameters
	encoderModel := encoder.GetParameters("Encoder")
	encoderModel["out"] = Who
	encoderModel["outBias"] = Bho
	count := 0
	ma_loss := g.NewMovingAverage(100)
	learning_rate := float32(0.001)
	anneal_rate := float32(0.999)
	g.CharClassifierSampleVisitor(trainFile, 1, true, tokenizer, dic, func(x []uint, label string) {
		// NOTE(review): NormFloat64 draws from a standard normal, so this is
		// not a 40% coin flip — confirm whether rand.Float64() was intended.
		if rand.NormFloat64() < 0.4 {
			x = append([]uint{52}, x...)
		}
		// read sample
		sample := ""
		for i := range x {
			sample += dic.TokenByID(x[i])
		}
		G := &g.Graph{NeedsBackprop: true}
		ht := g.Mat(hidden_size, 1) // vector of zeros
		// encode sequence into z
		for i := range x {
			oneHot := g.Mat(dic.Len(), 1)
			oneHot.W[x[i]] = 1.0
			ht = encoder.Step(G, oneHot, ht)
		}
		logits := G.Add(G.Mul(Who, ht), Bho)
		var target uint
		if label == "__label__ru" {
			target = 1
		}
		predicted_class, _ := g.MaxIV(g.Softmax(logits))
		cost, prob := G.Crossentropy(logits, target)
		G.Backward()
		optimizer.Step(encoderModel)
		count++
		ma_loss.Add(cost)
		avg_cost := ma_loss.Avg()
		if count%50 == 0 {
			fmt.Printf("step: %d lr: %f loss: %f label: [%s] [%d] ? %d %f sample: [%s]\n",
				count, learning_rate, avg_cost, label, target, predicted_class, prob, sample)
			// NOTE(review): learning_rate is annealed and logged but never
			// fed back into the optimizer — verify this is intentional.
			learning_rate = learning_rate * anneal_rate
		}
		if avg_cost < 1e-5 { // TEST
			var trueLabels, predictedLabels []uint
			g.CharClassifierSampleVisitor("test.txt", 1, false, tokenizer, dic, func(x []uint, label string) {
				// read sample
				sample := ""
				for i := range x {
					sample += dic.TokenByID(x[i])
				}
				G := &g.Graph{NeedsBackprop: false}
				ht := g.Mat(hidden_size, 1) // vector of zeros
				// encode sequence into z
				for i := range x {
					oneHot := g.Mat(dic.Len(), 1)
					oneHot.W[x[i]] = 1.0
					ht = encoder.Step(G, oneHot, ht)
				}
				logits := G.Add(G.Mul(Who, ht), Bho)
				var target uint
				if label == "__label__ru" {
					target = 1
				}
				trueLabels = append(trueLabels, target)
				predicted_class, _ := g.MaxIV(g.Softmax(logits))
				predictedLabels = append(predictedLabels, predicted_class)
				//fmt.Printf("sample %d %d %s\n", predicted_class, target, sample)
			})
			F1, message := g.F1Score(trueLabels, predictedLabels, []string{"ru", "en"}, nil)
			fmt.Printf("\n\nF1: %f %s\n\n", F1, message)
		}
		if avg_cost < 1e-5 {
			g.SaveModel("model.json", encoderModel)
			// Interactive loop: classify each stdin line until read fails.
			for true {
				r := bufio.NewReader(os.Stdin)
				line, e := r.ReadString('\n')
				if e != nil {
					break
				}
				line = strings.TrimSpace(line)
				println(line)
				G := &g.Graph{NeedsBackprop: false}
				ht := g.Mat(hidden_size, 1) // vector of zeros
				// encode sequence into z
				for _, char := range tokenizer.Split(line) {
					char_id := dic.IDByToken(char)
					oneHot := g.Mat(dic.Len(), 1)
					oneHot.W[char_id] = 1.0
					ht = encoder.Step(G, oneHot, ht)
				}
				logits := G.Add(G.Mul(Who, ht), Bho)
				predicted_label_index, probability := g.MaxIV(g.Softmax(logits))
				predicted_label := ""
				if predicted_label_index == 0 {
					predicted_label = "en"
				} else {
					predicted_label = "ru"
				}
				fmt.Printf("Input: [%s] Predicted label: %s Probabilty: %f\n", line, predicted_label, probability)
			}
		}
	})
}
|
package main
import (
"fmt"
"strings"
)
var s = "stressed"
// main prints the package-level string s with its characters reversed.
func main() {
	chars := strings.Split(s, "")
	// Reverse in place by swapping from both ends toward the middle.
	for left, right := 0, len(chars)-1; left < right; left, right = left+1, right-1 {
		chars[left], chars[right] = chars[right], chars[left]
	}
	fmt.Println(strings.Join(chars, ""))
}
|
package main
import ("bytes"; "encoding/base64"; "fmt" )
// main round-trips a byte slice through base64 using the streaming
// encoder/decoder, printing the encoded text and then the decoded bytes.
func main() {
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	bb := &bytes.Buffer{}
	encoder := base64.NewEncoder(base64.StdEncoding, bb)
	encoder.Write(data)
	// Close flushes any partially buffered group; required before decoding.
	encoder.Close()
	fmt.Println(bb)
	dbuf := make([]byte, 12)
	decoder := base64.NewDecoder(base64.StdEncoding, bb)
	// BUG FIX: only print the n bytes the decoder actually produced. The
	// original ranged over the entire 12-byte buffer, printing 4 trailing
	// zero bytes that were never written (8 input bytes decode to 8 bytes).
	// Error deliberately ignored: the whole payload fits in one Read here.
	n, _ := decoder.Read(dbuf)
	for _, ch := range dbuf[:n] {
		fmt.Print(ch)
	}
}
package ice
import "github.com/nkbai/goice/stun"
// candidateGetter yields ICE candidates discovered by a transport.
type candidateGetter interface {
	/*
		Returns partially filled candidates: the first entry is the host's
		primary local address and the last entry is the default Candidate.
	*/
	GetCandidates() (candidates []*Candidate, err error)
}
// treat stun and turn as the same ...
// NOTE(review): type name is misspelled ("Tranporter"); renaming would touch
// callers outside this view, so it is kept as-is.
type stunTranporter interface {
	candidateGetter
	Close()
	getListenCandidiates() []string
	/*
		transporter is using turn?
	*/
	//IsTurn() bool
}
/*
A lightweight local STUN server needed to establish a connection.
*/
type serverSocker interface {
	/*
		Send a message from fromaddr to toaddr and wait for the response.
		fromaddr may be a local address or an address relayed by the TURN server.
	*/
	sendStunMessageSync(msg *stun.Message, fromaddr, toaddr string) (res *stun.Message, err error)
	/*
		Currently unused; kept for later.
	*/
	sendStunMessageWithResult(msg *stun.Message, fromaddr, toaddr string) (key stun.TransactionID, ch chan *serverSockResponse, err error)
	/*
		Same parameter semantics as the sync variant, but does not wait for the result.
	*/
	sendStunMessageAsync(msg *stun.Message, fromaddr, toaddr string) error
	/*
		Send a data packet from fromaddr to toaddr.
		If fromaddr is a local address the packet is sent directly;
		if it is a TURN server relay address the data must be forwarded via the
		TURN server, i.e. wrapped in a SendIndication or ChannelDataRequest.
	*/
	sendData(data []byte, fromaddr, toaddr string) error
	/*
		Close the connection.
	*/
	Close()
	/*
		After the ICE check has truly completed, permission refresh and
		keep-alive must be started.
		todo: the semantics of mode here are unclear and need tidying.
	*/
	FinishNegotiation(mode serverSockMode)
	//StartRefresh()
}
|
package util
import (
"testing"
)
// TestNewRingBuffer exercises push/pop wrap-around behavior of a ring buffer
// with 8 bytes of storage, asserting on the internal first/end indices.
// NOTE(review): several failure messages ("must 0, 3", "must 6", "must 3")
// look stale relative to the asserted values — verify before trusting them.
func TestNewRingBuffer(t *testing.T) {
	rb := NewRingBuffer(8)
	t.Log("begin:", rb, rb.msgs.Len())
	rb.Push([]byte{1, 2, 3})
	t.Log("push:", rb, rb.msgs.Len())
	if rb.end != 3 {
		t.Fatal("index failed, must 0, 3", rb.buffer)
	}
	rb.Push([]byte{2, 2, 2})
	t.Log("push:", rb, rb.msgs.Len())
	data, err := rb.Pop()
	if err != nil || len(data) != 3 || data[0] != 1 || data[1] != 2 || data[2] != 3 {
		t.Fatal("data error, must 1,2,3, now:", len(data), data)
	}
	t.Log("pop:", rb, rb.msgs.Len())
	data, err = rb.Pop()
	t.Log("pop:", rb, rb.msgs.Len())
	if rb.first != 6 {
		t.Fatal("index failed, must 3")
	}
	// This push wraps around the end of the 8-byte buffer.
	rb.Push([]byte{4, 4, 4})
	t.Log("push", rb, rb.msgs.Len())
	if rb.end != 3 {
		t.Fatal("index failed, must 6", rb.end)
	}
	rb.Push([]byte{4, 5, 6})
	t.Log("push", rb, rb.msgs.Len())
	if rb.end != 6 {
		t.Fatal("index failed, must 3, now:", rb.end, rb.first)
	}
	// Buffer is now full; this push is expected to be rejected.
	ret := rb.Push([]byte{7, 8, 9})
	if ret != nil {
		t.Fatal("push failed, must false")
	}
	t.Log("push", rb, rb.msgs.Len())
	data, err = rb.Pop()
	t.Log("pop", rb, rb.msgs.Len())
	if err != nil || len(data) != 3 || data[0] != 4 || data[1] != 4 || data[2] != 4 {
		t.Fatal("data error, now:", data, err)
	}
	if rb.first != 3 {
		t.Fatal("index failed")
	}
	data, err = rb.Pop()
	t.Log("pop", rb, rb.msgs.Len())
	if err != nil || len(data) != 3 || data[0] != 4 || data[1] != 5 || data[2] != 6 {
		t.Fatal("data error, now:", data)
	}
	if rb.first != 6 {
		t.Fatal("index failed")
	}
}
|
package Solution
import "until"
/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */
// Top-down recursion: find the current root, then recurse one level down.
// preorder[0] (the first value of the preorder traversal) is always the root.
// Locate that value in the inorder traversal: everything to its left belongs
// to the left subtree, everything to its right to the right subtree.
// Recurse this way until the slices are empty.
func buildTree(preorder []int, inorder []int) *until.TreeNode {
	if len(preorder) == 0 || len(inorder) == 0 {
		return nil
	}
	// Find the root's index within the inorder sequence.
	var root int
	for k, v := range inorder {
		if v == preorder[0] {
			root = k
			break
		}
	}
	// Partition into left/right subtrees:
	// pre_left, pre_right := preorder[1: root+1], preorder[root+1:]
	// in_left, in_right := inorder[0: root], inorder[root+1:]
	// Recurse on each subtree.
	return &until.TreeNode{
		Val: preorder[0],
		Left: buildTree(preorder[1:root+1], inorder[0:root]),
		Right: buildTree(preorder[root+1:], inorder[root+1:]),
	}
}
|
package owls
import (
"image/color"
"github.com/bcokert/engo-test/logging"
"engo.io/ecs"
"engo.io/engo"
"engo.io/engo/common"
)
// BasicHealthComponent tracks an entity's current and maximum health.
type BasicHealthComponent struct {
	Health    float32
	MaxHealth float32
}
// HealthBarComponent renders health as two stacked rectangles: a "full"
// (remaining health) segment and an "empty" (lost health) segment, positioned
// above the owning entity.
type HealthBarComponent struct {
	width       float32    // total bar width in world units
	height      float32    // bar height in world units
	position    engo.Point // anchor position computed from the parent entity
	emptySpace  common.SpaceComponent
	emptyRender common.RenderComponent
	emptyBasic  ecs.BasicEntity
	fullSpace   common.SpaceComponent
	fullRender  common.RenderComponent
	fullBasic   ecs.BasicEntity
}
// Update resizes the bar segments so the "full" portion spans `percent` of
// the bar width (clamped to [0, 1]) and re-anchors the bar above
// parentPosition.
func (c *HealthBarComponent) Update(percent float32, parentPosition engo.Point) {
	if percent < 0 {
		percent = 0
	}
	if percent > 1 {
		percent = 1
	}
	// FIX: keyed composite literal instead of the original positional one —
	// vet-friendly and robust to field reordering in engo.Point.
	position := engo.Point{X: parentPosition.X, Y: parentPosition.Y - c.height*2}
	c.fullSpace.Width = percent * c.width
	c.emptySpace.Width = c.width - c.fullSpace.Width
	c.emptySpace.Position = position
	c.fullSpace.Position = position
	// Offset the full segment so it starts where the empty segment ends.
	c.fullSpace.Position.X += c.emptySpace.Width
}
// NewHealthBarComponent builds a two-segment health bar of the given size
// anchored above parentPosition: a red "empty" layer and a green "full"
// layer that initially spans the whole width.
func NewHealthBarComponent(width, height float32, parentPosition engo.Point) HealthBarComponent {
	// FIX: keyed composite literal instead of the original positional one —
	// vet-friendly and robust to field reordering in engo.Point.
	position := engo.Point{X: parentPosition.X, Y: parentPosition.Y - height*2}
	return HealthBarComponent{
		width:       width,
		height:      height,
		position:    position,
		emptySpace:  common.SpaceComponent{Position: position, Width: 0, Height: height},
		emptyRender: common.RenderComponent{Drawable: common.Rectangle{}, Color: color.RGBA{255, 0, 0, 255}},
		emptyBasic:  ecs.NewBasic(),
		fullSpace:   common.SpaceComponent{Position: position, Width: width, Height: height},
		fullRender:  common.RenderComponent{Drawable: common.Rectangle{}, Color: color.RGBA{0, 255, 0, 255}},
		fullBasic:   ecs.NewBasic(),
	}
}
// owlEntity is the set of components an entity must expose to be managed by
// the OwlSystem.
type owlEntity interface {
	BasicEntity() *ecs.BasicEntity
	RenderComponent() *common.RenderComponent
	MouseComponent() *common.MouseComponent
	BasicHealthComponent() *BasicHealthComponent
	HealthBarComponent() *HealthBarComponent
	SpaceComponent() *common.SpaceComponent
}
// The OwlSystem manages a group of owls; their creation, mouse interaction, and so on.
// Everything but physics related properties are managed by the Owl System.
// An owl system also automatically adds owls to itself over time
type OwlSystem struct {
	entities map[uint64]owlEntity // owls keyed by their ecs entity ID
	world    *ecs.World           // set by New when the system joins a world
	Log      logging.Logger
}
// Add adds a new entity to the system and registers its health-bar layers
// with the world's render system.
func (s *OwlSystem) Add(entity owlEntity) {
	s.entities[entity.BasicEntity().ID()] = entity
	healthbar := entity.HealthBarComponent()
	for _, worldSystem := range s.world.Systems() {
		renderSystem, ok := worldSystem.(*common.RenderSystem)
		if !ok {
			continue
		}
		renderSystem.Add(&healthbar.emptyBasic, &healthbar.emptyRender, &healthbar.emptySpace)
		renderSystem.Add(&healthbar.fullBasic, &healthbar.fullRender, &healthbar.fullSpace)
	}
}
// Remove removes an entity from the system, by its entity id.
func (s *OwlSystem) Remove(entity ecs.BasicEntity) {
	// delete is a no-op for absent keys, so the original existence check
	// was redundant and has been removed.
	delete(s.entities, entity.ID())
}
// New is called every time the system is added to a world; it records the
// world and lazily initializes the entity map.
func (s *OwlSystem) New(world *ecs.World) {
	s.world = world
	if s.entities != nil {
		return
	}
	s.entities = make(map[uint64]owlEntity, 10)
}
// Update processes the user interactions with owls, updating or removing them as necessary
// It also internally manages the list of owls, adding them at certain intervals
func (s *OwlSystem) Update(dt float32) {
	for _, owl := range s.entities {
		health := owl.BasicHealthComponent()
		healthbar := owl.HealthBarComponent()
		mouse := owl.MouseComponent()
		// update health, removing if dead
		if mouse.Clicked {
			health.Health--
			if health.Health <= 0 {
				// Remove the owl and both health-bar layer entities.
				s.world.RemoveEntity(*owl.BasicEntity())
				s.world.RemoveEntity(owl.HealthBarComponent().emptyBasic)
				s.world.RemoveEntity(owl.HealthBarComponent().fullBasic)
				continue
			}
		}
		// remove owls that have escaped the screen (100-unit margin)
		p := owl.SpaceComponent().Position
		if p.X < -100 || p.X > engo.GameWidth()+100 || p.Y < -100 || p.Y > engo.GameHeight()+100 {
			s.world.RemoveEntity(*owl.BasicEntity())
			s.world.RemoveEntity(owl.HealthBarComponent().emptyBasic)
			s.world.RemoveEntity(owl.HealthBarComponent().fullBasic)
			continue
		}
		// update healthbar based on health
		percentHealthy := health.Health / health.MaxHealth
		healthbar.Update(percentHealthy, owl.SpaceComponent().Position)
		col := color.RGBA{255, 255, 255, 255}
		// if the mouse is over the owl, make it glow a bit blue
		if mouse.Hovered {
			col.R -= 40
			col.G -= 40
		}
		owl.RenderComponent().Color = col
	}
}
|
package solution
import "testing"
// testCase pairs an input string with the expected length of its longest
// substring without repeating characters.
type testCase struct {
	answer int    // expected result
	input  string // string under test
}
func TestSolution(t *testing.T) {
cases := []testCase{
testCase{
answer: 3,
input : "abcabcbb",
},
testCase{
answer: 3,
input : "testtest",
},
testCase{
answer: 3,
input : "pwwkew",
},
testCase{
answer: 1,
input : "bbbbbbb",
},
testCase{
answer: 0,
input : "",
},
testCase{
answer: 1,
input : " ",
},
testCase{
answer: 2,
input : "ab",
},
testCase{
answer: 3,
input : "ab ab",
},
testCase{
answer: 4,
input : "abcd",
},
testCase{
answer: 2,
input : "aab",
},
testCase{
answer: 6,
input : "asjrgapa",
},
}
for _, c := range cases {
answer := lengthOfLongestSubstring(c.input)
t.Logf("Start testing %s, correct answer is: %d; your answer is: %d", c.input, c.answer, answer)
if c.answer != answer {
t.Fatal("Got wrong answer!")
}else {
t.Log(" ...PASS")
}
}
} |
package inference
import (
"fmt"
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/test"
"github.com/TIBCOSoftware/flogo-contrib/activity/inference/framework/tf"
"github.com/TIBCOSoftware/flogo-lib/core/activity"
)
// Blank reference keeps the tf framework package imported.
// NOTE(review): presumably its init() registers the Tensorflow framework — confirm.
var _ tf.TensorflowModel

// activityMetadata caches the parsed activity.json metadata; see getActivityMetadata.
var activityMetadata *activity.Metadata
// getActivityMetadata lazily reads activity.json, caching the parsed
// metadata in the package-level activityMetadata variable.
func getActivityMetadata() *activity.Metadata {
	if activityMetadata != nil {
		return activityMetadata
	}
	jsonMetadataBytes, err := ioutil.ReadFile("activity.json")
	if err != nil {
		panic("No Json Metadata found for activity.json path")
	}
	activityMetadata = activity.NewMetadata(string(jsonMetadataBytes))
	return activityMetadata
}
// TestCreate verifies the activity can be constructed from its metadata.
func TestCreate(t *testing.T) {
	if act := NewActivity(getActivityMetadata()); act == nil {
		t.Error("Activity Not Created")
		t.Fail()
	}
}
// TestEval exercises the activity's Eval against a local Tensorflow model,
// converting any panic during execution into a test failure.
func TestEval(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			// BUG FIX: the original also called t.Failed(), which merely
			// *reports* the failure state and discards the result; Errorf
			// below already marks the test as failed.
			t.Errorf("panic during execution: %v", r)
		}
	}()
	act := NewActivity(getActivityMetadata())
	tc := test.NewTestActivityContext(getActivityMetadata())
	//setup attrs
	tc.SetInput("model", "/Users/mellis/Documents/IoT/models/tn_demo/Archive.zip")
	tc.SetInput("inputName", "inputs")
	tc.SetInput("framework", "Tensorflow")
	tc.SetInput("features", "z-axis-q75:4.140586,corr-x-z:0.1381063882214782,x-axis-mean:1.7554575428900194,z-axis-sd:4.6888631696380765,z-axis-skew:-0.3619011587545954,y-axis-sd:7.959084724314854,y-axis-q75:16.467001,corr-z-y:0.3467060369518231,x-axis-sd:6.450293741961166,x-axis-skew:0.09756801680727022,y-axis-mean:9.389463650669393,y-axis-skew:-0.49036224958471764,z-axis-mean:1.1226106985139188,x-axis-q25:-3.1463003,x-axis-q75:6.3198414,y-axis-q25:3.0645783,z-axis-q25:-1.9477097,corr-x-y:0.08100326860866637")
	done, _ := act.Eval(tc)
	if done == false {
		assert.Fail(t, "Invalid framework specified")
	}
	//check result attr
	fmt.Println(tc.GetOutput("result"))
}
|
// go test -run none -bench . -benchtime 3s -benchmem.
// Basic benchmark test.
package basic
import (
"fmt"
"testing"
)
var gs string
// BenchmarkSprint tests the performance of using Sprint.
func BenchmarkSprint(b *testing.B) {
	var result string
	for n := 0; n < b.N; n++ {
		result = fmt.Sprint("hello")
	}
	// Assign to the package-level sink so the loop is not optimized away.
	gs = result
}
// BenchmarkSprintf tests the performance of using Sprintf.
func BenchmarkSprintf(b *testing.B) {
	var result string
	for n := 0; n < b.N; n++ {
		result = fmt.Sprintf("hello")
	}
	// Assign to the package-level sink so the loop is not optimized away.
	gs = result
}
|
package util
// Use masks unused variables when compiling go program.
// Passing a value here counts as a reference, silencing
// "declared but not used" compile errors during development.
func Use(vals ...interface{}) {
	for range vals {
	}
}
|
package main
/*
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 . 385 = 2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
*/
import ( "fmt" )
// main prints the difference between the square of the sum and the sum of
// the squares of the first 100 natural numbers (Project Euler #6).
func main() {
	sumOfSquares, sum := 0, 0
	for n := 1; n <= 100; n++ {
		sumOfSquares += n * n
		sum += n
	}
	fmt.Println(sum*sum - sumOfSquares)
}
|
package main
import "fmt"
// spawner picks an index in [0, l) used to choose which tetromino shape to
// spawn; implementations may be deterministic (tests) or random (gameplay).
type spawner func(l int) int

// Playfield is the grid into which tetrominoes fall
type Playfield struct {
	store   [][]Mino          // rows x cols grid of cells
	current *CurrentTetromino // piece currently in play; nil before first Spawn
	Shapes  []string          // one-letter names of the spawnable tetrominoes
}

// CurrentTetromino location and pointer
type CurrentTetromino struct {
	x   int
	y   int
	obj Tetromino
}

// Coordinates is an (x, y) pair on the playfield.
type Coordinates struct {
	x int
	y int
}
// NewPlayfield generator: allocates a rows x cols grid and the standard set
// of seven tetromino shape names.
func NewPlayfield(rows int, cols int) *Playfield {
	grid := make([][]Mino, rows)
	for r := range grid {
		grid[r] = make([]Mino, cols)
	}
	return &Playfield{
		store:  grid,
		Shapes: []string{"I", "O", "T", "S", "Z", "J", "L"},
	}
}
// Cap reports the playfield dimensions as (rows, cols).
func (pf *Playfield) Cap() (rows int, cols int) {
	// FIX: use len rather than cap — len is the logical grid size, and the
	// two can diverge if the backing slices are ever re-sliced or grown.
	// For slices built by make(n) they are equal, so behavior is unchanged.
	rows = len(pf.store)
	cols = len(pf.store[0])
	return rows, cols
}
// Spawn a Tetromino in the playfield;
// spawner function can return a static or dynamic (random) value
func (pf *Playfield) Spawn(spwn spawner) error {
var obj Tetromino
i := spwn(len(pf.Shapes))
if i < 0 || i >= len(pf.Shapes) {
return fmt.Errorf("value out of bounds")
}
switch pf.Shapes[i] {
case "I":
obj = &ITetromino{}
case "O":
obj = &OTetromino{}
case "T":
obj = &TTetromino{}
case "S":
obj = &STetromino{}
case "Z":
obj = &ZTetromino{}
case "J":
obj = &JTetromino{}
case "L":
obj = <etromino{}
}
pf.current = &CurrentTetromino{
x: 0,
y: 0,
obj: obj,
}
return nil
}
// Move shifts the current tetromino by the (x, y) delta, rejecting moves
// that would leave the playfield.
func (pf *Playfield) Move(x, y int) error {
	if pf.outOfBounds(pf.current.x+x, pf.current.y+y) {
		return fmt.Errorf("value out of bounds")
	}
	// BUG FIX: the original assigned the delta as the absolute position
	// (pf.current.x = x) even though the bounds check above validates the
	// accumulated position current+delta; apply the delta instead.
	pf.current.x += x
	pf.current.y += y
	return nil
}
// outOfBounds reports whether (x, y) falls outside the playfield limits.
func (pf *Playfield) outOfBounds(x, y int) bool {
	rows, cols := pf.Cap()
	return x < 0 || y < 0 || x > rows || y > cols
}
// Rotate is a stub: rotation is not implemented yet and always succeeds.
// TODO: implement tetromino rotation.
func (pf *Playfield) Rotate() error {
	return nil
}
|
package server
import (
"crypto/elliptic"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/logging"
"github.com/authelia/authelia/v4/internal/middlewares"
"github.com/authelia/authelia/v4/internal/templates"
"github.com/authelia/authelia/v4/internal/utils"
)
// TemporaryCertificate contains the FD of 2 temporary files containing the PEM format of the certificate and private key.
type TemporaryCertificate struct {
	CertFile       *os.File          // temp file holding the PEM certificate
	KeyFile        *os.File          // temp file holding the PEM private key
	Certificate    *x509.Certificate // parsed form of CertificatePEM
	CertificatePEM []byte
	KeyPEM         []byte
}
// TLSCertificate loads the certificate/key pair back from the temp files.
func (tc TemporaryCertificate) TLSCertificate() (tls.Certificate, error) {
	certPath, keyPath := tc.CertFile.Name(), tc.KeyFile.Name()
	return tls.LoadX509KeyPair(certPath, keyPath)
}
// Close closes whichever of the two temp files have been created.
func (tc *TemporaryCertificate) Close() {
	for _, f := range []*os.File{tc.CertFile, tc.KeyFile} {
		if f != nil {
			f.Close()
		}
	}
}
// CertificateContext tracks all certificates generated during a test run so
// their temp files can be closed together via Close.
type CertificateContext struct {
	Certificates      []TemporaryCertificate
	privateKeyBuilder utils.PrivateKeyBuilder // strategy for generating private keys
}
// NewCertificateContext instantiate a new certificate context used to easily generate certificates within tests.
func NewCertificateContext(privateKeyBuilder utils.PrivateKeyBuilder) (*CertificateContext, error) {
	cc := &CertificateContext{privateKeyBuilder: privateKeyBuilder}
	cert, err := cc.GenerateCertificate()
	if err != nil {
		return nil, err
	}
	// Reset the list to exactly the first certificate (GenerateCertificate
	// also appends it internally).
	cc.Certificates = []TemporaryCertificate{*cert}
	return cc, nil
}
// GenerateCertificate generates a new certificate/key pair for the test
// hostnames, writes both to temporary files, appends the result to the
// context, and returns it. Already-created temp files are closed on every
// failure path.
func (cc *CertificateContext) GenerateCertificate() (*TemporaryCertificate, error) {
	certBytes, keyBytes, err := utils.GenerateCertificate(cc.privateKeyBuilder,
		[]string{"authelia.com", "example.org", "local.example.com"},
		time.Now(), 3*time.Hour, false)
	if err != nil {
		return nil, fmt.Errorf("unable to generate certificate: %w", err)
	}
	tmpCertificate := new(TemporaryCertificate)
	certFile, err := os.CreateTemp("", "cert")
	if err != nil {
		return nil, fmt.Errorf("unable to create temp file for certificate: %w", err)
	}
	tmpCertificate.CertFile = certFile
	tmpCertificate.CertificatePEM = certBytes
	block, _ := pem.Decode(certBytes)
	// BUG FIX: pem.Decode returns nil when no PEM block is found; the
	// original would have panicked dereferencing block.Bytes.
	if block == nil {
		tmpCertificate.Close()
		return nil, fmt.Errorf("unable to parse certificate: no PEM block found")
	}
	c, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		// BUG FIX: the original leaked the cert temp file on this path.
		tmpCertificate.Close()
		return nil, fmt.Errorf("unable to parse certificate: %w", err)
	}
	tmpCertificate.Certificate = c
	err = os.WriteFile(tmpCertificate.CertFile.Name(), certBytes, 0600)
	if err != nil {
		tmpCertificate.Close()
		return nil, fmt.Errorf("unable to write certificates in file: %w", err)
	}
	keyFile, err := os.CreateTemp("", "key")
	if err != nil {
		tmpCertificate.Close()
		return nil, fmt.Errorf("unable to create temp file for private key: %w", err)
	}
	tmpCertificate.KeyFile = keyFile
	tmpCertificate.KeyPEM = keyBytes
	err = os.WriteFile(tmpCertificate.KeyFile.Name(), keyBytes, 0600)
	if err != nil {
		tmpCertificate.Close()
		return nil, fmt.Errorf("unable to write private key in file: %w", err)
	}
	cc.Certificates = append(cc.Certificates, *tmpCertificate)
	return tmpCertificate, nil
}
// Close closes the temp files of every certificate tracked by the context.
func (cc *CertificateContext) Close() {
	for i := range cc.Certificates {
		cc.Certificates[i].Close()
	}
}
// TLSServerContext wraps a test fasthttp server together with the TCP port
// it bound to.
type TLSServerContext struct {
	server *fasthttp.Server
	port   int
}
// NewTLSServerContext starts a default Authelia server for the given
// configuration on a background goroutine and records the port it bound to.
func NewTLSServerContext(configuration schema.Configuration) (serverContext *TLSServerContext, err error) {
	serverContext = new(TLSServerContext)
	providers := middlewares.Providers{}
	providers.Templates, err = templates.New(templates.Config{EmailTemplatesPath: configuration.Notifier.TemplatePath})
	if err != nil {
		return nil, err
	}
	s, listener, _, _, err := CreateDefaultServer(&configuration, providers)
	if err != nil {
		return nil, err
	}
	serverContext.server = s
	go func() {
		// NOTE(review): any Serve error aborts the test process; presumably
		// Serve returns nil after Shutdown — confirm fasthttp's contract.
		err := s.Serve(listener)
		if err != nil {
			logging.Logger().Fatal(err)
		}
	}()
	// Recover the actual (possibly ephemeral) port from the listener address,
	// taking the last ":"-separated component.
	addrSplit := strings.Split(listener.Addr().String(), ":")
	if len(addrSplit) > 1 {
		port, err := strconv.ParseInt(addrSplit[len(addrSplit)-1], 10, 32)
		if err != nil {
			return nil, fmt.Errorf("unable to parse port from address: %v", err)
		}
		serverContext.port = int(port)
	}
	return serverContext, nil
}
// Port returns the TCP port the test server is listening on.
func (sc *TLSServerContext) Port() int {
	port := sc.port
	return port
}
// Close gracefully shuts the test server down.
func (sc *TLSServerContext) Close() error {
	err := sc.server.Shutdown()
	return err
}
// TestShouldRaiseErrorWhenClientDoesNotSkipVerify asserts that a default
// http client rejects the server's self-signed certificate.
func TestShouldRaiseErrorWhenClientDoesNotSkipVerify(t *testing.T) {
	privateKeyBuilder := utils.ECDSAKeyBuilder{}.WithCurve(elliptic.P256())
	certificateContext, err := NewCertificateContext(privateKeyBuilder)
	require.NoError(t, err)
	defer certificateContext.Close()
	tlsServerContext, err := NewTLSServerContext(schema.Configuration{
		Server: schema.Server{
			Address: &schema.AddressTCP{Address: schema.NewAddressFromNetworkValues("tcp", "0.0.0.0", 9091)},
			TLS: schema.ServerTLS{
				Certificate: certificateContext.Certificates[0].CertFile.Name(),
				Key:         certificateContext.Certificates[0].KeyFile.Name(),
			},
		},
	})
	require.NoError(t, err)
	defer tlsServerContext.Close()
	req, err := http.NewRequest(fasthttp.MethodGet, fmt.Sprintf("https://local.example.com:%d", tlsServerContext.Port()), nil)
	require.NoError(t, err)
	_, err = http.DefaultClient.Do(req)
	require.Error(t, err)
	require.Contains(t, err.Error(), "x509: certificate signed by unknown authority")
}
// TestShouldServeOverTLSWhenClientDoesSkipVerify asserts the server answers
// over TLS when the client disables certificate verification.
func TestShouldServeOverTLSWhenClientDoesSkipVerify(t *testing.T) {
	privateKeyBuilder := utils.ECDSAKeyBuilder{}.WithCurve(elliptic.P256())
	certificateContext, err := NewCertificateContext(privateKeyBuilder)
	require.NoError(t, err)
	defer certificateContext.Close()
	tlsServerContext, err := NewTLSServerContext(schema.Configuration{
		Server: schema.Server{
			Address: schema.DefaultServerConfiguration.Address,
			TLS: schema.ServerTLS{
				Certificate: certificateContext.Certificates[0].CertFile.Name(),
				Key:         certificateContext.Certificates[0].KeyFile.Name(),
			},
		},
	})
	require.NoError(t, err)
	defer tlsServerContext.Close()
	req, err := http.NewRequest(fasthttp.MethodGet, fmt.Sprintf("https://local.example.com:%d/api/notfound", tlsServerContext.Port()), nil)
	require.NoError(t, err)
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // Needs to be enabled in tests. Not used in production.
	}
	client := &http.Client{Transport: tr}
	res, err := client.Do(req)
	require.NoError(t, err)
	defer res.Body.Close()
	// Drain the body so the connection can be reused/closed cleanly.
	_, err = io.ReadAll(res.Body)
	require.NoError(t, err)
	assert.Equal(t, "404 Not Found", res.Status)
}
// TestShouldServeOverTLSWhenClientHasProperRootCA asserts the handshake
// succeeds when the client trusts the server certificate as a root CA.
func TestShouldServeOverTLSWhenClientHasProperRootCA(t *testing.T) {
	privateKeyBuilder := utils.ECDSAKeyBuilder{}.WithCurve(elliptic.P256())
	certificateContext, err := NewCertificateContext(privateKeyBuilder)
	require.NoError(t, err)
	defer certificateContext.Close()
	tlsServerContext, err := NewTLSServerContext(schema.Configuration{
		Server: schema.Server{
			Address: schema.DefaultServerConfiguration.Address,
			TLS: schema.ServerTLS{
				Certificate: certificateContext.Certificates[0].CertFile.Name(),
				Key:         certificateContext.Certificates[0].KeyFile.Name(),
			},
		},
	})
	require.NoError(t, err)
	defer tlsServerContext.Close()
	req, err := http.NewRequest(fasthttp.MethodGet, fmt.Sprintf("https://local.example.com:%d/api/notfound", tlsServerContext.Port()), nil)
	require.NoError(t, err)
	block, _ := pem.Decode(certificateContext.Certificates[0].CertificatePEM)
	c, err := x509.ParseCertificate(block.Bytes)
	require.NoError(t, err)
	// Create a root CA for the client to properly validate server cert.
	rootCAs := x509.NewCertPool()
	rootCAs.AddCert(c)
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			RootCAs:    rootCAs,
			MinVersion: tls.VersionTLS13,
		},
	}
	client := &http.Client{Transport: tr}
	res, err := client.Do(req)
	require.NoError(t, err)
	defer res.Body.Close()
	_, err = io.ReadAll(res.Body)
	require.NoError(t, err)
	assert.Equal(t, "404 Not Found", res.Status)
}
// TestShouldRaiseWhenMutualTLSIsConfiguredAndClientIsNotAuthenticated asserts
// that a server requiring client certificates rejects a client without one.
func TestShouldRaiseWhenMutualTLSIsConfiguredAndClientIsNotAuthenticated(t *testing.T) {
	privateKeyBuilder := utils.ECDSAKeyBuilder{}.WithCurve(elliptic.P256())
	certificateContext, err := NewCertificateContext(privateKeyBuilder)
	require.NoError(t, err)
	defer certificateContext.Close()
	clientCert, err := certificateContext.GenerateCertificate()
	require.NoError(t, err)
	tlsServerContext, err := NewTLSServerContext(schema.Configuration{
		Server: schema.Server{
			Address: schema.DefaultServerConfiguration.Address,
			TLS: schema.ServerTLS{
				Certificate:        certificateContext.Certificates[0].CertFile.Name(),
				Key:                certificateContext.Certificates[0].KeyFile.Name(),
				ClientCertificates: []string{clientCert.CertFile.Name()},
			},
		},
	})
	require.NoError(t, err)
	defer tlsServerContext.Close()
	req, err := http.NewRequest(fasthttp.MethodGet, fmt.Sprintf("https://local.example.com:%d/api/notfound", tlsServerContext.Port()), nil)
	require.NoError(t, err)
	// Create a root CA for the client to properly validate server cert.
	rootCAs := x509.NewCertPool()
	rootCAs.AddCert(certificateContext.Certificates[0].Certificate)
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			RootCAs:    rootCAs,
			MinVersion: tls.VersionTLS13,
		},
	}
	client := &http.Client{Transport: tr}
	_, err = client.Do(req)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "remote error: tls: certificate required")
}
// TestShouldServeProperlyWhenMutualTLSIsConfiguredAndClientIsAuthenticated verifies that a
// server requiring client certificates serves a client presenting a valid one.
func TestShouldServeProperlyWhenMutualTLSIsConfiguredAndClientIsAuthenticated(t *testing.T) {
	keyBuilder := utils.ECDSAKeyBuilder{}.WithCurve(elliptic.P256())
	certCtx, err := NewCertificateContext(keyBuilder)
	require.NoError(t, err)
	defer certCtx.Close()
	clientCert, err := certCtx.GenerateCertificate()
	require.NoError(t, err)
	serverCtx, err := NewTLSServerContext(schema.Configuration{
		Server: schema.Server{
			Address: schema.DefaultServerConfiguration.Address,
			TLS: schema.ServerTLS{
				Certificate:        certCtx.Certificates[0].CertFile.Name(),
				Key:                certCtx.Certificates[0].KeyFile.Name(),
				ClientCertificates: []string{clientCert.CertFile.Name()},
			},
		},
	})
	require.NoError(t, err)
	defer serverCtx.Close()
	request, err := http.NewRequest(fasthttp.MethodGet, fmt.Sprintf("https://local.example.com:%d/api/notfound", serverCtx.Port()), nil)
	require.NoError(t, err)
	// Trust the server certificate so server verification succeeds.
	pool := x509.NewCertPool()
	pool.AddCert(certCtx.Certificates[0].Certificate)
	// Certificates[1] is presumably the client certificate generated above
	// (GenerateCertificate appending to the context) — NOTE(review): confirm.
	clientTLSCert, err := certCtx.Certificates[1].TLSCertificate()
	require.NoError(t, err)
	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      pool,
				Certificates: []tls.Certificate{clientTLSCert},
				MinVersion:   tls.VersionTLS13,
			},
		},
	}
	res, err := httpClient.Do(request)
	require.NoError(t, err)
	defer res.Body.Close()
	_, err = io.ReadAll(res.Body)
	require.NoError(t, err)
	assert.Equal(t, "404 Not Found", res.Status)
}
|
package main
import (
"fmt"
)
// main demonstrates the difference between fixed-length arrays and growable
// slices: the array keeps its declared length while the slice grows via append.
func main() {
	// NOTE(review): [6]string with only 5 initializers leaves index 5 as the
	// zero value "" — presumably intentional to show the fixed length; confirm.
	ArrayOfNames := [6]string{"Joabe", "Phelipe", "Gabriel", "Igor", "Matheus"}
	fmt.Printf("1: ArrayOfNames: %v\n", ArrayOfNames)
	// Fixed unbalanced "(" in the output labels below ("len(ArrayOfNames:" -> "len(ArrayOfNames):").
	fmt.Printf("1: len(ArrayOfNames): %v\n", len(ArrayOfNames))
	fmt.Println()
	// Arrays are mutable in place; length never changes.
	ArrayOfNames[0] = "Phelipe"
	ArrayOfNames[1] = "Joabe"
	fmt.Printf("2: ArrayOfNames: %v\n", ArrayOfNames)
	fmt.Printf("2: len(ArrayOfNames): %v\n", len(ArrayOfNames))
	fmt.Println()
	SliceOfNames := []string{"Joabe", "Phelipe", "Gabriel", "Igor", "Matheus"}
	fmt.Printf("3: SliceOfNames: %v\n", SliceOfNames)
	fmt.Printf("3: len(SliceOfNames): %v\n", len(SliceOfNames))
	fmt.Println()
	// append grows the slice, reallocating the backing array as needed.
	SliceOfNames = append(SliceOfNames, "Lucas")
	SliceOfNames = append(SliceOfNames, "Alex")
	fmt.Printf("4: SliceOfNames: %v\n", SliceOfNames)
	fmt.Printf("4: len(SliceOfNames): %v\n", len(SliceOfNames))
}
|
/*
Copyright Ken
*/
package main
import (
"fmt"
"log"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
)
// main prints, for each instance in the configured AWS autoscale group, its
// instance id, lifecycle/health state, private/public IPs and Name tag.
//
// Configuration comes from the environment:
//   - AWS_AUTOSCALE_GROUP_NAME (required)
//   - AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY (optional; when the
//     region is unset it is discovered via the EC2 metadata service)
func main() {
	// Create an initial session; region/credentials may be replaced below.
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err.Error())
	}
	/*
	 * 1. Create session with the specific AWS account credentials
	 * (defaults to current account if not specified; this requires
	 * the 'autoscaling:Describe*' action to be specified in the IAM role
	 * for the blue master instance)
	 */
	// Required - this must be specified in the CRD
	autoscaleName := os.Getenv("AWS_AUTOSCALE_GROUP_NAME")
	if autoscaleName == "" {
		log.Fatal("Must specify the environment variable AWS_AUTOSCALE_GROUP_NAME")
	}
	// Optional - if specified, all must be specified
	region := os.Getenv("AWS_REGION")
	accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
	secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
	if region == "" {
		// Call the metadata server to get our region.
		metaDataSvc := ec2metadata.New(sess)
		region, err = metaDataSvc.Region()
		if err == nil {
			sess, err = session.NewSession(&aws.Config{Region: aws.String(region)})
		}
	} else {
		sess, err = session.NewSession(&aws.Config{
			Region:      aws.String(region),
			Credentials: credentials.NewStaticCredentials(accessKey, secretKey, "")})
	}
	if err != nil {
		log.Fatal(err.Error())
	}
	fmt.Printf("\nAWS region: %s\n", region)
	/*
	 * 2. Get the autoscale group information (list of instances & associated state)
	 */
	asInput := &autoscaling.DescribeAutoScalingGroupsInput{
		AutoScalingGroupNames: []*string{aws.String(autoscaleName)},
	}
	asClient := autoscaling.New(sess)
	asDescription, err := asClient.DescribeAutoScalingGroups(asInput)
	if err != nil {
		log.Fatal(err.Error())
	}
	/*
	 * 3. Get the IP (int/ext) and instance name of all instances in the autoscale group
	 */
	ec2Client := ec2.New(sess)
	// (should probably verify we only have 1 group)
	if len(asDescription.AutoScalingGroups) == 0 {
		log.Fatalf("Could not find the autoscale group %s\n", autoscaleName)
	}
	for _, instance := range asDescription.AutoScalingGroups[0].Instances {
		fmt.Printf("Instance ID: %s,", *instance.InstanceId)
		fmt.Printf(" Lifecycle State: %s", *instance.LifecycleState)
		fmt.Printf(", Health State: %s", *instance.HealthStatus)
		// We should add all instance Ids for a single autoscale group to cut down on the api calls.
		ec2Input := &ec2.DescribeInstancesInput{
			InstanceIds: []*string{aws.String(*instance.InstanceId)},
		}
		// BUG FIX: this error was previously shadowed and only dead-checked after
		// the loop; report it per instance instead of silently skipping.
		ecDescription, err := ec2Client.DescribeInstances(ec2Input)
		if err != nil {
			fmt.Printf(", (describe error: %s)\n", err.Error())
			continue
		}
		// Guard every index: Reservations may be empty, and Association is nil
		// for instances without a public IP (previously a nil-deref panic).
		if len(ecDescription.Reservations) > 0 &&
			len(ecDescription.Reservations[0].Instances) > 0 &&
			len(ecDescription.Reservations[0].Instances[0].NetworkInterfaces) > 0 {
			iface := ecDescription.Reservations[0].Instances[0].NetworkInterfaces[0]
			fmt.Printf(", Private IP: %s", *iface.PrivateIpAddress)
			if iface.Association != nil && iface.Association.PublicIp != nil {
				fmt.Printf(", Public IP: %s", *iface.Association.PublicIp)
			}
			for _, tag := range ecDescription.Reservations[0].Instances[0].Tags {
				if *tag.Key == "Name" {
					fmt.Printf(", Name: %s", *tag.Value)
				}
			}
		}
		fmt.Printf("\n")
	}
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
package linux
import (
"net"
"syscall"
"golang.org/x/sys/unix"
)
// OSVersion returns version info of operation system.
// e.g. Linux 4.15.0-45-generic.x86_64
func OSVersion() (string, error) {
	var uname syscall.Utsname
	if err := syscall.Uname(&uname); err != nil {
		return "", err
	}
	// toString converts a NUL-terminated int8 field of Utsname to a Go string.
	toString := func(field []int8) string {
		buf := make([]byte, 0, len(field))
		for _, c := range field {
			if c == 0 {
				break
			}
			buf = append(buf, byte(c))
		}
		return string(buf)
	}
	return toString(uname.Sysname[:]) + " " + toString(uname.Release[:]) + "." + toString(uname.Machine[:]), nil
}
// SetAffinity sets cpu affinity.
func SetAffinity(cpus []int) error {
	var set unix.CPUSet
	set.Zero()
	for _, cpu := range cpus {
		set.Set(cpu)
	}
	// Pin the current process to exactly the given CPUs.
	return unix.SchedSetaffinity(unix.Getpid(), &set)
}
// GetSockUID gets the uid of the other end of the UNIX domain socket via the
// SO_PEERCRED socket option.
//
// BUG FIX: previously the error from GetsockoptUcred was assigned to the outer
// err inside the Control callback and then overwritten by raw.Control's own
// return value — a failed getsockopt was silently dropped and cred.Uid could
// nil-deref. The two errors are now tracked separately.
func GetSockUID(uc net.UnixConn) (uid uint32, err error) {
	raw, err := uc.SyscallConn()
	if err != nil {
		return 0, err
	}
	var cred *unix.Ucred
	var credErr error
	err = raw.Control(func(fd uintptr) {
		cred, credErr = unix.GetsockoptUcred(int(fd),
			unix.SOL_SOCKET,
			unix.SO_PEERCRED)
	})
	if err != nil {
		return 0, err
	}
	if credErr != nil {
		return 0, credErr
	}
	return cred.Uid, nil
}
|
package shell
import (
"bytes"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestFPromptUserForInputReturnsYesOnNonInteractive verifies that
// non-interactive mode short-circuits the prompt and answers "yes".
func TestFPromptUserForInputReturnsYesOnNonInteractive(t *testing.T) {
	t.Parallel()
	options := NewShellOptions()
	options.NonInteractive = true
	answer, err := FPromptUserForInput(os.Stdout, os.Stdin, "", options)
	assert.Nil(t, err)
	assert.Equal(t, answer, "yes")
}
// TestFPromptUserForInputStripsInput verifies that leading/trailing whitespace
// is stripped from the user's answer.
func TestFPromptUserForInputStripsInput(t *testing.T) {
	t.Parallel()
	options := NewShellOptions()
	stdout := bytes.NewBufferString("")
	stdin := bytes.NewBufferString("\t1.21 Gigawatts\t \n")
	answer, err := FPromptUserForInput(stdout, stdin, "", options)
	assert.Nil(t, err)
	assert.Equal(t, answer, "1.21 Gigawatts")
}
// TestFPromptUserForInputAllowsEmptyString verifies that a bare newline yields
// an empty answer rather than an error.
func TestFPromptUserForInputAllowsEmptyString(t *testing.T) {
	t.Parallel()
	options := NewShellOptions()
	stdout := bytes.NewBufferString("")
	stdin := bytes.NewBufferString("\n")
	answer, err := FPromptUserForInput(stdout, stdin, "", options)
	assert.Nil(t, err)
	assert.Equal(t, answer, "")
}
// TestFPromptUserForInputPrintsOutPrompt verifies the prompt text is written to
// the provided stdout writer.
func TestFPromptUserForInputPrintsOutPrompt(t *testing.T) {
	t.Parallel()
	options := NewShellOptions()
	stdout := bytes.NewBufferString("")
	stdin := bytes.NewBufferString("This is heavy\n")
	_, err := FPromptUserForInput(stdout, stdin, "Great Scott!", options)
	assert.Nil(t, err)
	assert.Contains(t, stdout.String(), "Great Scott!")
}
// TestFPromptUserForYesNoPrintsOutPromptWithYN verifies the yes/no prompt is
// suffixed with the "(y/n)" hint.
func TestFPromptUserForYesNoPrintsOutPromptWithYN(t *testing.T) {
	t.Parallel()
	options := NewShellOptions()
	stdout := bytes.NewBufferString("")
	stdin := bytes.NewBufferString("y\n")
	_, err := FPromptUserForYesNo(stdout, stdin, "Great Scott!", options)
	assert.Nil(t, err)
	assert.Contains(t, stdout.String(), "Great Scott! (y/n)")
}
// yesNoPromptTests maps raw user input to the boolean FPromptUserForYesNo is
// expected to return: case/whitespace variants of "yes" and "y" are true,
// everything else (including prefixes like "ye") is false.
var yesNoPromptTests = []struct {
in string
out bool
}{
{"y", true},
{"YEs", true},
{"Y", true},
{"YES", true},
{"yes ", true},
{" yes", true},
{"\tyes", true},
{"yes\t", true},
{"ye", false},
{"", false},
{"delorean", false},
{"yes no", false},
}
// TestFPromptUserForYesNo table-drives FPromptUserForYesNo over yesNoPromptTests.
func TestFPromptUserForYesNo(t *testing.T) {
	for _, tt := range yesNoPromptTests {
		// BUG FIX: capture the range variable. The subtests call t.Parallel and
		// therefore run after the loop has advanced; without this copy every
		// subtest saw the final table entry (pre-Go 1.22 loop semantics).
		tt := tt
		t.Run(tt.in, func(t *testing.T) {
			t.Parallel()
			opts := NewShellOptions()
			sout := ""
			fakeStdout := bytes.NewBufferString(sout)
			sin := tt.in + "\n"
			fakeStdin := bytes.NewBufferString(sin)
			resp, err := FPromptUserForYesNo(fakeStdout, fakeStdin, "Great Scott!", opts)
			assert.Nil(t, err)
			assert.Equal(t, resp, tt.out)
		})
	}
}
|
package nchanClient
import "testing"
// validStabStats is a sample nchan stub-status response body covering every
// field parseStubStats understands.
// NOTE(review): the name looks like a typo for "validStubStats"; renaming would
// touch its use sites, so it is kept as-is here.
const validStabStats = "total published messages: 123\nstored messages: 54353\nshared memory used: 12K\nshared memory limit: 131072K\nchannels: 34\nsubscribers: 5434535\nredis pending commands: 48\nredis connected servers: 65\ntotal interprocess alerts received: 43\ninterprocess alerts in transit: 654\ninterprocess queued alerts: 765\ntotal interprocess send delay: 534\ntotal interprocess receive delay: 46\nnchan version: 1.1.5\n"
// TestParseStubStatsValidInput table-drives parseStubStats over one fully
// populated valid payload and one malformed payload.
func TestParseStubStatsValidInput(t *testing.T) {
	var tests = []struct {
		input          []byte
		expectedResult StubStats
		expectedError  bool
	}{
		{
			input: []byte(validStabStats),
			expectedResult: StubStats{
				Redis: StubRedis{
					PendingCommands:  48,
					ConnectedServers: 65,
				},
				Channels:          34,
				Subscribers:       5434535,
				SharedMemoryUsed:  12,
				SharedMemoryLimit: 131072,
				Interprocess: StubInterprocess{
					AlertsInTransit:     654,
					QueuedAlerts:        765,
					TotalAlertsReceived: 43,
					TotalSendDelay:      534,
					TotalReceiveDelay:   46,
				},
				Messages: StubMessages{
					TotalPublished: 123,
					Stored:         54353,
				},
			},
			expectedError: false,
		},
		{
			input:         []byte("invalid-stats"),
			expectedError: true,
		},
	}
	for _, test := range tests {
		var result StubStats
		err := parseStubStats(test.input, &result)
		if err != nil && !test.expectedError {
			t.Errorf("parseStubStats() returned error for valid input %q: %v", string(test.input), err)
		}
		// BUG FIX: previously a case that expected an error but got none passed silently.
		if err == nil && test.expectedError {
			t.Errorf("parseStubStats() expected an error for input %q, got none", string(test.input))
		}
		if !test.expectedError && test.expectedResult != result {
			t.Errorf("parseStubStats() result %v != expected %v for input %q", result, test.expectedResult, test.input)
		}
	}
}
|
package usecase
import (
"context"
"github.com/dsukesato/go13/pbl/app1-backend/domain/model"
"github.com/dsukesato/go13/pbl/app1-backend/domain/repository"
)
// RestaurantsUsecase exposes restaurant operations to the delivery layer.
type RestaurantsUsecase interface {
// GetRestaurants returns all restaurants.
GetRestaurants(context.Context) ([]*model.Restaurant, error)
// PostRestaurants is named as a write but its current implementation only
// reads — NOTE(review): see restaurantsUsecase.PostRestaurants.
PostRestaurants(context.Context) ([]*model.Restaurant, error)
}
// restaurantsUsecase implements RestaurantsUsecase on top of a repository.
type restaurantsUsecase struct {
// restaurantsRepository is the persistence backend all calls delegate to.
restaurantsRepository repository.RestaurantsRepository
}
// NewRestaurantsUsecase builds a RestaurantsUsecase backed by the given repository.
func NewRestaurantsUsecase(rr repository.RestaurantsRepository) RestaurantsUsecase {
	usecase := &restaurantsUsecase{restaurantsRepository: rr}
	return usecase
}
// GetRestaurants returns every restaurant from the underlying repository.
func (ru restaurantsUsecase) GetRestaurants(ctx context.Context) ([]*model.Restaurant, error) {
	restaurants, err := ru.restaurantsRepository.GetRestaurants(ctx)
	if err != nil {
		return nil, err
	}
	return restaurants, nil
}
// PostRestaurants is named as a write operation but currently delegates to the
// repository's GetRestaurants, returning the same data as GetRestaurants.
// NOTE(review): this looks like a copy-paste bug — confirm whether the
// repository offers a create/post method this should call instead.
func (ru restaurantsUsecase) PostRestaurants(ctx context.Context) (posts []*model.Restaurant, err error) {
posts, err = ru.restaurantsRepository.GetRestaurants(ctx)
if err != nil {
return nil, err
}
return posts, err
}
|
package main
import (
"fmt"
"runtime"
"sync"
"time"
"github.com/colefan/gsgo/console"
"github.com/colefan/gsgo/netio"
"github.com/colefan/gsgo/netio/iobuffer"
"github.com/colefan/gsgo/netio/packet"
"github.com/colefan/gsgo/netio/qos"
)
// MyServer is the demo echo server built on netio.Server.
type MyServer struct {
*netio.Server
// rw presumably guards nMsgCount — NOTE(review): neither field is written in
// this file; confirm they are used elsewhere or remove them.
rw sync.Mutex
nMsgCount int
}
// HandleMsg echoes a received packet back to its sender: the header and raw
// payload are re-encoded into an output buffer, the leading length field is
// patched, and the buffer is written to the connection.
func (s *MyServer) HandleMsg(cmdid uint16, pack *packet.Packet, conn netio.ConnInf) {
	out := iobuffer.NewOutBuffer(int(pack.PackLen + packet.PACKET_PROXY_HEADER_LEN))
	out = pack.Header.Encode(out)
	for _, b := range pack.RawData {
		out.PutByte(b)
	}
	// Patch the first uint16 with the body length, excluding the proxy header.
	bodyLen := out.GetLen() - packet.PACKET_PROXY_HEADER_LEN
	out.SetUint16(uint16(bodyLen), 0)
	if conn != nil {
		conn.Write(out.GetData())
	}
}
// NewMyServer constructs a MyServer backed by a TCP socket server.
func NewMyServer() *MyServer {
	return &MyServer{Server: netio.NewTcpSocketServer()}
}
// ServerDispatcher extends the default packet dispatcher with a session count.
type ServerDispatcher struct {
netio.DefaultPackDispatcher
// sessionMap is allocated in NewServerDispatcher — NOTE(review): it is never
// read or written in this file; confirm it is needed.
sessionMap map[int]int
// nums counts opened sessions (see SessionOpen).
nums int
}
// NewServerDispatcher constructs a ServerDispatcher with an empty session map.
func NewServerDispatcher() *ServerDispatcher {
	return &ServerDispatcher{sessionMap: make(map[int]int)}
}
// SessionOpen counts a newly opened connection and logs the running total.
// NOTE(review): nums is incremented without synchronization — confirm the
// dispatcher serializes session events.
func (d *ServerDispatcher) SessionOpen(conn netio.ConnInf) {
	d.nums++
	// Fixed typo in the log output ("clien" -> "client").
	fmt.Println("New client coming:", d.nums)
}
// main boots the demo echo server on 0.0.0.0:12000 with QoS statistics enabled
// and blocks on console input until shutdown.
func main() {
fmt.Println("time = ", time.Now().Second())
// Leave one CPU free for the OS / other processes.
runtime.GOMAXPROCS(runtime.NumCPU() - 1)
s1 := NewMyServer()
err := s1.Init(`{"ip":"127.0.0.1","port":12000}`)
if err != nil {
fmt.Println("net server init err,", err)
return
}
s1.SetListenAddress("0.0.0.0")
s1.SetListenPort(12000)
s1.SetPackParser(netio.NewDefaultParser())
s1.SetPackDispatcher(NewServerDispatcher())
s1.GetPackDispatcher().AddPackEventListener("myserver", s1)
// NOTE(review): netqos is presumably the package name declared by the
// "netio/qos" import path — confirm.
s1.SetQos(netqos.NewServerQos())
s1.GetQos().SetEnable(true)
s1.GetQos().Stat()
go s1.Start() // start the server
console.CheckInput()
s1.Close()
}
|
package downloader
import (
"github.com/lf-edge/eve/pkg/pillar/types"
log "github.com/sirupsen/logrus"
)
// Handles both create and modify events.
// handleGlobalDownloadConfigModify installs the new GlobalDownloadConfig into
// the downloader context. Only the "global" key is accepted; any other key is
// logged and ignored. NOTE(review): the type assertions panic on unexpected
// argument types — assumed intentional under this pubsub framework; confirm.
func handleGlobalDownloadConfigModify(ctxArg interface{}, key string,
configArg interface{}) {
ctx := ctxArg.(*downloaderContext)
config := configArg.(types.GlobalDownloadConfig)
if key != "global" {
log.Errorf("handleGlobalDownloadConfigModify: unexpected key %s\n", key)
return
}
log.Infof("handleGlobalDownloadConfigModify for %s\n", key)
ctx.globalConfig = config
log.Infof("handleGlobalDownloadConfigModify done for %s\n", key)
}
|
package service
import (
	"errors"

	"github.com/sirsean/packhunter/config"
	"github.com/sirsean/packhunter/model"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)
// userCollection returns the "users" collection in the configured Mongo
// database for the given session.
var userCollection = func(session *mgo.Session) *mgo.Collection {
return session.DB(config.Get().Mongo.Database).C("users")
}
// GetUserByIdHex looks up a single user by the hex form of its Mongo ObjectId.
// The id is validated first: bson.ObjectIdHex panics on malformed input, so a
// bad id is now reported as an error instead of crashing the caller.
func GetUserByIdHex(session *mgo.Session, id string) (model.User, error) {
	var user model.User
	if !bson.IsObjectIdHex(id) {
		return user, errors.New("invalid user id: " + id)
	}
	err := userCollection(session).FindId(bson.ObjectIdHex(id)).One(&user)
	return user, err
}
// GetUserByPHId looks up a single user by its Product Hunt user id.
func GetUserByPHId(session *mgo.Session, phUserId int) (model.User, error) {
	var user model.User
	err := userCollection(session).Find(bson.M{"phid": phUserId}).One(&user)
	return user, err
}
// SaveUser inserts or updates the user document, keyed by its Product Hunt id.
func SaveUser(session *mgo.Session, user *model.User) error {
	_, err := userCollection(session).Upsert(bson.M{"phid": user.PHId}, user)
	return err
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package format
import (
"strings"
"gollum/core"
)
// Trim formatter plugin
//
// Trim removes a set of characters from the beginning and end of a metadata value
// or the payload.
//
// Parameters
//
// - Characters: This value defines which characters should be removed from
// both ends of the data. The data to operate on is expected to be a string.
// By default this is set to " \t\r\n\v\f".
//
// Examples
//
// This example will trim spaces from the message payload:
//
// exampleConsumer:
// Type: consumer.Console
// Streams: "*"
// Modulators:
// - format.Trim: {}
type Trim struct {
core.SimpleFormatter `gollumdoc:"embed_type"`
// characters is the cutset passed to strings.Trim, populated from the
// "Characters" plugin setting via the struct tag.
// NOTE(review): tag-based injection into an unexported field while Configure
// is empty — confirm the config reader actually sets this.
characters string `config:"Characters" default:" \t\r\n\v\f"`
}
// init registers the Trim formatter with the global plugin type registry so it
// can be instantiated from configuration.
func init() {
core.TypeRegistry.Register(Trim{})
}
// Configure initializes this formatter with values from a plugin config.
// The body is intentionally empty: the "Characters" setting is expected to be
// injected via the struct tag on Trim.characters.
// NOTE(review): confirm the framework performs tag-based configuration;
// otherwise characters stays "" and ApplyFormatter becomes a no-op.
func (format *Trim) Configure(conf core.PluginConfigReader) {
}
// ApplyFormatter trims the configured character set from both ends of the
// source data and stores the result as the message's target data.
func (format *Trim) ApplyFormatter(msg *core.Message) error {
	trimmed := strings.Trim(format.GetSourceDataAsString(msg), format.characters)
	format.SetTargetData(msg, trimmed)
	return nil
}
|
package integration
import (
"errors"
"testing"
"github.com/CyCoreSystems/ari"
)
// TestLoggingList exercises Asterisk().Logging().List in both the success and
// error paths against the mocked Logging service.
func TestLoggingList(t *testing.T, s Server) {
	runTest("ok", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		var expected = []*ari.Key{
			ari.NewKey(ari.LoggingKey, "n1"),
		}
		m.Logging.On("List", (*ari.Key)(nil)).Return(expected, nil)
		ld, err := cl.Asterisk().Logging().List(nil)
		if err != nil {
			t.Errorf("Unexpected error in logging list: %s", err)
		}
		if len(ld) != len(expected) {
			t.Errorf("Expected return of length %d, got %d", len(expected), len(ld))
		} else {
			for idx := range ld {
				if ld[idx].ID != expected[idx].ID {
					// BUG FIX: closing quote was missing after the second %v.
					t.Errorf("Expected item '%d' to be '%v', got '%v'",
						idx, expected[idx], ld[idx])
				}
			}
		}
		m.Logging.AssertCalled(t, "List", (*ari.Key)(nil))
	})
	runTest("err", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		var expected []*ari.Key
		m.Logging.On("List", (*ari.Key)(nil)).Return(expected, errors.New("error"))
		ld, err := cl.Asterisk().Logging().List(nil)
		if err == nil {
			t.Errorf("Expected error in logging list")
		}
		if len(ld) != len(expected) {
			t.Errorf("Expected return of length %d, got %d", len(expected), len(ld))
		} else {
			for idx := range ld {
				if ld[idx].ID != expected[idx].ID {
					t.Errorf("Expected item '%d' to be '%v', got '%v'",
						idx, expected[idx], ld[idx])
				}
			}
		}
		m.Logging.AssertCalled(t, "List", (*ari.Key)(nil))
	})
}
// TestLoggingCreate exercises Asterisk().Logging().Create in both the success
// and error paths against the mocked Logging service.
func TestLoggingCreate(t *testing.T, s Server) {
	key := ari.NewKey(ari.LoggingKey, "n1")
	runTest("ok", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		m.Logging.On("Create", key, "l1").Return(ari.NewLogHandle(key, m.Logging), nil)
		if _, err := cl.Asterisk().Logging().Create(key, "l1"); err != nil {
			t.Errorf("Unexpected error in logging create: %s", err)
		}
		m.Logging.AssertCalled(t, "Create", key, "l1")
	})
	runTest("err", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		m.Logging.On("Create", key, "l1").Return(nil, errors.New("error"))
		if _, err := cl.Asterisk().Logging().Create(key, "l1"); err == nil {
			t.Errorf("Expected error in logging create")
		}
		m.Logging.AssertCalled(t, "Create", key, "l1")
	})
}
// TestLoggingDelete exercises Asterisk().Logging().Delete in both the success
// and error paths against the mocked Logging service.
func TestLoggingDelete(t *testing.T, s Server) {
	key := ari.NewKey(ari.LoggingKey, "n1")
	runTest("ok", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		m.Logging.On("Delete", key).Return(nil)
		if err := cl.Asterisk().Logging().Delete(key); err != nil {
			t.Errorf("Unexpected error in logging Delete: %s", err)
		}
		m.Logging.AssertCalled(t, "Delete", key)
	})
	runTest("err", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		m.Logging.On("Delete", key).Return(errors.New("error"))
		if err := cl.Asterisk().Logging().Delete(key); err == nil {
			t.Errorf("Expected error in logging Delete")
		}
		m.Logging.AssertCalled(t, "Delete", key)
	})
}
// TestLoggingRotate exercises Asterisk().Logging().Rotate in both the success
// and error paths against the mocked Logging service.
func TestLoggingRotate(t *testing.T, s Server) {
	key := ari.NewKey(ari.LoggingKey, "n1")
	runTest("ok", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		m.Logging.On("Rotate", key).Return(nil)
		if err := cl.Asterisk().Logging().Rotate(key); err != nil {
			t.Errorf("Unexpected error in logging Rotate: %s", err)
		}
		m.Logging.AssertCalled(t, "Rotate", key)
	})
	runTest("err", t, s, func(t *testing.T, m *mock, cl ari.Client) {
		m.Logging.On("Rotate", key).Return(errors.New("error"))
		if err := cl.Asterisk().Logging().Rotate(key); err == nil {
			t.Errorf("Expected error in logging Rotate")
		}
		m.Logging.AssertCalled(t, "Rotate", key)
	})
}
|
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v20180525
import (
"github.com/tencentyun/tcecloud-sdk-go/tcecloud/common"
tchttp "github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/http"
"github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/profile"
)
// APIVersion is the TKE API version targeted by this client.
const APIVersion = "2018-05-25"
// Client is the TKE (Tencent Kubernetes Engine) API client for version 2018-05-25.
type Client struct {
common.Client
}
// NewClientWithSecretId creates a Client from a raw secret id/key pair.
//
// Deprecated: use NewClient with a common.Credential instead.
func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) {
cpf := profile.NewClientProfile()
client = &Client{}
client.Init(region).WithSecretId(secretId, secretKey).WithProfile(cpf)
return
}
// NewClient creates a Client for the given region using the supplied credential
// and client profile.
func NewClient(credential *common.Credential, region string, clientProfile *profile.ClientProfile) (client *Client, err error) {
client = &Client{}
client.Init(region).
WithCredential(credential).
WithProfile(clientProfile)
return
}
// NewAddClusterInstancesRequest constructs a request bound to the TKE
// "AddClusterInstances" action.
func NewAddClusterInstancesRequest() (request *AddClusterInstancesRequest) {
request = &AddClusterInstancesRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "AddClusterInstances")
return
}
// NewAddClusterInstancesResponse constructs an empty AddClusterInstancesResponse.
func NewAddClusterInstancesResponse() (response *AddClusterInstancesResponse) {
response = &AddClusterInstancesResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// AddClusterInstances scales out cluster nodes (API 3.0).
// A nil request is replaced with a default-constructed one.
func (c *Client) AddClusterInstances(request *AddClusterInstancesRequest) (response *AddClusterInstancesResponse, err error) {
if request == nil {
request = NewAddClusterInstancesRequest()
}
response = NewAddClusterInstancesResponse()
err = c.Send(request, response)
return
}
// NewAddExistedInstancesRequest constructs a request bound to the TKE
// "AddExistedInstances" action.
func NewAddExistedInstancesRequest() (request *AddExistedInstancesRequest) {
request = &AddExistedInstancesRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "AddExistedInstances")
return
}
// NewAddExistedInstancesResponse constructs an empty AddExistedInstancesResponse.
func NewAddExistedInstancesResponse() (response *AddExistedInstancesResponse) {
response = &AddExistedInstancesResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// AddExistedInstances adds already-existing instances to a cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) AddExistedInstances(request *AddExistedInstancesRequest) (response *AddExistedInstancesResponse, err error) {
if request == nil {
request = NewAddExistedInstancesRequest()
}
response = NewAddExistedInstancesResponse()
err = c.Send(request, response)
return
}
// NewCheckClusterCIDRRequest constructs a request bound to the TKE
// "CheckClusterCIDR" action.
func NewCheckClusterCIDRRequest() (request *CheckClusterCIDRRequest) {
request = &CheckClusterCIDRRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "CheckClusterCIDR")
return
}
// NewCheckClusterCIDRResponse constructs an empty CheckClusterCIDRResponse.
func NewCheckClusterCIDRResponse() (response *CheckClusterCIDRResponse) {
response = &CheckClusterCIDRResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// CheckClusterCIDR checks whether the cluster's CIDR conflicts.
// A nil request is replaced with a default-constructed one.
func (c *Client) CheckClusterCIDR(request *CheckClusterCIDRRequest) (response *CheckClusterCIDRResponse, err error) {
if request == nil {
request = NewCheckClusterCIDRRequest()
}
response = NewCheckClusterCIDRResponse()
err = c.Send(request, response)
return
}
// NewCreateClusterRequest constructs a request bound to the TKE
// "CreateCluster" action.
func NewCreateClusterRequest() (request *CreateClusterRequest) {
request = &CreateClusterRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "CreateCluster")
return
}
// NewCreateClusterResponse constructs an empty CreateClusterResponse.
func NewCreateClusterResponse() (response *CreateClusterResponse) {
response = &CreateClusterResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// CreateCluster creates a cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) CreateCluster(request *CreateClusterRequest) (response *CreateClusterResponse, err error) {
if request == nil {
request = NewCreateClusterRequest()
}
response = NewCreateClusterResponse()
err = c.Send(request, response)
return
}
// NewCreateClusterAsGroupRequest constructs a request bound to the TKE
// "CreateClusterAsGroup" action.
func NewCreateClusterAsGroupRequest() (request *CreateClusterAsGroupRequest) {
request = &CreateClusterAsGroupRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "CreateClusterAsGroup")
return
}
// NewCreateClusterAsGroupResponse constructs an empty CreateClusterAsGroupResponse.
func NewCreateClusterAsGroupResponse() (response *CreateClusterAsGroupResponse) {
response = &CreateClusterAsGroupResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// CreateClusterAsGroup creates an auto-scaling group for an existing cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) CreateClusterAsGroup(request *CreateClusterAsGroupRequest) (response *CreateClusterAsGroupResponse, err error) {
if request == nil {
request = NewCreateClusterAsGroupRequest()
}
response = NewCreateClusterAsGroupResponse()
err = c.Send(request, response)
return
}
// NewCreateClusterInstancesRequest constructs a request bound to the TKE
// "CreateClusterInstances" action.
func NewCreateClusterInstancesRequest() (request *CreateClusterInstancesRequest) {
request = &CreateClusterInstancesRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "CreateClusterInstances")
return
}
// NewCreateClusterInstancesResponse constructs an empty CreateClusterInstancesResponse.
func NewCreateClusterInstancesResponse() (response *CreateClusterInstancesResponse) {
response = &CreateClusterInstancesResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// CreateClusterInstances creates new nodes in a cluster (scale-out).
// A nil request is replaced with a default-constructed one.
func (c *Client) CreateClusterInstances(request *CreateClusterInstancesRequest) (response *CreateClusterInstancesResponse, err error) {
if request == nil {
request = NewCreateClusterInstancesRequest()
}
response = NewCreateClusterInstancesResponse()
err = c.Send(request, response)
return
}
// NewDeleteClusterRequest constructs a request bound to the TKE
// "DeleteCluster" action.
func NewDeleteClusterRequest() (request *DeleteClusterRequest) {
request = &DeleteClusterRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DeleteCluster")
return
}
// NewDeleteClusterResponse constructs an empty DeleteClusterResponse.
func NewDeleteClusterResponse() (response *DeleteClusterResponse) {
response = &DeleteClusterResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// DeleteCluster deletes a cluster (YUNAPI V3).
// A nil request is replaced with a default-constructed one.
func (c *Client) DeleteCluster(request *DeleteClusterRequest) (response *DeleteClusterResponse, err error) {
if request == nil {
request = NewDeleteClusterRequest()
}
response = NewDeleteClusterResponse()
err = c.Send(request, response)
return
}
// NewDeleteClusterAsGroupsRequest constructs a request bound to the TKE
// "DeleteClusterAsGroups" action.
func NewDeleteClusterAsGroupsRequest() (request *DeleteClusterAsGroupsRequest) {
request = &DeleteClusterAsGroupsRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DeleteClusterAsGroups")
return
}
// NewDeleteClusterAsGroupsResponse constructs an empty DeleteClusterAsGroupsResponse.
func NewDeleteClusterAsGroupsResponse() (response *DeleteClusterAsGroupsResponse) {
response = &DeleteClusterAsGroupsResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// DeleteClusterAsGroups deletes cluster auto-scaling groups.
// A nil request is replaced with a default-constructed one.
func (c *Client) DeleteClusterAsGroups(request *DeleteClusterAsGroupsRequest) (response *DeleteClusterAsGroupsResponse, err error) {
if request == nil {
request = NewDeleteClusterAsGroupsRequest()
}
response = NewDeleteClusterAsGroupsResponse()
err = c.Send(request, response)
return
}
// NewDeleteClusterInstancesRequest constructs a request bound to the TKE
// "DeleteClusterInstances" action.
func NewDeleteClusterInstancesRequest() (request *DeleteClusterInstancesRequest) {
request = &DeleteClusterInstancesRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DeleteClusterInstances")
return
}
// NewDeleteClusterInstancesResponse constructs an empty DeleteClusterInstancesResponse.
func NewDeleteClusterInstancesResponse() (response *DeleteClusterInstancesResponse) {
response = &DeleteClusterInstancesResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// DeleteClusterInstances deletes instances from a cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) DeleteClusterInstances(request *DeleteClusterInstancesRequest) (response *DeleteClusterInstancesResponse, err error) {
if request == nil {
request = NewDeleteClusterInstancesRequest()
}
response = NewDeleteClusterInstancesResponse()
err = c.Send(request, response)
return
}
// NewDescribeClusterAsGroupOptionRequest constructs a request bound to the TKE
// "DescribeClusterAsGroupOption" action.
func NewDescribeClusterAsGroupOptionRequest() (request *DescribeClusterAsGroupOptionRequest) {
request = &DescribeClusterAsGroupOptionRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DescribeClusterAsGroupOption")
return
}
// NewDescribeClusterAsGroupOptionResponse constructs an empty DescribeClusterAsGroupOptionResponse.
func NewDescribeClusterAsGroupOptionResponse() (response *DescribeClusterAsGroupOptionResponse) {
response = &DescribeClusterAsGroupOptionResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// DescribeClusterAsGroupOption describes the cluster's auto-scaling configuration.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeClusterAsGroupOption(request *DescribeClusterAsGroupOptionRequest) (response *DescribeClusterAsGroupOptionResponse, err error) {
if request == nil {
request = NewDescribeClusterAsGroupOptionRequest()
}
response = NewDescribeClusterAsGroupOptionResponse()
err = c.Send(request, response)
return
}
// NewDescribeClusterAsGroupsRequest constructs a request bound to the TKE
// "DescribeClusterAsGroups" action.
func NewDescribeClusterAsGroupsRequest() (request *DescribeClusterAsGroupsRequest) {
request = &DescribeClusterAsGroupsRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DescribeClusterAsGroups")
return
}
// NewDescribeClusterAsGroupsResponse constructs an empty DescribeClusterAsGroupsResponse.
func NewDescribeClusterAsGroupsResponse() (response *DescribeClusterAsGroupsResponse) {
response = &DescribeClusterAsGroupsResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// DescribeClusterAsGroups lists the auto-scaling groups associated with a cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeClusterAsGroups(request *DescribeClusterAsGroupsRequest) (response *DescribeClusterAsGroupsResponse, err error) {
if request == nil {
request = NewDescribeClusterAsGroupsRequest()
}
response = NewDescribeClusterAsGroupsResponse()
err = c.Send(request, response)
return
}
// NewDescribeClusterInstanceIdsRequest constructs a request bound to the TKE
// "DescribeClusterInstanceIds" action.
func NewDescribeClusterInstanceIdsRequest() (request *DescribeClusterInstanceIdsRequest) {
request = &DescribeClusterInstanceIdsRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DescribeClusterInstanceIds")
return
}
// NewDescribeClusterInstanceIdsResponse constructs an empty DescribeClusterInstanceIdsResponse.
func NewDescribeClusterInstanceIdsResponse() (response *DescribeClusterInstanceIdsResponse) {
response = &DescribeClusterInstanceIdsResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// DescribeClusterInstanceIds gets the list of cluster node IDs (internal use only).
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeClusterInstanceIds(request *DescribeClusterInstanceIdsRequest) (response *DescribeClusterInstanceIdsResponse, err error) {
if request == nil {
request = NewDescribeClusterInstanceIdsRequest()
}
response = NewDescribeClusterInstanceIdsResponse()
err = c.Send(request, response)
return
}
// NewDescribeClusterInstancesRequest constructs a request bound to the TKE
// "DescribeClusterInstances" action.
func NewDescribeClusterInstancesRequest() (request *DescribeClusterInstancesRequest) {
request = &DescribeClusterInstancesRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DescribeClusterInstances")
return
}
// NewDescribeClusterInstancesResponse constructs an empty DescribeClusterInstancesResponse.
func NewDescribeClusterInstancesResponse() (response *DescribeClusterInstancesResponse) {
response = &DescribeClusterInstancesResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// DescribeClusterInstances queries node instance information of a cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeClusterInstances(request *DescribeClusterInstancesRequest) (response *DescribeClusterInstancesResponse, err error) {
if request == nil {
request = NewDescribeClusterInstancesRequest()
}
response = NewDescribeClusterInstancesResponse()
err = c.Send(request, response)
return
}
// NewDescribeClusterSecurityRequest constructs a request bound to the TKE
// "DescribeClusterSecurity" action.
func NewDescribeClusterSecurityRequest() (request *DescribeClusterSecurityRequest) {
request = &DescribeClusterSecurityRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("tke", APIVersion, "DescribeClusterSecurity")
return
}
func NewDescribeClusterSecurityResponse() (response *DescribeClusterSecurityResponse) {
response = &DescribeClusterSecurityResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// 集群的密钥信息
func (c *Client) DescribeClusterSecurity(request *DescribeClusterSecurityRequest) (response *DescribeClusterSecurityResponse, err error) {
if request == nil {
request = NewDescribeClusterSecurityRequest()
}
response = NewDescribeClusterSecurityResponse()
err = c.Send(request, response)
return
}
// NewDescribeClusterServicesRequest builds an initialized DescribeClusterServices request for the tke product.
func NewDescribeClusterServicesRequest() (request *DescribeClusterServicesRequest) {
    request = &DescribeClusterServicesRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DescribeClusterServices")
    return
}
// NewDescribeClusterServicesResponse builds an empty DescribeClusterServices response envelope.
func NewDescribeClusterServicesResponse() (response *DescribeClusterServicesResponse) {
    response = &DescribeClusterServicesResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DescribeClusterServices gets detailed descriptions of Services in the cluster
// (see the Kubernetes API for Service); intended for internal short-term use only.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeClusterServices(request *DescribeClusterServicesRequest) (response *DescribeClusterServicesResponse, err error) {
    if request == nil {
        request = NewDescribeClusterServicesRequest()
    }
    response = NewDescribeClusterServicesResponse()
    err = c.Send(request, response)
    return
}
// NewDescribeClustersRequest builds an initialized DescribeClusters request for the tke product.
func NewDescribeClustersRequest() (request *DescribeClustersRequest) {
    request = &DescribeClustersRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DescribeClusters")
    return
}
// NewDescribeClustersResponse builds an empty DescribeClusters response envelope.
func NewDescribeClustersResponse() (response *DescribeClustersResponse) {
    response = &DescribeClustersResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DescribeClusters queries the list of clusters.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeClusters(request *DescribeClustersRequest) (response *DescribeClustersResponse, err error) {
    if request == nil {
        request = NewDescribeClustersRequest()
    }
    response = NewDescribeClustersResponse()
    err = c.Send(request, response)
    return
}
// NewDescribeExistedInstancesRequest builds an initialized DescribeExistedInstances request for the tke product.
func NewDescribeExistedInstancesRequest() (request *DescribeExistedInstancesRequest) {
    request = &DescribeExistedInstancesRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DescribeExistedInstances")
    return
}
// NewDescribeExistedInstancesResponse builds an empty DescribeExistedInstances response envelope.
func NewDescribeExistedInstancesResponse() (response *DescribeExistedInstancesResponse) {
    response = &DescribeExistedInstancesResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DescribeExistedInstances queries existing nodes and determines whether they can join a cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeExistedInstances(request *DescribeExistedInstancesRequest) (response *DescribeExistedInstancesResponse, err error) {
    if request == nil {
        request = NewDescribeExistedInstancesRequest()
    }
    response = NewDescribeExistedInstancesResponse()
    err = c.Send(request, response)
    return
}
// NewDescribeImagesRequest builds an initialized DescribeImages request for the tke product.
func NewDescribeImagesRequest() (request *DescribeImagesRequest) {
    request = &DescribeImagesRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DescribeImages")
    return
}
// NewDescribeImagesResponse builds an empty DescribeImages response envelope.
func NewDescribeImagesResponse() (response *DescribeImagesResponse) {
    response = &DescribeImagesResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DescribeImages gets image information.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeImages(request *DescribeImagesRequest) (response *DescribeImagesResponse, err error) {
    if request == nil {
        request = NewDescribeImagesRequest()
    }
    response = NewDescribeImagesResponse()
    err = c.Send(request, response)
    return
}
// NewDescribeQuotaRequest builds an initialized DescribeQuota request for the tke product.
func NewDescribeQuotaRequest() (request *DescribeQuotaRequest) {
    request = &DescribeQuotaRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DescribeQuota")
    return
}
// NewDescribeQuotaResponse builds an empty DescribeQuota response envelope.
func NewDescribeQuotaResponse() (response *DescribeQuotaResponse) {
    response = &DescribeQuotaResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DescribeQuota gets the cluster quota.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeQuota(request *DescribeQuotaRequest) (response *DescribeQuotaResponse, err error) {
    if request == nil {
        request = NewDescribeQuotaRequest()
    }
    response = NewDescribeQuotaResponse()
    err = c.Send(request, response)
    return
}
// NewDescribeRegionsRequest builds an initialized DescribeRegions request for the tke product.
func NewDescribeRegionsRequest() (request *DescribeRegionsRequest) {
    request = &DescribeRegionsRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DescribeRegions")
    return
}
// NewDescribeRegionsResponse builds an empty DescribeRegions response envelope.
func NewDescribeRegionsResponse() (response *DescribeRegionsResponse) {
    response = &DescribeRegionsResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DescribeRegions gets all regions supported by the container service.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeRegions(request *DescribeRegionsRequest) (response *DescribeRegionsResponse, err error) {
    if request == nil {
        request = NewDescribeRegionsRequest()
    }
    response = NewDescribeRegionsResponse()
    err = c.Send(request, response)
    return
}
// NewDescribeVersionsRequest builds an initialized DescribeVersions request for the tke product.
func NewDescribeVersionsRequest() (request *DescribeVersionsRequest) {
    request = &DescribeVersionsRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DescribeVersions")
    return
}
// NewDescribeVersionsResponse builds an empty DescribeVersions response envelope.
func NewDescribeVersionsResponse() (response *DescribeVersionsResponse) {
    response = &DescribeVersionsResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DescribeVersions gets cluster version information.
// A nil request is replaced with a default-constructed one.
func (c *Client) DescribeVersions(request *DescribeVersionsRequest) (response *DescribeVersionsResponse, err error) {
    if request == nil {
        request = NewDescribeVersionsRequest()
    }
    response = NewDescribeVersionsResponse()
    err = c.Send(request, response)
    return
}
// NewDrainClusterNodeRequest builds an initialized DrainClusterNode request for the tke product.
func NewDrainClusterNodeRequest() (request *DrainClusterNodeRequest) {
    request = &DrainClusterNodeRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "DrainClusterNode")
    return
}
// NewDrainClusterNodeResponse builds an empty DrainClusterNode response envelope.
func NewDrainClusterNodeResponse() (response *DrainClusterNodeResponse) {
    response = &DrainClusterNodeResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// DrainClusterNode drains (evicts workloads from) a node in the cluster.
// A nil request is replaced with a default-constructed one.
func (c *Client) DrainClusterNode(request *DrainClusterNodeRequest) (response *DrainClusterNodeResponse, err error) {
    if request == nil {
        request = NewDrainClusterNodeRequest()
    }
    response = NewDrainClusterNodeResponse()
    err = c.Send(request, response)
    return
}
// NewForwardRequestRequest builds an initialized ForwardRequest request for the tke product.
func NewForwardRequestRequest() (request *ForwardRequestRequest) {
    request = &ForwardRequestRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "ForwardRequest")
    return
}
// NewForwardRequestResponse builds an empty ForwardRequest response envelope.
func NewForwardRequestResponse() (response *ForwardRequestResponse) {
    response = &ForwardRequestResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// ForwardRequest forwards a YUNAPI request to the TKE APIServer.
// A nil request is replaced with a default-constructed one.
func (c *Client) ForwardRequest(request *ForwardRequestRequest) (response *ForwardRequestResponse, err error) {
    if request == nil {
        request = NewForwardRequestRequest()
    }
    response = NewForwardRequestResponse()
    err = c.Send(request, response)
    return
}
// NewModifyClusterAsGroupAttributeRequest builds an initialized ModifyClusterAsGroupAttribute request for the tke product.
func NewModifyClusterAsGroupAttributeRequest() (request *ModifyClusterAsGroupAttributeRequest) {
    request = &ModifyClusterAsGroupAttributeRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "ModifyClusterAsGroupAttribute")
    return
}
// NewModifyClusterAsGroupAttributeResponse builds an empty ModifyClusterAsGroupAttribute response envelope.
func NewModifyClusterAsGroupAttributeResponse() (response *ModifyClusterAsGroupAttributeResponse) {
    response = &ModifyClusterAsGroupAttributeResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// ModifyClusterAsGroupAttribute modifies the attributes of a cluster scaling group.
// A nil request is replaced with a default-constructed one.
func (c *Client) ModifyClusterAsGroupAttribute(request *ModifyClusterAsGroupAttributeRequest) (response *ModifyClusterAsGroupAttributeResponse, err error) {
    if request == nil {
        request = NewModifyClusterAsGroupAttributeRequest()
    }
    response = NewModifyClusterAsGroupAttributeResponse()
    err = c.Send(request, response)
    return
}
// NewModifyClusterAsGroupOptionAttributeRequest builds an initialized ModifyClusterAsGroupOptionAttribute request for the tke product.
func NewModifyClusterAsGroupOptionAttributeRequest() (request *ModifyClusterAsGroupOptionAttributeRequest) {
    request = &ModifyClusterAsGroupOptionAttributeRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "ModifyClusterAsGroupOptionAttribute")
    return
}
// NewModifyClusterAsGroupOptionAttributeResponse builds an empty ModifyClusterAsGroupOptionAttribute response envelope.
func NewModifyClusterAsGroupOptionAttributeResponse() (response *ModifyClusterAsGroupOptionAttributeResponse) {
    response = &ModifyClusterAsGroupOptionAttributeResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// ModifyClusterAsGroupOptionAttribute modifies the cluster's auto-scaling attributes.
// A nil request is replaced with a default-constructed one.
func (c *Client) ModifyClusterAsGroupOptionAttribute(request *ModifyClusterAsGroupOptionAttributeRequest) (response *ModifyClusterAsGroupOptionAttributeResponse, err error) {
    if request == nil {
        request = NewModifyClusterAsGroupOptionAttributeRequest()
    }
    response = NewModifyClusterAsGroupOptionAttributeResponse()
    err = c.Send(request, response)
    return
}
// NewModifyClusterAttributeRequest builds an initialized ModifyClusterAttribute request for the tke product.
func NewModifyClusterAttributeRequest() (request *ModifyClusterAttributeRequest) {
    request = &ModifyClusterAttributeRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "ModifyClusterAttribute")
    return
}
// NewModifyClusterAttributeResponse builds an empty ModifyClusterAttribute response envelope.
func NewModifyClusterAttributeResponse() (response *ModifyClusterAttributeResponse) {
    response = &ModifyClusterAttributeResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// ModifyClusterAttribute modifies cluster attributes.
// A nil request is replaced with a default-constructed one.
func (c *Client) ModifyClusterAttribute(request *ModifyClusterAttributeRequest) (response *ModifyClusterAttributeResponse, err error) {
    if request == nil {
        request = NewModifyClusterAttributeRequest()
    }
    response = NewModifyClusterAttributeResponse()
    err = c.Send(request, response)
    return
}
// NewServiceMeshForwardRequestRequest builds an initialized ServiceMeshForwardRequest request for the tke product.
func NewServiceMeshForwardRequestRequest() (request *ServiceMeshForwardRequestRequest) {
    request = &ServiceMeshForwardRequestRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tke", APIVersion, "ServiceMeshForwardRequest")
    return
}
// NewServiceMeshForwardRequestResponse builds an empty ServiceMeshForwardRequest response envelope.
func NewServiceMeshForwardRequestResponse() (response *ServiceMeshForwardRequestResponse) {
    response = &ServiceMeshForwardRequestResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}
// ServiceMeshForwardRequest forwards a request through the service-mesh proxy.
// A nil request is replaced with a default-constructed one.
func (c *Client) ServiceMeshForwardRequest(request *ServiceMeshForwardRequestRequest) (response *ServiceMeshForwardRequestResponse, err error) {
    if request == nil {
        request = NewServiceMeshForwardRequestRequest()
    }
    response = NewServiceMeshForwardRequestResponse()
    err = c.Send(request, response)
    return
}
|
/*
Copyright 2019 The Yingxi.company Authors. All rights reserved.
Go
controller User
*/
package controller
import (
"github.com/gin-gonic/gin"
"yingxi.company/infra/go/handler"
"yingxi.company/infra/go/handler/errno"
"yingxi.company/infra/go/model"
"net/http"
"time"
"math"
"strings"
)
// ListResponse is the JSON payload returned by GetList: the requested user
// name, the total user count, and the full user list.
type ListResponse struct {
    UserName string `json:"userName"`
    TotalCount int `json:"totalCount"`
    UserList []*model.User `json:"userList"`
}
// GetList handles the user-list endpoint: it echoes the ":name" path
// parameter and returns every user from the model layer.
func GetList(context *gin.Context) {
    userName := context.Param("name")
    // NOTE(review): the error from model.ListUser() is discarded, so a
    // failed lookup is reported as an empty OK response — confirm this is
    // intentional.
    users, count, _ := model.ListUser()
    handler.SendResponse(
        context,
        errno.OK,
        ListResponse{
            UserName: userName,
            TotalCount: count,
            UserList: users,
        },
    )
}
// Test is a health-check style handler that responds 200 with a fixed string.
func Test(context *gin.Context) {
    context.String(http.StatusOK, "yingxi.company")
}
// 获取当前周
func getWeekDay() []string {
now := time.Now()
offset := int(time.Monday - now.Weekday())
if offset > 0 {
offset = -6
}
limit := 4 - int(math.Abs(float64(offset)))
weekStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, offset).Format("2006-01-02")
weekEnd := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, limit).Format("2006-01-02")
return []string{weekStart, weekEnd}
}
// Index renders the web.html template with the site title and the current
// work-week range joined by "---".
func Index(context *gin.Context) {
    days := getWeekDay()
    data := gin.H{
        "title": "yingxi.company",
        "week":  strings.Join(days, "---"),
    }
    context.HTML(http.StatusOK, "web.html", data)
}
|
package main
import (
"log"
"testing"
"github.com/bahusvel/ClusterPipe/common"
)
// TestWeirdMapThingy smoke-tests TraverseParamTree on a zero-value CPDStatus
// and logs the resulting map; it asserts nothing beyond "does not panic".
func TestWeirdMapThingy(t *testing.T) {
    cpdStatus := common.CPDStatus{}
    mapThingy := TraverseParamTree(cpdStatus)
    log.Println(mapThingy)
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spooler
import (
"container/heap"
"strings"
"sync"
)
// FilesHeap is a container/heap implementation over spooled file metadata.
// readOrder selects the ordering (TIMESTAMP or LEXICOGRAPHICAL).
type FilesHeap struct {
    fileInfos FileInfos
    readOrder string
}
// Len reports the number of files currently tracked by the heap.
func (h FilesHeap) Len() int {
    return len(h.fileInfos)
}
// Less orders files by the configured read order: TIMESTAMP sorts by
// modification time with full path as a tiebreaker, LEXICOGRAPHICAL sorts
// by full path with modification time as a tiebreaker. Any other readOrder
// yields false (no ordering).
func (h FilesHeap) Less(i, j int) bool {
    a, b := h.fileInfos[i], h.fileInfos[j]
    switch h.readOrder {
    case TIMESTAMP:
        if a.getModTime().Equal(b.getModTime()) {
            return strings.Compare(a.getFullPath(), b.getFullPath()) < 0
        }
        return a.getModTime().Before(b.getModTime())
    case LEXICOGRAPHICAL:
        cmp := strings.Compare(a.getFullPath(), b.getFullPath())
        if cmp == 0 {
            return a.getModTime().Before(b.getModTime())
        }
        return cmp < 0
    }
    return false
}
// Swap exchanges the elements at indexes i and j (container/heap contract).
func (h FilesHeap) Swap(i, j int) {
    h.fileInfos[i], h.fileInfos[j] = h.fileInfos[j], h.fileInfos[i]
}
// Contains reports whether a file with the given full path is in the heap.
// Linear scan: the heap is not indexed by path.
func (h *FilesHeap) Contains(path string) bool {
    for i := range h.fileInfos {
        if h.fileInfos[i].getFullPath() == path {
            return true
        }
    }
    return false
}
// Push appends x to the backing slice (container/heap contract; heap.Push
// performs the sift-up).
func (h *FilesHeap) Push(x interface{}) {
    h.fileInfos = append(h.fileInfos, x.(*AtomicFileInformation))
}
// Pop removes and returns the last element of the backing slice, or nil when
// empty (container/heap moves the root to the end before calling Pop).
func (h *FilesHeap) Pop() interface{} {
    n := len(h.fileInfos)
    if n == 0 {
        return nil
    }
    last := h.fileInfos[n-1]
    h.fileInfos = h.fileInfos[:n-1]
    return last
}
// SynchronizedFilesHeap wraps FilesHeap with an RWMutex so it can be used
// from multiple goroutines.
type SynchronizedFilesHeap struct {
    filesHeap *FilesHeap
    lock *sync.RWMutex
}
// Push adds atf to the heap under the write lock.
func (sfh *SynchronizedFilesHeap) Push(atf *AtomicFileInformation) {
    sfh.lock.Lock()
    defer sfh.lock.Unlock()
    heap.Push(sfh.filesHeap, atf)
}
// Pop removes and returns the next file in read order, or nil when the heap
// is empty. The emptiness check and the pop happen under one write lock.
func (sfh *SynchronizedFilesHeap) Pop() *AtomicFileInformation {
    sfh.lock.Lock()
    defer sfh.lock.Unlock()
    if sfh.filesHeap.Len() == 0 {
        return nil
    }
    return heap.Pop(sfh.filesHeap).(*AtomicFileInformation)
}
// Contains reports whether a file with the given path is queued.
// This is a read-only operation, so it takes the read lock — the previous
// version took the exclusive write lock, needlessly serializing concurrent
// Contains calls on an RWMutex.
func (sfh *SynchronizedFilesHeap) Contains(path string) bool {
    sfh.lock.RLock()
    defer sfh.lock.RUnlock()
    return sfh.filesHeap.Contains(path)
}
// NewSynchronizedFilesHeap builds an empty, heap-initialized queue with the
// given read order, guarded by a fresh RWMutex.
func NewSynchronizedFilesHeap(readOrder string) *SynchronizedFilesHeap {
    h := &FilesHeap{fileInfos: FileInfos{}, readOrder: readOrder}
    heap.Init(h)
    return &SynchronizedFilesHeap{filesHeap: h, lock: new(sync.RWMutex)}
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package cmd
import (
"fmt"
"testing"
"github.com/Azure/aks-engine/pkg/armhelpers"
"github.com/google/uuid"
. "github.com/onsi/gomega"
"github.com/spf13/cobra"
)
// TestGetLocationsCmd verifies the cobra command metadata (use/short/long
// and the "output" flag) and that an unknown flag makes Execute fail.
func TestGetLocationsCmd(t *testing.T) {
    t.Parallel()
    command := newGetLocationsCmd()
    g := NewGomegaWithT(t)
    g.Expect(command.Use).Should(Equal(locationsName))
    g.Expect(command.Short).Should(Equal(locationsShortDescription))
    g.Expect(command.Long).Should(Equal(locationsLongDescription))
    g.Expect(command.Flags().Lookup("output")).NotTo(BeNil())
    command.SetArgs([]string{"--bogus"})
    err := command.Execute()
    g.Expect(err).To(HaveOccurred())
}
// TestLocationsCmd_run runs the get-locations command end to end against
// mocked ARM clients with fake credentials and expects it to succeed in
// human output mode.
func TestLocationsCmd_run(t *testing.T) {
    d := &LocationsCmd{
        client: &armhelpers.MockAKSEngineClient{},
        authProvider: &mockAuthProvider{
            authArgs: &authArgs{},
            getClientMock: &armhelpers.MockAKSEngineClient{},
        },
    }
    r := &cobra.Command{}
    f := r.Flags()
    addAuthFlags(d.getAuthArgs(), f)
    // Fake but well-formed credentials; only the UUID parse can fail.
    fakeRawSubscriptionID := "6dc93fae-9a76-421f-bbe5-cc6460ea81cb"
    fakeSubscriptionID, err := uuid.Parse(fakeRawSubscriptionID)
    if err != nil {
        t.Fatalf("Invalid SubscriptionId in Test: %s", err)
    }
    fakeClientID := "b829b379-ca1f-4f1d-91a2-0d26b244680d"
    fakeClientSecret := "0se43bie-3zs5-303e-aav5-dcf231vb82ds"
    d.getAuthArgs().SubscriptionID = fakeSubscriptionID
    d.getAuthArgs().rawSubscriptionID = fakeRawSubscriptionID
    d.getAuthArgs().rawClientID = fakeClientID
    d.getAuthArgs().ClientSecret = fakeClientSecret
    args := []string{}
    d.output = "human"
    err = d.run(r, args)
    if err != nil {
        t.Fatalf("Failed to call get-locations:` %s", err)
    }
}
// ExampleLocationsCmd_run_humanOutput pins the human-readable table produced
// by the mocked get-locations command. The "// Output:" block below is the
// expected stdout and must not be edited casually — go test compares against
// it verbatim.
func ExampleLocationsCmd_run_humanOutput() {
    d := &LocationsCmd{
        client: &armhelpers.MockAKSEngineClient{},
        authProvider: &mockAuthProvider{
            authArgs: &authArgs{},
            getClientMock: &armhelpers.MockAKSEngineClient{},
        },
    }
    r := &cobra.Command{}
    f := r.Flags()
    addAuthFlags(d.getAuthArgs(), f)
    fakeRawSubscriptionID := "6dc93fae-9a76-421f-bbe5-cc6460ea81cb"
    fakeSubscriptionID, _ := uuid.Parse(fakeRawSubscriptionID)
    fakeClientID := "b829b379-ca1f-4f1d-91a2-0d26b244680d"
    fakeClientSecret := "0se43bie-3zs5-303e-aav5-dcf231vb82ds"
    d.getAuthArgs().SubscriptionID = fakeSubscriptionID
    d.getAuthArgs().rawSubscriptionID = fakeRawSubscriptionID
    d.getAuthArgs().rawClientID = fakeClientID
    d.getAuthArgs().ClientSecret = fakeClientSecret
    args := []string{}
    d.output = "human"
    if err := d.run(r, args); err != nil {
        fmt.Printf("error running command: %s\n", err)
    }
    // Output:
    // Location Name Latitude Longitude
    // centraluseuap Central US EUAP (Canary) N/A N/A
    // chinaeast China East N/A N/A
    // chinaeast2 China East 2 N/A N/A
    // chinaeast3 China East 3 N/A N/A
    // chinanorth China North N/A N/A
    // chinanorth2 China North 2 N/A N/A
    // chinanorth3 China North 3 N/A N/A
    // eastus2euap East US 2 EUAP (Canary) N/A N/A
    // germanycentral Germany Central N/A N/A
    // germanynortheast Germany Northeast N/A N/A
    // israelcentral Israel Central N/A N/A
    // italynorth Italy North N/A N/A
    // spaincentral Spain Central N/A N/A
    // usdodcentral US DoD Central N/A N/A
    // usdodeast US Dod East N/A N/A
    // usgovarizona US Gov Arizona N/A N/A
    // usgoviowa US Gov Iowa N/A N/A
    // usgovtexas US Gov Texas N/A N/A
    // usgovvirginia US Gov Virginia N/A N/A
}
// ExampleLocationsCmd_run_jsonOutput pins the JSON output produced by the
// mocked get-locations command. The "// Output:" block below is the expected
// stdout and must not be edited casually — go test compares against it
// verbatim.
func ExampleLocationsCmd_run_jsonOutput() {
    d := &LocationsCmd{
        client: &armhelpers.MockAKSEngineClient{},
        authProvider: &mockAuthProvider{
            authArgs: &authArgs{},
            getClientMock: &armhelpers.MockAKSEngineClient{},
        },
    }
    r := &cobra.Command{}
    f := r.Flags()
    addAuthFlags(d.getAuthArgs(), f)
    fakeRawSubscriptionID := "6dc93fae-9a76-421f-bbe5-cc6460ea81cb"
    fakeSubscriptionID, _ := uuid.Parse(fakeRawSubscriptionID)
    fakeClientID := "b829b379-ca1f-4f1d-91a2-0d26b244680d"
    fakeClientSecret := "0se43bie-3zs5-303e-aav5-dcf231vb82ds"
    d.getAuthArgs().SubscriptionID = fakeSubscriptionID
    d.getAuthArgs().rawSubscriptionID = fakeRawSubscriptionID
    d.getAuthArgs().rawClientID = fakeClientID
    d.getAuthArgs().ClientSecret = fakeClientSecret
    args := []string{}
    d.output = "json"
    if err := d.run(r, args); err != nil {
        fmt.Printf("error running command: %s\n", err)
    }
    // Output:
    // [
    //   {
    //     "id": "N/A",
    //     "name": "centraluseuap",
    //     "displayName": "Central US EUAP (Canary)",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "chinaeast",
    //     "displayName": "China East",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "chinaeast2",
    //     "displayName": "China East 2",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "chinaeast3",
    //     "displayName": "China East 3",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "chinanorth",
    //     "displayName": "China North",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "chinanorth2",
    //     "displayName": "China North 2",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "chinanorth3",
    //     "displayName": "China North 3",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "eastus2euap",
    //     "displayName": "East US 2 EUAP (Canary)",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "germanycentral",
    //     "displayName": "Germany Central",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "germanynortheast",
    //     "displayName": "Germany Northeast",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "israelcentral",
    //     "displayName": "Israel Central",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "italynorth",
    //     "displayName": "Italy North",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "spaincentral",
    //     "displayName": "Spain Central",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "usdodcentral",
    //     "displayName": "US DoD Central",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "usdodeast",
    //     "displayName": "US Dod East",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "usgovarizona",
    //     "displayName": "US Gov Arizona",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "usgoviowa",
    //     "displayName": "US Gov Iowa",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "usgovtexas",
    //     "displayName": "US Gov Texas",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   },
    //   {
    //     "id": "N/A",
    //     "name": "usgovvirginia",
    //     "displayName": "US Gov Virginia",
    //     "latitude": "N/A",
    //     "longitude": "N/A"
    //   }
    // ]
}
// TestGetLocationsCmd_ShouldErrorIfInvalidOption verifies that an unsupported
// output format ("yaml") is rejected with the expected error message.
func TestGetLocationsCmd_ShouldErrorIfInvalidOption(t *testing.T) {
    t.Parallel()
    command := &LocationsCmd{
        output: "yaml",
    }
    err := command.run(nil, nil)
    g := NewGomegaWithT(t)
    g.Expect(err).To(HaveOccurred())
    g.Expect(err.Error()).To(Equal("invalid output format: \"yaml\". Allowed values: human, json, code"))
}
|
package nougat
import (
"io"
"io/ioutil"
"strings"
"testing"
)
// TestBodySetter table-tests Body(): a non-nil reader replaces the current
// provider, a nil reader leaves the existing provider untouched.
func TestBodySetter(t *testing.T) {
    fakeInput := ioutil.NopCloser(strings.NewReader("test"))
    fakeBodyProvider := bodyProvider{body: fakeInput}
    cases := []struct {
        initial BodyProvider
        input io.Reader
        expected BodyProvider
    }{
        // nil body is overridden by a set body
        {nil, fakeInput, fakeBodyProvider},
        // initial body is not overridden by nil body
        {fakeBodyProvider, nil, fakeBodyProvider},
        // nil body is returned unaltered
        {nil, nil, nil},
    }
    for _, c := range cases {
        Nougat := New()
        Nougat.bodyProvider = c.initial
        Nougat.Body(c.input)
        if Nougat.bodyProvider != c.expected {
            t.Errorf("expected %v, got %v", c.expected, Nougat.bodyProvider)
        }
    }
}
// TestBodyJSONSetter table-tests BodyJSON(): a non-nil payload installs a
// jsonBodyProvider and sets Content-Type to JSON; a nil payload changes
// neither the provider nor the headers.
func TestBodyJSONSetter(t *testing.T) {
    fakeModel := &FakeModel{}
    fakeBodyProvider := jsonBodyProvider{payload: fakeModel}
    cases := []struct {
        initial BodyProvider
        input interface{}
        expected BodyProvider
    }{
        // json tagged struct is set as bodyJSON
        {nil, fakeModel, fakeBodyProvider},
        // nil argument to bodyJSON does not replace existing bodyJSON
        {fakeBodyProvider, nil, fakeBodyProvider},
        // nil bodyJSON remains nil
        {nil, nil, nil},
    }
    for _, c := range cases {
        Nougat := New()
        Nougat.bodyProvider = c.initial
        Nougat.BodyJSON(c.input)
        if Nougat.bodyProvider != c.expected {
            t.Errorf("expected %v, got %v", c.expected, Nougat.bodyProvider)
        }
        // Header Content-Type should be application/json if bodyJSON arg was non-nil
        if c.input != nil && Nougat.header.Get(contentType) != jsonContentType {
            t.Errorf("Incorrect or missing header, expected %s, got %s", jsonContentType, Nougat.header.Get(contentType))
        } else if c.input == nil && Nougat.header.Get(contentType) != "" {
            t.Errorf("did not expect a Content-Type header, got %s", Nougat.header.Get(contentType))
        }
    }
}
// TestBodyFormSetter table-tests BodyForm(): a non-nil url-tagged payload
// installs a formBodyProvider and sets Content-Type to form-urlencoded; a
// nil payload changes neither the provider nor the headers.
// Fix: the first case referenced the undefined identifier `paramsB` (a
// compile error); the intended value is `fakeParams`, the payload wrapped
// by fakeBodyProvider.
func TestBodyFormSetter(t *testing.T) {
    fakeParams := FakeParams{KindName: "recent", Count: 25}
    fakeBodyProvider := formBodyProvider{payload: fakeParams}
    cases := []struct {
        initial BodyProvider
        input interface{}
        expected BodyProvider
    }{
        // url tagged struct is set as bodyStruct
        {nil, fakeParams, fakeBodyProvider},
        // nil argument to bodyStruct does not replace existing bodyStruct
        {fakeBodyProvider, nil, fakeBodyProvider},
        // nil bodyStruct remains nil
        {nil, nil, nil},
    }
    for _, c := range cases {
        Nougat := New()
        Nougat.bodyProvider = c.initial
        Nougat.BodyForm(c.input)
        if Nougat.bodyProvider != c.expected {
            t.Errorf("expected %v, got %v", c.expected, Nougat.bodyProvider)
        }
        // Content-Type should be application/x-www-form-urlencoded if bodyStruct was non-nil
        if c.input != nil && Nougat.header.Get(contentType) != formContentType {
            t.Errorf("Incorrect or missing header, expected %s, got %s", formContentType, Nougat.header.Get(contentType))
        } else if c.input == nil && Nougat.header.Get(contentType) != "" {
            t.Errorf("did not expect a Content-Type header, got %s", Nougat.header.Get(contentType))
        }
    }
}
|
package main
import (
"context"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
)
// Server reverse-proxies HTTP traffic to the robot and serves a Flash
// cross-domain policy. shutdownServer signals external shutdown intent.
type Server struct {
    srv http.Server
    shutdownServer chan struct{}
}
// NewServer prepares a new server that listens on the given port and
// reverse-proxies all traffic (except /crossdomain.xml) to robotAddr.
func NewServer(robotAddr string, port int, shutdownServer chan struct{}) (*Server, error) {
    robotURL, err := url.Parse(fmt.Sprintf("http://%s", robotAddr))
    if err != nil {
        return nil, maskAny(err)
    }
    s := &Server{
        shutdownServer: shutdownServer,
        srv: http.Server{
            Addr: fmt.Sprintf("0.0.0.0:%d", port),
        },
    }
    s.srv.Handler = s.createHandler(robotURL)
    return s, nil
}
// ListenAndServe starts listening on the desired port.
// http.ErrServerClosed (returned after a graceful Shutdown) is treated as a
// clean exit; any other error is wrapped and returned.
func (s *Server) ListenAndServe() error {
    err := s.srv.ListenAndServe()
    if err == http.ErrServerClosed {
        return nil
    }
    return maskAny(err)
}
// Shutdown gracefully stops the server, waiting for in-flight requests
// to drain (no deadline: background context).
func (s *Server) Shutdown() error {
    return maskAny(s.srv.Shutdown(context.Background()))
}
// createHandler builds the request mux: the Flash policy endpoint is served
// locally while every other path is reverse-proxied to the robot.
func (s *Server) createHandler(robotURL *url.URL) *http.ServeMux {
    mux := http.NewServeMux()
    mux.HandleFunc("/crossdomain.xml", s.crossDomainHandler)
    mux.Handle("/", httputil.NewSingleHostReverseProxy(robotURL))
    return mux
}
// policy is the Flash socket cross-domain policy served at /crossdomain.xml.
// Two fixes versus the original: (1) the raw (backtick) string emitted the
// trailing `\0` as the two literal characters '\' and '0' instead of the NUL
// terminator the Flash policy-file protocol requires, so an interpreted
// string with "\x00" is used; (2) the element was malformed — the spec
// defines `<allow-access-from domain="..." to-ports="..."/>`, not
// `allow-access-from-domain` / `to-port`.
const policy = "<cross-domain-policy>\n  <allow-access-from domain=\"*\" to-ports=\"26153\"/>\n</cross-domain-policy>\x00"
// crossDomainHandler serves the Flash cross-domain policy document.
func (s *Server) crossDomainHandler(w http.ResponseWriter, r *http.Request) {
    log.Println("crossDomain")
    // NOTE(review): the Write error is deliberately ignored; the response is
    // best-effort and there is no useful recovery here.
    w.Write([]byte(policy))
}
|
package indexer
import (
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/manishrjain/gocrud/search"
"github.com/manishrjain/gocrud/store"
"github.com/manishrjain/gocrud/x"
)
// Incremental indexing server to continously regenerate
// and index entities to keep store and search in-sync.
// ch carries entities awaiting re-indexing; wg tracks the worker goroutines
// so Finish can wait for them to drain.
type Server struct {
    ch chan x.Entity
    wg *sync.WaitGroup
}
// NewServer returns a server that continuously re-indexes stored entities.
// buffer bounds the number of pending entities held in memory, and
// numRoutines controls how many workers drain the channel. It fatals if no
// search engine has been registered.
func NewServer(buffer int, numRoutines int) *Server {
    if search.Get() == nil {
        log.Fatal("No search engine found")
    }
    s := &Server{
        ch: make(chan x.Entity, buffer),
        wg: new(sync.WaitGroup),
    }
    s.wg.Add(numRoutines)
    for i := 0; i < numRoutines; i++ {
        go s.regenerateAndIndex()
    }
    return s
}
// regenerateAndIndex is the worker loop: for each entity received it looks up
// the registered indexer for the entity's kind, regenerates the search doc,
// and pushes the update to the search engine. Entities with no registered
// indexer are skipped. The loop exits when s.ch is closed (see Finish).
func (s *Server) regenerateAndIndex() {
    defer s.wg.Done()
    for entity := range s.ch {
        idxr, ok := Get(entity.Kind)
        if !ok {
            continue
        }
        doc := idxr.Regenerate(entity)
        log.WithField("doc", doc).Debug("Regenerated doc")
        if err := search.Get().Update(doc); err != nil {
            x.LogErr(log, err).WithField("doc", doc).
                Error("While updating in search engine")
        }
    }
}
// LoopOnce cycles once over all entities in the store, feeding each chunk of
// up to 1000 entities into the indexing channel. It returns when the store
// reports no more entities or when iteration fails.
// Fix: removed the log.Fatal and bare return that followed the infinite for
// loop — they were unreachable (the loop only exits via the returns inside
// it), and go vet flags unreachable code.
func (s *Server) LoopOnce() {
    var total uint64
    from := ""
    for {
        found, last, err := store.Get().Iterate(from, 1000, s.ch)
        if err != nil {
            x.LogErr(log, err).Error("While iterating")
            return
        }
        if found == 0 {
            log.WithField("total", total).Info("Reached end of cycle")
            return
        }
        log.WithFields(logrus.Fields{
            "num_processed": found,
            "last": last,
        }).Debug("Iteration chunk done")
        total += uint64(found)
        from = last.Id
    }
}
// InfiniteLoop would infinitely cycle over all entities in the
// store, waiting for wait duration after each cycle.
// It never returns; run it in its own goroutine.
func (s *Server) InfiniteLoop(wait time.Duration) {
    for {
        s.LoopOnce()
        log.Debug("Sleeping...")
        time.Sleep(wait)
    }
}
// Finish closes the entity channel (stopping the workers once drained) and
// blocks until every worker goroutine has exited.
func (s *Server) Finish() {
    close(s.ch)
    s.wg.Wait()
}
|
package command
import (
"fmt"
"strings"
"github.com/flosch/pongo2/v4"
)
// RenderCommand renders commandTemplate with the given arguments using the
// Jinja-like pongo2 engine, in two passes: the first substitutes arguments
// into the template, the second resolves "vars"/"env" references that the
// substituted arguments may themselves contain. "env" and "vars" are
// injected into the context and override identically named arguments.
func RenderCommand(commandTemplate string, arguments, env, vars map[string]string) (string, error) {
    ctx := pongo2.Context{}
    for key, value := range arguments {
        ctx[key] = value
    }
    ctx["vars"] = vars
    ctx["env"] = env

    first, err := pongo2.FromString(commandTemplate)
    if err != nil {
        return "", err
    }
    intermediate, err := first.Execute(ctx)
    if err != nil {
        return "", err
    }
    second, err := pongo2.FromString(intermediate)
    if err != nil {
        return "", err
    }
    return second.Execute(ctx)
}
// shell renders each command on its own line; `safe` suppresses HTML escaping.
var shell = `{% for cmd in commands %}{{ cmd | safe }}
{% endfor %}`
// RenderShell renders a list of commands as newline-separated shell lines.
func RenderShell(commands []string) (string, error) {
    tmpl, err := pongo2.FromString(shell)
    if err != nil {
        return "", err
    }
    return tmpl.Execute(pongo2.Context{"commands": commands})
}
// RenderEnvironmentForSSH renders an environment map as newline-separated
// `export NAME=value` shell statements. Map iteration makes the line order
// non-deterministic.
func RenderEnvironmentForSSH(envs map[string]string) string {
    var lines []string
    for name, value := range envs {
        lines = append(lines, "export "+name+"="+value)
    }
    return strings.Join(lines, "\n")
}
// EmptyWorkloadCommand returns the command executed inside an empty
// workload: a shell that sleeps for timeout seconds. (k8s uses pause and
// GitHub Actions uses tail -f /dev/null for the same trick — great minds
// think alike! ( ... ( '-' )ノ)`-' )
func EmptyWorkloadCommand(timeout int) []string {
    sleep := fmt.Sprintf("sleep %d", timeout)
    return []string{"/bin/sh", "-c", sleep}
}
// ToEnvironmentList converts an environment map into a KEY=value list.
// Keys are upper-cased and '.' characters in keys are replaced by '_'.
// Map iteration makes the result order non-deterministic.
func ToEnvironmentList(env map[string]string) []string {
    var envs []string
    for key, value := range env {
        name := strings.ToUpper(strings.ReplaceAll(key, ".", "_"))
        envs = append(envs, name+"="+value)
    }
    return envs
}
// MergeVariables returns a new map containing both inputs; on key
// collisions the value from higherPriority wins. Neither input is modified.
func MergeVariables(lowerPriority, higherPriority map[string]string) map[string]string {
    merged := make(map[string]string, len(lowerPriority)+len(higherPriority))
    for _, source := range []map[string]string{lowerPriority, higherPriority} {
        for k, v := range source {
            merged[k] = v
        }
    }
    return merged
}
|
package main
import (
"fmt"
"time"
)
// data1 simulates a slow producer: it sends its message after 4 seconds.
func data1(ch chan string) {
    time.Sleep(4 * time.Second)
    ch <- "from data1()"
}
// data2 simulates a faster producer: it sends its message after 2 seconds.
func data2(ch chan string) {
    time.Sleep(2 * time.Second)
    ch <- "from data2()"
}
// main demonstrates select over two channels: whichever producer sends first
// wins (data2, after ~2s, given the sleeps above). main then exits without
// receiving from the other channel; the slower goroutine's send never
// completes, which is fine here because process exit reclaims it.
func main() {
    chan1 := make(chan string)
    chan2 := make(chan string)
    go data1(chan1)
    go data2(chan2)
    select {
    case x := <-chan1:
        fmt.Println(x)
    case y := <-chan2:
        fmt.Println(y)
    }
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-07-05 15:04
# @File : chaincode.go
# @Description :
# @Attention :
*/
package test
import "examples/blockchain/solo/solo_single_org/config"
// Invoke delegates to the solo single-org chaincode invoke helper.
func Invoke(){
    config.Invoke()
}
|
package observer
import "design-patterns-go/observerPattern/event"
// Observer is notified of published events (observer pattern).
type Observer interface {
    // OnNotify is invoked once per event delivered to this observer.
    OnNotify(event event.Event)
}
|
package envoy
import "io"
// Collect gathers counter and histogram stats per upstream cluster.
// Counters are collected first; the cluster names found there determine
// which histograms to fetch. On failure, whatever was gathered so far is
// returned together with the error.
func (coll *Collector) Collect() (CountersByUpstream, HistogramsByUpstream, error) {
    counters, err := coll.collectCounters()
    if err != nil {
        var histograms HistogramsByUpstream
        return counters, histograms, err
    }
    var upstreamClusters []string
    for cluster := range counters {
        upstreamClusters = append(upstreamClusters, cluster)
    }
    histograms, err := coll.collectHistograms(upstreamClusters)
    if err != nil {
        return counters, histograms, err
    }
    return counters, histograms, nil
}
// This is a io.Reader wrapper that replaces all '|' characters in
// its output with '_' characters. It's needed to prevent the
// upstream Prometheus text format parser from crashing on invalid input.
type verticalBarReplacer struct {
raw io.Reader
}
func newVerticalBarReplacer(r io.Reader) *verticalBarReplacer {
return &verticalBarReplacer{raw: r}
}
func (s *verticalBarReplacer) Read(p []byte) (n int, err error) {
n, err = s.raw.Read(p)
for i := range p {
if p[i] == '|' {
p[i] = '_'
}
}
return
}
|
package flash_test
import (
"github.com/find-a-job/flash"
"github.com/stretchr/testify/assert"
"testing"
)
// iiGen builds the baseline widget tree used by the auto-increment ID tests:
// a root view with a button and a nested view containing two more children.
func iiGen() flash.IncTreeNode {
    return flash.IncTreeNode{
        Id:   "1",
        Type: "View",
        Name: "view",
        Children: []flash.IncTreeNode{
            {Id: "2", Type: "Button", Name: "button"},
            {
                Id:   "3",
                Type: "View",
                Name: "view_1",
                Children: []flash.IncTreeNode{
                    {Id: "4", Type: "Button", Name: "button_1"},
                    {Id: "5", Type: "View", Name: "view_2"},
                },
            },
        },
    }
}
// iiGenSecond is the iiGen fixture with the deepest view renamed to
// "view_3", leaving "view_2" free for the auto-increment check.
// Redundant element types in the nested literals are elided (gofmt -s).
func iiGenSecond() flash.IncTreeNode {
	return flash.IncTreeNode{
		Id:   "1",
		Type: "View",
		Name: "view",
		Children: []flash.IncTreeNode{
			{Id: "2", Type: "Button", Name: "button"},
			{
				Id:   "3",
				Type: "View",
				Name: "view_1",
				Children: []flash.IncTreeNode{
					{Id: "4", Type: "Button", Name: "button_1"},
					{Id: "5", Type: "View", Name: "view_3"},
				},
			},
		},
	}
}
// TestIncId verifies that Insert de-duplicates names: the inserted node
// is named "view", which collides with existing view names, so it gets
// the next free suffix — "view_3" for the iiGen tree and "view_2" for
// the iiGenSecond tree. The expected trees are rebuilt from the fixture
// generators instead of re-spelling the full literals (the original
// duplicated ~60 lines of fixture data).
func TestIncId(t *testing.T) {
	iiError := "7_自增ID went Error"
	itn := iiGen()
	itn2 := iiGenSecond()
	itn.Insert(flash.IncTreeNode{Id: "6", Type: "View", Name: "view"})
	itn2.Insert(flash.IncTreeNode{Id: "6", Type: "View", Name: "view"})

	want := iiGen()
	want.Children = append(want.Children, flash.IncTreeNode{Id: "6", Type: "View", Name: "view_3"})
	assert.Equal(t, itn, want, iiError)

	want2 := iiGenSecond()
	want2.Children = append(want2.Children, flash.IncTreeNode{Id: "6", Type: "View", Name: "view_2"})
	assert.Equal(t, itn2, want2, iiError)
}
|
package tfcloud
import (
"encoding/json"
"io/ioutil"
)
// TfConfig mirrors the Terraform CLI credentials file
// (credentials.tfrc.json): a token per hostname, of which only
// app.terraform.io is modelled here.
type TfConfig struct {
	Credentials struct {
		App_terraform_io struct {
			Token string `json:"token"`
		} `json:"app.terraform.io"`
	} `json:"credentials"`
}

// Read loads and parses the credentials file at fileName into c.
// It returns the file-read error, or the JSON parse error — which the
// original silently discarded, reporting success on corrupt input.
func (c *TfConfig) Read(fileName string) error {
	raw, err := ioutil.ReadFile(fileName)
	if err != nil {
		return err
	}
	// raw is already []byte, and c is already a pointer: the original's
	// []byte(...) conversion and &c (a **TfConfig) were redundant.
	return json.Unmarshal(raw, c)
}
|
package main
import (
"io"
"log"
"os"
yaml "gopkg.in/yaml.v2"
)
// main re-encodes every YAML document from the files named on the
// command line onto stdout as one YAML stream.
func main() {
	enc := yaml.NewEncoder(os.Stdout)
	defer enc.Close()
	for _, arg := range os.Args[1:] {
		if err := transcode(enc, arg); err != nil {
			log.Fatalf("%s: %s", arg, err)
		}
	}
}

// transcode copies every YAML document in the file at path into enc.
// Extracted into a helper so each file is closed as soon as it is
// processed (the original's defer in the loop held every file open
// until main returned) and so Encode errors are no longer dropped.
func transcode(enc *yaml.Encoder, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	dec := yaml.NewDecoder(f)
	for {
		var data interface{}
		if err := dec.Decode(&data); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if err := enc.Encode(data); err != nil {
			return err
		}
	}
}
|
package models
import (
"time"
)
// Resource is a generic named entity with audit timestamps, serialized
// to JSON for API payloads.
type Resource struct {
	Id          string    `json:"id"`          // unique identifier
	Name        string    `json:"name"`        // display name
	Description string    `json:"description"` // free-form description
	Update      time.Time `json:"update"`      // last-modified time; NOTE(review): timezone not fixed here — confirm UTC
	Create      time.Time `json:"create"`      // creation time
}
|
package main
import (
// "encoding/json"
"fmt"
"github.com/gorilla/websocket"
"net/http"
"math/rand"
// "time"
)
// reader pumps incoming websocket messages from this connection into
// its chat room, prefixing each with "userid:name:" and fanning it out
// to every connection in the room. It returns when the websocket read
// fails. Idiom fixes: boolean comparisons against true/false dropped,
// and the no-op fmt.Sprintf on a constant string removed.
func (c *connection) reader() {
	for {
		_, message, err := c.ws.ReadMessage()
		if err != nil {
			break
		}
		// Silenced or not-logged-in users get a notice instead of a broadcast.
		if c.silent || !c.login {
			c.send <- []byte("被禁言")
			continue
		}
		live, ok := live_map[c.liveid]
		if ok && !c.silent {
			tempmy := fmt.Sprintf("%s:%s:", c.userid, c.name)
			message = append([]byte(tempmy), message...)
			// Fan out to every listener; a receiver whose send buffer is
			// full has its channel closed (dropping the connection).
			for _, c_chan := range live.connections {
				select {
				case c_chan.send <- message:
				default:
					close(c_chan.send)
				}
			}
		}
	}
}
// writer drains the connection's outbound channel, forwarding each
// payload as a websocket text frame, and closes the socket when the
// channel is closed or a write fails.
func (c *connection) writer() {
	defer c.ws.Close()
	for payload := range c.send {
		if err := c.ws.WriteMessage(websocket.TextMessage, payload); err != nil {
			return
		}
	}
}
// upgrader promotes HTTP requests to websocket connections. CheckOrigin
// unconditionally returns true, so cross-origin connections are allowed.
var upgrader = &websocket.Upgrader{ReadBufferSize: 1024,
	CheckOrigin: func(r *http.Request) bool { return true }, WriteBufferSize: 1024}
// wsHandler upgrades chat clients to websockets and registers them with
// the main hub.
type wsHandler struct {
	h *Mainhub
}
// ServeHTTP admits one chat client: it validates query parameters,
// creates the live room on first use, rejects duplicate logins,
// upgrades to a websocket and runs the read/write pumps. Fixes: the
// `ok != true` / `== true` comparisons and the redundant second
// live_map lookup after room creation.
func (wsh wsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	logger.Printf("new user try to connect:%s\n", r.URL.RawQuery)
	querys := r.URL.Query()
	name := ""
	userid := ""
	liveid := ""
	if querys["name"] != nil && querys["name"][0] != "" {
		name = querys["name"][0]
	}
	if querys["liveid"] != nil && querys["liveid"][0] != "" {
		liveid = querys["liveid"][0]
	}
	if querys["userid"] != nil && querys["userid"][0] != "" {
		userid = querys["userid"][0]
	}
	if liveid == "" {
		fmt.Println("no chat room")
		return
	}
	// Create the room lazily on first join.
	// NOTE(review): live_map is read and written from concurrent handlers
	// without a lock — confirm whether a mutex is needed.
	live, ok := live_map[liveid]
	if !ok {
		live = &Live{
			connections: make([]*connection, 0, 10000),
			chatrecord:  make([]byte, 0, 1000),
		}
		live_map[liveid] = live
	}
	// Reject a second login with the same userid.
	for _, conn := range live.connections {
		if userid != "" && conn.userid == userid {
			logger.Printf("user(id:%s) repeat login\n", userid)
			return
		}
	}
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		logger.Println("fail to upgrade connection")
		return
	}
	logger.Printf("new user connected:%s\n", r.URL.RawQuery)
	login := name != "" && userid != ""
	// Announce the arrival and bump the (randomly padded) viewer count.
	if login {
		info := fmt.Sprintf("%s 进入直播间", name)
		pushmsg(live, info)
		live.online += (rand.Intn(5) + 1)
		info = fmt.Sprintf("online_%d", live.online)
		pushmsg(live, info)
	}
	// Register the connection with the hub.
	c := &connection{
		send:   make(chan []byte, 256),
		ws:     ws,
		h:      wsh.h,
		name:   name,
		userid: userid,
		liveid: liveid,
		silent: false,
		login:  login,
	}
	c.h.register <- c
	// Apply a pre-existing mute for this user.
	for _, id := range live.silentusers {
		if userid == id {
			c.silent = true
		}
	}
	defer func() { c.h.unregister <- c }()
	go c.writer()
	c.reader()
}
|
package main
import (
"fmt"
"time"
)
// A (probably) standard Go concurrency pattern: a producer feeds a
// consumer through a channel, with a second channel signalling completion.
var done = make(chan bool) // completion signal from the producer; a common pattern
var msgs = make(chan int)  // work items flowing producer -> consumer
// producer emits the integers 0..499 on msgs, pausing 20ms between
// sends, then reports completion on done.
func producer() {
	for n := 0; n < 500; n++ {
		time.Sleep(20 * time.Millisecond)
		msgs <- n
	}
	done <- true
}
func consumer() {
for {
msg := <-msgs
fmt.Println(msg)
}
}
// main runs producer and consumer concurrently and blocks until the
// producer signals that all items were sent.
func main() {
	go producer()
	go consumer()
	// Typical usage: the parent receives from 1 (or n) child goroutines.
	<-done
	println("done!!")
}
|
package gpxjson
import "testing"
// sample is a minimal Endomondo GPX 1.1 export used as the Convert()
// fixture: metadata plus one track containing two segments.
var sample = []byte(`<?xml version="1.0" encoding="UTF-8"?>
<gpx version="1.1" creator="Endomondo.com" xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd http://www.garmin.com/xmlschemas/GpxExtensions/v3 http://www.garmin.com/xmlschemas/GpxExtensionsv3.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd" xmlns="http://www.topografix.com/GPX/1/1" xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1" xmlns:gpxx="http://www.garmin.com/xmlschemas/GpxExtensions/v3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<metadata>
<author>
<name>Lorenzo Greco</name>
<email id="lorenzo.rev" domain="gmail.com"/>
</author>
<link href="http://www.endomondo.com">
<text>Endomondo</text>
</link>
<time>2015-07-28T19:02:36Z</time>
</metadata>
<trk>
<src>http://www.endomondo.com/</src>
<link href="https://secreturl.endomondo.com/workouts/XXXX/YYYY">
<text>endomondo</text>
</link>
<type>CYCLING_SPORT</type>
<trkseg>
<trkpt lat="43.76319" lon="11.149139">
<time>2015-07-25T07:17:59Z</time>
</trkpt>
</trkseg>
<trkseg>
<trkpt lat="43.76319" lon="11.149139">
<ele>95.1</ele>
<time>2015-07-25T07:18:00Z</time>
</trkpt>
<trkpt lat="43.76319" lon="11.149139">
<ele>95.2</ele>
<time>2015-07-25T07:18:00Z</time>
</trkpt>
</trkseg>
</trk>
</gpx>`)
// TestUnmarshal checks that the sample GPX document converts without error.
func TestUnmarshal(t *testing.T) {
	_, err := Convert(sample)
	if err != nil {
		t.Errorf(`Convert() error %s`, err)
	}
}
|
package LetterPostgres
//
//import (
// "MainApplication/internal/Letter/LetterModel"
// "MainApplication/internal/Letter/LetterRepository"
// pgwrapper "gitlab.com/slax0rr/go-pg-wrapper"
//)
//
//type dataBase struct {
// DB pgwrapper.DB
//}
//
//func New(db pgwrapper.DB) LetterRepository.LetterDB {
// return dataBase{DB: db}
//}
//
//func (dbInfo dataBase) SaveMail(letter LetterModel.Letter) error {
// _, err := dbInfo.DB.Model(&letter).Insert()
// if err != nil {
// return LetterRepository.SaveLetterError
// }
// return nil
//}
//
//func (dbInfo dataBase) GetReceivedLetters(email uint64) (error, []LetterModel.Letter) {
// var letters []LetterModel.Letter
// exist := dbInfo.DB.Model(&letters).Where("receiver=?", email).Select()
// if exist != nil {
// return LetterRepository.ReceivedLetterError, nil
// }
// return nil, letters
//}
//
//func (dbInfo dataBase) GetSendedLetters(email uint64) (error, []LetterModel.Letter) {
// var letters []LetterModel.Letter
// exist := dbInfo.DB.Model(&letters).Where("sender=?", email).Select()
// if exist != nil {
// return LetterRepository.SentLetterError, nil
// }
// return nil, letters
//}
//
//func (dbInfo dataBase) WatchLetter(uint64) (error, LetterModel.Letter) {
// return nil, LetterModel.Letter{}
//}
|
package setr
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01300102 is the XML document wrapper for the ISO 20022
// setr.013.001.02 (SwitchOrder) message.
type Document01300102 struct {
	XMLName xml.Name        `xml:"urn:iso:std:iso:20022:tech:xsd:setr.013.001.02 Document"`
	Message *SwitchOrderV02 `xml:"setr.013.001.02"`
}
// AddMessage allocates the document's SwitchOrderV02 payload and returns
// it for population.
func (d *Document01300102) AddMessage() *SwitchOrderV02 {
	d.Message = new(SwitchOrderV02)
	return d.Message
}
// Scope
// The SwitchOrder message is sent by an instructing party, eg, an investment manager or its authorised representative, to an executing party, eg, a transfer agent. There may be one or more intermediary parties between the instructing party and the executing party. The intermediary party is, for example, an intermediary or a concentrator.
// This message is used to instruct the executing party to switch from a specified amount/quantity of specified financial instruments to a specified amount/quantity of different financial instruments.
// Usage
// The SwitchOrder message is used when the instructing party, ie, an investor, wants to change its investments within the same fund family, according to the terms of the prospectus.
type SwitchOrderV02 struct {
	// Reference assigned to a set of orders or trades in order to link them together.
	MasterReference *iso20022.AdditionalReference3 `xml:"MstrRef,omitempty"`
	// Collective reference identifying a set of messages.
	PoolReference *iso20022.AdditionalReference3 `xml:"PoolRef,omitempty"`
	// Reference to a linked message that was previously sent.
	PreviousReference []*iso20022.AdditionalReference3 `xml:"PrvsRef,omitempty"`
	// Information related to the switch order.
	SwitchOrderDetails *iso20022.SwitchOrder2 `xml:"SwtchOrdrDtls"`
	// The information related to an intermediary.
	IntermediaryDetails []*iso20022.Intermediary4 `xml:"IntrmyDtls,omitempty"`
	// Information provided when the message is a copy of a previous message.
	CopyDetails *iso20022.CopyInformation1 `xml:"CpyDtls,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
// AddMasterReference allocates and returns the order set's master reference.
func (s *SwitchOrderV02) AddMasterReference() *iso20022.AdditionalReference3 {
	s.MasterReference = new(iso20022.AdditionalReference3)
	return s.MasterReference
}

// AddPoolReference allocates and returns the message pool reference.
func (s *SwitchOrderV02) AddPoolReference() *iso20022.AdditionalReference3 {
	s.PoolReference = new(iso20022.AdditionalReference3)
	return s.PoolReference
}

// AddPreviousReference appends a new previous-message reference and returns it.
func (s *SwitchOrderV02) AddPreviousReference() *iso20022.AdditionalReference3 {
	newValue := new(iso20022.AdditionalReference3)
	s.PreviousReference = append(s.PreviousReference, newValue)
	return newValue
}

// AddSwitchOrderDetails allocates and returns the switch order details.
func (s *SwitchOrderV02) AddSwitchOrderDetails() *iso20022.SwitchOrder2 {
	s.SwitchOrderDetails = new(iso20022.SwitchOrder2)
	return s.SwitchOrderDetails
}

// AddIntermediaryDetails appends a new intermediary entry and returns it.
func (s *SwitchOrderV02) AddIntermediaryDetails() *iso20022.Intermediary4 {
	newValue := new(iso20022.Intermediary4)
	s.IntermediaryDetails = append(s.IntermediaryDetails, newValue)
	return newValue
}

// AddCopyDetails allocates and returns the copy-message information.
func (s *SwitchOrderV02) AddCopyDetails() *iso20022.CopyInformation1 {
	s.CopyDetails = new(iso20022.CopyInformation1)
	return s.CopyDetails
}

// AddExtension appends a new extension block and returns it.
func (s *SwitchOrderV02) AddExtension() *iso20022.Extension1 {
	newValue := new(iso20022.Extension1)
	s.Extension = append(s.Extension, newValue)
	return newValue
}
|
package main
import "basic-rabbitmq/RabbitMQ"
// main starts a routing consumer bound to the "workerTwo" routing key on
// the "exGolang" exchange and blocks receiving messages. (The original
// local was misleadingly named workerOne.)
func main() {
	worker := RabbitMQ.NewRabbitMQRouting("exGolang", "workerTwo")
	worker.RecieveRouting()
}
|
package forkexec
import (
"syscall"
"github.com/criyle/go-sandbox/pkg/mount"
"github.com/criyle/go-sandbox/pkg/rlimit"
)
// Runner is the configuration including the exec path, argv
// and resource limits. It can create a tracee for a ptrace-based tracer.
// It can also create an unshared process in another namespace.
type Runner struct {
	// argv and env for execve syscall for the child process
	Args []string
	Env  []string
	// if exec_fd is defined, then at the end, fd_execve is called
	ExecFile uintptr
	// POSIX Resource limit set by set rlimit
	RLimits []rlimit.RLimit
	// file descriptors map for new process, from 0 to len - 1
	Files []uintptr
	// work path set by chdir(dir) (current working directory for child)
	// if pivot_root is defined, this will execute after changed to new root
	WorkDir string
	// seccomp syscall filter applied to child
	Seccomp *syscall.SockFprog
	// clone unshare flag to create linux namespace, effective when clone child
	// since unshare syscall does not join the new pid group
	CloneFlags uintptr
	// mounts defines the mount syscalls after unshare mount namespace
	// need CAP_SYS_ADMIN inside the namespace (e.g. unshare user namespace)
	// if pivot root is provided, relative target is better for chdir-mount meta
	// and pivot root will mount as tmpfs before any mount
	Mounts []mount.SyscallParams
	// pivot_root defines a readonly new root after unshare mount namespace
	// it should be a directory in absolute path and should be used with mounts
	// Call path:
	// mount("tmpfs", root, "tmpfs", 0, nil)
	// chdir(root)
	// [do mounts]
	// mkdir("old_root")
	// pivot_root(root, "old_root")
	// umount("old_root", MNT_DETACH)
	// rmdir("old_root")
	// mount("tmpfs", "/", "tmpfs", MS_BIND | MS_REMOUNT | MS_RDONLY | MS_NOATIME | MS_NOSUID, nil)
	PivotRoot string
	// HostName and DomainName to be set after unshare UTS & user (CAP_SYS_ADMIN)
	HostName, DomainName string
	// UidMappings / GidMappings for unshared user namespaces, no-op if mapping is null
	UIDMappings []syscall.SysProcIDMap
	GIDMappings []syscall.SysProcIDMap
	// Credential holds user and group identities to be assumed
	// by a child process started by StartProcess.
	Credential *syscall.Credential
	// Parent and child processes sync status through a socket pair.
	// SyncFunc will be invoked with the child pid. If SyncFunc returns an error,
	// the parent will signal the child to stop and report the error.
	// SyncFunc is called right before execve, thus it could track cpu more accurately
	SyncFunc func(int) error
	// ptrace controls child process to call ptrace(PTRACE_TRACEME)
	// runtime.LockOSThread is required for tracer to call ptrace syscalls
	Ptrace bool
	// no_new_privs calls prctl(PR_SET_NO_NEW_PRIVS) to disable calls to
	// setuid processes. It is automatically enabled when a seccomp filter is provided
	NoNewPrivs bool
	// stop before seccomp calls kill(getpid(), SIGSTOP) to wait for tracer to continue
	// right before the calls to seccomp. It is automatically enabled when seccomp
	// filter and ptrace are provided since kill might not be available after
	// seccomp and execve might be traced by ptrace
	// cannot stop after seccomp since kill might not be allowed by seccomp filter
	StopBeforeSeccomp bool
	// GidMappingsEnableSetgroups allows / disallows setgroups syscall.
	// deny if GIDMappings is nil
	GIDMappingsEnableSetgroups bool
	// drop_caps calls cap_set(self, 0) to drop all capabilities
	// from effective, permitted, inheritable capability sets before execve
	// it should avoid calls to set ambient capabilities
	DropCaps bool
	// UnshareCgroupAfterSync specifies whether to unshare the cgroup namespace
	// after sync (the syncFunc might add the child to a cgroup)
	UnshareCgroupAfterSync bool
	// CTTY specifies if set the fd 0 as controlling TTY
	CTTY bool
}
|
package snowflake
import (
"sync"
"time"
)
// DefaultSequence issues (elapsed, sequence) pairs for snowflake-style
// ID generation, guarded by a mutex so concurrent callers never obtain
// the same pair.
type DefaultSequence struct {
	mu       sync.Mutex
	elapsed  int64 // milliseconds since the epoch at the last issued pair
	sequence int64 // per-millisecond counter, wraps at SequenceBitMask
}
// Next returns the next (elapsed-milliseconds, sequence) pair relative
// to epoch. When the per-millisecond sequence wraps, it borrows the next
// tick and sleeps until real time catches up, so pairs stay strictly
// increasing. The error result is always nil. The first parameter is
// unused (present only to satisfy the sequence interface).
func (s *DefaultSequence) Next(_ int16, epoch time.Time) (int64, int64, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Milliseconds elapsed since the configured epoch.
	elapsed := time.Since(epoch).Nanoseconds() / 1e6
	if s.elapsed < elapsed {
		// Real time moved forward: start a fresh sequence for this tick.
		s.elapsed = elapsed
		s.sequence = 0
	} else {
		// Same (or borrowed) tick: bump the sequence, wrapping at the mask.
		s.sequence = (s.sequence + 1) & SequenceBitMask
		if s.sequence == 0 {
			// Sequence space for this tick is exhausted: advance the logical
			// clock and sleep off the overdraft so we never run ahead of
			// real time.
			s.elapsed++
			overtime := time.Duration(s.elapsed - elapsed)
			time.Sleep(overtime * time.Millisecond)
		}
	}
	return s.elapsed, s.sequence, nil
}
|
package main
import (
"code.google.com/p/go.net/websocket"
"encoding/json"
"fmt"
"github.com/fmstephe/matching_engine/client"
"github.com/fmstephe/matching_engine/coordinator"
"github.com/fmstephe/matching_engine/matcher"
"github.com/fmstephe/matching_engine/msg"
"github.com/fmstephe/matching_engine/q"
"github.com/fmstephe/simpleid"
"io"
"net/http"
"os"
)
// traderMaker builds per-trader order/response channel pairs; assigned in main.
var traderMaker *client.TraderMaker

// idMaker hands out a unique id for each websocket trader connection.
var idMaker = simpleid.NewIdMaker()
// Origin ids distinguish the two endpoints of the in-memory message
// queues. The original repeated "= iota" on the second constant; inside
// one const block iota continues implicitly.
const (
	clientOriginId = iota // 0: messages originating from the client server
	serverOriginId        // 1: messages originating from the matching engine
)
// main wires an in-memory matching engine to a websocket front end: two
// simple queues connect the client server and the matcher via
// coordinators, while HTTP serves the static UI and the /wsconn
// websocket endpoint on 127.0.0.1:8081.
func main() {
	pwd, err := os.Getwd()
	if err != nil {
		println(err.Error())
		return
	}
	// Create matching engine + client
	clientToServer := q.NewSimpleQ("Client To Server")
	serverToClient := q.NewSimpleQ("Server To Client")
	// Matching Engine
	m := matcher.NewMatcher(100)
	var clientSvr *client.Server
	clientSvr, traderMaker = client.NewServer()
	coordinator.InMemory(serverToClient, clientToServer, clientSvr, clientOriginId, "Client.........", true)
	coordinator.InMemory(clientToServer, serverToClient, m, serverOriginId, "Matching Engine", true)
	http.Handle("/wsconn", websocket.Handler(handleTrader))
	// Static assets are served from ./html relative to the working directory.
	http.Handle("/", http.FileServer(http.Dir(pwd+"/html/")))
	if err := http.ListenAndServe("127.0.0.1:8081", nil); err != nil {
		println(err.Error())
	}
}
// handleTrader serves one websocket trader session: it allocates a fresh
// trader id, builds the order/response channel pair, runs the read pump
// in a goroutine and blocks in the write pump until the session ends.
func handleTrader(ws *websocket.Conn) {
	traderId := uint32(idMaker.Id()) // NB: A fussy man would check that the id generated fitted inside uint32
	orders, responses := traderMaker.Make(traderId)
	go reader(ws, traderId, orders)
	writer(ws, traderId, responses)
}
// reader decodes JSON order messages from the websocket, stamps each
// with the trader's id and forwards it to the matching engine. On any
// read or parse error it logs, closes the orders channel and the socket.
func reader(ws *websocket.Conn, traderId uint32, orders chan<- *msg.Message) {
	defer close(orders)
	defer ws.Close()
	for {
		var raw string
		if err := websocket.Message.Receive(ws, &raw); err != nil {
			logError(traderId, err)
			return
		}
		m := &msg.Message{}
		if err := json.Unmarshal([]byte(raw), m); err != nil {
			logError(traderId, err)
			return
		}
		m.TraderId = traderId
		println("WebSocket......: " + m.String())
		orders <- m
	}
}
// writer serializes each matching-engine response to JSON and pushes it
// down the websocket, closing the socket when the response stream ends
// or a marshal/write error occurs.
func writer(ws *websocket.Conn, traderId uint32, responses chan *client.Response) {
	defer ws.Close()
	for resp := range responses {
		buf, err := json.Marshal(resp)
		if err != nil {
			logError(traderId, err)
			return
		}
		if _, err = ws.Write(buf); err != nil {
			logError(traderId, err)
			return
		}
	}
}
// logError reports a per-trader error to stderr, treating io.EOF as a
// normal connection close rather than a failure.
func logError(traderId uint32, err error) {
	if err == io.EOF {
		println(fmt.Sprintf("Closing connection for trader %d", traderId))
		return
	}
	println(fmt.Sprintf("Error for trader %d: %s", traderId, err.Error()))
}
|
package gpacker
// entrytype tags the payload kind of a packed entry.
type entrytype byte

// Entry kinds, starting at 0x01 so the zero value is never a valid tag.
const (
	TBinary entrytype = iota + 0x01
	TText
	TImage
	TFont
)
|
package controllers
import (
"github.com/yydzero/cherry/models"
)
// ArticleController serves article CRUD endpoints on top of the shared
// CherryController plumbing.
type ArticleController struct {
	CherryController
}
// Get returns the article identified by the id request parameter, or a
// failure payload when the id is missing/invalid or the row cannot be
// read. (The previous comment, "Signup will register new user", was a
// copy-paste from another controller.)
// TODO: pg does not support the byte[] type.
func (this *ArticleController) Get() {
	id, err := this.GetId()
	if err != nil {
		this.Fail(err.Error())
		return
	}
	article := models.Articles{Id: id}
	if err = o.Read(&article); err != nil {
		this.Fail(err.Error())
		return
	}
	this.Resource(article)
}
// Delete removes the article identified by the id request parameter and
// reports success or the underlying error.
func (this *ArticleController) Delete() {
	id, err := this.GetId()
	if err != nil {
		this.Fail(err.Error())
		return
	}
	if _, err := o.Delete(&models.Articles{Id: id}); err != nil {
		this.Fail(err.Error())
		return
	}
	// The original said "group deleted" — a copy-paste from another
	// controller; this endpoint deletes an article.
	this.Ok("article deleted")
}
|
package gofakeit
import "fmt"
// Example demonstrates deterministic fake data: Seed(11) fixes the RNG,
// and "go test" verifies stdout against the // Output: block below, so
// the seed and the expected lines must change together or not at all.
func Example() {
	Seed(11)
	fmt.Println("Name:", Name())
	fmt.Println("Email:", Email())
	fmt.Println("Phone:", Phone())
	fmt.Println("Address:", Address().Address)
	// Output:
	// Name: Markus Moen
	// Email: alaynawuckert@kozey.biz
	// Phone: (570)245-7485
	// Address: 75776 Lake View land, Sterlingstad, New Hampshire 82250-2868
}
|
package utils
import (
"fmt"
"os"
"github.com/joho/godotenv"
)
// utilsEnv namespaces the environment-variable helpers exposed via Env.
type utilsEnv struct{}

// Env : utility functions for environment variables
var Env utilsEnv
// Load reads variables from a .env file when one exists; a missing file
// means the environment is assumed to be pre-loaded. Any other error
// panics.
func (utilsEnv) Load() {
	switch err := godotenv.Load(); {
	case err == nil:
		Log.Insta <- ". | env vars loaded from .env"
	case os.IsNotExist(err):
		Log.Insta <- ". | env vars pre-loaded"
	default:
		panic(err)
	}
}
// GetOrExit returns the mandatory environment variable key.
// NOTE(review): despite the name it panics rather than calling os.Exit
// when the variable is unset — confirm callers expect a panic.
func (utilsEnv) GetOrExit(key string) string {
	val, exists := os.LookupEnv(key)
	if !exists {
		panic(fmt.Sprintf("Missing env var: %s", key))
	}
	return val
}
// GetOrEmpty returns the optional environment variable key, or "" when
// it is unset (os.Getenv has exactly these semantics).
func (utilsEnv) GetOrEmpty(key string) string {
	return os.Getenv(key)
}
|
package main
import (
"io"
"log"
"net/http"
)
// Handler routes a tiny two-path HTTP API.
type Handler struct {
}

// ServeHTTP answers /dog and /cat with fixed bodies; every other path
// receives an empty 200 response.
func (handler Handler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	bodies := map[string]string{
		"/dog": "doggy dog",
		"/cat": "Kitty cat",
	}
	if body, ok := bodies[req.URL.Path]; ok {
		io.WriteString(res, body)
	}
}
// main serves the two-path handler on :8080 and logs the terminal
// server error.
func main() {
	log.Fatalln(http.ListenAndServe(":8080", Handler{}))
}
|
package main
import (
"fmt"
"html/template"
"log"
"math/rand"
"net/http"
"os/exec"
"runtime"
"strings"
"time"
)
type (
	// Word is an English/Russian vocabulary pair.
	Word struct {
		En string // English word
		Ru string // Russian translation
	}
	// Answer records one user attempt at translating a word.
	Answer struct {
		Ru          string // the user's submitted answer
		En          string // the English word that had to be translated
		IsRight     bool   // whether the answer was accepted
		RightAnswer string // the expected translation
	}
	// DataForTemplate carries everything the HTML template renders.
	DataForTemplate struct {
		CurrentWord        Word     `json:"current_word"` // the word currently being asked
		UserAnswers        []Answer // history of the user's answers, newest first
		RightCnt, WrongCnt int      // running totals of right and wrong answers
	}
)
var (
	// userAnswers accumulates the user's answers, newest first.
	userAnswers = []Answer{}
	// tmpl holds the HTML templates loaded from the static directory.
	tmpl = template.Must(template.ParseGlob("static/*.html"))
	// dictonary is the vocabulary: all word pairs (spelling kept as-is —
	// the identifier is referenced elsewhere in the program).
	dictonary = []Word{}
	// currentWord is the word being asked; global so the renderer and the
	// answer checker both see it.
	currentWord Word
	// rightCnt and wrongCnt are the running correct/incorrect counters.
	rightCnt, wrongCnt int
)
// main is the program entry point: seed the RNG, load the dictionary,
// register the routes, open the browser and serve on :9090.
func main() {
	// Seed the RNG once at startup; otherwise rand yields the same
	// sequence on every run.
	rand.Seed(time.Now().UnixNano())
	// Read the word pairs from the excel file into the dictionary.
	readDictionary()
	// Routing: each URL maps to the handler invoked for it.
	http.HandleFunc("/", mainPage)
	http.HandleFunc("/enRu", enRu)
	// Open the application in the user's browser.
	go openBrowser("http://localhost:9090")
	// Start the server on port 9090 (open localhost:9090).
	err := http.ListenAndServe(":9090", nil)
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
// mainPage renders the landing page (index.html) with no template data.
func mainPage(w http.ResponseWriter, r *http.Request) {
	// The original discarded the render error; surface it in the log so
	// broken templates are visible.
	if err := tmpl.ExecuteTemplate(w, "index.html", nil); err != nil {
		log.Println(err)
	}
}
// enRu renders the trainer page. On POST it grades the submitted answer
// and prepends it to the history; it then picks a new random word and
// re-renders the page with the updated counters.
func enRu(w http.ResponseWriter, r *http.Request) {
	// POST means the user typed an answer and pressed submit/enter.
	if r.Method == "POST" {
		r.ParseForm()
		// Extract the "answer" form field and grade it; checkUserAnswer
		// returns a filled-in Answer with IsRight set.
		userAnswer := r.Form.Get("answer")
		answer := checkUserAnswer(userAnswer)
		// Prepend to the history (newest answers are shown first).
		userAnswers = append([]Answer{answer}, userAnswers...)
	}
	// Pick the next word uniformly from the whole dictionary. The
	// original used rand.Intn(len(dictonary)-1), which could never pick
	// the last word and panicked on a one-word dictionary.
	currentWord = dictonary[rand.Intn(len(dictonary))]
	// Hand the current word, the answer history and the counters to the
	// template and render it.
	data := DataForTemplate{CurrentWord: currentWord, UserAnswers: userAnswers, RightCnt: rightCnt, WrongCnt: wrongCnt}
	err := tmpl.ExecuteTemplate(w, "enRu.html", data)
	if err != nil {
		fmt.Println(err)
	}
}
// checkUserAnswer grades userAnswer against the current word, bumps the
// matching global counter, and returns the filled-in Answer record. An
// answer is accepted when it contains the expected translation.
func checkUserAnswer(userAnswer string) Answer {
	res := Answer{Ru: userAnswer, En: currentWord.En, RightAnswer: currentWord.Ru}
	res.IsRight = strings.Contains(userAnswer, currentWord.Ru)
	if res.IsRight {
		rightCnt++
	} else {
		wrongCnt++
	}
	return res
}
// openBrowser tries to open the URL in the platform's default browser
// and reports whether the launcher command started successfully.
func openBrowser(url string) bool {
	var launcher []string
	switch runtime.GOOS {
	case "darwin":
		launcher = []string{"open"}
	case "windows":
		launcher = []string{"cmd", "/c", "start"}
	default:
		launcher = []string{"xdg-open"}
	}
	cmd := exec.Command(launcher[0], append(launcher[1:], url)...)
	return cmd.Start() == nil
}
|
package r10kshelldeployer
// Option is a functional option applied to a Shell during construction.
type Option func(*Shell)
// WithConfig copies the populated fields of cfg onto the shell's
// configuration. A nil cfg is a no-op. Supplying a new Command resets
// Args (unless cfg also supplies Args), and Environment entries are
// appended to the existing ones.
func WithConfig(cfg *Config) Option {
	return func(s *Shell) {
		if cfg == nil {
			return
		}
		if cmd := cfg.Command; cmd != "" {
			s.cfg.Command = cmd
			s.cfg.Args = nil // a new command invalidates the old arguments
		}
		if args := cfg.Args; args != nil {
			s.cfg.Args = args
		}
		s.cfg.Environment = append(s.cfg.Environment, cfg.Environment...)
	}
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"encoding/json"
"sort"
"cloud.google.com/go/bigtable"
pb "github.com/datacommonsorg/mixer/internal/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// GetStatSeries implements API for Mixer.GetStatSeries.
// Endpoint: /stat/series
// It returns the single best-ranked source series for one
// (place, stat var) pair, optionally filtered by measurement method,
// observation period, unit and scaling factor.
// TODO(shifucun): consilidate and dedup the logic among these similar APIs.
func (s *Server) GetStatSeries(
	ctx context.Context, in *pb.GetStatSeriesRequest) (
	*pb.GetStatSeriesResponse, error) {
	place := in.GetPlace()
	statVar := in.GetStatVar()
	if place == "" {
		return nil, status.Errorf(codes.InvalidArgument,
			"Missing required argument: place")
	}
	if statVar == "" {
		return nil, status.Errorf(codes.InvalidArgument,
			"Missing required argument: stat_var")
	}
	// Optional filters; empty values match everything.
	filterProp := &ObsProp{
		Mmethod: in.GetMeasurementMethod(),
		Operiod: in.GetObservationPeriod(),
		Unit:    in.GetUnit(),
		Sfactor: in.GetScalingFactor(),
	}
	rowList, keyTokens := buildStatsKey([]string{place}, []string{statVar})
	btData, err := readStats(ctx, s.store, rowList, keyTokens)
	if err != nil {
		return nil, err
	}
	obsTimeSeries := btData[place][statVar]
	if obsTimeSeries == nil {
		return nil, status.Errorf(codes.NotFound,
			"No data for %s, %s", place, statVar)
	}
	// Keep only the matching source series and surface the top-ranked one.
	series := obsTimeSeries.SourceSeries
	series = filterSeries(series, filterProp)
	sort.Sort(byRank(series))
	resp := pb.GetStatSeriesResponse{Series: map[string]float64{}}
	if len(series) > 0 {
		resp.Series = series[0].Val
	}
	return &resp, nil
}
// GetStatAll implements API for Mixer.GetStatAll.
// Endpoint: /stat/set/series/all
// Endpoint: /stat/all
// It returns the cached ObsTimeSeries for the full cross product of the
// requested places and stat vars; pairs with no data map to nil.
func (s *Server) GetStatAll(ctx context.Context, in *pb.GetStatAllRequest) (
	*pb.GetStatAllResponse, error) {
	places := in.GetPlaces()
	statVars := in.GetStatVars()
	if len(places) == 0 {
		return nil, status.Errorf(codes.InvalidArgument,
			"Missing required argument: place")
	}
	if len(statVars) == 0 {
		return nil, status.Errorf(codes.InvalidArgument,
			"Missing required argument: stat_var")
	}
	// Initialize result with place and stat var dcids.
	result := &pb.GetStatAllResponse{
		PlaceData: make(map[string]*pb.PlaceStat),
	}
	for _, place := range places {
		result.PlaceData[place] = &pb.PlaceStat{
			StatVarData: make(map[string]*pb.ObsTimeSeries),
		}
		for _, statVar := range statVars {
			result.PlaceData[place].StatVarData[statVar] = nil
		}
	}
	rowList, keyTokens := buildStatsKey(places, statVars)
	cacheData, err := readStatsPb(ctx, s.store, rowList, keyTokens)
	if err != nil {
		return nil, err
	}
	// Overwrite the nil placeholders with whatever the cache returned.
	for place, placeData := range cacheData {
		for statVar, data := range placeData {
			result.PlaceData[place].StatVarData[statVar] = data
		}
	}
	return result, nil
}
// GetStats implements API for Mixer.GetStats.
// Endpoint: /stat/set/series
// Endpoint: /bulk/stats
// It returns a JSON-encoded map from place dcid to its filtered and
// ranked ObsTimeSeries for a single stat var; requested places with no
// data appear with a null value.
func (s *Server) GetStats(ctx context.Context, in *pb.GetStatsRequest) (
	*pb.GetStatsResponse, error) {
	placeDcids := in.GetPlace()
	statsVarDcid := in.GetStatsVar()
	if len(placeDcids) == 0 {
		return nil, status.Errorf(codes.InvalidArgument,
			"Missing required argument: place")
	}
	if statsVarDcid == "" {
		return nil, status.Errorf(codes.InvalidArgument,
			"Missing required argument: stat_var")
	}
	// Optional filters; empty values match everything.
	filterProp := &ObsProp{
		Mmethod: in.GetMeasurementMethod(),
		Operiod: in.GetObservationPeriod(),
		Unit:    in.GetUnit(),
	}
	var rowList bigtable.RowList
	var keyTokens map[string]*placeStatVar
	rowList, keyTokens = buildStatsKey(placeDcids, []string{statsVarDcid})
	result := map[string]*ObsTimeSeries{}
	cacheData, err := readStats(ctx, s.store, rowList, keyTokens)
	if err != nil {
		return nil, err
	}
	for place := range cacheData {
		result[place] = cacheData[place][statsVarDcid]
	}
	// Fill in nil entries for requested places with no cached data.
	for _, dcid := range placeDcids {
		if _, ok := result[dcid]; !ok {
			result[dcid] = nil
		}
	}
	// NOTE(review): the nil entries inserted above are also ranged over
	// here — confirm filterAndRank is safe on a nil receiver.
	for _, obsSeries := range result {
		obsSeries.filterAndRank(filterProp)
	}
	jsonRaw, err := json.Marshal(result)
	if err != nil {
		return nil, err
	}
	return &pb.GetStatsResponse{Payload: string(jsonRaw)}, nil
}
// GetStatSetSeries implements API for Mixer.GetStatSetSeries.
// Endpoint: /v1/stat/set/series
// For every (place, stat var) pair it returns the single best series
// (per getBestSeries ranking), or nil when no data is cached.
func (s *Server) GetStatSetSeries(ctx context.Context, in *pb.GetStatSetSeriesRequest) (
	*pb.GetStatSetSeriesResponse, error) {
	places := in.GetPlaces()
	statVars := in.GetStatVars()
	if len(places) == 0 {
		return nil, status.Errorf(
			codes.InvalidArgument, "Missing required argument: places")
	}
	if len(statVars) == 0 {
		return nil, status.Errorf(
			codes.InvalidArgument, "Missing required argument: stat_vars")
	}
	rowList, keyTokens := buildStatsKey(places, statVars)
	// Initialize result with place and stat var dcids.
	result := &pb.GetStatSetSeriesResponse{
		Data: make(map[string]*pb.SeriesMap),
	}
	for _, place := range places {
		result.Data[place] = &pb.SeriesMap{
			Data: make(map[string]*pb.Series),
		}
		for _, statVar := range statVars {
			result.Data[place].Data[statVar] = nil
		}
	}
	cacheData, err := readStatsPb(ctx, s.store, rowList, keyTokens)
	if err != nil {
		return nil, err
	}
	// Replace the nil placeholders with the best-ranked cached series.
	for place, placeData := range cacheData {
		for statVar, data := range placeData {
			if data != nil {
				result.Data[place].Data[statVar] = getBestSeries(data)
			}
		}
	}
	return result, nil
}
|
package main
// isValidSudoku reports whether the filled cells of a 9x9 board satisfy
// the Sudoku constraints: no digit repeats within any row, any column,
// or any 3x3 sub-box. Empty cells are marked with '.'.
func isValidSudoku(board [][]byte) bool {
	for i := 0; i < 9; i++ {
		rowSeen := make(map[byte]bool)
		colSeen := make(map[byte]bool)
		boxSeen := make(map[byte]bool)
		for j := 0; j < 9; j++ {
			// Row i, cell j.
			if v := board[i][j]; v != '.' {
				if rowSeen[v] {
					return false
				}
				rowSeen[v] = true
			}
			// Column i, cell j.
			if v := board[j][i]; v != '.' {
				if colSeen[v] {
					return false
				}
				colSeen[v] = true
			}
			// The i-th 3x3 box, visited in row-major order by j.
			if v := board[3*(i/3)+j/3][3*(i%3)+j%3]; v != '.' {
				if boxSeen[v] {
					return false
				}
				boxSeen[v] = true
			}
		}
	}
	return true
}
|
package main
import (
"image"
"log"
"os"
"path/filepath"
"reflect"
"runtime"
"sync"
"time"
"lec/lecimg"
)
// Work represents a job to do: one image file to be processed.
// Workers construct the full path as filepath.Join(dir, filename).
type Work struct {
	dir      string // directory containing the image file
	filename string // image file name within dir
	quit     bool   // sentinel: when true, tells a worker to stop
}
// Worker is a worker to process images. It only holds the receive side
// of the shared work channel.
type Worker struct {
	workChan <-chan Work // channel from which work items are consumed
}
// collectImages scans srcDir for modified image files and queues one Work
// item per file on workChan. When watch is true it keeps polling (every
// 5 seconds) until an error occurs; otherwise it performs a single pass.
// It signals completion by sending true on finChan.
// NOTE(review): watchDelay is forwarded to ListModifiedImages; the poll
// interval itself is fixed at 5s — confirm that is intended.
func collectImages(workChan chan<- Work, finChan chan<- bool, srcDir string, watch bool, watchDelay int) {
	defer func() {
		finChan <- true
	}()
	lastCheckTime := time.Unix(0, 0)
	var files []os.FileInfo
	var err error
	for {
		// List image files modified since the previous check.
		files, lastCheckTime, err = lecimg.ListModifiedImages(srcDir, watchDelay, lastCheckTime)
		if err != nil {
			log.Println(err)
			break
		}
		// Queue one work item per discovered file.
		for _, file := range files {
			workChan <- Work{srcDir, file.Name(), false}
		}
		if !watch {
			break
		}
		// Poll interval between directory scans.
		time.Sleep(5 * time.Second)
	}
}
// work consumes Work items from the worker's channel until it receives a
// quit item. For each file it loads the image, applies the filters in
// order (each filter's output feeding the next), and saves the final
// result as a JPEG (quality 80) into destDir. Errors on individual files
// are logged and the worker moves on. Decrements wg on return.
func work(worker Worker, filters []lecimg.Filter, destDir string, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		work := <-worker.workChan
		if work.quit {
			break
		}
		log.Printf("[R] %v\n", work.filename)
		src, err := lecimg.LoadImage(filepath.Join(work.dir, work.filename))
		if err != nil {
			log.Printf("Error : %v : %v\n", work.filename, err)
			continue
		}
		// Run the filter chain; each stage consumes the previous output.
		var dest image.Image
		for _, filter := range filters {
			result := filter.Run(lecimg.NewFilterSource(src, work.filename, -1))
			result.Log()
			resultImg := result.Img()
			if resultImg == nil {
				log.Printf("Filter result is nil. filter: %v\n", reflect.TypeOf(filter))
				break
			}
			dest = resultImg
			src = dest
		}
		// Bug fix: if the chain produced no image at all (empty filter
		// list, or the first filter failed), skip saving instead of
		// handing a nil image to SaveJpeg.
		if dest == nil {
			continue
		}
		err = lecimg.SaveJpeg(dest, destDir, work.filename, 80)
		if err != nil {
			log.Printf("Error : %v : %v\n", work.filename, err)
			continue
		}
	}
}
// startWorks wires up the image-processing pipeline described by config:
// one collector goroutine feeding a buffered work channel, and
// config.maxProcess worker goroutines draining it. It returns once the
// collector has finished and every worker has drained its quit item.
func startWorks(config *Config) {
	runtime.GOMAXPROCS(config.maxProcess)

	workChan := make(chan Work, 100)
	finChan := make(chan bool)
	var wg sync.WaitGroup

	// Collector: scans the source directory and enqueues work items.
	go collectImages(workChan, finChan, config.src.dir, config.watch, config.watchDelay)

	filters := make([]lecimg.Filter, 0, len(config.filterOptions))
	for _, opt := range config.filterOptions {
		filters = append(filters, opt.filter)
	}

	// Workers: drain the channel until told to quit.
	for i := 0; i < config.maxProcess; i++ {
		wg.Add(1)
		go work(Worker{workChan}, filters, config.dest.dir, &wg)
	}

	// Wait for the collector, then send one quit item per worker.
	<-finChan
	for i := 0; i < config.maxProcess; i++ {
		workChan <- Work{"", "", true}
	}
	wg.Wait()
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-13 09:01
# @File : lt_191_Number_of_1_Bits.go
# @Description :
# @Attention :
*/
package byte
// hammingWeight returns the number of set bits (population count) in num.
//
// Improvement over the original fixed 32-iteration loop: Kernighan's
// trick — num & (num - 1) clears the lowest set bit — so the loop runs
// once per set bit rather than once per bit position.
func hammingWeight(num uint32) int {
	count := 0
	for num != 0 {
		num &= num - 1
		count++
	}
	return count
}
|
package models
import (
"net/http"
"time"
mesherykube "github.com/layer5io/meshkit/utils/kubernetes"
"github.com/vmihailenco/taskq/v3"
)
// HandlerInterface defines the methods a Handler should define.
// The methods fall into functional groups (middleware, auth, Kubernetes,
// load testing, adapters, Grafana/Prometheus, user preferences,
// patterns/filters/applications, extensions, schedules); group headers
// below are informational only.
type HandlerInterface interface {
	// Server metadata.
	ServerVersionHandler(w http.ResponseWriter, r *http.Request)
	// Middleware constructors.
	ProviderMiddleware(http.Handler) http.Handler
	AuthMiddleware(http.Handler) http.Handler
	SessionInjectorMiddleware(func(http.ResponseWriter, *http.Request, *Preference, *User, Provider)) http.Handler
	GraphqlMiddleware(http.Handler) func(http.ResponseWriter, *http.Request, *Preference, *User, Provider)
	// Provider discovery and capabilities.
	ProviderHandler(w http.ResponseWriter, r *http.Request)
	ProvidersHandler(w http.ResponseWriter, r *http.Request)
	ProviderUIHandler(w http.ResponseWriter, r *http.Request)
	ProviderCapabilityHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	ProviderComponentsHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	// Authentication lifecycle.
	TokenHandler(w http.ResponseWriter, r *http.Request, provider Provider, fromMiddleWare bool)
	LoginHandler(w http.ResponseWriter, r *http.Request, provider Provider, fromMiddleWare bool)
	LogoutHandler(w http.ResponseWriter, req *http.Request, provider Provider)
	UserHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	// Kubernetes configuration and connectivity.
	K8SConfigHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	GetContextsFromK8SConfig(w http.ResponseWriter, req *http.Request)
	KubernetesPingHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// Load testing and results.
	LoadTestHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	LoadTestUsingSMPHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	CollectStaticMetrics(config *SubmitMetricsConfig) error
	FetchResultsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	FetchAllResultsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GetResultHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GetSMPServiceMeshes(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	FetchSmiResultsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// Mesh adapters.
	MeshAdapterConfigHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	MeshOpsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	AdaptersHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	EventStreamHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	AdapterPingHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// Grafana integration.
	GrafanaConfigHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GrafanaBoardsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GrafanaQueryHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GrafanaQueryRangeHandler(w http.ResponseWriter, req *http.Request)
	GrafanaPingHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	SaveSelectedGrafanaBoardsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// Prometheus integration.
	ScanPromGrafanaHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	ScanPrometheusHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	ScanGrafanaHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	PrometheusConfigHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GrafanaBoardImportForPrometheusHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	PrometheusQueryHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	PrometheusQueryRangeHandler(w http.ResponseWriter, req *http.Request)
	PrometheusPingHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	PrometheusStaticBoardHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	SaveSelectedPrometheusBoardsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// User preferences.
	UserPrefsHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	UserTestPreferenceHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	UserTestPreferenceStore(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	UserTestPreferenceGet(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	UserTestPreferenceDelete(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// Performance profiles.
	SavePerformanceProfileHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GetPerformanceProfilesHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	GetPerformanceProfileHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	DeletePerformanceProfileHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	SessionSyncHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// Patterns and OAM components.
	PatternFileHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	OAMRegisterHandler(rw http.ResponseWriter, r *http.Request)
	OAMComponentDetailsHandler(rw http.ResponseWriter, r *http.Request)
	OAMComponentDetailByIDHandler(rw http.ResponseWriter, r *http.Request)
	PatternFileRequestHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	DeleteMesheryPatternHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	GetMesheryPatternHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	// Filters.
	FilterFileHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	GetMesheryFilterFileHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	FilterFileRequestHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	GetMesheryFilterHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	DeleteMesheryFilterHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	// Applications.
	ApplicationFileHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	ApplicationFileRequestHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	GetMesheryApplicationHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	DeleteMesheryApplicationHandler(rw http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	// Extensions.
	ExtensionsEndpointHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	LoadExtensionFromPackage(w http.ResponseWriter, req *http.Request, provider Provider) error
	ExtensionsVersionHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	// Schedules.
	SaveScheduleHandler(w http.ResponseWriter, req *http.Request, prefObj *Preference, user *User, provider Provider)
	GetSchedulesHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	GetScheduleHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
	DeleteScheduleHandler(w http.ResponseWriter, r *http.Request, prefObj *Preference, user *User, provider Provider)
}
// HandlerConfig holds all the config pieces needed by handler methods.
type HandlerConfig struct {
	// SessionName string
	// RefCookieName string
	// SessionStore sessions.Store
	AdapterTracker AdaptersTrackerInterface // tracks registered mesh adapters
	QueryTracker   QueryTrackerInterface    // tracks issued queries

	Queue taskq.Queue // background task queue

	KubeConfigFolder string // folder containing kubeconfig files

	KubeClient *mesherykube.Client // Kubernetes API client

	// Separate Grafana clients for configuration vs. query traffic.
	GrafanaClient         *GrafanaClient
	GrafanaClientForQuery *GrafanaClient

	// Separate Prometheus clients for configuration vs. query traffic.
	PrometheusClient         *PrometheusClient
	PrometheusClientForQuery *PrometheusClient

	// GraphQLHandler http.Handler
	// GraphQLPlaygroundHandler http.Handler

	// Providers maps provider name to implementation; cookie settings
	// control how the selected provider is remembered.
	Providers              map[string]Provider
	ProviderCookieName     string
	ProviderCookieDuration time.Duration
}
// SubmitMetricsConfig is used to store config used for submitting metrics.
type SubmitMetricsConfig struct {
	// TestUUID/ResultID identify the test run; PromURL points at the
	// Prometheus instance metrics are pulled from.
	TestUUID, ResultID, PromURL string
	// StartTime/EndTime bound the metrics collection window.
	StartTime, EndTime time.Time
	// TokenKey,
	TokenVal string   // auth token value used when submitting
	Provider Provider // provider the metrics are submitted through
}
|
package dataStruct
import "fmt"
// LinkNode is a node in a doubly linked list of int values.
type LinkNode struct {
	Data int       // payload value
	Prev *LinkNode // previous node, or nil at the head
	Next *LinkNode // next node, or nil at the tail
}
// NewLink creates a fresh, unlinked list node holding value.
// (The receiver is unused; the method exists for call-site convenience.)
func (_this *LinkNode) NewLink(value int) *LinkNode {
	return &LinkNode{Data: value}
}
// Push inserts node into the ascending-sorted list headed by the
// receiver and returns the (possibly new) head of the list. Callers must
// use the return value, since inserting before the head changes it.
func (_this *LinkNode) Push(node *LinkNode) *LinkNode {
	cur := _this
	// The receiver is guaranteed non-nil here.
	if cur.Next == nil {
		// Single-node list: append after or prepend before the head.
		if node.Data > cur.Data {
			cur.Next = node
			node.Prev = cur
			return cur
		}
		node.Next = cur
		cur.Prev = node
		return node
	}
	head := cur
	// Walk the list looking for the gap between two adjacent nodes.
	for ; cur.Next != nil; cur = cur.Next {
		if node.Data > cur.Data && node.Data <= cur.Next.Data {
			node.Next = cur.Next
			cur.Next.Prev = node
			cur.Next = node
			node.Prev = cur
			return head
		}
	}
	// Not inserted yet: node holds the largest value, append at the tail.
	cur.Next = node
	node.Prev = cur
	return head
}
// Pop detaches and returns the last node of the list.
//
// Bug fixed: the original only set a local variable to nil, so the popped
// node was never actually removed from the list. We now unlink it from
// its predecessor (and clear its Prev pointer) before returning it. When
// the receiver is the only node, there is nothing to unlink.
func (_this *LinkNode) Pop() *LinkNode {
	last := _this
	for last.Next != nil {
		last = last.Next
	}
	if last.Prev != nil {
		last.Prev.Next = nil
		last.Prev = nil
	}
	return last
}
// GetNode constructs a standalone, unlinked node holding value.
func GetNode(value int) *LinkNode {
	return &LinkNode{Data: value}
}
// PrintLink walks the list from node to the tail, printing each Data
// value followed by a tab.
func PrintLink(node *LinkNode) {
	for cur := node; cur != nil; cur = cur.Next {
		fmt.Printf("%d\t", cur.Data)
	}
}
|
package exoscale
import (
"context"
"testing"
"github.com/stretchr/testify/require"
cloudprovider "k8s.io/cloud-provider"
)
// TestGetZoneByProviderID verifies that GetZoneByProviderID resolves the
// test instance's provider ID to the expected zone.
func TestGetZoneByProviderID(t *testing.T) {
	api, server := newMockInstanceAPI()
	defer server.Close()
	z := &zones{p: api}

	got, err := z.GetZoneByProviderID(context.Background(), testInstanceProviderID)
	require.NoError(t, err)
	require.NotNil(t, got)
	require.Equal(t, cloudprovider.Zone{Region: testInstanceZoneName}, got)
}
// TestGetZoneByNodeName verifies that GetZoneByNodeName resolves the test
// instance's node name to the expected zone.
func TestGetZoneByNodeName(t *testing.T) {
	api, server := newMockInstanceAPI()
	defer server.Close()
	z := &zones{p: api}

	got, err := z.GetZoneByNodeName(context.Background(), testInstanceName)
	require.NoError(t, err)
	require.NotNil(t, got)
	require.Equal(t, cloudprovider.Zone{Region: testInstanceZoneName}, got)
}
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"net"
"reflect"
"testing"
)
// TestIPString checks that IP.String round-trips a dotted-quad address
// and performs exactly one allocation per call.
func TestIPString(t *testing.T) {
	const want = "1.2.3.4"
	ip := NewIP(net.ParseIP(want))

	var got string
	allocs := testing.AllocsPerRun(1000, func() { got = ip.String() })

	if got != want {
		t.Errorf("got %q; want %q", got, want)
	}
	if allocs != 1 {
		t.Errorf("allocs = %v; want 1", allocs)
	}
}
// icmpRequestBuffer is a raw IPv4 ICMP echo-request packet
// (1.2.3.4 -> 5.6.7.8) carrying the payload "request_payload".
var icmpRequestBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x27, 0xde, 0xad, 0x00, 0x00, 0x40, 0x01, 0x8c, 0x15,
	// source ip
	0x01, 0x02, 0x03, 0x04,
	// destination ip
	0x05, 0x06, 0x07, 0x08,
	// ICMP header
	0x08, 0x00, 0x7d, 0x22,
	// "request_payload"
	0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
}

// icmpRequestDecode is the ParsedPacket expected when decoding
// icmpRequestBuffer.
var icmpRequestDecode = ParsedPacket{
	b:       icmpRequestBuffer,
	subofs:  20,
	dataofs: 24,
	length:  len(icmpRequestBuffer),
	IPProto: ICMP,
	SrcIP:   NewIP(net.ParseIP("1.2.3.4")),
	DstIP:   NewIP(net.ParseIP("5.6.7.8")),
	SrcPort: 0,
	DstPort: 0,
}

// icmpReplyBuffer is the raw ICMP echo-reply packet (5.6.7.8 -> 1.2.3.4)
// carrying the payload "reply_payload".
var icmpReplyBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x25, 0x21, 0x52, 0x00, 0x00, 0x40, 0x01, 0x49, 0x73,
	// source ip
	0x05, 0x06, 0x07, 0x08,
	// destination ip
	0x01, 0x02, 0x03, 0x04,
	// ICMP header
	0x00, 0x00, 0xe6, 0x9e,
	// "reply_payload"
	0x72, 0x65, 0x70, 0x6c, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
}

// icmpReplyDecode is the ParsedPacket counterpart of icmpReplyBuffer.
// NOTE(review): SrcIP/DstIP here are 1.2.3.4 -> 5.6.7.8, but
// icmpReplyBuffer carries 5.6.7.8 -> 1.2.3.4; this value is not used by
// TestDecode — confirm whether the fields are intentional.
var icmpReplyDecode = ParsedPacket{
	b:       icmpReplyBuffer,
	subofs:  20,
	dataofs: 24,
	length:  len(icmpReplyBuffer),
	IPProto: ICMP,
	SrcIP:   NewIP(net.ParseIP("1.2.3.4")),
	DstIP:   NewIP(net.ParseIP("5.6.7.8")),
	SrcPort: 0,
	DstPort: 0,
}

// IPv6 Router Solicitation
var ipv6PacketBuffer = []byte{
	0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x3a, 0xff,
	0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0xfb, 0x57, 0x1d, 0xea, 0x9c, 0x39, 0x8f, 0xb7,
	0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
	0x85, 0x00, 0x38, 0x04, 0x00, 0x00, 0x00, 0x00,
}

// ipv6PacketDecode: IPv6 is recognized but not parsed further, so only
// the protocol field is populated.
var ipv6PacketDecode = ParsedPacket{
	b:       ipv6PacketBuffer,
	IPProto: IPv6,
}

// This is a malformed IPv4 packet.
// Namely, the string "tcp_payload" follows the first byte of the IPv4 header.
var unknownPacketBuffer = []byte{
	0x45, 0x74, 0x63, 0x70, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
}

// unknownPacketDecode is the expected decode of the malformed packet.
var unknownPacketDecode = ParsedPacket{
	b:       unknownPacketBuffer,
	IPProto: Unknown,
}

// tcpPacketBuffer is a raw TCP SYN/ACK packet (1.2.3.4:123 -> 5.6.7.8:567)
// carrying the payload "request_payload".
var tcpPacketBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x37, 0xde, 0xad, 0x00, 0x00, 0x40, 0x06, 0x49, 0x5f,
	// source ip
	0x01, 0x02, 0x03, 0x04,
	// destination ip
	0x05, 0x06, 0x07, 0x08,
	// TCP header with SYN, ACK set
	0x00, 0x7b, 0x02, 0x37, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x00, 0x00,
	0x50, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
	// "request_payload"
	0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
}

// tcpPacketDecode is the ParsedPacket expected when decoding
// tcpPacketBuffer.
var tcpPacketDecode = ParsedPacket{
	b:        tcpPacketBuffer,
	subofs:   20,
	dataofs:  40,
	length:   len(tcpPacketBuffer),
	IPProto:  TCP,
	SrcIP:    NewIP(net.ParseIP("1.2.3.4")),
	DstIP:    NewIP(net.ParseIP("5.6.7.8")),
	SrcPort:  123,
	DstPort:  567,
	TCPFlags: TCPSynAck,
}

// udpRequestBuffer is a raw UDP packet (1.2.3.4:123 -> 5.6.7.8:567)
// carrying the payload "request_payload".
var udpRequestBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x2b, 0xde, 0xad, 0x00, 0x00, 0x40, 0x11, 0x8c, 0x01,
	// source ip
	0x01, 0x02, 0x03, 0x04,
	// destination ip
	0x05, 0x06, 0x07, 0x08,
	// UDP header
	0x00, 0x7b, 0x02, 0x37, 0x00, 0x17, 0x72, 0x1d,
	// "request_payload"
	0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
}

// udpRequestDecode is the ParsedPacket expected when decoding
// udpRequestBuffer.
var udpRequestDecode = ParsedPacket{
	b:       udpRequestBuffer,
	subofs:  20,
	dataofs: 28,
	length:  len(udpRequestBuffer),
	IPProto: UDP,
	SrcIP:   NewIP(net.ParseIP("1.2.3.4")),
	DstIP:   NewIP(net.ParseIP("5.6.7.8")),
	SrcPort: 123,
	DstPort: 567,
}

// udpReplyBuffer is the raw UDP reply packet (5.6.7.8:567 -> 1.2.3.4:123)
// carrying the payload "reply_payload".
var udpReplyBuffer = []byte{
	// IP header up to checksum
	0x45, 0x00, 0x00, 0x29, 0x21, 0x52, 0x00, 0x00, 0x40, 0x11, 0x49, 0x5f,
	// source ip
	0x05, 0x06, 0x07, 0x08,
	// destination ip
	0x01, 0x02, 0x03, 0x04,
	// UDP header
	0x02, 0x37, 0x00, 0x7b, 0x00, 0x15, 0xd3, 0x9d,
	// "reply_payload"
	0x72, 0x65, 0x70, 0x6c, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
}

// udpReplyDecode is the ParsedPacket counterpart of udpReplyBuffer.
// NOTE(review): the ports (567 -> 123) match the reply buffer, but
// SrcIP/DstIP (1.2.3.4 -> 5.6.7.8) do not; this value is not used by
// TestDecode — confirm whether the fields are intentional.
var udpReplyDecode = ParsedPacket{
	b:       udpReplyBuffer,
	subofs:  20,
	dataofs: 28,
	length:  len(udpReplyBuffer),
	IPProto: UDP,
	SrcIP:   NewIP(net.ParseIP("1.2.3.4")),
	DstIP:   NewIP(net.ParseIP("5.6.7.8")),
	SrcPort: 567,
	DstPort: 123,
}
// TestParsedPacket checks ParsedPacket.String output for each protocol
// fixture and verifies that String performs exactly one allocation.
func TestParsedPacket(t *testing.T) {
	cases := []struct {
		name    string
		qdecode ParsedPacket
		want    string
	}{
		{"tcp", tcpPacketDecode, "TCP{1.2.3.4:123 > 5.6.7.8:567}"},
		{"icmp", icmpRequestDecode, "ICMP{1.2.3.4:0 > 5.6.7.8:0}"},
		{"unknown", unknownPacketDecode, "Unknown{???}"},
		{"ipv6", ipv6PacketDecode, "IPv6{???}"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.qdecode.String(); got != tc.want {
				t.Errorf("got %q; want %q", got, tc.want)
			}
		})
	}

	allocs := testing.AllocsPerRun(1000, func() {
		cases[0].qdecode.String()
	})
	if allocs != 1 {
		t.Errorf("allocs = %v; want 1", allocs)
	}
}
// TestDecode decodes each raw packet fixture and compares the resulting
// ParsedPacket against the expected value; it also verifies that Decode
// itself does not allocate.
func TestDecode(t *testing.T) {
	cases := []struct {
		name string
		buf  []byte
		want ParsedPacket
	}{
		{"icmp", icmpRequestBuffer, icmpRequestDecode},
		{"ipv6", ipv6PacketBuffer, ipv6PacketDecode},
		{"unknown", unknownPacketBuffer, unknownPacketDecode},
		{"tcp", tcpPacketBuffer, tcpPacketDecode},
		{"udp", udpRequestBuffer, udpRequestDecode},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			var got ParsedPacket
			got.Decode(tc.buf)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("got %v; want %v", got, tc.want)
			}
		})
	}

	allocs := testing.AllocsPerRun(1000, func() {
		var got ParsedPacket
		got.Decode(cases[0].buf)
	})
	if allocs != 0 {
		t.Errorf("allocs = %v; want 0", allocs)
	}
}
// BenchmarkDecode measures ParsedPacket.Decode over the ICMP, malformed,
// and TCP packet fixtures.
func BenchmarkDecode(b *testing.B) {
	cases := []struct {
		name string
		buf  []byte
	}{
		{"icmp", icmpRequestBuffer},
		{"unknown", unknownPacketBuffer},
		{"tcp", tcpPacketBuffer},
	}
	for _, bc := range cases {
		b.Run(bc.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				// Fresh ParsedPacket each iteration, as in real use.
				var p ParsedPacket
				p.Decode(bc.buf)
			}
		})
	}
}
// TestMarshalRequest marshals the ICMP and UDP request headers into a
// caller-supplied buffer and compares the bytes against the raw request
// packets. It also verifies that a buffer too small for the packet is
// rejected with errSmallBuffer.
func TestMarshalRequest(t *testing.T) {
	// Too small to hold our packets, but only barely.
	var small [20]byte
	var large [64]byte
	icmpHeader := icmpRequestDecode.ICMPHeader()
	udpHeader := udpRequestDecode.UDPHeader()
	tests := []struct {
		name   string
		header Header
		want   []byte
	}{
		{"icmp", &icmpHeader, icmpRequestBuffer},
		{"udp", &udpHeader, udpRequestBuffer},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.header.Marshal(small[:])
			if err != errSmallBuffer {
				// Bug fix: report the error actually returned, instead of
				// the original message that always claimed it was nil.
				t.Errorf("got err: %v; want: %v", err, errSmallBuffer)
			}
			// Lay the payload after the header, then marshal the header
			// in front of it.
			dataOffset := tt.header.Len()
			dataLength := copy(large[dataOffset:], []byte("request_payload"))
			end := dataOffset + dataLength
			err = tt.header.Marshal(large[:end])
			if err != nil {
				t.Errorf("got err: %s; want nil", err)
			}
			if !bytes.Equal(large[:end], tt.want) {
				t.Errorf("got %x; want %x", large[:end], tt.want)
			}
		})
	}
}
// TestMarshalResponse flips each request header in place with ToResponse
// and verifies the marshaled bytes match the expected reply packets.
func TestMarshalResponse(t *testing.T) {
	var buf [64]byte
	icmpHeader := icmpRequestDecode.ICMPHeader()
	udpHeader := udpRequestDecode.UDPHeader()
	cases := []struct {
		name   string
		header Header
		want   []byte
	}{
		{"icmp", &icmpHeader, icmpReplyBuffer},
		{"udp", &udpHeader, udpReplyBuffer},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tc.header.ToResponse()
			// Lay the payload after the header, then marshal the header
			// in front of it.
			payloadStart := tc.header.Len()
			n := copy(buf[payloadStart:], []byte("reply_payload"))
			end := payloadStart + n
			if err := tc.header.Marshal(buf[:end]); err != nil {
				t.Errorf("got err: %s; want nil", err)
			}
			if !bytes.Equal(buf[:end], tc.want) {
				t.Errorf("got %x; want %x", buf[:end], tc.want)
			}
		})
	}
}
|
package main
import "fmt"
// main runs the first slice-declaration demo.
func main() {
	funSlice1()
}
// funSlice1 demonstrates slice declaration forms: a bare declaration
// (nil slice), an initialized empty slice, and an initialized slice with
// elements. Only the bare declaration compares equal to nil.
func funSlice1(){
	// Declare slices three different ways.
	var a []string //declare a string slice (nil, not initialized)
	var b = []int{} //declare and initialize an empty int slice
	var c = []bool{false, true} //declare and initialize a bool slice
	fmt.Println(a) //[]
	fmt.Println(b) //[]
	fmt.Println(c) //[false true]
	fmt.Println(a == nil) //true
	fmt.Println(b == nil) //false
	fmt.Println(c == nil) //false
	//fmt.Println(c == d) //slices are reference types: they cannot be compared directly, only against nil
}
// To check whether a slice is empty, always use len(s) == 0; do not use
// s == nil (an initialized empty slice is non-nil but still empty).
func funSlice2(){
	// Three empty slices whose nil-ness differs by declaration form.
	var s1 []int //len(s1)=0;cap(s1)=0;s1==nil
	s2 := []int{} //len(s2)=0;cap(s2)=0;s2!=nil
	s3 := make([]int, 0) //len(s3)=0;cap(s3)=0;s3!=nil
	fmt.Println("切片内容为:",s1,"切片长度为:",len(s1),"切片深度为:",cap(s1),"切片是否为nil:",s1==nil)
	fmt.Println("切片内容为:",s2,"切片长度为:",len(s2),"切片深度为:",cap(s2),"切片是否为nil:",s2==nil)
	fmt.Println("切片内容为:",s3,"切片长度为:",len(s3),"切片深度为:",cap(s3),"切片是否为nil:",s3==nil)
	if len(s1) == 0 && len(s2) == 0 && len(s3) == 0{
		fmt.Println("切片为空")
	}
}
// Slice assignment copies the header only: both variables share the same
// backing array, so mutating through one is visible through the other.
func funSliceCopy() {
	s1 := make([]int, 3) //[0 0 0]
	s2 := s1 //assigning s1 to s2 makes them share one backing array
	s2[0] = 100
	fmt.Println(s1) //[100 0 0]
	fmt.Println(s2) //[100 0 0]
	// copy(destSlice, srcSlice []T) copies elements instead of sharing:
	// srcSlice: source of the data
	// destSlice: destination slice
}
// Deleting an element from a slice: append the tail after the removed
// index onto the head before it. This mutates the original backing array.
func funSliceDelete() {
	// Remove an element from the slice.
	a := []int{30, 31, 32, 33, 34, 35, 36, 37}
	// Delete the element at index 2.
	a = append(a[:2], a[3:]...)
	fmt.Println(a) //[30 31 33 34 35 36 37]
}
// Copyright 2019 Radiation Detection and Imaging (RDI), LLC
// Use of this source code is governed by the BSD 3-clause
// license that can be found in the LICENSE file.
package main
import (
"context"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"runtime/pprof"
"strconv"
"strings"
"time"
"github.com/rditech/rdi-live/live"
"github.com/rditech/rdi-live/live/handlers/callback"
"github.com/rditech/rdi-live/live/handlers/client"
"github.com/rditech/rdi-live/live/handlers/ingress"
"github.com/rditech/rdi-live/live/handlers/login"
"github.com/rditech/rdi-live/live/handlers/logout"
"github.com/alicebob/miniredis"
"github.com/go-redis/redis"
"github.com/gorilla/mux"
"github.com/skratchdot/open-golang/open"
"golang.org/x/net/websocket"
)
var (
openBrowser = flag.Bool("b", false, "open a browser window and connect to server")
cpuProfile = flag.String("cpuprofile", "", "output file for cpu profiling")
)
// printUsage writes the command usage banner followed by the registered
// flag descriptions to stderr.
func printUsage() {
	fmt.Fprint(os.Stderr, `Usage: `+os.Args[0]+` [options]
Description
options:
`)
	flag.PrintDefaults()
}
// main starts the rdi-live HTTP server: it connects to redis (falling
// back to an embedded miniredis when REDIS_ADDR is unset), registers the
// client/ingress/static routes (wrapped in Auth0 login middleware when
// AUTH0_CLIENT_ID is set), and serves until interrupted or — in browser
// mode — until all clients disconnect.
func main() {
	flag.Usage = printUsage
	flag.Parse()
	// Define redis connection
	redisAddr := os.Getenv("REDIS_ADDR")
	if len(redisAddr) == 0 {
		s, err := miniredis.Run()
		if err != nil {
			// Bug fix: this must abort. The original only logged here and
			// then dereferenced the nil server via s.Addr() below.
			log.Fatalln("unable to start miniredis server:", err)
		}
		redisAddr = s.Addr()
	}
	redisClient := redis.NewClient(&redis.Options{Addr: redisAddr})
	defer redisClient.Close()
	ping := redisClient.Ping()
	if ping.Err() != nil {
		log.Fatalf("unable to ping redis server: %v\n", ping.Err())
	} else {
		log.Printf("successfully connected to redis server at %v with status %v\n", redisAddr, ping.String())
	}
	// Define handlers
	callbackHandler := http.HandlerFunc(callback.LoginCallback)
	clientHandler := &client.ClientHandler{Redis: redisClient, Addr: redisAddr}
	// MAX_NPR optionally overrides the default client cap of 100.
	clientHandler.MaxNPR = float64(100)
	if len(os.Getenv("MAX_NPR")) > 0 {
		if max, err := strconv.ParseFloat(os.Getenv("MAX_NPR"), 64); err == nil {
			clientHandler.MaxNPR = max
		}
	}
	clientHandler.EnableCompression = true
	wsc := &ingress.WsCollector{Redis: redisClient, Addr: redisAddr}
	ingressHandler := websocket.Handler(wsc.Collect)
	logoutHandler := http.HandlerFunc(logout.Logout)
	webdataHandler := http.StripPrefix("/webdata/", http.FileServer(live.WebdataBox))
	rootHandler := http.StripPrefix("/", http.FileServer(live.WebdataBox))
	// Define http server and routes
	port := os.Getenv("PORT")
	if len(port) == 0 {
		port = "8080"
	}
	router := mux.NewRouter()
	if len(os.Getenv("AUTH0_CLIENT_ID")) > 0 {
		// Auth0 configured: protect the client and root routes.
		log.Println("Enabling Auth0 login with client ID", os.Getenv("AUTH0_CLIENT_ID"))
		wsc.DefaultNamespace = "rdi-data-dev1"
		router.Handle("/callback", callbackHandler)
		router.Handle("/client", login.LoginMiddleware(clientHandler))
		router.Handle("/ingress", ingressHandler)
		router.Handle("/logout", logoutHandler)
		router.PathPrefix("/webdata/").Handler(webdataHandler)
		router.PathPrefix("/").Handler(login.LoginMiddleware(rootHandler))
	} else {
		// No Auth0: all routes are public.
		wsc.DefaultNamespace = "everyone"
		router.Handle("/client", clientHandler)
		router.Handle("/ingress", ingressHandler)
		router.PathPrefix("/webdata/").Handler(webdataHandler)
		router.PathPrefix("/").Handler(rootHandler)
	}
	srv := &http.Server{Addr: ":" + port, Handler: router}
	switch strings.ToLower(os.Getenv("SECURE_ONLY")) {
	case "true", "on":
		log.Println("Enabling HTTP proxy securing middleware")
		srv = &http.Server{Addr: ":" + port, Handler: Secure(router)}
	}
	// Turn on cpu profiling if output file is specified
	if *cpuProfile != "" {
		f, err := os.Create(*cpuProfile)
		if err != nil {
			log.Fatal("could not create cpu profile file: ", err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	// Set up interrupt for nice quitting
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt)
		<-c
		srv.Shutdown(context.Background())
	}()
	// Open a browser window if flag is set
	if *openBrowser {
		// Instruct the clientHandler to shutdown the server when clients all
		// disconnect
		clientHandler.Srv = srv
		go func() {
			// Give the listener a moment to come up first.
			time.Sleep(10 * time.Millisecond)
			open.Run("http://localhost:" + port)
		}()
	}
	// Launch HTTP server and main display routine
	log.Println("http server started on :" + port)
	if err := srv.ListenAndServe(); err != nil {
		log.Println("ListenAndServe: ", err)
	}
	log.Println("successful quit")
}
// Middleware for redirecting http requests that are behind an HTTP proxy to
// https
func Secure(next http.Handler) http.Handler {
return http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
if strings.ToLower(r.Header.Get("x-forwarded-proto")) == "http" {
target := "https://" + r.Host + r.URL.Path
if len(r.URL.RawQuery) > 0 {
target += "?" + r.URL.RawQuery
}
log.Printf("redirect to: %s", target)
http.Redirect(w, r, target,
http.StatusTemporaryRedirect)
return
}
next.ServeHTTP(w, r)
},
)
}
|
package model
import "github.com/Yangshuting/golang_model/lib"
func MigrateUserFromKuaiMao702SelfDB(cc *lib.Cusctx) {}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"context"
"errors"
"testing"
"time"
"github.com/ngaut/pools"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// This test file includes tests that can cause data races, mainly because restartWorkers modifies d.ctx.
// getDDLSchemaVer reads the current DDL schema version from the DDL
// statistics map (key "ddl_schema_version").
func getDDLSchemaVer(t *testing.T, d ddl.DDL) int64 {
	stats, err := d.Stats(nil)
	require.NoError(t, err)
	return stats["ddl_schema_version"].(int64)
}
// restartWorkers will stop the old DDL and create a new DDL and start it.
// The calls are order-sensitive: Stop, then construct the replacement
// with the same store/info cache/lease, install it on the domain, and
// finally Start it with a fresh session pool.
func restartWorkers(t *testing.T, store kv.Storage, d *domain.Domain) {
	err := d.DDL().Stop()
	require.NoError(t, err)
	newDDL := ddl.NewDDL(context.Background(), ddl.WithStore(d.Store()), ddl.WithInfoCache(d.InfoCache()), ddl.WithLease(d.DDL().GetLease()))
	d.SetDDL(newDDL)
	// Pool of test sessions for the new DDL; CommonGlobalLoaded is set so
	// the pooled sessions are treated as already having global vars.
	err = newDDL.Start(pools.NewResourcePool(func() (pools.Resource, error) {
		session := testkit.NewTestKit(t, store).Session()
		session.GetSessionVars().CommonGlobalLoaded = true
		return session, nil
	}, 128, 128, 5))
	require.NoError(t, err)
}
// runInterruptedJob should be called concurrently with restartWorkers.
// It submits job via DoDDLJob; if the job is interrupted by a worker
// restart (context.Canceled), it polls the DDL history until the job's
// final record appears and forwards that record's error. It gives up
// after one minute to avoid looping forever. The outcome is sent on
// doneCh.
func runInterruptedJob(t *testing.T, store kv.Storage, d ddl.DDL, job *model.Job, doneCh chan error) {
	var (
		history *model.Job
		err     error
	)
	ctx := testkit.NewTestKit(t, store).Session()
	ctx.SetValue(sessionctx.QueryString, "skip")
	err = d.DoDDLJob(ctx, job)
	if errors.Is(err, context.Canceled) {
		// Deadline for the history poll below.
		endlessLoopTime := time.Now().Add(time.Minute)
		for history == nil {
			// imitate DoDDLJob's logic, quit only find history
			history, _ = ddl.GetHistoryJobByID(testkit.NewTestKit(t, store).Session(), job.ID)
			if history != nil {
				err = history.Error
			}
			time.Sleep(10 * testLease)
			if time.Now().After(endlessLoopTime) {
				err = errors.New("runInterruptedJob may enter endless loop")
				break
			}
		}
	}
	doneCh <- err
}
// testRunInterruptedJob runs job in a background goroutine while
// restarting the DDL workers on every lease tick, until the job reports
// completion without error.
func testRunInterruptedJob(t *testing.T, store kv.Storage, d *domain.Domain, job *model.Job) {
	done := make(chan error, 1)
	go runInterruptedJob(t, store, d.DDL(), job, done)

	ticker := time.NewTicker(d.DDL().GetLease())
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// Restart DDL workers mid-job, then give them a moment.
			restartWorkers(t, store, d)
			time.Sleep(time.Millisecond * 20)
		case err := <-done:
			require.Nil(t, err)
			return
		}
	}
}
// TestSchemaResume verifies that create-schema and drop-schema jobs complete
// correctly even while the DDL workers are repeatedly restarted.
func TestSchemaResume(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease)
	require.True(t, dom.DDL().OwnerManager().IsOwner())

	dbInfo, err := testSchemaInfo(store, "test_restart")
	require.NoError(t, err)

	// Create the schema under restarts, then verify it became public.
	createJob := &model.Job{
		SchemaID:   dbInfo.ID,
		Type:       model.ActionCreateSchema,
		BinlogInfo: &model.HistoryInfo{},
		Args:       []interface{}{dbInfo},
	}
	testRunInterruptedJob(t, store, dom, createJob)
	testCheckSchemaState(t, store, dbInfo, model.StatePublic)

	// Drop it the same way and verify it is gone.
	dropJob := &model.Job{
		SchemaID:   dbInfo.ID,
		Type:       model.ActionDropSchema,
		BinlogInfo: &model.HistoryInfo{},
	}
	testRunInterruptedJob(t, store, dom, dropJob)
	testCheckSchemaState(t, store, dbInfo, model.StateNone)
}
// TestStat checks that the schema version reported by DDL Stats() never goes
// backwards while a drop-schema job races with repeated worker restarts.
func TestStat(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease)
	dbInfo, err := testSchemaInfo(store, "test_restart")
	require.NoError(t, err)
	testCreateSchema(t, testkit.NewTestKit(t, store).Session(), dom.DDL(), dbInfo)
	// TODO: Get this information from etcd.
	// m, err := d.Stats(nil)
	// c.Assert(err, IsNil)
	// c.Assert(m[ddlOwnerID], Equals, d.uuid)
	job := &model.Job{
		SchemaID:   dbInfo.ID,
		Type:       model.ActionDropSchema,
		BinlogInfo: &model.HistoryInfo{},
		Args:       []interface{}{true},
	}
	done := make(chan error, 1)
	go runInterruptedJob(t, store, dom.DDL(), job, done)
	ticker := time.NewTicker(dom.DDL().GetLease() * 1)
	defer ticker.Stop()
	// Baseline version taken before the job starts; it must only grow.
	ver := getDDLSchemaVer(t, dom.DDL())
LOOP:
	for {
		select {
		case <-ticker.C:
			// The schema version must be monotonically non-decreasing across restarts.
			require.GreaterOrEqual(t, getDDLSchemaVer(t, dom.DDL()), ver)
			restartWorkers(t, store, dom)
			time.Sleep(time.Millisecond * 20)
		case err := <-done:
			// TODO: Get this information from etcd.
			// m, err := d.Stats(nil)
			require.Nil(t, err)
			break LOOP
		}
	}
}
// TestTableResume verifies that create-table and drop-table jobs complete
// correctly even while the DDL workers are repeatedly restarted.
func TestTableResume(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease)

	dbInfo, err := testSchemaInfo(store, "test_table")
	require.NoError(t, err)
	testCreateSchema(t, testkit.NewTestKit(t, store).Session(), dom.DDL(), dbInfo)
	defer func() {
		testDropSchema(t, testkit.NewTestKit(t, store).Session(), dom.DDL(), dbInfo)
	}()
	require.True(t, dom.DDL().OwnerManager().IsOwner())

	tblInfo, err := testTableInfo(store, "t1", 3)
	require.NoError(t, err)

	// Create the table under restarts, then verify it became public.
	createJob := &model.Job{
		SchemaID:   dbInfo.ID,
		TableID:    tblInfo.ID,
		Type:       model.ActionCreateTable,
		BinlogInfo: &model.HistoryInfo{},
		Args:       []interface{}{tblInfo},
	}
	testRunInterruptedJob(t, store, dom, createJob)
	testCheckTableState(t, store, dbInfo, tblInfo, model.StatePublic)

	// Drop it the same way and verify it is gone.
	dropJob := &model.Job{
		SchemaID:   dbInfo.ID,
		TableID:    tblInfo.ID,
		Type:       model.ActionDropTable,
		BinlogInfo: &model.HistoryInfo{},
	}
	testRunInterruptedJob(t, store, dom, dropJob)
	testCheckTableState(t, store, dbInfo, tblInfo, model.StateNone)
}
|
package connection
import "github.com/exasol/exasol-driver-go/pkg/errors"
// RowCount carries the affected-row count of an executed statement; its
// method set matches database/sql/driver's Result interface.
type RowCount struct {
	// affectedRows is the row count reported for the statement.
	affectedRows int64
}

// LastInsertId always fails with ErrNoLastInsertID: this driver does not
// expose last-insert IDs.
func (res *RowCount) LastInsertId() (int64, error) {
	return 0, errors.ErrNoLastInsertID
}

// RowsAffected returns the number of rows affected by the statement.
func (res *RowCount) RowsAffected() (int64, error) {
	return res.affectedRows, nil
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2019-05-04 10:38
# @File : reflect.go
# @Description : 反射的util
*/
package utils
import (
"fmt"
"reflect"
"unsafe"
)
// ConvT2InterfaceSlice converts an arbitrary slice value into a []interface{}.
// It returns nil when data is not a slice.
func ConvT2InterfaceSlice(data interface{}) []interface{} {
	v, ok := IsSlice(data)
	if !ok {
		return nil
	}
	out := make([]interface{}, v.Len())
	for i := range out {
		out[i] = v.Index(i).Interface()
	}
	return out
}

// IsSlice reports whether data is a slice, returning its reflect.Value
// together with the verdict.
func IsSlice(data interface{}) (reflect.Value, bool) {
	v := reflect.ValueOf(data)
	return v, v.Kind() == reflect.Slice
}
/*
当结构体中含有指针时,转换会导致垃圾回收的问题。
如果是 []byte 转 []T 可能会导致起始地址未对齐的问题 ([]byte 有可能从奇数位置切片)。
该转换操作可能依赖当前系统,不同类型的处理器之间有差异。
*/
// []T 转换为[]X
func ConvT2TypeSlice(slice interface{}, newSliceType reflect.Type) interface{} {
sv := reflect.ValueOf(slice)
if sv.Kind() != reflect.Slice {
fmt.Sprintf("[ConvT2TypeSlice]Slice called with non-slice value of type %T", slice)
return nil
}
if newSliceType.Kind() != reflect.Slice {
fmt.Sprintf("[ConvT2TypeSlice]Slice called with non-slice type of type %T", newSliceType)
}
newSlice := reflect.New(newSliceType)
hdr := (*reflect.SliceHeader)(unsafe.Pointer(newSlice.Pointer()))
hdr.Cap = sv.Cap() * int(sv.Type().Elem().Size()) / int(newSliceType.Elem().Size())
hdr.Len = sv.Len() * int(sv.Type().Elem().Size()) / int(newSliceType.Elem().Size())
hdr.Data = uintptr(sv.Pointer())
return newSlice.Elem().Interface()
}
|
package main
import "fmt"
// BuildID identifies the build; defaults to "dev" (presumably overridden at
// link time via -ldflags — TODO confirm against the build script).
var BuildID = "dev"

// main prints the build identifier followed by a tiny arithmetic smoke check.
func main() {
	fmt.Printf("Build: %v\n", BuildID)
	sum := add(2, 2)
	fmt.Printf("2+2=%d\n", sum)
}
// add returns the sum of its two operands.
func add(x, y int) int {
	return x + y
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.