text
stringlengths 11
4.05M
|
|---|
package main
import (
"fmt"
)
// TreeNode is a node of a binary tree holding an integer value and optional
// left/right children.
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
// findTarget reports whether the BST rooted at root contains two distinct
// nodes whose values sum to k.
//
// Approach: an iterative in-order traversal collects values into a slice.
// Because the tree is a BST, in-order yields an ascending slice, so no extra
// sort is needed before the two-pointer scan in checkTwoSum.
func findTarget(root *TreeNode, k int) bool {
	st := []*TreeNode{}
	nums := []int{}
	for len(st) > 0 || root != nil {
		if root != nil {
			st = append(st, root)
			root = root.Left
		} else {
			// Renamed from "len" — the original shadowed the builtin len.
			top := len(st) - 1
			cur := st[top]
			nums = append(nums, cur.Val)
			st = st[:top]
			root = cur.Right
		}
	}
	return checkTwoSum(nums, k)
}
// checkTwoSum reports whether any two distinct elements of the ascending
// slice nums sum to target, using a two-pointer scan from both ends.
// Empty and single-element slices correctly yield false.
func checkTwoSum(nums []int, target int) bool {
	// Renamed locals — the original shadowed the builtin len.
	i, j := 0, len(nums)-1
	for i < j {
		sum := nums[i] + nums[j]
		switch {
		case sum > target:
			j--
		case sum < target:
			i++
		default:
			return true
		}
	}
	return false
}
// main builds the sample BST
//
//	    5
//	   / \
//	  3   6
//	 / \   \
//	2   4   7
//
// and prints whether two of its values sum to 9.
func main() {
	root := &TreeNode{
		Val:   5,
		Left:  &TreeNode{Val: 3, Left: &TreeNode{Val: 2}, Right: &TreeNode{Val: 4}},
		Right: &TreeNode{Val: 6, Right: &TreeNode{Val: 7}},
	}
	fmt.Println(findTarget(root, 9))
}
|
package run
import (
"testing"
floc "gopkg.in/workanator/go-floc.v1"
)
// TestSequenceInactive verifies that a Sequence job performs no work when the
// flow was completed before the job runs: the counter carried in the state
// must remain zero.
func TestSequenceInactive(t *testing.T) {
// Construct the flow control object and complete it up front.
flow := floc.NewFlow()
defer flow.Release()
flow.Complete(nil)
// Construct the state object which carries the counter as its data.
state := floc.NewState(new(int))
defer state.Release()
// Construct the job under test.
job := Sequence(jobIncrement)
// Run the job against the already-completed flow.
floc.Run(flow, state, updateCounter, job)
if getCounter(state) != 0 {
t.Fatalf("%s expects counter to be zero", t.Name())
}
}
|
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package boomer
import (
"fmt"
"github.com/sschepens/gohistogram"
"strings"
"sync"
"time"
)
const (
// barChar is the glyph used to draw histogram bars.
barChar = "∎"
)
// report accumulates load-test results from a channel and renders a
// human-readable (or CSV) summary once all results are in.
type report struct {
avgTotal float64 // running sum of response times, in seconds
fastest float64 // fastest observed response time, seconds (0 until first result)
slowest float64 // slowest observed response time, seconds (0 until first result)
average float64 // mean response time, computed in finalize
rps float64 // requests per second, computed in finalize
results chan *result // incoming per-request results; closed by the producer
start time.Time // when collection started
total time.Duration // wall-clock span of the whole run, set in finalize
errorDist map[string]int // error message -> occurrence count
statusCodeDist map[int]int // HTTP status code -> occurrence count
sizeTotal int64 // total bytes received across all responses
output string // "csv" selects CSV output; anything else prints text
wg *sync.WaitGroup // released once the processing goroutine drains results
histo *gohistogram.NumericHistogram // streaming latency histogram
}
// newReport creates a report and starts a background goroutine that consumes
// the results channel until it is closed; call finalize afterwards to wait
// for the drain and print the summary.
// NOTE(review): the size parameter is unused here — presumably kept for
// call-site compatibility; confirm before removing.
func newReport(size int, results chan *result, output string) *report {
wg := &sync.WaitGroup{}
r := &report{
output: output,
results: results,
start: time.Now(),
statusCodeDist: make(map[int]int),
errorDist: make(map[string]int),
wg: wg,
histo: gohistogram.NewHistogram(10),
}
wg.Add(1)
go r.process()
return r
}
// process drains the results channel, folding each result into the running
// aggregates (error counts, fastest/slowest, histogram, totals). It runs
// until the channel is closed, then releases the wait group so finalize can
// proceed.
func (r *report) process() {
	for res := range r.results {
		if res.err != nil {
			r.errorDist[res.err.Error()]++
			continue
		}
		// Compute the duration in seconds once and reuse it; the original
		// recomputed res.duration.Seconds() three times.
		sec := res.duration.Seconds()
		if r.slowest == 0 || sec > r.slowest {
			r.slowest = sec
		}
		if r.fastest == 0 || r.fastest > sec {
			r.fastest = sec
		}
		r.histo.Add(sec)
		r.avgTotal += sec
		r.statusCodeDist[res.statusCode]++
		if res.contentLength > 0 {
			r.sizeTotal += int64(res.contentLength)
		}
	}
	r.wg.Done()
}
// finalize waits for the processing goroutine to drain the results channel,
// derives the aggregate metrics, and prints the report.
func (r *report) finalize() {
	r.wg.Wait()
	// time.Since is the idiomatic form of time.Now().Sub(r.start).
	r.total = time.Since(r.start)
	// Guard the divisions: with zero successful results the original stored
	// NaN in average (0/0).
	if count := float64(r.histo.Count()); count > 0 {
		r.rps = count / r.total.Seconds()
		r.average = r.avgTotal / count
	}
	r.print()
}
// print renders the report: CSV when requested, otherwise a text summary
// (only when at least one result was recorded) followed by status-code,
// histogram, and latency sections, and finally the error distribution when
// any errors occurred.
func (r *report) print() {
if r.output == "csv" {
r.printCSV()
return
}
if r.histo.Count() > 0 {
fmt.Printf("\nSummary:\n")
fmt.Printf(" Total:\t%4.4f secs.\n", r.total.Seconds())
fmt.Printf(" Slowest:\t%4.4f secs.\n", r.slowest)
fmt.Printf(" Fastest:\t%4.4f secs.\n", r.fastest)
fmt.Printf(" Average:\t%4.4f secs.\n", r.average)
fmt.Printf(" Requests/sec:\t%4.4f\n", r.rps)
if r.sizeTotal > 0 {
fmt.Printf(" Total Data Received:\t%d bytes.\n", r.sizeTotal)
fmt.Printf(" Response Size per Request:\t%d bytes.\n", r.sizeTotal/int64(r.histo.Count()))
}
r.printStatusCodes()
r.printHistogram()
r.printLatencies()
}
if len(r.errorDist) > 0 {
r.printErrors()
}
}
// printCSV is a stub: per-request latencies are no longer retained (only the
// streaming histogram is), so the original CSV dump is commented out.
func (r *report) printCSV() {
//for i, val := range r.lats {
// fmt.Printf("%v,%4.4f\n", i+1, val)
//}
}
// printLatencies prints selected percentiles of the latency distribution,
// skipping percentiles the histogram reports as zero.
func (r *report) printLatencies() {
	fmt.Printf("\nLatency distribution:\n")
	for _, pct := range []int{10, 25, 50, 75, 90, 95, 99} {
		latency := r.histo.Quantile(float64(pct) / 100.0)
		if latency > 0 {
			fmt.Printf(" %v%% in %4.4f secs.\n", pct, latency)
		}
	}
}
// printHistogram renders the response-time histogram, scaling each bar
// relative to the most populated bin (40 characters maximum).
func (r *report) printHistogram() {
	fmt.Printf("\nResponse time histogram:\n")
	bins := r.histo.Bins()
	// Guard: the original indexed bins[0] unconditionally and would panic on
	// an empty histogram.
	if len(bins) == 0 {
		return
	}
	max := bins[0].Count
	for i := 1; i < len(bins); i++ {
		if bins[i].Count > max {
			max = bins[i].Count
		}
	}
	for i := 0; i < len(bins); i++ {
		// Normalize bar lengths.
		var barLen uint64
		if max > 0 {
			barLen = bins[i].Count * 40 / max
		}
		fmt.Printf(" %4.3f [%v]\t|%v\n", bins[i].Value, bins[i].Count, strings.Repeat(barChar, int(barLen)))
	}
}
// printStatusCodes prints how many responses were seen per HTTP status code.
// Map iteration order is random, so lines may appear in any order.
func (r *report) printStatusCodes() {
	fmt.Printf("\nStatus code distribution:\n")
	for statusCode, count := range r.statusCodeDist {
		fmt.Printf(" [%d]\t%d responses\n", statusCode, count)
	}
}
// printErrors prints each distinct error message with its occurrence count.
func (r *report) printErrors() {
	fmt.Printf("\nError distribution:\n")
	for message, count := range r.errorDist {
		fmt.Printf(" [%d]\t%s\n", count, message)
	}
}
|
package impl
import (
"errors"
"fmt"
"net"
"sync"
)
// Connection wraps a raw TCP connection with buffered inbound/outbound
// message channels and close-once semantics.
type Connection struct {
tcpConn net.Conn // the underlying TCP connection
conns *map[string]net.Conn // shared registry of live connections, keyed by remote address
inChannel chan []byte // messages read from the socket
outChannel chan []byte // messages queued for broadcast by WriteLoop
closeChannel chan []byte // closed exactly once to signal shutdown to all loops
mutex sync.Mutex // guards isClosed
isClosed bool // true once closeChannel has been closed
}
// InitCreateConnection wraps tcpConn in a Connection, wires up its buffered
// message channels, and starts the background read and write loops.
func InitCreateConnection(tcpConn net.Conn, tcpConns *map[string]net.Conn) (conn *Connection, err error) {
	c := &Connection{
		tcpConn:      tcpConn,
		conns:        tcpConns,
		inChannel:    make(chan []byte, 1024),
		outChannel:   make(chan []byte, 1024),
		closeChannel: make(chan []byte, 1),
	}
	go c.ReadLoop()
	go c.WriteLoop()
	return c, nil
}
// ReadMessage returns the next inbound message, or an error once the
// connection has been closed.
func (conn *Connection) ReadMessage() (data []byte, err error) {
	select {
	case <-conn.closeChannel:
		return nil, errors.New("connection is closed")
	case data = <-conn.inChannel:
		return data, nil
	}
}
// WriteMessage queues data for broadcasting, or returns an error once the
// connection has been closed.
func (conn *Connection) WriteMessage(data []byte) (err error) {
	select {
	case <-conn.closeChannel:
		return errors.New("connection is closed")
	case conn.outChannel <- data:
		return nil
	}
}
// Close shuts down the TCP connection and closes the close-signal channel
// exactly once; safe to call multiple times.
func (conn *Connection) Close() {
	conn.tcpConn.Close()
	conn.mutex.Lock()
	defer conn.mutex.Unlock()
	if conn.isClosed {
		return
	}
	close(conn.closeChannel)
	conn.isClosed = true
}
// ReadLoop pumps bytes from the socket into inChannel until a read error or
// shutdown, then removes this connection from the registry and closes it.
func (conn *Connection) ReadLoop() {
	buf := make([]byte, 1024)
loop:
	for {
		n, err := conn.tcpConn.Read(buf)
		if err != nil {
			delete(*conn.conns, conn.tcpConn.RemoteAddr().String())
			break
		}
		// Bug fix: the original sent buf[:n] directly, so every message in
		// inChannel aliased the same backing array and was overwritten by the
		// next Read. Copy each message before handing it off.
		msg := make([]byte, n)
		copy(msg, buf[:n])
		select {
		case conn.inChannel <- msg:
		case <-conn.closeChannel:
			break loop
		}
	}
	conn.Close()
}
// WriteLoop drains outChannel and writes each message to every connection in
// the shared registry — i.e. each queued message is broadcast to all peers.
// NOTE(review): the broadcast presumably includes the sender itself; confirm
// that is intentional. On any peer write error that peer is removed from the
// registry and this connection shuts itself down.
func (conn *Connection) WriteLoop() {
var data []byte
var err error
var key string
var connObject net.Conn
for {
// Wait for the next outbound message or a shutdown signal.
select {
case data = <-conn.outChannel:
case <-conn.closeChannel:
goto ERR
}
// Fan the message out to every registered connection.
for key, connObject = range *conn.conns {
fmt.Println("connection connected is:", key, connObject)
if _, err = connObject.Write(data); err != nil {
delete(*conn.conns, key)
goto ERR
}
}
}
ERR:
conn.Close()
}
|
// Package main - задание пятого урока для курса go-core.
package main
import (
"fmt"
"go.core/lesson5/pkg/crawler"
"go.core/lesson5/pkg/crawler/spider"
"go.core/lesson5/pkg/index"
"go.core/lesson5/pkg/storage"
"go.core/lesson5/pkg/storage/bstree"
"strings"
)
// Engine bundles the search services: an inverted index for lookups and a
// storage backend holding the crawled documents.
type Engine struct {
Index index.Service
Storage storage.Storager
}
// main crawls the seed URLs, indexes the discovered documents, and then
// serves search queries read from stdin until input ends.
func main() {
	urls := []string{"https://golangs.org", "https://altech.online"}
	var binaryTree bstree.Tree
	// Crawler backed by the spider scanner.
	var s spider.Scanner
	c := crawler.New(s)
	// Engine wired up with the index and storage services.
	engine := Engine{
		Index:   index.New(),
		Storage: storage.New(&binaryTree),
	}
	fmt.Print("Сканирование сайтов ")
	docs, err := c.Scan(urls, 2)
	if err != nil {
		// The original swallowed this error silently; report it before exiting.
		fmt.Println("ошибка:", err)
		return
	}
	fmt.Println("завершено.")
	fmt.Print("Индексирование страниц ")
	for _, doc := range docs {
		engine.Index.Add(doc)
		engine.Storage.Add(doc)
	}
	fmt.Println("завершено.")
	var str string
	for {
		fmt.Print("Введите поисковый запрос: ")
		if _, err := fmt.Scanf("%s\n", &str); err != nil {
			fmt.Println("Программа завершила работу.")
			return
		}
		IDs := engine.Index.Find(strings.ToLower(str))
		var res []crawler.Document
		for _, id := range IDs {
			// "ok != false" replaced with the idiomatic "ok".
			if d, ok := engine.Storage.Document(id); ok {
				res = append(res, d)
			}
		}
		fmt.Printf("Результаты поиска по запросу \"%s\":\nНайдено всего: %d\n", str, len(res))
		for _, doc := range res {
			fmt.Println(doc)
		}
	}
}
|
package main
import (
"reflect"
"testing"
)
//func TestMain(t *testing.T) {
//
//}
// TestPreprocessURL checks that bare hosts gain an http:// prefix while URLs
// that already carry a scheme pass through unchanged.
func TestPreprocessURL(t *testing.T) {
	input := []string{"google.com", "http://someweb.com"}
	want := []string{"http://google.com", "http://someweb.com"}
	got := preprocessURL(input)
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("expected: %v, got: %v", want, got)
	}
}
|
package cache
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/aws/aws-sdk-go/service/resourcegroupstaggingapi"
"github.com/karlseguin/ccache/v2"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elbv2"
)
// Test_Cachable checks that read-style operations are cachable and that a
// mutating operation is not.
func Test_Cachable(t *testing.T) {
	for _, op := range []string{"DescribeTags", "ListTags", "GetSubnets"} {
		if !isCachable(op) {
			t.Errorf("%s should be isCachable", op)
		}
	}
	if isCachable("CreateTags") {
		t.Errorf("CreateTags should not be isCachable")
	}
}
// myCustomResolver redirects the ELB, EC2, and tagging endpoints to the local
// stub server; every other service falls through to the default resolver.
var myCustomResolver = func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
	switch service {
	case endpoints.ElasticloadbalancingServiceID:
		return endpoints.ResolvedEndpoint{URL: server.URL}, nil
	case endpoints.Ec2ServiceID:
		return endpoints.ResolvedEndpoint{URL: server.URL + "/ec2"}, nil
	case endpoints.TaggingServiceID:
		return endpoints.ResolvedEndpoint{URL: server.URL + "/tagging"}, nil
	default:
		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
	}
}
var server *httptest.Server
// newSession builds an AWS session with static test credentials whose
// endpoints resolve to the local stub server via myCustomResolver.
func newSession() *session.Session {
s := session.Must(session.NewSession(&aws.Config{
Region: aws.String("us-west-2"),
EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
Credentials: credentials.NewStaticCredentials("AKID", "SECRET_KEY", "TOKEN"),
}))
return s
}
// Test_CachedError verifies that error (HTTP 400) responses are never served
// from the cache: every repeated request must reach the stub server again.
func Test_CachedError(t *testing.T) {
///ThrottledException: Rate exceeded
server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(400)
rw.Write([]byte(`{ "code": "400", "message": "ThrottlingException"}`))
}))
defer server.Close()
s := newSession()
cacheCfg := NewConfig(10*time.Second, 1*time.Hour, 5000, 500)
AddCaching(s, cacheCfg)
svc := resourcegroupstaggingapi.New(s)
for i := 1; i < 10; i++ {
req, _ := svc.GetResourcesRequest(&resourcegroupstaggingapi.GetResourcesInput{})
err := req.Send()
if err == nil {
t.Errorf("400 error not received")
}
// Errors must always come from the wire, never from the cache.
if IsCacheHit(req.HTTPRequest.Context()) {
t.Errorf("400 error was received from cache")
}
}
}
// Test_Cache verifies that repeated DescribeInstances calls keep returning
// the canned reservation/instance payload (first call from the wire, later
// calls eligible for the cache).
func Test_Cache(t *testing.T) {
server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Write(describeInstancesResponse)
}))
defer server.Close()
s := newSession()
cacheCfg := NewConfig(10*time.Second, 1*time.Hour, 5000, 500)
AddCaching(s, cacheCfg)
svc := ec2.New(s)
for i := 1; i < 10; i++ {
descInstancesOutput, err := svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
if len(descInstancesOutput.Reservations) != 1 {
t.Errorf("DescribeInstances did not return 1 reservation")
}
if len(descInstancesOutput.Reservations[0].Instances) != 1 {
t.Errorf("DescribeInstances did not return 1 instance")
}
// The instance ID comes from the canned XML fixture below.
instanceId := "i-1234567890abcdef0"
if aws.StringValue(descInstancesOutput.Reservations[0].Instances[0].InstanceId) != instanceId {
t.Errorf("DescribeInstances returned InstanceId %v not %v",
aws.StringValue(descInstancesOutput.Reservations[0].Instances[0].InstanceId), instanceId)
}
}
}
var cacheHit = false
// Test_CacheFlush verifies that FlushCache("ec2") evicts cached responses:
// miss, then hit, then miss again after the flush.
func Test_CacheFlush(t *testing.T) {
server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Write(describeInstancesResponse)
}))
defer server.Close()
s := newSession()
cacheCfg := NewConfig(10*time.Second, 1*time.Hour, 5000, 500)
AddCaching(s, cacheCfg)
// The Complete handler asserts every request's actual cache-hit state
// against the cacheHit flag set just before the call.
s.Handlers.Complete.PushBack(func(r *request.Request) {
if IsCacheHit(r.HTTPRequest.Context()) != cacheHit {
t.Errorf("DescribeInstances expected cache hit %v, got %v", IsCacheHit(r.HTTPRequest.Context()), cacheHit)
}
})
svc := ec2.New(s)
// First call populates the cache.
cacheHit = false
_, err := svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
// Second identical call must be served from the cache.
cacheHit = true
_, err = svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
cacheCfg.FlushCache("ec2")
// After the flush the call must hit the wire again.
cacheHit = false
_, err = svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
}
// Test_BackgroundTTLPruning verifies that expired items remain counted until
// the background pruner runs (TTL 400ms, prune interval 500ms): just after
// TTL expiry the item is still present, and shortly after the prune pass the
// cache is empty. Timing-sensitive by design.
func Test_BackgroundTTLPruning(t *testing.T) {
server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Write(describeInstancesResponse)
}))
defer server.Close()
s := newSession()
cacheCfg := NewConfig(400*time.Millisecond, 500*time.Millisecond, 5000, 500)
AddCaching(s, cacheCfg)
s.Handlers.Complete.PushBack(func(r *request.Request) {
if IsCacheHit(r.HTTPRequest.Context()) != cacheHit {
t.Errorf("DescribeInstances expected cache hit %v, got %v", IsCacheHit(r.HTTPRequest.Context()), cacheHit)
}
})
svc := ec2.New(s)
// Miss, then hit, to ensure one entry sits in the cache.
cacheHit = false
_, err := svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
cacheHit = true
_, err = svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
c, ok := cacheCfg.caches.Load("ec2.DescribeInstances")
if !ok {
t.Errorf("DescribeInstances cache not found: %v", err)
}
cObj := c.(*ccache.Cache)
// TTL expired - should have 1 item in cache
time.Sleep(401 * time.Millisecond)
if cObj.ItemCount() < 1 {
t.Error("DescribeInstances cache had 0 items")
}
// Background pruning done - should have 0 items in cache
time.Sleep(100 * time.Millisecond)
if cObj.ItemCount() > 0 {
t.Error("DescribeInstances cache had more than 0 items")
}
}
// Test_FlushOperationCache verifies that flushing a single operation's cache
// (ec2.DescribeInstances) forces only that operation back to the wire while
// other cached operations (DescribeVolumes) keep serving hits.
func Test_FlushOperationCache(t *testing.T) {
	server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Write(describeInstancesResponse)
	}))
	defer server.Close()
	s := newSession()
	cacheCfg := NewConfig(10*time.Second, 1*time.Hour, 5000, 500)
	AddCaching(s, cacheCfg)
	// Assert every request's actual cache-hit state against the cacheHit flag.
	s.Handlers.Complete.PushBack(func(r *request.Request) {
		if IsCacheHit(r.HTTPRequest.Context()) != cacheHit {
			t.Errorf("DescribeInstances expected cache hit %v, got %v", IsCacheHit(r.HTTPRequest.Context()), cacheHit)
		}
	})
	svc := ec2.New(s)
	// First DescribeInstances call populates its cache.
	cacheHit = false
	_, err := svc.DescribeInstances(
		&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
	if err != nil {
		t.Errorf("DescribeInstances returned an unexpected error %v", err)
	}
	// First DescribeVolumes call populates its own, separate cache.
	cacheHit = false
	_, err = svc.DescribeVolumes(&ec2.DescribeVolumesInput{})
	if err != nil {
		// Fixed: this message previously named DescribeTags.
		t.Errorf("DescribeVolumes returned an unexpected error %v", err)
	}
	// Repeat DescribeInstances: cache hit.
	cacheHit = true
	_, err = svc.DescribeInstances(
		&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
	if err != nil {
		t.Errorf("DescribeInstances returned an unexpected error %v", err)
	}
	cacheCfg.FlushOperationCache("ec2", "DescribeInstances")
	// DescribeInstances was flushed: back to the wire.
	cacheHit = false
	_, err = svc.DescribeInstances(
		&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
	if err != nil {
		t.Errorf("DescribeInstances returned an unexpected error %v", err)
	}
	// DescribeVolumes was not flushed: still a cache hit.
	cacheHit = true
	_, err = svc.DescribeVolumes(&ec2.DescribeVolumesInput{})
	if err != nil {
		// Fixed: this message previously named DescribeTags.
		t.Errorf("DescribeVolumes returned an unexpected error %v", err)
	}
}
// Test_FlushSkipExcluded verifies that an operation marked with
// SetExcludeFlushing survives both FlushOperationCache and FlushCache, while
// non-excluded operations are evicted; the excluded entry still expires via
// its (short) TTL.
func Test_FlushSkipExcluded(t *testing.T) {
	server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.Write(describeInstancesResponse)
	}))
	defer server.Close()
	s := newSession()
	cacheCfg := NewConfig(10*time.Millisecond, 1*time.Hour, 5000, 500)
	cacheCfg.SetExcludeFlushing("ec2", "DescribeInstances", true)
	AddCaching(s, cacheCfg)
	// Assert every request's actual cache-hit state against the cacheHit flag.
	s.Handlers.Complete.PushBack(func(r *request.Request) {
		if IsCacheHit(r.HTTPRequest.Context()) != cacheHit {
			t.Errorf("DescribeInstances expected cache hit %v, got %v", IsCacheHit(r.HTTPRequest.Context()), cacheHit)
		}
	})
	svc := ec2.New(s)
	// Populate the DescribeInstances cache.
	cacheHit = false
	_, err := svc.DescribeInstances(
		&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
	if err != nil {
		t.Errorf("DescribeInstances returned an unexpected error %v", err)
	}
	// Populate the DescribeVolumes cache.
	cacheHit = false
	_, err = svc.DescribeVolumes(&ec2.DescribeVolumesInput{})
	if err != nil {
		// Fixed: this message previously named DescribeTags.
		t.Errorf("DescribeVolumes returned an unexpected error %v", err)
	}
	cacheHit = true
	_, err = svc.DescribeInstances(
		&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
	if err != nil {
		t.Errorf("DescribeInstances returned an unexpected error %v", err)
	}
	// Neither flush may evict the excluded DescribeInstances entry.
	cacheCfg.FlushOperationCache("ec2", "DescribeInstances")
	cacheCfg.FlushCache("ec2")
	cacheHit = true
	_, err = svc.DescribeInstances(
		&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
	if err != nil {
		t.Errorf("DescribeInstances returned an unexpected error %v", err)
	}
	// DescribeVolumes is not excluded, so the flush evicted it.
	cacheHit = false
	_, err = svc.DescribeVolumes(&ec2.DescribeVolumesInput{})
	if err != nil {
		// Fixed: this message previously named DescribeTags.
		t.Errorf("DescribeVolumes returned an unexpected error %v", err)
	}
	// After the 10ms TTL even the excluded entry expires.
	cacheHit = false
	time.Sleep(time.Millisecond * 11)
	_, err = svc.DescribeInstances(
		&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
	if err != nil {
		t.Errorf("DescribeInstances returned an unexpected error %v", err)
	}
}
// Test_IsMutating checks the default mutating classification of
// TerminateInstances and that SetCacheMutating can override it.
func Test_IsMutating(t *testing.T) {
	cfg := NewConfig(10*time.Second, 1*time.Hour, 5000, 500)
	if !cfg.isMutating("ec2", "TerminateInstances") {
		t.Errorf("expected TerminateInstances to be mutating")
	}
	cfg.SetCacheMutating("ec2", "TerminateInstances", false)
	if cfg.isMutating("ec2", "TerminateInstances") {
		t.Errorf("expected TerminateInstances to be non-mutating")
	}
}
// Test_IsExcluded checks that an operation is not flush-excluded by default
// and that SetExcludeFlushing marks it as excluded.
func Test_IsExcluded(t *testing.T) {
	cacheCfg := NewConfig(10*time.Second, 1*time.Hour, 5000, 500)
	if cacheCfg.isExcluded("ec2.DescribeInstanceTypes") {
		// Fixed: these messages previously named TerminateInstances, but the
		// operation under test is DescribeInstanceTypes.
		t.Errorf("expected DescribeInstanceTypes to not be excluded")
	}
	cacheCfg.SetExcludeFlushing("ec2", "DescribeInstanceTypes", true)
	if !cacheCfg.isExcluded("ec2.DescribeInstanceTypes") {
		t.Errorf("expected DescribeInstanceTypes to be excluded")
	}
}
// Test_AutoCacheFlush verifies that a mutating (non Get/Describe/List) call
// automatically flushes the cache of its own service, while a mutating call
// on a different service leaves the ec2 cache intact.
func Test_AutoCacheFlush(t *testing.T) {
server = httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.Write(describeInstancesResponse)
}))
defer server.Close()
s := newSession()
cacheCfg := NewConfig(10*time.Second, 1*time.Hour, 5000, 500)
AddCaching(s, cacheCfg)
// Assert every request's actual cache-hit state against the cacheHit flag.
s.Handlers.Complete.PushBack(func(r *request.Request) {
if IsCacheHit(r.HTTPRequest.Context()) != cacheHit {
t.Errorf("%v expected cache hit %v, got %v", r.Operation.Name, IsCacheHit(r.HTTPRequest.Context()), cacheHit)
}
})
svc := ec2.New(s)
// Miss, then hit, to put one entry into the ec2 cache.
cacheHit = false
_, err := svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
cacheHit = true
_, err = svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
// Make non Get/Describe/List query, should flush ec2 cache
cacheHit = false
_, err = svc.CreateKeyPair(&ec2.CreateKeyPairInput{KeyName: aws.String("name")})
if err != nil {
t.Errorf("CreateKeyPair returned an unexpected error %v", err)
}
cacheHit = false
_, err = svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
// Make non Get/Describe/List query to non-ec2 service, should not flush ec2 cache
cacheHit = false
elbv2svc := elbv2.New(s)
_, err = elbv2svc.DeleteLoadBalancer(&elbv2.DeleteLoadBalancerInput{LoadBalancerArn: aws.String("arn")})
if err != nil {
t.Errorf("DeleteLoadBalancer returned an unexpected error %v", err)
}
cacheHit = true
_, err = svc.DescribeInstances(
&ec2.DescribeInstancesInput{InstanceIds: []*string{aws.String("i-0ace172143b1159d6")}})
if err != nil {
t.Errorf("DescribeInstances returned an unexpected error %v", err)
}
}
// describeInstancesResponse is a canned EC2 DescribeInstances XML payload
// (API version 2016-11-15, one reservation with one running instance,
// InstanceId i-1234567890abcdef0) served by the stub HTTP servers above.
var describeInstancesResponse = []byte(`<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>8f7724cf-496f-496e-8fe3-example</requestId>
<reservationSet>
<item>
<reservationId>r-1234567890abcdef0</reservationId>
<ownerId>123456789012</ownerId>
<groupSet/>
<instancesSet>
<item>
<instanceId>i-1234567890abcdef0</instanceId>
<imageId>ami-bff32ccc</imageId>
<instanceState>
<code>16</code>
<name>running</name>
</instanceState>
<privateDnsName>ip-192-168-1-88.eu-west-1.compute.internal</privateDnsName>
<dnsName>ec2-54-194-252-215.eu-west-1.compute.amazonaws.com</dnsName>
<reason/>
<keyName>my_keypair</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<productCodes/>
<instanceType>t2.micro</instanceType>
<launchTime>2018-05-08T16:46:19.000Z</launchTime>
<placement>
<availabilityZone>eu-west-1c</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
<monitoring>
<state>disabled</state>
</monitoring>
<subnetId>subnet-56f5f633</subnetId>
<vpcId>vpc-11112222</vpcId>
<privateIpAddress>192.168.1.88</privateIpAddress>
<ipAddress>54.194.252.215</ipAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-e4076980</groupId>
<groupName>SecurityGroup1</groupName>
</item>
</groupSet>
<architecture>x86_64</architecture>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/xvda</rootDeviceName>
<blockDeviceMapping>
<item>
<deviceName>/dev/xvda</deviceName>
<ebs>
<volumeId>vol-1234567890abcdef0</volumeId>
<status>attached</status>
<attachTime>2015-12-22T10:44:09.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</ebs>
</item>
</blockDeviceMapping>
<virtualizationType>hvm</virtualizationType>
<clientToken>xMcwG14507example</clientToken>
<tagSet>
<item>
<key>Name</key>
<value>Server_1</value>
</item>
</tagSet>
<hypervisor>xen</hypervisor>
<networkInterfaceSet>
<item>
<networkInterfaceId>eni-551ba033</networkInterfaceId>
<subnetId>subnet-56f5f633</subnetId>
<vpcId>vpc-11112222</vpcId>
<description>Primary network interface</description>
<ownerId>123456789012</ownerId>
<status>in-use</status>
<macAddress>02:dd:2c:5e:01:69</macAddress>
<privateIpAddress>192.168.1.88</privateIpAddress>
<privateDnsName>ip-192-168-1-88.eu-west-1.compute.internal</privateDnsName>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-e4076980</groupId>
<groupName>SecurityGroup1</groupName>
</item>
</groupSet>
<attachment>
<attachmentId>eni-attach-39697adc</attachmentId>
<deviceIndex>0</deviceIndex>
<status>attached</status>
<attachTime>2018-05-08T16:46:19.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
<association>
<publicIp>54.194.252.215</publicIp>
<publicDnsName>ec2-54-194-252-215.eu-west-1.compute.amazonaws.com</publicDnsName>
<ipOwnerId>amazon</ipOwnerId>
</association>
<privateIpAddressesSet>
<item>
<privateIpAddress>192.168.1.88</privateIpAddress>
<privateDnsName>ip-192-168-1-88.eu-west-1.compute.internal</privateDnsName>
<primary>true</primary>
<association>
<publicIp>54.194.252.215</publicIp>
<publicDnsName>ec2-54-194-252-215.eu-west-1.compute.amazonaws.com</publicDnsName>
<ipOwnerId>amazon</ipOwnerId>
</association>
</item>
</privateIpAddressesSet>
<ipv6AddressesSet>
<item>
<ipv6Address>2001:db8:1234:1a2b::123</ipv6Address>
</item>
</ipv6AddressesSet>
</item>
</networkInterfaceSet>
<iamInstanceProfile>
<arn>arn:aws:iam::123456789012:instance-profile/AdminRole</arn>
<id>ABCAJEDNCAA64SSD123AB</id>
</iamInstanceProfile>
<ebsOptimized>false</ebsOptimized>
<cpuOptions>
<coreCount>1</coreCount>
<threadsPerCore>1</threadsPerCore>
</cpuOptions>
</item>
</instancesSet>
</item>
</reservationSet>
</DescribeInstancesResponse>`)
|
package polar2cartesian
import (
"fmt"
"math"
"sync"
)
// polar is a point in polar coordinates: a radius and an angle θ in degrees.
type polar struct {
radius float64
θ float64
}
// cartesian is a point in Cartesian (x, y) coordinates.
type cartesian struct {
x float64
y float64
}
// String renders the point as "x=…, y=…" with two decimal places.
func (c *cartesian) String() string {
	formatted := fmt.Sprintf("x=%.2f, y=%.2f", c.x, c.y)
	return formatted
}
// wg waits for all three question/answer exchanges in interact to complete.
var wg sync.WaitGroup
// Polar2Cartesian drives the question/answer pipeline: it starts the solver
// goroutine, submits three polar coordinates via interact, and waits until
// all three answers have been printed.
//
// Fix: only the sender of a channel may close it. The previous version also
// closed the answers channel here (the receiving side); once questions was
// closed, the solver goroutine would receive a zero value and then panic by
// sending on the already-closed answers channel.
func Polar2Cartesian() {
	questions := make(chan polar)
	defer close(questions)
	answers := createSolver(questions)
	wg.Add(3)
	interact(questions, answers)
	wg.Wait()
}
// createSolver starts a goroutine that converts each polar coordinate read
// from questions (θ in degrees) into Cartesian form and sends it on the
// returned channel.
//
// Fix: the goroutine now ranges over questions, so it terminates cleanly when
// the caller closes the channel. The previous infinite receive loop kept
// running after close, computing on zero values and leaking the goroutine.
func createSolver(questions chan polar) chan cartesian {
	result := make(chan cartesian)
	go func() {
		for polarCoord := range questions {
			θ := polarCoord.θ * math.Pi / 180.0
			x := polarCoord.radius * math.Cos(θ)
			y := polarCoord.radius * math.Sin(θ)
			result <- cartesian{x, y}
		}
	}()
	return result
}
// interact sends three polar coordinates into questions, prints the Cartesian
// answer for each, and marks one wait-group unit done per exchange.
func interact(questions chan polar, answers chan cartesian) {
questions <- polar{float64(5), float64(30.5)} // send a question
coord := <-answers // receive its answer
fmt.Println(coord.String())
wg.Done() // one exchange finished
questions <- polar{float64(5), float64(-30.25)}
coord = <-answers
fmt.Println(coord.String())
wg.Done()
questions <- polar{float64(1.0), float64(90)}
coord = <-answers
fmt.Println(coord.String())
wg.Done()
}
|
package backend
import "errors"
var (
// ErrTableAlreadyExists occurs when creating a table that already exists.
ErrTableAlreadyExists = errors.New("the table already exists")
// ErrTableNotFound occurs when the requested table does not exist.
ErrTableNotFound = errors.New("there is no such table")
// ErrIndexNotFound occurs when a table doesn't contain the given column.
ErrIndexNotFound = errors.New("there is no index corresponding column name")
)
|
package server
import (
"encoding/gob"
"server/libs/common/event"
"server/util"
)
var (
// core holds the most recently constructed Server; set by NewServer.
// NOTE(review): package-level mutable state — presumably only one Server is
// created per process; confirm.
core *Server
)
// NewServer builds a Server for the given application and app id, wires up
// all of its subsystems (events, client list, kernel, channels, helpers,
// modules), registers the remote/handler endpoints, and records the instance
// in the package-level core variable.
func NewServer(app Apper, id int32) *Server {
s := &Server{}
// Remember the instance globally; other package code reads core.
core = s
s.AppId = id
s.WaitGroup = &util.WaitGroupWrapper{}
s.exitChannel = make(chan struct{})
s.shutdown = make(chan struct{})
s.Eventer = NewEvent()
s.clientList = NewClientList()
s.apper = app
s.Emitter = event.NewEventList()
s.kernel = NewKernel()
s.channel = make(map[string]*Channel, 32)
s.s2chelper = NewS2CHelper()
s.c2shelper = &C2SHelper{}
s.teleport = &TeleportHelper{}
s.globalHelper = NewGlobalDataHelper()
s.modules = make(map[string]Moduler)
// Expose the helpers over the RPC layers.
RegisterRemote("S2CHelper", s.s2chelper)
RegisterRemote("Teleport", s.teleport)
RegisterRemote("GlobalHelper", s.globalHelper)
RegisterHandler("C2SHelper", s.c2shelper)
return s
}
// init registers the empty-interface slice type with gob so values of that
// type can be encoded/decoded in network payloads.
func init() {
gob.Register([]interface{}{})
}
|
package meetup
import (
"beer/internal/domain/model"
"beer/internal/tools/customerror"
"context"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"testing"
)
// TestCalculateBeer checks the box calculation against a table of known cases.
func TestCalculateBeer(t *testing.T) {
	cases := []struct {
		beerPerson  float64
		totalGuest  int64
		expectedBox int64
	}{
		{beerPerson: 0.75, totalGuest: 100, expectedBox: 13},
		{beerPerson: 0.75, totalGuest: 79, expectedBox: 10},
		{beerPerson: 0.75, totalGuest: 480, expectedBox: 60},
		{beerPerson: 1, totalGuest: 59, expectedBox: 10},
		{beerPerson: 1, totalGuest: 60, expectedBox: 10},
		{beerPerson: 1, totalGuest: 61, expectedBox: 11},
		{beerPerson: 2, totalGuest: 119, expectedBox: 40},
		{beerPerson: 2, totalGuest: 120, expectedBox: 40},
		{beerPerson: 2, totalGuest: 121, expectedBox: 41},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.expectedBox, calculateTotalBeer(tc.beerPerson, tc.totalGuest))
	}
}
// TestServiceGetTotalBeerErrorGettingWeather checks that a weather-repository
// failure is propagated to the caller.
func TestServiceGetTotalBeerErrorGettingWeather(t *testing.T) {
	repo := &mockWeatherRepository{}
	service := NewService(repo)
	repo.Mock.On("GetCurrentWeather", mock.Anything).Return(nil, customerror.NewExternalServiceError("Service Unavailable", "weather_api", 503))
	_, err := service.GetTotalBeer(context.Background(), &model.MeetUp{TotalGuests: 100, Location: model.Location{Latitude: -13, Longitude: 12}})
	assert.NotNil(t, err)
	assert.Equal(t, "Service Unavailable", err.Error())
}
// TestServiceGetTotalBeerLatitudeGreaterThan90 checks that latitudes above 90
// are rejected.
func TestServiceGetTotalBeerLatitudeGreaterThan90(t *testing.T) {
	repo := &mockWeatherRepository{}
	service := NewService(repo)
	_, err := service.GetTotalBeer(context.Background(), &model.MeetUp{TotalGuests: 100, Location: model.Location{Latitude: 100, Longitude: 12}})
	assert.NotNil(t, err)
	assert.Equal(t, "invalid latitude", err.Error())
}
// TestServiceGetTotalBeerLatitudeLessThan90Negative checks that latitudes
// below -90 are rejected.
func TestServiceGetTotalBeerLatitudeLessThan90Negative(t *testing.T) {
	repo := &mockWeatherRepository{}
	service := NewService(repo)
	_, err := service.GetTotalBeer(context.Background(), &model.MeetUp{TotalGuests: 100, Location: model.Location{Latitude: -100, Longitude: 12}})
	assert.NotNil(t, err)
	assert.Equal(t, "invalid latitude", err.Error())
}
// TestServiceGetTotalBeerLongitudeGreaterThan90 checks that out-of-range
// positive longitudes are rejected.
func TestServiceGetTotalBeerLongitudeGreaterThan90(t *testing.T) {
	repo := &mockWeatherRepository{}
	service := NewService(repo)
	_, err := service.GetTotalBeer(context.Background(), &model.MeetUp{TotalGuests: 100, Location: model.Location{Latitude: 12, Longitude: 100}})
	assert.NotNil(t, err)
	assert.Equal(t, "invalid longitude", err.Error())
}
// TestServiceGetTotalBeerLongitudeLessThan90Negative checks that out-of-range
// negative longitudes are rejected.
func TestServiceGetTotalBeerLongitudeLessThan90Negative(t *testing.T) {
	repo := &mockWeatherRepository{}
	service := NewService(repo)
	_, err := service.GetTotalBeer(context.Background(), &model.MeetUp{TotalGuests: 100, Location: model.Location{Latitude: 12, Longitude: -100}})
	assert.NotNil(t, err)
	assert.Equal(t, "invalid longitude", err.Error())
}
|
package sdkconnector
import (
"os"
"github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt"
"github.com/hyperledger/fabric-sdk-go/pkg/common/errors/retry"
packager "github.com/hyperledger/fabric-sdk-go/pkg/fab/ccpackager/gopackager"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
)
// InstallCC packages the chaincode found at chaincodePath and installs it on
// the peer identified by peerURL, acting as the org's admin identity.
func InstallCC(setup *OrgSetup, chaincodePath string, chaincodeName string, chaincodeVersion string, peerURL string) error {
	adminContext := setup.sdk.Context(fabsdk.WithUser(setup.AdminName), fabsdk.WithOrg(setup.OrgName))
	client, err := resmgmt.New(adminContext)
	if err != nil {
		return err
	}
	pkg, err := packager.NewCCPackage(chaincodePath, os.Getenv("GOPATH"))
	if err != nil {
		return err
	}
	request := resmgmt.InstallCCRequest{
		Name:    chaincodeName,
		Path:    chaincodePath,
		Version: chaincodeVersion,
		Package: pkg,
	}
	if _, err = client.InstallCC(request, resmgmt.WithRetry(retry.DefaultResMgmtOpts), resmgmt.WithTargetFilter(&urlTargetFilter{url: peerURL})); err != nil {
		return err
	}
	return nil
}
|
package main
import "fmt"
// 279. 完全平方数
// 给定正整数 n,找到若干个完全平方数(比如 1, 4, 9, 16, ...)使得它们的和等于 n。你需要让组成和的完全平方数的个数最少。
// https://leetcode-cn.com/problems/perfect-squares/
// main prints the minimum number of perfect squares summing to 3.
func main() {
	n := 3
	fmt.Println(numSquares(n))
}
// Approach 1: dynamic programming.
//
// numSquares returns the minimum number of perfect squares (1, 4, 9, 16, ...)
// whose sum is n (n >= 1).
//
// dp[i] is the answer for i. It starts from dp[i-1]+1 (take square 1), then
// each larger square x*x <= i is tried: dp[i] = min(dp[i], dp[i-x*x]+1).
func numSquares(n int) int {
	dp := make([]int, n+1)
	dp[1] = 1
	for i := 2; i <= n; i++ {
		dp[i] = dp[i-1] + 1
		for x := 2; i-x*x >= 0; x++ {
			// Inlined min comparison; the original called a hand-rolled
			// getMin helper, making the function self-contained.
			if cand := dp[i-x*x] + 1; cand < dp[i] {
				dp[i] = cand
			}
		}
	}
	return dp[n]
}
// Method 2: BFS.
// Treat n as the root; subtracting one square yields the next level.
// The answer is the depth of the shallowest node that reaches exactly 0.
func numSquares2(n int) (level int) {
	if n == 1 {
		return 1
	}
	queue := []int{n}
	// visited prunes remainders already enqueued at a shallower/equal depth.
	visited := make(map[int]bool)
	for len(queue) > 0 {
		size := len(queue)
		level++
		for i := 0; i < size; i++ {
			cur := queue[i]
			for j := 1; j*j <= cur; j++ {
				next := cur - j*j
				if next == 0 {
					// cur itself is a perfect square: done at this depth.
					return level
				}
				if !visited[next] {
					queue = append(queue, next)
					visited[next] = true
				}
			}
		}
		// Drop the level just processed. The original never trimmed the
		// queue, so every earlier level was re-scanned on each pass.
		queue = queue[size:]
	}
	return level
}
// getMin reports the smaller of a and b.
func getMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package main
import (
"fmt"
"strconv"
)
/*
Go语言内置包之strconv
实现了基本数据类型和其字符串表示的相互转换,主要有以下常用函数:
Atoi()、Itoa()、parse系列、format系列、append系列
string与int类型转换
将字符串类型的整数转换为int类型
func Atoi(s string) (i int, err error)
将int类型数据转换为对应的字符串表示
func Itoa(i int) string
Parse系列函数
Parse类函数用于转换字符串为给定类型的值
func ParseBool(str string) (value bool, err error)
它接受1、0、t、f、T、F、true、false、True、False、TRUE、FALSE;否则返回错误。
func ParseInt(s string, base int, bitSize int) (i int64, err error)
func ParseUint(s string, base int, bitSize int) (n uint64, err error)
func ParseFloat(s string, bitSize int) (f float64, err error)
Format系列函数
实现了将给定类型数据格式化为string类型数据的功能
*/
// main exercises each strconv Parse* helper on a sample literal and prints
// the typed result. Parse errors are deliberately ignored in this demo.
func main() {
	boolVal, _ := strconv.ParseBool("true")
	floatVal, _ := strconv.ParseFloat("3.1415", 64)
	intVal, _ := strconv.ParseInt("-2", 10, 64)
	uintVal, _ := strconv.ParseUint("10", 10, 64)
	fmt.Println(boolVal)
	fmt.Println(floatVal)
	fmt.Println(intVal)
	fmt.Println(uintVal)
}
|
/*
* Copyright 2020-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package route
import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"sync"

	"github.com/opencord/voltha-lib-go/v4/pkg/log"
	"github.com/opencord/voltha-protos/v4/go/voltha"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
var ErrNoRoute = errors.New("no route")
// Hop represent a route hop: one device traversal from its ingress port to
// its egress port.
type Hop struct {
	DeviceID string // device traversed by this hop
	Ingress  uint32 // port the traffic enters on
	Egress   uint32 // port the traffic leaves on
}
// PathID is the identification of a route between two logical ports.
type PathID struct {
	Ingress uint32
	Egress  uint32
}
// OFPortLink is an ingress/egress pair of OpenFlow port numbers.
type OFPortLink struct {
	Ingress uint32
	Egress  uint32
}
// listDevicePortsFunc returns device ports for the given device ID.
type listDevicePortsFunc func(ctx context.Context, id string) (map[uint32]*voltha.Port, error)
// DeviceRoutes represent the set of routes between logical ports of a logical device.
type DeviceRoutes struct {
	logicalDeviceID string
	rootDeviceID    string
	listDevicePorts listDevicePortsFunc
	// logicalPorts maps OpenFlow port number -> logical port.
	logicalPorts map[uint32]*voltha.LogicalPort
	// RootPorts is the set (keyed by itself) of NNI logical port numbers.
	RootPorts     map[uint32]uint32
	rootPortsLock sync.RWMutex
	// Routes caches the hop list for each (ingress, egress) pair.
	Routes         map[PathID][]Hop
	routeBuildLock sync.RWMutex
	// devicesPonPorts caches the PON ports seen per device ID.
	devicesPonPorts map[string][]*voltha.Port
	// childConnectionPort caches, per child device ID, the parent-side port
	// connecting to that child.
	childConnectionPort map[string]uint32
}
// NewDeviceRoutes creates device graph instance with empty route/cache maps.
func NewDeviceRoutes(logicalDeviceID, rootDeviceID string, deviceMgr listDevicePortsFunc) *DeviceRoutes {
	return &DeviceRoutes{
		logicalDeviceID:     logicalDeviceID,
		rootDeviceID:        rootDeviceID,
		listDevicePorts:     deviceMgr,
		RootPorts:           make(map[uint32]uint32),
		Routes:              make(map[PathID][]Hop),
		devicesPonPorts:     make(map[string][]*voltha.Port),
		childConnectionPort: make(map[string]uint32),
		logicalPorts:        make(map[uint32]*voltha.LogicalPort),
	}
}
//IsRootPort returns true if the port is a root (NNI) port on a logical device.
func (dr *DeviceRoutes) IsRootPort(port uint32) bool {
	dr.rootPortsLock.RLock()
	defer dr.rootPortsLock.RUnlock()
	_, ok := dr.RootPorts[port]
	return ok
}
// GetRoute returns the route between the ingress and egress logical ports,
// computing and caching it (together with its reverse) on first use. The
// computed path is the two-hop NNI<->UNI route through the parent/child PON
// ports.
func (dr *DeviceRoutes) GetRoute(ctx context.Context, ingress, egress uint32) ([]Hop, error) {
	dr.routeBuildLock.Lock()
	defer dr.routeBuildLock.Unlock()
	// Fast path: route already cached.
	if route, exist := dr.Routes[PathID{Ingress: ingress, Egress: egress}]; exist {
		return route, nil
	}
	// Work out which endpoint is the UNI and which the NNI.
	uniPort, nniPort, err := dr.getLogicalPorts(ingress, egress)
	if err != nil {
		return nil, fmt.Errorf("no route from:%d to:%d %w", ingress, egress, err)
	}
	childPonPort, err := dr.getChildPonPort(ctx, uniPort.DeviceId)
	if err != nil {
		return nil, err
	}
	rootDevicePonPort, err := dr.getParentPonPort(ctx, uniPort.DeviceId)
	if err != nil {
		return nil, err
	}
	// Cache downstream (NNI->UNI) and its reverse, then return the requested
	// direction.
	// NOTE(review): the cache key uses uniPort.DevicePortNo while the lookup
	// below uses the requested logical port numbers; these appear expected to
	// coincide for UNIs — confirm, otherwise this returns a nil route.
	dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: uniPort.DevicePortNo}] = []Hop{
		{DeviceID: nniPort.DeviceId, Ingress: nniPort.DevicePortNo, Egress: rootDevicePonPort},
		{DeviceID: uniPort.DeviceId, Ingress: childPonPort, Egress: uniPort.DevicePortNo},
	}
	dr.Routes[PathID{Ingress: uniPort.DevicePortNo, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
		dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: uniPort.DevicePortNo}])
	return dr.Routes[PathID{Ingress: ingress, Egress: egress}], nil
}
//ComputeRoutes calculates all the routes between the logical ports. This will clear up any existing route.
//
// On any failure the deferred cleanup clears the partially-built routes so a
// later flow request or port add/delete triggers a rebuild.
func (dr *DeviceRoutes) ComputeRoutes(ctx context.Context, lps map[uint32]*voltha.LogicalPort) error {
	dr.routeBuildLock.Lock()
	defer dr.routeBuildLock.Unlock()

	logger.Debugw(ctx, "computing-all-routes", log.Fields{"len-logical-ports": len(lps)})

	var err error
	defer func() {
		// On error, clear the routes - any flow request or a port add/delete will trigger the rebuild
		if err != nil {
			dr.reset()
		}
	}()

	if len(lps) < 2 {
		// Assign to err (not just return) so the deferred cleanup observes it.
		err = fmt.Errorf("not enough logical port :%w", ErrNoRoute)
		return err
	}

	dr.reset()

	// Setup the physical ports to logical ports map, the nni ports as well as the root ports map
	physPortToLogicalPortMap := make(map[string]uint32)
	nniPorts := make([]*voltha.LogicalPort, 0)
	for _, lp := range lps {
		physPortToLogicalPortMap[concatDeviceIDPortID(lp.DeviceId, lp.DevicePortNo)] = lp.OfpPort.PortNo
		if lp.RootPort {
			nniPorts = append(nniPorts, lp)
			dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
		}
		dr.logicalPorts[lp.OfpPort.PortNo] = lp
	}

	if len(nniPorts) == 0 {
		err = fmt.Errorf("no nni port :%w", ErrNoRoute)
		return err
	}

	var copyFromNNIPort *voltha.LogicalPort
	for idx, nniPort := range nniPorts {
		if idx == 0 {
			copyFromNNIPort = nniPort
		} else if len(dr.Routes) > 0 {
			// Subsequent NNI ports mirror the routes of the first one.
			dr.copyFromExistingNNIRoutes(nniPort, copyFromNNIPort)
			return nil
		}
		// Get root device.
		// BUG FIX: the original used ":=" below, shadowing the function-scoped
		// err — the deferred reset-on-error therefore never fired. Assign with
		// "=" so errors are visible to the deferred cleanup.
		rootDeviceID := nniPort.DeviceId
		var rootDevicePorts map[uint32]*voltha.Port
		rootDevicePorts, err = dr.getDeviceWithCacheUpdate(ctx, nniPort.DeviceId)
		if err != nil {
			return err
		}
		if len(rootDevicePorts) == 0 {
			err = status.Errorf(codes.FailedPrecondition, "no-port-%s", rootDeviceID)
			return err
		}
		for _, rootDevicePort := range rootDevicePorts {
			if rootDevicePort.Type == voltha.Port_PON_OLT {
				logger.Debugw(ctx, "peers", log.Fields{"root-device-id": rootDeviceID, "port-no": rootDevicePort.PortNo, "len-peers": len(rootDevicePort.Peers)})
				for _, rootDevicePeer := range rootDevicePort.Peers {
					childDeviceID := rootDevicePeer.DeviceId
					var childDevicePorts map[uint32]*voltha.Port
					childDevicePorts, err = dr.getDeviceWithCacheUpdate(ctx, rootDevicePeer.DeviceId)
					if err != nil {
						return err
					}
					var childPonPort uint32
					childPonPort, err = dr.getChildPonPort(ctx, childDeviceID)
					if err != nil {
						return err
					}
					for _, childDevicePort := range childDevicePorts {
						if childDevicePort.Type == voltha.Port_ETHERNET_UNI {
							childLogicalPort, exist := physPortToLogicalPortMap[concatDeviceIDPortID(childDeviceID, childDevicePort.PortNo)]
							if !exist {
								// This can happen if this logical port has not been created yet for that device
								continue
							}
							dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: childLogicalPort}] = []Hop{
								{DeviceID: rootDeviceID, Ingress: nniPort.DevicePortNo, Egress: rootDevicePort.PortNo},
								{DeviceID: childDeviceID, Ingress: childPonPort, Egress: childDevicePort.PortNo},
							}
							dr.Routes[PathID{Ingress: childLogicalPort, Egress: nniPort.OfpPort.PortNo}] = getReverseRoute(
								dr.Routes[PathID{Ingress: nniPort.OfpPort.PortNo, Egress: childLogicalPort}])
						}
					}
				}
			}
		}
	}
	return nil
}
// AddPort augments the current set of routes with new routes corresponding to the logical port "lp". If the routes have
// not been built yet then use logical port "lps" to compute all current routes (lps includes lp).
// Dispatches to AddNNIPort for root ports and AddUNIPort otherwise.
func (dr *DeviceRoutes) AddPort(ctx context.Context, lp *voltha.LogicalPort, deviceID string, devicePorts map[uint32]*voltha.Port, lps map[uint32]*voltha.LogicalPort) error {
	logger.Debugw(ctx, "add-port-to-routes", log.Fields{"port": lp, "count-logical-ports": len(lps)})

	// Adding NNI port
	if lp.RootPort {
		return dr.AddNNIPort(ctx, lp, deviceID, devicePorts, lps)
	}

	// Adding UNI port
	return dr.AddUNIPort(ctx, lp, deviceID, devicePorts, lps)
}
// AddUNIPort setup routes between the logical UNI port lp and all registered NNI ports.
// deviceID/devicePorts describe the parent (root) device; lps is the full set
// of logical ports including lp.
func (dr *DeviceRoutes) AddUNIPort(ctx context.Context, lp *voltha.LogicalPort, deviceID string, devicePorts map[uint32]*voltha.Port, lps map[uint32]*voltha.LogicalPort) error {
	logger.Debugw(ctx, "add-uni-port-to-routes", log.Fields{"port": lp, "count-logical-ports": len(lps)})

	dr.routeBuildLock.Lock()
	defer dr.routeBuildLock.Unlock()

	// Add port to logical ports
	dr.logicalPorts[lp.OfpPort.PortNo] = lp

	// Update internal structures with device data
	dr.updateCache(deviceID, devicePorts)

	// Resolve the PON ports on both sides of the parent<->child link.
	childPonPort, err := dr.getChildPonPort(ctx, lp.DeviceId)
	if err != nil {
		return err
	}
	rootDevicePonPort, err := dr.getParentPonPort(ctx, deviceID)
	if err != nil {
		return err
	}

	// Build a bidirectional route between lp and every NNI logical port.
	for _, lPort := range lps {
		if lPort.RootPort {
			dr.Routes[PathID{Ingress: lPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}] = []Hop{
				{DeviceID: lPort.DeviceId, Ingress: lPort.DevicePortNo, Egress: rootDevicePonPort},
				{DeviceID: lp.DeviceId, Ingress: childPonPort, Egress: lp.DevicePortNo},
			}
			dr.Routes[PathID{Ingress: lp.OfpPort.PortNo, Egress: lPort.OfpPort.PortNo}] = getReverseRoute(
				dr.Routes[PathID{Ingress: lPort.OfpPort.PortNo, Egress: lp.OfpPort.PortNo}])
		}
	}
	return nil
}
// AddNNIPort setup routes between the logical NNI port lp and all registered UNI ports.
// deviceID/devicePorts describe the root device owning lp; lps is the full set
// of logical ports. Errors fetching a child device are skipped rather than
// aborting, so partially-known topologies still get routes.
func (dr *DeviceRoutes) AddNNIPort(ctx context.Context, lp *voltha.LogicalPort, deviceID string, devicePorts map[uint32]*voltha.Port, lps map[uint32]*voltha.LogicalPort) error {
	logger.Debugw(ctx, "add-port-to-routes", log.Fields{"port": lp, "logical-ports-count": len(lps), "device-id": deviceID})

	dr.routeBuildLock.Lock()
	defer dr.routeBuildLock.Unlock()

	// Update internal structures with device data
	dr.updateCache(deviceID, devicePorts)

	// Setup the physical ports to logical ports map, the nni ports as well as the root ports map.
	// NOTE: the loop variable below intentionally shadows parameter lp inside
	// the loop only; uses of lp after the loop refer to the parameter again.
	physPortToLogicalPortMap := make(map[string]uint32)
	for _, lp := range lps {
		physPortToLogicalPortMap[concatDeviceIDPortID(lp.DeviceId, lp.DevicePortNo)] = lp.OfpPort.PortNo
		if lp.RootPort {
			dr.rootPortsLock.Lock()
			dr.RootPorts[lp.OfpPort.PortNo] = lp.OfpPort.PortNo
			dr.rootPortsLock.Unlock()
		}
		dr.logicalPorts[lp.OfpPort.PortNo] = lp
	}
	// For every PON OLT port on the root device, connect lp to each child UNI.
	for _, rootDevicePort := range devicePorts {
		if rootDevicePort.Type == voltha.Port_PON_OLT {
			logger.Debugw(ctx, "peers", log.Fields{"root-device-id": deviceID, "port-no": rootDevicePort.PortNo, "len-peers": len(rootDevicePort.Peers)})
			for _, rootDevicePeer := range rootDevicePort.Peers {
				childDeviceID := rootDevicePeer.DeviceId
				childDevicePorts, err := dr.getDeviceWithCacheUpdate(ctx, rootDevicePeer.DeviceId)
				if err != nil {
					continue
				}
				childPonPort, err := dr.getChildPonPort(ctx, childDeviceID)
				if err != nil {
					continue
				}
				for _, childDevicePort := range childDevicePorts {
					childLogicalPort, exist := physPortToLogicalPortMap[concatDeviceIDPortID(childDeviceID, childDevicePort.PortNo)]
					if !exist {
						// This can happen if this logical port has not been created yet for that device
						continue
					}
					if childDevicePort.Type == voltha.Port_ETHERNET_UNI {
						dr.Routes[PathID{Ingress: lp.OfpPort.PortNo, Egress: childLogicalPort}] = []Hop{
							{DeviceID: deviceID, Ingress: lp.DevicePortNo, Egress: rootDevicePort.PortNo},
							{DeviceID: childDeviceID, Ingress: childPonPort, Egress: childDevicePort.PortNo},
						}
						dr.Routes[PathID{Ingress: childLogicalPort, Egress: lp.OfpPort.PortNo}] = getReverseRoute(
							dr.Routes[PathID{Ingress: lp.OfpPort.PortNo, Egress: childLogicalPort}])
					}
				}
			}
		}
	}
	return nil
}
// AddAllPorts setups up new routes using all ports on the device. lps includes the device's logical port
func (dr *DeviceRoutes) AddAllPorts(ctx context.Context, deviceID string, devicePorts map[uint32]*voltha.Port, lps map[uint32]*voltha.LogicalPort) error {
	logger.Debugw(ctx, "add-all-port-to-routes", log.Fields{"logical-ports-count": len(lps), "device-id": deviceID})
	for _, logicalPort := range lps {
		// Only logical ports belonging to this device are added.
		if logicalPort.DeviceId != deviceID {
			continue
		}
		if err := dr.AddPort(ctx, logicalPort, deviceID, devicePorts, lps); err != nil {
			return err
		}
	}
	return nil
}
// Print prints routes to the debug log. The string formatting work is only
// done when debug level is enabled.
func (dr *DeviceRoutes) Print(ctx context.Context) error {
	dr.routeBuildLock.RLock()
	defer dr.routeBuildLock.RUnlock()
	logger.Debugw(ctx, "Print", log.Fields{"logical-device-id": dr.logicalDeviceID, "logical-ports": dr.logicalPorts})
	if logger.V(log.DebugLevel) {
		output := ""
		routeNumber := 1
		for k, v := range dr.Routes {
			key := fmt.Sprintf("LP:%d->LP:%d", k.Ingress, k.Egress)
			val := ""
			for _, i := range v {
				val += fmt.Sprintf("{%d->%s->%d},", i.Ingress, i.DeviceID, i.Egress)
			}
			// Strip the trailing comma. Guarded: the original sliced
			// val[:len(val)-1] unconditionally and panicked on an empty hop list.
			if len(val) > 0 {
				val = val[:len(val)-1]
			}
			output += fmt.Sprintf("%d:{%s=>%s} ", routeNumber, key, fmt.Sprintf("[%s]", val))
			routeNumber++
		}
		if len(dr.Routes) == 0 {
			logger.Debugw(ctx, "no-routes-found", log.Fields{"logical-device-id": dr.logicalDeviceID})
		} else {
			logger.Debugw(ctx, "graph_routes", log.Fields{"logical-device-id": dr.logicalDeviceID, "Routes": output})
		}
	}
	return nil
}
// isUpToDate returns true if device is up to date: the tracked logical ports
// match ldPorts and the route count equals the expected full mesh size
// (every NNI paired with every UNI, both directions).
func (dr *DeviceRoutes) isUpToDate(ldPorts map[uint32]*voltha.LogicalPort) bool {
	dr.routeBuildLock.Lock()
	defer dr.routeBuildLock.Unlock()
	numNNI, numUNI := 0, 0
	if ldPorts != nil {
		if len(dr.logicalPorts) != len(ldPorts) {
			return false
		}
		numNNI = len(dr.RootPorts)
		numUNI = len(ldPorts) - numNNI
	}
	// nil ldPorts expects zero routes (numNNI*numUNI*2 == 0).
	return len(dr.Routes) == numNNI*numUNI*2
}
// IsRoutesEmpty returns true if there are no routes
func (dr *DeviceRoutes) IsRoutesEmpty() bool {
	dr.routeBuildLock.RLock()
	count := len(dr.Routes)
	dr.routeBuildLock.RUnlock()
	return count == 0
}
// GetHalfRoute returns a half route that has only the egress hop set or the ingress hop set.
//
// With nniAsEgress true it returns {empty, lastHop} for the first route whose
// egress is an NNI port (optionally matching a specific egress). Otherwise it
// returns {firstHop, empty} for the first route whose ingress matches.
func (dr *DeviceRoutes) GetHalfRoute(nniAsEgress bool, ingress, egress uint32) ([]Hop, error) {
	dr.routeBuildLock.RLock()
	defer dr.routeBuildLock.RUnlock()
	routes := make([]Hop, 0)
	for routeLink, path := range dr.Routes {
		// If nniAsEgress is set then the half route will only have the egress hop set where the egress port needs to be
		// an NNI port
		if nniAsEgress {
			// Prioritize a specific egress NNI port if set
			if egress != 0 && dr.IsRootPort(egress) && routeLink.Egress == egress {
				routes = append(routes, Hop{})
				routes = append(routes, path[1])
				return routes, nil
			}
			if egress == 0 && dr.IsRootPort(routeLink.Egress) {
				routes = append(routes, Hop{})
				routes = append(routes, path[1])
				return routes, nil
			}
		} else {
			// Here we use the first route whose ingress port matches the ingress input parameter
			if ingress != 0 && routeLink.Ingress == ingress {
				routes = append(routes, path[0])
				routes = append(routes, Hop{})
				return routes, nil
			}
		}
	}
	return routes, fmt.Errorf("no half route found for ingress port %d, egress port %d and nni as egress %t", ingress, egress, nniAsEgress)
}
//getDeviceWithCacheUpdate returns the device ports from the model and updates
//the PON ports cache of that device.
func (dr *DeviceRoutes) getDeviceWithCacheUpdate(ctx context.Context, deviceID string) (map[uint32]*voltha.Port, error) {
	devicePorts, err := dr.listDevicePorts(ctx, deviceID)
	if err != nil {
		logger.Errorw(ctx, "device-not-found", log.Fields{"device-id": deviceID, "error": err})
		return nil, err
	}
	dr.updateCache(deviceID, devicePorts)
	return devicePorts, nil
}
//copyFromExistingNNIRoutes copies routes from an existing set of NNI routes,
//rewriting the NNI-side hop to use the new NNI port. Assumes every route has
//exactly two hops with the NNI hop first (downstream) or last (upstream).
func (dr *DeviceRoutes) copyFromExistingNNIRoutes(newNNIPort *voltha.LogicalPort, copyFromNNIPort *voltha.LogicalPort) {
	updatedRoutes := make(map[PathID][]Hop)
	for key, val := range dr.Routes {
		// Downstream route starting at the template NNI: clone for the new NNI.
		if key.Ingress == copyFromNNIPort.OfpPort.PortNo {
			updatedRoutes[PathID{Ingress: newNNIPort.OfpPort.PortNo, Egress: key.Egress}] = []Hop{
				{DeviceID: newNNIPort.DeviceId, Ingress: newNNIPort.DevicePortNo, Egress: val[0].Egress},
				val[1],
			}
		}
		// Upstream route ending at the template NNI: clone for the new NNI.
		if key.Egress == copyFromNNIPort.OfpPort.PortNo {
			updatedRoutes[PathID{Ingress: key.Ingress, Egress: newNNIPort.OfpPort.PortNo}] = []Hop{
				val[0],
				{DeviceID: newNNIPort.DeviceId, Ingress: val[1].Ingress, Egress: newNNIPort.DevicePortNo},
			}
		}
		// Existing routes are preserved as-is.
		updatedRoutes[key] = val
	}
	dr.Routes = updatedRoutes
}
// reset cleans up the device graph, dropping all routes and cached state.
func (dr *DeviceRoutes) reset() {
	dr.rootPortsLock.Lock()
	dr.RootPorts = map[uint32]uint32{}
	dr.rootPortsLock.Unlock()
	dr.Routes = map[PathID][]Hop{}
	dr.logicalPorts = map[uint32]*voltha.LogicalPort{}
	dr.devicesPonPorts = map[string][]*voltha.Port{}
	dr.childConnectionPort = map[string]uint32{}
}
//concatDeviceIDPortID formats a port key as "<deviceID>:<portNo>".
//strconv avoids the fmt boxing/reflection overhead on this frequently-called
//key builder.
func concatDeviceIDPortID(deviceID string, portNo uint32) string {
	return deviceID + ":" + strconv.FormatUint(uint64(portNo), 10)
}
//getReverseRoute returns the reverse of the route: hops in opposite order
//with ingress/egress swapped on each hop.
func getReverseRoute(route []Hop) []Hop {
	n := len(route)
	reverse := make([]Hop, n)
	for i, hop := range route {
		reverse[n-1-i] = Hop{DeviceID: hop.DeviceID, Ingress: hop.Egress, Egress: hop.Ingress}
	}
	return reverse
}
// getChildPonPort returns the child PON port number either from cache or from the model. If it is from the model then
// it updates the PON ports map of that device.
func (dr *DeviceRoutes) getChildPonPort(ctx context.Context, deviceID string) (uint32, error) {
	if port, exist := dr.devicesPonPorts[deviceID]; exist {
		// Return only the first PON port of that child device
		return port[0].PortNo, nil
	}

	// Get child device from model; this populates dr.devicesPonPorts via updateCache.
	if _, err := dr.getDeviceWithCacheUpdate(ctx, deviceID); err != nil {
		logger.Errorw(ctx, "device-not-found", log.Fields{"device-id": deviceID, "error": err})
		return 0, err
	}

	// Try again
	if port, exist := dr.devicesPonPorts[deviceID]; exist {
		// Return only the first PON port of that child device
		return port[0].PortNo, nil
	}

	return 0, fmt.Errorf("pon port not found %s", deviceID)
}
// getParentPonPort returns the parent PON port of the child device, from the
// childConnectionPort cache or after refreshing it from the root device model.
func (dr *DeviceRoutes) getParentPonPort(ctx context.Context, childDeviceID string) (uint32, error) {
	if pNo, exist := dr.childConnectionPort[childDeviceID]; exist {
		return pNo, nil
	}

	// Get parent device from the model; updateCache fills childConnectionPort
	// from the root device's peer links.
	if _, err := dr.getDeviceWithCacheUpdate(ctx, dr.rootDeviceID); err != nil {
		logger.Errorw(ctx, "device-not-found", log.Fields{"device-id": dr.rootDeviceID, "error": err})
		return 0, err
	}
	// Try again
	if pNo, exist := dr.childConnectionPort[childDeviceID]; exist {
		return pNo, nil
	}
	return 0, fmt.Errorf("pon port associated with child device %s not found", childDeviceID)
}
// updateCache records the device's PON ports and, from their peer links,
// the parent-side connection port for each child device. For an ONU PON port
// the peer is the parent; for an OLT PON port the peers are the children.
func (dr *DeviceRoutes) updateCache(deviceID string, devicePorts map[uint32]*voltha.Port) {
	for _, port := range devicePorts {
		if port.Type == voltha.Port_PON_ONU || port.Type == voltha.Port_PON_OLT {
			dr.devicesPonPorts[deviceID] = append(dr.devicesPonPorts[deviceID], port)
			for _, peer := range port.Peers {
				if port.Type == voltha.Port_PON_ONU {
					// Child device: remember the parent port it connects through.
					dr.childConnectionPort[port.DeviceId] = peer.PortNo
				} else {
					// Root device: remember, per child, the OLT port facing it.
					dr.childConnectionPort[peer.DeviceId] = port.PortNo
				}
			}
		}
	}
}
// getLogicalPorts resolves the ingress/egress logical port numbers into the
// UNI and NNI logical ports, whichever order the caller supplied them in.
func (dr *DeviceRoutes) getLogicalPorts(ingress, egress uint32) (uniPort, nniPort *voltha.LogicalPort, err error) {
	inPort, ok := dr.logicalPorts[ingress]
	if !ok {
		return nil, nil, fmt.Errorf("ingress port %d not found", ingress)
	}
	outPort, ok := dr.logicalPorts[egress]
	if !ok {
		return nil, nil, fmt.Errorf("egress port %d not found", egress)
	}
	// A root port is the NNI side; the other endpoint is the UNI.
	if inPort.RootPort {
		return outPort, inPort, nil
	}
	return inPort, outPort, nil
}
|
package dorisloader
import (
"context"
)
// bulkWorker buffers rows from the processor and commits them in batches.
type bulkWorker struct {
	p           *BulkProcessor
	i           int // worker index within the processor
	bulkActions int // commit threshold by row count (<=0 disables)
	bulkSize    int // commit threshold by byte size (<=0 disables)
	service     *BulkService
	flushC      chan struct{} // receives manual flush requests
	flushAckC   chan struct{} // signals flush completion back to the requester
}
// newBulkWorker creates a new bulkWorker instance bound to processor p with
// index i, copying the processor's commit thresholds and target db/table.
func newBulkWorker(p *BulkProcessor, i int) *bulkWorker {
	return &bulkWorker{
		p:           p,
		i:           i,
		bulkActions: p.bulkActions,
		bulkSize:    p.bulkSize,
		service:     NewBulkService(p.c).DB(p.db).Table(p.table),
		flushC:      make(chan struct{}),
		flushAckC:   make(chan struct{}),
	}
}
// work waits for bulk requests and manual flush calls on the respective
// channels and is invoked as a goroutine when the bulk processor is started.
// It exits when the processor's rows channel is closed, committing any
// remaining buffered rows first.
func (w *bulkWorker) work(ctx context.Context) {
	defer func() {
		w.p.workerWg.Done()
		// NOTE(review): flushC is closed here by its receiver while the flush
		// caller presumably sends on it — confirm there is no send-after-close
		// race when a flush coincides with shutdown.
		close(w.flushAckC)
		close(w.flushC)
	}()

	var stop bool
	for !stop {
		var err error
		select {
		case row, open := <-w.p.rows:
			if open {
				// Buffer the row; commit once either threshold is reached.
				w.service.Add(row)
				if w.commitRequired() {
					err = w.commit(ctx)
				}
			} else {
				// Channel closed: Stop.
				stop = true
				if w.service.NumberOfRows() > 0 {
					err = w.commit(ctx)
				}
			}
		case <-w.flushC:
			// Commit outstanding requests
			if w.service.NumberOfRows() > 0 {
				err = w.commit(ctx)
			}
			// Acknowledge the manual flush.
			w.flushAckC <- struct{}{}
		}
		if err != nil {
			if !stop {
				// TODO
			}
		}
	}
}
// commit commits the bulk requests in the given service,
// invoking callbacks as specified. The commit is retried with the
// processor's backoff policy; the final error (if any) is returned.
func (w *bulkWorker) commit(ctx context.Context) error {
	//var res *BulkResponse

	// commitFunc will commit bulk requests and, on failure, be retried
	// via exponential backoff
	commitFunc := func() error {
		var err error

		// Save requests because they will be reset in service.Do
		_, err = w.service.Do(ctx)
		if err != nil {
			return err
		}
		return nil
	}
	// notifyFunc will be called if retry fails
	notifyFunc := func(err error) {
		// TODO
	}

	// Commit bulk requests
	err := RetryNotify(commitFunc, w.p.backoff, notifyFunc)
	if err != nil {
		// TODO
	}

	return err
}
// commitRequired reports whether the buffered rows hit either configured
// threshold: row count (bulkActions) or estimated byte size (bulkSize).
// Thresholds of zero or below are disabled. Short-circuit evaluation is
// preserved so EstimatedSizeInBytes is only consulted when needed.
func (w *bulkWorker) commitRequired() bool {
	return (w.bulkActions > 0 && w.service.NumberOfRows() >= w.bulkActions) ||
		(w.bulkSize > 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize))
}
|
package main
import (
"bytes"
"math/rand"
"net"
"testing"
)
// TestServer drives a live MQTT broker on :1883 through a raw
// CONNECT/SUBSCRIBE/PUBLISH/DISCONNECT exchange over TCP, asserting each
// expected broker response byte-for-byte. Requires a broker listening locally.
func TestServer(t *testing.T) {
	connect := []byte{
		// fixed header:
		0x10, // CONNECT
		0x1d, // remaining length (29 bytes)
		// variable header:
		0x00, 0x06, // protocol name length
		0x4d, 0x51, 0x49, 0x73, 0x64, 0x70, // protocol name "MQIsdp"
		0x03, // mqtt protocol version 3
		0x01, // connect flags (clean session: 1)
		0x00, 0x3c, // keep alive timer: 60s
		// payload
		0x00, 0x0f, // client id length
		0x48, 0x49, 0x4d, 0x51, 0x54, 0x54, 0x2d, 0x54, 0x65, 0x73, 0x74, // client id "HIMQTT-Test"
		0x00, 0x00, 0x00, 0x00} // id appendix (num_client as hex)
	conn, err := net.Dial("tcp", ":1883")
	if err != nil {
		t.Errorf("TCP conn error: %v", err)
		return
	}
	// Close on every exit path; the original leaked the connection on each
	// early error return below. Write errors surface as read failures next.
	defer conn.Close()
	conn.Write(connect)
	connack := []byte{
		// fixed header:
		0x20, // CONNACK
		0x02, // remaining length (2 bytes)
		// variable header:
		0x00, 0x00} // return code 0 (Connection Accepted)
	buf := make([]byte, len(connack))
	n, err := conn.Read(buf)
	if err != nil {
		t.Errorf("TCP conn read err: %v", err)
		return
	}
	if n != len(connack) || !bytes.Equal(connack, buf) {
		t.Errorf("MQTT CONNACK failed:\nExpected: %v\nGot : %v", connack, buf)
		return
	}
	//////////////////////////////////////////////////////////////////////////////
	subscribe := []byte{
		// fixed header:
		0x82, // SUBSCRIBE (qos 1)
		0x08, // remaining length (8 bytes)
		// variable header:
		0x12, 0x34, // message id
		0x00, 0x03, // topic name length
		0x61, 0x2f, 0x62, // topic "a/b"
		0x00} // qos 0
	conn.Write(subscribe)
	suback := []byte{
		// fixed header:
		0x90, // SUBACK
		0x03, // remaining length (3 bytes)
		// variable header:
		0x12, 0x34, // message id
		// payload:
		0x00} // granted qos (0)
	buf = make([]byte, len(suback))
	n, err = conn.Read(buf)
	if err != nil {
		t.Errorf("TCP conn read err: %v", err)
		return
	}
	if n != len(suback) || !bytes.Equal(suback, buf) {
		t.Errorf("MQTT SUBACK failed:\nExpected: %v\nGot : %v", suback, buf)
		return
	}
	//////////////////////////////////////////////////////////////////////////////
	publishHead := []byte{
		// fixed header:
		0x30, // PUBLISH (qos 0)
		0x45, // remaining length (69 bytes)
		// variable header:
		0x00, 0x03, // topic name length
		0x61, 0x2f, 0x62} // topic "a/b"
	publishBody := make([]byte, 64)
	rand.Read(publishBody)
	conn.Write(publishHead)
	conn.Write(publishBody)
	//////////////////////////////////////////////////////////////////////////////
	// The broker must echo the publication back to the subscriber.
	buf = make([]byte, 7)
	n, err = conn.Read(buf)
	if err != nil {
		t.Errorf("TCP conn read err: %v", err)
		return
	}
	if n != 7 || !bytes.Equal(publishHead, buf) {
		t.Errorf("MQTT PUBLISH HEAD recv failed:\nExpected: %v\nGot : %v", publishHead, buf)
		return
	}
	buf = make([]byte, 64)
	n, err = conn.Read(buf)
	if err != nil {
		t.Errorf("TCP conn read err: %v", err)
		return
	}
	if n != 64 || !bytes.Equal(publishBody, buf) {
		t.Errorf("MQTT PUBLISH BODY recv failed:\nExpected: %v\nGot : %v", publishBody, buf)
		return
	}
	//////////////////////////////////////////////////////////////////////////////
	disconnect := []byte{
		// fixed header:
		0xe0, // DISCONNECT (qos 0)
		0x00} // remaining length (0 bytes)
	conn.Write(disconnect)
}
|
package main
import "fmt"
// main shows how one untyped constant adapts to several numeric target types.
func main() {
	const occupancyLimit = 12
	var (
		occupancyLimit1 uint8   = occupancyLimit
		occupancyLimit2 int64   = occupancyLimit
		occupancyLimit3 float32 = occupancyLimit
	)
	fmt.Println(occupancyLimit1, occupancyLimit2, occupancyLimit3)
}
|
package pov
import (
"errors"
"time"
"github.com/qlcchain/go-qlc/common"
"github.com/qlcchain/go-qlc/common/types"
)
// ConsensusFake is a stub PoV consensus engine used for testing; it accepts
// headers whose nonce equals their height.
type ConsensusFake struct {
	chainR PovConsensusChainReader
}
// NewConsensusFake builds a fake consensus engine backed by chainR.
func NewConsensusFake(chainR PovConsensusChainReader) *ConsensusFake {
	return &ConsensusFake{chainR: chainR}
}
// Init is a no-op for the fake engine.
func (c *ConsensusFake) Init() error {
	return nil
}
// Start is a no-op for the fake engine.
func (c *ConsensusFake) Start() error {
	return nil
}
// Stop is a no-op for the fake engine.
func (c *ConsensusFake) Stop() error {
	return nil
}
// PrepareHeader fills in the header's target before sealing.
func (c *ConsensusFake) PrepareHeader(header *types.PovHeader) error {
	target, err := c.calcNextRequiredTarget(header)
	if err != nil {
		return err
	}
	header.Target = target
	return nil
}
// FinalizeHeader is a no-op for the fake engine.
func (c *ConsensusFake) FinalizeHeader(header *types.PovHeader) error {
	return nil
}
// VerifyHeader accepts a header only when its nonce equals its height —
// the fake "proof" that SealHeader produces.
func (c *ConsensusFake) VerifyHeader(header *types.PovHeader) error {
	if header.GetNonce() != header.GetHeight() {
		return errors.New("bad nonce")
	}
	return nil
}
// SealHeader asynchronously "mines" the header: after a one-second delay it
// sets nonce = height, signs the vote hash with cbAccount, and offers the
// sealed copy on resultCh. quitCh aborts the wait; a full resultCh drops the
// result rather than blocking.
func (c *ConsensusFake) SealHeader(header *types.PovHeader, cbAccount *types.Account, quitCh chan struct{}, resultCh chan<- *types.PovHeader) error {
	go func() {
		// Work on a copy so the caller's header is never mutated.
		copyHdr := header.Copy()
		select {
		case <-quitCh:
		case <-time.After(time.Second):
			copyHdr.Nonce = copyHdr.GetHeight()
			voteHash := copyHdr.ComputeVoteHash()
			copyHdr.VoteSignature = cbAccount.Sign(voteHash)
			select {
			case resultCh <- copyHdr:
			default:
			}
		}
	}()
	return nil
}
// calcNextRequiredTarget always returns the genesis target; the fake engine
// performs no difficulty adjustment.
func (c *ConsensusFake) calcNextRequiredTarget(header *types.PovHeader) (types.Signature, error) {
	var targetSig types.Signature
	err := targetSig.FromBigInt(common.PovGenesisTargetInt)
	if err != nil {
		return types.ZeroSignature, err
	}
	return targetSig, nil
}
|
// Copyright (c) 2020 Hirotsuna Mizuno. All rights reserved.
// Use of this source code is governed by the MIT license that can be found in
// the LICENSE file.
package speedio_test
import (
"io"
"testing"
"time"
"github.com/tunabay/go-randdata"
"github.com/tunabay/go-speedio"
)
// TestMeterReader_test1 streams ~500 KB of random data through a MeterReader
// in throttled chunks while a background goroutine samples the measured bit
// rate, then logs the totals.
func TestMeterReader_test1(t *testing.T) {
	t.Parallel()

	src := randdata.New(randdata.Binary, 0, 500000)
	r := speedio.NewMeterReader(src)

	// Sample the bit rate twice a second until the read loop finishes.
	done := make(chan struct{})
	go func() {
		ticker := time.NewTicker(time.Second / 2)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				t.Log("bitrate:", r.BitRate())
			case <-done:
				return
			}
		}
	}()

	// Read 20 KB every 200 ms until the source is exhausted.
	buf := make([]byte, 20000)
	sum := 0
	r.Start()
	ticker := time.NewTicker(time.Second / 5)
	for {
		<-ticker.C
		n, err := io.ReadFull(r, buf)
		if 0 < n {
			sum += n
		}
		if err != nil {
			// EOF / short final read are the expected termination signals.
			if err != io.EOF && err != io.ErrUnexpectedEOF {
				t.Error(err)
			}
			break
		}
		if n < 1 {
			t.Errorf("unexpected n: %d", n)
			break
		}
	}
	ticker.Stop()
	if err := r.Close(); err != nil {
		t.Error(err)
	}
	close(done)

	bc, et, br := r.Total()
	t.Logf("total(n=%d): %v, %v, %v", sum, bc, et, br)
}
|
package main
import "time"
// Network is a timestamped list of addresses.
type Network struct {
	Time      time.Time // when this snapshot was taken
	Addresses []Address
}

// Address groups an IP with its MAC and the vendor string associated with it.
type Address struct {
	IP     string
	MAC    string
	Vendor string
}

// VendorRecord maps a MAC address prefix to a vendor name.
type VendorRecord struct {
	MACPrefix string
	Vendor    string
}
|
package agent
import (
	"io"
	"net"

	"shellbin/internal/logger"
)
// tcpListener accepts agent connections on a TCP port and publishes each
// authenticated client on aChan.
type tcpListener struct {
	port  string     // TCP port to bind on all interfaces
	aChan chan Agent // receives each new agent connection
}
// Listen binds 0.0.0.0:<port> and accepts connections forever, spawning one
// handler goroutine per connection. Panics if the port cannot be bound.
func (t tcpListener) Listen() {
	l, err := net.Listen("tcp", "0.0.0.0:"+t.port)
	if err != nil {
		panic(err.Error())
	}
	defer l.Close()
	for {
		c, err := l.Accept()
		if err != nil {
			logger.Write(err.Error())
			// Bug fix: the original fell through and spawned the handler with
			// a nil connection, which would panic on the first Read.
			continue
		}
		go t.newConnHandler(c)
	}
}
// newConnHandler reads the 16-byte token from a fresh connection, wraps it in
// a tcpClient with its I/O goroutines, and hands it to the agent channel.
func (t tcpListener) newConnHandler(conn net.Conn) {
	buff := make([]byte, 16)
	// io.ReadFull guarantees the full 16-byte token: a bare Read may legally
	// return fewer bytes on a healthy TCP connection, which the original
	// treated as a bad token and dropped.
	if _, err := io.ReadFull(conn, buff); err != nil {
		logger.Write(err.Error())
		conn.Close()
		// Bug fix: the original fell through after the error and kept using
		// the closed connection.
		return
	}
	c := tcpClient{
		conn:      conn,
		token:     string(buff),
		readChan:  make(chan []byte, 16),
		writeChan: make(chan []byte, 16),
	}
	go c.read()
	go c.write()
	t.aChan <- c
}
|
package bag01
// New creates a 0/1 knapsack instance for items with weights itemsW and a
// bag capacity of w. Panics when the item list is empty or the capacity is zero.
func New(itemsW []int, w int) *Bag01 {
	if len(itemsW) == 0 || w == 0 {
		panic("error params")
	}
	// One reachability slot for every total weight 0..w.
	return &Bag01{itemsW, w, make([]bool, w+1)}
}
// Bag01 is a 0/1 knapsack problem instance: given a fixed set of items, find
// the maximum total weight that fits in the bag.
type Bag01 struct {
	// itemsW holds the weight of each item.
	itemsW []int
	// wCap is the bag's weight capacity.
	wCap int
	// status[j] reports whether total weight j is reachable (indexed by weight).
	status []bool
}
// MaxWeight solves the knapsack by dynamic programming over the boolean
// reachable-weight table and returns the heaviest reachable total <= wCap.
func (b *Bag01) MaxWeight() int {
	n := len(b.itemsW)
	// Layer 0: taking nothing is always reachable.
	b.status[0] = true
	// Taking only item 0 — guarded: the original indexed status[itemsW[0]]
	// unconditionally and panicked when the first item exceeds the capacity.
	if b.itemsW[0] <= b.wCap {
		b.status[b.itemsW[0]] = true
	}
	// Remaining items: sweep weights downwards so each item is used at most once.
	for i := 1; i < n; i++ {
		for j := b.wCap - b.itemsW[i]; j >= 0; j-- {
			// Weight j was reachable without item i, so j+itemsW[i] is reachable with it.
			if b.status[j] {
				b.status[j+b.itemsW[i]] = true
			}
		}
	}
	// Highest reachable weight wins.
	for j := b.wCap; j >= 0; j-- {
		if b.status[j] {
			return j
		}
	}
	return 0
}
|
package fields
import (
"encoding/json"
"reflect"
rc_fields "github.com/square/p2/pkg/rc/fields"
"github.com/square/p2/pkg/types"
"k8s.io/kubernetes/pkg/labels"
)
// Types stored in the actual pod cluster document
type ID string               // GUID of a pod cluster
type AvailabilityZone string // geographical/logical zone label
type ClusterName string      // human-readable cluster name
type Annotations map[string]interface{}
type MinHealthPercentage int
// label keys used by pod selector, re-exported from the types package so
// callers of this package need not import it.
const (
	AvailabilityZoneLabel = types.AvailabilityZoneLabel
	ClusterNameLabel      = types.ClusterNameLabel
	PodIDLabel            = types.PodIDLabel
)
// String returns the raw string form of the cluster ID.
func (id ID) String() string {
	return string(id)
}
// String returns the raw string form of the availability zone.
func (az AvailabilityZone) String() string {
	return string(az)
}
// String returns the raw string form of the cluster name.
func (cn ClusterName) String() string {
	return string(cn)
}
// PodCluster is the domain model of a cluster of pods sharing a pod ID,
// availability zone, and name.
type PodCluster struct {
	// GUID for this cluster
	ID ID

	// The ID of the pods that the cluster contains
	PodID types.PodID

	// Represents a region the pod cluster inhabits. P2 doesn't use this
	// value but it is useful for implementations that care about
	// geographical location of pod clusters
	AvailabilityZone AvailabilityZone

	// Human-readable name for the pod cluster. Must be unique within a
	// (PodID, AvailabilityZone) space
	Name ClusterName

	// Selector to identify the pods that are members of this pod cluster
	PodSelector labels.Selector

	// AllocationStrategy tweaks certain characteristic about how pods
	// within this cluster are managed. For example the "static" strategy will
	// never transfer a pod from one node to another without human
	// intervention whereas the "dynamic" strategy will
	AllocationStrategy rc_fields.Strategy

	// Free-form annotations for implementation-specific information on top
	// of pod clusters
	Annotations Annotations

	// Minimum health percentage that this pod cluster should have
	MinHealthPercentage MinHealthPercentage
}
// Equals reports whether two pod clusters are semantically identical. Two nil
// clusters are equal; nil never equals non-nil. Selectors are compared by
// their string form since labels.Selector is an interface.
func (pc *PodCluster) Equals(other *PodCluster) bool {
	if pc == nil && other == nil {
		return true
	} else if other == nil || pc == nil {
		return false
	}

	if pc.Name != other.Name ||
		pc.PodID != other.PodID ||
		pc.ID != other.ID ||
		pc.AvailabilityZone != other.AvailabilityZone {
		return false
	}

	// Exactly one selector nil -> unequal.
	if pc.PodSelector != nil && other.PodSelector == nil ||
		pc.PodSelector == nil && other.PodSelector != nil {
		return false
	}

	if pc.PodSelector != nil && other.PodSelector != nil &&
		pc.PodSelector.String() != other.PodSelector.String() {
		return false
	}

	if pc.AllocationStrategy != other.AllocationStrategy {
		return false
	}

	// Bug fix: MinHealthPercentage is part of the cluster definition but was
	// omitted from the comparison.
	if pc.MinHealthPercentage != other.MinHealthPercentage {
		return false
	}

	return reflect.DeepEqual(pc.Annotations, other.Annotations)
}
// Unfortunately due to weirdness of marshaling label selectors, we have to
// implement it ourselves. RawPodCluster mimics PodCluster but has a string
// type for PodSelector instead of labels.Selector
type RawPodCluster struct {
	ID                  ID                  `json:"id"`
	PodID               types.PodID         `json:"pod_id"`
	AvailabilityZone    AvailabilityZone    `json:"availability_zone"`
	Name                ClusterName         `json:"name"`
	PodSelector         string              `json:"pod_selector"`
	Annotations         Annotations         `json:"annotations"`
	AllocationStrategy  rc_fields.Strategy  `json:"allocation_strategy"`
	MinHealthPercentage MinHealthPercentage `json:"min_health_percentage"`
}
// MarshalJSON implements json.Marshaler for PodCluster.
//
// Serialization is delegated to RawPodCluster via ToRaw because PodCluster
// carries a labels.Selector interface value: a nil, non-empty interface
// cannot be round-tripped through encoding/json directly, and we do not own
// labels.Selector, so the wrapping has to happen here.
func (pc PodCluster) MarshalJSON() ([]byte, error) {
	raw := pc.ToRaw()
	return json.Marshal(raw)
}
// ToRaw converts the pod cluster into its JSON-friendly representation,
// flattening the label selector into its string form (empty when nil).
func (pc PodCluster) ToRaw() RawPodCluster {
	raw := RawPodCluster{
		ID:                  pc.ID,
		PodID:               pc.PodID,
		AvailabilityZone:    pc.AvailabilityZone,
		Name:                pc.Name,
		Annotations:         pc.Annotations,
		AllocationStrategy:  pc.AllocationStrategy,
		MinHealthPercentage: pc.MinHealthPercentage,
	}
	if pc.PodSelector != nil {
		raw.PodSelector = pc.PodSelector.String()
	}
	return raw
}
// Compile-time assertion that PodCluster implements json.Marshaler.
var _ json.Marshaler = PodCluster{}
// UnmarshalJSON implements json.Unmarshaler for PodCluster.
//
// Decoding goes through RawPodCluster because labels.Selector is an
// interface and cannot be unmarshaled into directly; the selector string is
// parsed back into a labels.Selector afterwards.
func (pc *PodCluster) UnmarshalJSON(b []byte) error {
	var raw RawPodCluster
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}

	sel, err := labels.Parse(raw.PodSelector)
	if err != nil {
		return err
	}

	pc.ID = raw.ID
	pc.PodID = raw.PodID
	pc.AvailabilityZone = raw.AvailabilityZone
	pc.Name = raw.Name
	pc.PodSelector = sel
	pc.Annotations = raw.Annotations
	pc.AllocationStrategy = raw.AllocationStrategy
	pc.MinHealthPercentage = raw.MinHealthPercentage
	return nil
}
// Compile-time assertion that *PodCluster implements json.Unmarshaler.
var _ json.Unmarshaler = &PodCluster{}
|
package main
import (
"bufio"
"fmt"
"io"
"net"
)
// main starts the control-channel client and blocks inside it until the
// control connection is closed.
func main() {
	connectControl()
}
var (
	// CONTROL_PORT is the TCP port of the tunnel control channel.
	// NOTE(review): Go naming convention would be ControlPort; left as-is
	// because the identifier is referenced elsewhere in this package.
	CONTROL_PORT string = "8009"
)
// connectControl dials the control channel and reacts to line-delimited
// commands from the server: a "new" command spawns a fresh local/remote
// tunnel pairing; anything else (e.g. heartbeats) is ignored.
//
// Fixes over the original: the ResolveTCPAddr error is no longer discarded,
// the connection is closed on return, and the redundant `err == io.EOF`
// check (already covered by `err != nil`) is gone. The old `s == "hi"`
// branch could never match because ReadString keeps the trailing '\n'.
func connectControl() {
	// Runs against localhost for single-machine testing; replace with a
	// public address when deploying.
	tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:"+CONTROL_PORT)
	if err != nil {
		fmt.Println("Resolve control address error ! " + err.Error())
		return
	}
	conn, err := net.DialTCP("tcp", nil, tcpAddr)
	if err != nil {
		fmt.Println("Client connect error ! " + err.Error())
		return
	}
	defer conn.Close()
	fmt.Println(conn.LocalAddr().String() + " : Client connected!8009")

	reader := bufio.NewReader(conn)
	for {
		s, err := reader.ReadString('\n')
		if err != nil {
			// Any read error (including io.EOF) ends the session.
			break
		}
		// On "new", open an extra connection pair for the tunnel.
		if s == "new\n" {
			go combine()
		}
		// Heartbeat ("hi") messages are deliberately ignored.
	}
}
// combine opens one local and one remote connection and splices them
// together; if either dial fails, whichever side did connect is closed
// again so nothing leaks.
func combine() {
	local := connectLocal()
	remote := connectRemote()

	if local != nil && remote != nil {
		joinConn(local, remote)
		return
	}

	// Partial success: tear down whichever side actually connected.
	if local != nil {
		if err := local.Close(); err != nil {
			fmt.Println("close local:" + err.Error())
		}
	}
	if remote != nil {
		if err := remote.Close(); err != nil {
			fmt.Println("close remote:" + err.Error())
		}
	}
}
// joinConn bridges two TCP connections by streaming bytes in both
// directions. Each direction runs in its own goroutine, and both
// connections are closed as soon as either direction finishes.
func joinConn(local *net.TCPConn, remote *net.TCPConn) {
	pipe := func(dst *net.TCPConn, src *net.TCPConn) {
		defer dst.Close()
		defer src.Close()
		if _, err := io.Copy(dst, src); err != nil {
			fmt.Println(err.Error())
			return
		}
		fmt.Println("end")
	}
	go pipe(local, remote)
	go pipe(remote, local)
}
// connectLocal dials the local service on port 8000 and returns the
// connection, or nil on failure.
//
// Fix over the original: the ResolveTCPAddr error is no longer discarded.
func connectLocal() *net.TCPConn {
	tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8000")
	if err != nil {
		fmt.Println("Resolve local address error ! " + err.Error())
		return nil
	}
	conn, err := net.DialTCP("tcp", nil, tcpAddr)
	if err != nil {
		fmt.Println("Client connect error ! " + err.Error())
		return nil
	}
	fmt.Println(conn.LocalAddr().String() + " : Client connected!8000")
	return conn
}
// connectRemote dials the remote tunnel endpoint on port 8008 and returns
// the connection, or nil on failure.
//
// Fixes over the original: the ResolveTCPAddr error is no longer discarded,
// and the non-idiomatic trailing semicolon is removed.
func connectRemote() *net.TCPConn {
	tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8008")
	if err != nil {
		fmt.Println("Resolve remote address error ! " + err.Error())
		return nil
	}
	conn, err := net.DialTCP("tcp", nil, tcpAddr)
	if err != nil {
		fmt.Println("Client connect error ! " + err.Error())
		return nil
	}
	fmt.Println(conn.LocalAddr().String() + " : Client connected!8008")
	return conn
}
|
package annotations
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGetTrafficType verifies traffic-type resolution from service
// annotations: an unknown value errors out, a missing annotation falls back
// to the supplied default, and http/tcp/udp map to their service types.
func TestGetTrafficType(t *testing.T) {
	tests := []struct {
		desc        string
		annotations map[string]string
		want        string
		err         bool
	}{
		{
			desc: "unknown service type",
			annotations: map[string]string{
				"mesh.traefik.io/traffic-type": "hello",
			},
			err: true,
		},
		{
			desc:        "returns the default traffic-type if not set",
			annotations: map[string]string{},
			want:        ServiceTypeHTTP,
		},
		{
			desc: "http",
			annotations: map[string]string{
				"mesh.traefik.io/traffic-type": "http",
			},
			want: ServiceTypeHTTP,
		},
		{
			desc: "tcp",
			annotations: map[string]string{
				"mesh.traefik.io/traffic-type": "tcp",
			},
			want: ServiceTypeTCP,
		},
		{
			desc: "udp",
			annotations: map[string]string{
				"mesh.traefik.io/traffic-type": "udp",
			},
			want: ServiceTypeUDP,
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			// ServiceTypeHTTP doubles as the default for the "not set" case.
			tt, err := GetTrafficType(ServiceTypeHTTP, test.annotations)
			if test.err {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.want, tt)
		})
	}
}
// TestGetScheme verifies scheme resolution from service annotations: an
// unknown value errors out, a missing annotation defaults to http, and
// http/https/h2c are accepted.
func TestGetScheme(t *testing.T) {
	tests := []struct {
		desc        string
		annotations map[string]string
		want        string
		err         bool
	}{
		{
			desc: "unknown scheme",
			annotations: map[string]string{
				"mesh.traefik.io/scheme": "hello",
			},
			err: true,
		},
		{
			desc:        "returns the default scheme if not set",
			annotations: map[string]string{},
			want:        SchemeHTTP,
		},
		{
			desc: "http",
			annotations: map[string]string{
				"mesh.traefik.io/scheme": "http",
			},
			want: SchemeHTTP,
		},
		{
			desc: "https",
			annotations: map[string]string{
				"mesh.traefik.io/scheme": "https",
			},
			want: SchemeHTTPS,
		},
		{
			desc: "h2c",
			annotations: map[string]string{
				"mesh.traefik.io/scheme": "h2c",
			},
			want: SchemeH2C,
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			scheme, err := GetScheme(test.annotations)
			if test.err {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.want, scheme)
		})
	}
}
// TestGetRetryAttempts verifies retry-attempt parsing: a non-numeric value
// errors out, a valid value is returned as an int, and a missing annotation
// surfaces ErrNotFound.
func TestGetRetryAttempts(t *testing.T) {
	tests := []struct {
		desc         string
		annotations  map[string]string
		want         int
		err          bool
		wantNotFound bool
	}{
		{
			desc: "invalid",
			annotations: map[string]string{
				"mesh.traefik.io/retry-attempts": "hello",
			},
			err: true,
		},
		{
			desc: "valid",
			annotations: map[string]string{
				"mesh.traefik.io/retry-attempts": "2",
			},
			want: 2,
		},
		{
			desc:         "not set",
			annotations:  map[string]string{},
			err:          true,
			wantNotFound: true,
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			attempts, err := GetRetryAttempts(test.annotations)
			if test.err {
				require.Error(t, err)
				// Only the "not set" case should wrap ErrNotFound.
				assert.Equal(t, test.wantNotFound, errors.Is(err, ErrNotFound))
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.want, attempts)
		})
	}
}
// TestGetCircuitBreakerExpression verifies that the circuit-breaker
// expression annotation is returned verbatim and that a missing annotation
// surfaces ErrNotFound.
func TestGetCircuitBreakerExpression(t *testing.T) {
	testCases := []struct {
		desc         string
		annotations  map[string]string
		want         string
		err          bool
		wantNotFound bool
	}{
		{
			desc: "valid",
			annotations: map[string]string{
				"mesh.traefik.io/circuit-breaker-expression": "LatencyAtQuantileMS(50.0) > 100",
			},
			want: "LatencyAtQuantileMS(50.0) > 100",
		},
		{
			desc:         "not set",
			annotations:  map[string]string{},
			err:          true,
			wantNotFound: true,
		},
	}

	for _, tc := range testCases {
		tc := tc

		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()

			got, err := GetCircuitBreakerExpression(tc.annotations)
			if !tc.err {
				require.NoError(t, err)
				assert.Equal(t, tc.want, got)
				return
			}

			require.Error(t, err)
			assert.Equal(t, tc.wantNotFound, errors.Is(err, ErrNotFound))
		})
	}
}
// TestGetRateLimitBurst verifies rate-limit burst parsing: a non-numeric
// value errors out, a valid value is returned as an int, and a missing
// annotation surfaces ErrNotFound.
func TestGetRateLimitBurst(t *testing.T) {
	tests := []struct {
		desc         string
		annotations  map[string]string
		want         int
		err          bool
		wantNotFound bool
	}{
		{
			desc: "invalid",
			annotations: map[string]string{
				"mesh.traefik.io/ratelimit-burst": "hello",
			},
			err: true,
		},
		{
			desc: "valid",
			annotations: map[string]string{
				"mesh.traefik.io/ratelimit-burst": "200",
			},
			want: 200,
		},
		{
			desc:         "not set",
			annotations:  map[string]string{},
			err:          true,
			wantNotFound: true,
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			value, err := GetRateLimitBurst(test.annotations)
			if test.err {
				require.Error(t, err)
				// Only the "not set" case should wrap ErrNotFound.
				assert.Equal(t, test.wantNotFound, errors.Is(err, ErrNotFound))
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.want, value)
		})
	}
}
// TestGetRateLimitAverage verifies rate-limit average parsing: a
// non-numeric value errors out, a valid value is returned as an int, and a
// missing annotation surfaces ErrNotFound.
func TestGetRateLimitAverage(t *testing.T) {
	tests := []struct {
		desc         string
		annotations  map[string]string
		want         int
		err          bool
		wantNotFound bool
	}{
		{
			desc: "invalid",
			annotations: map[string]string{
				"mesh.traefik.io/ratelimit-average": "hello",
			},
			err: true,
		},
		{
			desc: "valid",
			annotations: map[string]string{
				"mesh.traefik.io/ratelimit-average": "100",
			},
			want: 100,
		},
		{
			desc:         "not set",
			annotations:  map[string]string{},
			err:          true,
			wantNotFound: true,
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			value, err := GetRateLimitAverage(test.annotations)
			if test.err {
				require.Error(t, err)
				// Only the "not set" case should wrap ErrNotFound.
				assert.Equal(t, test.wantNotFound, errors.Is(err, ErrNotFound))
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.want, value)
		})
	}
}
// Test_getAnnotation checks annotation lookup: the mesh.traefik.io prefix
// wins over the deprecated maesh.containo.us prefix, and a missing key
// reports absence.
func Test_getAnnotation(t *testing.T) {
	tests := []struct {
		desc        string
		annotations map[string]string
		name        string
		want        string
		exists      bool
	}{
		{
			desc: "should return the traefik mesh annotation value",
			annotations: map[string]string{
				"mesh.traefik.io/foo": "bar",
			},
			name:   "foo",
			want:   "bar",
			exists: true,
		},
		{
			desc: "should return the deprecated maesh annotation value",
			annotations: map[string]string{
				"maesh.containo.us/foo": "bar",
			},
			name:   "foo",
			want:   "bar",
			exists: true,
		},
		{
			// FIX: this description duplicated the first case's, producing
			// ambiguous subtest names (Go appends #01 to deduplicate).
			desc: "should prefer the traefik mesh annotation over the deprecated one",
			annotations: map[string]string{
				"mesh.traefik.io/foo":   "bar",
				"maesh.containo.us/foo": "fuzzy",
			},
			name:   "foo",
			want:   "bar",
			exists: true,
		},
		{
			desc: "should return not found",
			annotations: map[string]string{
				"mesh.traefik.io/foo":   "bar",
				"maesh.containo.us/foo": "fuzzy",
			},
			name: "bar",
			want: "",
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			value, exists := getAnnotation(test.annotations, test.name)
			assert.Equal(t, test.want, value)
			assert.Equal(t, test.exists, exists)
		})
	}
}
|
package goSolution
import "testing"
// TestFindDuplicate checks that files with identical content markers (the
// parenthesized payloads) are grouped together as duplicate file paths.
func TestFindDuplicate(t *testing.T) {
	paths := []string {"root/a 1.txt(abcd) 2.txt(efgh)","root/c 3.txt(abcd)","root/c/d 4.txt(efgh)","root 4.txt(efgh)"}
	AssertEqual(t, [][]string {{"root/a/1.txt","root/c/3.txt"}, {"root/a/2.txt","root/c/d/4.txt","root/4.txt"}}, findDuplicate(paths))
}
|
package main
import "fmt"
// main reads per-student grades for a fixed number of classes from stdin,
// printing each class's average score and pass count (>= 60), followed by
// the overall total and average.
//
// BUG FIX: the pass counter was a single variable accumulated across all
// classes, so the per-class line reported the cumulative pass count from
// every class so far instead of that class's own count.
func main() {
	classNum, stuNum := 2, 5
	var totalSum float64 = 0

	for j := 1; j <= classNum; j++ {
		sum := 0.0
		classPass := 0 // passes in this class only
		for i := 1; i <= stuNum; i++ {
			var grade float64
			fmt.Printf("输入%d班第%d个学生的分数:\n", j, i)
			if _, err := fmt.Scanf("%f", &grade); err != nil {
				fmt.Println(err)
				return
			}
			if grade >= 60 {
				classPass++
			}
			sum += grade
		}
		totalSum += sum
		fmt.Printf("%d班平均分是:%v;合格人数:%v\n", j, sum/float64(stuNum), classPass)
	}
	fmt.Printf("%d个班总分是%v;平均分是:%v\n", classNum, totalSum, totalSum/float64(stuNum*classNum))
}
|
// 该文件由 make.go 自动生成,请勿手动修改!
package static
var assets = map[string][]byte{
"./style.css": []byte(`@charset "utf-8";
:root {
--aside-width: 350px;
--aside-footer-height: 140px;
--aside-header-height: 80px;
}
/*============== reset =================*/
body {
margin: 0
}
a {
text-decoration: none;
color: #3b8bba;
}
/*=============== aside ================*/
aside {
background: rgb(189,189,189);
position: fixed;
top: 0;
left: 0;
bottom: 0;
width: var(--aside-width);
box-sizing: border-box;
}
aside header {
padding: 1rem;
position: absolute;
left: 0;
top: 0;
width: var(--aside-width);
height: var(--aside-header-height);
box-sizing: border-box;
}
aside menu {
box-sizing: border-box;
overflow-y:scroll;
position: absolute;
left: 0;
top: var(--aside-header-height);
bottom: var(--aside-footer-height);
width: var(--aside-width);
}
aside footer {
padding: 1rem;
position: -webkit-sticky;
position: absolute;
bottom: 0;
left: 0;
height: var(--aside-footer-height);
width: var(--aside-width);
box-sizing: border-box;
}
aside menu ul>li {
cursor: pointer;
line-height: 1.5;
}
/*=============== main ================*/
main {
margin-left: var(--aside-width);
padding: 1rem;
}
/*=============== .api ================*/
main .api{
padding:1rem;
margin:1rem 0rem;
border:1px solid #eee;
}
.api h3{
cursor:pointer;
margin:0rem;
display:flex;
align-items:center;
}
.api h4{
font-size:1.1rem;
margin-bottom:.2rem;
border-bottom:1px solid #eee;
padding-bottom:.2rem;
}
.api h5{
margin:.8rem 0rem .2rem 0rem;
font-size:1rem;
}
.api h3 .method{
width:5rem;
font-weight:bold;
text-transform:uppercase;
}
.api h3 .get{
color:green;
}
.api h3 .options{
color:green;
}
.api h3 .delete{
color:red;
}
.api h3 .put,.api h3 .patch{
color:rgb(193,174,49);
}
.api h3 .post{
color:rgb(240,114,11);
}
.api h3 .url{
margin-right:2rem;
}
.api h4 .success{
color:green;
margin-right:1rem;
}
.api h4 .error{
color:red;
margin-right:1rem;
}
.api table{
text-align:left;
border-collapse:collapse;
border:1px solid #ddd;
}
.api table thead tr{
background:#eee;
}
.api table tr{
border-bottom:1px solid #ddd;
line-height:1.5rem;
}
.api table tbody th .parent{
color:#ccc;
}
.api table th, .api table td{
padding:.3rem 1rem;
}
`), "./app.js": []byte(`"use strict";
// 代码缩进的空格数量。
let indentSize = 4
// 对应 vars.JSONDataDirName 的值
let dataDirName = 'data'
$(()=>{
initTemplate()
})
function initTemplate() {
Handlebars.registerPartial('examples', $('#examples').html())
Handlebars.registerPartial('params', $('#params').html())
Handlebars.registerPartial('headers', $('#headers').html())
Handlebars.registerPartial('response', $('#response').html())
Handlebars.registerHelper('dateFormat', formatDate)
Handlebars.registerHelper('elapsedFormat', formatElapsed)
let pageTpl = Handlebars.compile($('#page').html())
let apiTpl = Handlebars.compile($('#api').html())
fetch('./'+dataDirName+'/page.json').then((resp)=>{
return resp.json();
}).then((json)=>{
$('#app').html(pageTpl(json))
document.title = json.title + ' | ' + json.appName
loadApis(json)
})
// 加载 api 模板内容,json 为对应的数据
function loadApis(json) {
let menu = $('aside .menu')
menu.find('li.content').on('click', (event)=>{
$('main').html(json.content)
})
menu.find('li.api').on('click', (event)=>{
let path = $(event.target).attr('data-path')
fetch(path).then((resp)=>{
return resp.json()
}).then((json)=>{
$('main').html(apiTpl(json))
indentCode()
prettifyParams()
highlightCode()
}).catch((reason)=>{
console.error(reason)
})
})
} // end loadApis
}
// 美化带有子元素的参数显示
function prettifyParams() {
$('.request .params tbody th,.response .params tbody th').each(function(index, elem){
let text = $(elem).text()
text = text.replace(/(.*\.{1})/,'<span class="parent">$1</span>')
$(elem).html(text)
})
}
// 代码高亮,依赖于是否能访问网络。
function highlightCode() {
if (typeof(Prism) != 'undefined') {
Prism.plugins.autoloader.languages_path='https://cdn.bootcss.com/prism/1.5.1/components/'
Prism.highlightAll(false)
}
}
// 调整缩进
function indentCode() {
$('pre code').each((index, elem)=>{
let code = $(elem).text()
$(elem).text(alignCode(code))
})
}
// 对齐代码。
function alignCode(code) {
return code.replace(/^\s*/gm, (word)=>{
word = word.replace('\t', repeatSpace(indentSize))
// 按 indentSize 的倍数取得缩进的量
let len = Math.ceil((word.length-2)/indentSize)*indentSize
return repeatSpace(len)
}).replace(/[ ]{12}/gm, '') // 产生的 json 中,会被格式化成缩进12个空格
}
function repeatSpace(len) {
var code = []
while(code.length < len) {
code.push(' ')
}
return code.join('')
}
function formatDate(unix) {
let date = new Date(unix*1000)
let str = []
str.push(date.getFullYear(), '-')
str.push(date.getMonth(), '-')
str.push(date.getDate(), ' ')
str.push(date.getHours(), ':')
str.push(date.getMinutes(), ':')
str.push(date.getSeconds())
return str.join('')
}
function formatElapsed(number) {
return (number / 100000000).toFixed(4) + '秒'
}
`), "./index.html": []byte(`<!DOCTYPE html>
<html lang="zh-cmn-Hans">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<title>APIDOC</title>
<link rel="stylesheet" href="./style.css" />
<link href="https://cdn.bootcss.com/prism/1.5.1/themes/prism.min.css" rel="stylesheet" />
<script src="https://cdn.bootcss.com/jquery/3.2.1/jquery.min.js"></script>
<script src="https://cdn.bootcss.com/handlebars.js/4.0.11/handlebars.min.js"></script>
<script src="https://cdn.bootcss.com/prism/1.5.1/prism.min.js" data-manual></script>
<script src="https://cdn.bootcss.com/prism/1.5.1/plugins/autoloader/prism-autoloader.min.js"></script>
</head>
<body>
<div id="app"></div>
<script id="page" type="text/x-handlebars-template">
<aside>
<header><h1>{{title}}</h1></header>
<menu>
<ul class="menu">
<li class="menu-item content" data-path="content">home</li>
{{#each groups}}
<li class="menu-item api" data-path="{{this}}">{{@key}}</li>
{{/each}}
</ul>
</menu>
<footer>
<p>内容由<a href="{{appURL}}">{{appName}}</a>编译于 <time>{{dateFormat date}}</time>,用时{{elapsedFormat elapsed}}。</p>
{{#if licenseName}}
<p>内容采用<a href="{{licenseURL}}">{{licenseName}}</a>进行许可。</p>
{{/if}}
</footer>
</aside>
<main id="main">{{{content}}}</main>
</script>
<script id="examples" type="text/x-handlebars-template">
{{#each examples}}
<pre><code class="language-{{type}}">{{code}}
</code></pre>
{{/each}}
</script>
<script id="params" type="text/x-handlebars-template">
<table class="params">
<thead>
<tr><th>名称</th><th>类型</th><th>描述</th></tr>
</thead>
<tbody>
{{#each params}}
<tr>
<th>{{name}}</th>
<td>{{type}}</td>
<td>{{summary}}</td>
</tr>
{{/each}}
</tbody>
</table>
</script>
<script id="headers" type="text/x-handlebars-template">
<table>
<thead>
<tr><th>名称</th><th>描述</th></tr>
</thead>
<tbody>
{{#each headers}}
<tr>
<th>{{@key}}</th>
<td>{{this}}</td>
</tr>
{{/each}}
</tbody>
</table>
</script>
<script id="response" type="text/x-handlebars-template">
{{#if response.headers}}
<h5>请求头</h5>
{{> headers headers=response.headers}}
{{/if}}
{{#if response.params}}
<h5>参数:</h5>
{{> params params=response.params}}
{{/if}}
{{#if response.examples}}
<h5>示例:</h5>
{{> examples examples=response.examples}}
{{/if}}
</script>
<script id="api" type="text/x-handlebars-template">
<h2>{{name}}</h2>
{{#each apis}}
<section class="api">
<h3>
<span class="method {{method}}">{{method}}</span>
<span class="url">{{url}}</span>
<span class="summary">{{summary}}</span>
</h3>
<div class="content">
{{#if description}}
<p class="description">{{description}}</p>
{{/if}}
{{#if queries}}
<h5>查询参数</h5>
{{> params params=queries}}
{{/if}}
{{#if params}}
<h5>参数</h5>
{{> params params=params}}
{{/if}}
{{#if request}}
<div class="request">
<h4>请求{{#if request.type}}: {{request.type}}{{/if}}</h4>
<div>
{{#if request.headers}}
<h5>报头:</h5>
{{> headers headers=request.headers}}
{{/if}}
{{#if request.params}}
<h5>参数:</h5>
{{> params params=request.params}}
{{/if}}
{{#if request.examples}}
<h5>示例:</h5>
{{> examples examples=request.examples}}
{{/if}}
</div>
</div>
{{/if}}
{{#if success}}
<div class="response success">
<h4><span class="success">SUCCESS:</span>{{success.code}}, {{success.summary}}</h4>
{{> response response=success}}
</div>
{{/if}}
{{#if error}}
<div class="response error">
<h4><span class="error">ERROR:</span>{{error.code}}, {{error.summary}}</h4>
{{> response response=error}}
</div>
{{/if}}
</div>
</section>
{{/each}}
</script>
<script src="./app.js"></script>
</body>
</html>
`)}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package audit
import (
"crypto/md5"
"fmt"
"os"
"time"
)
// Logger is an interface for logging.
type Logger interface {
	// Info logs its arguments at informational level.
	Info(args ...interface{})
}
// SplitSlice splits a slice of any type into smaller slices of given length.
// It returns a 2D slice of the same type as the input slice.
// If the length of the input slice is not divisible by the given length, the
// last slice may have fewer elements. A nil result is returned for an empty
// input or a non-positive length.
//
// BUG FIX: a length <= 0 previously caused an infinite loop (i += 0) or a
// panic (negative index); it is now rejected up front. The result is also
// preallocated to its final chunk count.
func SplitSlice[T any](slice []T, length int) [][]T {
	if length <= 0 || len(slice) == 0 {
		return nil
	}
	// ceil(len/length) chunks in total.
	result := make([][]T, 0, (len(slice)+length-1)/length)
	for i := 0; i < len(slice); i += length {
		end := i + length
		if end > len(slice) {
			end = len(slice)
		}
		result = append(result, slice[i:end])
	}
	return result
}
// GetEnvWithDefault looks up the environment variable named key and returns
// its value. When the variable is unset or empty, defaultValue is returned
// instead.
func GetEnvWithDefault(key, defaultValue string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return defaultValue
}
// GenerateEventID builds an event id of the form
// app_code-YYYYMMDDHHMMSS-substring(MD5(factor), 8, 24).
func GenerateEventID(appCode, factor string) string {
	timestamp := time.Now().Format("20060102150405")
	// NOCC:gas/crypto(设计如此)
	// (md5 is intentional here; it is not used for security purposes)
	digest := fmt.Sprintf("%x", md5.Sum([]byte(factor)))
	return fmt.Sprintf("%s-%s-%s", appCode, timestamp, digest[8:24])
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-03 11:28
* Description:
*****************************************************************/
package netstream
import (
"errors"
"github.com/go-xe2/x/core/logger"
"github.com/go-xe2/x/sync/xsafeMap"
"net"
"sync"
"sync/atomic"
)
// TStreamServer is a TCP stream server: it accepts connections, wraps each
// one in a tStreamConn bound to this server and dispatches events to the
// configured ServerStreamHandler.
type TStreamServer struct {
	wg            sync.WaitGroup       // waits for per-connection goroutines in Stop
	listener      net.Listener         // underlying listener; nil until listen()/Open()
	addr          net.Addr             // address to listen on (resolved by the constructor)
	heartbeatRun  int32                // atomic flag; heartbeat state (see heartbeatProcessLoop)
	clients       *xsafeMap.TStrAnyMap // connected clients, keyed by connection id
	handler       ServerStreamHandler  // user callback for stream events
	isListener    int32                // atomic flag: listener is active
	isInterrupted int32                // atomic flag: server has been interrupted
	closed        chan byte            // closed when the server shuts down
	options       *TStmServerOptions   // server options (falls back to defaults)
	requests      *xsafeMap.TStrAnyMap // in-flight requests
}
// Compile-time assertion that *TStreamServer implements StreamConnHandler.
var _ StreamConnHandler = (*TStreamServer)(nil)
// NewStreamServer resolves listenAddr as a TCP address and returns a server
// prepared to listen on it (the socket is not opened yet). A nil options
// falls back to DefaultStmServerOptions.
func NewStreamServer(listenAddr string, options *TStmServerOptions) (*TStreamServer, error) {
	if options == nil {
		options = DefaultStmServerOptions
	}
	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
	if err != nil {
		return nil, err
	}
	return &TStreamServer{
		options:      options,
		addr:         addr,
		clients:      xsafeMap.NewStrAnyMap(),
		heartbeatRun: 0,
		closed:       make(chan byte, 1),
		requests:     xsafeMap.NewStrAnyMap(),
	}, nil
}
// NewStreamServerByListener wraps an already-created listener in a stream
// server. A nil options falls back to DefaultStmServerOptions.
// NOTE(review): unlike NewStreamServer, the isListener flag is not set here;
// listen() sets it on the first Serve call.
func NewStreamServerByListener(listener net.Listener, options *TStmServerOptions) *TStreamServer {
	if options == nil {
		options = DefaultStmServerOptions
	}
	return &TStreamServer{
		options:  options,
		addr:     listener.Addr(),
		listener: listener,
		clients:  xsafeMap.NewStrAnyMap(),
		heartbeatRun: 0,
		closed:       make(chan byte, 1),
		requests:     xsafeMap.NewStrAnyMap(),
	}
}
// Log writes a log entry tagged with the "TStreamServer" component name via
// the logger configured in the server options.
func (p *TStreamServer) Log(level logger.LogLevel, args ...interface{}) {
	Log("TStreamServer", p.options.GetLogger(), level, args...)
}
// SetHandler installs the callback that receives stream events.
// NOTE(review): the field is written without synchronization — set the
// handler before calling Serve.
func (p *TStreamServer) SetHandler(handler ServerStreamHandler) {
	p.handler = handler
}
// listen lazily creates the listener (unless one was injected via
// NewStreamServerByListener) and marks the server as listening. Calling it
// while already listening is a no-op.
func (p *TStreamServer) listen() error {
	if p.isListening() {
		return nil
	}
	if p.listener == nil {
		l, err := net.Listen(p.addr.Network(), p.addr.String())
		if err != nil {
			return err
		}
		p.listener = l
	}
	atomic.StoreInt32(&p.isListener, 1)
	return nil
}
// getInterrupted reports whether the server has been interrupted.
func (p *TStreamServer) getInterrupted() bool {
	return atomic.LoadInt32(&p.isInterrupted) != 0
}
// getListener returns the active listener, or nil when the server is not
// currently marked as listening.
func (p *TStreamServer) getListener() net.Listener {
	if atomic.LoadInt32(&p.isListener) == 0 {
		return nil
	}
	return p.listener
}
// accept blocks for the next incoming connection and wraps it in a
// tStreamConn bound to this server. It fails immediately when the server
// has been interrupted or no listener is active.
func (p *TStreamServer) accept() (*tStreamConn, error) {
	interrupted := p.getInterrupted()
	if interrupted {
		return nil, errors.New("已中断服务") // service interrupted
	}
	listener := p.getListener()
	if listener == nil {
		return nil, errors.New("未设置服务监听地址") // no listen address configured
	}
	conn, err := listener.Accept()
	if err != nil {
		return nil, err
	}
	return newStreamConnByConn(p, conn, p.options.TConnOptions), nil
}
// isListening reports whether the listening flag is set and a listener
// actually exists.
func (p *TStreamServer) isListening() bool {
	return atomic.LoadInt32(&p.isListener) != 0 && p.listener != nil
}
// Open explicitly creates the listening socket ahead of Serve. It returns
// an error when the server is already listening or the listen call fails.
func (p *TStreamServer) Open() error {
	if p.isListening() {
		return errors.New("套接字服务已经打开")
	}
	l, err := net.Listen(p.addr.Network(), p.addr.String())
	if err != nil {
		return err
	}
	p.listener = l
	// BUG FIX: mark the listener as active. Previously the flag stayed 0,
	// so isListening() reported false after Open() and Close() skipped
	// closing the listener, leaking the socket.
	atomic.StoreInt32(&p.isListener, 1)
	return nil
}
// Addr returns the actual listening address when a listener exists,
// otherwise the address the server was configured with.
func (p *TStreamServer) Addr() net.Addr {
	if p.listener == nil {
		return p.addr
	}
	return p.listener.Addr()
}
// Close shuts the server down: it closes the shutdown channel and, when
// listening, clears the listening flag and closes the listener. A second
// Close is a no-op via the select short-circuit.
// NOTE(review): two concurrent first-time Close calls can both reach
// close(p.closed) and panic — confirm callers serialize shutdown.
func (p *TStreamServer) Close() error {
	var err error
	// Already closed? Nothing to do.
	select {
	case <-p.closed:
		return nil
	default:
	}
	close(p.closed)
	if p.isListening() {
		atomic.StoreInt32(&p.isListener, 0)
		err = p.listener.Close()
		p.listener = nil
	}
	return err
}
// interrupt flags the server as interrupted and closes it; a close error is
// logged as a warning rather than returned.
func (p *TStreamServer) interrupt() error {
	atomic.StoreInt32(&p.isInterrupted, 1)
	if e := p.Close(); e != nil {
		p.Log(logger.LEVEL_WARN, "中断服务出错:", e) // error while interrupting the service
	}
	return nil
}
// innerAccept accepts one connection and spawns a goroutine that runs the
// client's lifecycle until it stops. Accept errors are swallowed and
// treated as server shutdown; the accept loop then exits via the closed
// channel.
func (p *TStreamServer) innerAccept() error {
	client, err := p.accept()
	if err != nil {
		// Server is shutting down; report no error to the accept loop.
		return nil
	}
	// Bail out if the server was closed while we were accepting.
	select {
	case <-p.closed:
		return nil
	default:
	}
	if client != nil {
		p.wg.Add(1)
		id := client.Id()
		go func() {
			defer func() {
				if e := recover(); e != nil {
					// Panic while winding the client down; log and move on.
					p.Log(logger.LEVEL_WARN, "结束客户端[", id, "]连接出错:", e)
				}
			}()
			defer p.wg.Done()
			client.Start()
			// NOTE(review): heartbeatProcessLoop is invoked once per
			// accepted connection — confirm it guards itself (heartbeatRun
			// flag) against running multiple loops concurrently.
			p.heartbeatProcessLoop()
			client.doOnConnect()
			client.WaitStop()
		}()
	}
	return nil
}
// acceptLoop accepts connections until the server is closed or innerAccept
// reports an error (which is logged before returning).
func (p *TStreamServer) acceptLoop() error {
	for {
		// Stop as soon as the server has been closed.
		select {
		case <-p.closed:
			return nil
		default:
		}
		err := p.innerAccept()
		if err != nil {
			p.Log(logger.LEVEL_ERRO, "出错,服务已退出:", err) // error; service exited
			return err
		}
	}
}
// Serve starts listening (creating the socket if necessary) and runs the
// accept loop until the server is closed.
func (p *TStreamServer) Serve() error {
	if err := p.listen(); err != nil {
		return err
	}
	return p.acceptLoop()
}
// Stop gracefully shuts the server down: it closes every connected client,
// interrupts the listener (so no new connections are accepted) and waits
// for all per-connection goroutines to finish. Calling Stop after the
// server is already closed is a no-op.
func (p *TStreamServer) Stop() error {
	select {
	case <-p.closed:
		return nil
	default:
	}
	// Close every connected client first.
	p.clients.Foreach(func(k string, v interface{}) bool {
		cli := v.(*tStreamConn)
		if e := cli.Close(); e != nil {
			p.Log(logger.LEVEL_WARN, "关闭客户端出错:", e) // error closing a client
		}
		return true
	})
	// Then interrupt the server itself and wait for workers to drain.
	_ = p.interrupt()
	p.wg.Wait()
	return nil
}
|
package provider
import (
"context"
"fmt"
"github.com/ottogroup/penelope/pkg/config"
"github.com/ottogroup/penelope/pkg/http/impersonate"
)
// defaultImpersonatedTokenConfigProvider resolves the impersonation target
// principal from a static, environment-variable based configuration.
type defaultImpersonatedTokenConfigProvider struct {
}
// NewDefaultImpersonatedTokenConfigProvider returns a provider that serves
// the statically configured default target principal for every project.
func NewDefaultImpersonatedTokenConfigProvider() impersonate.TargetPrincipalForProjectProvider {
	return &defaultImpersonatedTokenConfigProvider{}
}
// GetTargetPrincipalForProject returns the statically configured default
// target principal regardless of the requested project. It fails when the
// corresponding environment variable is not set.
func (ip *defaultImpersonatedTokenConfigProvider) GetTargetPrincipalForProject(context.Context, string) (string, error) {
	if !config.DefaultProviderPrincipalForProjectPathEnv.Exist() {
		return "", fmt.Errorf("no default target principal provided")
	}
	return config.DefaultProviderPrincipalForProjectPathEnv.MustGet(), nil
}
|
package hall_call_handler
//********
// Routines for computing a suitability score for each elevator and handling of designated hall call orders.
// Moved here because of loops when it was in order_handler.
//********
import (
"math"
"time"
elevio "../elev_driver"
slog "../sessionlog"
statemachine "../stateMachine"
turnofflights "../turn_off_lights"
)
// numFloors is the number of floors served; suitability scores scale with it.
var numFloors = 4
// ComputeScore ranks this elevator's suitability for a new hall call; every
// elevator computes its own score and the highest one takes the order.
// Idle elevators score best, elevators moving towards the call score by
// distance (with a bonus when the pressed button matches the direction of
// travel), and elevators moving away always score 1.
func ComputeScore(dir int, order elevio.ButtonEvent, atFloor int, idle bool) int {
	top := numFloors - 1
	dist := int(math.Abs(float64(atFloor - order.Floor)))

	if idle {
		return (top + 3) - dist
	}

	// Heading towards the call: up and the call is above us, or not-up and
	// the call is below us.
	movingUp := dir == 1
	towards := (movingUp && order.Floor > atFloor) || (!movingUp && order.Floor < atFloor)
	if towards {
		switch order.Button {
		case 0: // hall button in our direction of travel
			return (top + 2) - dist
		case 1: // hall button opposite to our direction
			return (top + 1) - dist
		}
	}

	// Moving away from the call.
	return 1
}
// HandleHallCall services a designated hall call. When the elevator is
// already standing at the requested floor, it simply cycles the door;
// otherwise the order is stored in the session log — flagged with whether
// this elevator won it (score > 1) — and the button lamp is lit.
func HandleHallCall(order elevio.ButtonEvent, score int) {
	atFloor := statemachine.GetFloor()
	if atFloor == order.Floor && statemachine.GetDirection() == 0 {
		elevio.SetDoorOpenLamp(true)
		time.Sleep(2 * time.Second)
		elevio.SetDoorOpenLamp(false)
		turnofflights.TurnOffLightTransmit(statemachine.GetFloor())
		return
	}
	slog.StoreInSessionLog(order.Floor, score > 1)
	elevio.SetButtonLamp(order.Button, order.Floor, true)
}
|
package main
import (
"fmt"
"" // put in link from master
"" // put in link from master
)
// OS_Swift holds OpenStack Swift credentials and endpoint information plus
// the live connection handle used by the bucket/object operations below.
type OS_Swift struct {
	Login        string
	Password     string
	TenantID     string
	EndPointURL  string
	Tenant       string
	SwiftHandler swift.Connection
}
// Auth populates the Swift connection handle from the stored credentials.
// No network call happens here; the handle authenticates lazily on first
// use.
func (s *OS_Swift) Auth() string {
	conn := swift.Connection{
		UserName: s.Login,
		ApiKey:   s.Password,
		AuthUrl:  s.EndPointURL,
		Tenant:   s.Tenant,
	}
	s.SwiftHandler = conn
	return "Swift Auth"
}
// ListBuckets prints every container name in the account, one per line.
// NOTE: log.Fatal terminates the process on any API error.
func (s *OS_Swift) ListBuckets() string {
	containers, err := s.SwiftHandler.ContainerNames(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range containers {
		fmt.Println(fmt.Sprintf("%+v", name))
	}
	return "Swift ListBucket"
}
// CreateBucket creates the named container.
// NOTE: log.Fatal terminates the process on any API error.
func (s *OS_Swift) CreateBucket(bucketName string) string {
	if err := s.SwiftHandler.ContainerCreate(bucketName, nil); err != nil {
		log.Fatal(err)
	}
	return "Swift Create Bucket"
}
// DeleteBucket removes the named container.
// NOTE: log.Fatal terminates the process on any API error.
func (s *OS_Swift) DeleteBucket(bucketName string) string {
	if err := s.SwiftHandler.ContainerDelete(bucketName); err != nil {
		log.Fatal(err)
	}
	return "Swift Delete Bucket"
}
// Put is a stub for uploading an object from src to dst; the ObjectPut call
// is not wired up yet.
func (s *OS_Swift) Put(src string, dst string) string {
	//_, err := s.SwiftHandler.ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers)
	return "Swift Put Object"
}
// Get is a stub for downloading an object; not implemented yet.
func (s *OS_Swift) Get() string {
	return "Swift Get Object"
}
// Del is a stub for deleting an object; not implemented yet.
// BUG FIX: the status string previously said "Swift Put Object" (copy-paste
// from Put); it now reports the delete operation.
func (s *OS_Swift) Del() string {
	return "Swift Delete Object"
}
//
|
package app
import (
"io/ioutil"
"path/filepath"
"testing"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/local"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"github.com/10gen/realm-cli/internal/utils/test/mock"
"github.com/Netflix/go-expect"
)
// TestAppInitInputsResolve covers resolution of `app init` inputs: it must
// refuse to run inside an existing project, prompt only for values not
// supplied via flags, and fall back to the documented defaults otherwise.
func TestAppInitInputsResolve(t *testing.T) {
	t.Run("should return an error if ran from a directory that already has a project", func(t *testing.T) {
		profile, teardown := mock.NewProfileFromTmpDir(t, "app_init_input_test")
		defer teardown()

		// Drop a minimal app config into the working directory so Resolve
		// detects an existing project.
		assert.Nil(t, ioutil.WriteFile(
			filepath.Join(profile.WorkingDirectory, local.FileConfig.String()),
			[]byte(`{"name":"eggcorn"}`),
			0666,
		))

		var i initInputs
		assert.Equal(t, errProjectExists{}, i.Resolve(profile, nil))
	})

	for _, tc := range []struct {
		description string
		inputs      initInputs
		procedure   func(c *expect.Console)
		test        func(t *testing.T, i initInputs)
	}{
		{
			description: "with no flags set should prompt for just name and set location deployment model and environment to defaults",
			procedure: func(c *expect.Console) {
				c.ExpectString("App Name")
				c.SendLine("test-app")
				c.ExpectEOF()
			},
			test: func(t *testing.T, i initInputs) {
				assert.Equal(t, "test-app", i.Name)
				assert.Equal(t, flagDeploymentModelDefault, i.DeploymentModel)
				assert.Equal(t, flagLocationDefault, i.Location)
				assert.Equal(t, realm.EnvironmentNone, i.Environment)
			},
		},
		{
			description: "with a name flag set should prompt for nothing else and set location deployment model and environment to defaults",
			inputs:      initInputs{newAppInputs: newAppInputs{Name: "test-app"}},
			procedure:   func(c *expect.Console) {},
			test: func(t *testing.T, i initInputs) {
				assert.Equal(t, "test-app", i.Name)
				assert.Equal(t, flagDeploymentModelDefault, i.DeploymentModel)
				assert.Equal(t, flagLocationDefault, i.Location)
				assert.Equal(t, realm.EnvironmentNone, i.Environment)
			},
		},
		{
			description: "with name location deployment model and environment flags set should prompt for nothing else",
			inputs: initInputs{newAppInputs: newAppInputs{
				Name:            "test-app",
				DeploymentModel: realm.DeploymentModelLocal,
				Location:        realm.LocationOregon,
				Environment:     realm.EnvironmentDevelopment,
			}},
			procedure: func(c *expect.Console) {},
			test: func(t *testing.T, i initInputs) {
				assert.Equal(t, "test-app", i.Name)
				assert.Equal(t, realm.DeploymentModelLocal, i.DeploymentModel)
				assert.Equal(t, realm.LocationOregon, i.Location)
				assert.Equal(t, realm.EnvironmentDevelopment, i.Environment)
			},
		},
	} {
		t.Run(tc.description, func(t *testing.T) {
			profile := mock.NewProfile(t)

			// Drive the interactive prompts through a fake console.
			_, console, _, ui, consoleErr := mock.NewVT10XConsole()
			assert.Nil(t, consoleErr)
			defer console.Close()

			doneCh := make(chan (struct{}))
			go func() {
				defer close(doneCh)
				tc.procedure(console)
			}()

			assert.Nil(t, tc.inputs.Resolve(profile, ui))

			console.Tty().Close() // flush the writers
			<-doneCh              // wait for procedure to complete

			tc.test(t, tc.inputs)
		})
	}
}
|
// Copyright 2015 by caixw, All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
package main
import (
"time"
"github.com/issue9/term/colors"
)
// logLevel identifies the severity of a log message.
type logLevel int
// showIgnoreLog controls whether messages logged at the ignore level are
// printed; when false (the default) they are suppressed by log().
var showIgnoreLog = false
// Log levels, in declaration order.
const (
	succ logLevel = iota // successful operation
	info                 // informational message
	warn                 // warning
	erro                 // error
	ignore               // suppressed unless showIgnoreLog is true
	max                  // always last; used only to range-check logLevel values
)
// levelStrings maps each log level to the tag printed inside brackets.
var levelStrings = map[logLevel]string{
	succ: "SUCCESS",
	info: "INFO",
	// Fixed: the tag was previously misspelled "WARINNG".
	warn:   "WARNING",
	erro:   "ERROR",
	ignore: "IGNORE",
}
// levelColors maps each log level to the terminal color used when printing it.
var levelColors = map[logLevel]colors.Color{
	succ:   colors.Green,
	info:   colors.Blue,
	warn:   colors.Magenta,
	erro:   colors.Red,
	ignore: colors.Default,
}
// log prints a timestamped, colorized message at the given level. It panics
// on an out-of-range level and silently drops ignore-level messages unless
// showIgnoreLog is set.
func log(level logLevel, msg ...interface{}) {
	if level < 0 || level >= max {
		panic("log:无效的level值")
	}
	if !showIgnoreLog && level == ignore {
		return
	}
	timestamp := time.Now().Format("2006-01-02 15:04:05 ")
	colors.Print(colors.Stdout, colors.Default, colors.Default, timestamp)
	colors.Print(colors.Stdout, levelColors[level], colors.Default, "[", levelStrings[level], "] ")
	colors.Println(colors.Stdout, levelColors[level], colors.Default, msg...)
}
|
// Source : https://oj.leetcode.com/problems/longest-common-prefix/
// Author : Austin Vern Songer
// Date : 2016-04-13
/**********************************************************************************
*
* Write a function to find the longest common prefix string amongst an array of strings.
*
**********************************************************************************/
package main
import (
"fmt"
)
// longestPrefix returns the longest common prefix shared by all strings in
// strs. It returns "" when strs is empty or the strings share no prefix.
//
// The original implementation grew the candidate one byte at a time and
// re-compared full prefixes on every step (O(n·m²) byte comparisons, plus a
// non-gofmt signature and `len(strs) <= 0`); this version shrinks the first
// string against each subsequent one, which is linear in the total input.
func longestPrefix(strs []string) string {
	if len(strs) == 0 {
		return ""
	}
	prefix := strs[0]
	for _, s := range strs[1:] {
		// Shrink the candidate until it is a prefix of s.
		for len(prefix) > 0 && (len(s) < len(prefix) || s[:len(prefix)] != prefix) {
			prefix = prefix[:len(prefix)-1]
		}
		if prefix == "" {
			break
		}
	}
	return prefix
}
// main demonstrates longestPrefix on a single-element input.
func main() {
	input := []string{"abab"}
	fmt.Println(longestPrefix(input))
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package export
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
)
// TestEscape exercises escapeSQL and escapeCSV on a string containing double
// quotes, a single quote and a backslash, covering both backslash-escaping
// and character-doubling modes, and then checks CSV escaping behavior with
// and without a configured delimiter.
func TestEscape(t *testing.T) {
	var bf bytes.Buffer
	// Input mixes `"`, `'` and `\` so every escaping rule is hit.
	str := []byte(`MWQeWw""'\rNmtGxzGp`)
	// Expected outputs for each (function, backslash-mode) combination.
	expectStrBackslash := `MWQeWw\"\"\'\\rNmtGxzGp`
	expectStrWithoutBackslash := `MWQeWw""''\rNmtGxzGp`
	expectStrBackslashDoubleQuote := `MWQeWw\"\"'\\rNmtGxzGp`
	expectStrWithoutBackslashDoubleQuote := `MWQeWw""""'\rNmtGxzGp`
	escapeSQL(str, &bf, true)
	require.Equal(t, expectStrBackslash, bf.String())
	bf.Reset()
	escapeSQL(str, &bf, false)
	require.Equal(t, expectStrWithoutBackslash, bf.String())
	bf.Reset()
	// CSV escaping with a double-quote delimiter and comma separator.
	opt := &csvOption{
		delimiter: []byte(`"`),
		separator: []byte(`,`),
	}
	escapeCSV(str, &bf, true, opt)
	require.Equal(t, expectStrBackslashDoubleQuote, bf.String())
	bf.Reset()
	escapeCSV(str, &bf, false, opt)
	require.Equal(t, expectStrWithoutBackslashDoubleQuote, bf.String())
	bf.Reset()
	// Second input checks interaction between delimiter and a multi-byte
	// separator: with no delimiter, the separator itself must be escaped.
	str = []byte(`a|*|b"cd`)
	expectedStrWithDelimiter := `a|*|b""cd`
	expectedStrBackslashWithoutDelimiter := `a\|*\|b"cd`
	expectedStrWithoutDelimiter := `a|*|b"cd`
	escapeCSV(str, &bf, false, opt)
	require.Equal(t, expectedStrWithDelimiter, bf.String())
	bf.Reset()
	opt.delimiter = []byte("")
	opt.separator = []byte(`|*|`)
	escapeCSV(str, &bf, true, opt)
	require.Equal(t, expectedStrBackslashWithoutDelimiter, bf.String())
	bf.Reset()
	escapeCSV(str, &bf, false, opt)
	require.Equal(t, expectedStrWithoutDelimiter, bf.String())
}
|
package main
import (
"fmt"
"strconv"
)
// main demonstrates the strconv package: bool parsing/formatting/appending,
// float parsing at different bit sizes, and integer parsing in various bases.
// The trailing comments note the expected printed results.
func main() {
	fmt.Println(strconv.ParseBool("1")) //true
	fmt.Println(strconv.ParseBool("t"))
	fmt.Println(strconv.ParseBool("T"))
	fmt.Println(strconv.ParseBool("true"))
	fmt.Println(strconv.ParseBool("True"))
	fmt.Println(strconv.FormatBool(0 < 1))
	fmt.Println(strconv.FormatBool(0 > 1))
	// AppendBool writes the formatted bool onto an existing byte slice.
	rst := make([]byte, 0)
	rst = strconv.AppendBool(rst, 0 < 1)
	fmt.Printf("%s\n", rst)
	rst = strconv.AppendBool(rst, 0 > 1)
	fmt.Printf("%s\n", rst)
	// ParseFloat with bitSize 32 loses precision relative to bitSize 64.
	s := "0.12345678901234567890"
	f, err := strconv.ParseFloat(s, 32)
	fmt.Println(f, err)
	fmt.Println(float32(f), err)
	f, err = strconv.ParseFloat(s, 64)
	fmt.Println(f, err)
	// base: the numeric base; bitSize selects int8/int32/int64 (0 means int).
	fmt.Println(strconv.ParseInt("123", 10, 8))
	fmt.Println(strconv.ParseInt("12345", 10, 8)) // value out of range
	fmt.Println(strconv.ParseInt("2147483647", 10, 0)) // parsed as int
	fmt.Println(strconv.ParseInt("0xFF", 16, 0)) //invalid syntax: "0x" prefix not allowed when base is explicit
	fmt.Println(strconv.ParseInt("FF", 16, 0)) //255
	fmt.Println(strconv.ParseInt("0xFF", 0, 0)) //255: base 0 infers the base from the prefix
	fmt.Println(strconv.Atoi("2147483647"))
}
|
package main
import (
"fmt"
"strings"
)
// https://leetcode-cn.com/problems/text-justification/
//------------------------------------------------------------------------------
// fullJustify formats words into lines of exactly maxWidth characters
// (LeetCode 68, "Text Justification"). Interior lines are fully justified,
// distributing extra spaces left-to-right between words; the final line is
// left-justified and padded with trailing spaces.
func fullJustify(words []string, maxWidth int) []string {
	N := len(words)
	// FMT expands (via the nested Sprintf) to "%<n>s", producing n spaces
	// when formatting the empty string.
	const FMT = "%%%ds"
	var res []string
	// add emits one justified line for words[start..end], where total is the
	// combined length of those words without any spaces.
	add := func(start, end, total int) {
		line, spaces := "", maxWidth-total
		if start == end {
			// Single word on the line: pad the remainder with spaces.
			line = words[end] + fmt.Sprintf(fmt.Sprintf(FMT, spaces), "")
		} else {
			line += words[start]
			for i := start + 1; i <= end; i++ {
				// Divide the remaining spaces over the remaining gaps,
				// rounding up so the leftmost gaps get the extra space.
				count := spaces / (end - i + 1)
				if count*(end-i+1) < spaces {
					count++
				}
				line += fmt.Sprintf(fmt.Sprintf(FMT, count), "") + words[i]
				spaces -= count
			}
		}
		res = append(res, line)
	}
	// start: first word of the current line; last: index of the last word
	// already emitted; total: length of words accumulated for this line.
	start, last, total := 0, -1, 0
	for i := 0; i < N; i++ {
		l := len(words[i])
		// length = accumulated words + this word + one space per gap.
		if length := total + l + (i - start); length >= maxWidth {
			if start != i && length != maxWidth {
				// words[i] does not fit: flush the line without it.
				add(start, i-1, total)
				start, last, total = i, i-1, l
			} else {
				// words[i] exactly fills the line (or is alone on it).
				add(start, i, total+l)
				start, last, total = i+1, i, 0
			}
		} else {
			total += l
		}
	}
	// Remaining words form the left-justified last line.
	if s := strings.Join(words[last+1:], " "); s != "" {
		if len(s) < maxWidth {
			s += fmt.Sprintf(fmt.Sprintf(FMT, maxWidth-len(s)), "")
		}
		res = append(res, s)
	}
	return res
}
// main runs fullJustify over a set of sample inputs and prints every
// resulting line together with its length, framed by '|' so padding is
// visible.
func main() {
	testCases := []struct {
		words    []string
		maxWidth int
	}{
		{
			[]string{"a"},
			2,
		},
		{
			words: []string{"This", "is", "an", "example",
				"of", "text", "justification."},
			maxWidth: 16,
		},
		{
			words:    []string{"What", "must", "be", "acknowledgment", "shall", "be"},
			maxWidth: 16,
		},
		{
			words: []string{"Science", "is", "what", "we", "understand", "well",
				"enough", "to", "explain", "to", "a", "computer.",
				"Art", "is", "everything", "else", "we", "do"},
			maxWidth: 20,
		},
	}
	for i, tc := range testCases[0:] {
		fmt.Println("## case", i)
		for _, line := range fullJustify(tc.words, tc.maxWidth) {
			fmt.Println(len(line), "|", line, "|")
		}
	}
}
|
package structures
// Package defines an AS3 package, which doesn't really exist in JS. The package is not used at all
// to print out JS, but it may be needed to avoid name collisions, and therefore it is implemented,
// but don't expect any output from it soon.
// Package does validate that every class must be wrapped by a package, and that only classes can
// exist within packages, all other pieces of code (blocks, structures) will be considered as
// syntax errors.
type Package struct {
	name         string   // package name, accumulated token by token until "{" is seen
	classes      []*Class // classes declared inside this package
	nameComplete bool     // true once the opening "{" has been consumed
}
// Add incorporates structure into the package. A *Class is recorded as a
// child of the package; once the package name is complete, any non-class
// structure is wrapped in a Code block; before that, tokens are accumulated
// into the package name until the opening "{" is seen.
func (p *Package) Add(structure Structure) Structure {
	if c, isClass := structure.(*Class); isClass {
		// Parent the class to this package. The previous code passed c,
		// which made the class its own parent and left it detached from
		// the tree (compare the s.setParent(p) call below).
		structure.setParent(p)
		p.classes = append(p.classes, c)
		return c
	}
	if p.nameComplete {
		// Anything after the package name that is not a class is wrapped
		// in a generic Code block owned by this package.
		s := &Code{}
		s.Add(structure)
		s.setParent(p)
		return s
	}
	if structure.String() == "{" {
		p.nameComplete = true
		return p
	}
	// Still reading the package name: append this token to it.
	p.name += structure.String()
	return p
}
// Parent returns nil: packages sit at the root of the structure tree and
// have no parent.
func (p *Package) Parent() Structure {
	return nil
}
// Children returns the classes defined in this package as a []Structure.
// It returns nil when the package holds no classes, matching the original
// behavior, and pre-sizes the result otherwise to avoid append growth.
func (p *Package) Children() []Structure {
	if len(p.classes) == 0 {
		return nil
	}
	children := make([]Structure, 0, len(p.classes))
	for _, c := range p.classes {
		children = append(children, c)
	}
	return children
}
// setParent is a no-op: packages have no parent.
func (p *Package) setParent(structure Structure) {}
// String returns the package name accumulated so far.
func (p *Package) String() string {
	return p.name
}
|
package mgr
import (
"fmt"
"testing"
"github.com/go-xorm/xorm"
"github.com/jchprj/GeoOrderTest/cfg"
"github.com/go-sql-driver/mysql"
)
// TestCreateEngine verifies that the MySQL-backed xorm engine can be created
// from the test configuration and responds to a ping.
func TestCreateEngine(t *testing.T) {
	cfg.InitConfig("../docker/config.yml")
	err := initMySQL()
	if err != nil {
		// Fatalf: there is no point continuing without an initialized DB.
		t.Fatalf("init err %v", err)
	}
	engine, err := GetEngine()
	if err != nil {
		t.Fatalf("err %v", err)
	}
	if engine == nil {
		// Fatalf (not Errorf): the original continued and dereferenced a
		// nil engine in the Ping call below, panicking the test binary.
		t.Fatalf("engine err all nil")
	}
	if err := engine.Ping(); err != nil {
		t.Errorf("engine Ping %v", err)
	}
}
// Test__ builds a DSN directly via the mysql driver config and verifies a
// xorm engine can be created from it and pinged.
func Test__(t *testing.T) {
	conf := mysql.Config{
		User:                 "root",
		Passwd:               "123456",
		Net:                  "tcp",
		Addr:                 "localhost:3306",
		DBName:               "geoordertest",
		AllowNativePasswords: true,
		Params:               map[string]string{"charset": "utf8"},
	}
	dsn := conf.FormatDSN()
	fmt.Println(dsn)
	engine, err := xorm.NewEngine("mysql", dsn)
	if err != nil {
		t.Fatalf("err %v", err)
	}
	if engine == nil {
		// Fatalf (not Errorf): the original continued and dereferenced a
		// nil engine in the Ping call below, panicking the test binary.
		t.Fatalf("engine err all nil")
	}
	if err := engine.Ping(); err != nil {
		t.Errorf("engine Ping %v", err)
	}
}
|
package api
import (
"encoding/json"
utils "github.com/kevinbarbary/go-lms/utils"
"log"
"net/http"
)
// TokenInfo holds the token details decoded from the /auth/check response
// payload (see CheckToken). Field names mirror the JSON keys.
type TokenInfo struct {
	Expires          string // token expiry time as reported by the API
	Now              string // server's current time
	SecondsRemaining int64  // seconds until the token expires
	URL              string
	SiteID           string
	LoginID          string
}
// CheckToken asks the API (POST /auth/check) to validate the token given in
// check, authenticating the call with token/useragent/site. It returns the
// decoded TokenInfo, a timestamp derived from the response, any replacement
// token, the user string, and an error. Failures are logged and returned
// with zero values for the data fields.
func CheckToken(token, useragent, site, check string) (TokenInfo, Timestamp, string, string, error) {
	response, err := Call("POST", utils.Endpoint("/auth/check"), token, useragent, site, Params{"Token": check}, true)
	if err != nil {
		log.Print("CheckToken Error - invalid response from API call... ", err.Error())
		return TokenInfo{}, 0, "", "", err
	}
	data, e, help, t, newToken, user := extract(response)
	if e != "" {
		log.Print("CheckToken Error... ", e)
	}
	if help != "" {
		log.Print("CheckToken help... ", help)
	}
	// NOTE(review): t is scaled down by 10000 to produce the Timestamp used
	// by callers -- presumably a unit conversion; confirm against extract().
	timestamp := t / 10000
	if data == nil {
		log.Print("CheckToken... NO DATA")
		return TokenInfo{}, timestamp, newToken, user, err
	}
	// Round-trip the generic payload through JSON to populate TokenInfo.
	byteData, err := json.Marshal(data)
	if err != nil {
		log.Print("CheckToken - Marshal fail... ", err.Error())
		return TokenInfo{}, timestamp, newToken, user, err
	}
	var result TokenInfo
	err = json.Unmarshal(byteData, &result)
	if err != nil {
		log.Print("CheckToken - Unmarshal fail... ", err.Error())
		return TokenInfo{}, timestamp, newToken, user, err
	}
	return result, timestamp, newToken, user, err
}
// saveTokenCookie writes token into the "token" cookie for the given domain.
func saveTokenCookie(w http.ResponseWriter, token, domain string) {
	utils.SaveCookie(w, "token", token, domain)
}
// SaveToken stores token in the response's "token" cookie; an empty token
// is ignored so an existing cookie is never clobbered with nothing.
func SaveToken(w http.ResponseWriter, token, domain string) {
	if token == "" {
		return
	}
	saveTokenCookie(w, token, domain)
}
// GetToken returns the caller's token from the "token" cookie, falling back
// to requesting a fresh anonymous token via Auth when the cookie is missing
// or empty. The Auth error is deliberately ignored; an empty string may be
// returned in that case.
func GetToken(r *http.Request) string {
	token, err := utils.GetCookieValue(r, "token")
	if err != nil || token == "" {
		token, _ = Auth(utils.GetSite(r), "", "", r.UserAgent(), false)
	}
	return token
}
// TokenUser returns the sentinel "!" that the Course-Source API accepts in
// place of a LoginID to mean "the user embedded in the auth token"; the API
// also prefixes a token with this character when the token contains a user.
func TokenUser() string {
	const sentinel = "!"
	return sentinel
}
// GetSignedInTokenFlag returns the signed-in marker ("!") when token starts
// with it, and "" otherwise. // @todo - change the return type to rune ?
func GetSignedInTokenFlag(token string) string {
	flag := TokenUser()
	if len(token) > 0 && token[:1] == flag {
		// An "!"-prefixed token embeds a signed-in user.
		return flag
	}
	return ""
}
// CheckTokenSignedIn reports whether token embeds a signed-in user, which
// is indicated by the "!" prefix.
func CheckTokenSignedIn(token string) bool {
	return GetSignedInTokenFlag(token) == TokenUser()
}
// CheckSignedIn reports whether the request carries a "token" cookie whose
// value embeds a signed-in user.
func CheckSignedIn(r *http.Request) bool {
	token, err := utils.GetCookieValue(r, "token")
	return err == nil && CheckTokenSignedIn(token)
}
// GetTokenIfSignedIn returns the request's token cookie value only when it
// embeds a signed-in user; otherwise it returns the empty string.
func GetTokenIfSignedIn(r *http.Request) string {
	token, err := utils.GetCookieValue(r, "token")
	if err == nil && CheckTokenSignedIn(token) {
		return token
	}
	return ""
}
|
package main
import (
"fmt"
"net"
)
// main listens for UDP datagrams on 192.168.25.18:2000 and dumps each
// received payload to stdout, panicking on any read error.
func main() {
	// Buffer sized for 20ms of 16-bit samples at 44.1kHz (1764 bytes).
	p := make([]byte, 44100*0.02*2)
	addr := net.UDPAddr{
		Port: 2000,
		IP:   net.ParseIP("192.168.25.18"),
	}
	ser, err := net.ListenUDP("udp", &addr)
	// Fail fast if the socket could not be opened: the original ignored
	// this error and would have dereferenced a nil conn in the loop below.
	chk(err)
	fmt.Printf("WUT?")
	for {
		_, _, err = ser.ReadFromUDP(p)
		fmt.Println("__nop__")
		fmt.Println(p)
		chk(err)
		// The original `if err != nil { fmt.Println(err) }` after chk was
		// unreachable (chk panics on error) and has been removed.
	}
}
// chk panics when err is non-nil and is a no-op otherwise.
func chk(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package main
import "fmt"
// main demonstrates Go branching: a cascaded if/else chain over a numeric
// value, followed by a value switch that uses fallthrough and a multi-value
// case.
func main() {
	x := 100
	if x < 5 {
		fmt.Println("X é menor que 5")
	} else if x == 5 {
		fmt.Println("X é igual 5")
	} else if x > 5 && x <= 10 {
		fmt.Println("X é maior que 5")
	} else {
		fmt.Println("X é maior que 10")
	}
	y := "b"
	switch y {
	case "a":
		fmt.Println("Y é a letra A")
	case "b":
		fmt.Println("Y é a letra B")
		// fallthrough forces the next case body to run as well.
		fallthrough
	case "c", "d", "e":
		fmt.Println("Y é a letra C, D ou E")
	default:
		fmt.Println("Y é qualquer outra coisa")
	}
}
|
package dataloaders
//go:generate go run github.com/vektah/dataloaden UserLoader string *github.com/GlitchyGlitch/typinger/models.User
//go:generate go run github.com/vektah/dataloaden ArticlesLoader string []*github.com/GlitchyGlitch/typinger/models.Article
// Loaders bundles the generated dataloaders used to batch and cache
// repository lookups.
type Loaders struct {
	UserByIDs         *UserLoader     // loads users keyed by ID
	ArticlesByUserIDs *ArticlesLoader // loads article slices keyed by user ID
}
// newLoaders builds a Loaders set backed by the given repositories.
func newLoaders(rep repos) *Loaders {
	return &Loaders{
		UserByIDs:         newUserByIDs(rep),
		ArticlesByUserIDs: newArticlesByUserIDs(rep),
	}
}
|
package encoding
import (
"fmt"
gkeys "github.com/number571/go-cryptopro/gost_r_34_10_2012"
"github.com/number571/tendermint/crypto"
"github.com/number571/tendermint/crypto/gost256"
"github.com/number571/tendermint/crypto/gost512"
"github.com/number571/tendermint/libs/json"
pc "github.com/number571/tendermint/proto/tendermint/crypto"
)
// init registers the GOST public-key protobuf types with the JSON type
// registry so they can be (de)serialized with stable type tags.
func init() {
	json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey")
	json.RegisterType((*pc.PublicKey_Gost512)(nil), "tendermint.crypto.PublicKey_Gost512")
	json.RegisterType((*pc.PublicKey_Gost256)(nil), "tendermint.crypto.PublicKey_Gost256")
}
// PubKeyToProto converts a crypto.PubKey into its protobuf representation.
// Only GOST 512 and GOST 256 keys are supported; any other type yields an
// error together with a zero-valued PublicKey.
func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) {
	switch key := k.(type) {
	case gost512.PubKey:
		return pc.PublicKey{
			Sum: &pc.PublicKey_Gost512{
				Gost512: key.Bytes(),
			},
		}, nil
	case gost256.PubKey:
		return pc.PublicKey{
			Sum: &pc.PublicKey_Gost256{
				Gost256: key.Bytes(),
			},
		}, nil
	default:
		return pc.PublicKey{}, fmt.Errorf("toproto: key type %v is not supported", k)
	}
}
// PubKeyFromProto converts a protobuf PublicKey back into a crypto.PubKey.
// It validates the raw key length for each supported GOST variant before
// loading it; unknown variants yield an error.
func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) {
	switch k := k.Sum.(type) {
	case *pc.PublicKey_Gost512:
		if len(k.Gost512) != gost512.PubKeySize {
			return nil, fmt.Errorf("invalid size for PubKeyGost512. Got %d, expected %d",
				len(k.Gost512), gost512.PubKeySize)
		}
		pk, err := gkeys.LoadPubKey(k.Gost512)
		if err != nil {
			return nil, err
		}
		return gost512.PubKey(pk.(gkeys.PubKey512)), nil
	case *pc.PublicKey_Gost256:
		if len(k.Gost256) != gost256.PubKeySize {
			return nil, fmt.Errorf("invalid size for PubKeyGost256. Got %d, expected %d",
				len(k.Gost256), gost256.PubKeySize)
		}
		pk, err := gkeys.LoadPubKey(k.Gost256)
		if err != nil {
			return nil, err
		}
		return gost256.PubKey(pk.(gkeys.PubKey256)), nil
	default:
		return nil, fmt.Errorf("fromproto: key type %v is not supported", k)
	}
}
|
/*
* Copyright 2018-present Open Networking Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package device
import (
"context"
"errors"
"io"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/protobuf/ptypes/empty"
"github.com/opencord/voltha-go/db/model"
"github.com/opencord/voltha-go/rw_core/core/device/event"
"github.com/opencord/voltha-go/rw_core/utils"
"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
"github.com/opencord/voltha-lib-go/v4/pkg/log"
"github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/voltha"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// LogicalManager manages the set of logical devices and their agents.
type LogicalManager struct {
	*event.Manager
	logicalDeviceAgents            sync.Map // logical-device-id -> *LogicalAgent
	deviceMgr                      *Manager
	kafkaICProxy                   kafka.InterContainerProxy
	dbPath                         *model.Path
	ldProxy                        *model.Proxy
	defaultTimeout                 time.Duration
	logicalDevicesLoadingLock      sync.RWMutex          // guards logicalDeviceLoadingInProgress
	logicalDeviceLoadingInProgress map[string][]chan int // per-device channels closed when a load completes
}
// addLogicalDeviceAgentToMap registers agent under its logical device ID,
// keeping any agent that is already registered. LoadOrStore makes the
// check-and-insert atomic; the original Load-then-Store pair could race
// with a concurrent registration and overwrite an existing agent.
func (ldMgr *LogicalManager) addLogicalDeviceAgentToMap(agent *LogicalAgent) {
	ldMgr.logicalDeviceAgents.LoadOrStore(agent.logicalDeviceID, agent)
}
// getLogicalDeviceAgent returns the logical device agent. If the device is not in memory then the device will
// be loaded from dB and a logical device agent created to managed it.
// It returns nil when the device does not exist or is not yet ready.
func (ldMgr *LogicalManager) getLogicalDeviceAgent(ctx context.Context, logicalDeviceID string) *LogicalAgent {
	logger.Debugw(ctx, "get-logical-device-agent", log.Fields{"logical-device-id": logicalDeviceID})
	agent, ok := ldMgr.logicalDeviceAgents.Load(logicalDeviceID)
	if ok {
		lda := agent.(*LogicalAgent)
		if lda.logicalDevice == nil {
			// This can happen when an agent for the logical device has been created but the logical device
			// itself is not ready for action as it is waiting for switch and port capabilities from the
			// relevant adapter. In such a case prevent any request aimed at that logical device.
			logger.Debugf(ctx, "Logical device %s is not ready to serve requests", logicalDeviceID)
			return nil
		}
		return lda
	}
	// Try to load into memory - loading will also create the logical device agent
	if err := ldMgr.load(ctx, logicalDeviceID); err == nil {
		if agent, ok = ldMgr.logicalDeviceAgents.Load(logicalDeviceID); ok {
			return agent.(*LogicalAgent)
		}
	}
	return nil
}
// deleteLogicalDeviceAgent removes the agent for logicalDeviceID from the
// in-memory map; it is a no-op if no such agent is cached.
func (ldMgr *LogicalManager) deleteLogicalDeviceAgent(logicalDeviceID string) {
	ldMgr.logicalDeviceAgents.Delete(logicalDeviceID)
}
// GetLogicalDevice provides a cloned, most up-to-date logical device. If the
// device is not in memory it will be fetched from the dB; codes.NotFound is
// returned when no such device exists.
func (ldMgr *LogicalManager) GetLogicalDevice(ctx context.Context, id *voltha.ID) (*voltha.LogicalDevice, error) {
	logger.Debugw(ctx, "getlogicalDevice", log.Fields{"logical-device-id": id})
	if agent := ldMgr.getLogicalDeviceAgent(ctx, id.Id); agent != nil {
		return agent.GetLogicalDeviceReadOnly(ctx)
	}
	return nil, status.Errorf(codes.NotFound, "%s", id)
}
// ListLogicalDevices returns the list of all logical devices known to the
// cluster, read directly from the persistence proxy.
func (ldMgr *LogicalManager) ListLogicalDevices(ctx context.Context, _ *empty.Empty) (*voltha.LogicalDevices, error) {
	logger.Debug(ctx, "ListAllLogicalDevices")
	var logicalDevices []*voltha.LogicalDevice
	if err := ldMgr.ldProxy.List(ctx, &logicalDevices); err != nil {
		logger.Errorw(ctx, "failed-to-list-logical-devices-from-cluster-proxy", log.Fields{"error": err})
		return nil, err
	}
	return &voltha.LogicalDevices{Items: logicalDevices}, nil
}
// createLogicalDevice creates a logical device (and its agent) for the given
// root device, links the root device to it, and starts the agent
// asynchronously. It returns the new logical device ID.
func (ldMgr *LogicalManager) createLogicalDevice(ctx context.Context, device *voltha.Device) (*string, error) {
	logger.Debugw(ctx, "creating-logical-device", log.Fields{"device-id": device.Id})
	// Sanity check
	if !device.Root {
		return nil, errors.New("device-not-root")
	}
	// Create a logical device agent - the logical device Id is based on the mac address of the device
	// For now use the serial number - it may contain any combination of alphabetic characters and numbers,
	// with length varying from eight characters to a maximum of 14 characters. Mac Address is part of oneof
	// in the Device model. May need to be moved out.
	id := utils.CreateLogicalDeviceID()
	sn := strings.Replace(device.MacAddress, ":", "", -1)
	// NOTE(review): this guards id, which comes from CreateLogicalDeviceID(),
	// but the error message refers to the MAC address (sn); presumably the
	// check was meant for sn -- confirm before changing.
	if id == "" {
		logger.Errorw(ctx, "mac-address-not-set", log.Fields{"device-id": device.Id, "serial-number": sn})
		return nil, errors.New("mac-address-not-set")
	}
	logger.Debugw(ctx, "logical-device-id", log.Fields{"logical-device-id": id})
	agent := newLogicalAgent(ctx, id, sn, device.Id, ldMgr, ldMgr.deviceMgr, ldMgr.dbPath, ldMgr.ldProxy, ldMgr.defaultTimeout)
	ldMgr.addLogicalDeviceAgentToMap(agent)
	// Update the root device with the logical device Id reference
	if err := ldMgr.deviceMgr.setParentID(ctx, device, id); err != nil {
		logger.Errorw(ctx, "failed-setting-parent-id", log.Fields{"logical-device-id": id, "device-id": device.Id})
		return nil, err
	}
	go func() {
		//TODO: either wait for the agent to be started before returning, or
		// implement locks in the agent to ensure request are not processed before start() is complete
		err := agent.start(log.WithSpanFromContext(context.Background(), ctx), false)
		if err != nil {
			logger.Errorw(ctx, "unable-to-create-the-logical-device", log.Fields{"error": err})
			ldMgr.deleteLogicalDeviceAgent(id)
		}
	}()
	logger.Debug(ctx, "creating-logical-device-ends")
	return &id, nil
}
// stopManagingLogicalDeviceWithDeviceID stops the management of the logical device. This implies removal of any
// reference of this logical device in cache. The device Id is passed as param because the logical device may already
// have been removed from the model. This function returns the logical device Id if found, or "" otherwise.
func (ldMgr *LogicalManager) stopManagingLogicalDeviceWithDeviceID(ctx context.Context, id string) string {
	logger.Infow(ctx, "stop-managing-logical-device", log.Fields{"device-id": id})
	// Go over the list of logical device agents to find the one which has rootDeviceId as id
	var ldID = ""
	ldMgr.logicalDeviceAgents.Range(func(key, value interface{}) bool {
		ldAgent := value.(*LogicalAgent)
		if ldAgent.rootDeviceID == id {
			logger.Infow(ctx, "stopping-logical-device-agent", log.Fields{"logical-device-id": key})
			if err := ldAgent.stop(ctx); err != nil {
				logger.Errorw(ctx, "failed-to-stop-LDAgent", log.Fields{"error": err})
				// Returning false aborts the Range iteration on failure.
				return false
			}
			ldID = key.(string)
			ldMgr.logicalDeviceAgents.Delete(ldID)
		}
		return true
	})
	return ldID
}
// getLogicalDeviceFromModel fetches the logical device record for lDeviceID
// from the data model, returning codes.NotFound when it does not exist.
func (ldMgr *LogicalManager) getLogicalDeviceFromModel(ctx context.Context, lDeviceID string) (*voltha.LogicalDevice, error) {
	ld := &voltha.LogicalDevice{}
	have, err := ldMgr.ldProxy.Get(ctx, lDeviceID, ld)
	if err != nil {
		logger.Errorw(ctx, "failed-to-get-logical-devices-from-cluster-proxy", log.Fields{"error": err})
		return nil, err
	}
	if !have {
		return nil, status.Error(codes.NotFound, lDeviceID)
	}
	return ld, nil
}
// load makes the logical device agent for lDeviceID resident in memory,
// creating and starting the agent from the persisted model when needed.
// Concurrent callers for the same device are coalesced: the first caller
// performs the load while the others wait on a channel that is closed when
// the load finishes (successfully or not).
func (ldMgr *LogicalManager) load(ctx context.Context, lDeviceID string) error {
	if lDeviceID == "" {
		return nil
	}
	// Add a lock to prevent two concurrent calls from loading the same device twice
	ldMgr.logicalDevicesLoadingLock.Lock()
	if _, exist := ldMgr.logicalDeviceLoadingInProgress[lDeviceID]; !exist {
		if ldAgent, _ := ldMgr.logicalDeviceAgents.Load(lDeviceID); ldAgent == nil {
			ldMgr.logicalDeviceLoadingInProgress[lDeviceID] = []chan int{make(chan int, 1)}
			ldMgr.logicalDevicesLoadingLock.Unlock()
			// Announce completion to any waiters on every exit path. The
			// original code skipped this on the agent.start error return
			// below, leaving concurrent waiters blocked forever on channels
			// that were never closed.
			defer func() {
				ldMgr.logicalDevicesLoadingLock.Lock()
				if v, ok := ldMgr.logicalDeviceLoadingInProgress[lDeviceID]; ok {
					for _, ch := range v {
						close(ch)
					}
					delete(ldMgr.logicalDeviceLoadingInProgress, lDeviceID)
				}
				ldMgr.logicalDevicesLoadingLock.Unlock()
			}()
			if _, err := ldMgr.getLogicalDeviceFromModel(ctx, lDeviceID); err == nil {
				logger.Debugw(ctx, "loading-logical-device", log.Fields{"lDeviceId": lDeviceID})
				agent := newLogicalAgent(ctx, lDeviceID, "", "", ldMgr, ldMgr.deviceMgr, ldMgr.dbPath, ldMgr.ldProxy, ldMgr.defaultTimeout)
				if err := agent.start(ctx, true); err != nil {
					return err
				}
				ldMgr.logicalDeviceAgents.Store(agent.logicalDeviceID, agent)
			} else {
				logger.Debugw(ctx, "logicalDevice not in model", log.Fields{"lDeviceId": lDeviceID})
			}
		} else {
			ldMgr.logicalDevicesLoadingLock.Unlock()
		}
	} else {
		// Another goroutine is already loading this device: register a
		// channel and wait for it to be closed when that load completes.
		ch := make(chan int, 1)
		ldMgr.logicalDeviceLoadingInProgress[lDeviceID] = append(ldMgr.logicalDeviceLoadingInProgress[lDeviceID], ch)
		ldMgr.logicalDevicesLoadingLock.Unlock()
		<-ch
	}
	if _, exist := ldMgr.logicalDeviceAgents.Load(lDeviceID); exist {
		return nil
	}
	return status.Errorf(codes.Aborted, "Error loading logical device %s", lDeviceID)
}
// deleteLogicalDevice stops and removes the logical device agent associated
// with the given root device. It is a no-op (success) when no agent exists.
func (ldMgr *LogicalManager) deleteLogicalDevice(ctx context.Context, device *voltha.Device) error {
	logger.Debugw(ctx, "deleting-logical-device", log.Fields{"device-id": device.Id})
	// Sanity check
	if !device.Root {
		return errors.New("device-not-root")
	}
	logDeviceID := device.ParentId
	if agent := ldMgr.getLogicalDeviceAgent(ctx, logDeviceID); agent != nil {
		// Stop the logical device agent
		if err := agent.stop(ctx); err != nil {
			logger.Errorw(ctx, "failed-to-stop-agent", log.Fields{"error": err})
			return err
		}
		//Remove the logical device agent from the Map
		ldMgr.deleteLogicalDeviceAgent(logDeviceID)
	}
	logger.Debug(ctx, "deleting-logical-device-ends")
	return nil
}
// getLogicalDeviceID returns the logical device ID that owns the given
// device. For a root device this is its ParentId; for a child device the
// parent device is looked up and its ParentId is returned.
func (ldMgr *LogicalManager) getLogicalDeviceID(ctx context.Context, device *voltha.Device) (*string, error) {
	// Device can either be a parent or a child device
	if device.Root {
		// Parent device. The ID of a parent device is the logical device ID
		return &device.ParentId, nil
	}
	// Device is child device
	// retrieve parent device using child device ID
	// TODO: return (string, have) instead of *string
	// also: If not root device, just return device.parentID instead of loading the parent device.
	if parentDevice := ldMgr.deviceMgr.getParentDevice(ctx, device); parentDevice != nil {
		return &parentDevice.ParentId, nil
	}
	return nil, status.Errorf(codes.NotFound, "%s", device.Id)
}
// getLogicalDeviceIDFromDeviceID resolves the logical device ID owning the
// device identified by deviceID.
func (ldMgr *LogicalManager) getLogicalDeviceIDFromDeviceID(ctx context.Context, deviceID string) (*string, error) {
	device, err := ldMgr.deviceMgr.getDeviceReadOnly(ctx, deviceID)
	if err != nil {
		return nil, err
	}
	return ldMgr.getLogicalDeviceID(ctx, device)
}
// ListLogicalDeviceFlows returns the flows currently installed on the
// logical device identified by id.
func (ldMgr *LogicalManager) ListLogicalDeviceFlows(ctx context.Context, id *voltha.ID) (*openflow_13.Flows, error) {
	logger.Debugw(ctx, "ListLogicalDeviceFlows", log.Fields{"logical-device-id": id.Id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, id.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", id.Id)
	}
	flows := agent.listLogicalDeviceFlows()
	items := make([]*openflow_13.OfpFlowStats, 0, len(flows))
	for _, flow := range flows {
		items = append(items, flow)
	}
	return &openflow_13.Flows{Items: items}, nil
}
// ListLogicalDeviceFlowGroups returns the flow groups currently configured
// on the logical device identified by id.
func (ldMgr *LogicalManager) ListLogicalDeviceFlowGroups(ctx context.Context, id *voltha.ID) (*openflow_13.FlowGroups, error) {
	logger.Debugw(ctx, "ListLogicalDeviceFlowGroups", log.Fields{"logical-device-id": id.Id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, id.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", id.Id)
	}
	groups := agent.listLogicalDeviceGroups()
	items := make([]*openflow_13.OfpGroupEntry, 0, len(groups))
	for _, group := range groups {
		items = append(items, group)
	}
	return &openflow_13.FlowGroups{Items: items}, nil
}
// ListLogicalDevicePorts returns the ports of the logical device identified
// by id.
func (ldMgr *LogicalManager) ListLogicalDevicePorts(ctx context.Context, id *voltha.ID) (*voltha.LogicalPorts, error) {
	logger.Debugw(ctx, "ListLogicalDevicePorts", log.Fields{"logical-device-id": id.Id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, id.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", id.Id)
	}
	ports := agent.listLogicalDevicePorts(ctx)
	items := make([]*voltha.LogicalPort, 0, len(ports))
	for _, port := range ports {
		items = append(items, port)
	}
	return &voltha.LogicalPorts{Items: items}, nil
}
// GetLogicalDevicePort returns the details of a single logical device port,
// identified by the (logical device ID, port ID) pair in lPortID.
func (ldMgr *LogicalManager) GetLogicalDevicePort(ctx context.Context, lPortID *voltha.LogicalPortId) (*voltha.LogicalPort, error) {
	// Get the logical device where this port is attached
	agent := ldMgr.getLogicalDeviceAgent(ctx, lPortID.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", lPortID.Id)
	}
	// Linear scan over the device's ports for the matching port ID.
	for _, port := range agent.listLogicalDevicePorts(ctx) {
		if port.Id == lPortID.PortId {
			return port, nil
		}
	}
	return nil, status.Errorf(codes.NotFound, "%s-%s", lPortID.Id, lPortID.PortId)
}
// updateLogicalPort sets up a logical port on the logical device based on the device port
// information, if needed. It silently succeeds when the logical device does
// not exist yet (ports are created once the logical device is ready).
func (ldMgr *LogicalManager) updateLogicalPort(ctx context.Context, device *voltha.Device, devicePorts map[uint32]*voltha.Port, port *voltha.Port) error {
	ldID, err := ldMgr.getLogicalDeviceID(ctx, device)
	if err != nil || *ldID == "" {
		// This is not an error as the logical device may not have been created at this time. In such a case,
		// the ports will be created when the logical device is ready.
		return nil
	}
	if agent := ldMgr.getLogicalDeviceAgent(ctx, *ldID); agent != nil {
		if err := agent.updateLogicalPort(ctx, device, devicePorts, port); err != nil {
			return err
		}
	}
	return nil
}
// deleteLogicalPorts removes the logical ports associated with the given
// device from its owning logical device, if that logical device is loaded.
func (ldMgr *LogicalManager) deleteLogicalPorts(ctx context.Context, deviceID string) error {
	logger.Debugw(ctx, "deleting-logical-ports", log.Fields{"device-id": deviceID})
	// Get logical port
	ldID, err := ldMgr.getLogicalDeviceIDFromDeviceID(ctx, deviceID)
	if err != nil {
		return err
	}
	if agent := ldMgr.getLogicalDeviceAgent(ctx, *ldID); agent != nil {
		if err = agent.deleteLogicalPorts(ctx, deviceID); err != nil {
			logger.Warnw(ctx, "delete-logical-ports-failed", log.Fields{"logical-device-id": *ldID})
			return err
		}
	}
	logger.Debug(ctx, "deleting-logical-ports-ends")
	return nil
}
// setupUNILogicalPorts creates the UNI logical ports for a child (non-root)
// device on the logical device owned by its parent.
func (ldMgr *LogicalManager) setupUNILogicalPorts(ctx context.Context, childDevice *voltha.Device, childDevicePorts map[uint32]*voltha.Port) error {
	logger.Debugw(ctx, "setupUNILogicalPorts", log.Fields{"childDeviceId": childDevice.Id, "parentDeviceId": childDevice.ParentId, "current-data": childDevice})
	// Sanity check
	if childDevice.Root {
		return errors.New("Device-root")
	}
	// Get the logical device id parent device
	parentID := childDevice.ParentId
	logDeviceID := ldMgr.deviceMgr.GetParentDeviceID(ctx, parentID)
	logger.Debugw(ctx, "setupUNILogicalPorts", log.Fields{"logDeviceId": logDeviceID, "parentId": parentID})
	if parentID == "" || logDeviceID == "" {
		return errors.New("device-in-invalid-state")
	}
	if agent := ldMgr.getLogicalDeviceAgent(ctx, logDeviceID); agent != nil {
		if err := agent.setupUNILogicalPorts(ctx, childDevice, childDevicePorts); err != nil {
			return err
		}
	}
	return nil
}
// deleteAllLogicalPorts removes every logical port belonging to device from
// its owning logical device. It succeeds silently when the logical device
// agent is not loaded.
func (ldMgr *LogicalManager) deleteAllLogicalPorts(ctx context.Context, device *voltha.Device) error {
	logger.Debugw(ctx, "deleteAllLogicalPorts", log.Fields{"device-id": device.Id})
	// Resolve the logical device that owns this device.
	ldID, err := ldMgr.getLogicalDeviceID(ctx, device)
	if err != nil {
		logger.Warnw(ctx, "no-logical-device-found", log.Fields{"device-id": device.Id, "error": err})
		return err
	}
	agent := ldMgr.getLogicalDeviceAgent(ctx, *ldID)
	if agent == nil {
		return nil
	}
	return agent.deleteAllLogicalPorts(ctx)
}
// updatePortState propagates an operational-status change for port portNo of
// device deviceID to the owning logical device, if one is loaded.
func (ldMgr *LogicalManager) updatePortState(ctx context.Context, deviceID string, portNo uint32, state voltha.OperStatus_Types) error {
	logger.Debugw(ctx, "updatePortState", log.Fields{"device-id": deviceID, "state": state, "portNo": portNo})
	// Resolve the logical device that owns this device.
	ldID, err := ldMgr.getLogicalDeviceIDFromDeviceID(ctx, deviceID)
	if err != nil {
		logger.Warnw(ctx, "no-logical-device-found", log.Fields{"device-id": deviceID, "error": err})
		return err
	}
	agent := ldMgr.getLogicalDeviceAgent(ctx, *ldID)
	if agent == nil {
		return nil
	}
	return agent.updatePortState(ctx, portNo, state)
}
// UpdateLogicalDeviceFlowTable applies a flow-mod to the flow table of the
// logical device identified in the update.
func (ldMgr *LogicalManager) UpdateLogicalDeviceFlowTable(ctx context.Context, flow *openflow_13.FlowTableUpdate) (*empty.Empty, error) {
	logger.Debugw(ctx, "UpdateLogicalDeviceFlowTable", log.Fields{"logical-device-id": flow.Id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, flow.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", flow.Id)
	}
	return &empty.Empty{}, agent.updateFlowTable(ctx, flow.FlowMod)
}
// UpdateLogicalDeviceMeterTable sends a meter-mod request to the logical
// device agent and waits for the response.
func (ldMgr *LogicalManager) UpdateLogicalDeviceMeterTable(ctx context.Context, meter *openflow_13.MeterModUpdate) (*empty.Empty, error) {
	logger.Debugw(ctx, "UpdateLogicalDeviceMeterTable", log.Fields{"logical-device-id": meter.Id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, meter.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", meter.Id)
	}
	return &empty.Empty{}, agent.updateMeterTable(ctx, meter.MeterMod)
}
// ListLogicalDeviceMeters returns the meters currently configured on the
// given logical device, or NotFound when no agent exists for it.
func (ldMgr *LogicalManager) ListLogicalDeviceMeters(ctx context.Context, id *voltha.ID) (*openflow_13.Meters, error) {
	logger.Debugw(ctx, "ListLogicalDeviceMeters", log.Fields{"logical-device-id": id.Id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, id.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", id.Id)
	}
	meters := agent.listLogicalDeviceMeters()
	// Collect the agent's meters into the response slice.
	items := make([]*openflow_13.OfpMeterEntry, 0, len(meters))
	for _, meter := range meters {
		items = append(items, meter)
	}
	return &openflow_13.Meters{Items: items}, nil
}
// UpdateLogicalDeviceFlowGroupTable applies a group-mod to the group table of
// the logical device identified in the update. Returns NotFound when no agent
// is registered for that logical device.
func (ldMgr *LogicalManager) UpdateLogicalDeviceFlowGroupTable(ctx context.Context, flow *openflow_13.FlowGroupTableUpdate) (*empty.Empty, error) {
	logger.Debugw(ctx, "UpdateGroupTable", log.Fields{"logical-device-id": flow.Id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, flow.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", flow.Id)
	}
	updateErr := agent.updateGroupTable(ctx, flow.GroupMod)
	return &empty.Empty{}, updateErr
}
// EnableLogicalDevicePort enables the logical port named in id. Returns
// NotFound when the logical device has no agent and InvalidArgument when the
// port ID is not a valid number.
func (ldMgr *LogicalManager) EnableLogicalDevicePort(ctx context.Context, id *voltha.LogicalPortId) (*empty.Empty, error) {
	logger.Debugw(ctx, "EnableLogicalDevicePort", log.Fields{"logical-device-id": id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, id.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", id.Id)
	}
	// The wire format carries the port number as a string; convert it.
	port, err := strconv.ParseUint(id.PortId, 10, 32)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "failed to parse %s as a number", id.PortId)
	}
	return &empty.Empty{}, agent.enableLogicalPort(ctx, uint32(port))
}
// DisableLogicalDevicePort disables the logical port named in id. Returns
// NotFound when the logical device has no agent and InvalidArgument when the
// port ID is not a valid number.
func (ldMgr *LogicalManager) DisableLogicalDevicePort(ctx context.Context, id *voltha.LogicalPortId) (*empty.Empty, error) {
	logger.Debugw(ctx, "DisableLogicalDevicePort", log.Fields{"logical-device-id": id})
	agent := ldMgr.getLogicalDeviceAgent(ctx, id.Id)
	if agent == nil {
		return nil, status.Errorf(codes.NotFound, "%s", id.Id)
	}
	// The wire format carries the port number as a string; convert it.
	port, err := strconv.ParseUint(id.PortId, 10, 32)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "failed to parse %s as a number", id.PortId)
	}
	return &empty.Empty{}, agent.disableLogicalPort(ctx, uint32(port))
}
// packetIn hands an upstream packet to the owning logical device agent.
// Always returns nil; a missing agent is only logged.
func (ldMgr *LogicalManager) packetIn(ctx context.Context, logicalDeviceID string, port uint32, transactionID string, packet []byte) error {
	logger.Debugw(ctx, "packetIn", log.Fields{"logical-device-id": logicalDeviceID, "port": port})
	if agent := ldMgr.getLogicalDeviceAgent(ctx, logicalDeviceID); agent != nil {
		agent.packetIn(ctx, port, transactionID, packet)
	} else {
		// Errorw keeps the fields structured; the previous Error call logged
		// the log.Fields map as a plain argument.
		logger.Errorw(ctx, "logical-device-not-exist", log.Fields{"logical-device-id": logicalDeviceID})
	}
	return nil
}
// StreamPacketsOut receives packet-out messages from the gRPC stream and
// forwards each to the logical device agent that owns it. The loop ends when
// the stream context is done or the client closes the stream (EOF).
func (ldMgr *LogicalManager) StreamPacketsOut(packets voltha.VolthaService_StreamPacketsOutServer) error {
	ctx := context.Background()
	logger.Debugw(ctx, "StreamPacketsOut-request", log.Fields{"packets": packets})
loop:
	for {
		select {
		case <-packets.Context().Done():
			logger.Infow(ctx, "StreamPacketsOut-context-done", log.Fields{"packets": packets, "error": packets.Context().Err()})
			break loop
		default:
		}

		packet, err := packets.Recv()

		if err == io.EOF {
			logger.Debugw(ctx, "Received-EOF", log.Fields{"packets": packets})
			break loop
		}
		if err != nil {
			logger.Errorw(ctx, "Failed to receive packet out", log.Fields{"error": err})
			continue
		}

		if agent := ldMgr.getLogicalDeviceAgent(packets.Context(), packet.Id); agent != nil {
			agent.packetOut(packets.Context(), packet.PacketOut)
		} else {
			// Errorw, not Errorf: the message has no format verbs and the
			// previous Errorf call rendered log.Fields as a stray argument
			// (%!(EXTRA ...)) instead of structured fields.
			logger.Errorw(ctx, "No logical device agent present", log.Fields{"logical-device-id": packet.Id})
		}
	}

	logger.Debugw(ctx, "StreamPacketsOut-request-done", log.Fields{"packets": packets})
	return nil
}
|
package templatecode
const (
	// templateController is the boilerplate emitted for a generated
	// controller. The literal identifier "target" acts as a placeholder that
	// the generator is expected to substitute with the real controller name;
	// the string content must not be reformatted.
	templateController = `package controllers
// target
type target struct {
	BaseController
}
// Index() 页面
func (this *target) Index() {
	this.SimpleView()
}
// Add() 添加
func (this *target) Add() {
}
// Get() 查询单条记录
func (this *target) Get() {
}
// All() 查询多条记录
func (this *target) All() {
}
// Update() 更新
func (this *target) Update() {
}
// Delete() 删除记录
func (this *target) Delete() {
}
`
)
|
package main
import (
"fmt"
)
// main reads t test cases; each case is a slice of s integers. For every case
// it prints the number of zeros strictly between the first and the last
// non-zero elements (0 when the slice has fewer than two non-zero elements).
func main() {
	var t int
	fmt.Scan(&t)
	for ; t > 0; t-- {
		var s int
		fmt.Scan(&s)
		v := make([]int, s)
		for i := 0; i < s; i++ {
			fmt.Scan(&v[i])
		}
		// Find the first and last non-zero entries. The bounds check must
		// come BEFORE the element access: the original order (v[p1]==0 &&
		// p1<s) indexes past the slice and panics when all entries are zero.
		p1 := 0
		for p1 < s && v[p1] == 0 {
			p1++
		}
		p2 := s - 1
		for p2 >= 0 && v[p2] == 0 {
			p2--
		}
		// Count zeros strictly inside the non-zero span.
		ans := 0
		for i := p1; i < p2; i++ {
			if v[i] == 0 {
				ans++
			}
		}
		fmt.Println(ans)
	}
}
|
package router
import (
"github.com/ArakiTakaki/golangWebLesson/controllers/api"
"github.com/gin-gonic/gin"
)
// apiSet registers the JSON API endpoints on the given router group.
func apiSet(r *gin.RouterGroup) {
	// Route table: path -> handler, registered in declaration order.
	for _, route := range []struct {
		path    string
		handler gin.HandlerFunc
	}{
		{"/items", api.NavItems},
		{"/meta", api.PageData},
	} {
		r.GET(route.path, route.handler)
	}
}
|
package main
import (
"fmt"
)
// main prints a concatenated greeting followed by the playground banner.
func main() {
	message := "hello" + "hello2" + "hello3"
	fmt.Println(message)
	fmt.Println("Hello, playground")
}
|
package store
import (
mystore "bookstore/store"
factory "bookstore/store/factory"
"sync"
)
// Registration happens in init, so merely importing this package makes the
// in-memory implementation available from the factory under the "mem" key.
func init() {
	factory.Register("mem", &MemStore{
		books: make(map[string]*mystore.Book),
	})
}
// MemStore is an in-memory book store. The embedded RWMutex guards the books
// map against concurrent access; all methods take the appropriate lock.
type MemStore struct {
	sync.RWMutex
	books map[string]*mystore.Book
}
// Create stores a copy of book keyed by its Id. It returns mystore.ErrExist
// when a book with the same Id is already present.
func (ms *MemStore) Create(book *mystore.Book) error {
	ms.Lock()
	defer ms.Unlock()

	if _, exists := ms.books[book.Id]; exists {
		return mystore.ErrExist
	}

	// Store a copy so later mutation of the caller's struct cannot leak in.
	stored := *book
	ms.books[book.Id] = &stored
	return nil
}
// Update merges the non-zero fields of book into the stored record with the
// same Id. It returns mystore.ErrNotFound when no such record exists.
func (ms *MemStore) Update(book *mystore.Book) error {
	ms.Lock()
	defer ms.Unlock()

	current, exists := ms.books[book.Id]
	if !exists {
		return mystore.ErrNotFound
	}

	// Work on a copy and only overwrite fields the caller actually set.
	updated := *current
	if book.Name != "" {
		updated.Name = book.Name
	}
	if book.Authors != nil {
		updated.Authors = book.Authors
	}
	if book.Press != "" {
		updated.Press = book.Press
	}
	ms.books[book.Id] = &updated
	return nil
}
// Get returns a copy of the book with the given id, or mystore.ErrNotFound
// when it is absent.
func (ms *MemStore) Get(id string) (mystore.Book, error) {
	ms.RLock()
	defer ms.RUnlock()

	book, exists := ms.books[id]
	if !exists {
		return mystore.Book{}, mystore.ErrNotFound
	}
	return *book, nil
}
// Delete removes the book with the given id, returning mystore.ErrNotFound
// when it is absent.
func (ms *MemStore) Delete(id string) error {
	ms.Lock()
	defer ms.Unlock()

	if _, exists := ms.books[id]; !exists {
		return mystore.ErrNotFound
	}
	delete(ms.books, id)
	return nil
}
// GetAll returns a snapshot copy of every book in the store. The error result
// is always nil and exists to satisfy the store interface.
func (ms *MemStore) GetAll() ([]mystore.Book, error) {
	ms.RLock()
	// RUnlock directly: the original `ms.RLocker().Unlock()` is functionally
	// equivalent but a needlessly indirect spelling, and inconsistent with
	// the Get method above.
	defer ms.RUnlock()
	allBooks := make([]mystore.Book, 0, len(ms.books))
	for _, book := range ms.books {
		allBooks = append(allBooks, *book)
	}
	return allBooks, nil
}
|
package helper
import (
"math/rand"
"time"
)
// Charset is a set of characters random strings are drawn from.
type Charset string

const (
	// DefaultCharset covers lowercase letters and digits.
	DefaultCharset = Charset("abcdefghijklmnopqrstuvwxyz1234567890")
	// NumricCharset covers digits only. The name keeps its original
	// (misspelled) form because renaming an exported constant would break
	// callers.
	NumricCharset = Charset("1234567890")
)

func init() {
	// Seed the global PRNG once at startup.
	rand.Seed(time.Now().Unix())
}

// RandomStr returns a string of length size whose characters are drawn
// uniformly (with repetition) from cs. It returns "" for a non-positive size
// or an empty charset; the previous version panicked on an empty charset
// (rand.Intn(0)) and built the result with quadratic string concatenation.
func (cs Charset) RandomStr(size int) string {
	charset := string(cs)
	if size <= 0 || len(charset) == 0 {
		return ""
	}
	buf := make([]byte, size)
	for i := range buf {
		buf[i] = charset[rand.Intn(len(charset))]
	}
	return string(buf)
}
|
package app
import (
"github.com/spf13/cobra"
rt "github.com/k82cn/myoci/pkg/runtime"
)
// runFlags accumulates the command-line options for "run"; its fields are
// bound by setRunFlags and read by the Run callback.
var runFlags rt.RunFlags
// RunCommand builds the cobra "run" command, which runs an image as a
// container with the first argument as the command and the rest as its args.
func RunCommand() *cobra.Command {
	runCmd := &cobra.Command{
		Use:     "run",
		Short:   "Run an image as container",
		Long:    "Run an image as container",
		Example: `myoci run -it /bin/sh`,
		// Require at least the command to execute; the unguarded args[0]
		// access below would otherwise panic when no argument is given.
		Args: cobra.MinimumNArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			runFlags.Command = args[0]
			if len(args) > 1 {
				runFlags.Args = args[1:]
			}
			rt.Run(&runFlags)
		},
	}

	setRunFlags(runCmd)

	return runCmd
}
// setRunFlags binds the run command's flags to runFlags. The previous usage
// strings were placeholders ("true" and ""), which rendered as unhelpful help
// text.
func setRunFlags(cmd *cobra.Command) {
	cmd.Flags().BoolVarP(&runFlags.Terminal, "terminal", "t", true, "Allocate a terminal for the container")
	cmd.Flags().BoolVarP(&runFlags.Interactive, "interactive", "i", true, "Keep the container's STDIN open")
	cmd.Flags().StringVarP(&runFlags.MemoryLimit, "memory", "m", "2048m", "The cgroup of memory")
}
|
// Package fail - state.go Determines state of servers from responses
package main
import "net/http"
import "time"
// Shared HTTP client with a 10-second timeout so polls cannot hang forever.
var client = &http.Client{Timeout: 10 * time.Second}
// States maps the numeric state produced by getState to its display label:
// 0 = RED (error or non-200 response), 1 = GREEN (no flood features),
// 2 = BLUE (flood features present).
var States = map[int]string{
	0: "RED",
	1: "GREEN",
	2: "BLUE",
}
// getState polls a single server for its flood state: 0 on error or a
// non-200 response, 1 when no flood features are reported, 2 when at least
// one flood feature is present.
func getState(server string) int {
	reports, err := GetJSON(server)

	// Any failure (transport error or non-200) is the default state 0.
	if err != nil || reports.StatusCode != 200 {
		return 0
	}
	if len(reports.Result.Features) == 0 {
		return 1
	}
	return 2
}
// PollState queries every server in turn and reduces the individual results
// to a single label from States.
func PollState(servers []string) string {
	state := 0
	for _, name := range servers {
		val := getState(name)
		if val == 0 {
			// NOTE(review): a 0 result resets the state, but a later server
			// with a higher value raises it again, so a single RED result is
			// not sticky — confirm this aggregation is intended.
			state = 0 // always set state 0
		} else if val > state {
			state = val // raise state
		}
	}
	return States[state]
}
|
package model
import (
"errors"
"fmt"
"time"
"walletApi/src/common"
"github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql"
)
// AppVersion is the ORM model for an application release, persisted in the
// t_app_version table (see TableName).
type AppVersion struct {
	// Tag fixed: was `from:"id"`, a typo the form binder ignores; every other
	// field uses `form:"..."`.
	Id            int64     `orm:"auto" form:"id" description:"主键ID"`
	AppName       string    `orm:"size(80)" valid:"Required" form:"appName" description:"应用名称"`
	Version       string    `orm:"size(20)" valid:"Required" form:"version" description:"版本"`
	PlatType      int       `orm:"size(10)" valid:"Required" form:"platType" description:"平台类型 1、android 2、IOS"`
	UpgradeType   int       `orm:"size(10)" valid:"Required" form:"upgradeType" description:"升级类型 1 、强制升级 2、可忽略"`
	IsCurrent     int       `orm:"size(10)" valid:"Required" form:"isCurrent" description:"是否是当前版本 1、表示是 2、表示不是"`
	AppAddr       string    `orm:"size(100)" form:"appAddr" description:"下载地址"`
	AppDesc       string    `orm:"size(100)" form:"appDesc" description:"应用描述"`
	CreateTime    time.Time `orm:"auto_now_add;type(datetime)" description:"创建时间"`
	CreateTimeFmt string    `orm:"-" description:"创建时间"`
}
// Version is the JSON view of an AppVersion returned by list endpoints; Size
// is populated outside the ORM (orm:"-").
type Version struct {
	Id            int64     `json:"id" description:"记录ID"`
	AppName       string    `json:"appName" description:"app名称"`
	Version       string    `json:"version" description:"版本号"`
	PlatType      int       `json:"platType" description:"平台类型 1、andorid 2、Ios"`
	UpgradeType   int       `json:"upgradeType" description:"升级类型 1 、强制升级 2、可忽略"`
	IsCurrent     int       `json:"isCurrent" description:"是否是当前版本 1、表示是 2、表示不是"`
	AppAddr       string    `json:"appAddr" description:"下载地址"`
	Size          string    `orm:"-" json:"size" description:"文件大小"`
	AppDesc       string    `json:"appDesc" description:"app描述"`
	CreateTime    time.Time `json:"createTime" description:"创建时间"`
	CreateTimeFmt string    `json:"createTimeFmt"`
}
// TableName maps AppVersion to the t_app_version table for the ORM.
func (u *AppVersion) TableName() string {
	return "t_app_version"
}
// AddVersion inserts a new app version. Duplicate (appName, platType,
// version) combinations are rejected. When the new record is marked current,
// existing versions for the same app/platform are demoted inside a
// transaction.
func AddVersion(version *AppVersion) error {
	o := orm.NewOrm()
	ver := new(AppVersion)
	// QueryRow also errors when no row matches, so the error is deliberately
	// ignored; a duplicate is detected through the populated struct instead.
	o.Raw("SELECT * FROM t_app_version WHERE app_name = ? AND plat_type=? AND version=?", version.AppName, version.PlatType, version.Version).QueryRow(ver)
	if ver != nil && ver.AppName != "" {
		return errors.New("版本信息已存在,不能重复添加!")
	}
	beginTx := false
	if version.IsCurrent == 1 {
		// Begin can fail; the previous code ignored its error and would then
		// roll back / commit a transaction that never started.
		if err := o.Begin(); err != nil {
			return err
		}
		beginTx = true
		// Demote whatever version is currently marked as current.
		if _, err := o.Raw("UPDATE t_app_version SET is_current = 2 WHERE app_name = ? AND plat_type=?", version.AppName, version.PlatType).Exec(); err != nil {
			o.Rollback()
			return err
		}
	}
	_, err := o.Insert(version)
	if beginTx {
		if err != nil {
			o.Rollback()
		} else {
			o.Commit()
		}
	}
	return err
}
// UpdateVersion updates an existing app version. A different record with the
// same (appName, platType, version) is rejected. When the record is promoted
// to current, its siblings are demoted inside a transaction.
func UpdateVersion(version *AppVersion) error {
	o := orm.NewOrm()
	ver := new(AppVersion)
	// QueryRow also errors when no row matches, so the error is deliberately
	// ignored; a conflict is detected through the populated struct instead.
	o.Raw("SELECT * FROM t_app_version WHERE id<>? AND app_name = ? AND plat_type=? AND version=?", version.Id, version.AppName, version.PlatType, version.Version).QueryRow(ver)
	if ver != nil && ver.AppName != "" {
		return errors.New("修改失败,版本号已存在!")
	}
	beginTx := false
	if version.IsCurrent == 1 {
		// Begin can fail; the previous code ignored its error and would then
		// roll back / commit a transaction that never started.
		if err := o.Begin(); err != nil {
			return err
		}
		beginTx = true
		// Demote every other version of the same app/platform.
		if _, err := o.Raw("UPDATE t_app_version SET is_current = 2 WHERE id<>? AND app_name = ? AND plat_type=?", version.Id, version.AppName, version.PlatType).Exec(); err != nil {
			o.Rollback()
			return err
		}
	}
	_, err := o.Update(version, "AppName", "Version", "PlatType", "UpgradeType", "IsCurrent", "AppAddr", "AppDesc")
	if beginTx {
		if err != nil {
			o.Rollback()
		} else {
			o.Commit()
		}
	}
	return err
}
// DeleteVersion removes the version record with the given id. The version
// currently in use (IsCurrent == 1) cannot be deleted.
func DeleteVersion(id int64) error {
	o := orm.NewOrm()
	version := AppVersion{Id: id}
	if err := o.Read(&version); err != nil {
		return err
	}
	if version.IsCurrent == 1 {
		return fmt.Errorf("不能删除正在使用的版本!")
	}
	_, err := o.Delete(&version)
	return err
}
// GetCurrentVersion returns the record marked current for the given app name
// and platform type.
func GetCurrentVersion(appName string, platType int64) (*AppVersion, error) {
	version := new(AppVersion)
	row := orm.NewOrm().Raw("SELECT * FROM t_app_version WHERE is_current=1 AND app_name = ? AND plat_type=?", appName, platType)
	if err := row.QueryRow(version); err != nil {
		return nil, err
	}
	return version, nil
}
// GetVersionInfo returns the full version record with the given id.
func (v *AppVersion) GetVersionInfo(id string) (*AppVersion, error) {
	version := new(AppVersion)
	row := orm.NewOrm().Raw("SELECT * FROM t_app_version WHERE id = ?", id)
	if err := row.QueryRow(version); err != nil {
		return nil, err
	}
	return version, nil
}
// List returns a page of versions, newest id first, as {"total": n, "data":
// []AppVersion}. When search looks like a version number it filters by exact
// version; otherwise it matches app name or description case-insensitively.
// On any error (or an empty table) total is 0 and data is nil.
func (v *AppVersion) List(pageSize, pageNo int, search string) map[string]interface{} {
	o := orm.NewOrm()
	qs := o.QueryTable("t_app_version")
	if search != "" {
		cond := orm.NewCondition()
		if common.IsValidVersion(search) {
			cond = cond.And("Version__exact", search)
		} else {
			cond = cond.And("AppName__icontains", search).Or("AppDesc__icontains", search)
		}
		qs = qs.SetCond(cond)
	}

	// Empty / error result shape shared by every failure path.
	resultMap := map[string]interface{}{"total": 0, "data": nil}

	cnt, err := qs.Count()
	if err != nil || cnt == 0 {
		return resultMap
	}

	var versions []AppVersion
	if _, err := qs.Limit(pageSize, (pageNo-1)*pageSize).OrderBy("-Id").All(&versions); err != nil {
		return resultMap
	}
	for i := range versions {
		versions[i].CreateTimeFmt = common.FormatTime(versions[i].CreateTime)
	}
	resultMap["total"] = cnt
	resultMap["data"] = versions
	return resultMap
}
// ListByPlatType returns a page of versions for one platform type, newest
// first, as {"total": n, "data": []Version}. On any error or an empty result
// set, total is 0 and data is nil.
func ListByPlatType(pageSize, pageNo, platType int) map[string]interface{} {
	o := orm.NewOrm()
	cond := orm.NewCondition().And("PlatType__exact", platType)
	qs := o.QueryTable("t_app_version").SetCond(cond)

	// Empty / error result shape shared by every failure path.
	resultMap := map[string]interface{}{"total": 0, "data": nil}

	cnt, err := qs.Count()
	if err != nil || cnt == 0 {
		return resultMap
	}

	var versions []AppVersion
	if _, err := qs.Limit(pageSize, (pageNo-1)*pageSize).OrderBy("-CreateTime").All(&versions); err != nil || len(versions) == 0 {
		return resultMap
	}

	// Project the ORM records onto the JSON view type.
	arr := make([]Version, 0, len(versions))
	for _, ob := range versions {
		arr = append(arr, Version{
			Id:            ob.Id,
			AppName:       ob.AppName,
			Version:       ob.Version,
			AppAddr:       ob.AppAddr,
			AppDesc:       ob.AppDesc,
			CreateTime:    ob.CreateTime,
			UpgradeType:   ob.UpgradeType,
			CreateTimeFmt: common.FormatTime(ob.CreateTime),
			IsCurrent:     ob.IsCurrent,
			PlatType:      ob.PlatType,
		})
	}
	resultMap["total"] = cnt
	resultMap["data"] = arr
	return resultMap
}
// Model registration: beego's ORM requires every model to be registered in
// init before it can be used.
func init() {
	// 需要在init中注册定义的model
	orm.RegisterModel(new(AppVersion))
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"reflect"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilpointer "k8s.io/utils/pointer"
)
// addEndpoints records endpoints as newly created (no previous object) in the
// proxier's change tracker.
func (proxier *FakeProxier) addEndpoints(endpoints *v1.Endpoints) {
	proxier.endpointsChanges.Update(nil, endpoints)
}
// updateEndpoints records a transition from oldEndpoints to endpoints in the
// proxier's change tracker.
func (proxier *FakeProxier) updateEndpoints(oldEndpoints, endpoints *v1.Endpoints) {
	proxier.endpointsChanges.Update(oldEndpoints, endpoints)
}
// deleteEndpoints records endpoints as deleted (no new object) in the
// proxier's change tracker.
func (proxier *FakeProxier) deleteEndpoints(endpoints *v1.Endpoints) {
	proxier.endpointsChanges.Update(endpoints, nil)
}
// TestGetLocalEndpointIPs verifies that getLocalEndpointIPs returns, per
// service, only the IPs of endpoints flagged IsLocal, deduplicated across
// ports of the same service.
func TestGetLocalEndpointIPs(t *testing.T) {
	testCases := []struct {
		endpointsMap EndpointsMap
		expected     map[types.NamespacedName]sets.String
	}{{
		// Case[0]: nothing
		endpointsMap: EndpointsMap{},
		expected:     map[types.NamespacedName]sets.String{},
	}, {
		// Case[1]: unnamed port
		endpointsMap: EndpointsMap{
			makeServicePortName("ns1", "ep1", "", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false},
			},
		},
		expected: map[types.NamespacedName]sets.String{},
	}, {
		// Case[2]: unnamed port local
		endpointsMap: EndpointsMap{
			makeServicePortName("ns1", "ep1", "", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true},
			},
		},
		expected: map[types.NamespacedName]sets.String{
			{Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.1"),
		},
	}, {
		// Case[3]: named local and non-local ports for the same IP.
		endpointsMap: EndpointsMap{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false},
				&BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false},
				&BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true},
			},
		},
		expected: map[types.NamespacedName]sets.String{
			{Namespace: "ns1", Name: "ep1"}: sets.NewString("1.1.1.2"),
		},
	}, {
		// Case[4]: named local and non-local ports for different IPs.
		endpointsMap: EndpointsMap{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true},
				&BaseEndpointInfo{Endpoint: "2.2.2.22:22", IsLocal: true},
			},
			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "2.2.2.3:23", IsLocal: true},
			},
			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true},
				&BaseEndpointInfo{Endpoint: "4.4.4.5:44", IsLocal: false},
			},
			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolTCP): []Endpoint{
				&BaseEndpointInfo{Endpoint: "4.4.4.6:45", IsLocal: true},
			},
		},
		expected: map[types.NamespacedName]sets.String{
			{Namespace: "ns2", Name: "ep2"}: sets.NewString("2.2.2.2", "2.2.2.22", "2.2.2.3"),
			{Namespace: "ns4", Name: "ep4"}: sets.NewString("4.4.4.4", "4.4.4.6"),
		},
	}}

	for tci, tc := range testCases {
		// outputs
		localIPs := tc.endpointsMap.getLocalEndpointIPs()

		if !reflect.DeepEqual(localIPs, tc.expected) {
			t.Errorf("[%d] expected %#v, got %#v", tci, tc.expected, localIPs)
		}
	}
}
// makeTestEndpoints builds a v1.Endpoints object with the given namespace and
// name, then applies eptFunc so the caller can populate its subsets.
func makeTestEndpoints(namespace, name string, eptFunc func(*v1.Endpoints)) *v1.Endpoints {
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
	}
	eptFunc(endpoints)
	return endpoints
}
// This is a coarse test, but it offers some modicum of confidence as the code is evolved.
// TestEndpointsToEndpointsMap checks the conversion of a single v1.Endpoints
// object into the proxy's per-service-port endpoint map, including filtering
// of addresses that do not match the tracker's IP family.
func TestEndpointsToEndpointsMap(t *testing.T) {
	epTracker := NewEndpointChangeTracker("test-hostname", nil, nil, nil, false)

	trueVal := true
	falseVal := false

	testCases := []struct {
		desc         string
		newEndpoints *v1.Endpoints
		expected     map[ServicePortName][]*BaseEndpointInfo
		isIPv6Mode   *bool
	}{
		{
			desc:         "nothing",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {}),
			expected:     map[ServicePortName][]*BaseEndpointInfo{},
		},
		{
			desc: "no changes, unnamed port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "",
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:11", IsLocal: false},
				},
			},
		},
		{
			desc: "no changes, named port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "port",
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "port", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:11", IsLocal: false},
				},
			},
		},
		{
			desc: "new port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}},
						Ports: []v1.EndpointPort{{
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:11", IsLocal: false},
				},
			},
		},
		{
			desc:         "remove port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {}),
			expected:     map[ServicePortName][]*BaseEndpointInfo{},
		},
		{
			desc: "new IP and port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}, {
							IP: "2.2.2.2",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "p1",
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}, {
							Name:     "p2",
							Port:     22,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "p1", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:11", IsLocal: false},
					{Endpoint: "2.2.2.2:11", IsLocal: false},
				},
				makeServicePortName("ns1", "ep1", "p2", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:22", IsLocal: false},
					{Endpoint: "2.2.2.2:22", IsLocal: false},
				},
			},
		},
		{
			desc: "remove IP and port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "p1",
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "p1", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:11", IsLocal: false},
				},
			},
		},
		{
			desc: "rename port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "p2",
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "p2", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:11", IsLocal: false},
				},
			},
		},
		{
			desc: "renumber port",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "p1",
							Port:     22,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "p1", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:22", IsLocal: false},
				},
			},
		},
		{
			desc: "should omit IPv6 address in IPv4 mode",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}, {
							IP: "2001:db8:85a3:0:0:8a2e:370:7334",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "p1",
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}, {
							Name:     "p2",
							Port:     22,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "p1", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:11", IsLocal: false},
				},
				makeServicePortName("ns1", "ep1", "p2", v1.ProtocolTCP): {
					{Endpoint: "1.1.1.1:22", IsLocal: false},
				},
			},
			isIPv6Mode: &falseVal,
		},
		{
			desc: "should omit IPv4 address in IPv6 mode",
			newEndpoints: makeTestEndpoints("ns1", "ep1", func(ept *v1.Endpoints) {
				ept.Subsets = []v1.EndpointSubset{
					{
						Addresses: []v1.EndpointAddress{{
							IP: "1.1.1.1",
						}, {
							IP: "2001:db8:85a3:0:0:8a2e:370:7334",
						}},
						Ports: []v1.EndpointPort{{
							Name:     "p1",
							Port:     11,
							Protocol: v1.ProtocolTCP,
						}, {
							Name:     "p2",
							Port:     22,
							Protocol: v1.ProtocolTCP,
						}},
					},
				}
			}),
			expected: map[ServicePortName][]*BaseEndpointInfo{
				makeServicePortName("ns1", "ep1", "p1", v1.ProtocolTCP): {
					{Endpoint: "[2001:db8:85a3:0:0:8a2e:370:7334]:11", IsLocal: false},
				},
				makeServicePortName("ns1", "ep1", "p2", v1.ProtocolTCP): {
					{Endpoint: "[2001:db8:85a3:0:0:8a2e:370:7334]:22", IsLocal: false},
				},
			},
			isIPv6Mode: &trueVal,
		},
	}

	for _, tc := range testCases {
		// All cases share one tracker; only the IP-family mode varies.
		epTracker.isIPv6Mode = tc.isIPv6Mode
		// outputs
		newEndpoints := epTracker.endpointsToEndpointsMap(tc.newEndpoints)

		if len(newEndpoints) != len(tc.expected) {
			t.Errorf("[%s] expected %d new, got %d: %v", tc.desc, len(tc.expected), len(newEndpoints), spew.Sdump(newEndpoints))
		}
		for x := range tc.expected {
			if len(newEndpoints[x]) != len(tc.expected[x]) {
				t.Errorf("[%s] expected %d endpoints for %v, got %d", tc.desc, len(tc.expected[x]), x, len(newEndpoints[x]))
			} else {
				for i := range newEndpoints[x] {
					ep := newEndpoints[x][i].(*BaseEndpointInfo)
					if !(reflect.DeepEqual(*ep, *(tc.expected[x][i]))) {
						t.Errorf("[%s] expected new[%v][%d] to be %v, got %v", tc.desc, x, i, tc.expected[x][i], *ep)
					}
				}
			}
		}
	}
}
func TestUpdateEndpointsMap(t *testing.T) {
var nodeName = testHostname
emptyEndpoint := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{}
}
unnamedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
unnamedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPort := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortRenamed := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11-2",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortRenumbered := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}}
}
namedPortsLocalNoLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsets := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsWithLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsMultiplePortsLocal := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}},
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsIPsPorts1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}, {
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.3",
}, {
IP: "1.1.1.4",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p13",
Port: 13,
Protocol: v1.ProtocolUDP,
}, {
Name: "p14",
Port: 14,
Protocol: v1.ProtocolUDP,
}},
}}
}
multipleSubsetsIPsPorts2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.1",
}, {
IP: "2.2.2.2",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p21",
Port: 21,
Protocol: v1.ProtocolUDP,
}, {
Name: "p22",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore2 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.2",
NodeName: &nodeName,
}, {
IP: "2.2.2.22",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p22",
Port: 22,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "2.2.2.3",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p23",
Port: 23,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexBefore4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}, {
IP: "4.4.4.5",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.6",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p45",
Port: 45,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter1 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.1",
}, {
IP: "1.1.1.11",
}},
Ports: []v1.EndpointPort{{
Name: "p11",
Port: 11,
Protocol: v1.ProtocolUDP,
}},
}, {
Addresses: []v1.EndpointAddress{{
IP: "1.1.1.2",
}},
Ports: []v1.EndpointPort{{
Name: "p12",
Port: 12,
Protocol: v1.ProtocolUDP,
}, {
Name: "p122",
Port: 122,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter3 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "3.3.3.3",
}},
Ports: []v1.EndpointPort{{
Name: "p33",
Port: 33,
Protocol: v1.ProtocolUDP,
}},
}}
}
complexAfter4 := func(ept *v1.Endpoints) {
ept.Subsets = []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{
IP: "4.4.4.4",
NodeName: &nodeName,
}},
Ports: []v1.EndpointPort{{
Name: "p44",
Port: 44,
Protocol: v1.ProtocolUDP,
}},
}}
}
testCases := []struct {
// previousEndpoints and currentEndpoints are used to call appropriate
// handlers OnEndpoints* (based on whether corresponding values are nil
// or non-nil) and must be of equal length.
name string
previousEndpoints []*v1.Endpoints
currentEndpoints []*v1.Endpoints
oldEndpoints map[ServicePortName][]*BaseEndpointInfo
expectedResult map[ServicePortName][]*BaseEndpointInfo
expectedStaleEndpoints []ServiceEndpoint
expectedStaleServiceNames map[ServicePortName]bool
expectedHealthchecks map[types.NamespacedName]int
}{{
name: "empty",
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "no change, unnamed port",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "no change, named port, local",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: true},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: true},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
name: "no change, multiple subsets",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.2:12", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.2:12", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "no change, multiple subsets, multiple ports, local",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsMultiplePortsLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:12", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{Endpoint: "1.1.1.3:13", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:12", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{Endpoint: "1.1.1.3:13", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
name: "no change, multiple endpoints, subsets, IPs, and ports",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsIPsPorts1),
makeTestEndpoints("ns2", "ep2", multipleSubsetsIPsPorts2),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
{Endpoint: "1.1.1.2:11", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:12", IsLocal: false},
{Endpoint: "1.1.1.2:12", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{Endpoint: "1.1.1.3:13", IsLocal: false},
{Endpoint: "1.1.1.4:13", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
{Endpoint: "1.1.1.3:14", IsLocal: false},
{Endpoint: "1.1.1.4:14", IsLocal: true},
},
makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
{Endpoint: "2.2.2.1:21", IsLocal: false},
{Endpoint: "2.2.2.2:21", IsLocal: true},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{Endpoint: "2.2.2.1:22", IsLocal: false},
{Endpoint: "2.2.2.2:22", IsLocal: true},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
{Endpoint: "1.1.1.2:11", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:12", IsLocal: false},
{Endpoint: "1.1.1.2:12", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
{Endpoint: "1.1.1.3:13", IsLocal: false},
{Endpoint: "1.1.1.4:13", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
{Endpoint: "1.1.1.3:14", IsLocal: false},
{Endpoint: "1.1.1.4:14", IsLocal: true},
},
makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
{Endpoint: "2.2.2.1:21", IsLocal: false},
{Endpoint: "2.2.2.2:21", IsLocal: true},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{Endpoint: "2.2.2.1:22", IsLocal: false},
{Endpoint: "2.2.2.2:22", IsLocal: true},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 2,
makeNSN("ns2", "ep2"): 1,
},
}, {
name: "add an Endpoints",
previousEndpoints: []*v1.Endpoints{
nil,
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: true},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
name: "remove an Endpoints",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPortLocal),
},
currentEndpoints: []*v1.Endpoints{
nil,
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: true},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{},
expectedStaleEndpoints: []ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "add an IP and port",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
{Endpoint: "1.1.1.2:11", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:12", IsLocal: false},
{Endpoint: "1.1.1.2:12", IsLocal: true},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
name: "remove an IP and port",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortsLocalNoLocal),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
{Endpoint: "1.1.1.2:11", IsLocal: true},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:12", IsLocal: false},
{Endpoint: "1.1.1.2:12", IsLocal: true},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{{
Endpoint: "1.1.1.2:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}, {
Endpoint: "1.1.1.1:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}, {
Endpoint: "1.1.1.2:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "add a subset",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsetsWithLocal),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.2:12", IsLocal: true},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns1", "ep1"): 1,
},
}, {
name: "remove a subset",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", multipleSubsets),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.2:12", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{{
Endpoint: "1.1.1.2:12",
ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "rename a port",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenamed),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "renumber a port",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPort),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", namedPortRenumbered),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:22", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{{
Endpoint: "1.1.1.1:11",
ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[ServicePortName]bool{},
expectedHealthchecks: map[types.NamespacedName]int{},
}, {
name: "complex add and remove",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexBefore1),
makeTestEndpoints("ns2", "ep2", complexBefore2),
nil,
makeTestEndpoints("ns4", "ep4", complexBefore4),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", complexAfter1),
nil,
makeTestEndpoints("ns3", "ep3", complexAfter3),
makeTestEndpoints("ns4", "ep4", complexAfter4),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
{Endpoint: "2.2.2.2:22", IsLocal: true},
{Endpoint: "2.2.2.22:22", IsLocal: true},
},
makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
{Endpoint: "2.2.2.3:23", IsLocal: true},
},
makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
{Endpoint: "4.4.4.4:44", IsLocal: true},
{Endpoint: "4.4.4.5:44", IsLocal: true},
},
makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
{Endpoint: "4.4.4.6:45", IsLocal: true},
},
},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
{Endpoint: "1.1.1.11:11", IsLocal: false},
},
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
{Endpoint: "1.1.1.2:12", IsLocal: false},
},
makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
{Endpoint: "1.1.1.2:122", IsLocal: false},
},
makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
{Endpoint: "3.3.3.3:33", IsLocal: false},
},
makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
{Endpoint: "4.4.4.4:44", IsLocal: true},
},
},
expectedStaleEndpoints: []ServiceEndpoint{{
Endpoint: "2.2.2.2:22",
ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
}, {
Endpoint: "2.2.2.22:22",
ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
}, {
Endpoint: "2.2.2.3:23",
ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
}, {
Endpoint: "4.4.4.5:44",
ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
}, {
Endpoint: "4.4.4.6:45",
ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
}},
expectedStaleServiceNames: map[ServicePortName]bool{
makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{
makeNSN("ns4", "ep4"): 1,
},
}, {
name: "change from 0 endpoint address to 1 unnamed port",
previousEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", emptyEndpoint),
},
currentEndpoints: []*v1.Endpoints{
makeTestEndpoints("ns1", "ep1", unnamedPort),
},
oldEndpoints: map[ServicePortName][]*BaseEndpointInfo{},
expectedResult: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): {
{Endpoint: "1.1.1.1:11", IsLocal: false},
},
},
expectedStaleEndpoints: []ServiceEndpoint{},
expectedStaleServiceNames: map[ServicePortName]bool{
makeServicePortName("ns1", "ep1", "", v1.ProtocolUDP): true,
},
expectedHealthchecks: map[types.NamespacedName]int{},
},
}
for tci, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fp := newFakeProxier()
fp.hostname = nodeName
// First check that after adding all previous versions of endpoints,
// the fp.oldEndpoints is as we expect.
for i := range tc.previousEndpoints {
if tc.previousEndpoints[i] != nil {
fp.addEndpoints(tc.previousEndpoints[i])
}
}
fp.endpointsMap.Update(fp.endpointsChanges)
compareEndpointsMapsStr(t, fp.endpointsMap, tc.oldEndpoints)
// Now let's call appropriate handlers to get to state we want to be.
if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
return
}
for i := range tc.previousEndpoints {
prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
switch {
case prev == nil:
fp.addEndpoints(curr)
case curr == nil:
fp.deleteEndpoints(prev)
default:
fp.updateEndpoints(prev, curr)
}
}
result := fp.endpointsMap.Update(fp.endpointsChanges)
newMap := fp.endpointsMap
compareEndpointsMapsStr(t, newMap, tc.expectedResult)
if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) {
t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints)
}
for _, x := range tc.expectedStaleEndpoints {
found := false
for _, stale := range result.StaleEndpoints {
if stale == x {
found = true
break
}
}
if !found {
t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints)
}
}
if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) {
t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames)
}
for svcName := range tc.expectedStaleServiceNames {
found := false
for _, stale := range result.StaleServiceNames {
if stale == svcName {
found = true
}
}
if !found {
t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames)
}
}
if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) {
t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize)
}
})
}
}
// TestLastChangeTriggerTime verifies that EndpointsMap.Update gathers the
// EndpointsLastChangeTriggerTime annotation values of all pending endpoint
// changes into result.LastChangeTriggerTimes, keyed by the endpoints'
// namespaced name, and that deleted endpoints contribute no trigger times.
func TestLastChangeTriggerTime(t *testing.T) {
	// Four strictly increasing instants, one second apart, used as the
	// trigger-time annotation values across the scenarios below.
	t0 := time.Date(2018, 01, 01, 0, 0, 0, 0, time.UTC)
	t1 := t0.Add(time.Second)
	t2 := t1.Add(time.Second)
	t3 := t2.Add(time.Second)
	// createEndpoints builds a minimal Endpoints object (one address, one
	// port) and stamps it with the given trigger time in RFC3339Nano format.
	createEndpoints := func(namespace, name string, triggerTime time.Time) *v1.Endpoints {
		e := makeTestEndpoints(namespace, name, func(ept *v1.Endpoints) {
			ept.Subsets = []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{IP: "1.1.1.1"}},
				Ports:     []v1.EndpointPort{{Port: 11}},
			}}
		})
		e.Annotations[v1.EndpointsLastChangeTriggerTime] = triggerTime.Format(time.RFC3339Nano)
		return e
	}
	// createName is shorthand for constructing a NamespacedName key.
	createName := func(namespace, name string) types.NamespacedName {
		return types.NamespacedName{Namespace: namespace, Name: name}
	}
	// modifyEndpoints returns a deep copy with the port bumped (so the copy
	// differs from the original) and a fresh trigger-time annotation.
	modifyEndpoints := func(endpoints *v1.Endpoints, triggerTime time.Time) *v1.Endpoints {
		e := endpoints.DeepCopy()
		e.Subsets[0].Ports[0].Port++
		e.Annotations[v1.EndpointsLastChangeTriggerTime] = triggerTime.Format(time.RFC3339Nano)
		return e
	}
	// Each scenario mutates a fresh FakeProxier; expected maps the namespaced
	// name to the trigger times that Update should report for it.
	testCases := []struct {
		name     string
		scenario func(fp *FakeProxier)
		expected map[types.NamespacedName][]time.Time
	}{
		{
			name: "Single addEndpoints",
			scenario: func(fp *FakeProxier) {
				e := createEndpoints("ns", "ep1", t0)
				fp.addEndpoints(e)
			},
			expected: map[types.NamespacedName][]time.Time{createName("ns", "ep1"): {t0}},
		},
		{
			name: "addEndpoints then updatedEndpoints",
			scenario: func(fp *FakeProxier) {
				e := createEndpoints("ns", "ep1", t0)
				fp.addEndpoints(e)
				e1 := modifyEndpoints(e, t1)
				fp.updateEndpoints(e, e1)
			},
			// Both the add and the update trigger times are reported.
			expected: map[types.NamespacedName][]time.Time{createName("ns", "ep1"): {t0, t1}},
		},
		{
			name: "Add two endpoints then modify one",
			scenario: func(fp *FakeProxier) {
				e1 := createEndpoints("ns", "ep1", t1)
				fp.addEndpoints(e1)
				e2 := createEndpoints("ns", "ep2", t2)
				fp.addEndpoints(e2)
				e11 := modifyEndpoints(e1, t3)
				fp.updateEndpoints(e1, e11)
			},
			expected: map[types.NamespacedName][]time.Time{createName("ns", "ep1"): {t1, t3}, createName("ns", "ep2"): {t2}},
		},
		{
			name: "Endpoints without annotation set",
			scenario: func(fp *FakeProxier) {
				e := createEndpoints("ns", "ep1", t1)
				// Removing the annotation means no trigger time is recorded.
				delete(e.Annotations, v1.EndpointsLastChangeTriggerTime)
				fp.addEndpoints(e)
			},
			expected: map[types.NamespacedName][]time.Time{},
		},
		{
			name: "addEndpoints then deleteEndpoints",
			scenario: func(fp *FakeProxier) {
				e := createEndpoints("ns", "ep1", t1)
				fp.addEndpoints(e)
				// Deleting the endpoints drops its pending trigger times.
				fp.deleteEndpoints(e)
			},
			expected: map[types.NamespacedName][]time.Time{},
		},
		{
			name: "add then delete then add again",
			scenario: func(fp *FakeProxier) {
				e := createEndpoints("ns", "ep1", t1)
				fp.addEndpoints(e)
				fp.deleteEndpoints(e)
				// Re-adding after a delete reports only the new trigger time.
				e = modifyEndpoints(e, t2)
				fp.addEndpoints(e)
			},
			expected: map[types.NamespacedName][]time.Time{createName("ns", "ep1"): {t2}},
		},
	}
	// Run every scenario against a fresh proxier and compare the trigger
	// times that a single Update call reports against the expectation.
	for _, tc := range testCases {
		fp := newFakeProxier()
		tc.scenario(fp)
		result := fp.endpointsMap.Update(fp.endpointsChanges)
		got := result.LastChangeTriggerTimes
		if !reflect.DeepEqual(got, tc.expected) {
			t.Errorf("%s: Invalid LastChangeTriggerTimes, expected: %v, got: %v",
				tc.name, tc.expected, result.LastChangeTriggerTimes)
		}
	}
}
func TestEndpointSliceUpdate(t *testing.T) {
fqdnSlice := generateEndpointSlice("svc1", "ns1", 2, 5, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)})
fqdnSlice.AddressType = discovery.AddressTypeFQDN
testCases := map[string]struct {
startingSlices []*discovery.EndpointSlice
endpointChangeTracker *EndpointChangeTracker
namespacedName types.NamespacedName
paramEndpointSlice *discovery.EndpointSlice
paramRemoveSlice bool
expectedReturnVal bool
expectedCurrentChange map[ServicePortName][]*BaseEndpointInfo
}{
// test starting from an empty state
"add a simple slice that doesn't already exist": {
startingSlices: []*discovery.EndpointSlice{},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: false,
expectedReturnVal: true,
expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:80"},
&BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.3:80"},
},
makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:443"},
&BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.3:443"},
},
},
},
// test no modification to state - current change should be nil as nothing changes
"add the same slice that already exists": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: false,
expectedReturnVal: false,
expectedCurrentChange: nil,
},
		// ensure that only valid address types are processed
"add an FQDN slice (invalid address type)": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: fqdnSlice,
paramRemoveSlice: false,
expectedReturnVal: false,
expectedCurrentChange: nil,
},
// test additions to existing state
"add a slice that overlaps with existing state": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
generateEndpointSlice("svc1", "ns1", 2, 2, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 5, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: false,
expectedReturnVal: true,
expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.5:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.2.1:80"},
&BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true},
},
makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.3:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.4:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.5:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.2.1:443"},
&BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true},
},
},
},
// test additions to existing state with partially overlapping slices and ports
"add a slice that overlaps with existing state and partial ports": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
generateEndpointSlice("svc1", "ns1", 2, 2, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSliceWithOffset("svc1", "ns1", 3, 1, 5, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80)}),
paramRemoveSlice: false,
expectedReturnVal: true,
expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.3:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.4:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.5:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.2.1:80"},
&BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true},
},
makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:443"},
&BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.3:443"},
&BaseEndpointInfo{Endpoint: "10.0.2.1:443"},
&BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true},
},
},
},
// test deletions from existing state with partially overlapping slices and ports
"remove a slice that overlaps with existing state": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
generateEndpointSlice("svc1", "ns1", 2, 2, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 5, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: true,
expectedReturnVal: true,
expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.2.1:80"},
&BaseEndpointInfo{Endpoint: "10.0.2.2:80", IsLocal: true},
},
makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.2.1:443"},
&BaseEndpointInfo{Endpoint: "10.0.2.2:443", IsLocal: true},
},
},
},
// ensure a removal that has no effect turns into a no-op
"remove a slice that doesn't even exist in current state": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 5, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
generateEndpointSlice("svc1", "ns1", 2, 2, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 3, 5, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: true,
expectedReturnVal: false,
expectedCurrentChange: nil,
},
// start with all endpoints ready, transition to no endpoints ready
"transition all endpoints to unready state": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 3, 999, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 1, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: false,
expectedReturnVal: true,
expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{},
},
// start with no endpoints ready, transition to all endpoints ready
"transition all endpoints to ready state": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 2, 1, []string{"host1", "host2"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 2, 999, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: false,
expectedReturnVal: true,
expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true},
},
makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true},
},
},
},
// start with some endpoints ready, transition to more endpoints ready
"transition some endpoints to ready state": {
startingSlices: []*discovery.EndpointSlice{
generateEndpointSlice("svc1", "ns1", 1, 3, 2, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
generateEndpointSlice("svc1", "ns1", 2, 2, 2, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
},
endpointChangeTracker: NewEndpointChangeTracker("host1", nil, nil, nil, true),
namespacedName: types.NamespacedName{Name: "svc1", Namespace: "ns1"},
paramEndpointSlice: generateEndpointSlice("svc1", "ns1", 1, 3, 3, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
paramRemoveSlice: false,
expectedReturnVal: true,
expectedCurrentChange: map[ServicePortName][]*BaseEndpointInfo{
makeServicePortName("ns1", "svc1", "port-0", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.2:80", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.2.1:80", IsLocal: true},
},
makeServicePortName("ns1", "svc1", "port-1", v1.ProtocolTCP): {
&BaseEndpointInfo{Endpoint: "10.0.1.1:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.1.2:443", IsLocal: true},
&BaseEndpointInfo{Endpoint: "10.0.2.1:443", IsLocal: true},
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
initializeCache(tc.endpointChangeTracker.endpointSliceCache, tc.startingSlices)
got := tc.endpointChangeTracker.EndpointSliceUpdate(tc.paramEndpointSlice, tc.paramRemoveSlice)
if !reflect.DeepEqual(got, tc.expectedReturnVal) {
t.Errorf("EndpointSliceUpdate return value got: %v, want %v", got, tc.expectedReturnVal)
}
if tc.endpointChangeTracker.items == nil {
t.Errorf("Expected ect.items to not be nil")
}
changes := tc.endpointChangeTracker.checkoutChanges()
if tc.expectedCurrentChange == nil {
if len(changes) != 0 {
t.Errorf("Expected %s to have no changes", tc.namespacedName)
}
} else {
if len(changes) == 0 || changes[0] == nil {
t.Fatalf("Expected %s to have changes", tc.namespacedName)
}
compareEndpointsMapsStr(t, changes[0].current, tc.expectedCurrentChange)
}
})
}
}
// TestCheckoutChanges verifies that checkoutChanges returns the expected
// pending changes for both the EndpointSlice-backed tracker and the legacy
// items-backed tracker.
func TestCheckoutChanges(t *testing.T) {
	svcPortName0 := ServicePortName{types.NamespacedName{Namespace: "ns1", Name: "svc1"}, "port-0", v1.ProtocolTCP}
	svcPortName1 := ServicePortName{types.NamespacedName{Namespace: "ns1", Name: "svc1"}, "port-1", v1.ProtocolTCP}
	testCases := map[string]struct {
		endpointChangeTracker *EndpointChangeTracker
		expectedChanges       []*endpointsChange
		useEndpointSlices     bool
		items                 map[types.NamespacedName]*endpointsChange
		appliedSlices         []*discovery.EndpointSlice // checkpointed before pendingSlices are applied
		pendingSlices         []*discovery.EndpointSlice // the updates whose diff is under test
	}{
		"empty slices": {
			endpointChangeTracker: NewEndpointChangeTracker("", nil, nil, nil, true),
			expectedChanges:       []*endpointsChange{},
			useEndpointSlices:     true,
			appliedSlices:         []*discovery.EndpointSlice{},
			pendingSlices:         []*discovery.EndpointSlice{},
		},
		"without slices, empty items": {
			endpointChangeTracker: NewEndpointChangeTracker("", nil, nil, nil, false),
			expectedChanges:       []*endpointsChange{},
			items:                 map[types.NamespacedName]*endpointsChange{},
			useEndpointSlices:     false,
		},
		"without slices, simple items": {
			endpointChangeTracker: NewEndpointChangeTracker("", nil, nil, nil, false),
			expectedChanges: []*endpointsChange{{
				previous: EndpointsMap{
					svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", ""), newTestEp("10.0.1.2:80", "")},
					svcPortName1: []Endpoint{newTestEp("10.0.1.1:443", ""), newTestEp("10.0.1.2:443", "")},
				},
				current: EndpointsMap{
					svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", ""), newTestEp("10.0.1.2:80", "")},
				},
			}},
			items: map[types.NamespacedName]*endpointsChange{
				{Namespace: "ns1", Name: "svc1"}: {
					previous: EndpointsMap{
						svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", ""), newTestEp("10.0.1.2:80", "")},
						svcPortName1: []Endpoint{newTestEp("10.0.1.1:443", ""), newTestEp("10.0.1.2:443", "")},
					},
					current: EndpointsMap{
						svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", ""), newTestEp("10.0.1.2:80", "")},
					},
				},
			},
			useEndpointSlices: false,
		},
		"adding initial slice": {
			endpointChangeTracker: NewEndpointChangeTracker("", nil, nil, nil, true),
			expectedChanges: []*endpointsChange{{
				previous: EndpointsMap{},
				current: EndpointsMap{
					svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", "host1"), newTestEp("10.0.1.2:80", "host1")},
				},
			}},
			useEndpointSlices: true,
			appliedSlices:     []*discovery.EndpointSlice{},
			pendingSlices: []*discovery.EndpointSlice{
				generateEndpointSlice("svc1", "ns1", 1, 3, 3, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80)}),
			},
		},
		"removing port in update": {
			endpointChangeTracker: NewEndpointChangeTracker("", nil, nil, nil, true),
			expectedChanges: []*endpointsChange{{
				previous: EndpointsMap{
					svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", "host1"), newTestEp("10.0.1.2:80", "host1")},
					svcPortName1: []Endpoint{newTestEp("10.0.1.1:443", "host1"), newTestEp("10.0.1.2:443", "host1")},
				},
				current: EndpointsMap{
					svcPortName0: []Endpoint{newTestEp("10.0.1.1:80", "host1"), newTestEp("10.0.1.2:80", "host1")},
				},
			}},
			useEndpointSlices: true,
			appliedSlices: []*discovery.EndpointSlice{
				generateEndpointSlice("svc1", "ns1", 1, 3, 3, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80), utilpointer.Int32Ptr(443)}),
			},
			pendingSlices: []*discovery.EndpointSlice{
				generateEndpointSlice("svc1", "ns1", 1, 3, 3, []string{"host1"}, []*int32{utilpointer.Int32Ptr(80)}),
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			if tc.useEndpointSlices {
				// Apply the baseline slices, then checkpoint (checkoutChanges)
				// so that only the pending slices appear as new changes below.
				for _, slice := range tc.appliedSlices {
					tc.endpointChangeTracker.EndpointSliceUpdate(slice, false)
				}
				tc.endpointChangeTracker.checkoutChanges()
				for _, slice := range tc.pendingSlices {
					tc.endpointChangeTracker.EndpointSliceUpdate(slice, false)
				}
			} else {
				tc.endpointChangeTracker.items = tc.items
			}
			changes := tc.endpointChangeTracker.checkoutChanges()
			if len(tc.expectedChanges) != len(changes) {
				t.Fatalf("Expected %d changes, got %d", len(tc.expectedChanges), len(changes))
			}
			for i, change := range changes {
				expectedChange := tc.expectedChanges[i]
				if !reflect.DeepEqual(change.previous, expectedChange.previous) {
					t.Errorf("[%d] Expected change.previous: %+v, got: %+v", i, expectedChange.previous, change.previous)
				}
				if !reflect.DeepEqual(change.current, expectedChange.current) {
					t.Errorf("[%d] Expected change.current: %+v, got: %+v", i, expectedChange.current, change.current)
				}
			}
		})
	}
}
// Test helpers
// compareEndpointsMapsStr asserts that newMap contains exactly the endpoints
// described by expected, comparing only the Endpoint string and IsLocal flag.
func compareEndpointsMapsStr(t *testing.T, newMap EndpointsMap, expected map[ServicePortName][]*BaseEndpointInfo) {
	t.Helper()
	if len(newMap) != len(expected) {
		t.Errorf("expected %d results, got %d: %v", len(expected), len(newMap), newMap)
	}
	sameEndpoint := func(a, b *BaseEndpointInfo) bool {
		return a.Endpoint == b.Endpoint && a.IsLocal == b.IsLocal
	}
	for svcPort, wantEps := range expected {
		gotEps := newMap[svcPort]
		if len(gotEps) != len(wantEps) {
			t.Errorf("expected %d endpoints for %v, got %d", len(wantEps), svcPort, len(gotEps))
			t.Logf("Endpoints %+v", gotEps)
			continue
		}
		for i, want := range wantEps {
			got, ok := gotEps[i].(*BaseEndpointInfo)
			if !ok {
				t.Errorf("Failed to cast endpointsInfo")
				continue
			}
			if !sameEndpoint(got, want) {
				t.Errorf("expected new[%v][%d] to be %v, got %v (IsLocal expected %v, got %v)", svcPort, i, want, got, want.IsLocal, got.IsLocal)
			}
		}
	}
}
// newTestEp builds a BaseEndpointInfo for tests; a non-empty host is recorded
// as the kubernetes.io/hostname topology label.
func newTestEp(ep, host string) *BaseEndpointInfo {
	info := &BaseEndpointInfo{Endpoint: ep}
	if host == "" {
		return info
	}
	info.Topology = map[string]string{
		"kubernetes.io/hostname": host,
	}
	return info
}
// initializeCache seeds endpointSliceCache with endpointSlices and then marks
// every pending update as applied, so tests start from a "checkpointed" state.
func initializeCache(endpointSliceCache *EndpointSliceCache, endpointSlices []*discovery.EndpointSlice) {
	for _, endpointSlice := range endpointSlices {
		endpointSliceCache.updatePending(endpointSlice, false)
	}
	for _, tracker := range endpointSliceCache.trackerByServiceMap {
		// Promote pending -> applied and reset pending, mimicking a completed sync.
		tracker.applied = tracker.pending
		tracker.pending = endpointSliceInfoByName{}
	}
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stmtstats
import (
"sync"
topsqlstate "github.com/pingcap/tidb/util/topsql/state"
"github.com/tikv/client-go/v2/tikvrpc"
"github.com/tikv/client-go/v2/tikvrpc/interceptor"
)
// CreateKvExecCounter creates an associated KvExecCounter from StatementStats.
// The created KvExecCounter can only be used during a single statement execution
// and cannot be reused.
func (s *StatementStats) CreateKvExecCounter(sqlDigest, planDigest []byte) *KvExecCounter {
	return &KvExecCounter{
		stats:  s,
		digest: SQLPlanDigest{SQLDigest: BinaryDigest(sqlDigest), PlanDigest: BinaryDigest(planDigest)},
		// Start with an empty set: no RPC target has been counted yet.
		marked: map[string]struct{}{},
	}
}
// KvExecCounter is used to count the number of SQL executions of the kv layer.
// It internally calls addKvExecCount of StatementStats at the right time, to
// ensure the semantic of "SQL execution count of TiKV".
type KvExecCounter struct {
	stats  *StatementStats
	marked map[string]struct{} // HashSet<Target>: targets already counted for this statement; guarded by mu
	digest SQLPlanDigest
	mu     sync.Mutex // protects marked
}
// RPCInterceptor returns an interceptor.RPCInterceptor for client-go.
// The returned interceptor is generally expected to be bind to transaction or
// snapshot. In this way, the logic preset by KvExecCounter will be executed before
// each RPC request is initiated, in order to count the number of SQL executions of
// the TiKV dimension.
func (c *KvExecCounter) RPCInterceptor() interceptor.RPCInterceptor {
	return interceptor.NewRPCInterceptor("kv-exec-counter", func(next interceptor.RPCInterceptorFunc) interceptor.RPCInterceptorFunc {
		return func(target string, req *tikvrpc.Request) (*tikvrpc.Response, error) {
			// Only count when Top SQL is enabled; otherwise the request
			// passes through untouched.
			if topsqlstate.TopSQLEnabled() {
				c.mark(target)
			}
			return next(target, req)
		}
	})
}
// mark records that target was touched during the current statement
// execution. Only the first observation of a target bumps the execution
// count; the counter update itself runs outside the lock. mark is
// thread-safe.
func (c *KvExecCounter) mark(target string) {
	c.mu.Lock()
	_, seen := c.marked[target]
	if !seen {
		c.marked[target] = struct{}{}
	}
	c.mu.Unlock()
	if !seen {
		c.stats.addKvExecCount([]byte(c.digest.SQLDigest), []byte(c.digest.PlanDigest), target, 1)
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/slack-go/slack"
)
func main() {
	var (
		apiToken string
		debug    bool
	)

	flag.StringVar(&apiToken, "token", "YOUR_TOKEN_HERE", "Your Slack API Token")
	flag.BoolVar(&debug, "debug", false, "Show JSON output")
	flag.Parse()

	api := slack.New(apiToken, slack.OptionDebug(debug))

	// Fetch every item the user has starred.
	starredItems, _, err := api.GetStarred(slack.NewStarsParameters())
	if err != nil {
		fmt.Printf("Error getting stars: %s\n", err)
		return
	}

	// Render a short description for each starred item, based on its type.
	for _, item := range starredItems {
		var desc string
		switch item.Type {
		case slack.TYPE_MESSAGE:
			desc = item.Message.Text
		case slack.TYPE_FILE:
			desc = item.File.Name
		case slack.TYPE_FILE_COMMENT:
			desc = item.File.Name + " - " + item.Comment.Comment
		case slack.TYPE_CHANNEL, slack.TYPE_IM, slack.TYPE_GROUP:
			desc = item.Channel
		}
		fmt.Printf("Starred %s: %s\n", item.Type, desc)
	}
}
|
package autoquad
/*
Generated using mavgen - https://github.com/ArduPilot/pymavlink/
Copyright 2020 queue-b <https://github.com/queue-b>
Permission is hereby granted, free of charge, to any person obtaining a copy
of the generated software (the "Generated Software"), to deal
in the Generated Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Generated Software, and to permit persons to whom the Generated
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Generated Software.
THE GENERATED SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE GENERATED SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE GENERATED SOFTWARE.
*/
import (
"bytes"
"encoding/binary"
"fmt"
"text/tabwriter"
"github.com/queue-b/go-mavlink2"
"github.com/queue-b/go-mavlink2/util"
)
/*AqTelemetryF Sends up to 20 raw float values. */
// NOTE: Read decodes the wire payload directly into this struct with
// binary.Read, so the field order must match the on-wire layout exactly.
type AqTelemetryF struct {
	/*Value1 value1 */
	Value1 float32
	/*Value2 value2 */
	Value2 float32
	/*Value3 value3 */
	Value3 float32
	/*Value4 value4 */
	Value4 float32
	/*Value5 value5 */
	Value5 float32
	/*Value6 value6 */
	Value6 float32
	/*Value7 value7 */
	Value7 float32
	/*Value8 value8 */
	Value8 float32
	/*Value9 value9 */
	Value9 float32
	/*Value10 value10 */
	Value10 float32
	/*Value11 value11 */
	Value11 float32
	/*Value12 value12 */
	Value12 float32
	/*Value13 value13 */
	Value13 float32
	/*Value14 value14 */
	Value14 float32
	/*Value15 value15 */
	Value15 float32
	/*Value16 value16 */
	Value16 float32
	/*Value17 value17 */
	Value17 float32
	/*Value18 value18 */
	Value18 float32
	/*Value19 value19 */
	Value19 float32
	/*Value20 value20 */
	Value20 float32
	/*Index Index of message */
	Index uint16
	/*HasExtensionFieldValues indicates if this message has any extensions and */
	HasExtensionFieldValues bool
}
// String renders the message as a tab-aligned, human-readable listing of all
// field values, prefixed by the dialect and message name.
func (m *AqTelemetryF) String() string {
	format := ""
	var buffer bytes.Buffer

	writer := tabwriter.NewWriter(&buffer, 0, 0, 2, ' ', 0)

	format += "Name:\t%v/%v\n"
	// Output field values based on the decoded message type
	format += "Value1:\t%v \n"
	format += "Value2:\t%v \n"
	format += "Value3:\t%v \n"
	format += "Value4:\t%v \n"
	format += "Value5:\t%v \n"
	format += "Value6:\t%v \n"
	format += "Value7:\t%v \n"
	format += "Value8:\t%v \n"
	format += "Value9:\t%v \n"
	format += "Value10:\t%v \n"
	format += "Value11:\t%v \n"
	format += "Value12:\t%v \n"
	format += "Value13:\t%v \n"
	format += "Value14:\t%v \n"
	format += "Value15:\t%v \n"
	format += "Value16:\t%v \n"
	format += "Value17:\t%v \n"
	format += "Value18:\t%v \n"
	format += "Value19:\t%v \n"
	format += "Value20:\t%v \n"
	format += "Index:\t%v \n"

	fmt.Fprintf(
		writer,
		format,
		m.GetDialect(),
		m.GetMessageName(),
		m.Value1,
		m.Value2,
		m.Value3,
		m.Value4,
		m.Value5,
		m.Value6,
		m.Value7,
		m.Value8,
		m.Value9,
		m.Value10,
		m.Value11,
		m.Value12,
		m.Value13,
		m.Value14,
		m.Value15,
		m.Value16,
		m.Value17,
		m.Value18,
		m.Value19,
		m.Value20,
		m.Index,
	)

	writer.Flush()
	// buffer.String() is the idiomatic form of string(buffer.Bytes())
	// (staticcheck S1030); the rendered text is identical.
	return buffer.String()
}
// GetVersion gets the MAVLink version of the Message contents
// (2 when any extension field carries a value, else 1).
func (m *AqTelemetryF) GetVersion() int {
	if m.HasExtensionFieldValues {
		return 2
	}
	return 1
}
// GetDialect gets the name of the dialect that defines the Message.
func (m *AqTelemetryF) GetDialect() string {
	return "autoquad"
}
// GetMessageName gets the name of the Message.
func (m *AqTelemetryF) GetMessageName() string {
	return "AqTelemetryF"
}
// GetID gets the MAVLink message ID (150) of the Message.
func (m *AqTelemetryF) GetID() uint32 {
	return 150
}
// HasExtensionFields returns true if the message definition contained extensions; false otherwise.
// This message defines none, so it always reports false.
func (m *AqTelemetryF) HasExtensionFields() bool {
	return false
}
// getV1Length returns the fixed MAVLink v1 payload length in bytes
// (20 float32 values + uint16 index = 82).
func (m *AqTelemetryF) getV1Length() int {
	return 82
}
// getIOSlice returns a zeroed scratch buffer one byte longer than the payload;
// the extra byte backs the HasExtensionFieldValues bool during binary (de)serialization.
func (m *AqTelemetryF) getIOSlice() []byte {
	return make([]byte, 82+1)
}
// Read sets the field values of the message from the raw message payload.
func (m *AqTelemetryF) Read(frame mavlink2.Frame) (err error) {
	version := frame.GetVersion()

	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}

	// Don't attempt to Read V2 messages from V1 frames
	if m.GetID() > 255 && version < 2 {
		err = mavlink2.ErrDecodeV2MessageV1Frame
		return
	}

	// binary.Read can panic; swallow the panic and return a sane error
	defer func() {
		if r := recover(); r != nil {
			err = mavlink2.ErrPrivateField
		}
	}()

	// Get a slice of bytes long enough for the all the AqTelemetryF fields
	// binary.Read requires enough bytes in the reader to read all fields, even if
	// the fields are just zero values. This also simplifies handling MAVLink2
	// extensions and trailing zero truncation.
	ioSlice := m.getIOSlice()

	copy(ioSlice, frame.GetMessageBytes())

	// The final byte backs HasExtensionFieldValues; set it so binary.Read
	// decodes the flag as true when V2 extensions are present.
	if version == 2 && m.HasExtensionFields() {
		ioSlice[len(ioSlice)-1] = 1
	}

	reader := bytes.NewReader(ioSlice)

	// Decode all fields in declaration order, little-endian per MAVLink.
	err = binary.Read(reader, binary.LittleEndian, m)

	return
}
// Write encodes the field values of the message to a byte array.
func (m *AqTelemetryF) Write(version int) (output []byte, err error) {
	var buffer bytes.Buffer

	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}

	// Don't attempt to Write V2 messages to V1 bodies
	if m.GetID() > 255 && version < 2 {
		err = mavlink2.ErrEncodeV2MessageV1Frame
		return
	}

	// Serialize all fields (little-endian), including the trailing
	// HasExtensionFieldValues bool byte.
	err = binary.Write(&buffer, binary.LittleEndian, *m)
	if err != nil {
		return
	}

	output = buffer.Bytes()

	// V1 uses fixed message lengths and does not include any extension fields
	// Truncate the byte slice to the correct length
	// This also removes the trailing extra byte written for HasExtensionFieldValues
	if version == 1 {
		output = output[:m.getV1Length()]
	}

	// V2 uses variable message lengths and includes extension fields
	// The variable length is caused by truncating any trailing zeroes from
	// the end of the message before it is added to a frame
	if version == 2 {
		// Set HasExtensionFieldValues to zero so that it doesn't interfere with V2 truncation
		output[len(output)-1] = 0
		output = util.TruncateV2(buffer.Bytes())
	}

	return
}
|
package problem0239
import "testing"
// TestSolve checks maxSlidingWindow against a hand-computed expectation.
// The original test only logged the result and could never fail.
func TestSolve(t *testing.T) {
	got := maxSlidingWindow([]int{1, 2, 3, -4, 5}, 2)
	// Windows of size 2: max(1,2)=2, max(2,3)=3, max(3,-4)=3, max(-4,5)=5.
	want := []int{2, 3, 3, 5}
	if len(got) != len(want) {
		t.Fatalf("maxSlidingWindow = %v, want %v", got, want)
	}
	for i := range want {
		if got[i] != want[i] {
			t.Fatalf("maxSlidingWindow = %v, want %v", got, want)
		}
	}
}
|
package wcloudmessage
import (
"github.com/satori/go.uuid"
)
// CloudMessageData is the payload of a push/cloud message: which screen to
// open and the notification to display.
// NOTE(review): Show_in_foreground breaks Go naming convention — presumably
// kept to match an external (e.g. FCM) payload key; confirm before renaming.
type CloudMessageData struct {
	TargetScreen       string
	Show_in_foreground bool
	Notification       Notification
}
// Notification describes a single user-facing notification and its
// read/delivery state.
type Notification struct {
	Id             uuid.UUID
	OrderId        string // related order, if any
	OrderCommentId string // related order comment, if any
	Time           string // NOTE(review): stored as a string — format/timezone not visible here; confirm with producers
	IsNotified     bool   // whether the user has been notified
	IsRead         bool   // whether the user has read it
	Title          string
	Message        string
	Type           int // NOTE(review): meaning of the numeric type codes is not defined in this file
	State          string
}
// ResidentNofification is a notification addressed to a resident.
// NOTE(review): the type name contains a typo ("Nofification"); it is exported,
// so renaming would break external callers — coordinate before fixing.
type ResidentNofification struct {
	ReceiverId string
	Message    string
	Category   string
	Object     map[string]string
	IsMobile   int // NOTE(review): int rather than bool — semantics of values other than 0/1 unclear
	Sender     map[string]string
}
|
package main
import (
"github.com/go-redis/redis"
"fmt"
"strconv"
"github.com/samuel/go-zookeeper/zk"
"time"
"strings"
)
// client is the shared Redis connection holding the counter under test.
var client *redis.Client

// zkHandler is the ZooKeeper session used to implement the distributed lock.
var zkHandler *zk.Conn
// init connects to Redis and ZooKeeper at fixed demo addresses.
// NOTE(review): both connection errors are silently discarded; any failure
// surfaces later as a panic or nil-pointer use.
func init() {
	client = redis.NewClient(&redis.Options{
		Addr:     "10.96.90.6:6379",
		Password: "", // no password set
		DB:       0,  // use default DB
	})
	zkHandler, _, _ = zk.Connect([]string{"10.96.90.6"}, time.Second) //*10)
}
// ReadAndIncrement fetches the integer stored under key, prints it, and
// writes back the value plus one. Any Redis error panics.
func ReadAndIncrement(key string) {
	raw, err := client.Get(key).Result()
	if err != nil {
		panic(err)
	}
	current, _ := strconv.Atoi(raw)
	fmt.Printf("A:[key=%v,val=%v]\n", key, current)
	if err := client.Set(key, fmt.Sprintf("%v", current+1), 0).Err(); err != nil {
		panic(err)
	}
}
// lock acquires a distributed lock by creating an ephemeral sequential znode
// under /didi and waiting until ours has the lowest sequence number.
// It returns the full path of the created node, to be passed to unlock.
//
// BUG FIX: the original built the watch target with
// fmt.Sprintf("%v", nNum-1)[1:], which yields a bare (often empty) number,
// not a znode path — so ExistsW never watched a real node and the loop
// busy-spun. We now reconstruct the predecessor's zero-padded sequential
// path ("/didi/lock-%010d"), matching ZooKeeper's 10-digit sequence format.
func lock() string {
	n, _ := zkHandler.Create(fmt.Sprintf("%s/lock-", "/didi"), []byte(" "), zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
	for {
		children, _, _ := zkHandler.Children("/didi")
		// Our own sequence number, with leading zeroes stripped.
		nNum, _ := strconv.Atoi(formatSequenctialNodePath(strings.Split(n, "-")[1]))
		isLowestNode := true
		for _, child := range children {
			childNum, _ := strconv.Atoi(formatSequenctialNodePath(strings.Split(child, "-")[1]))
			if nNum > childNum {
				isLowestNode = false
				break
			}
		}
		if isLowestNode {
			// Lowest sequence number wins the lock.
			return n
		}
		// Wait for the immediately preceding node to disappear before
		// re-checking (avoids a tight retry loop).
		prev := fmt.Sprintf("/didi/lock-%010d", nNum-1)
		existed, _, ch, _ := zkHandler.ExistsW(prev)
		if existed {
			<-ch
		}
	}
}
// unlock releases the lock by deleting the caller's znode. Get is used to
// fetch the current version so Delete performs a versioned (CAS) removal.
// NOTE(review): both Get and Delete errors are ignored; a failed delete
// leaves the lock held until the session's ephemeral node expires.
func unlock(node string) {
	_, stat, _ := zkHandler.Get(node)
	zkHandler.Delete(node, stat.Version)
}
// formatSequenctialNodePath strips the leading zeroes from a ZooKeeper
// sequential-node counter (e.g. "0000000042" -> "42") so it can be parsed
// with strconv.Atoi.
//
// BUG FIX: the original returned path[firstNonZeroIdx:] with
// firstNonZeroIdx left at -1 when the input was empty or all zeroes,
// which panics with a slice-bounds error. Those inputs now yield "0".
func formatSequenctialNodePath(path string) string {
	trimmed := strings.TrimLeft(path, "0")
	if trimmed == "" {
		return "0"
	}
	return trimmed
}
// main increments a shared Redis counter 200 times, each increment guarded by
// the ZooKeeper lock, then prints the final value.
func main() {
	unstop := make(chan int)
	key := "my_key"
	for i := 0; i < 200; i++ {
		n := lock()
		ReadAndIncrement(key)
		unlock(n)
	}
	val, _ := client.Get(key).Result()
	fmt.Println("A:final val:", val)
	// NOTE(review): nothing ever sends on unstop, so this blocks forever —
	// presumably to keep the process (and its ZooKeeper session) alive for
	// observation; confirm this is intentional.
	<-unstop
}
|
package main
import (
"crypto/md5"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
)
// Parents_dir is the parent directory of the target directory.
var Parents_dir string

// Same_file_dir is the directory (created next to the target directory,
// suffixed "-samefile") that duplicate files are moved into.
var Same_file_dir string

// source_path is the directory to deduplicate, from the -f flag.
var source_path string

// file_list holds every file path found while walking the target directory.
var file_list []string

// same_file_list holds the paths whose MD5 matched an earlier file.
var same_file_list []string
// init parses the -f flag, resolves it to an absolute path, derives the
// parent directory, and creates the "<target>-samefile" directory that
// duplicates will be moved into.
// NOTE(review): the "\\" separator handling is Windows-specific (see the
// inline comment); on Linux this code would need "/" instead.
func init() {
	s := flag.String("f", "", "-f C:\\Users\\yyw\\go\\src\\github.com\\yangyouwei\\quchong\\test 访问url")
	flag.Parse()
	if *s == "" {
		flag.Usage()
		panic("process exsit!")
	}
	source_path, _ = filepath.Abs(*s)
	a := strings.LastIndex(source_path, "\\") // on Linux use "/" as the separator; Windows uses "\\"
	rs := []rune(source_path)
	// Split the absolute path into parent directory and target directory name.
	parents_dir := rs[:a]
	Parents_dir = string(parents_dir)
	dir_name := rs[a:]
	Same_file_dir = Parents_dir + string(dir_name) + "-samefile"
	// Create the duplicates directory; an "already exists" error is only printed.
	err := os.Mkdir(Same_file_dir, os.ModePerm)
	if err != nil {
		fmt.Println(err)
	}
}
func main() {
//遍历目录
fn := getfilelist(source_path)
file_list = *fn
//比较md5值,重复的放到重复的slice中,不重复的放到map中
sn := diff_md5(file_list)
same_file_list = *sn
//读取重复文件的slice,移动文件到指定目录
move_file(same_file_list,Parents_dir,Same_file_dir)
}
// getfilelist recursively collects every file under source_path and returns
// a pointer to the resulting slice. It panics if the walk fails.
func getfilelist(source_path string) *[]string {
	var files []string
	files, err := GetAllFile(source_path, files)
	if err != nil {
		panic(err)
	}
	return &files
}
// GetAllFile appends every regular file found under pathname (recursively)
// to s and returns the extended slice. On a read failure it logs the error
// and returns the slice collected so far together with the error.
func GetAllFile(pathname string, s []string) ([]string, error) {
	entries, err := ioutil.ReadDir(pathname)
	if err != nil {
		fmt.Println("read dir fail:", err)
		return s, err
	}
	for _, entry := range entries {
		full := pathname + "/" + entry.Name()
		if !entry.IsDir() {
			s = append(s, full)
			continue
		}
		if s, err = GetAllFile(full, s); err != nil {
			fmt.Println("read dir fail:", err)
			return s, err
		}
	}
	return s, nil
}
// diff_md5 hashes every path in fl and returns a pointer to the slice of
// paths whose MD5 digest matched an earlier file (i.e. the duplicates).
func diff_md5(fl []string) *[]string {
	seen := make(map[string]string)
	var dups []string
	for _, p := range fl {
		sum := md5_sum(p)
		if _, dup := seen[sum]; dup {
			// Same digest as an earlier file: record as duplicate.
			dups = append(dups, p)
			continue
		}
		// First occurrence of this digest.
		seen[sum] = p
	}
	return &dups
}
// move_file relocates each duplicate into same_file_path, recreating the
// file's original sub-directory structure beneath it.
// NOTE(review): the "\\" join in os.Rename is Windows-specific, matching init.
func move_file(same_file []string, parents_dir string, same_file_path string) {
	for _, v := range same_file {
		filename := path.Base(v)
		// Compute the file's sub-path relative to the parent directory by
		// slicing off the parent prefix (rune-wise, matching init's slicing).
		rs := []rune(parents_dir)
		n := len(rs)
		p1 := []rune(path.Dir(v))
		// The file's sub-directory path under the target directory.
		sub_path := p1[n:]
		// Mirror that hierarchy inside the duplicates directory.
		d_patch := same_file_path + string(sub_path)
		// Recreate the file's original directory structure.
		err := os.MkdirAll(d_patch, os.ModePerm)
		if err != nil {
			log.Println(err)
			break
		}
		// Move the file into the mirrored location.
		err = os.Rename(v, d_patch+"\\"+filename)
		if err != nil {
			log.Println(err)
			break
		}
		// Report the move.
		fmt.Println(v + "move to " + d_patch)
	}
}
// md5_sum returns a string key derived from the MD5 digest of the file at
// file_path (fmt.Sprint of the digest bytes, e.g. "[12 34 ...]"). It panics
// if the file cannot be opened and returns "" on a read error.
//
// Cleanup: the original called md5hash.Sum(nil) twice, discarding the first
// result, and wrapped the already-string result in string(...). Both
// redundancies are removed; the returned value is unchanged.
func md5_sum(file_path string) string {
	f, err := os.Open(file_path)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	md5hash := md5.New()
	if _, err := io.Copy(md5hash, f); err != nil {
		fmt.Println("Copy", err)
		return ""
	}
	return fmt.Sprint(md5hash.Sum(nil))
}
|
package main
import "encoding/json"
import "fmt"
import "bytes"
// Gender enumerates the supported gender values; it marshals to and from the
// human-readable strings in toString/toID.
type Gender int

const (
	// NOTE: "= iota" without an explicit Gender type makes these untyped int
	// constants; they still convert implicitly wherever a Gender is expected.
	GenderNotSet = iota
	GenderMale
	GenderFemale
	GenderOther
)

// toString maps each Gender to its JSON string representation.
var toString = map[Gender]string{
	GenderNotSet: "Not Set",
	GenderMale:   "Male",
	GenderFemale: "Female",
	GenderOther:  "Other",
}

// toID is the inverse of toString, used when unmarshaling; unknown strings
// fall through to the zero value (GenderNotSet).
var toID = map[string]Gender{
	"Not Set": GenderNotSet,
	"Male":    GenderMale,
	"Female":  GenderFemale,
	"Other":   GenderOther,
}
// MarshalJSON encodes the Gender as its quoted string name; an unknown value
// encodes as the empty string "".
func (g Gender) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte('"')
	buf.WriteString(toString[g])
	buf.WriteByte('"')
	return buf.Bytes(), nil
}
// UnmarshalJSON decodes a JSON string into a Gender; names not present in
// toID yield the zero value (GenderNotSet).
func (g *Gender) UnmarshalJSON(b []byte) error {
	var name string
	if err := json.Unmarshal(b, &name); err != nil {
		return err
	}
	*g = toID[name]
	return nil
}
// Human carries a single Gender field for JSON round-tripping.
type Human struct {
	// BUG FIX: the tag was `json:"gender` (missing the closing quote), which
	// reflect's tag parser treats as malformed and ignores, so the field
	// marshaled under its Go name "Gender". The terminated tag restores the
	// intended lowercase "gender" key.
	Gender Gender `json:"gender"`
}
// main demonstrates the custom Gender JSON marshaling by pretty-printing a
// sample Human.
func main() {
	me := Human{
		Gender: GenderMale,
	}
	// The original discarded the MarshalIndent error; report it instead of
	// silently printing an empty string.
	prettyJSON, err := json.MarshalIndent(me, "", " ")
	if err != nil {
		fmt.Println("marshal:", err)
		return
	}
	fmt.Println(string(prettyJSON))
}
|
package main
import (
"testing"
"github.com/jackytck/projecteuler/tools"
)
// TestP50 checks consecutivePrimeSum (Project Euler 50: the prime below the
// limit expressible as the sum of the most consecutive primes) at three limits.
func TestP50(t *testing.T) {
	cases := []tools.TestCase{
		{In: 100, Out: 41},
		{In: 1000, Out: 953},
		{In: 1000000, Out: 997651},
	}
	tools.TestIntInt(t, cases, consecutivePrimeSum, "P50")
}
|
// (C) Copyright 2012, Jeramey Crawford <jeramey@antihe.ro>. All
// rights reserved. Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha256_crypt
import "testing"
// sha256Crypt is the shared crypter instance exercised by all tests in this file.
var sha256Crypt = New()
// TestGenerate runs the reference sha256-crypt test vectors: each salt/key
// pair must produce the exact expected hash, and Cost must report the
// effective rounds parsed back out of that hash.
func TestGenerate(t *testing.T) {
	data := []struct {
		salt []byte
		key  []byte
		out  string
		cost int
	}{
		{
			[]byte("$5$saltstring"),
			[]byte("Hello world!"),
			"$5$saltstring$5B8vYYiY.CVt1RlTTf8KbXBH3hsxY/GNooZaBBGWEc5",
			RoundsDefault,
		},
		{
			[]byte("$5$rounds=10000$saltstringsaltstring"),
			[]byte("Hello world!"),
			"$5$rounds=10000$saltstringsaltst$3xv.VbSHBb41AL9AvLeujZkZRBAwqFM" +
				"z2.opqey6IcA",
			10000,
		},
		{
			[]byte("$5$rounds=5000$toolongsaltstring"),
			[]byte("This is just a test"),
			"$5$rounds=5000$toolongsaltstrin$Un/5jzAHMgOGZ5.mWJpuVolil07guHPv" +
				"OW8mGRcvxa5",
			5000,
		},
		{
			[]byte("$5$rounds=1400$anotherlongsaltstring"),
			[]byte("a very much longer text to encrypt. " +
				"This one even stretches over more" +
				"than one line."),
			"$5$rounds=1400$anotherlongsalts$Rx.j8H.h8HjEDGomFU8bDkXm3XIUnzyx" +
				"f12oP84Bnq1",
			1400,
		},
		{
			[]byte("$5$rounds=77777$short"),
			[]byte("we have a short salt string but not a short password"),
			"$5$rounds=77777$short$JiO1O3ZpDAxGJeaDIuqCoEFysAe1mZNJRs3pw0KQRd/",
			77777,
		},
		{
			[]byte("$5$rounds=123456$asaltof16chars.."),
			[]byte("a short string"),
			"$5$rounds=123456$asaltof16chars..$gP3VQ/6X7UUEW3HkBn2w1/Ptq2jxPy" +
				"zV/cZKmF/wJvD",
			123456,
		},
		{
			// rounds below the minimum are clamped: the input asks for 10,
			// the output hash and Cost both report 1000.
			[]byte("$5$rounds=10$roundstoolow"),
			[]byte("the minimum number is still observed"),
			"$5$rounds=1000$roundstoolow$yfvwcWrQ8l/K0DAWyuPMDNHpIVlTQebY9l/g" +
				"L972bIC",
			1000,
		},
	}

	for i, d := range data {
		hash, err := sha256Crypt.Generate(d.key, d.salt)
		if err != nil {
			t.Fatal(err)
		}
		if hash != d.out {
			t.Errorf("Test %d failed\nExpected: %s, got: %s", i, d.out, hash)
		}
		cost, err := sha256Crypt.Cost(hash)
		if err != nil {
			t.Fatal(err)
		}
		if cost != d.cost {
			t.Errorf("Test %d failed\nExpected: %d, got: %d", i, d.cost, cost)
		}
	}
}
// TestVerify round-trips a handful of passwords through Generate (with a
// generated salt) and checks that Verify accepts each resulting hash.
func TestVerify(t *testing.T) {
	passwords := [][]byte{
		[]byte("password"),
		[]byte("12345"),
		[]byte("That's amazing! I've got the same combination on my luggage!"),
		[]byte("And change the combination on my luggage!"),
		[]byte(" random spa c ing."),
		[]byte("94ajflkvjzpe8u3&*j1k513KLJ&*()"),
	}
	for i, pw := range passwords {
		hash, err := sha256Crypt.Generate(pw, nil)
		if err != nil {
			t.Fatal(err)
		}
		if err = sha256Crypt.Verify(hash, pw); err != nil {
			t.Errorf("Test %d failed: %s", i, pw)
		}
	}
}
|
package server
import (
"context"
"io"
"net"
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
"github.com/htdvisser/squatt/session"
"go.uber.org/zap"
)
// Buffer sizes
var (
	// ClientSendBufferSize is the intended capacity of a client's outgoing
	// packet channel.
	ClientSendBufferSize = 16
)
// Client connection
type Client struct {
	server     *Server
	log        *zap.Logger
	remoteAddr string // set from the connection in Handle
	session    *session.Session
	keepAlive  *watchdog
	sendCh     chan packets.ControlPacket // outgoing packets, drained by the send routine
	ctx        context.Context            // canceled when the client is shutting down
	cancel     context.CancelFunc
	errMu      sync.Mutex // protects err
	err        error      // first error recorded via setError
}
// wrappedErr wraps an underlying error.
// NOTE(review): unused in this part of the file; presumably referenced
// elsewhere in the package — confirm before removing.
type wrappedErr struct {
	err error
}
// setError records err as the client's terminal error (first writer wins),
// then cancels the client context and disconnects the session. A nil err is
// ignored. Safe for concurrent use.
func (c *Client) setError(err error) {
	if err == nil {
		return
	}
	c.errMu.Lock()
	// Keep only the first error; later ones are dropped.
	if c.err == nil {
		c.err = err
	}
	c.errMu.Unlock()
	// Cancel and disconnect outside the lock.
	c.cancel()
	c.session.Disconnect()
}
// getError returns the first error recorded for this client, or nil.
func (c *Client) getError() error {
	c.errMu.Lock()
	err := c.err
	c.errMu.Unlock()
	return err
}
// NewClient creates a new MQTT Client whose context is canceled when the
// client shuts down.
func (s *Server) NewClient() *Client {
	c := &Client{
		server: s,
		log:    s.log,
		// Honor the configured send buffer size. The original created the
		// channel unbuffered, leaving ClientSendBufferSize declared but unused.
		sendCh: make(chan packets.ControlPacket, ClientSendBufferSize),
	}
	c.ctx, c.cancel = context.WithCancel(context.Background())
	return c
}
// Handle the client connection: record the peer address for logging, then run
// the send/receive loops until the client terminates, returning its error.
func (c *Client) Handle(conn net.Conn) error {
	c.remoteAddr = conn.RemoteAddr().String()
	return c.handle(conn)
}
// handle runs the send and receive routines over rw, blocks until the client
// context is canceled, then shuts down the send path and returns the first
// recorded error.
func (c *Client) handle(rw io.ReadWriter) error {
	waitSend := make(chan struct{})
	go func() {
		c.sendRoutine(rw)
		close(waitSend)
	}()
	go c.receiveRoutine(rw)
	<-c.ctx.Done()
	// If nothing recorded an error yet, record the context's cancellation error.
	if err := c.getError(); err == nil {
		c.setError(c.ctx.Err())
	}
	close(c.sendCh)
	c.keepAlive.Stop()
	<-waitSend
	// BUG FIX: the original returned c.getError().(error) — getError already
	// returns an error, and asserting a nil interface to error panics.
	return c.getError()
}
|
package bench
import (
"reflect"
"testing"
)
// http://www.darkcoding.net/software/go-the-price-of-interface/
// Just getting the reflect.Value of a string.
// 45ns.
// Or, 60ns with GC enabled. Yes, quite a difference.
func Benchmark_ReflectGetValueOfString(b *testing.B) {
	var slot string
	for i := 0; i < b.N; i++ {
		reflect.ValueOf(slot)
	}
	// No-op reference to slot, mirroring the other benchmarks in this file.
	_ = slot
}
// This is fascinating because it's actually much faster.
// Like 5ns compared to 60 for the direct value.
// GC: no impact. 0 allocs (!)
func Benchmark_ReflectGetValueOfStringRef(b *testing.B) {
	var slot string
	var slotAddr_rv reflect.Value
	for i := 0; i < b.N; i++ {
		// Taking the Value of a pointer rather than of the string itself.
		slotAddr_rv = reflect.ValueOf(&slot)
	}
	_ = slot
	_ = slotAddr_rv
}
// Getting a reflect.Value of a string's address, then `Elem()`'ing back to the string type.
// This is the hop you need to do to have an *addressable* value that you can set.
// Like 8ns compared to the 9ns (appears `Elem()` adds ~3.5ns).
// GC: no impact. 0 allocs (!)
func Benchmark_ReflectSetValueOfStringRefElemNote() {}

func Benchmark_ReflectGetValueOfStringRefElem(b *testing.B) {
	var slot string
	var slot_rav reflect.Value
	for i := 0; i < b.N; i++ {
		// NOTE(review): the result is discarded into `_` and slot_rav is never
		// assigned, unlike the sibling benchmarks — confirm that's intentional.
		_ = reflect.ValueOf(&slot).Elem()
	}
	_ = slot
	_ = slot_rav
}
// About 24ns, by the same rulers as the others.
// GC: no impact. 0 allocs (!)
func Benchmark_ReflectSetValue(b *testing.B) {
	// Setup (the addressable target and the value to assign) is hoisted out
	// of the timed loop; only Set itself is measured.
	var slot string
	slot_rav := reflect.ValueOf(&slot).Elem()
	var val = "x"
	val_rv := reflect.ValueOf(val)
	for i := 0; i < b.N; i++ {
		slot_rav.Set(val_rv)
	}
	_ = slot
	_ = slot_rav
}
// Just setting something through an address, full types.
// Very fast (obviously): less than a single nano.
// GC: no impact. 0 allocs (!)
// Baseline for the reflect-based benchmarks above.
func Benchmark_DirectSetValue(b *testing.B) {
	var slot string
	slotAddr_v := &slot
	var val = "x"
	for i := 0; i < b.N; i++ {
		*slotAddr_v = val
	}
	_ = slot
}
// Fit the address of a primitive into an `interface{}`, then type-switch
// it back to a primitive so we can directly set it.
// Still very fast: 2ns.
// GC: no impact. 0 allocs (!)
// Context: looking 4/5x faster than the equivalent ops with reflect
// (but that's maybe less of a margin than I might've expected).
func Benchmark_DirectInterfacedSetValue(b *testing.B) {
	var slot string
	var slotAddr_v interface{} = &slot
	var val = "x"
	for i := 0; i < b.N; i++ {
		switch v2 := slotAddr_v.(type) {
		case *interface{}:
			// sigh
		case *string:
			*v2 = val
		}
	}
	_ = slot
}
// Use a func to get that `interface{}` that's a pointer.
// This is emulating the fastest path we can do with an atlas with user-written functions.
// Still fast: 3ns.
// GC: no impact. 0 allocs (!)
func Benchmark_FuncedDirectInterfacedSetValue(b *testing.B) {
	var slot string
	// The closure call is inside the timed loop, on top of the type switch.
	addrFunc := func() interface{} { return &slot }
	var val = "x"
	var slotAddr_v interface{}
	for i := 0; i < b.N; i++ {
		slotAddr_v = addrFunc()
		switch v2 := slotAddr_v.(type) {
		case *interface{}:
			// sigh
		case *string:
			*v2 = val
		}
	}
	_ = slot
}
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !windows,!linux,!darwin,!openbsd,!freebsd
package router
import (
"github.com/tailscale/wireguard-go/device"
"github.com/tailscale/wireguard-go/tun"
"tailscale.com/types/logger"
)
// newUserspaceRouter returns a no-op FakeRouter on platforms with no native
// router implementation (see the build constraint at the top of this file).
func newUserspaceRouter(logf logger.Logf, tunname string, dev *device.Device, tuntap tun.Device, netChanged func()) Router {
	return NewFakeRouter(logf, tunname, dev, tuntap, netChanged)
}
|
package main
import (
"fmt"
"log"
"os"
"github.com/resin-os/resin-provisioner/provisioner"
)
func init() {
	// show date/time in log output.
	// (log.LstdFlags is Ldate|Ltime, which is also the package default; the
	// explicit call just documents the intent.)
	log.SetFlags(log.LstdFlags)
}
// usage prints both invocation forms to stderr and exits with status 1.
func usage() {
	prog := os.Args[0]
	fmt.Fprintf(os.Stderr, "usage: query: %s [config path]\n", prog)
	fmt.Fprintf(os.Stderr, "usage: provision: %s [config path] [user id] [app id] [api key]\n",
		prog)
	os.Exit(1)
}
// main either queries the provision state (2 args) or provisions a device
// (5 args: config path, user id, app id, api key).
//
// Fix: the original guard `len(os.Args) < 5 && len(os.Args) != 2` let any
// argument count above 5 through and silently ignored the extras; we now
// require exactly 2 or exactly 5 arguments.
func main() {
	if len(os.Args) != 2 && len(os.Args) != 5 {
		usage()
	}
	configPath := os.Args[1]
	api := provisioner.New(configPath)
	// Query mode: simply output the provision state.
	if len(os.Args) == 2 {
		if state, err := api.State(); err != nil {
			fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
			os.Exit(1)
		} else {
			fmt.Printf("%s\n", state)
		}
		return
	}
	// Provision mode.
	userId := os.Args[2]
	appId := os.Args[3]
	apiKey := os.Args[4]
	opts := &provisioner.ProvisionOpts{
		UserId: userId, ApplicationId: appId, ApiKey: apiKey}
	if err := api.Provision(opts); err != nil {
		fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
		os.Exit(1)
	}
}
|
/*
* Copyright (c) 2020. Ant Group. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package config
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
)
// TestLoadConfig checks that a full daemon JSON config unmarshals into
// DaemonConfig with prefetch, backend, and proxy fields populated.
//
// Fix: testify's require.Equal signature is (t, expected, actual); the
// original passed them swapped, which produces misleading failure
// messages. Also use require.NoError instead of require.Nil for errors.
func TestLoadConfig(t *testing.T) {
	buf := []byte(`{
		"device": {
			"backend": {
				"type": "registry",
				"config": {
					"scheme": "https",
					"host": "acr-nydus-registry-vpc.cn-hangzhou.cr.aliyuncs.com",
					"repo": "test/myserver",
					"auth": "",
					"blob_url_scheme": "http",
					"proxy": {
						"url": "http://p2p-proxy:65001",
						"fallback": true,
						"ping_url": "http://p2p-proxy:40901/server/ping",
						"check_interval": 5
					},
					"timeout": 5,
					"connect_timeout": 5,
					"retry_limit": 0
				}
			},
			"cache": {
				"type": "blobcache",
				"config": {
					"work_dir": "/cache"
				}
			}
		},
		"mode": "direct",
		"digest_validate": true,
		"iostats_files": true,
		"enable_xattr": true,
		"fs_prefetch": {
			"enable": true,
			"threads_count": 10,
			"merging_size": 131072
		}
	}`)
	var cfg DaemonConfig
	err := json.Unmarshal(buf, &cfg)
	require.NoError(t, err)
	require.Equal(t, true, cfg.FSPrefetch.Enable)
	require.Equal(t, 131072, cfg.FSPrefetch.MergingSize)
	require.Equal(t, 10, cfg.FSPrefetch.ThreadsCount)
	require.Equal(t, "http", cfg.Device.Backend.Config.BlobUrlScheme)
	require.Equal(t, 5, cfg.Device.Backend.Config.Proxy.CheckInterval)
}
|
package main
import (
"crypto/md5"
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/Azunyan1111/http-recoder/keys"
"github.com/elazarl/goproxy"
"log"
"net/http"
"net/http/httputil"
"os"
"regexp"
"strconv"
"strings"
)
// main starts a MITM-capable recording proxy on :8085. Every proxied
// HTTPS response is dumped to disk under save-requests/, keyed by
// scheme/host/path/md5(query)/status-code.
func main() {
	proxy := goproxy.NewProxyHttpServer()
	proxy.Verbose = true
	// Install our CA so goproxy can mint per-host certificates for MITM.
	goproxyCa, _ := tls.X509KeyPair(keys.GetCaCert(), keys.GetCaKey())
	goproxyCa.Leaf, _ = x509.ParseCertificate(goproxyCa.Certificate[0])
	goproxy.GoproxyCa = goproxyCa
	goproxy.OkConnect = &goproxy.ConnectAction{Action: goproxy.ConnectAccept, TLSConfig: goproxy.TLSConfigFromCA(&goproxyCa)}
	goproxy.MitmConnect = &goproxy.ConnectAction{Action: goproxy.ConnectMitm, TLSConfig: goproxy.TLSConfigFromCA(&goproxyCa)}
	goproxy.HTTPMitmConnect = &goproxy.ConnectAction{Action: goproxy.ConnectHTTPMitm, TLSConfig: goproxy.TLSConfigFromCA(&goproxyCa)}
	goproxy.RejectConnect = &goproxy.ConnectAction{Action: goproxy.ConnectReject, TLSConfig: goproxy.TLSConfigFromCA(&goproxyCa)}
	proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("^.*$"))).
		HandleConnect(goproxy.AlwaysMitm)
	// ----------------------
	proxy.OnResponse().DoFunc(func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
		// Fix: url.URL.Scheme is "http"/"https" with no "://" suffix, so
		// the original comparison against "http://" could never match and
		// plain-HTTP responses were recorded too. Skip them as intended.
		if resp.Request.URL.Scheme == "http" {
			return resp
		}
		// Build the dump path: scheme/host/path segments/md5(query)/status.
		var filePath []string
		filePath = append(filePath, resp.Request.URL.Scheme)
		filePath = append(filePath, resp.Request.URL.Host)
		paths := strings.Split(resp.Request.URL.Path, "/")
		filePath = append(filePath, paths...)
		filePath = append(filePath, fmt.Sprintf("%x", md5.Sum([]byte(resp.Request.URL.RawQuery))))
		filePath = append(filePath, strconv.Itoa(resp.StatusCode))
		rowFilePath := "save-requests"
		for _, a := range filePath {
			if a == "" {
				continue
			}
			rowFilePath += "/" + a
		}
		// Strip colons (e.g. host:port) so the path is filesystem-safe.
		rowFilePath = strings.ReplaceAll(rowFilePath, ":", "")
		// Create the directory tree for the dump file.
		fmt.Println(rowFilePath)
		err := os.MkdirAll(strings.Join(strings.Split(rowFilePath, "/")[:len(strings.Split(rowFilePath, "/"))-1], "/"), 0755)
		if err != nil {
			panic(err)
		}
		// Dump the full response (headers + body) to the file.
		dump, err := httputil.DumpResponse(resp, true)
		if err != nil {
			panic(err)
		}
		// Fix: add O_TRUNC so a shorter re-recorded response doesn't leave
		// stale trailing bytes, and close the handle (original leaked it).
		file, err := os.OpenFile(rowFilePath+".dump", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
		if err != nil {
			panic(err)
		}
		defer file.Close()
		if _, err = file.Write(dump); err != nil {
			panic(err)
		}
		return resp
	})
	log.Fatal(http.ListenAndServe(":8085", proxy))
}
|
package agent
import ()
// Load is implemented by agents that can initialize themselves from the
// given file path.
// NOTE(review): Go convention would name a one-method interface "Loader";
// kept as-is because renaming would break external callers.
type Load interface {
	Load(file string) error
}
// Crawl is implemented by agents that fetch content using a *Loader.
// (Loader is declared elsewhere in this package — presumably the concrete
// type behind the Load interface; confirm before documenting further.)
type Crawl interface {
	Crawl(loader *Loader) error
}
// Save is implemented by agents that can reorganize their results by
// channel and then persist them.
type Save interface {
	// ReArrange regroups collected data by the supplied channels.
	ReArrange(channels Channels) error
	// Save persists the (rearranged) data.
	Save() error
}
|
package fcache
import (
"sync"
"github.com/nuczzz/lru"
"sync/atomic"
)
// memCache is an in-memory Cache backed by an LRU eviction list.
// The map and the LRU share nodes: the LRU's callbacks (see
// addNodeCallback/deleteCallBack) keep m in sync, so every LRU operation
// must run while holding lock.
type memCache struct {
	// m maps (possibly MD5-hashed) keys to their LRU nodes.
	m map[interface{}]*lru.Node
	// needCryptKey hashes keys with MD5 before use when true.
	needCryptKey bool
	// lru is the eviction-order control structure.
	lru *lru.LRU
	// lock guards m and lru.
	lock sync.RWMutex
	// hitCount counts cache hits (updated atomically).
	hitCount int64
	// totalCount counts all lookups, hits and misses (updated atomically).
	totalCount int64
}
// deleteCallBack returns the hook the LRU invokes when it evicts a node;
// it removes the corresponding entry from mc.m. The hook mutates mc.m
// without taking mc.lock, so it must only run while the caller holds it.
func (mc *memCache) deleteCallBack() func(key interface{}) error {
	return func(key interface{}) error {
		delete(mc.m, key)
		return nil
	}
}
// addNodeCallback returns the hook the LRU invokes when it creates a node;
// it records the node in mc.m. Like deleteCallBack, it relies on the
// caller already holding mc.lock.
func (mc *memCache) addNodeCallback() func(*lru.Node) {
	return func(node *lru.Node) {
		mc.m[node.Key] = node
	}
}
// Set stores value under key, replacing the value if the key already
// exists. When needCryptKey is set, the key is MD5-hashed first so raw
// keys never appear in the map.
func (mc *memCache) Set(key string, value []byte, extra ...interface{}) error {
	if mc.needCryptKey {
		key = MD5(key)
	}
	mc.lock.Lock()
	defer mc.lock.Unlock()
	v := CacheValue{Value: value}
	if data, ok := mc.m[key]; ok {
		// Key already cached: swap the node's value in place.
		return mc.lru.Replace(data, v)
	}
	// memory cache ignore this error
	return mc.lru.AddNewNode(key, v, extra...)
}
// Get returns the cached value and extra data for key, or (nil, nil, nil)
// when the key is missing or no longer valid.
//
// Fix: this must take the WRITE lock, not RLock. lru.Access updates the
// recency order (it is what makes this an LRU), i.e. it mutates shared
// state; doing that under a read lock lets concurrent readers race on the
// LRU's internal list.
func (mc *memCache) Get(key string) (value []byte, extra interface{}, err error) {
	if mc.needCryptKey {
		key = MD5(key)
	}
	mc.lock.Lock()
	defer mc.lock.Unlock()
	atomic.AddInt64(&mc.totalCount, 1)
	if data, ok := mc.m[key]; ok {
		// memory cache ignore this error
		node, _ := mc.lru.Access(data)
		if node == nil {
			return nil, nil, nil
		}
		atomic.AddInt64(&mc.hitCount, 1)
		return node.Value.(CacheValue).Value, node.Extra, nil
	}
	return nil, nil, nil
}
// GetHitInfo returns (hitCount, totalCount) for hit-rate reporting.
// NOTE(review): the counters are maintained atomically, so the RWMutex
// here is likely redundant — kept to preserve existing locking behavior.
func (mc *memCache) GetHitInfo() (int64, int64) {
	mc.lock.RLock()
	defer mc.lock.RUnlock()
	return atomic.LoadInt64(&mc.hitCount), atomic.LoadInt64(&mc.totalCount)
}
// Clear removes key from the cache; it is a no-op when key is absent.
//
// Fix: lru.Delete mutates the LRU list and (via deleteCallBack) mc.m, so
// the write lock is required; the original's RLock allowed concurrent
// mutation.
func (mc *memCache) Clear(key string) error {
	if mc.needCryptKey {
		key = MD5(key)
	}
	mc.lock.Lock()
	defer mc.lock.Unlock()
	if data, ok := mc.m[key]; ok {
		return mc.lru.Delete(data)
	}
	return nil
}
// ClearAll removes every entry, stopping at the first deletion error.
//
// Fix: deleting nodes mutates the LRU and mc.m, so this needs the write
// lock; the original held only RLock.
func (mc *memCache) ClearAll() error {
	mc.lock.Lock()
	defer mc.lock.Unlock()
	var err error
	for _, node := range mc.lru.Traversal() {
		if err = mc.lru.Delete(node); err != nil {
			return err
		}
	}
	return nil
}
// newMemCache constructs a memory-backed Cache with LRU eviction.
// A non-positive maxSize falls back to DefaultMaxMemCacheSize; ttl is
// handed to the LRU unchanged.
func newMemCache(maxSize int64, needCryptKey bool, ttl int64) Cache {
	if maxSize <= 0 {
		maxSize = DefaultMaxMemCacheSize
	}
	cache := &memCache{
		m:            make(map[interface{}]*lru.Node),
		needCryptKey: needCryptKey,
	}
	// Wire the LRU's callbacks back to the cache so the map stays in sync.
	cache.lru = &lru.LRU{
		MaxSize:            maxSize,
		TTL:                ttl,
		AddNodeCallBack:    cache.addNodeCallback(),
		DeleteNodeCallBack: cache.deleteCallBack(),
	}
	return cache
}
|
package main
import (
	"fmt"
	"math/rand"
)
// Bet is a player's pre-round wager level.
type Bet int
const(
	NoBET Bet = iota // no bet placed
	TICHU
	GRAND_TICHU
)
// Suit is a card's suit; Special marks the four non-suited Tichu cards.
type Suit int
const(
	Spade Suit = iota
	Heart
	Diamond
	Club
	Special
)
// Card is a single playing card. Special cards use Suit == Special and
// small Rank values (see the special-card declarations in this file).
type Card struct {
	Suit Suit
	Rank int // 2-10, plus J,Q,K,A (11,12,13,14)
}
// The four special Tichu cards.
//
// Fix: these were declared in a `const` block, but Go does not allow
// struct-typed constants, so the file failed to compile. They must be
// package-level vars. ("Pheonix" is a misspelling of "Phoenix", kept
// because code may reference the identifier.)
var (
	Sparrow = Card{Special, 1}
	Dragon  = Card{Special, 2}
	Pheonix = Card{Special, 3}
	Dog     = Card{Special, 4}
)
// Player holds one seat's bet, current hand, and the cards won so far.
type Player struct {
	Bet Bet
	Hand []Card
	Winnings []Card
}
// Game is the full table state: four players and whose turn it is.
type Game struct {
	Players [4]Player
	CurrentPlayer int
	// TODO handle three human players + dummy
}
// generate_deck builds the full 56-card Tichu deck: the four special
// cards followed by ranks 2-14 in each of the four ordinary suits.
//
// Fix: the original declared `var deck []Card` (a nil slice) and then
// indexed into it, which panics at runtime with "index out of range".
// The slice must be allocated to its full length first.
func generate_deck() []Card {
	deck := make([]Card, 56) // 52 suited cards + 4 specials
	deck[0] = Sparrow
	deck[1] = Dragon
	deck[2] = Pheonix
	deck[3] = Dog
	currentIndex := 4
	suits := []Suit{Spade, Heart, Diamond, Club}
	for _, suit := range suits {
		for rank := 2; rank <= 14; rank++ {
			deck[currentIndex] = Card{Suit: suit, Rank: rank}
			currentIndex += 1
		}
	}
	return deck
}
// shuffle_deck randomizes the deck in place using Fisher-Yates.
//
// Fix: the original body was an empty stub, so every game dealt the deck
// in generation order.
func shuffle_deck(deck *[]Card) {
	d := *deck
	for i := len(d) - 1; i > 0; i-- {
		j := rand.Intn(i + 1)
		d[i], d[j] = d[j], d[i]
	}
}
// main deals the 56-card deck evenly to four players (14 cards each) and
// prints the resulting game state.
func main() {
	// play a game between four Tichu players
	deck := generate_deck()
	shuffle_deck(&deck)
	var game Game
	for i := 0; i < 4; i++ {
		// Take the next 14 cards off the top of the deck for this seat.
		hand := deck[:14]
		deck = deck[14:]
		game.Players[i] = Player{Bet: NoBET, Hand: hand}
	}
	fmt.Println(game)
}
|
package rc522
const (
	//MF522 command codes (values written to CommandReg)
	PCD_IDLE = 0x00
	PCD_AUTHENT = 0x0E
	PCD_RECEIVE = 0x08
	PCD_TRANSMIT = 0x04
	PCD_TRANSCEIVE = 0x0C
	PCD_RESETPHASE = 0x0F
	PCD_CALCCRC = 0x03
	//Mifare_One card command words sent over the RF interface
	PICC_REQIDL = 0x26
	PICC_REQALL = 0x52
	PICC_ANTICOLL1 = 0x93
	PICC_ANTICOLL2 = 0x95
	PICC_ANTICOLL3 = 0x97
	PICC_AUTHENT1A = 0x60
	PICC_AUTHENT1B = 0x61
	PICC_READ = 0x30
	PICC_WRITE = 0xA0
	PICC_DECREMENT = 0xC0
	PICC_INCREMENT = 0xC1
	PICC_RESTORE = 0xC2
	PICC_TRANSFER = 0xB0
	PICC_HALT = 0x50
	//MF522 FIFO sizes
	fifoLEN = 64 // FIFO depth in bytes
	maxLen = 18  // transfer buffer size — presumably 16-byte block + 2-byte CRC; confirm against datasheet
)
const ( //MF522 registers — addresses assigned sequentially by iota (0x00-0x3B)
	_ = iota          // 0x00 (skipped)
	CommandReg        // 0x01
	ComIEnReg         // 0x02
	DivlEnReg         // 0x03 (identifier has a lowercase 'l'; kept to avoid breaking callers)
	ComIrqReg         // 0x04
	DivIrqReg         // 0x05
	ErrorReg          // 0x06
	Status1Reg        // 0x07
	Status2Reg        // 0x08
	FIFODataReg       // 0x09
	FIFOLevelReg      // 0x0A
	WaterLevelReg     // 0x0B
	ControlReg        // 0x0C
	BitFramingReg     // 0x0D
	CollReg           // 0x0E
	_                 // 0x0F (skipped)
	_                 // 0x10 (skipped)
	ModeReg           // 0x11
	TxModeReg         // 0x12
	RxModeReg         // 0x13
	TxControlReg      // 0x14
	TxASKReg          // 0x15
	TxSelReg          // 0x16
	RxSelReg          // 0x17
	RxThresholdReg    // 0x18
	DemodReg          // 0x19
	_                 // 0x1A (skipped)
	_                 // 0x1B (skipped)
	MifareReg         // 0x1C
	_                 // 0x1D (skipped)
	_                 // 0x1E (skipped)
	SerialSpeedReg    // 0x1F
	_                 // 0x20 (skipped)
	CRCResultRegM     // 0x21
	CRCResultRegL     // 0x22
	_                 // 0x23 (skipped)
	ModWidthReg       // 0x24
	_                 // 0x25 (skipped)
	RFCfgReg          // 0x26
	GsNReg            // 0x27
	CWGsCfgReg        // 0x28
	ModGsCfgReg       // 0x29
	TModeReg          // 0x2A
	TPrescalerReg     // 0x2B
	TReloadRegH       // 0x2C
	TReloadRegL       // 0x2D
	TCounterValueRegH // 0x2E
	TCounterValueRegL // 0x2F
	_                 // 0x30 (skipped)
	TestSel1Reg       // 0x31
	TestSel2Reg       // 0x32
	TestPinEnReg      // 0x33
	TestPinValueReg   // 0x34
	TestBusReg        // 0x35
	AutoTestReg       // 0x36
	VersionReg        // 0x37
	AnalogTestReg     // 0x38
	TestDAC1Reg       // 0x39
	TestDAC2Reg       // 0x3A
	TestADCReg        // 0x3B
)
|
package util
import (
"io"
"os"
"github.com/pkg/errors"
)
// ForEachFile iterates over every file in the specified directory path,
// invoking fn for each identified file.
//
// If path is not a directory, ForEachFile will return an error.
//
// If fn returns an error, iteration will stop and ForEachFile will return
// that error.
func ForEachFile(path string, fn func(os.FileInfo) error) error {
const scanSize = 1024
dir, err := os.Open(path)
if err != nil {
return err
}
defer func() {
_ = dir.Close()
}()
// Assert that path is a directory.
switch st, err := dir.Stat(); {
case err != nil:
return err
case !st.IsDir():
return errors.New("not a directory")
}
eof := false
for !eof {
files, err := dir.Readdir(scanSize)
if err != nil {
if err != io.EOF {
return err
}
eof = true
}
// Invoke callback for each file.
for _, f := range files {
if err := fn(f); err != nil {
return err
}
}
}
return nil
}
|
package gengateway
import (
"io"
"log"
"net/http"
"time"
"github.com/gogo/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"github.com/vanishs/gwsrpc/utils"
"github.com/vanishs/gwsrpc/ws"
"github.com/vanishs/gwsrpc/gengateway/genws"
"github.com/vanishs/gwsrpc/gwsrpcpbfile/aaa"
"github.com/vanishs/gwsrpc/gwsrpcpbfile/bbb"
"github.com/vanishs/gwsrpc/gwsrpcpbfile/demo"
)
// co tracks a single gateway websocket connection: its genws connection
// handle, the remote address, and the id assigned at registration time.
type co struct {
	cc *genws.Connection
	addr string
	id string
}
// Last-seen instance ids of each backend service; refreshed by the
// GWSRPCCHECK polling loops, and a change triggers a re-init broadcast.
var (
	aaaID string
	bbbID string
	demoID string
)
// hub serializes all connection bookkeeping through channels so the
// connections map is only ever touched from the run() goroutine.
type hub struct {
	connections map[string]*co // registered connections keyed by gateway id
	reinitall chan string // service name whose clients must re-init
	register chan *co // connections to add
	unregister chan *co // connections to remove
}
// h is the process-wide hub instance; its run() loop must be started once.
var h = hub{
	connections: make(map[string]*co),
	reinitall: make(chan string),
	register: make(chan *co),
	unregister: make(chan *co),
}
// run is the hub's event loop: it registers/unregisters connections and
// pushes service-restart notifications to every connection when a
// backend's instance id changes. It never returns.
//
// Fix: Go switch cases do not fall through, so the trailing `break`
// statements in the original were redundant and have been removed.
func (h *hub) run() {
	for {
		select {
		case c := <-h.register:
			h.connections[c.id] = c
			// Tell the new connection about all current backend ids.
			c.cc.ServiceRestartaaa(aaaID)
			c.cc.ServiceRestartbbb(bbbID)
			c.cc.ServiceRestartdemo(demoID)
		case c := <-h.unregister:
			delete(h.connections, c.id)
		case sename := <-h.reinitall:
			switch sename {
			case "aaa":
				for _, v := range h.connections {
					v.cc.ServiceRestartaaa(aaaID)
				}
			case "bbb":
				for _, v := range h.connections {
					v.cc.ServiceRestartbbb(bbbID)
				}
			case "demo":
				for _, v := range h.connections {
					v.cc.ServiceRestartdemo(demoID)
				}
			}
		}
	}
}
// OnGwsrpcReg records the id assigned to this connection and hands it to
// the hub (blocks until the run() loop receives it).
func (c *co) OnGwsrpcReg(id string) {
	c.id = id
	h.register <- c
}
// OnGwsrpcOpen stores the websocket connection handle and remote address
// when the connection is opened.
func (c *co) OnGwsrpcOpen(cc *genws.Connection, addr string) {
	c.addr = addr
	c.cc = cc
}
// OnGwsrpcClose removes this connection from the hub when the websocket
// closes (blocks until the run() loop receives it).
func (c *co) OnGwsrpcClose() {
	h.unregister <- c
}
// OnLogin forwards a Login request to the aaa service, stamping it with
// this connection's gateway id. Returns nil when the RPC fails.
func (c *co) OnLogin(request *aaa.Send) *aaa.Recv {
	req := *request // copy so the caller's message is not mutated
	req.GWSRPCID = c.id
	resp, err := aaaClient.Login(context.Background(), &req)
	if err != nil {
		grpclog.Printf("aaaClient %v: ", err)
		return nil
	}
	return resp
}
// OnLogout forwards a Logout request to the aaa service, stamping it with
// this connection's gateway id. Returns nil when the RPC fails.
func (c *co) OnLogout(request *aaa.Send) *aaa.Recv {
	req := *request // copy so the caller's message is not mutated
	req.GWSRPCID = c.id
	resp, err := aaaClient.Logout(context.Background(), &req)
	if err != nil {
		grpclog.Printf("aaaClient %v: ", err)
		return nil
	}
	return resp
}
// OnReg forwards a Reg request to the aaa service, stamping it with this
// connection's gateway id. Returns nil when the RPC fails.
func (c *co) OnReg(request *aaa.Send) *aaa.Recv {
	req := *request // copy so the caller's message is not mutated
	req.GWSRPCID = c.id
	resp, err := aaaClient.Reg(context.Background(), &req)
	if err != nil {
		grpclog.Printf("aaaClient %v: ", err)
		return nil
	}
	return resp
}
// OnWatchMessage opens a server-streaming WatchMessage RPC on the aaa
// service and relays every received message to the websocket session.
// Blocks until the stream ends (io.EOF maps to nil) or an error occurs.
func (c *co) OnWatchMessage(request *aaa.Send, genwsc *genws.Connection, session []byte) error {
	var c2request aaa.Send
	c2request = *request
	// Stamp the copy with this connection's gateway id before forwarding.
	c2request.GWSRPCID = c.id
	s, err := aaaClient.WatchMessage(context.Background(), &c2request)
	if err != nil {
		grpclog.Printf("aaaClient %v: ", err)
		return err
	}
	for {
		r, err := s.Recv()
		if err == io.EOF {
			// Server closed the stream normally.
			break
		} else if err != nil {
			return err
		}
		body, err := proto.Marshal(r)
		if err != nil {
			return err
		}
		// Relay the marshaled message back to the websocket client.
		genws.RecvGRPC2GWSRPC(genwsc, session, body)
	}
	return nil
}
// OnNewFile forwards a NewFile request to the bbb service, stamping it
// with this connection's gateway id. Returns nil when the RPC fails.
func (c *co) OnNewFile(request *bbb.Send) *bbb.Recv {
	req := *request // copy so the caller's message is not mutated
	req.GWSRPCID = c.id
	resp, err := bbbClient.NewFile(context.Background(), &req)
	if err != nil {
		grpclog.Printf("bbbClient %v: ", err)
		return nil
	}
	return resp
}
// OnChangeFile forwards a ChangeFile request to the bbb service, stamping
// it with this connection's gateway id. Returns nil when the RPC fails.
func (c *co) OnChangeFile(request *bbb.Send) *bbb.Recv {
	req := *request // copy so the caller's message is not mutated
	req.GWSRPCID = c.id
	resp, err := bbbClient.ChangeFile(context.Background(), &req)
	if err != nil {
		grpclog.Printf("bbbClient %v: ", err)
		return nil
	}
	return resp
}
// OnDelFile forwards a DelFile request to the bbb service, stamping it
// with this connection's gateway id. Returns nil when the RPC fails.
func (c *co) OnDelFile(request *bbb.Send) *bbb.Recv {
	req := *request // copy so the caller's message is not mutated
	req.GWSRPCID = c.id
	resp, err := bbbClient.DelFile(context.Background(), &req)
	if err != nil {
		grpclog.Printf("bbbClient %v: ", err)
		return nil
	}
	return resp
}
// OnRr forwards an Rr request to the demo service, stamping it with this
// connection's gateway id. Returns nil when the RPC fails.
func (c *co) OnRr(request *demo.Send) *demo.Recv {
	req := *request // copy so the caller's message is not mutated
	req.GWSRPCID = c.id
	resp, err := demoClient.Rr(context.Background(), &req)
	if err != nil {
		grpclog.Printf("demoClient %v: ", err)
		return nil
	}
	return resp
}
// Per-service connection settings (address, TLS on/off, cert file, server
// name override) and the resulting RPC client stubs. All are populated by
// Gain before any connection is served.
var (
	localaddr string // local listen address for the websocket server
	aaaAddr string
	aaaTLS bool
	aaaFile string
	aaaOver string
	aaaClient aaa.RPCClient
	bbbAddr string
	bbbTLS bool
	bbbFile string
	bbbOver string
	bbbClient bbb.RPCClient
	demoAddr string
	demoTLS bool
	demoFile string
	demoOver string
	demoClient demo.RPCClient
)
// aaaConn dials the aaa backend (optionally over TLS) and starts a
// goroutine that polls GWSRPCCHECK every 100ms, broadcasting a re-init to
// all connections whenever the backend's instance id changes.
//
// Fix: context.WithTimeout returns a cancel func that must be called to
// release the timeout's resources; the original discarded it (go vet's
// lostcancel check), leaking a context per poll iteration.
func aaaConn(gwid string) {
	var opts []grpc.DialOption
	var err error
	if aaaTLS {
		var sn string
		if aaaOver != "" {
			sn = aaaOver
		}
		var creds credentials.TransportCredentials
		if aaaFile != "" {
			creds, err = credentials.NewClientTLSFromFile(aaaFile, sn)
			if err != nil {
				grpclog.Fatalf("Failed to create TLS credentials %v", err)
			}
		} else {
			creds = credentials.NewClientTLSFromCert(nil, sn)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	conn, err := grpc.Dial(aaaAddr, opts...)
	if err != nil {
		grpclog.Fatalf("fail to dial: %v", err)
	}
	//defer aaaCliConn.Close()
	aaaClient = aaa.NewRPCClient(conn)
	go func() {
		for {
			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
			r, err := aaaClient.GWSRPCCHECK(ctx, &aaa.GWSRPCCHECKSEND{GWSRPCID: gwid})
			cancel()
			if err != nil {
				//grpclog.Printf("aaaClient %v: ", err)
				time.Sleep(100 * time.Millisecond)
				continue
			}
			if r.GWSRPCID != aaaID {
				aaaID = r.GWSRPCID
				h.reinitall <- "aaa"
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()
}
// bbbConn dials the bbb backend (optionally over TLS) and starts a
// goroutine that polls GWSRPCCHECK every 100ms, broadcasting a re-init to
// all connections whenever the backend's instance id changes.
//
// Fix: the context.WithTimeout cancel func is now called each iteration
// instead of being discarded (lostcancel leak in the original).
func bbbConn(gwid string) {
	var opts []grpc.DialOption
	var err error
	if bbbTLS {
		var sn string
		if bbbOver != "" {
			sn = bbbOver
		}
		var creds credentials.TransportCredentials
		if bbbFile != "" {
			creds, err = credentials.NewClientTLSFromFile(bbbFile, sn)
			if err != nil {
				grpclog.Fatalf("Failed to create TLS credentials %v", err)
			}
		} else {
			creds = credentials.NewClientTLSFromCert(nil, sn)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	conn, err := grpc.Dial(bbbAddr, opts...)
	if err != nil {
		grpclog.Fatalf("fail to dial: %v", err)
	}
	//defer bbbCliConn.Close()
	bbbClient = bbb.NewRPCClient(conn)
	go func() {
		for {
			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
			r, err := bbbClient.GWSRPCCHECK(ctx, &bbb.GWSRPCCHECKSEND{GWSRPCID: gwid})
			cancel()
			if err != nil {
				//grpclog.Printf("bbbClient %v: ", err)
				time.Sleep(100 * time.Millisecond)
				continue
			}
			if r.GWSRPCID != bbbID {
				bbbID = r.GWSRPCID
				h.reinitall <- "bbb"
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()
}
// demoConn dials the demo backend (optionally over TLS) and starts a
// goroutine that polls GWSRPCCHECK every 100ms, broadcasting a re-init to
// all connections whenever the backend's instance id changes.
//
// Fix: the context.WithTimeout cancel func is now called each iteration
// instead of being discarded (lostcancel leak in the original).
func demoConn(gwid string) {
	var opts []grpc.DialOption
	var err error
	if demoTLS {
		var sn string
		if demoOver != "" {
			sn = demoOver
		}
		var creds credentials.TransportCredentials
		if demoFile != "" {
			creds, err = credentials.NewClientTLSFromFile(demoFile, sn)
			if err != nil {
				grpclog.Fatalf("Failed to create TLS credentials %v", err)
			}
		} else {
			creds = credentials.NewClientTLSFromCert(nil, sn)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	conn, err := grpc.Dial(demoAddr, opts...)
	if err != nil {
		grpclog.Fatalf("fail to dial: %v", err)
	}
	//defer demoCliConn.Close()
	demoClient = demo.NewRPCClient(conn)
	go func() {
		for {
			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
			r, err := demoClient.GWSRPCCHECK(ctx, &demo.GWSRPCCHECKSEND{GWSRPCID: gwid})
			cancel()
			if err != nil {
				//grpclog.Printf("demoClient %v: ", err)
				time.Sleep(100 * time.Millisecond)
				continue
			}
			if r.GWSRPCID != demoID {
				demoID = r.GWSRPCID
				h.reinitall <- "demo"
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()
}
// rpcmainOnConnect builds the per-connection callback receiver handed to
// genws for each new websocket.
func rpcmainOnConnect() genws.OnStruct {
	return &co{}
}
// Gain is the gateway's main entry point: it records the backend service
// settings, starts the hub loop, connects to each backend, and serves the
// websocket endpoint (optionally over TLS) until the listener fails.
func Gain(
	_aaaAddr string,
	_aaaTLS bool,
	_aaaFile string,
	_aaaOver string,
	_bbbAddr string,
	_bbbTLS bool,
	_bbbFile string,
	_bbbOver string,
	_demoAddr string,
	_demoTLS bool,
	_demoFile string,
	_demoOver string,
	_localaddr string,
	_localsslname string,
) {
	// NOTE(review): the pipe and proto-version reference appear to exist
	// only to keep their imports in use; the pipe is never read or closed.
	// Confirm before removing.
	pp, _ := io.Pipe()
	log.Println("Gateway start.", proto.GoGoProtoPackageIsVersion2, pp)
	localaddr = _localaddr
	aaaAddr = _aaaAddr
	aaaTLS = _aaaTLS
	aaaFile = _aaaFile
	aaaOver = _aaaOver
	bbbAddr = _bbbAddr
	bbbTLS = _bbbTLS
	bbbFile = _bbbFile
	bbbOver = _bbbOver
	demoAddr = _demoAddr
	demoTLS = _demoTLS
	demoFile = _demoFile
	demoOver = _demoOver
	log.Println("run Gain")
	// Start the hub loop before any connection can register.
	go h.run()
	gwid := utils.Randstr20()
	aaaConn(gwid)
	bbbConn(gwid)
	demoConn(gwid)
	http.HandleFunc("/", ws.ServeWs)
	genws.SetHandleConnect(1024*1024*1, 256, 10, 60, rpcmainOnConnect)
	var err error
	if _localsslname == "" {
		err = http.ListenAndServe(localaddr, nil)
	} else {
		// TLS mode: expects <name>.pem / <name>.key next to the binary.
		err = http.ListenAndServeTLS(localaddr, _localsslname+".pem", _localsslname+".key", nil)
	}
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package example
// main demonstrates that Go has no conversion from string to int:
// `int(str)` is rejected at compile time. NOTE(review): this file
// intentionally does not compile; parsing a numeric string requires
// strconv.Atoi instead.
func main() {
	var str string = "hello"
	integer := int(str) // compile error: cannot convert str (type string) to type int
}
|
package modules
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestGetMapUpdatedItems verifies that NewTemplateKeyValueMaker parses the
// event's UpdatedData JSON into MapUpdateItems.
func TestGetMapUpdatedItems(t *testing.T) {
	maker := NewTemplateKeyValueMaker(
		&CrudEvent{
			UpdatedData: `{"test":"t1","test_id":1}`,
		},
		&NotificationType{
			TitleTemplate: "",
			MessageTemplate: "",
		})
	assert.Equal(t, 2, len(maker.MapUpdateItems))
	assert.Equal(t, "t1", maker.MapUpdateItems["test"])
	// JSON numbers decode as float64; convert back to uint for comparison.
	idFloat := maker.MapUpdateItems["test_id"].(float64)
	assert.Equal(t, uint(1), uint(idFloat))
}
// TestTemplateKeyValueMaker verifies that template placeholders referring
// to the event's own resource are resolved from UpdatedData, while
// placeholders for other resources (Content.*) become DB-lookup params.
func TestTemplateKeyValueMaker(t *testing.T) {
	crudEvent := &CrudEvent{
		ResourceID: 1111,
		ResourceName: "Product",
		UpdatedData: `{"ID":1111,"Name":"GoodName","ContentID":1,"IsCheck":false}`,
	}
	notificationType := &NotificationType{
		TitleTemplate: "title {{Product.Name}} {{Content.Name}} {{Product.IsCheck}}",
		MessageTemplate: "message {{Content.Version}}",
		ListItemTemplate: "list {{Product.Name}}",
	}
	templateKeyValueMaker := NewTemplateKeyValueMaker(crudEvent, notificationType)
	getDBValueParams := templateKeyValueMaker.MakeGetValueParams()
	// Two Content.* placeholders -> two lookup params.
	assert.Equal(t, 2, len(getDBValueParams))
	// Product.* values are resolved directly from UpdatedData.
	assert.Equal(t, "GoodName", templateKeyValueMaker.GetedValues["Name"])
	assert.Equal(t, "false", templateKeyValueMaker.GetedValues["IsCheck"])
	// Content.* params carry the id plus snake_cased column/table names.
	assert.Equal(t, uint(1), getDBValueParams["Content.Version"].ID)
	assert.Equal(t, "version", getDBValueParams["Content.Version"].ColumnName)
	assert.Equal(t, "contents", getDBValueParams["Content.Version"].TableName)
	assert.Equal(t, uint(1), getDBValueParams["Content.Name"].ID)
	assert.Equal(t, "name", getDBValueParams["Content.Name"].ColumnName)
	assert.Equal(t, "contents", getDBValueParams["Content.Name"].TableName)
}
// TestMakeNotification verifies template expansion: base values fill the
// title/message, each listValues entry expands ListItemTemplate once, and
// the concatenation replaces {{ListItem}} in the message.
func TestMakeNotification(t *testing.T) {
	baseGetedValues := map[string]string{
		"Key": "base-test1-1",
		"Key1": "base-test1-2",
		"Key2": "base-3",
		"Key3": fmt.Sprintf("%s", time.Now()),
	}
	listValues := []KeyValue{
		map[string]string{
			"List.Key": "list-test1-1",
			"List.Key1": "list-test1-2",
			"List.Key2": "3",
			"List.Key3": fmt.Sprintf("%s", time.Now()),
		},
		map[string]string{
			"List.Key": "list-test2-1",
			"List.Key1": "list-test2-2",
			"List.Key2": "4",
			"List.Key3": fmt.Sprintf("%s", time.Now()),
		},
		map[string]string{
			"List.Key": "list-test3-1",
			"List.Key1": "list-test3-2",
			"List.Key2": "5",
			"List.Key3": fmt.Sprintf("%s", time.Now()),
		},
	}
	// Template
	notiType := &NotificationType{
		TitleTemplate: "title {{Key}} {{NowMonth}}",
		MessageTemplate: "message {{Key}} {{Key1}}\n{{ListItem}}",
		ListItemTemplate: "- {{Key}} {{List.Key}} {{List.Key1}} {{List.Key2}}\n",
		ReplaceText: "old:new,test1:newtest1",
	}
	notification := MakeNotification(notiType, baseGetedValues, listValues)
	assert.NotNil(t, notification)
	// {{NowMonth}} is expected to expand to the current month number.
	assert.Equal(t, fmt.Sprintf("title %s %d", baseGetedValues["Key"], time.Now().Month()), notification.Title)
	assert.Equal(t, "message base-test1-1 base-test1-2\n- base-test1-1 list-test1-1 list-test1-2 3\n- base-test1-1 list-test2-1 list-test2-2 4\n- base-test1-1 list-test3-1 list-test3-2 5\n", notification.Message)
}
|
package dstate
import (
"errors"
"github.com/NamedKitten/discordgo"
"sync"
"time"
)
// Sentinel errors returned by guild-state lookups.
var (
	ErrMemberNotFound = errors.New("Member not found")
	ErrChannelNotFound = errors.New("Channel not found")
)
// GuildState holds all cached state for a single guild plus the lock that
// guards it. Methods that take a `lock bool` acquire the appropriate lock
// themselves when true; pass false only when the caller already holds it.
type GuildState struct {
	sync.RWMutex
	// ID is never mutated, so can be accessed without locking
	ID int64 `json:"id"`
	// The underlying guild; its members and channels fields shouldn't be used (see the maps below)
	Guild *discordgo.Guild `json:"guild"`
	Members map[int64]*MemberState `json:"members"`
	Channels map[int64]*ChannelState `json:"channels" `
	MaxMessages int // Absolute max number of messages cached in a channel
	MaxMessageDuration time.Duration // Max age of messages, if 0 ignored. (Only checks age when a new message is received on the channel)
	RemoveOfflineMembers bool
	// userCache is lazily initialized by the UserCache* methods.
	userCache *Cache `json:"-"`
}
// NewGuildState creates a new guild state; it only uses the passed state
// to read settings from. Pass nil to use default settings.
//
// Fix: the original read state.TrackMembers outside the nil guard, so
// passing nil (explicitly documented as allowed) panicked. When state is
// nil, members/presences are now tracked by default.
func NewGuildState(guild *discordgo.Guild, state *State) *GuildState {
	gCop := new(discordgo.Guild)
	*gCop = *guild
	guildState := &GuildState{
		ID: guild.ID,
		Guild: gCop,
		Members: make(map[int64]*MemberState),
		Channels: make(map[int64]*ChannelState),
		userCache: NewCache(),
	}
	trackMembers := true // default when no parent state is supplied
	if state != nil {
		guildState.MaxMessages = state.MaxChannelMessages
		guildState.MaxMessageDuration = state.MaxMessageAge
		guildState.RemoveOfflineMembers = state.RemoveOfflineMembers
		trackMembers = state.TrackMembers
	}
	for _, channel := range gCop.Channels {
		guildState.ChannelAddUpdate(false, channel)
	}
	if trackMembers {
		for _, member := range gCop.Members {
			guildState.MemberAddUpdate(false, member)
		}
		for _, presence := range gCop.Presences {
			guildState.PresenceAddUpdate(false, presence)
		}
	}
	// Drop the slices already ingested into the maps above so they aren't
	// kept alive (and double-tracked) on the inner guild.
	gCop.Presences = nil
	gCop.Members = nil
	gCop.Emojis = nil
	gCop.Channels = nil
	return guildState
}
// StrID returns the guild's ID formatted as a string.
// (ID is never mutated, so this needs no locking.)
func (g *GuildState) StrID() string {
	return discordgo.StrID(g.ID)
}
// GuildUpdate updates the guild with new guild information.
// Fields the update omits (roles, emojis, voice states, member count) are
// carried over from the previous guild object before the wholesale copy.
func (g *GuildState) GuildUpdate(lock bool, newGuild *discordgo.Guild) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	if newGuild.Roles == nil {
		newGuild.Roles = g.Guild.Roles
	}
	if newGuild.Emojis == nil {
		newGuild.Emojis = g.Guild.Emojis
	}
	if newGuild.VoiceStates == nil {
		newGuild.VoiceStates = g.Guild.VoiceStates
	}
	if newGuild.MemberCount == 0 {
		newGuild.MemberCount = g.Guild.MemberCount
	}
	// Create/update new channels
	*g.Guild = *newGuild
	for _, c := range newGuild.Channels {
		g.ChannelAddUpdate(false, c)
	}
	// Remove channels that no longer appear in the update — but only when
	// the update actually carried a channel list.
	if newGuild.Channels != nil {
	OUTER:
		for _, checking := range g.Channels {
			for _, c := range newGuild.Channels {
				if c.ID == checking.ID {
					continue OUTER
				}
			}
			delete(g.Channels, checking.ID)
		}
	}
}
// LightCopy returns a shallow copy of the inner guild with every slice
// field stripped, so it can be used without holding the guild lock.
func (g *GuildState) LightCopy(lock bool) *discordgo.Guild {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	shallow := *g.Guild
	shallow.Members = nil
	shallow.Presences = nil
	shallow.Channels = nil
	shallow.VoiceStates = nil
	shallow.Roles = nil
	return &shallow
}
// Member returns the member for the given id, or nil if not found.
// The returned pointer is shared state; see MemberCopy for a safe copy.
func (g *GuildState) Member(lock bool, id int64) *MemberState {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	return g.Members[id]
}
// MemberCopy returns a full copy of a MemberState, usable without locking.
// Warning: slices inside the state (such as roles) may still race; treat
// them as read-only.
func (g *GuildState) MemberCopy(lock bool, id int64) *MemberState {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	if ms := g.Member(false, id); ms != nil {
		return ms.Copy()
	}
	return nil
}
// ChannelCopy returns a copy of a channel, or nil when the id is unknown.
// When deep is true the permission overwrites are copied too; otherwise
// they are left nil.
func (g *GuildState) ChannelCopy(lock bool, id int64, deep bool) *ChannelState {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	if cs := g.Channel(false, id); cs != nil {
		return cs.Copy(false, deep)
	}
	return nil
}
// MemberAddUpdate adds or updates a member. Requires the write lock
// unless lock is true.
func (g *GuildState) MemberAddUpdate(lock bool, newMember *discordgo.Member) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	existing, ok := g.Members[newMember.User.ID]
	if ok {
		existing.UpdateMember(newMember)
	} else {
		// First time this user is seen: create a fresh MemberState.
		ms := &MemberState{
			Guild: g,
			ID: newMember.User.ID,
			Bot: newMember.User.Bot,
		}
		ms.UpdateMember(newMember)
		g.Members[newMember.User.ID] = ms
	}
}
// MemberAdd adds a member to the GuildState.
// It differs from MemberAddUpdate in that it first increments the guild's
// member count, so it should only be called on the GuildMemberAdd event.
func (g *GuildState) MemberAdd(lock bool, newMember *discordgo.Member) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	g.Guild.MemberCount++
	g.MemberAddUpdate(false, newMember)
}
// MemberRemove removes a member from the guildstate.
// It also decrements the member count, so only call this on the
// GuildMemberRemove event; to remove purely from the state, use
// StateRemoveMember.
func (g *GuildState) MemberRemove(lock bool, id int64) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	g.Guild.MemberCount--
	delete(g.Members, id)
}
// StateRemoveMember drops a member from the state WITHOUT touching the
// guild's member count (unlike MemberRemove).
func (g *GuildState) StateRemoveMember(lock bool, id int64) {
	if !lock {
		delete(g.Members, id)
		return
	}
	g.Lock()
	delete(g.Members, id)
	g.Unlock()
}
// PresenceAddUpdate adds or updates a presence. For unknown users it
// creates a MemberState from the presence, except when the presence is
// offline (the state would be too incomplete to be useful).
func (g *GuildState) PresenceAddUpdate(lock bool, newPresence *discordgo.Presence) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	existing, ok := g.Members[newPresence.User.ID]
	if ok {
		existing.UpdatePresence(newPresence)
	} else {
		if newPresence.Status == discordgo.StatusOffline {
			// Don't bother if this is the case, most likely just removed from the server and the state would be very incomplete
			return
		}
		ms := &MemberState{
			Guild: g,
			ID: newPresence.User.ID,
			Bot: newPresence.User.Bot,
		}
		ms.UpdatePresence(newPresence)
		g.Members[newPresence.User.ID] = ms
	}
	if newPresence.Status == discordgo.StatusOffline && g.RemoveOfflineMembers {
		// Remove after a minute incase they just restart the client or something
		time.AfterFunc(time.Minute, func() {
			g.Lock()
			defer g.Unlock()
			// Only drop the member if they are still offline a minute later.
			member := g.Member(false, newPresence.User.ID)
			if member != nil {
				if !member.PresenceSet || member.PresenceStatus == StatusOffline {
					delete(g.Members, newPresence.User.ID)
				}
			}
		})
	}
}
// copyPresence returns a deep copy of a presence: the Game and User
// pointers and the Roles slice are all duplicated.
func copyPresence(in *discordgo.Presence) *discordgo.Presence {
	out := new(discordgo.Presence)
	*out = *in
	if in.Game != nil {
		game := *in.Game
		out.Game = &game
	}
	user := *in.User
	out.User = &user
	out.Roles = nil
	if in.Roles != nil {
		out.Roles = make([]int64, len(in.Roles))
		copy(out.Roles, in.Roles)
	}
	return out
}
// Channel retrieves a channelstate by id, or nil if not found.
// The returned pointer is shared state; see ChannelCopy for a safe copy.
func (g *GuildState) Channel(lock bool, id int64) *ChannelState {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	return g.Channels[id]
}
// ChannelAddUpdate adds or updates a channel in the guildstate and
// returns its ChannelState.
func (g *GuildState) ChannelAddUpdate(lock bool, newChannel *discordgo.Channel) *ChannelState {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	existing, ok := g.Channels[newChannel.ID]
	if ok {
		// Already tracked: patch the existing state in place.
		existing.Update(false, newChannel)
		return existing
	}
	state := NewChannelState(g, g, newChannel)
	g.Channels[newChannel.ID] = state
	return state
}
// ChannelRemove removes a channel from the GuildState; no-op when the id
// is unknown.
func (g *GuildState) ChannelRemove(lock bool, id int64) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	delete(g.Channels, id)
}
// Role returns a role by id via a linear scan, or nil if not found.
func (g *GuildState) Role(lock bool, id int64) *discordgo.Role {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	for i := range g.Guild.Roles {
		if g.Guild.Roles[i].ID == id {
			return g.Guild.Roles[i]
		}
	}
	return nil
}
// RoleAddUpdate overwrites an existing role in place, or appends a copy
// of newRole when the guild doesn't have it yet.
func (g *GuildState) RoleAddUpdate(lock bool, newRole *discordgo.Role) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	existing := g.Role(false, newRole.ID)
	if existing != nil {
		*existing = *newRole
	} else {
		// Copy so the caller's role object isn't aliased into our state.
		rCop := *newRole
		g.Guild.Roles = append(g.Guild.Roles, &rCop)
	}
}
// RoleRemove deletes a role by id; no-op when the id is unknown.
func (g *GuildState) RoleRemove(lock bool, id int64) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	roles := g.Guild.Roles
	for i := range roles {
		if roles[i].ID == id {
			g.Guild.Roles = append(roles[:i], roles[i+1:]...)
			return
		}
	}
}
// VoiceState returns the voice state for userID, or nil when the user is
// not in a voice channel.
func (g *GuildState) VoiceState(lock bool, userID int64) *discordgo.VoiceState {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	for _, vs := range g.Guild.VoiceStates {
		if vs.UserID == userID {
			return vs
		}
	}
	return nil
}
// VoiceStateUpdate applies a voice-state update: leave events (ChannelID
// 0) remove the user's entry; otherwise the existing entry is replaced or
// a copy appended.
//
// Fix: when a leave event arrived for a user with no recorded voice
// state, the original fell through the removal loop and appended a bogus
// entry with ChannelID 0; a leave event now always stops here.
func (g *GuildState) VoiceStateUpdate(lock bool, update *discordgo.VoiceStateUpdate) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}
	// Handle Leaving Channel
	if update.ChannelID == 0 {
		for i, state := range g.Guild.VoiceStates {
			if state.UserID == update.UserID {
				g.Guild.VoiceStates = append(g.Guild.VoiceStates[:i], g.Guild.VoiceStates[i+1:]...)
				break
			}
		}
		return
	}
	existing := g.VoiceState(false, update.UserID)
	if existing != nil {
		*existing = *update.VoiceState
		return
	}
	// Copy so the caller's event object isn't aliased into our state.
	vsCopy := new(discordgo.VoiceState)
	*vsCopy = *update.VoiceState
	g.Guild.VoiceStates = append(g.Guild.VoiceStates, vsCopy)
}
// MemberPermissions calculates the permissions for a member, looking the
// member up by id; the owner short-circuits to PermissionAll.
// https://support.discordapp.com/hc/en-us/articles/206141927-How-is-the-permission-hierarchy-structured-
func (g *GuildState) MemberPermissions(lock bool, channelID int64, memberID int64) (apermissions int, err error) {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	if memberID == g.Guild.OwnerID {
		return discordgo.PermissionAll, nil
	}
	mState := g.Member(false, memberID)
	if mState == nil {
		return 0, ErrMemberNotFound
	}
	return g.MemberPermissionsMS(false, channelID, mState)
}
// MemberPermissionsMS calculates the permissions for an already-resolved
// member state: base @everyone role, plus the member's roles, then the
// channel's permission overwrites layered on top.
// https://support.discordapp.com/hc/en-us/articles/206141927-How-is-the-permission-hierarchy-structured-
func (g *GuildState) MemberPermissionsMS(lock bool, channelID int64, mState *MemberState) (apermissions int, err error) {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}
	if mState.ID == g.Guild.OwnerID {
		return discordgo.PermissionAll, nil
	}
	// Base permissions: the @everyone role (its ID equals the guild ID).
	for _, role := range g.Guild.Roles {
		if role.ID == g.Guild.ID {
			apermissions |= role.Permissions
			break
		}
	}
	// Union in permissions from each of the member's roles.
	for _, role := range g.Guild.Roles {
		for _, roleID := range mState.Roles {
			if role.ID == roleID {
				apermissions |= role.Permissions
				break
			}
		}
	}
	// Administrator bypasses channel overrides
	if apermissions&discordgo.PermissionAdministrator == discordgo.PermissionAdministrator {
		apermissions |= discordgo.PermissionAll
		return
	}
	cState := g.Channel(false, channelID)
	if cState == nil {
		err = ErrChannelNotFound
		return
	}
	// Apply @everyone overrides from the channel.
	for _, overwrite := range cState.PermissionOverwrites {
		if g.Guild.ID == overwrite.ID {
			apermissions &= ^overwrite.Deny
			apermissions |= overwrite.Allow
			break
		}
	}
	denies := 0
	allows := 0
	// Member overwrites can override role overrides, so do two passes
	for _, overwrite := range cState.PermissionOverwrites {
		for _, roleID := range mState.Roles {
			if overwrite.Type == "role" && roleID == overwrite.ID {
				denies |= overwrite.Deny
				allows |= overwrite.Allow
				break
			}
		}
	}
	// Role overwrites: denies first, then allows win over denies.
	apermissions &= ^denies
	apermissions |= allows
	// Member-specific overwrite takes final precedence.
	for _, overwrite := range cState.PermissionOverwrites {
		if overwrite.Type == "member" && overwrite.ID == mState.ID {
			apermissions &= ^overwrite.Deny
			apermissions |= overwrite.Allow
			break
		}
	}
	if apermissions&discordgo.PermissionAdministrator == discordgo.PermissionAdministrator {
		apermissions |= discordgo.PermissionAllChannel
	}
	return
}
// runGC evicts user-cache entries older than cacheExpirey and reports how
// many entries were removed.
func (g *GuildState) runGC(cacheExpirey time.Duration) (cacheN int) {
	g.Lock()
	defer g.Unlock()

	if g.userCache == nil {
		return 0
	}

	return g.userCache.EvictOldKeys(time.Now().Add(-cacheExpirey))
}
// UserCacheGet returns the cached value for key, or nil when the cache does
// not exist or holds no entry. If lock is true the state is read-locked.
func (g *GuildState) UserCacheGet(lock bool, key interface{}) interface{} {
	if lock {
		g.RLock()
		defer g.RUnlock()
	}

	if cache := g.userCache; cache != nil {
		return cache.Get(key)
	}

	return nil
}
// UserCacheSet stores value under key, lazily creating the cache on first
// use. If lock is true the guild state is write-locked.
func (g *GuildState) UserCacheSet(lock bool, key interface{}, value interface{}) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}

	cache := g.userCache
	if cache == nil {
		cache = NewCache()
		g.userCache = cache
	}

	cache.Set(key, value)
}
// UserCacheDel removes key from the guild's user cache.
// If lock is true the guild state is write-locked for the duration of the call.
func (g *GuildState) UserCacheDel(lock bool, key interface{}) {
	if lock {
		g.Lock()
		defer g.Unlock()
	}

	// Nothing to delete if the cache was never created. (The previous code
	// allocated an empty cache here just to return — a pointless allocation.)
	if g.userCache == nil {
		return
	}

	g.userCache.Del(key)
}
// UserCacheFetch returns the cached value for key, computing and storing it
// via fetchFunc on a cache miss.
//
// When lock is true a read-locked fast path is tried first; only on a miss
// is the full write lock taken. When lock is false the caller must already
// hold the write lock.
func (g *GuildState) UserCacheFetch(lock bool, key interface{}, fetchFunc CacheFetchFunc) (interface{}, error) {
	if lock {
		// check fast path: a read-locked lookup avoids the write lock
		// entirely when the value is already cached.
		v := g.UserCacheGet(true, key)
		if v != nil {
			return v, nil
		}
		// fast path failed, use full lock
		g.Lock()
		defer g.Unlock()
	}
	// Lazily create the cache on first use.
	if g.userCache == nil {
		g.userCache = NewCache()
	}
	return g.userCache.Fetch(key, fetchFunc)
}
|
package blockchain
import (
"bytes"
"encoding/gob"
"fmt"
"log"
"time"
"github.com/neil-berg/blockchain/database"
)
// Block shape: application data plus the proof-of-work hash, the previous
// block's hash linking the chain, and the nonce found by the miner.
type Block struct {
	Data      []byte
	Hash      []byte
	PrevHash  []byte
	Nonce     int
	Timestamp time.Time
}

// Blockchain shape
type Blockchain struct {
	// The blockchain's tip is the last block hash stored in the DB
	tip []byte
	// Instance of our DB
	db *database.Database
}

// Iterator shape: walks the chain backwards from currentHash by following
// each block's PrevHash.
type Iterator struct {
	currentHash []byte
	db          *database.Database
}
// CreateBlock performs the block's proof-of-work, populating the block with a
// hash and nonce that can validated before attaching to the chain.
func CreateBlock(data string, prevHash []byte) *Block {
	block := &Block{
		Data:      []byte(data),
		Hash:      []byte{},
		PrevHash:  prevHash,
		Nonce:     0,
		Timestamp: time.Now(),
	}

	// Mine: find a nonce whose hash satisfies the proof-of-work target.
	nonce, hash := NewProof(block).Run()
	fmt.Printf("Completed proof-of-work: \n \tNonce: \t%d\n \tHash: \t%x\n", nonce, hash)

	block.Hash = hash[:]
	block.Nonce = nonce

	return block
}
// AddBlock mines a new block containing data, persists it, and advances the
// stored tip — both in the DB and on the in-memory chain.
func (chain *Blockchain) AddBlock(data string) error {
	tipKey := []byte(database.TipKey)

	tip, err := chain.db.Read(database.BlocksBucket, tipKey)
	if err != nil {
		// Return the error for the caller to handle (previously log.Fatal,
		// inconsistent with this function's error return).
		return err
	}

	block := CreateBlock(data, tip)

	serializedBlock, err := block.Serialize()
	if err != nil {
		// BUG FIX: serialization errors were silently ignored before.
		return err
	}
	if err := chain.db.Write(database.BlocksBucket, block.Hash, serializedBlock); err != nil {
		// BUG FIX: this write's error was overwritten by the next call before.
		return err
	}
	if err := chain.db.Write(database.BlocksBucket, tipKey, block.Hash); err != nil {
		return err
	}

	// BUG FIX: keep the in-memory tip in sync so iterators created after this
	// call start from the new block.
	chain.tip = block.Hash

	fmt.Println("============= ADDED BLOCK ===============")
	fmt.Printf("Timestamp:\t %v\n", block.Timestamp)
	fmt.Printf("Data:\t\t %s\n", block.Data)
	fmt.Printf("Hash:\t\t %x\n", block.Hash)
	fmt.Printf("Previous hash:\t %x\n", block.PrevHash)
	fmt.Printf("Nonce: \t\t %d\n", block.Nonce)

	return nil
}
// GetNewIterator returns an iterator positioned at the chain's current tip.
func (chain *Blockchain) GetNewIterator() *Iterator {
	return &Iterator{
		currentHash: chain.tip,
		db:          chain.db,
	}
}
// Next returns the next block in a blockchain iterator.
//
// Iteration proceeds from the tip towards the genesis block: each call reads
// the block stored at currentHash and then rewinds currentHash to that
// block's PrevHash.
func (iterator *Iterator) Next() *Block {
	var block *Block
	data, err := iterator.db.Read(database.BlocksBucket, iterator.currentHash)
	if err != nil {
		// NOTE(review): the error is only printed; Deserialize then runs on
		// the (possibly empty) data and yields a zero Block — confirm that
		// callers tolerate this best-effort behavior.
		fmt.Println("Failed to get next block")
	}
	block, err = Deserialize(data)
	if err != nil {
		// Same best-effort handling: a zero Block is returned on failure.
		fmt.Println("Failed to deserialize the block")
	}
	iterator.currentHash = block.PrevHash
	return block
}
// Serialize encodes the block to a gob byte slice suitable for storage.
func (block *Block) Serialize() ([]byte, error) {
	var buf bytes.Buffer

	if err := gob.NewEncoder(&buf).Encode(block); err != nil {
		return []byte{}, err
	}

	return buf.Bytes(), nil
}
// Deserialize decodes a gob-encoded byte slice back into a Block.
func Deserialize(data []byte) (*Block, error) {
	decoded := &Block{}

	if err := gob.NewDecoder(bytes.NewReader(data)).Decode(decoded); err != nil {
		// Return a fresh zero Block on failure, matching the original contract.
		return &Block{}, err
	}

	return decoded, nil
}
// Genesis mines and returns the chain's first block, which has no parent.
func Genesis() *Block {
	const genesisData = "Genesis"
	return CreateBlock(genesisData, []byte{})
}
// Init initializes a blockchain: when the blocks bucket is empty it creates
// and stores the genesis block, otherwise it loads the stored tip.
func Init(db *database.Database) *Blockchain {
	var tip []byte
	tipKey := []byte(database.TipKey)

	if db.EmptyBucket(database.BlocksBucket) {
		// Create and store the genesis block and it's hash as the new chain's tip
		genesis := Genesis()
		fmt.Println("creating genesis block")
		serializedBlock, err := genesis.Serialize()
		if err != nil {
			// BUG FIX: serialization errors were silently ignored before.
			log.Fatal(err)
		}
		if err := db.Write(database.BlocksBucket, genesis.Hash, serializedBlock); err != nil {
			// BUG FIX: each write error is now checked, not just the last one.
			log.Fatal(err)
		}
		fmt.Println("storing genisis")
		if err := db.Write(database.BlocksBucket, tipKey, genesis.Hash); err != nil {
			log.Fatal(err)
		}
		fmt.Println("storing tip")
		// BUG FIX: tip was left nil on this path, so a freshly created
		// chain's iterator started from an empty hash.
		tip = genesis.Hash
	} else {
		fmt.Println("reading tip")
		// Blockchain exists, just read the tip
		lastHash, err := db.Read(database.BlocksBucket, tipKey)
		if err != nil {
			log.Fatal(err)
		}
		tip = lastHash
		fmt.Printf("Tip: %x\n", tip)
	}

	return &Blockchain{tip, db}
}
|
package unio
import (
"github.com/labstack/echo"
"reflect"
"strings"
)
/**
JsonFormatFields is echo middleware that walks every field of a JSON request
body and rewrites the body with each value passed through formatter.
Non-JSON requests pass through untouched.
*/
func (m *Middleware) JsonFormatFields(formatter RequestFormatRule) echo.MiddlewareFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) (err error) {
			mimeType := c.Request().Header.Get(echo.HeaderContentType)
			// Use contains to make more accurate
			if !strings.Contains(mimeType, echo.MIMEApplicationJSON) {
				// Conversion will be done only for JSON request
				return next(c)
			}

			buffer := Utils.GetBuffer(c)

			var raw interface{}
			if err = c.Bind(&raw); err != nil {
				// BUG FIX: a failed bind previously fell through and format()
				// panicked reflecting on the nil value.
				return err
			}

			switch reflect.ValueOf(raw).Kind() {
			//noinspection ALL
			case reflect.Slice, reflect.Array:
				// Top-level JSON array: format each element separately.
				s := reflect.ValueOf(raw)
				body := []interface{}{}
				for i := 0; i < s.Len(); i++ {
					body = append(body, format(c, s.Index(i).Interface(), formatter))
				}
				buffer = Utils.InterfaceToBuffer(body)
			default:
				// Top-level JSON object.
				buffer = Utils.InterfaceToBuffer(format(c, raw, formatter))
			}

			// Replace the request body with the formatted version.
			Utils.ResetBuffer(c, buffer)
			return next(c)
		}
	}
}
/**
format recursively applies formatter to every leaf value of a decoded JSON
object, returning a rebuilt map. Nested maps recurse; slice values are
formatted element by element.
*/
func format(c echo.Context, raw interface{}, formatter RequestFormatRule) interface{} {
	reflectRaw := reflect.ValueOf(raw)
	body := map[string]interface{}{}
	for _, k := range reflectRaw.MapKeys() {
		key := k.String()
		value := reflectRaw.MapIndex(k).Interface()
		switch reflect.ValueOf(value).Kind() {
		case reflect.Slice, reflect.Array:
			s := reflect.ValueOf(value)
			// BUG FIX: collect every formatted element. Previously each
			// iteration overwrote body[key], keeping only the last element
			// and silently dropping the rest of the array.
			formatted := make([]interface{}, 0, s.Len())
			for i := 0; i < s.Len(); i++ {
				formatted = append(formatted, format(c, s.Index(i).Interface(), formatter))
			}
			body[key] = formatted
		case reflect.Map:
			body[key] = format(c, value, formatter)
		default:
			body[key] = formatter(c.Request().Method, key, value)
		}
	}
	return body
}
|
package plient
import (
"net/http"
"net/url"
)
// Plient is a thin HTTP client wrapper that routes every request through a
// proxy and applies a fixed set of headers.
type Plient struct {
	client  *http.Client
	headers []Header
}

// Header is a single HTTP header key/value pair applied to each request.
type Header struct {
	key   string
	value string
}
// create builds a Plient whose HTTP client routes through the given proxy
// URL and which sends headers on every request. It panics when the proxy URL
// cannot be parsed.
func create(proxy string, headers []Header) *Plient {
	proxyUrl, err := url.Parse(proxy)
	if err != nil {
		// Include the underlying error so the failure is diagnosable
		// (the old message discarded it).
		panic("Proxy error: " + err.Error())
	}

	client := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyURL(proxyUrl),
		},
	}

	return &Plient{client: client, headers: headers}
}
// Get issues a GET request to _url with the client's configured headers
// applied, returning the raw response.
func (p Plient) Get(_url string) (*http.Response, error) {
	request, err := http.NewRequest(http.MethodGet, _url, nil)
	if err != nil {
		return nil, err
	}

	p.prepare(request)

	return p.client.Do(request)
}
// prepare stamps the client's configured headers onto req.
func (p Plient) prepare(req *http.Request) {
	for _, h := range p.headers {
		req.Header.Set(h.key, h.value)
	}
}
|
package parser
import (
"errors"
"strings"
)
// ErrorMsg is the message used for every malformed-URL error.
const ErrorMsg = "invalid mysql url"

// ParseMysqlUrl converts a URL of the form
// scheme://user:pass@host:port/dbname[?attributes] into the DSN form
// user:pass@tcp(host:port)/dbname. It returns an error carrying ErrorMsg
// when the input does not match that shape.
func ParseMysqlUrl(url string) (string, error) {
	protocolAndRest := strings.SplitN(url, "://", 2)
	if len(protocolAndRest) != 2 {
		return "", errors.New(ErrorMsg)
	}
	// The scheme itself does not appear in the resulting DSN.

	hostAndRest := strings.Split(protocolAndRest[1], "/")
	if len(hostAndRest) != 2 {
		return "", errors.New(ErrorMsg)
	}

	// BUG FIX: split on the LAST "@" so passwords containing "@" still parse
	// (the old exactly-two Split rejected them).
	at := strings.LastIndex(hostAndRest[0], "@")
	if at < 0 {
		return "", errors.New(ErrorMsg)
	}
	userCredentials := hostAndRest[0][:at]
	address := hostAndRest[0][at+1:]

	// Drop any query attributes; only the database name is kept.
	// (The old `len(...) < 1` check was dead: Split never returns an empty slice.)
	dbName := strings.SplitN(hostAndRest[1], "?", 2)[0]

	return userCredentials + "@tcp(" + address + ")/" + dbName, nil
}
|
package send
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"strings"
"errors"
)
// SSAcao identifies the service action to request.
type SSAcao string

// SStatus is the status value returned by the service.
type SStatus string

const (
	// Service actions.
	SendSms SSAcao = "sendsms"
	BulkSms SSAcao = "bulksms"

	// Response statuses.
	StatusError   SStatus = "error"
	StatusSuccess SStatus = "success"

	// Request parameter names.
	PARAM_ACAO     string = "acao"
	PARAM_LOGIN    string = "login"
	PARAM_TOKEN    string = "token"
	PARAM_NUMERO   string = "numero"
	PARAM_MSG      string = "msg"
	PARAM_CAMPANHA string = "campanha"
	PARAM_DATA     string = "data"
	PARAM_HORA     string = "hora"

	// Error messages (Portuguese, surfaced to callers): "message not
	// provided", "message exceeds 160 characters", "response parse error".
	MSG_N_INF            string = "Mensagem não foi informada"
	MSG_LEN_MAX_CARACTER string = "Tamanho máximo permitido da mensagem são de 160 caracteres"
	MSG_ERROR_PARSER     string = "Ocorreu um erro no Parser do Retorno"
)

// SStatusResponse is the JSON payload returned by the SMS service.
type SStatusResponse struct {
	Status string `json:"status"` // "success" or "error"
	Cause  string `json:"cause"`  // error description when Status is "error"
	Id     string `json:"id"`     // message id when Status is "success"
}

// SMsg is the message to be sent.
type SMsg struct {
	Msg string
}

// SSendOptSMS holds the optional fields of an SMS send request.
type SSendOptSMS struct {
	Campanha string // campaign name
	Data     string // scheduled date
	Hora     string // scheduled time
}

// SSendSMS carries the data for an SMS send request to the microservice.
type SSendSMS struct {
	UrlService string
	Acao       SSAcao
	Login      string
	Token      string
	Numero     string
	Opt        SSendOptSMS
	Msg        SMsg
}
// SendSMS asks the SMS microservice to send the message and returns the
// message id reported by the service. It validates the message, builds the
// query string, performs the HTTP GET, and parses the JSON status response.
func (s *SSendSMS) SendSMS() (string, error) {
	var strId string = ""

	// The message is mandatory and must be non-blank.
	if (SMsg{}) == s.Msg || len(strings.TrimSpace(s.Msg.Msg)) == 0 {
		return strId, errors.New(MSG_N_INF)
	}

	if _, err := valLenMsg(s.Msg.Msg); err != nil {
		return strId, err
	}

	params := &url.Values{}
	params.Add(PARAM_ACAO, string(s.Acao))
	params.Add(PARAM_LOGIN, s.Login)
	params.Add(PARAM_TOKEN, s.Token)
	params.Add(PARAM_NUMERO, s.Numero)
	params.Add(PARAM_MSG, s.Msg.Msg)
	valOpt(s.Opt, params)

	// NOTE(review): the query string is concatenated directly — presumably
	// UrlService already ends with "?"; confirm with callers.
	request := s.UrlService + params.Encode()

	resp, err := http.Get(request)
	if err != nil {
		// BUG FIX: the error must be checked before touching resp; the old
		// code deferred resp.Body.Close() first and nil-dereferenced here.
		return strId, err
	}
	defer resp.Body.Close()

	strData, errParser := ioutil.ReadAll(resp.Body)
	if errParser != nil {
		return strId, errParser
	}

	var statusResponse SStatusResponse = SStatusResponse{}
	if jsonUnMarshalErr := json.Unmarshal(strData, &statusResponse); jsonUnMarshalErr != nil {
		return strId, errors.New(MSG_ERROR_PARSER)
	}

	if statusResponse.Status == string(StatusSuccess) {
		strId = statusResponse.Id
	} else if statusResponse.Status == string(StatusError) {
		// The service reports the failure cause in the response body.
		return strId, errors.New(statusResponse.Cause)
	}

	return strId, nil
}
// valLenMsg validates the trimmed message length, returning the length and
// an error when it exceeds the 160-character SMS limit.
func valLenMsg(strMsg string) (int, error) {
	trimmedLen := len(strings.TrimSpace(strMsg))
	if trimmedLen > 160 {
		return trimmedLen, errors.New(MSG_LEN_MAX_CARACTER)
	}
	return trimmedLen, nil
}
// valOpt appends the optional campaign/date/time parameters to the request
// when they are present and non-blank.
func valOpt(refOpt SSendOptSMS, paramsRequest *url.Values) {
	if (SSendOptSMS{}) == refOpt {
		return
	}

	add := func(param, value string) {
		if trimmed := strings.TrimSpace(value); len(trimmed) > 0 {
			paramsRequest.Add(param, trimmed)
		}
	}

	add(PARAM_CAMPANHA, refOpt.Campanha)
	add(PARAM_DATA, refOpt.Data)
	add(PARAM_HORA, refOpt.Hora)
}
|
package utils
import (
"os"
"strconv"
)
// GetEnv reads the environment variable key and converts the value to the
// type of defaultVal (int, int64, float64 or string). defaultVal is returned
// when the variable is unset, fails to parse, or has an unsupported type.
func GetEnv(key string, defaultVal interface{}) interface{} {
	value, exists := os.LookupEnv(key)
	if !exists {
		return defaultVal
	}

	switch defaultVal.(type) {
	case int:
		if parsed, err := strconv.Atoi(value); err == nil {
			return parsed
		}
	case int64:
		if parsed, err := strconv.ParseInt(value, 10, 64); err == nil {
			return parsed
		}
	case float64:
		if parsed, err := strconv.ParseFloat(value, 64); err == nil {
			return parsed
		}
	case string:
		return value
	}

	// Parse failure or unsupported default type: fall back to the default.
	return defaultVal
}
|
package testing
import (
"testing"
"github.com/brigadecore/brigade/sdk/v3"
"github.com/stretchr/testify/require"
)
func TestMockSystemClient(t *testing.T) {
require.Implements(t, (*sdk.SystemClient)(nil), &MockSystemClient{})
}
|
package main
// Def is a single dictionary definition entry.
type Def struct {
	Dict string `json:"dict"` // name of the dictionary the definition came from
	Desc string `json:"desc"` // the definition text itself
}

// Res is a lookup result: the queried term plus either its definitions or,
// when nothing matched, spelling suggestions.
type Res struct {
	Term string   `json:"term"`
	Defs []*Def   `json:"definition,omitempty"`
	Sugs []string `json:"suggestions,omitempty"`
}
|
/*
Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudify
import (
"fmt"
utils "github.com/cloudify-incubator/cloudify-rest-go-client/cloudify/utils"
)
// KubernetesLoadBalancer - Cloudify node type used for load balancer instances.
const KubernetesLoadBalancer = "cloudify.nodes.ApplicationServer.kubernetes.LoadBalancer"

// KubernetesNode - Cloudify node type used for kubernetes worker instances.
const KubernetesNode = "cloudify.nodes.ApplicationServer.kubernetes.Node"
// GetDeployment - return deployment by ID; errors unless exactly one
// deployment matches.
func (cl *Client) GetDeployment(deploymentID string) (*Deployment, error) {
	params := map[string]string{"id": deploymentID}

	deployments, err := cl.GetDeployments(params)
	if err != nil {
		return nil, err
	}

	if len(deployments.Items) != 1 {
		return nil, fmt.Errorf("Returned wrong count of deployments:%+v", deploymentID)
	}

	return &deployments.Items[0], nil
}
// GetDeploymentInstancesHostGrouped - return instances grouped by host ID;
// instances without a host are skipped.
func (cl *Client) GetDeploymentInstancesHostGrouped(params map[string]string) (map[string]NodeInstances, error) {
	result := map[string]NodeInstances{}

	nodeInstances, err := cl.GetNodeInstances(params)
	if err != nil {
		return result, err
	}

	for _, instance := range nodeInstances.Items {
		hostID := instance.HostID
		if hostID == "" {
			continue
		}

		// The zero NodeInstances value works for a host we haven't seen yet.
		group := result[hostID]
		group.Items = append(group.Items, instance)
		// We repack instances into a new struct, so keep size/total in sync.
		group.Metadata.Pagination.Total++
		group.Metadata.Pagination.Size++
		result[hostID] = group
	}

	return result, nil
}
// GetDeploymentInstancesNodeGrouped - return instances grouped by node ID;
// instances without a node ID are skipped.
func (cl *Client) GetDeploymentInstancesNodeGrouped(params map[string]string) (map[string]NodeInstances, error) {
	result := map[string]NodeInstances{}

	nodeInstances, err := cl.GetNodeInstances(params)
	if err != nil {
		return result, err
	}

	for _, instance := range nodeInstances.Items {
		nodeID := instance.NodeID
		if nodeID == "" {
			continue
		}

		// The zero NodeInstances value works for a node we haven't seen yet.
		group := result[nodeID]
		group.Items = append(group.Items, instance)
		// Keep the repacked struct's size/total in sync.
		group.Metadata.Pagination.Total++
		group.Metadata.Pagination.Size++
		result[nodeID] = group
	}

	return result, nil
}
// listNodeInstanceToNodeInstances - wrap a plain slice of NodeInstance into a
// NodeInstances struct with pagination metadata filled in.
func (cl *Client) listNodeInstanceToNodeInstances(instances []NodeInstance) *NodeInstances {
	count := uint(len(instances))

	result := NodeInstances{Items: instances}
	result.Metadata.Pagination.Total = count
	result.Metadata.Pagination.Size = count
	result.Metadata.Pagination.Offset = 0

	return &result
}
// listNodeToNodes - wrap a plain slice of Node into a Nodes struct with
// pagination metadata filled in.
func (cl *Client) listNodeToNodes(nodes []Node) *Nodes {
	count := uint(len(nodes))

	result := Nodes{Items: nodes}
	result.Metadata.Pagination.Total = count
	result.Metadata.Pagination.Size = count
	result.Metadata.Pagination.Offset = 0

	return &result
}
// GetNodeInstancesWithType - Returned list of started node instances with some node type,
// used mainly for kubernetes, also check that all instances related to same hostId started
func (cl *Client) GetNodeInstancesWithType(params map[string]string, nodeType string) (*NodeInstances, error) {
	nodeInstances, err := cl.GetNodeInstances(params)
	if err != nil {
		return nil, err
	}

	// Restrict the node query to the same deployment, if one was given.
	nodeParams := map[string]string{}
	if deploymentID, ok := params["deployment_id"]; ok {
		nodeParams["deployment_id"] = deploymentID
	}

	nodes, err := cl.GetNodes(nodeParams)
	if err != nil {
		return nil, err
	}

	wantedNodes := nodes.GetNodeNamesWithType(nodeType)

	matched := []NodeInstance{}
	for _, instance := range nodeInstances.Items {
		if utils.InList(wantedNodes, instance.NodeID) {
			matched = append(matched, instance)
		}
	}

	return cl.listNodeInstanceToNodeInstances(matched), nil
}
// GetAliveNodeInstancesWithType - Returned list of alive node instances with some node type,
// used mainly for kubernetes, need to get instances that can be joined to cluster
// Useful for cloudprovider logic only.
func (cl *Client) GetAliveNodeInstancesWithType(params map[string]string, nodeType string) (*NodeInstances, error) {
	nodeInstances, err := cl.GetNodeInstancesWithType(params, nodeType)
	if err != nil {
		return nil, err
	}

	// starting only because we restart kubelet after join
	aliveStates := []string{
		// "initializing", "creating", // workflow started for instance
		// "created", "configuring", // create action, had ip
		"configured", "starting", // configure action, joined to cluster
		"started", // everything done
	}

	alive := []NodeInstance{}
	for _, instance := range nodeInstances.Items {
		if utils.InList(aliveStates, instance.State) {
			alive = append(alive, instance)
		}
	}

	return cl.listNodeInstanceToNodeInstances(alive), nil
}
// GetStartedNodeInstancesWithType - Returned list of started node instances with some node type,
// used mainly for kubernetes, also check that all instances related to same hostId started
// Useful for scale only.
func (cl *Client) GetStartedNodeInstancesWithType(params map[string]string, nodeType string) (*NodeInstances, error) {
	grouped, err := cl.GetDeploymentInstancesHostGrouped(params)
	if err != nil {
		return nil, err
	}

	// Restrict the node query to the same deployment, if one was given.
	nodeParams := map[string]string{}
	if deploymentID, ok := params["deployment_id"]; ok {
		nodeParams["deployment_id"] = deploymentID
	}

	nodes, err := cl.GetNodes(nodeParams)
	if err != nil {
		return nil, err
	}

	wantedNodes := nodes.GetNodeNamesWithType(nodeType)

	started := []NodeInstance{}
	for _, hostInstances := range grouped {
		// Skip hosts where any instance has not yet reached "started".
		if !hostInstances.AllAreStarted() {
			continue
		}
		for _, instance := range hostInstances.Items {
			if utils.InList(wantedNodes, instance.NodeID) {
				started = append(started, instance)
			}
		}
	}

	return cl.listNodeInstanceToNodeInstances(started), nil
}
// GetDeploymentScaleGroup - return scaling group by name and deployment
func (cl *Client) GetDeploymentScaleGroup(deploymentID, scaleGroupName string) (*ScalingGroup, error) {
	deployment, err := cl.GetDeployment(deploymentID)
	if err != nil {
		return nil, err
	}

	// Direct map lookup instead of iterating all groups.
	if deployment.ScalingGroups != nil {
		if scaleGroup, ok := deployment.ScalingGroups[scaleGroupName]; ok {
			return &scaleGroup, nil
		}
	}

	return nil, fmt.Errorf("No such scale group:%+v", scaleGroupName)
}
// GetDeploymentScaleGroupNodes - return nodes related to scaling group
func (cl *Client) GetDeploymentScaleGroupNodes(deploymentID, groupName, nodeType string) (*Nodes, error) {
	// get all started nodes of the requested type in the deployment
	params := map[string]string{"deployment_id": deploymentID}
	cloudNodes, err := cl.GetStartedNodesWithType(params, nodeType)
	if err != nil {
		return nil, err
	}

	// get scale group
	scaleGroup, err := cl.GetDeploymentScaleGroup(deploymentID, groupName)
	if err != nil {
		return nil, err
	}

	// Keep only nodes belonging to the scaling group, matched either by node
	// ID or by the node's host ID.
	members := []Node{}
	for _, node := range cloudNodes.Items {
		for _, memberID := range scaleGroup.Members {
			if memberID == node.ID || memberID == node.HostID {
				members = append(members, node)
			}
		}
	}

	return cl.listNodeToNodes(members), nil
}
// GetDeploymentScaleGroupInstances - return instances related to scaling group
func (cl *Client) GetDeploymentScaleGroupInstances(deploymentID, groupName, nodeType string) (*NodeInstances, error) {
	// All started instances of the requested type in the deployment.
	params := map[string]string{"deployment_id": deploymentID}
	cloudInstances, err := cl.GetStartedNodeInstancesWithType(params, nodeType)
	if err != nil {
		return nil, err
	}

	// Nodes in the scale group (nodes are needed to resolve each host).
	cloudNodes, err := cl.GetDeploymentScaleGroupNodes(deploymentID, groupName, nodeType)
	if err != nil {
		return nil, err
	}

	// Keep instances whose node belongs to the scaling group.
	matched := []NodeInstance{}
	for _, instance := range cloudInstances.Items {
		for _, node := range cloudNodes.Items {
			if node.ID == instance.NodeID {
				matched = append(matched, instance)
			}
		}
	}

	return cl.listNodeInstanceToNodeInstances(matched), nil
}
// GetDeploymentInstancesScaleGrouped - return instances grouped by scaleing group
//
// For every scaling group in the deployment, collects the started instances
// (of nodeType) whose node — or whose node's host — is a member of the group.
func (cl *Client) GetDeploymentInstancesScaleGrouped(deploymentID, nodeType string) (map[string]NodeInstances, error) {
	var result = map[string]NodeInstances{}
	deployment, err := cl.GetDeployment(deploymentID)
	if err != nil {
		return result, err
	}
	var params = map[string]string{}
	params["deployment_id"] = deploymentID
	nodes, err := cl.GetStartedNodesWithType(params, nodeType)
	if err != nil {
		return result, err
	}
	cloudInstances, err := cl.GetStartedNodeInstancesWithType(params, nodeType)
	if err != nil {
		return result, err
	}
	if deployment.ScalingGroups != nil {
		// check what types we have in members
		for groupName, scaleGroup := range deployment.ScalingGroups {
			var resultedInstances = []NodeInstance{}
			var supportedMembers = []string{}
			for _, member := range scaleGroup.Members {
				supportedMembers = append(supportedMembers, member)
				// Membership can also be expressed via the host node, so
				// include every node hosted on a member.
				for _, node := range nodes.Items {
					if node.HostID == member {
						if !utils.InList(supportedMembers, node.ID) {
							supportedMembers = append(supportedMembers, node.ID)
						}
					}
				}
			}
			// search instance: keep those whose node is in this group
			for _, cloudInstance := range cloudInstances.Items {
				if utils.InList(supportedMembers, cloudInstance.NodeID) {
					resultedInstances = append(resultedInstances, cloudInstance)
				}
			}
			result[groupName] = *cl.listNodeInstanceToNodeInstances(resultedInstances)
		}
	}
	return result, nil
}
|
package racecond
import (
"fmt"
"sync"
)
// Program with race condition.
// x is shared by every goroutine spawned in RunIncrements and mutated without
// synchronization — the data race is the point of this demo package.
var x = 0

// increment performs an unsynchronized read-modify-write on the shared
// counter, then signals completion on the wait group.
func increment(wg *sync.WaitGroup) {
	x = x + 1
	wg.Done()
}
// RunIncrements runs "increment()" func "numIncrements" times.
//
// Each increment runs in its own goroutine with no locking, so the printed
// final value is typically less than numIncrements — the race is intentional
// (run the tests with -race to have it reported).
func RunIncrements(numIncrements int) {
	var wg sync.WaitGroup
	for i := 0; i < numIncrements; i++ {
		wg.Add(1)
		go increment(&wg)
	}
	// Wait for every goroutine before reading the (racy) result.
	wg.Wait()
	fmt.Println("Final value is ", x)
}
|
// Package callbacks provides callback implementations for Discord API events.
package callbacks
import (
"context"
"fmt"
"time"
"github.com/bwmarrin/discordgo"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/ewohltman/ephemeral-roles/internal/pkg/logging"
)
// guildMembersPageLimit is the page size used when listing guild members.
const guildMembersPageLimit = 1000

// Config contains fields for the callback methods.
type Config struct {
	Log                     logging.Interface
	BotName                 string
	BotKeyword              string
	RolePrefix              string
	RoleColor               int
	JaegerTracer            opentracing.Tracer
	ContextTimeout          time.Duration
	ReadyCounter            prometheus.Counter
	MessageCreateCounter    prometheus.Counter
	VoiceStateUpdateCounter prometheus.Counter
}

// roleID is a Discord role ID.
type roleID string

// roleIDMap indexes guild roles by their role ID.
type roleIDMap map[roleID]*discordgo.Role
// mapGuildRoleIDs builds an index of the given roles keyed by role ID.
func mapGuildRoleIDs(guildRoles discordgo.Roles) roleIDMap {
	index := make(roleIDMap, len(guildRoles))

	for _, guildRole := range guildRoles {
		index[roleID(guildRole.ID)] = guildRole
	}

	return index
}
// lookupGuild returns the guild from the session state cache, querying the
// API and caching the result on a miss.
func lookupGuild(ctx context.Context, session *discordgo.Session, guildID string) (*discordgo.Guild, error) {
	guild, err := session.State.Guild(guildID)
	if err == nil {
		return guild, nil
	}

	// Cache miss: query Discord directly.
	guild, err = queryGuild(ctx, session, guildID)
	if err != nil {
		return nil, fmt.Errorf("unable to lookup guild: %w", err)
	}

	if err = session.State.GuildAdd(guild); err != nil {
		return nil, fmt.Errorf("unable to add guild to session cache: %w", err)
	}

	return guild, nil
}
// queryGuild fetches a guild together with its full member and channel lists
// from the Discord API.
func queryGuild(ctx context.Context, session *discordgo.Session, guildID string) (*discordgo.Guild, error) {
	guild, err := session.GuildWithContext(ctx, guildID)
	if err != nil {
		return nil, fmt.Errorf("unable to query guild: %w", err)
	}

	members, err := recursiveGuildMembersWithContext(ctx, session, guildID, "", guildMembersPageLimit)
	if err != nil {
		return nil, fmt.Errorf("unable to query guild members: %w", err)
	}

	channels, err := session.GuildChannelsWithContext(ctx, guildID)
	if err != nil {
		return nil, fmt.Errorf("unable to query guild channels: %w", err)
	}

	guild.Members, guild.Channels = members, channels

	return guild, nil
}
// recursiveGuildMembersWithContext pages through the guild member list,
// starting after the given member ID, and returns all members concatenated.
func recursiveGuildMembersWithContext(
	ctx context.Context,
	session *discordgo.Session,
	guildID, after string,
	limit int,
) ([]*discordgo.Member, error) {
	guildMembers, err := session.GuildMembersWithContext(ctx, guildID, after, limit)
	if err != nil {
		return nil, err
	}

	// A short page means we've reached the end of the member list.
	// (Consistency fix: compare against the requested limit rather than the
	// package constant; all current callers pass guildMembersPageLimit, so
	// behavior is unchanged for them.)
	if len(guildMembers) < limit {
		return guildMembers, nil
	}

	// Full page: recurse starting after the last member we received.
	nextGuildMembers, err := recursiveGuildMembersWithContext(
		ctx,
		session,
		guildID,
		guildMembers[len(guildMembers)-1].User.ID,
		guildMembersPageLimit,
	)
	if err != nil {
		return nil, err
	}

	guildMembers = append(guildMembers, nextGuildMembers...)

	return guildMembers, nil
}
// createGuildRole creates a new hoisted role in the guild, edits it to the
// requested name and color, and adds it to the session state cache.
func createGuildRole(ctx context.Context, session *discordgo.Session, guildID, roleName string, roleColor int) (*discordgo.Role, error) {
	const hoist = true

	newRole, err := session.GuildRoleCreateWithContext(ctx, guildID)
	if err != nil {
		return nil, fmt.Errorf("unable to create ephemeral role: %w", err)
	}

	// Discord creates roles with default attributes; a second call applies
	// the desired name and color.
	newRole, err = session.GuildRoleEditWithContext(
		ctx,
		guildID, newRole.ID,
		roleName, roleColor,
		hoist, newRole.Permissions, newRole.Mentionable,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to edit ephemeral role: %w", err)
	}

	if err = session.State.RoleAdd(guildID, newRole); err != nil {
		return nil, fmt.Errorf("unable to add ephemeral role to session cache: %w", err)
	}

	return newRole, nil
}
// addRoleToMember grants the ephemeral role to the given guild member.
func addRoleToMember(ctx context.Context, session *discordgo.Session, guildID, userID, ephemeralRoleID string) error {
	if err := session.GuildMemberRoleAddWithContext(ctx, guildID, userID, ephemeralRoleID); err != nil {
		return fmt.Errorf("unable to grant ephemeral role: %w", err)
	}

	return nil
}
// removeRoleFromMember revokes the ephemeral role from the given guild member.
func removeRoleFromMember(ctx context.Context, session *discordgo.Session, guildID, userID, ephemeralRoleID string) error {
	if err := session.GuildMemberRoleRemoveWithContext(ctx, guildID, userID, ephemeralRoleID); err != nil {
		return fmt.Errorf("unable to revoke ephemeral role: %w", err)
	}

	return nil
}
|
// Fact returns the factorial of n.
// For n < 2 (including negative n) the result is 1.
func Fact(n int) int {
	result := 1
	for i := 2; i <= n; i++ {
		result *= i
	}
	return result
}
|
package main
import . "github.com/little-go/practices/visitor"
// main demonstrates the visitor pattern: an Info value is visited through an
// (optionally decorated) Visitor chain, with the loader closure supplying
// the data.
func main() {
	info := Info{}
	var v Visitor = &info
	// Decorator visitors can be layered on top (left disabled here):
	//v = LogVisitor{v}
	//v = NameVisitor{v}
	//v = OtherThingsVisitor{v}
	// loadFile plays the role of a data source, filling in the Info fields.
	loadFile := func(info *Info, err error) error {
		info.Name = "Hao Chen"
		info.Namespace = "MegaEase"
		info.OtherThings = "We are running as remote team."
		return nil
	}
	// The visit result is deliberately ignored in this demo.
	_ = v.Visit(loadFile)
}
|
package main
import (
"net"
"log"
"fmt"
"bufio"
)
// main is the entry point: it opens a TCP connection to a local HTTP server,
// sends a minimal GET request, and prints the status line of the response.
func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:3130")
	if err != nil {
		// BUG FIX: the old code only logged the error and then dereferenced
		// the nil conn below; abort instead.
		log.Fatal(err)
	}
	defer conn.Close()

	// BUG FIX: terminate the header block with a blank line (\r\n\r\n) so the
	// server knows the request is complete and will respond.
	fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n")

	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		// BUG FIX: the read error was silently discarded before.
		log.Fatal(err)
	}
	fmt.Println(status)
}
|
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
package shutdown
import (
"bytes"
"fmt"
"math/rand"
"net/http"
"os"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"sync"
"testing"
"time"
)
// reset restores the package's global shutdown state between tests: default
// timeout, empty notifier queues, a fresh wait group, and a new "finished"
// channel. Both state mutexes are held while the globals are swapped.
func reset() {
	SetTimeout(1 * time.Second)
	sqM.Lock()
	defer sqM.Unlock()
	srM.Lock()
	defer srM.Unlock()
	wg = &sync.WaitGroup{}
	shutdownRequested = false
	shutdownQueue = [4][]iNotifier{}
	shutdownFnQueue = [4][]fnNotify{}
	shutdownFinished = make(chan struct{})
	// Stage -1 means shutdown has not begun.
	currentStage = Stage{-1}
	onTimeOut = nil
}
// startTimer guards a test against hanging: it sums all stage timeouts, adds
// roughly 11% slack, and panics (after dumping all goroutine stacks) if the
// returned channel is not closed before that deadline. Callers close the
// returned channel when the test finishes.
func startTimer(t *testing.T) chan struct{} {
	SetLogPrinter(t.Logf)
	finished := make(chan struct{}, 0)
	srM.RLock()
	var to time.Duration
	for i := range timeouts {
		to += timeouts[i]
	}
	srM.RUnlock()

	// Add some extra time.
	toc := time.After((to * 10) / 9)
	go func() {
		select {
		case <-toc:
			// Deadline hit: dump goroutine stacks to aid debugging, then fail hard.
			pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
			panic("unexpected timeout while running test")
		case <-finished:
			return
		}
	}()
	return finished
}
// TestBasic exercises a single first-stage notifier through a full shutdown
// and checks that repeated Shutdown/Wait calls return immediately after.
func TestBasic(t *testing.T) {
	reset()
	defer close(startTimer(t))

	notifier := First()
	fired := false
	go func() {
		n := <-notifier
		fired = true
		close(n)
	}()

	Shutdown()

	if !fired {
		t.Fatal("did not get expected shutdown signal")
	}
	if !Started() {
		t.Fatal("shutdown not marked started")
	}

	// Should just return at once.
	Shutdown()
	// Should also return at once.
	Wait()
}
// TestPreShutdown verifies that a pre-shutdown notifier fires, that a held
// Lock is released by the handler, and that the stage timeout is not hit.
func TestPreShutdown(t *testing.T) {
	reset()
	defer close(startTimer(t))

	notifier := PreShutdown()
	fired := false
	release := Lock()
	go func() {
		n := <-notifier
		fired = true
		release()
		close(n)
	}()

	started := time.Now()
	Shutdown()
	if elapsed := time.Since(started); elapsed > time.Second {
		t.Fatalf("timeout time was hit unexpected:%v", elapsed)
	}

	if !fired {
		t.Fatal("did not get expected shutdown signal")
	}
	if !Started() {
		t.Fatal("shutdown not marked started")
	}
}
// TestCancel verifies that a cancelled notifier never receives the shutdown
// signal.
func TestCancel(t *testing.T) {
	reset()
	defer close(startTimer(t))

	notifier := First()
	fired := false
	go func() {
		n := <-notifier
		fired = true
		close(n)
	}()

	notifier.Cancel()
	Shutdown()

	if fired {
		t.Fatal("got unexpected shutdown signal")
	}
}
// TestCancel2 verifies that cancelling one notifier does not affect another
// notifier registered at the same stage.
func TestCancel2(t *testing.T) {
	reset()
	defer close(startTimer(t))

	kept := First()
	dropped := First()
	var keptFired, droppedFired bool

	go func() {
		n := <-dropped
		droppedFired = true
		close(n)
	}()
	go func() {
		n := <-kept
		keptFired = true
		close(n)
	}()

	dropped.Cancel()
	Shutdown()

	if droppedFired {
		t.Fatal("got unexpected shutdown signal")
	}
	if !keptFired {
		t.Fatal("missing shutdown signal")
	}
}
// TestCancelWait verifies that a notifier removed via CancelWait never
// receives the shutdown signal.
func TestCancelWait(t *testing.T) {
	reset()
	defer close(startTimer(t))

	notifier := First()
	fired := false
	go func() {
		n := <-notifier
		fired = true
		close(n)
	}()

	notifier.CancelWait()
	Shutdown()

	if fired {
		t.Fatal("got unexpected shutdown signal")
	}
}
// TestCancelWait2 verifies that CancelWait on one notifier does not affect
// another notifier registered at the same stage.
func TestCancelWait2(t *testing.T) {
	reset()
	defer close(startTimer(t))

	kept := First()
	dropped := First()
	var keptFired, droppedFired bool

	go func() {
		n := <-dropped
		droppedFired = true
		close(n)
	}()
	go func() {
		n := <-kept
		keptFired = true
		close(n)
	}()

	dropped.CancelWait()
	Shutdown()

	if droppedFired {
		t.Fatal("got unexpected shutdown signal")
	}
	if !keptFired {
		t.Fatal("missing shutdown signal")
	}
}
// TestCancelWait3 assert that we can CancelWait, and that wait will wait until the
// specified stage.
func TestCancelWait3(t *testing.T) {
	reset()
	defer close(startTimer(t))
	f := First()
	// NOTE(review): ok2 is never written anywhere, so its check below can
	// never fail — confirm whether a writer was lost in a refactor.
	var ok, ok2, ok3 bool
	f2 := Second()
	cancelled := make(chan struct{}, 0)
	reached := make(chan struct{}, 0)
	p2started := make(chan struct{}, 0)
	// Second-stage function: blocks until the first-stage goroutine signals
	// that stage 2 may proceed, then records that stage 2 was reached.
	_ = SecondFn(func() {
		<-p2started
		close(reached)
	})
	var wg sync.WaitGroup
	// Listener on the second-stage notifier; exits without firing once the
	// notifier has been cancelled.
	go func() {
		select {
		case v := <-f2:
			ok3 = true
			close(v)
		case <-cancelled:
		}
	}()
	wg.Add(1)
	go func() {
		select {
		case n := <-f:
			ok = true
			go func() {
				wg.Done()
				close(cancelled)
				// CancelWait must block until stage 2 is running.
				f2.CancelWait()
				// We should be at stage 2
				close(p2started)
				<-reached
			}()
			// Hold stage 1 open briefly so the inner goroutine gets started
			// before the stage-1 notifier is acknowledged.
			wg.Wait()
			time.Sleep(10 * time.Millisecond)
			close(n)
		}
	}()
	Shutdown()
	if !ok {
		t.Fatal("missing shutdown signal")
	}
	if ok2 {
		t.Fatal("got unexpected shutdown signal")
	}
	if ok3 {
		t.Fatal("got unexpected shutdown signal")
	}
}
// TestCancelWait4 assert that we can CancelWait on a previous stage,
// and it doesn't block.
func TestCancelWait4(t *testing.T) {
	reset()
	defer close(startTimer(t))
	f := Second()
	var ok bool
	f2 := First()
	go func() {
		// Plain receive replaces a redundant single-case select (S1000).
		n := <-f
		// Stage 1 has already completed at this point, so this must not block.
		f2.CancelWait()
		ok = true
		close(n)
	}()
	Shutdown()
	if !ok {
		t.Fatal("missing shutdown signal")
	}
}
// logBuffer captures formatted log output in a buffer while forwarding
// each message to a user-supplied logging function.
type logBuffer struct {
	buf bytes.Buffer
	fn  func(string, ...interface{})
}

// WriteF forwards the message to the configured log function and appends
// the formatted text, newline-terminated, to the buffer.
func (l *logBuffer) WriteF(format string, a ...interface{}) {
	l.fn(format, a...)
	msg := fmt.Sprintf(format, a...)
	l.buf.WriteString(msg + "\n")
}
// TestContextLog assert that context is logged as expected.
func TestContextLog(t *testing.T) {
	reset()
	defer close(startTimer(t))
	// Short timeout: the ThirdFn below blocks forever, so shutdown has to
	// time out for the test to finish.
	SetTimeout(10 * time.Millisecond)
	var buf = &logBuffer{fn: t.Logf}
	SetLogPrinter(buf.WriteF)
	txt1 := "arbitrary text"
	txt2 := "something else"
	txt3 := 456778
	txt4 := time.Now()
	txtL := "politically correct text"
	// Attach a context value to a lock and to a notifier in every stage.
	_ = Lock(txtL)
	_ = First(txt1)
	_ = Second(txt2, txt3)
	_ = ThirdFn(func() { select {} }, txt4)
	Shutdown()
	logged := buf.buf.String()
	// Every context value must appear somewhere in the captured log.
	if !strings.Contains(logged, txt1) {
		t.Errorf("Log should contain %s", txt1)
	}
	if !strings.Contains(logged, txt2) {
		t.Errorf("Log should contain %s", txt2)
	}
	if !strings.Contains(logged, fmt.Sprintf("%v", txt3)) {
		t.Errorf("Log should contain %v", txt3)
	}
	if !strings.Contains(logged, fmt.Sprintf("%v", txt4)) {
		t.Errorf("Log should contain %v", txt4)
	}
	if !strings.Contains(logged, fmt.Sprintf("%v", txtL)) {
		t.Errorf("Log should contain %v", txtL)
	}
}
// TestFnCancelWait asserts that CancelWait on a function notifier in a
// later stage returns, and that the cancelled function never runs.
func TestFnCancelWait(t *testing.T) {
	reset()
	defer close(startTimer(t))
	f := First()
	var ok, ok2 bool
	f2 := SecondFn(func() {
		ok2 = true
	})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		// Plain receive replaces a redundant single-case select (S1000).
		n := <-f
		ok = true
		go func() {
			wg.Done()
			f2.CancelWait()
		}()
		wg.Wait()
		time.Sleep(10 * time.Millisecond)
		close(n)
	}()
	Shutdown()
	if !ok {
		t.Fatal("missing shutdown signal")
	}
	if ok2 {
		t.Fatal("got unexpected shutdown signal")
	}
}
// TestNilNotifier asserts that once shutdown has progressed to stage 3,
// requesting notifiers (channel or function based) for any stage returns
// nil, since those stages can no longer fire.
func TestNilNotifier(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var reached = make(chan struct{})
	var finished = make(chan struct{})
	var testDone = make(chan struct{})
	// Block shutdown inside stage 3 so we can probe while it is in progress.
	_ = ThirdFn(func() { close(reached); <-finished })
	go func() { Shutdown(); close(testDone) }()
	// Wait for stage 3
	<-reached
	tests := []Notifier{PreShutdown(), First(), Second(), Third(),
		PreShutdownFn(func() {}), FirstFn(func() {}), SecondFn(func() {}), ThirdFn(func() {})}
	for i := range tests {
		if tests[i] != nil {
			t.Errorf("Expected test %d to be nil, was %#v", i, tests[i])
		}
	}
	close(finished)
	<-testDone
}
// TestNilNotifierCancel asserts that Cancel is a safe no-op on the nil
// notifiers handed out once shutdown has reached stage 3.
func TestNilNotifierCancel(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var reached = make(chan struct{})
	var finished = make(chan struct{})
	var testDone = make(chan struct{})
	// Block shutdown inside stage 3 so we can probe while it is in progress.
	_ = ThirdFn(func() { close(reached); <-finished })
	go func() { Shutdown(); close(testDone) }()
	// Wait for stage 3
	<-reached
	tests := []Notifier{PreShutdown(), First(), Second(), Third(),
		PreShutdownFn(func() {}), FirstFn(func() {}), SecondFn(func() {}), ThirdFn(func() {})}
	for i := range tests {
		// All cancels should return at once.
		tests[i].Cancel()
	}
	close(finished)
	<-testDone
}
// TestNilNotifierCancelWait asserts that CancelWait returns immediately
// on the nil notifiers handed out once shutdown has reached stage 3.
func TestNilNotifierCancelWait(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var reached = make(chan struct{})
	var finished = make(chan struct{})
	var testDone = make(chan struct{})
	// Block shutdown inside stage 3 so we can probe while it is in progress.
	_ = ThirdFn(func() { close(reached); <-finished })
	go func() { Shutdown(); close(testDone) }()
	// Wait for stage 3
	<-reached
	tests := []Notifier{PreShutdown(), First(), Second(), Third(),
		PreShutdownFn(func() {}), FirstFn(func() {}), SecondFn(func() {}), ThirdFn(func() {})}
	for i := range tests {
		// All cancel-waits should return at once.
		tests[i].CancelWait()
	}
	close(finished)
	<-testDone
}
// TestNilNotifierFollowing asserts that while shutdown is still in the
// pre-shutdown stage, notifiers for the following stages (1-3) are still
// handed out (non-nil) and can be cancelled.
func TestNilNotifierFollowing(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var reached = make(chan struct{})
	var finished = make(chan struct{})
	var testDone = make(chan struct{})
	// Block shutdown inside the pre-shutdown stage.
	_ = PreShutdownFn(func() { close(reached); <-finished })
	go func() { Shutdown(); close(testDone) }()
	// Wait for the pre-shutdown stage to be reached.
	<-reached
	tests := []Notifier{First(), Second(), Third(),
		FirstFn(func() {}), SecondFn(func() {}), ThirdFn(func() {})}
	for i := range tests {
		if tests[i] == nil {
			t.Errorf("Expected test %d to NOT be nil.", i)
			continue
		}
		tests[i].Cancel()
	}
	close(finished)
	<-testDone
}
// TestWait asserts that Wait blocks until Shutdown has completed.
func TestWait(t *testing.T) {
	reset()
	defer close(startTimer(t))
	ok := make(chan bool)
	go func() {
		Wait()
		close(ok)
	}()
	// Wait a little - enough to fail very often.
	time.Sleep(time.Millisecond * 10)
	// Non-blocking probe: Wait must not have returned before Shutdown.
	select {
	case <-ok:
		t.Fatal("Wait returned before shutdown finished")
	default:
	}
	Shutdown()
	// ok should return, otherwise we wait for timeout, which will fail the test
	<-ok
}
// TestTimeout asserts that shutdown times out when a notifier never
// acknowledges, and that the timeout fires near the configured duration.
func TestTimeout(t *testing.T) {
	reset()
	SetTimeout(time.Millisecond * 100)
	defer close(startTimer(t))
	f := First()
	go func() {
		// Receive the notification but never close it, forcing a timeout.
		<-f
	}()
	tn := time.Now()
	Shutdown()
	// time.Since is the idiomatic form of time.Now().Sub (staticcheck S1012).
	dur := time.Since(tn)
	if dur > time.Second || dur < time.Millisecond*50 {
		t.Fatalf("timeout time was unexpected:%v", dur)
	}
	if !Started() {
		// Fixed message: the failure here is that shutdown was not marked
		// started, not an unexpected signal.
		t.Fatal("shutdown not marked started")
	}
}
// TestTimeoutN asserts that a per-stage timeout overrides the global
// timeout for that stage.
func TestTimeoutN(t *testing.T) {
	reset()
	SetTimeout(time.Second * 2)
	SetTimeoutN(Stage1, time.Millisecond*100)
	defer close(startTimer(t))
	f := First()
	go func() {
		// Never acknowledge, forcing the stage 1 timeout.
		<-f
	}()
	tn := time.Now()
	Shutdown()
	dur := time.Since(tn)
	if dur > time.Second || dur < time.Millisecond*50 {
		t.Fatalf("timeout time was unexpected:%v", dur)
	}
	if !Started() {
		t.Fatal("shutdown not marked started")
	}
}
// TestTimeoutCallback asserts that the OnTimeout callback receives the
// stage and the context of the notifier that caused the timeout.
func TestTimeoutCallback(t *testing.T) {
	reset()
	SetTimeout(time.Second * 2)
	SetTimeoutN(Stage1, time.Millisecond*100)
	defer close(startTimer(t))
	var gotStage Stage
	var gotCtx string
	OnTimeout(func(s Stage, ctx string) {
		gotStage = s
		gotCtx = ctx
	})
	defer OnTimeout(nil)
	const testctx = "lock context"
	f := First(testctx)
	go func() {
		// Never acknowledge; stage 1 must time out.
		<-f
	}()
	tn := time.Now()
	Shutdown()
	dur := time.Since(tn)
	if dur > time.Second || dur < time.Millisecond*50 {
		t.Errorf("timeout time was unexpected:%v (%v->%v)", dur, tn, time.Now())
	}
	if !Started() {
		t.Fatal("shutdown not marked started")
	}
	if gotStage != Stage1 {
		t.Errorf("want stage 1, got %+v", gotStage)
	}
	if !strings.Contains(gotCtx, testctx) {
		t.Errorf("want context to contain %q, got %q", testctx, gotCtx)
	}
}
// TestTimeoutN2 asserts that extending the timeout of stage 2 does not
// affect the (short) global timeout used by stage 1.
func TestTimeoutN2(t *testing.T) {
	reset()
	SetTimeout(time.Millisecond * 100)
	SetTimeoutN(Stage2, time.Second*2)
	defer close(startTimer(t))
	f := First()
	go func() {
		// Never acknowledge, forcing the global timeout in stage 1.
		<-f
	}()
	tn := time.Now()
	Shutdown()
	dur := time.Since(tn)
	if dur > time.Second || dur < time.Millisecond*50 {
		t.Fatalf("timeout time was unexpected:%v", dur)
	}
	if !Started() {
		t.Fatal("shutdown not marked started")
	}
}
// TestLock asserts that locks can be acquired and released before
// shutdown, and that shutdown completes while short-lived locks are
// being taken concurrently.
func TestLock(t *testing.T) {
	reset()
	defer close(startTimer(t))
	f := First()
	ok := false
	go func() {
		// Plain receive replaces a redundant single-case select (S1000).
		n := <-f
		ok = true
		close(n)
	}()
	got := Lock()
	if got == nil {
		t.Fatal("Unable to acquire lock")
	}
	got()
	// Start 10 goroutines that acquire a lock.
	var wg1, wg2 sync.WaitGroup
	wg1.Add(10)
	wg2.Add(10)
	for i := 0; i < 10; i++ {
		go func() {
			defer wg1.Done()
			wg2.Done() // Signal we are ready to take the lock
			l := Lock()
			if l != nil {
				time.Sleep(timeouts[0] / 2)
				l()
			}
		}()
	}
	// Wait for all goroutines to have acquired the lock
	wg2.Wait()
	Shutdown()
	if !ok {
		t.Fatal("shutdown signal not received")
	}
	if !Started() {
		t.Fatal("expected that shutdown had started")
	}
	wg1.Wait()
}
// TestLockUnrelease asserts that a lock which is never released only
// delays shutdown until the pre-shutdown stage times out.
func TestLockUnrelease(t *testing.T) {
	reset()
	defer close(startTimer(t))
	SetTimeout(time.Millisecond * 500)
	SetTimeoutN(StagePS, time.Millisecond*100)
	got := Lock()
	if got == nil {
		t.Fatal("Unable to acquire lock")
	}
	defer got()
	tn := time.Now()
	Shutdown()
	dur := time.Since(tn)
	if dur > time.Second || dur < time.Millisecond*50 {
		t.Fatalf("timeout time was unexpected:%v", dur)
	}
	if !Started() {
		t.Fatal("expected that shutdown had started")
	}
}
// TestLockCallback asserts that a lock held past the timeout triggers
// the OnTimeout callback with the pre-shutdown stage and the lock's
// context — without Shutdown ever being called.
func TestLockCallback(t *testing.T) {
	reset()
	defer close(startTimer(t))
	SetTimeout(time.Millisecond * 50)
	const testctx = "lock context"
	var gotStage Stage
	var gotCtx string
	var wg sync.WaitGroup
	wg.Add(1)
	OnTimeout(func(s Stage, ctx string) {
		gotStage = s
		gotCtx = ctx
		wg.Done()
	})
	defer OnTimeout(nil)
	tn := time.Now()
	got := Lock(testctx)
	if got == nil {
		t.Fatal("Unable to acquire lock")
	}
	wg.Wait()
	dur := time.Since(tn)
	if dur > time.Second || dur < time.Millisecond*30 {
		t.Errorf("timeout time was unexpected:%v (%v->%v)", dur, tn, time.Now())
	}
	if gotStage != StagePS {
		t.Errorf("want stage ps, got %+v", gotStage)
	}
	if !strings.Contains(gotCtx, testctx) {
		t.Errorf("want context to contain %q, got %q", testctx, gotCtx)
	}
}
// TestOrder asserts that notifiers fire strictly in stage order:
// pre-shutdown, then stage 1, 2 and 3.
func TestOrder(t *testing.T) {
	reset()
	defer close(startTimer(t))
	t3 := Third()
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	t2 := Second()
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	t1 := First()
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	t0 := PreShutdown()
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	var ok0, ok1, ok2, ok3 bool
	go func() {
		// t.Fatal must not be called from a goroutine other than the one
		// running the test (testing docs); use t.Error and bail out.
		for {
			select {
			//t0 must be first
			case n := <-t0:
				if ok0 || ok1 || ok2 || ok3 {
					t.Error("unexpected order", ok0, ok1, ok2, ok3)
					return
				}
				ok0 = true
				close(n)
			case n := <-t1:
				if !ok0 || ok1 || ok2 || ok3 {
					t.Error("unexpected order", ok0, ok1, ok2, ok3)
					return
				}
				ok1 = true
				close(n)
			case n := <-t2:
				if !ok0 || !ok1 || ok2 || ok3 {
					t.Error("unexpected order", ok0, ok1, ok2, ok3)
					return
				}
				ok2 = true
				close(n)
			case n := <-t3:
				if !ok0 || !ok1 || !ok2 || ok3 {
					t.Error("unexpected order", ok0, ok1, ok2, ok3)
					return
				}
				ok3 = true
				close(n)
				return
			}
		}
	}()
	if ok0 || ok1 || ok2 || ok3 {
		t.Fatal("shutdown has already happened", ok0, ok1, ok2, ok3)
	}
	Shutdown()
	if !ok0 || !ok1 || !ok2 || !ok3 {
		t.Fatal("did not get expected shutdown signal", ok0, ok1, ok2, ok3)
	}
}
// TestRecursive asserts that a notifier for the next stage can be
// requested from within the handler of the current stage.
func TestRecursive(t *testing.T) {
	reset()
	defer close(startTimer(t))
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	t1 := First()
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	var ok1, ok2, ok3 bool
	go func() {
		// The original nested single-case selects are plain receives
		// (staticcheck S1000); request the next stage before
		// acknowledging the current one.
		n := <-t1
		ok1 = true
		t2 := Second()
		close(n)
		n = <-t2
		ok2 = true
		t3 := Third()
		close(n)
		n = <-t3
		ok3 = true
		close(n)
	}()
	if ok1 || ok2 || ok3 {
		t.Fatal("shutdown has already happened", ok1, ok2, ok3)
	}
	Shutdown()
	if !ok1 || !ok2 || !ok3 {
		t.Fatal("did not get expected shutdown signal", ok1, ok2, ok3)
	}
}
// TestBasicFn asserts that a function registered for the first stage is
// invoked during shutdown.
func TestBasicFn(t *testing.T) {
	reset()
	defer close(startTimer(t))
	called := false
	// Register a stage 1 function.
	_ = FirstFn(func() {
		called = true
	})
	// Start shutdown.
	Shutdown()
	if !called {
		t.Fatal("did not get expected shutdown signal")
	}
}
// setBool returns a closure that sets *target to true when invoked.
func setBool(target *bool) func() {
	return func() { *target = true }
}
// TestFnOrder asserts that functions registered in stages 1-3 all run
// during shutdown, regardless of registration order.
func TestFnOrder(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var ok1, ok2, ok3 bool
	// Register in reverse stage order; registration must not start shutdown.
	_ = ThirdFn(setBool(&ok3))
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	_ = SecondFn(setBool(&ok2))
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	_ = FirstFn(setBool(&ok1))
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	if ok1 || ok2 || ok3 {
		t.Fatal("shutdown has already happened", ok1, ok2, ok3)
	}
	Shutdown()
	if !ok1 || !ok2 || !ok3 {
		t.Fatal("did not get expected shutdown signal", ok1, ok2, ok3)
	}
}
// TestFnRecursive asserts that functions for later stages can be
// registered from inside an earlier stage's function and still run.
func TestFnRecursive(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var ok1, ok2, ok3 bool
	_ = FirstFn(func() {
		ok1 = true
		_ = SecondFn(func() {
			ok2 = true
			_ = ThirdFn(func() {
				ok3 = true
			})
		})
	})
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	if ok1 || ok2 || ok3 {
		t.Fatal("shutdown has already happened", ok1, ok2, ok3)
	}
	Shutdown()
	if !ok1 || !ok2 || !ok3 {
		t.Fatal("did not get expected shutdown signal", ok1, ok2, ok3)
	}
}
// When setting First or Second inside stage three they should be ignored.
func TestFnRecursiveRev(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var ok1, ok2, ok3 bool
	// Register earlier-stage functions from inside stage 3; those stages
	// have already passed, so the callbacks must never run.
	_ = ThirdFn(func() {
		ok3 = true
		_ = SecondFn(func() {
			ok2 = true
		})
		_ = FirstFn(func() {
			ok1 = true
		})
	})
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	if ok1 || ok2 || ok3 {
		t.Fatal("shutdown has already happened", ok1, ok2, ok3)
	}
	Shutdown()
	// Only the stage 3 function may have run.
	if ok1 || ok2 || !ok3 {
		t.Fatal("did not get expected shutdown signal", ok1, ok2, ok3)
	}
}
// TestFnCancel asserts that cancelled function notifiers in every stage
// are never invoked.
func TestFnCancel(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var g0, g1, g2, g3 bool
	// Register a function in each stage.
	notp := PreShutdownFn(func() {
		g0 = true
	})
	not1 := FirstFn(func() {
		g1 = true
	})
	not2 := SecondFn(func() {
		g2 = true
	})
	not3 := ThirdFn(func() {
		g3 = true
	})
	// Cancel them all before shutdown begins.
	notp.Cancel()
	not1.Cancel()
	not2.Cancel()
	not3.Cancel()
	// Start shutdown
	Shutdown()
	if g1 || g2 || g3 || g0 {
		t.Fatal("got unexpected shutdown signal", g0, g1, g2, g3)
	}
}
// TestFnCancelWait2 asserts that function notifiers removed with
// CancelWait (before shutdown starts) are never invoked.
func TestFnCancelWait2(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var g0, g1, g2, g3 bool
	// Register a function in each stage.
	notp := PreShutdownFn(func() {
		g0 = true
	})
	not1 := FirstFn(func() {
		g1 = true
	})
	not2 := SecondFn(func() {
		g2 = true
	})
	not3 := ThirdFn(func() {
		g3 = true
	})
	// CancelWait them all before shutdown begins.
	notp.CancelWait()
	not1.CancelWait()
	not2.CancelWait()
	not3.CancelWait()
	// Start shutdown
	Shutdown()
	if g1 || g2 || g3 || g0 {
		t.Fatal("got unexpected shutdown signal", g0, g1, g2, g3)
	}
}
// TestFnPanic asserts that a panic inside a registered function does not
// prevent shutdown from completing.
func TestFnPanic(t *testing.T) {
	reset()
	defer close(startTimer(t))
	gotcall := false
	// Register a function that panics after flagging it was called.
	_ = FirstFn(func() {
		gotcall = true
		panic("This is expected")
	})
	// Start shutdown
	Shutdown()
	if !gotcall {
		t.Fatal("did not get expected shutdown signal")
	}
}
// TestFnNotify asserts that a function notifier also delivers exactly
// one channel notification and that the channel is closed afterwards.
func TestFnNotify(t *testing.T) {
	reset()
	defer close(startTimer(t))
	gotcall := false
	// Register a function
	fn := FirstFn(func() {
		gotcall = true
	})
	// Start shutdown
	Shutdown()
	// This must have a notification
	_, ok := <-fn
	if !ok {
		t.Fatal("Notifier was closed before a notification")
	}
	// After this the channel must be closed
	_, ok = <-fn
	if ok {
		t.Fatal("Notifier was not closed after initial notification")
	}
	if !gotcall {
		t.Fatal("did not get expected shutdown signal")
	}
}
// TestStatusTimerFn asserts that, when a registered function is slow, the
// status timer logs a source trace pointing at the FirstFn call site.
func TestStatusTimerFn(t *testing.T) {
	version := strings.Split(runtime.Version(), ".")
	if len(version) >= 2 {
		if minor, err := strconv.Atoi(version[1]); err == nil {
			if minor < 9 {
				// t.Skip stops the test via SkipNow; the original had an
				// unreachable return after it (staticcheck S1023).
				t.Skip("Skipping test due to caller changes")
			}
		}
	}
	reset()
	// NOTE: runtime.Caller below must stay exactly 3 lines after the
	// FirstFn line; `want` encodes that source position.
	FirstFn(func() {
		time.Sleep(time.Millisecond * 100)
	})
	_, file, line, _ := runtime.Caller(0)
	want := fmt.Sprintf("%s:%d", file, line-3)
	old := Logger
	var b bytes.Buffer
	SetLogPrinter(func(f string, val ...interface{}) {
		b.WriteString(fmt.Sprintf(f+"\n", val...))
	})
	// Very frequent status logging so the slow function is reported.
	StatusTimer = time.Millisecond
	Shutdown()
	Logger = old
	StatusTimer = time.Minute
	if !strings.Contains(b.String(), want) {
		t.Errorf("Expected logger to contain trace to %s, got: %v", want, b.String())
	}
	lines := strings.Split(b.String(), "\n")
	for _, l := range lines {
		if strings.Contains(l, want) {
			t.Log("Got:", l)
			break
		}
	}
}
// TestStatusTimer asserts that, when a channel notifier is slow to
// acknowledge, the status timer logs a trace to the First() call site.
func TestStatusTimer(t *testing.T) {
	reset()
	// NOTE: runtime.Caller below must stay exactly 1 line after the
	// First() line; `want` encodes that source position.
	fn := First()
	_, file, line, _ := runtime.Caller(0)
	want := fmt.Sprintf("%s:%d", file, line-1)
	go func() {
		select {
		case v := <-fn:
			// Delay long enough for the millisecond status timer to fire.
			time.Sleep(100 * time.Millisecond)
			close(v)
		}
	}()
	old := Logger
	var b bytes.Buffer
	SetLogPrinter(func(f string, val ...interface{}) {
		b.WriteString(fmt.Sprintf(f+"\n", val...))
	})
	StatusTimer = time.Millisecond
	Shutdown()
	Logger = old
	StatusTimer = time.Minute
	if !strings.Contains(b.String(), want) {
		t.Errorf("Expected logger to contain trace to %s, got: %v", want, b.String())
	}
	lines := strings.Split(b.String(), "\n")
	for _, l := range lines {
		if strings.Contains(l, want) {
			t.Log("Got:", l)
			break
		}
	}
}
// TestFnSingleCancel asserts that cancelling one function notifier does
// not affect other notifiers in the same or other stages.
func TestFnSingleCancel(t *testing.T) {
	reset()
	defer close(startTimer(t))
	var ok1, ok2, ok3, okcancel bool
	_ = ThirdFn(func() {
		ok3 = true
	})
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	_ = SecondFn(func() {
		ok2 = true
	})
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	// This one will be cancelled before shutdown; it must never run.
	cancel := SecondFn(func() {
		okcancel = true
	})
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	_ = FirstFn(func() {
		ok1 = true
	})
	if Started() {
		t.Fatal("shutdown started unexpectedly")
	}
	if ok1 || ok2 || ok3 || okcancel {
		t.Fatal("shutdown has already happened", ok1, ok2, ok3, okcancel)
	}
	cancel.Cancel()
	Shutdown()
	if !ok1 || !ok2 || !ok3 || okcancel {
		t.Fatal("did not get expected shutdown signal", ok1, ok2, ok3, okcancel)
	}
}
// TestCancelMulti stress-tests Cancel: 1000 notifiers of random kinds
// are cancelled at random times while shutdown eventually runs.
func TestCancelMulti(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	// Fixed seed for reproducibility.
	rand.Seed(0xC0CAC01A)
	for i := 0; i < 1000; i++ {
		var n Notifier
		// NOTE(review): cases 8 and 9 leave n as the zero Notifier, so
		// Cancel is also exercised on it — presumably intentional (see
		// TestNilNotifierCancel); confirm.
		switch rand.Int31n(10) {
		case 0:
			n = PreShutdown()
		case 1:
			n = First()
		case 2:
			n = Second()
		case 3:
			n = Third()
		case 4:
			n = PreShutdownFn(func() {})
		case 5:
			n = FirstFn(func() {})
		case 6:
			n = SecondFn(func() {})
		case 7:
			n = ThirdFn(func() {})
		}
		go func(n Notifier, t time.Duration) {
			time.Sleep(t)
			n.Cancel()
		}(n, time.Millisecond*time.Duration(rand.Intn(100)))
		time.Sleep(time.Millisecond)
	}
	// Start shutdown
	Shutdown()
}
// TestCancelMulti2 stress-tests concurrent Cancel racing against
// receivers: half the goroutines cancel their notifier (and must never
// be notified), the other half acknowledge normally.
func TestCancelMulti2(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	// Fixed seed for reproducibility.
	rand.Seed(0xC0CAC01A)
	var wg sync.WaitGroup
	wg.Add(1000)
	for i := 0; i < 1000; i++ {
		var n Notifier
		// Cases 8 and 9 leave n as the zero Notifier (see TestCancelMulti).
		switch rand.Int31n(10) {
		case 0:
			n = PreShutdown()
		case 1:
			n = First()
		case 2:
			n = Second()
		case 3:
			n = Third()
		case 4:
			n = PreShutdownFn(func() {})
		case 5:
			n = FirstFn(func() {})
		case 6:
			n = SecondFn(func() {})
		case 7:
			n = ThirdFn(func() {})
		}
		go func(n Notifier, r int) {
			if r&1 == 0 {
				// Cancelled: receiving a value afterwards is an error.
				n.Cancel()
				wg.Done()
				select {
				case v, ok := <-n:
					t.Errorf("Got notifier on %+v", n)
					if ok {
						close(v)
					}
				}
			} else {
				// Not cancelled: acknowledge the notification.
				wg.Done()
				select {
				case v, ok := <-n:
					if ok {
						close(v)
					}
				}
			}
		}(n, rand.Intn(100))
	}
	wg.Wait()
	// Start shutdown
	Shutdown()
}
// TestCancelWaitMulti stress-tests CancelWait: 1000 notifiers of random
// kinds are cancel-waited at random times while shutdown eventually runs.
func TestCancelWaitMulti(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	// Fixed seed for reproducibility.
	rand.Seed(0xC0CAC01A)
	for i := 0; i < 1000; i++ {
		var n Notifier
		// Cases 8 and 9 leave n as the zero Notifier (see TestCancelMulti).
		switch rand.Int31n(10) {
		case 0:
			n = PreShutdown()
		case 1:
			n = First()
		case 2:
			n = Second()
		case 3:
			n = Third()
		case 4:
			n = PreShutdownFn(func() {})
		case 5:
			n = FirstFn(func() {})
		case 6:
			n = SecondFn(func() {})
		case 7:
			n = ThirdFn(func() {})
		}
		go func(n Notifier, t time.Duration) {
			time.Sleep(t)
			n.CancelWait()
		}(n, time.Millisecond*time.Duration(rand.Intn(250)))
		time.Sleep(time.Millisecond)
	}
	// Start shutdown
	Shutdown()
}
// TestCancelWaitMulti2 stress-tests concurrent CancelWait racing against
// receivers: one third cancel-waits up front (and must never be
// notified), one third cancel-waits after all goroutines are started,
// and the rest acknowledge normally.
func TestCancelWaitMulti2(t *testing.T) {
	reset()
	SetTimeout(time.Second)
	defer close(startTimer(t))
	// Fixed seed for reproducibility.
	rand.Seed(0xC0CAC01A)
	var wg sync.WaitGroup
	wg.Add(1000)
	for i := 0; i < 1000; i++ {
		var n Notifier
		// Cases 8 and 9 leave n as the zero Notifier (see TestCancelMulti).
		switch rand.Int31n(10) {
		case 0:
			n = PreShutdown()
		case 1:
			n = First()
		case 2:
			n = Second()
		case 3:
			n = Third()
		case 4:
			n = PreShutdownFn(func() {})
		case 5:
			n = FirstFn(func() {})
		case 6:
			n = SecondFn(func() {})
		case 7:
			n = ThirdFn(func() {})
		}
		go func(n Notifier, r int) {
			if r%3 == 0 {
				// Cancel-waited: receiving a value afterwards is an error.
				n.CancelWait()
				wg.Done()
				select {
				case v, ok := <-n:
					t.Errorf("Got notifier on %+v", n)
					if ok {
						close(v)
					}
				}
			} else if r%2 == 1 {
				// Cancel-wait after every goroutine has been started.
				wg.Done()
				wg.Wait()
				n.CancelWait()
			} else {
				// Not cancelled: acknowledge the notification.
				wg.Done()
				select {
				case v, ok := <-n:
					if ok {
						close(v)
					}
				}
			}
		}(n, rand.Intn(100))
	}
	wg.Wait()
	// Start shutdown
	Shutdown()
}
// Get a notifier and perform our own code when we shutdown.
// The received value must be closed to signal that this stage is done.
func ExampleNotifier() {
	shutdown := First()
	select {
	case n := <-shutdown:
		// Do shutdown code ...

		// Signal we are done
		close(n)
	}
}
// Get a notifier and perform our own function when we shutdown.
// Function notifiers need no explicit acknowledgement; the stage
// proceeds when the function returns.
func Example_functions() {
	_ = FirstFn(func() {
		// This function is called on shutdown
		fmt.Println("First shutdown stage called")
	})
	// Will print the parameter when Shutdown() is called
}
// Note that the same effect of this example can also be achieved using the
// WrapHandlerFunc helper.
func ExampleLock() {
	http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		// Acquire a lock; while it is held, the server will not shut down.
		lock := Lock()
		if lock != nil {
			defer lock()
		} else {
			// We are currently shutting down, return http.StatusServiceUnavailable
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		// ...
	})
	http.ListenAndServe(":8080", nil)
}
// Change timeout for a single stage
func ExampleSetTimeoutN() {
	// Set timeout for all stages
	SetTimeout(time.Second)
	// But give second stage more time
	SetTimeoutN(Stage2, time.Second*10)
}
// This is an example, that could be your main function.
//
// We wait for jobs to finish in another goroutine, from
// where we initialize the shutdown.
//
// This is of course not a real-world problem, but there are many
// cases where you would want to initialize shutdown from other places than
// your main function, and where you would still like it to be able to
// do some final cleanup.
func ExampleWait() {
	x := make([]struct{}, 10)
	var wg sync.WaitGroup
	wg.Add(len(x))
	for i := range x {
		go func(i int) {
			time.Sleep(time.Millisecond * time.Duration(i))
			wg.Done()
		}(i)
	}
	// ignore this reset, for test purposes only
	reset()
	// Wait for the jobs above to finish
	go func() {
		wg.Wait()
		fmt.Println("jobs done")
		Shutdown()
	}()
	// Since this is main, we wait for a shutdown to occur before
	// exiting.
	Wait()
	fmt.Println("exiting main")
	// Note that the output will always be in this order.
	// Output: jobs done
	// exiting main
}
|
// Mandelbrot creates PNG of Mandelbrot
package mandelbrot
import (
"image"
"image/color"
"image/png"
"io"
"math/cmplx"
)
// Mandelbrot renders the Mandelbrot set over the complex rectangle
// [xmin,xmax] x [ymin,ymax] at the given pixel dimensions and writes the
// result to w as a PNG image.
func Mandelbrot(w io.Writer, xmin, xmax, ymin, ymax float64, width, height int) {
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	for py := 0; py < height; py++ {
		// Map the pixel row onto the imaginary axis.
		y := float64(py)/float64(height)*(ymax-ymin) + ymin
		for px := 0; px < width; px++ {
			// Map the pixel column onto the real axis.
			x := float64(px)/float64(width)*(xmax-xmin) + xmin
			img.Set(px, py, iterate(complex(x, y)))
		}
	}
	// NOTE(review): the encode error is discarded; the signature offers no
	// way to report it to the caller.
	_ = png.Encode(w, img)
}
// getColor maps an iteration count n (relative to nmax) onto an RGBA
// palette entry. The scaling is done in uint32 to avoid overflow in the
// 255*n product.
func getColor(n, nmax uint8) color.Color {
	scaled := uint8(uint32(255) * uint32(n) / uint32(nmax))
	// NOTE(review): G wraps around once scaled >= 128 (uint8 arithmetic is
	// defined to wrap in Go) — presumably intentional palette cycling; confirm.
	return color.RGBA{
		R: scaled,
		G: 128 + scaled,
		B: 255 - scaled,
		A: 255,
	}
}
// iterate runs up to 20 Mandelbrot iterations of v = v*v + z and returns
// a palette color for the iteration at which |v| escaped 2, or black when
// z never escapes (treated as inside the set).
func iterate(z complex128) color.Color {
	const iterations = 20
	v := complex(0, 0)
	for n := uint8(0); n < iterations; n++ {
		v = v*v + z
		if cmplx.Abs(v) > 2 {
			return getColor(n, iterations)
		}
	}
	return color.Black
}
|
package tts
import (
"fmt"
"testing"
)
// text is a code-switching (Mandarin/English) sample paragraph used as
// the input for the TTS conversion tests below.
var text = `
我現在 schedule 上總 total 有10個 case 在 run, 等等還要跟我的 team 再 confirm 一下 format, 可能要再 review 一下新版的 checklist, 看 data 現在處理的 process 到哪邊,都 check 完、confirm了都 OK 的話,就只要給他們去 maintain 就好了,Anyway, 明天跟 RD 部門的 leader meeting還是 focus 在 interface 和 menu 上面, 反正他們都有 for 新平台的 know how 了,照 S O P 做,我 concern 的是 lab 裡面有什麼 special 的 idea. 感覺新來的比較沒 sense, present 的時候感覺進度一直 delay, 搞不好 boss 過幾天就會找他 talk 一下了`
// TestConvertSimple asserts that ConvertSimple returns a non-empty
// result URL using default voice settings.
func TestConvertSimple(t *testing.T) {
	url := ConvertSimple("ttsaccount", "ttspassword", text)
	// %s, not %#s: '#' is not a recognized flag for the s verb (go vet).
	fmt.Printf("\n\nURL: %s\n\n", url)
	if url == "" {
		t.Fatalf("Fail to get file")
	}
}
// TestConvertText asserts that ConvertText returns a non-empty result
// URL for an explicit voice, volume, speed and format.
func TestConvertText(t *testing.T) {
	url := ConvertText("ttsaccount", "ttspassword", text, "MCHEN_Joddess", "80", "3", "wav")
	// %s, not %#s: '#' is not a recognized flag for the s verb (go vet).
	fmt.Printf("\n\nURL: %s\n\n", url)
	if url == "" {
		t.Fatalf("Fail to get file")
	}
}
// TestConvertAdvancedText asserts that ConvertAdvancedText returns a
// non-empty result URL when all advanced parameters are supplied.
func TestConvertAdvancedText(t *testing.T) {
	url := ConvertAdvancedText("ttsaccount", "ttspassword", text, "Angela", "95", "2", "wav", "1", "1", "10")
	// %s, not %#s: '#' is not a recognized flag for the s verb (go vet).
	fmt.Printf("\n\nURL: %s\n\n", url)
	if url == "" {
		t.Fatalf("Fail to get file")
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.