text stringlengths 11 4.05M |
|---|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package util
import (
"syscall"
"time"
)
// Package-level sampling state shared across calls to GetCPUPercentage.
// NOTE(review): read and written without synchronization — confirm that
// GetCPUPercentage is never called from multiple goroutines.
var (
	lastInspectUnixNano int64 // wall-clock time (ns) of the previous sample
	lastCPUUsageTime    int64 // process CPU time (ns) at the previous sample
)
// GetCPUPercentage calculates CPU usage and returns percentage in float64(e.g. 2.5 means 2.5%).
//
// The value is the process CPU time (user + kernel) consumed since the
// previous call divided by the wall-clock time elapsed since that call.
// Returns 0 on any syscall failure or when no wall-clock time has
// elapsed yet.
func GetCPUPercentage() float64 {
	var ru syscall.Rusage
	handle, err := syscall.GetCurrentProcess()
	if err != nil {
		return 0
	}
	e := syscall.GetProcessTimes(syscall.Handle(handle), &ru.CreationTime, &ru.ExitTime, &ru.KernelTime, &ru.UserTime)
	if e != nil {
		return 0
	}
	usageTime := ru.UserTime.Nanoseconds() + ru.KernelTime.Nanoseconds()
	nowTime := time.Now().UnixNano()
	elapsed := nowTime - lastInspectUnixNano
	if elapsed <= 0 {
		// Two samples within the same nanosecond (or a clock step
		// backwards) would otherwise divide by zero and yield Inf/NaN.
		return 0
	}
	perc := float64(usageTime-lastCPUUsageTime) / float64(elapsed) * 100.0
	lastInspectUnixNano = nowTime
	lastCPUUsageTime = usageTime
	return perc
}
|
package main
// Character with a name, description you can fight and talk to.
type Character struct {
	name         string // display name returned by Name()
	description  string // appearance text returned by Describe()
	conversation Action // action started by Talk()
	battle       Action // action started by Fight()
}
// Name returns the character's display name.
func (c *Character) Name() string {
	return c.name
}
// Describe returns the character's appearance text.
func (c *Character) Describe() string {
	return c.description
}
// Talk starts the character's conversation action.
func (c *Character) Talk() {
	c.conversation.Start()
}
// Fight starts the character's battle action.
func (c *Character) Fight() {
	c.battle.Start()
}
|
package virtualnode
import (
"context"
appmesh "github.com/aws/aws-app-mesh-controller-for-k8s/apis/appmesh/v1beta2"
"github.com/aws/aws-app-mesh-controller-for-k8s/pkg/equality"
"github.com/aws/aws-app-mesh-controller-for-k8s/pkg/k8s"
"github.com/aws/aws-sdk-go/aws"
appmeshsdk "github.com/aws/aws-sdk-go/service/appmesh"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
testclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/log"
"testing"
)
// Test_defaultResourceManager_updateCRDVirtualNode verifies that
// updateCRDVirtualNode patches the CRD status (ARN and Active
// condition) to mirror the state reported by the App Mesh SDK.
func Test_defaultResourceManager_updateCRDVirtualNode(t *testing.T) {
	type args struct {
		vn    *appmesh.VirtualNode
		sdkVN *appmeshsdk.VirtualNodeData
	}
	tests := []struct {
		name    string
		args    args
		wantVN  *appmesh.VirtualNode
		wantErr error
	}{
		{
			name: "virtualNode needs patch both arn and condition",
			args: args{
				vn: &appmesh.VirtualNode{
					ObjectMeta: metav1.ObjectMeta{
						Name: "vn-1",
					},
					Status: appmesh.VirtualNodeStatus{},
				},
				sdkVN: &appmeshsdk.VirtualNodeData{
					Metadata: &appmeshsdk.ResourceMetadata{
						Arn: aws.String("arn-1"),
					},
					Status: &appmeshsdk.VirtualNodeStatus{
						Status: aws.String(appmeshsdk.VirtualNodeStatusCodeActive),
					},
				},
			},
			wantVN: &appmesh.VirtualNode{
				ObjectMeta: metav1.ObjectMeta{
					Name: "vn-1",
				},
				Status: appmesh.VirtualNodeStatus{
					VirtualNodeARN: aws.String("arn-1"),
					Conditions: []appmesh.VirtualNodeCondition{
						{
							Type:   appmesh.VirtualNodeActive,
							Status: corev1.ConditionTrue,
						},
					},
				},
			},
		},
		{
			name: "virtualNode needs patch condition only",
			args: args{
				vn: &appmesh.VirtualNode{
					ObjectMeta: metav1.ObjectMeta{
						Name: "vn-1",
					},
					Status: appmesh.VirtualNodeStatus{
						VirtualNodeARN: aws.String("arn-1"),
						Conditions: []appmesh.VirtualNodeCondition{
							{
								Type:   appmesh.VirtualNodeActive,
								Status: corev1.ConditionTrue,
							},
						},
					},
				},
				sdkVN: &appmeshsdk.VirtualNodeData{
					Metadata: &appmeshsdk.ResourceMetadata{
						Arn: aws.String("arn-1"),
					},
					Status: &appmeshsdk.VirtualNodeStatus{
						Status: aws.String(appmeshsdk.VirtualNodeStatusCodeInactive),
					},
				},
			},
			wantVN: &appmesh.VirtualNode{
				ObjectMeta: metav1.ObjectMeta{
					Name: "vn-1",
				},
				Status: appmesh.VirtualNodeStatus{
					VirtualNodeARN: aws.String("arn-1"),
					Conditions: []appmesh.VirtualNodeCondition{
						{
							Type:   appmesh.VirtualNodeActive,
							Status: corev1.ConditionFalse,
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			k8sSchema := runtime.NewScheme()
			// Scheme registration returns an error; the original silently
			// discarded it (errcheck/go vet finding).
			err := clientgoscheme.AddToScheme(k8sSchema)
			assert.NoError(t, err)
			err = appmesh.AddToScheme(k8sSchema)
			assert.NoError(t, err)
			k8sClient := testclient.NewFakeClientWithScheme(k8sSchema)
			m := &defaultResourceManager{
				k8sClient: k8sClient,
				log:       log.NullLogger{},
			}
			err = k8sClient.Create(ctx, tt.args.vn.DeepCopy())
			assert.NoError(t, err)
			err = m.updateCRDVirtualNode(ctx, tt.args.vn, tt.args.sdkVN)
			if tt.wantErr != nil {
				assert.EqualError(t, err, tt.wantErr.Error())
			} else {
				assert.NoError(t, err)
				gotVN := &appmesh.VirtualNode{}
				err = k8sClient.Get(ctx, k8s.NamespacedName(tt.args.vn), gotVN)
				assert.NoError(t, err)
				// Ignore fake-client bookkeeping and timestamps in the diff.
				opts := cmp.Options{
					equality.IgnoreFakeClientPopulatedFields(),
					cmpopts.IgnoreTypes((*metav1.Time)(nil)),
				}
				assert.True(t, cmp.Equal(tt.wantVN, gotVN, opts), "diff", cmp.Diff(tt.wantVN, gotVN, opts))
			}
		})
	}
}
// Test_defaultResourceManager_isSDKVirtualNodeControlledByCRDVirtualNode
// checks that control is decided by comparing the SDK resource owner
// account with the manager's own AWS account id.
func Test_defaultResourceManager_isSDKVirtualNodeControlledByCRDVirtualNode(t *testing.T) {
	type fields struct {
		accountID string
	}
	type args struct {
		sdkVN *appmeshsdk.VirtualNodeData
		vn    *appmesh.VirtualNode
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name:   "sdkVN is controlled by crdVN",
			fields: fields{accountID: "222222222"},
			args: args{
				sdkVN: &appmeshsdk.VirtualNodeData{
					Metadata: &appmeshsdk.ResourceMetadata{
						// Same account as the manager: controlled.
						ResourceOwner: aws.String("222222222"),
					},
				},
				vn: &appmesh.VirtualNode{},
			},
			want: true,
		},
		{
			name:   "sdkVN isn't controlled by crdVN",
			fields: fields{accountID: "222222222"},
			args: args{
				sdkVN: &appmeshsdk.VirtualNodeData{
					Metadata: &appmeshsdk.ResourceMetadata{
						// Different account: not controlled.
						ResourceOwner: aws.String("33333333"),
					},
				},
				vn: &appmesh.VirtualNode{},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			m := &defaultResourceManager{
				accountID: tt.fields.accountID,
				log:       &log.NullLogger{},
			}
			got := m.isSDKVirtualNodeControlledByCRDVirtualNode(ctx, tt.args.sdkVN, tt.args.vn)
			assert.Equal(t, tt.want, got)
		})
	}
}
// Test_defaultResourceManager_isSDKVirtualNodeOwnedByCRDVirtualNode
// checks that ownership is decided by comparing the SDK resource owner
// account with the manager's own AWS account id. The cases mirror the
// "controlled" test above by design.
func Test_defaultResourceManager_isSDKVirtualNodeOwnedByCRDVirtualNode(t *testing.T) {
	type fields struct {
		accountID string
	}
	type args struct {
		sdkVN *appmeshsdk.VirtualNodeData
		vn    *appmesh.VirtualNode
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name:   "sdkVN is owned by crdVN",
			fields: fields{accountID: "222222222"},
			args: args{
				sdkVN: &appmeshsdk.VirtualNodeData{
					Metadata: &appmeshsdk.ResourceMetadata{
						// Same account as the manager: owned.
						ResourceOwner: aws.String("222222222"),
					},
				},
				vn: &appmesh.VirtualNode{},
			},
			want: true,
		},
		{
			name:   "sdkVN isn't owned by crdVN",
			fields: fields{accountID: "222222222"},
			args: args{
				sdkVN: &appmeshsdk.VirtualNodeData{
					Metadata: &appmeshsdk.ResourceMetadata{
						// Different account: not owned.
						ResourceOwner: aws.String("33333333"),
					},
				},
				vn: &appmesh.VirtualNode{},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			m := &defaultResourceManager{
				accountID: tt.fields.accountID,
				log:       &log.NullLogger{},
			}
			got := m.isSDKVirtualNodeOwnedByCRDVirtualNode(ctx, tt.args.sdkVN, tt.args.vn)
			assert.Equal(t, tt.want, got)
		})
	}
}
|
package school
import (
"sort"
)
// School keeps a roster of students grouped by grade number.
type School struct {
	grades map[int][]string // grade -> student names (sorted lazily on query)
}

// Grade pairs a grade number with the students enrolled in it.
type Grade struct {
	grade    int
	students []string
}

// New returns an empty School ready for enrollment.
func New() *School {
	return &School{grades: map[int][]string{}}
}

// Enrollment returns all grades ordered by grade number, with each
// grade's students sorted alphabetically.
func (school *School) Enrollment() []Grade {
	return school.sortedListOfGrades()
}

// sortedListOfGrades returns the grades sorted ascending by grade number.
func (school *School) sortedListOfGrades() []Grade {
	grades := school.listOfGrades()
	sort.Slice(grades, func(i, j int) bool {
		return grades[i].grade < grades[j].grade
	})
	return grades
}

// listOfGrades snapshots the roster as a slice of Grade values.
// The student slices are shared with the internal map, not copied.
func (school *School) listOfGrades() []Grade {
	school.sortStudents()
	grades := make([]Grade, 0, len(school.grades))
	for g, students := range school.grades {
		grades = append(grades, Grade{grade: g, students: students})
	}
	return grades
}

// sortStudents sorts every grade's student list alphabetically in place.
func (school *School) sortStudents() {
	for _, students := range school.grades {
		// sort.Strings is the idiomatic (and direct) form of the original
		// sort.Slice with a string less-func.
		sort.Strings(students)
	}
}

// Add enrolls student in grade g.
func (school *School) Add(student string, g int) {
	// append on a missing key works because the nil slice grows on
	// demand; the original's explicit empty-slice initialization was
	// redundant.
	school.grades[g] = append(school.grades[g], student)
}

// Grade returns the students enrolled in grade (nil if none).
// NOTE: the returned slice aliases internal state; callers should not
// mutate it.
func (school *School) Grade(grade int) []string {
	return school.grades[grade]
}
package controller
import (
"encoding/json"
"fmt"
"github.com/AnaMijailovic/NTP/arf/model"
"github.com/AnaMijailovic/NTP/arf/service"
"net/http"
"strconv"
"time"
)
// Serve registers the HTTP API routes and blocks serving on :8080.
func Serve() {
	http.HandleFunc("/api/fileTree", GetFileTree)
	http.HandleFunc("/api/chartData", GetChartData)
	http.HandleFunc("/api/delete", DeleteFiles)
	http.HandleFunc("/api/rename", RenameFiles)
	http.HandleFunc("/api/reorganize", ReorganizeFiles)
	http.HandleFunc("/api/recover", Recover)
	fmt.Println("Starting server ...")
	// ListenAndServe always returns a non-nil error; report it instead
	// of silently dropping it (e.g. when the port is already in use).
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println(err)
	}
}
// GetFileTree writes the JSON file tree rooted at the "path" query
// parameter. Responds 400 when the parameter is missing and 500 when
// encoding fails.
func GetFileTree(w http.ResponseWriter, r *http.Request) {
	keys := r.URL.Query()["path"]
	if len(keys) == 0 {
		// Guard: indexing keys[0] on a missing parameter would panic.
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	path := keys[0]
	tree := service.CreateTree(path, true)
	if err := json.NewEncoder(w).Encode(tree); err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// GetChartData writes chart data of kind "chartType" for the tree
// rooted at "path" as JSON. Responds 400 on missing parameters and 500
// when encoding fails.
func GetChartData(w http.ResponseWriter, r *http.Request) {
	paths := r.URL.Query()["path"]
	chartTypes := r.URL.Query()["chartType"]
	if len(paths) == 0 || len(chartTypes) == 0 {
		// Guard: indexing [0] on a missing parameter would panic.
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	data := service.GetFileChartData(paths[0], chartTypes[0])
	if err := json.NewEncoder(w).Encode(data); err != nil {
		fmt.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// DeleteFiles deletes files under the "path" query parameter, filtered
// by the recursive/empty flags and the createdBefore/notAccessedAfter
// dates (layout 02-Jan-2006), then reports how many files were removed.
//
// NOTE(review): every parameter is indexed with keys[0] without a
// presence check and parse errors are discarded — a missing parameter
// panics the handler; confirm the frontend always sends all of them.
func DeleteFiles(w http.ResponseWriter, r *http.Request) {
	keys, _ := r.URL.Query()["path"]
	path := keys[0]
	keys, _ = r.URL.Query()["recursive"]
	recursive, _ := strconv.ParseBool(keys[0])
	keys, _ = r.URL.Query()["empty"]
	empty, _ := strconv.ParseBool(keys[0])
	keys, _ = r.URL.Query()["createdBefore"]
	fmt.Println("Created: ", keys[0])
	createdBefore, _ := time.Parse("02-Jan-2006", keys[0])
	keys, _ = r.URL.Query()["notAccessedAfter"]
	fmt.Println("Accessed: ", keys[0])
	notAccessedAfter, _ := time.Parse("02-Jan-2006", keys[0])
	// Positional literal: field order must match model.DeleteData.
	deleteData := model.DeleteData{path, recursive, empty, createdBefore,
		notAccessedAfter}
	filesDeleted := service.DeleteFiles(&deleteData)
	json.NewEncoder(w).Encode("Deleted " + strconv.FormatInt(int64(*filesDeleted), 10) + " files")
}
// ReorganizeFiles moves files from "src" to "dest", grouping them by
// type, size bucket, and/or creation date according to the query
// parameters, and reports the outcome as JSON (500 on any error).
//
// NOTE(review): keys[0] panics when a parameter is absent and parse
// errors are discarded — confirm the frontend always sends all of them.
func ReorganizeFiles(w http.ResponseWriter, r *http.Request) {
	keys, _ := r.URL.Query()["src"]
	src := keys[0]
	keys, _ = r.URL.Query()["dest"]
	dest := keys[0]
	keys, _ = r.URL.Query()["recursive"]
	recursive, _ := strconv.ParseBool(keys[0])
	keys, _ = r.URL.Query()["fileType"]
	fileType, _ := strconv.ParseBool(keys[0])
	keys, _ = r.URL.Query()["fileSize"]
	fileSize, _ := strconv.ParseInt(keys[0], 10, 64)
	keys, _ = r.URL.Query()["createdDate"]
	createdDate := keys[0]
	// Positional literal: field order must match model.ReorganizeData.
	reorganizeData := model.ReorganizeData{src, dest, recursive,
		fileType, fileSize, createdDate}
	errs := service.ReorganizeFiles(&reorganizeData)
	if len(errs) > 0 {
		w.WriteHeader(http.StatusInternalServerError)
		// Only the first error determines the message shown to the user.
		if _, ok := errs[0].(model.UnableToRenameFileError); ok {
			json.NewEncoder(w).Encode("Arf was unable to move all files")
		} else {
			json.NewEncoder(w).Encode(errs[0].Error())
		}
	} else {
		json.NewEncoder(w).Encode("Files were successfully reorganized")
	}
}
// RenameFiles renames files under "path" either randomly or by
// remove/replaceWith/pattern substitution, and reports the outcome as
// JSON. Unlike ReorganizeFiles, no error status code is set on failure.
//
// NOTE(review): keys[0] panics when a parameter is absent and parse
// errors are discarded — confirm the frontend always sends all of them.
func RenameFiles(w http.ResponseWriter, r *http.Request) {
	keys, _ := r.URL.Query()["path"]
	path := keys[0]
	keys, _ = r.URL.Query()["recursive"]
	recursive, _ := strconv.ParseBool(keys[0])
	keys, _ = r.URL.Query()["random"]
	random, _ := strconv.ParseBool(keys[0])
	keys, _ = r.URL.Query()["remove"]
	remove := keys[0]
	keys, _ = r.URL.Query()["replaceWith"]
	replaceWith := keys[0]
	keys, _ = r.URL.Query()["pattern"]
	pattern := keys[0]
	// Positional literal: field order must match model.RenameData.
	renameData := model.RenameData{path, recursive, random,
		remove, replaceWith, pattern}
	errs := service.Rename(&renameData)
	if len(errs) > 0 {
		// Only the first error determines the message shown to the user.
		if _, ok := errs[0].(model.UnableToRenameFileError); ok {
			json.NewEncoder(w).Encode("Arf was unable to rename all files")
		} else {
			json.NewEncoder(w).Encode(errs[0].Error())
		}
	} else {
		json.NewEncoder(w).Encode("Files were successfully renamed.")
	}
}
// Recover undoes a previous rename/reorganize operation for the tree at
// the "path" query parameter and reports the outcome as JSON.
//
// NOTE(review): keys[0] panics when "path" is absent — confirm callers
// always provide it.
func Recover(w http.ResponseWriter, r *http.Request) {
	keys, _ := r.URL.Query()["path"]
	path := keys[0]
	errs := service.Recover(path)
	if len(errs) > 0 {
		// Only the first error determines the message shown to the user.
		if _, ok := errs[0].(model.UnableToRenameFileError); ok {
			json.NewEncoder(w).Encode("Arf was unable to recover all files")
		} else {
			json.NewEncoder(w).Encode(errs[0].Error())
		}
	} else {
		json.NewEncoder(w).Encode("Successfully recovered")
	}
}
package api
// GetDimensionsResponse is the JSON payload listing available dimension
// names; the field is omitted entirely when empty.
type GetDimensionsResponse struct {
	Dimensions []string `json:"dimensions,omitempty"`
}

// DimensionResponse describes a single dimension in API responses.
type DimensionResponse struct {
	Index int    `json:"index"`
	Name  string `json:"name"`
	Code  string `json:"code"`
}
package literals
//http://unicode-table.com
// Terminal glyphs for drawing the world, each built from its raw UTF-8
// byte sequence (see http://unicode-table.com).
var (
	SYMBOL_GRASS_BLANK  string = " "                                 // plain space
	SYMBOL_GRASS_LIGHT  string = string([]byte{226, 150, 145})       // U+2591 LIGHT SHADE
	SYMBOL_GRASS_MEDIUM string = string([]byte{226, 150, 146})       // U+2592 MEDIUM SHADE
	SYMBOL_GRASS_DARK   string = string([]byte{226, 150, 147})       // U+2593 DARK SHADE
	SYMBOL_POINT        string = string([]byte{226, 173, 153})       // U+2B59 HEAVY CIRCLED SALTIRE
	SYMBOL_QUEEN        string = string([]byte{226, 153, 148})       // U+2654 — actually WHITE CHESS KING, despite the name; confirm intent
	SYMBOL_ANT          string = string([]byte{240, 159, 144, 156})  // U+1F41C ANT
)
|
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc
import (
"github.com/pingcap/badger/y"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/util/codec"
)
// WriteType defines a write type.
type WriteType = byte

// WriteType values: the single-byte tags identifying a write-CF record.
const (
	WriteTypeLock     WriteType = 'L'
	WriteTypeRollback WriteType = 'R'
	WriteTypeDelete   WriteType = 'D'
	WriteTypePut      WriteType = 'P'
)
// WriteCFValue represents a write CF value.
type WriteCFValue struct {
	Type     WriteType // one of the WriteType* constants
	StartTS  uint64    // start timestamp of the writing transaction
	ShortVal []byte    // bytes remaining after the varint (see ParseWriteCFValue)
}

// errInvalidWriteCFValue is returned for empty data or an unknown type tag.
var errInvalidWriteCFValue = errors.New("invalid write CF value")
// ParseWriteCFValue parses the []byte data and returns a WriteCFValue.
// Layout: one type byte, a varint start ts, then the remaining bytes.
//
// NOTE(review): ShortVal receives *all* bytes left after the varint;
// per EncodeWriteCFValue that tail still carries the 'v' prefix and
// length byte — confirm callers expect that framing.
func ParseWriteCFValue(data []byte) (wv WriteCFValue, err error) {
	if len(data) == 0 {
		err = errInvalidWriteCFValue
		return
	}
	wv.Type = data[0]
	switch wv.Type {
	case WriteTypePut, WriteTypeDelete, WriteTypeLock, WriteTypeRollback:
	default:
		// Unknown type tag.
		err = errInvalidWriteCFValue
		return
	}
	// codec.DecodeUvarint returns (remaining bytes, value, error).
	wv.ShortVal, wv.StartTS, err = codec.DecodeUvarint(data[1:])
	return
}
// Single-byte markers introducing optional sections of an encoded
// lock/write value.
const (
	shortValuePrefix  = 'v' // followed by a length byte and the short value
	forUpdatePrefix   = 'f' // followed by a fixed-width for-update ts
	minCommitTsPrefix = 'm' // followed by a fixed-width min-commit ts
	//ShortValueMaxLen defines max length of short value.
	ShortValueMaxLen = 64
)
// EncodeWriteCFValue serializes a write-CF record — type tag, varint
// start ts, and (when non-empty) a 'v'-prefixed, length-prefixed short
// value — in the same layout TiKV uses; see
// tikv/src/storage/mvcc/write.rs for more detail.
func EncodeWriteCFValue(t WriteType, startTs uint64, shortVal []byte) []byte {
	buf := codec.EncodeUvarint([]byte{t}, startTs)
	if len(shortVal) == 0 {
		return buf
	}
	buf = append(buf, byte(shortValuePrefix), byte(len(shortVal)))
	return append(buf, shortVal...)
}
// EncodeLockCFValue encodes the mvcc lock and returns putLock value and putDefault value if exists.
// Layout: op byte, compact-encoded primary key, varint start ts, varint
// TTL, then optional sections: 'v' + len + short value, 'f' +
// for-update ts, 'm' + min-commit ts. Values longer than
// ShortValueMaxLen are not inlined; a copy is returned as the second
// result instead.
func EncodeLockCFValue(lock *Lock) ([]byte, []byte) {
	data := make([]byte, 0)
	switch lock.Op {
	case byte(kvrpcpb.Op_Put):
		data = append(data, LockTypePut)
	case byte(kvrpcpb.Op_Del):
		data = append(data, LockTypeDelete)
	case byte(kvrpcpb.Op_Lock):
		data = append(data, LockTypeLock)
	case byte(kvrpcpb.Op_PessimisticLock):
		data = append(data, LockTypePessimistic)
	default:
		// Programmer error: callers must pass a known op.
		panic("invalid lock op")
	}
	var longValue []byte
	data = codec.EncodeUvarint(codec.EncodeCompactBytes(data, lock.Primary), lock.StartTS)
	data = codec.EncodeUvarint(data, uint64(lock.TTL))
	if len(lock.Value) <= ShortValueMaxLen {
		if len(lock.Value) != 0 {
			data = append(data, byte(shortValuePrefix), byte(len(lock.Value)))
			data = append(data, lock.Value...)
		}
	} else {
		// Too large to inline: hand back a copy for the default CF.
		longValue = y.SafeCopy(nil, lock.Value)
	}
	if lock.ForUpdateTS > 0 {
		data = append(data, byte(forUpdatePrefix))
		data = codec.EncodeUint(data, lock.ForUpdateTS)
	}
	if lock.MinCommitTS > 0 {
		data = append(data, byte(minCommitTsPrefix))
		data = codec.EncodeUint(data, lock.MinCommitTS)
	}
	return data, longValue
}
// LockType defines a lock type.
type LockType = byte

// LockType values: the single-byte tags stored at the head of a lock CF
// value.
const (
	LockTypePut         LockType = 'P'
	LockTypeDelete      LockType = 'D'
	LockTypeLock        LockType = 'L'
	LockTypePessimistic LockType = 'S'
)

// errInvalidLockCFValue reports a malformed lock CF value.
var errInvalidLockCFValue = errors.New("invalid lock CF value")
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
)
/*
This application uses some of the previous concepts to calculate the size of a directory or a bunch of directories
given as input.
*/
// main computes the total disk usage of the directories given as
// arguments (default "."), optionally printing progress once a second
// with -v, then reports the final totals and the elapsed time.
func main() {
	var verbose = flag.Bool("v", false, "show verbose progress messages")
	var start, end time.Time
	// Determine the initial directories.
	flag.Parse()
	roots := flag.Args()
	if len(roots) == 0 {
		roots = []string{"."}
	}
	/*
		Traverse the file tree with the walkDir func in a separate go routine. fileSizes is an
		unbuffered channel that is incremented on the way. When the recursion is done the channel
		is closed.
	*/
	fileSizes := make(chan int64)
	start = time.Now()
	go func() {
		for _, root := range roots {
			walkDir(root, fileSizes)
		}
		close(fileSizes)
	}()
	// Print the results periodically.
	// tick stays nil unless -v was given; a nil channel blocks forever
	// in select, so the tick case is simply never taken.
	var tick <-chan time.Time
	if *verbose {
		tick = time.Tick(1000 * time.Millisecond)
	}
	// Print the results.
	var nfiles, nbytes int64
loop:
	for {
		select {
		// fileSizes is drained by the main goroutine until it is closed,
		// accumulating the byte total and the file count; the labeled
		// break ends the loop once the channel is closed.
		case size, ok := <-fileSizes:
			if !ok {
				break loop // fileSizes was closed
			}
			nfiles++
			nbytes += size
		case <-tick:
			// print the number of files and the space along the way
			printDiskUsage(nfiles, nbytes)
		}
	}
	end = time.Now()
	// print the number of final files and final space
	printDiskUsage(nfiles, nbytes)
	execTime := end.Sub(start)
	fmt.Printf("execution time %d ms\n", execTime.Milliseconds())
}
// printDiskUsage reports the running file count, total size in GB, and
// the exact byte count.
func printDiskUsage(nfiles, nbytes int64) {
	gb := float64(nbytes) / 1e9
	fmt.Printf("%d files %.1f GB (%d bytes)\n", nfiles, gb, nbytes)
}
// walkDir recursively walks the file tree rooted at dir and sends the
// size of every non-directory entry on fileSizes.
func walkDir(dir string, fileSizes chan<- int64) {
	for _, entry := range dirents(dir) {
		if !entry.IsDir() {
			fileSizes <- entry.Size()
			continue
		}
		walkDir(filepath.Join(dir, entry.Name()), fileSizes)
	}
}
// dirents returns the entries of directory dir, or nil after reporting
// the read error on stderr.
func dirents(dir string) []os.FileInfo {
	entries, err := ioutil.ReadDir(dir)
	if err == nil {
		return entries
	}
	fmt.Fprintf(os.Stderr, "du1: %v\n", err)
	return nil
}
|
package index
import (
"errors"
"fmt"
"github.com/MintegralTech/juno/datastruct"
"github.com/MintegralTech/juno/debug"
"github.com/MintegralTech/juno/document"
"github.com/MintegralTech/juno/helpers"
"github.com/MintegralTech/juno/log"
"github.com/easierway/concurrent_map"
"github.com/sirupsen/logrus"
"strconv"
"strings"
"sync/atomic"
)
// MaxNumIndex caps how many documents a single IndexerV2 can hold; it
// is also the fixed length of the idMap slice.
const (
	MaxNumIndex = 50000
)

// IndexerV2 bundles an inverted index and a storage (forward) index and
// maps external document ids to dense internal ids.
type IndexerV2 struct {
	invertedIndex   InvertedIndex                 // term -> posting-list index
	storageIndex    StorageIndex                  // field -> value forward index
	campaignMapping *concurrent_map.ConcurrentMap // external DocId -> internal id
	idMap           []document.DocId              // internal id -> external DocId
	count           uint64                        // next free internal id / live doc count
	name            string                        // index name (used for debug labels)
	kvType          *concurrent_map.ConcurrentMap // field name -> stored value type
	logger          log.Logger
	aDebug          *debug.Debug // non-nil only after SetDebug
}
// NewIndexV2 builds an empty IndexerV2 with pre-sized concurrent maps,
// a fixed-capacity id table, and a default logrus logger.
func NewIndexV2(name string) (i *IndexerV2) {
	i = &IndexerV2{
		invertedIndex:   NewInvertedIndexV2(),
		storageIndex:    NewStorageIndexerV2(),
		campaignMapping: concurrent_map.CreateConcurrentMap(128),
		kvType:          concurrent_map.CreateConcurrentMap(128),
		idMap:           make([]document.DocId, MaxNumIndex),
		count:           0,
		name:            name,
		logger:          logrus.New(),
	}
	return i
}
// GetInvertedIndex exposes the underlying inverted index.
func (i *IndexerV2) GetInvertedIndex() InvertedIndex {
	return i.invertedIndex
}

// GetStorageIndex exposes the underlying storage (forward) index.
func (i *IndexerV2) GetStorageIndex() StorageIndex {
	return i.storageIndex
}

// GetCampaignMap exposes the external-id -> internal-id mapping.
func (i *IndexerV2) GetCampaignMap() *concurrent_map.ConcurrentMap {
	return i.campaignMapping
}

// GetName returns the index name.
func (i *IndexerV2) GetName() string {
	return i.name
}

// SetDebug enables debug collection (only once) on this index and on
// both sub-indexes.
func (i *IndexerV2) SetDebug(level int) {
	if i.aDebug == nil {
		i.aDebug = debug.NewDebug(level, i.GetName())
		i.invertedIndex.SetDebug(level)
		i.storageIndex.SetDebug(level)
	}
}
// GetValueById returns debug info (inverted + storage entries) for the
// external document id, or an empty struct when the id is unknown.
func (i *IndexerV2) GetValueById(id document.DocId) *IndexDebugInfo {
	var res = &IndexDebugInfo{}
	docId, ok := i.campaignMapping.Get(DocId(id))
	if ok {
		// Sanity-check the internal id before querying the sub-indexes.
		if _, err := i.GetId(docId.(document.DocId)); err != nil {
			return res
		}
		res.InvertIndex = i.GetInvertedIndex().GetValueById(docId.(document.DocId))
		res.StorageIndex = i.GetStorageIndex().GetValueById(docId.(document.DocId))
	}
	return res
}
// UpdateIds is not supported by IndexerV2 and always panics.
func (i *IndexerV2) UpdateIds(fieldName string, ids []document.DocId) {
	panic("method not support")
}

// Delete is not supported by IndexerV2 and always panics.
func (i *IndexerV2) Delete(fieldName string) {
	panic("method not support")
}
// Add indexes one document, assigning it the next dense internal id.
// Each field is routed to the inverted index, the storage index, or
// both, according to field.IndexType. Returns an error for a nil doc or
// a full index; per-field indexing failures are only logged.
func (i *IndexerV2) Add(doc *document.DocInfo) error {
	if doc == nil {
		return helpers.DocumentError
	}
	if i.count >= MaxNumIndex {
		return fmt.Errorf("index is full: current[%d], max[%d]", i.count, MaxNumIndex)
	}
	for _, field := range doc.Fields {
		switch field.IndexType {
		case document.InvertedIndexType:
			i.invertAdd(document.DocId(i.count), field)
		case document.StorageIndexType:
			i.storageAdd(document.DocId(i.count), field)
			i.kvType.Set(concurrent_map.StrKey(field.Name), field.ValueType)
		case document.BothIndexType:
			i.invertAdd(document.DocId(i.count), field)
			i.storageAdd(document.DocId(i.count), field)
			i.kvType.Set(concurrent_map.StrKey(field.Name), field.ValueType)
		default:
			i.WarnStatus(field.Name, field.Value, "type is wrong")
		}
	}
	i.campaignMapping.Set(DocId(doc.Id), document.DocId(i.count))
	i.idMap[i.count] = doc.Id
	// NOTE(review): count is bumped atomically here but read and written
	// non-atomically elsewhere (e.g. MergeIndex) — confirm Add is never
	// called concurrently with other mutators.
	atomic.AddUint64(&i.count, 1)
	return nil
}
// Del removes the external-id mapping for doc; index postings are left
// in place and simply become unreachable.
func (i *IndexerV2) Del(doc *document.DocInfo) {
	i.campaignMapping.Del(DocId(doc.Id))
}

// Update persists the index to filename by delegating to Dump.
func (i *IndexerV2) Update(filename string) error {
	if err := i.Dump(filename); err != nil {
		return err
	}
	return nil
}

// Dump is a placeholder; persistence is not implemented yet.
func (i *IndexerV2) Dump(filename string) error {
	// TODO
	return nil
}

// Load is a placeholder; loading is not implemented yet.
func (i *IndexerV2) Load(filename string) error {
	return nil
}

// GetDataType reports the stored value type for fieldName, or
// document.DefaultFieldType when the field has never been stored.
func (i *IndexerV2) GetDataType(fieldName string) document.FieldType {
	if t, ok := i.kvType.Get(concurrent_map.StrKey(fieldName)); ok {
		return t.(document.FieldType)
	}
	return document.DefaultFieldType
}
// invertAdd indexes one field into the inverted index under keys of the
// form "<field><SEP><value>". Supported value types: string, int64, and
// slices of either; anything else (including nil) is only logged.
func (i *IndexerV2) invertAdd(id document.DocId, field *document.Field) {
	// Typed switch binds the asserted value directly, removing the
	// redundant re-assertions of the original untyped switch.
	switch value := field.Value.(type) {
	case []string:
		for _, v := range value {
			if err := i.invertedIndex.Add(field.Name+SEP+v, id); err != nil {
				i.WarnStatus(field.Name, v, err.Error())
			}
		}
	case []int64:
		for _, v := range value {
			if err := i.invertedIndex.Add(field.Name+SEP+strconv.FormatInt(v, 10), id); err != nil {
				i.WarnStatus(field.Name, v, err.Error())
			}
		}
	case string:
		if err := i.invertedIndex.Add(field.Name+SEP+value, id); err != nil {
			i.WarnStatus(field.Name, value, err.Error())
		}
	case int64:
		if err := i.invertedIndex.Add(field.Name+SEP+strconv.FormatInt(value, 10), id); err != nil {
			i.WarnStatus(field.Name, value, err.Error())
		}
	default:
		// Same message as before, without allocating a throwaway error
		// just to call .Error() on it.
		i.WarnStatus(field.Name, field.Value, "the doc is nil or type is wrong")
	}
}
// storageAdd stores one field value in the forward index, logging (not
// returning) any failure.
func (i *IndexerV2) storageAdd(id document.DocId, field *document.Field) {
	if err := i.storageIndex.Add(field.Name, id, field.Value); err != nil {
		i.WarnStatus(field.Name, field.Value, err.Error())
	}
}
// DebugInfo returns the collected debug data plus current index sizes,
// or nil when SetDebug was never called.
func (i *IndexerV2) DebugInfo() *debug.Debug {
	if i.aDebug != nil {
		i.aDebug.AddDebugMsg("invert index count: " + strconv.Itoa(i.invertedIndex.Count()))
		i.aDebug.AddDebugMsg("storage index count: " + strconv.Itoa(i.storageIndex.Count()))
		return i.aDebug
	}
	return nil
}

// WarnStatus logs a warning about a field that failed to index.
func (i *IndexerV2) WarnStatus(name string, value interface{}, err string) {
	if i.logger != nil {
		i.logger.Warnf("name:[%s] value:[%v] wrong reason:[%s]", name, value, err)
	}
}
// MergeIndex folds every live document of target into this index,
// assigning fresh internal ids. Documents deleted in target, or already
// present here by external id, are skipped.
//
// BUGFIX: capacity is now checked *before* installing each document.
// The original checked `i.count > MaxNumIndex` after incrementing,
// which meant the write to idMap[i.count] (len == MaxNumIndex) panicked
// with an index out of range before the check could ever fire.
func (i *IndexerV2) MergeIndex(target *IndexerV2) error {
	// One iterator per inverted-index key of the target.
	invertIters := make(map[string]datastruct.Iterator, target.invertedIndex.Count())
	target.invertedIndex.Range(func(key, value interface{}) bool {
		k := key.(string)
		items := strings.Split(k, SEP)
		iter := target.invertedIndex.Iterator(items[0], items[1])
		invertIters[k] = iter
		return true
	})
	// One iterator per storage-index field of the target.
	storageIters := make(map[string]datastruct.Iterator, target.storageIndex.Count())
	target.storageIndex.Range(func(key, value interface{}) bool {
		k := key.(string)
		iter := target.storageIndex.Iterator(k)
		storageIters[k] = iter
		return true
	})
	// merge by internal id of the target
	for id := uint64(0); id < target.count; id++ {
		docId := target.idMap[id]
		// already deleted in target
		if _, ok := target.campaignMapping.Get(DocId(docId)); !ok {
			continue
		}
		// already present in this (newer) index
		if _, ok := i.campaignMapping.Get(DocId(docId)); ok {
			continue
		}
		if i.count >= MaxNumIndex {
			return fmt.Errorf("merge index error, index is full, maxsize[%d], current[%d]", MaxNumIndex, i.count)
		}
		// invert list: advance each iterator to this id and copy postings.
		for k, v := range invertIters {
			for v.Current() != nil && id > uint64(v.Current().Key()) {
				v.Next()
			}
			if v.Current() != nil && id == uint64(v.Current().Key()) {
				// add invert index
				if e := i.invertedIndex.Add(k, document.DocId(i.count)); e != nil {
					i.logger.Warnf("MergeIndex add inverted index error, docId[%d], id[%d]", docId, i.count)
				}
				v.Next()
			}
		}
		// storage list: same walk for stored field values.
		for k, v := range storageIters {
			for v.Current() != nil && id > uint64(v.Current().Key()) {
				v.Next()
			}
			if v.Current() != nil && id == uint64(v.Current().Key()) {
				// add storage index
				if e := i.storageIndex.Add(k, document.DocId(i.count), v.Current().Value()); e != nil {
					i.logger.Warnf("MergeIndex add storage index error, docId[%d], id[%d]", docId, i.count)
				}
				v.Next()
			}
		}
		i.campaignMapping.Set(DocId(docId), document.DocId(i.count))
		i.idMap[i.count] = docId
		i.count++
	}
	return nil
}
// GetId maps a dense internal id back to the external document id.
func (i *IndexerV2) GetId(id document.DocId) (document.DocId, error) {
	if uint64(id) >= i.count {
		return 0, errors.New("id not found")
	}
	return i.idMap[id], nil
}

// GetInnerId maps an external document id to its internal id.
func (i *IndexerV2) GetInnerId(id document.DocId) (document.DocId, error) {
	v, ok := i.GetCampaignMap().Get(DocId(id))
	if !ok {
		return 0, errors.New("id not found")
	}
	return v.(document.DocId), nil
}

// GetIndexInfo reports the document count and per-index entry counts.
func (i *IndexerV2) GetIndexInfo() *IndexInfo {
	return &IndexInfo{
		DocSize:           int(i.count),
		InvertedIndexSize: i.GetInvertedIndex().Count(),
		StorageIndex:      i.GetStorageIndex().Count(),
	}
}
|
package day18
import (
"fmt"
"strconv"
"strings"
"text/scanner"
)
// Run solves both puzzle parts for the given input lines and prints the
// results, returning the first evaluation error encountered.
func Run(lines []string) error {
	part1, err := SumAll(lines, EqualPrecedence)
	if err != nil {
		return err
	}
	fmt.Println("Part 1:", part1)
	part2, err := SumAll(lines, AdvancedPrecedence)
	if err != nil {
		return err
	}
	fmt.Println("Part 2:", part2)
	return nil
}
// SumAll evaluates every line under the given precedences and returns
// the total of all results, or the first evaluation error.
func SumAll(input []string, prec Precedences) (int, error) {
	total := 0
	for _, expr := range input {
		v, err := Evaluate(expr, prec)
		if err != nil {
			return 0, err
		}
		total += v
	}
	return total, nil
}
// Evaluate computes the value of the infix expression under the given
// operator precedences: tokenize, convert to postfix, then run the
// stack machine.
func Evaluate(input string, prec Precedences) (int, error) {
	return evaluatePostfixed(postfixize(tokenize(input), prec))
}
// evaluatePostfixed runs a classic RPN stack machine over the tokens:
// numbers are pushed, operators pop two operands and push the result.
func evaluatePostfixed(input []string) (int, error) {
	var stack []int
	for _, token := range input {
		if token == "+" || token == "*" {
			n := len(stack)
			if n < 2 {
				return 0, fmt.Errorf("stack underflow")
			}
			res, err := execute(token, stack[n-2], stack[n-1])
			if err != nil {
				return 0, err
			}
			stack = append(stack[:n-2], res)
			continue
		}
		num, err := strconv.Atoi(token)
		if err != nil {
			return 0, fmt.Errorf("invalid number '%s'", token)
		}
		stack = append(stack, num)
	}
	// A well-formed expression reduces to exactly one value.
	if len(stack) != 1 {
		return 0, fmt.Errorf("after evaluating %v, stack should have been at 1 item: %v", input, stack)
	}
	return stack[0], nil
}
// execute applies the binary operator op to x and y.
func execute(op string, x, y int) (int, error) {
	switch op {
	case "+":
		return x + y, nil
	case "*":
		return x * y, nil
	default:
		return 0, fmt.Errorf("unknown operator '%s'", op)
	}
}
// tokenize splits input into scanner tokens (numbers, operators,
// parentheses), skipping whitespace.
func tokenize(input string) []string {
	var s scanner.Scanner
	s.Init(strings.NewReader(input))
	tokens := []string{}
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		tokens = append(tokens, s.TokenText())
	}
	return tokens
}
// Precedences maps an operator token to its binding strength; higher
// binds tighter.
type Precedences map[string]int

// EqualPrecedence treats + and * alike (part 1 rules).
var EqualPrecedence = Precedences{"+": 1, "*": 1}

// AdvancedPrecedence makes + bind tighter than * (part 2 rules).
var AdvancedPrecedence = Precedences{"+": 2, "*": 1}

// PrecedenceOf returns the precedence of input, or 0 for unknown tokens
// (including the empty string used as a stack sentinel).
func (p *Precedences) PrecedenceOf(input string) int {
	prec, ok := (*p)[input]
	if !ok {
		return 0
	}
	return prec
}
// postfixize converts infix tokens to postfix (RPN) order using the
// shunting-yard algorithm with the supplied operator precedences.
func postfixize(input []string, prec Precedences) []string {
	output := []string{}
	operators := []string{}
	for _, token := range input {
		switch token {
		case "(":
			operators = push(operators, token)
		case ")":
			// Flush operators back to the matching "(".
			popUntil(&operators, &output, func(x string) bool { return x == "(" })
			// do pop the "("
			var popped string
			operators, popped = pop(operators)
			if popped != "(" {
				panic("parens out of balance")
			}
		case "+", "*":
			if prec.PrecedenceOf(token) > prec.PrecedenceOf(top(operators)) {
				operators = push(operators, token)
			} else {
				// Pop operators of equal or higher precedence first,
				// then push this one.
				for top(operators) != "" && prec.PrecedenceOf(token) <= prec.PrecedenceOf(top(operators)) {
					popTo(&operators, &output)
				}
				operators = push(operators, token)
			}
		default:
			// Operand: goes straight to the output.
			output = push(output, token)
		}
	}
	// Pop remaining operators to the output
	popUntil(&operators, &output, func(_ string) bool { return false })
	return output
}
// popTo moves the top element of source onto dest.
func popTo(source *[]string, dest *[]string) {
	rest, popped := pop(*source)
	*source = rest
	*dest = push(*dest, popped)
}

// popUntil repeatedly moves operators onto output until the stack is
// empty or pred matches the top element (which is left in place).
func popUntil(operators *[]string, output *[]string, pred func(string) bool) {
	for {
		t := top(*operators)
		if t == "" || pred(t) {
			return
		}
		var popped string
		*operators, popped = pop(*operators)
		*output = push(*output, popped)
	}
}
// push returns stack with value appended on top.
func push(stack []string, value string) []string {
	return append(stack, value)
}

// pop returns the stack without its top element, plus that element.
// Popping an empty stack yields (nil, "").
func pop(stack []string) ([]string, string) {
	n := len(stack)
	if n == 0 {
		return nil, ""
	}
	return stack[:n-1], stack[n-1]
}

// top returns the top element without removing it, or "" when empty.
func top(stack []string) string {
	if n := len(stack); n > 0 {
		return stack[n-1]
	}
	return ""
}
|
package main
import (
"fmt"
. "github.com/little-go/learn-go/structure"
"unicode/utf8"
)
// main exercises assorted examples: substring search, UTF-8 decoding,
// and slice/array aliasing. A1, M1, S3, S4, LengthOfNonRepeatingSubStr,
// and UpdateSlice come from the dot-imported structure package —
// presumably demo routines; confirm against that package.
func main() {
	A1()
	M1()
	fmt.Println(LengthOfNonRepeatingSubStr("abcasbcs"))
	fmt.Println(LengthOfNonRepeatingSubStr("你好呀"))
	fmt.Println("Rune count:", utf8.RuneCountInString("s"))
	// Decode and print a byte slice rune by rune.
	bytes := []byte("water")
	for len(bytes) > 0 {
		ch, size := utf8.DecodeRune(bytes)
		bytes = bytes[size:]
		fmt.Printf("%c", ch)
	}
	// A slice shares its array's backing storage, so a mutation through
	// the slice is visible through the array too.
	var arr = [5]int{1, 2, 3, 4, 5}
	s := arr[:]
	UpdateSlice(s)
	fmt.Println(s)   // [100,2,3,4,5]
	fmt.Println(arr) // [100,2,3,4,5]
	S3()
	S4()
}
|
package peach
import (
"fmt"
"testing"
"github.com/zdao-pro/sky_blue/pkg/peach"
)
// jsonData mirrors the JSON config document fetched in TestJson.
type jsonData struct {
	A string `json:"a"`
	B int    `json:"b"`
}
// TestString fetches a raw config value and prints it.
// NOTE(review): the error from String() is discarded and Init is
// commented out — this appears to rely on state from another test or
// to print an empty value; confirm intent.
func TestString(t *testing.T) {
	// peach.Init(peach.PeachDriverApollo, "zdao_backend.sky_blue")
	a, _ := peach.Get("db_ms_wallet.yaml").String()
	fmt.Println(a)
}
// TestJson verifies that a JSON config value can be fetched from Apollo
// and unmarshaled into a struct.
func TestJson(t *testing.T) {
	peach.Init(peach.PeachDriverApollo, "zdao_backend.sky_blue")
	var j jsonData
	err := peach.Get("json.json").UnmarshalJSON(&j)
	if err != nil {
		// Fail this test instead of panicking the whole test binary
		// (the original used panic(err)).
		t.Fatal(err)
	}
	fmt.Println(j)
}
// TestInt fetches an integer config value and prints it.
// NOTE(review): the error is discarded, so a missing or non-numeric key
// silently prints the zero value — confirm intent.
func TestInt(t *testing.T) {
	peach.Init(peach.PeachDriverApollo, "zdao_backend.sky_blue")
	a, _ := peach.Get("test_int").Int()
	fmt.Println(a)
}
|
package valueobject
// ValueObject interface must be implemented by any aggregate value object to verify structural equality between two value objects.
type ValueObject interface {
	// Equals reports whether other is structurally equal to this value.
	Equals(other ValueObject) bool
}
|
package subscriber
import (
"context"
"fmt"
"github.com/Whisker17/goMicroDemo/proto/model"
)
func Handler(ctx context.Context, msg *model.SayParam) error {
fmt.Printf("Received message: %s \n", msg.Msg)
return nil
} |
package requests
// BaseRequest carries the field common to every API request; Action
// identifies the operation to perform.
type BaseRequest struct {
	Action string `json:"action" mapstructure:"action"`
}
|
package 一维数组
import "fmt"
// --------------------------------------------------- 1. Dynamic programming (start) ---------------------------------------------------

// trap solves "Trapping Rain Water" by dynamic programming, summing the
// water column above every position: the column at i holds
// min(highest bar to its left, highest bar to its right) - heights[i].
func trap(heights []int) int {
	n := len(heights)
	// leftMax[pos] is the maximum of heights[:pos] (positions are
	// 1-based, with sentinel zeros at both ends).
	leftMax := make([]int, n+2)
	for i := 0; i < n; i++ {
		leftMax[i+1] = max(leftMax[i], heights[i])
	}
	// rightMax[pos] is the maximum of heights[pos-1:].
	rightMax := make([]int, n+2)
	for i := n - 1; i >= 0; i-- {
		rightMax[i+1] = max(rightMax[i+2], heights[i])
	}
	// Sum each column's trapped water; both maxima include heights[i]
	// itself, so every term is non-negative.
	total := 0
	for i := 0; i < n; i++ {
		total += min(leftMax[i+1], rightMax[i+1]) - heights[i]
	}
	return total
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// --------------------------------------------------- 1. 动态规划(结束) ---------------------------------------------------
// --------------------------------------------------- 2. 最小栈(开始) ---------------------------------------------------
// trap computes how much rain water the given bar heights can hold
// ("Trapping Rain Water").
//
// Monotonic-stack approach (horizontal accounting): scan left to right
// keeping a stack of indices whose heights strictly decrease. When the
// current bar is at least as tall as the stack top, the top becomes the
// floor of a horizontal pool bounded by the current bar on the right and
// the next stack entry on the left; its volume is
// (distance between bounds - 1) * (shorter bound height - floor height).
// A strictly-decreasing or non-increasing stack both work; they differ
// only in whether the pop condition uses <= or <.
//
// NOTE(review): this file also defines trap and min above (the DP
// version), so the package cannot compile with both present — presumably
// these are alternative solutions kept side by side for reference.
// Confirm before building.
func trap(heights []int) int {
	stack := make([]int, 0) // indices of bars; heights strictly decreasing
	water := 0
	for i, h := range heights {
		// Drain every stacked bar that the current one matches or tops.
		for len(stack) > 0 && heights[stack[len(stack)-1]] <= h {
			floor := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			if len(stack) == 0 {
				// No left bound remains: nothing can pool over this floor.
				break
			}
			left := stack[len(stack)-1]
			depth := min(heights[left], h) - heights[floor]
			width := i - left - 1
			water += depth * width
		}
		stack = append(stack, i)
	}
	return water
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// --------------------------------------------------- 2. 最小栈(结束) ---------------------------------------------------
|
package db
import (
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
// ConnectGORM opens a GORM connection to the "video" MySQL database and
// returns the handle, panicking if the connection cannot be established.
//
// NOTE(review): credentials and the address are hard-coded; move them to
// configuration/environment before production use. Also, the bracketed
// host in "tcp([mysql]:3306)" looks like an unfilled placeholder (a
// plain hostname would be written "tcp(mysql:3306)"; brackets are only
// valid for IPv6 literals in this DSN format) — confirm against the
// deployment config.
func ConnectGORM() *gorm.DB {
	dialect := "mysql"
	user := "root"
	pass := "password"
	protocol := "tcp([mysql]:3306)"
	dbName := "video"
	dsn := user + ":" + pass + "@" + protocol + "/" + dbName + "?parseTime=true&loc=Asia%2FTokyo"
	db, err := gorm.Open(dialect, dsn)
	if err != nil {
		panic(err.Error())
	}
	return db
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/client-go/tools/events"
"k8s.io/kubernetes/pkg/scheduler/profile"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/component-base/featuregate"
featuregatetesting "k8s.io/component-base/featuregate/testing"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/core"
)
// testCase describes one scheduler backward-compatibility scenario:
// a serialized v1 Policy plus the plugin and extender configuration that
// parsing it is expected to produce.
type testCase struct {
	// name labels the scenario (typically the release whose JSON is pinned).
	name string
	// JSON is the serialized v1 Policy fed to the scheduler.
	JSON string
	// featureGates are feature gates the case expects to be set while it
	// runs — usage is outside this view; confirm against the test body.
	featureGates map[featuregate.Feature]bool
	// wantPlugins maps each extension point name to the plugins expected there.
	wantPlugins map[string][]config.Plugin
	// wantExtenders is the expected extender configuration parsed from the policy.
	wantExtenders []config.Extender
}
func TestCompatibility_v1_Scheduler(t *testing.T) {
// Add serialized versions of scheduler config that exercise available options to ensure compatibility between releases
testcases := []testCase{
// This is a special test for the "composite" predicate "GeneralPredicate". GeneralPredicate is a combination
// of predicates, and here we test that if given, it is mapped to the set of plugins that should be executed.
{
name: "GeneralPredicate",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "GeneralPredicates"}
],
"priorities": [
]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodeResourcesFit"},
{Name: "NodePorts"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeResourcesFit"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "TaintToleration"},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
},
// This is a special test for the case where a policy is specified without specifying any filters.
{
name: "MandatoryFilters",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
],
"priorities": [
]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "TaintToleration"},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.0",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsPorts"},
{"name": "NoDiskConflict"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "LeastRequestedPriority", "weight": 1},
{"name": "ServiceSpreadingPriority", "weight": 2},
{"name": "TestServiceAntiAffinity", "weight": 3, "argument": {"serviceAntiAffinity": {"label": "zone"}}},
{"name": "TestLabelPreference", "weight": 4, "argument": {"labelPreference": {"label": "bar", "presence":true}}}
]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
},
"PreScorePlugin": {{Name: "DefaultPodTopologySpread"}},
"ScorePlugin": {
{Name: "NodeResourcesLeastAllocated", Weight: 1},
{Name: "NodeLabel", Weight: 4},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "ServiceAffinity", Weight: 3},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.1",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsHostPorts"},
{"name": "PodFitsResources"},
{"name": "NoDiskConflict"},
{"name": "HostName"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "TestServiceAntiAffinity1", "weight": 3, "argument": {"serviceAntiAffinity": {"label": "zone"}}},
{"name": "TestServiceAntiAffinity2", "weight": 3, "argument": {"serviceAntiAffinity": {"label": "region"}}},
{"name": "TestLabelPreference1", "weight": 4, "argument": {"labelPreference": {"label": "bar", "presence":true}}},
{"name": "TestLabelPreference2", "weight": 4, "argument": {"labelPreference": {"label": "foo", "presence":false}}}
]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
},
"PreScorePlugin": {{Name: "DefaultPodTopologySpread"}},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeLabel", Weight: 8}, // Weight is 4 * number of LabelPreference priorities
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "ServiceAffinity", Weight: 6}, // Weight is the 3 * number of custom ServiceAntiAffinity priorities
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.2",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "TestServiceAntiAffinity", "weight": 3, "argument": {"serviceAntiAffinity": {"label": "zone"}}},
{"name": "TestLabelPreference", "weight": 4, "argument": {"labelPreference": {"label": "bar", "presence":true}}}
]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeZone"},
},
"PreScorePlugin": {{Name: "DefaultPodTopologySpread"}},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodeLabel", Weight: 4},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "ServiceAffinity", Weight: 3},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.3",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2}
]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.4",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2}
]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.7",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"BindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.7 was missing json tags on the BindVerb field and required "BindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
}},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.8",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.8 became case-insensitive and tolerated "bindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
}},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.9",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "VolumeBinding"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
"ReservePlugin": {{Name: "VolumeBinding"}},
"UnreservePlugin": {{Name: "VolumeBinding"}},
"PreBindPlugin": {{Name: "VolumeBinding"}},
"PostBindPlugin": {{Name: "VolumeBinding"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.9 was case-insensitive and tolerated "bindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
}},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.10",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true,
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
"ignorable":true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "VolumeBinding"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
"ReservePlugin": {{Name: "VolumeBinding"}},
"UnreservePlugin": {{Name: "VolumeBinding"}},
"PreBindPlugin": {{Name: "VolumeBinding"}},
"PostBindPlugin": {{Name: "VolumeBinding"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.10 was case-insensitive and tolerated "bindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
ManagedResources: []config.ExtenderManagedResource{{Name: "example.com/foo", IgnoredByScheduler: true}},
Ignorable: true,
}},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.11",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2},
{
"name": "RequestedToCapacityRatioPriority",
"weight": 2,
"argument": {
"requestedToCapacityRatioArguments": {
"shape": [
{"utilization": 0, "score": 0},
{"utilization": 50, "score": 7}
]
}
}}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true,
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
"ignorable":true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "VolumeBinding"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "RequestedToCapacityRatio", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
"ReservePlugin": {{Name: "VolumeBinding"}},
"UnreservePlugin": {{Name: "VolumeBinding"}},
"PreBindPlugin": {{Name: "VolumeBinding"}},
"PostBindPlugin": {{Name: "VolumeBinding"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.11 restored case-sensitivity, but allowed either "BindVerb" or "bindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
ManagedResources: []config.ExtenderManagedResource{{Name: "example.com/foo", IgnoredByScheduler: true}},
Ignorable: true,
}},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
{
name: "1.12",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MaxCSIVolumeCountPred"},
{"name": "MatchInterPodAffinity"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2},
{
"name": "RequestedToCapacityRatioPriority",
"weight": 2,
"argument": {
"requestedToCapacityRatioArguments": {
"shape": [
{"utilization": 0, "score": 0},
{"utilization": 50, "score": 7}
]
}
}}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true,
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
"ignorable":true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "VolumeBinding"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "NodeVolumeLimits"},
{Name: "AzureDiskLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "RequestedToCapacityRatio", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
"ReservePlugin": {{Name: "VolumeBinding"}},
"UnreservePlugin": {{Name: "VolumeBinding"}},
"PreBindPlugin": {{Name: "VolumeBinding"}},
"PostBindPlugin": {{Name: "VolumeBinding"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.11 restored case-sensitivity, but allowed either "BindVerb" or "bindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
ManagedResources: []config.ExtenderManagedResource{{Name: "example.com/foo", IgnoredByScheduler: true}},
Ignorable: true,
}},
},
{
name: "1.14",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MaxCSIVolumeCountPred"},
{"name": "MaxCinderVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2},
{
"name": "RequestedToCapacityRatioPriority",
"weight": 2,
"argument": {
"requestedToCapacityRatioArguments": {
"shape": [
{"utilization": 0, "score": 0},
{"utilization": 50, "score": 7}
]
}
}}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true,
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
"ignorable":true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "VolumeBinding"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "NodeVolumeLimits"},
{Name: "AzureDiskLimits"},
{Name: "CinderLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "RequestedToCapacityRatio", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
"ReservePlugin": {{Name: "VolumeBinding"}},
"UnreservePlugin": {{Name: "VolumeBinding"}},
"PreBindPlugin": {{Name: "VolumeBinding"}},
"PostBindPlugin": {{Name: "VolumeBinding"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.11 restored case-sensitivity, but allowed either "BindVerb" or "bindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
ManagedResources: []config.ExtenderManagedResource{{Name: "example.com/foo", IgnoredByScheduler: true}},
Ignorable: true,
}},
},
{
name: "1.16",
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
{"name": "MaxCSIVolumeCountPred"},
{"name": "MaxCinderVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "CheckVolumeBinding"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2},
{
"name": "RequestedToCapacityRatioPriority",
"weight": 2,
"argument": {
"requestedToCapacityRatioArguments": {
"shape": [
{"utilization": 0, "score": 0},
{"utilization": 50, "score": 7}
],
"resources": [
{"name": "intel.com/foo", "weight": 3},
{"name": "intel.com/bar", "weight": 5}
]
}
}}
],"extenders": [{
"urlPrefix": "/prefix",
"filterVerb": "filter",
"prioritizeVerb": "prioritize",
"weight": 1,
"bindVerb": "bind",
"enableHttps": true,
"tlsConfig": {"Insecure":true},
"httpTimeout": 1,
"nodeCacheCapable": true,
"managedResources": [{"name":"example.com/foo","ignoredByScheduler":true}],
"ignorable":true
}]
}`,
wantPlugins: map[string][]config.Plugin{
"QueueSortPlugin": {{Name: "PrioritySort"}},
"PreFilterPlugin": {
{Name: "NodePorts"},
{Name: "NodeResourcesFit"},
{Name: "ServiceAffinity"},
{Name: "VolumeBinding"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
{Name: "NodeName"},
{Name: "NodePorts"},
{Name: "NodeAffinity"},
{Name: "NodeResourcesFit"},
{Name: "VolumeRestrictions"},
{Name: "TaintToleration"},
{Name: "NodeLabel"},
{Name: "ServiceAffinity"},
{Name: "EBSLimits"},
{Name: "GCEPDLimits"},
{Name: "NodeVolumeLimits"},
{Name: "AzureDiskLimits"},
{Name: "CinderLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 2},
{Name: "ImageLocality", Weight: 2},
{Name: "InterPodAffinity", Weight: 2},
{Name: "NodeResourcesLeastAllocated", Weight: 2},
{Name: "NodeResourcesMostAllocated", Weight: 2},
{Name: "NodeAffinity", Weight: 2},
{Name: "NodePreferAvoidPods", Weight: 2},
{Name: "RequestedToCapacityRatio", Weight: 2},
{Name: "DefaultPodTopologySpread", Weight: 2},
{Name: "TaintToleration", Weight: 2},
},
"BindPlugin": {{Name: "DefaultBinder"}},
"ReservePlugin": {{Name: "VolumeBinding"}},
"UnreservePlugin": {{Name: "VolumeBinding"}},
"PreBindPlugin": {{Name: "VolumeBinding"}},
"PostBindPlugin": {{Name: "VolumeBinding"}},
},
wantExtenders: []config.Extender{{
URLPrefix: "/prefix",
FilterVerb: "filter",
PrioritizeVerb: "prioritize",
Weight: 1,
BindVerb: "bind", // 1.11 restored case-sensitivity, but allowed either "BindVerb" or "bindVerb"
EnableHTTPS: true,
TLSConfig: &config.ExtenderTLSConfig{Insecure: true},
HTTPTimeout: 1,
NodeCacheCapable: true,
ManagedResources: []config.ExtenderManagedResource{{Name: "example.com/foo", IgnoredByScheduler: true}},
Ignorable: true,
}},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
for feature, value := range tc.featureGates {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, feature, value)()
}
policyConfigMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: "scheduler-custom-policy-config"},
Data: map[string]string{config.SchedulerPolicyConfigMapKey: tc.JSON},
}
client := fake.NewSimpleClientset(&policyConfigMap)
algorithmSrc := config.SchedulerAlgorithmSource{
Policy: &config.SchedulerPolicySource{
ConfigMap: &config.SchedulerPolicyConfigMapSource{
Namespace: policyConfigMap.Namespace,
Name: policyConfigMap.Name,
},
},
}
informerFactory := informers.NewSharedInformerFactory(client, 0)
recorderFactory := profile.NewRecorderFactory(events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")}))
sched, err := scheduler.New(
client,
informerFactory,
informerFactory.Core().V1().Pods(),
recorderFactory,
make(chan struct{}),
scheduler.WithAlgorithmSource(algorithmSrc),
)
if err != nil {
t.Fatalf("Error constructing: %v", err)
}
defProf := sched.Profiles["default-scheduler"]
gotPlugins := defProf.Framework.ListPlugins()
if diff := cmp.Diff(tc.wantPlugins, gotPlugins); diff != "" {
t.Errorf("unexpected plugins diff (-want, +got): %s", diff)
}
gotExtenders := sched.Algorithm.Extenders()
var wantExtenders []*core.HTTPExtender
for _, e := range tc.wantExtenders {
extender, err := core.NewHTTPExtender(&e)
if err != nil {
t.Errorf("Error transforming extender: %+v", e)
}
wantExtenders = append(wantExtenders, extender.(*core.HTTPExtender))
}
for i := range gotExtenders {
if !core.Equal(wantExtenders[i], gotExtenders[i].(*core.HTTPExtender)) {
t.Errorf("Got extender #%d %+v, want %+v", i, gotExtenders[i], wantExtenders[i])
}
}
})
}
}
// TestAlgorithmProviderCompatibility verifies that each algorithm provider
// (the built-in default when none is specified, the named default provider,
// and the cluster-autoscaler provider) keeps enabling a stable, expected set
// of framework plugins across releases.
func TestAlgorithmProviderCompatibility(t *testing.T) {
	// Add serialized versions of scheduler config that exercise available options to ensure compatibility between releases
	defaultPlugins := map[string][]config.Plugin{
		"QueueSortPlugin": {
			{Name: "PrioritySort"},
		},
		"PreFilterPlugin": {
			{Name: "NodeResourcesFit"},
			{Name: "NodePorts"},
			{Name: "PodTopologySpread"},
			{Name: "InterPodAffinity"},
			{Name: "VolumeBinding"},
		},
		"FilterPlugin": {
			{Name: "NodeUnschedulable"},
			{Name: "NodeResourcesFit"},
			{Name: "NodeName"},
			{Name: "NodePorts"},
			{Name: "NodeAffinity"},
			{Name: "VolumeRestrictions"},
			{Name: "TaintToleration"},
			{Name: "EBSLimits"},
			{Name: "GCEPDLimits"},
			{Name: "NodeVolumeLimits"},
			{Name: "AzureDiskLimits"},
			{Name: "VolumeBinding"},
			{Name: "VolumeZone"},
			{Name: "PodTopologySpread"},
			{Name: "InterPodAffinity"},
		},
		"PreScorePlugin": {
			{Name: "InterPodAffinity"},
			{Name: "PodTopologySpread"},
			{Name: "TaintToleration"},
			{Name: "DefaultPodTopologySpread"},
		},
		"ScorePlugin": {
			{Name: "NodeResourcesBalancedAllocation", Weight: 1},
			{Name: "ImageLocality", Weight: 1},
			{Name: "InterPodAffinity", Weight: 1},
			{Name: "NodeResourcesLeastAllocated", Weight: 1},
			{Name: "NodeAffinity", Weight: 1},
			{Name: "NodePreferAvoidPods", Weight: 10000},
			{Name: "PodTopologySpread", Weight: 2},
			{Name: "TaintToleration", Weight: 1},
			{Name: "DefaultPodTopologySpread", Weight: 1},
		},
		"BindPlugin":      {{Name: "DefaultBinder"}},
		"ReservePlugin":   {{Name: "VolumeBinding"}},
		"UnreservePlugin": {{Name: "VolumeBinding"}},
		"PreBindPlugin":   {{Name: "VolumeBinding"}},
		"PostBindPlugin":  {{Name: "VolumeBinding"}},
	}
	testcases := []struct {
		name        string
		provider    string
		wantPlugins map[string][]config.Plugin
	}{
		{
			name:        "No Provider specified",
			wantPlugins: defaultPlugins,
		},
		{
			name:        "DefaultProvider",
			provider:    config.SchedulerDefaultProviderName,
			wantPlugins: defaultPlugins,
		},
		{
			// The cluster-autoscaler provider is expected to differ from the
			// default only in scoring: NodeResourcesMostAllocated replaces
			// NodeResourcesLeastAllocated.
			name:     "ClusterAutoscalerProvider",
			provider: algorithmprovider.ClusterAutoscalerProvider,
			wantPlugins: map[string][]config.Plugin{
				"QueueSortPlugin": {
					{Name: "PrioritySort"},
				},
				"PreFilterPlugin": {
					{Name: "NodeResourcesFit"},
					{Name: "NodePorts"},
					{Name: "PodTopologySpread"},
					{Name: "InterPodAffinity"},
					{Name: "VolumeBinding"},
				},
				"FilterPlugin": {
					{Name: "NodeUnschedulable"},
					{Name: "NodeResourcesFit"},
					{Name: "NodeName"},
					{Name: "NodePorts"},
					{Name: "NodeAffinity"},
					{Name: "VolumeRestrictions"},
					{Name: "TaintToleration"},
					{Name: "EBSLimits"},
					{Name: "GCEPDLimits"},
					{Name: "NodeVolumeLimits"},
					{Name: "AzureDiskLimits"},
					{Name: "VolumeBinding"},
					{Name: "VolumeZone"},
					{Name: "PodTopologySpread"},
					{Name: "InterPodAffinity"},
				},
				"PreScorePlugin": {
					{Name: "InterPodAffinity"},
					{Name: "PodTopologySpread"},
					{Name: "TaintToleration"},
					{Name: "DefaultPodTopologySpread"},
				},
				"ScorePlugin": {
					{Name: "NodeResourcesBalancedAllocation", Weight: 1},
					{Name: "ImageLocality", Weight: 1},
					{Name: "InterPodAffinity", Weight: 1},
					{Name: "NodeResourcesMostAllocated", Weight: 1},
					{Name: "NodeAffinity", Weight: 1},
					{Name: "NodePreferAvoidPods", Weight: 10000},
					{Name: "PodTopologySpread", Weight: 2},
					{Name: "TaintToleration", Weight: 1},
					{Name: "DefaultPodTopologySpread", Weight: 1},
				},
				"ReservePlugin":   {{Name: "VolumeBinding"}},
				"UnreservePlugin": {{Name: "VolumeBinding"}},
				"PreBindPlugin":   {{Name: "VolumeBinding"}},
				"BindPlugin":      {{Name: "DefaultBinder"}},
				"PostBindPlugin":  {{Name: "VolumeBinding"}},
			},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// Only set an algorithm source when the case names a provider, so
			// the first case exercises the scheduler's built-in default.
			var opts []scheduler.Option
			if len(tc.provider) != 0 {
				opts = append(opts, scheduler.WithAlgorithmSource(config.SchedulerAlgorithmSource{
					Provider: &tc.provider,
				}))
			}
			client := fake.NewSimpleClientset()
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			recorderFactory := profile.NewRecorderFactory(events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")}))
			sched, err := scheduler.New(
				client,
				informerFactory,
				informerFactory.Core().V1().Pods(),
				recorderFactory,
				make(chan struct{}),
				opts...,
			)
			if err != nil {
				t.Fatalf("Error constructing: %v", err)
			}
			// The default profile must expose exactly the provider's plugin set.
			defProf := sched.Profiles["default-scheduler"]
			gotPlugins := defProf.ListPlugins()
			if diff := cmp.Diff(tc.wantPlugins, gotPlugins); diff != "" {
				t.Errorf("unexpected plugins diff (-want, +got): %s", diff)
			}
		})
	}
}
// TestPluginsConfigurationCompatibility verifies that profile-level plugin
// enable/disable lists and per-plugin configuration args keep producing the
// expected framework composition and plugin config across releases.
func TestPluginsConfigurationCompatibility(t *testing.T) {
	defaultPlugins := map[string][]config.Plugin{
		"QueueSortPlugin": {
			{Name: "PrioritySort"},
		},
		"PreFilterPlugin": {
			{Name: "NodeResourcesFit"},
			{Name: "NodePorts"},
			{Name: "PodTopologySpread"},
			{Name: "InterPodAffinity"},
			{Name: "VolumeBinding"},
		},
		"FilterPlugin": {
			{Name: "NodeUnschedulable"},
			{Name: "NodeResourcesFit"},
			{Name: "NodeName"},
			{Name: "NodePorts"},
			{Name: "NodeAffinity"},
			{Name: "VolumeRestrictions"},
			{Name: "TaintToleration"},
			{Name: "EBSLimits"},
			{Name: "GCEPDLimits"},
			{Name: "NodeVolumeLimits"},
			{Name: "AzureDiskLimits"},
			{Name: "VolumeBinding"},
			{Name: "VolumeZone"},
			{Name: "PodTopologySpread"},
			{Name: "InterPodAffinity"},
		},
		"PreScorePlugin": {
			{Name: "InterPodAffinity"},
			{Name: "PodTopologySpread"},
			{Name: "TaintToleration"},
			{Name: "DefaultPodTopologySpread"},
		},
		"ScorePlugin": {
			{Name: "NodeResourcesBalancedAllocation", Weight: 1},
			{Name: "ImageLocality", Weight: 1},
			{Name: "InterPodAffinity", Weight: 1},
			{Name: "NodeResourcesLeastAllocated", Weight: 1},
			{Name: "NodeAffinity", Weight: 1},
			{Name: "NodePreferAvoidPods", Weight: 10000},
			{Name: "PodTopologySpread", Weight: 2},
			{Name: "TaintToleration", Weight: 1},
			{Name: "DefaultPodTopologySpread", Weight: 1},
		},
		"ReservePlugin":   {{Name: "VolumeBinding"}},
		"UnreservePlugin": {{Name: "VolumeBinding"}},
		"PreBindPlugin":   {{Name: "VolumeBinding"}},
		"BindPlugin":      {{Name: "DefaultBinder"}},
		"PostBindPlugin":  {{Name: "VolumeBinding"}},
	}
	testcases := []struct {
		name             string
		plugins          config.Plugins
		wantPlugins      map[string][]config.Plugin
		pluginConfig     []config.PluginConfig
		wantPluginConfig []config.PluginConfig
	}{
		{
			name:             "default plugins",
			wantPlugins:      defaultPlugins,
			wantPluginConfig: nil,
		},
		{
			// Customized args must round-trip unchanged: wantPluginConfig is
			// identical to pluginConfig.
			name:        "default plugins with customized plugin config",
			wantPlugins: defaultPlugins,
			pluginConfig: []config.PluginConfig{
				{
					Name: "NodeResourcesFit",
					Args: &config.NodeResourcesFitArgs{
						IgnoredResources: []string{"foo", "bar"},
					},
				},
				{
					Name: "PodTopologySpread",
					Args: &config.PodTopologySpreadArgs{
						DefaultConstraints: []v1.TopologySpreadConstraint{
							{
								MaxSkew:           1,
								TopologyKey:       "foo",
								WhenUnsatisfiable: v1.DoNotSchedule,
							},
							{
								MaxSkew:           10,
								TopologyKey:       "bar",
								WhenUnsatisfiable: v1.ScheduleAnyway,
							},
						},
					},
				},
				{
					Name: "RequestedToCapacityRatio",
					Args: &config.RequestedToCapacityRatioArgs{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 5, Score: 5},
						},
						Resources: []config.ResourceSpec{
							{Name: "cpu", Weight: 10},
						},
					},
				},
				{
					Name: "InterPodAffinity",
					Args: &config.InterPodAffinityArgs{
						HardPodAffinityWeight: 100,
					},
				},
				{
					Name: "NodeLabel",
					Args: &config.NodeLabelArgs{
						PresentLabels:           []string{"foo", "bar"},
						AbsentLabels:            []string{"apple"},
						PresentLabelsPreference: []string{"dog"},
						AbsentLabelsPreference:  []string{"cat"},
					},
				},
				{
					Name: "ServiceAffinity",
					Args: &config.ServiceAffinityArgs{
						AffinityLabels:               []string{"foo", "bar"},
						AntiAffinityLabelsPreference: []string{"disk", "flash"},
					},
				},
				{
					Name: "VolumeBinding",
					Args: &config.VolumeBindingArgs{
						BindTimeoutSeconds: 300,
					},
				},
			},
			wantPluginConfig: []config.PluginConfig{
				{
					Name: "NodeResourcesFit",
					Args: &config.NodeResourcesFitArgs{
						IgnoredResources: []string{"foo", "bar"},
					},
				},
				{
					Name: "PodTopologySpread",
					Args: &config.PodTopologySpreadArgs{
						DefaultConstraints: []v1.TopologySpreadConstraint{
							{
								MaxSkew:           1,
								TopologyKey:       "foo",
								WhenUnsatisfiable: v1.DoNotSchedule,
							},
							{
								MaxSkew:           10,
								TopologyKey:       "bar",
								WhenUnsatisfiable: v1.ScheduleAnyway,
							},
						},
					},
				},
				{
					Name: "RequestedToCapacityRatio",
					Args: &config.RequestedToCapacityRatioArgs{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 5, Score: 5},
						},
						Resources: []config.ResourceSpec{
							{Name: "cpu", Weight: 10},
						},
					},
				},
				{
					Name: "InterPodAffinity",
					Args: &config.InterPodAffinityArgs{
						HardPodAffinityWeight: 100,
					},
				},
				{
					Name: "NodeLabel",
					Args: &config.NodeLabelArgs{
						PresentLabels:           []string{"foo", "bar"},
						AbsentLabels:            []string{"apple"},
						PresentLabelsPreference: []string{"dog"},
						AbsentLabelsPreference:  []string{"cat"},
					},
				},
				{
					Name: "ServiceAffinity",
					Args: &config.ServiceAffinityArgs{
						AffinityLabels:               []string{"foo", "bar"},
						AntiAffinityLabelsPreference: []string{"disk", "flash"},
					},
				},
				{
					Name: "VolumeBinding",
					Args: &config.VolumeBindingArgs{
						BindTimeoutSeconds: 300,
					},
				},
			},
		},
		{
			// Disabling every default leaves only the mandatory queue-sort and
			// bind plugins.
			name: "disable some default plugins",
			plugins: config.Plugins{
				PreFilter: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "NodeResourcesFit"},
						{Name: "NodePorts"},
						{Name: "InterPodAffinity"},
						{Name: "PodTopologySpread"},
						{Name: "VolumeBinding"},
					},
				},
				Filter: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "NodeUnschedulable"},
						{Name: "NodeResourcesFit"},
						{Name: "NodeName"},
						{Name: "NodePorts"},
						{Name: "NodeAffinity"},
						{Name: "VolumeRestrictions"},
						{Name: "TaintToleration"},
						{Name: "EBSLimits"},
						{Name: "GCEPDLimits"},
						{Name: "NodeVolumeLimits"},
						{Name: "AzureDiskLimits"},
						{Name: "VolumeBinding"},
						{Name: "VolumeZone"},
						{Name: "InterPodAffinity"},
						{Name: "PodTopologySpread"},
					},
				},
				PreScore: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "InterPodAffinity"},
						{Name: "DefaultPodTopologySpread"},
						{Name: "TaintToleration"},
						{Name: "PodTopologySpread"},
					},
				},
				Score: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "NodeResourcesBalancedAllocation"},
						{Name: "ImageLocality"},
						{Name: "InterPodAffinity"},
						{Name: "NodeResourcesLeastAllocated"},
						{Name: "NodeAffinity"},
						{Name: "NodePreferAvoidPods"},
						{Name: "DefaultPodTopologySpread"},
						{Name: "TaintToleration"},
						{Name: "PodTopologySpread"},
					},
				},
				PreBind: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "VolumeBinding"},
					},
				},
				PostBind: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "VolumeBinding"},
					},
				},
				Reserve: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "VolumeBinding"},
					},
				},
				Unreserve: &config.PluginSet{
					Disabled: []config.Plugin{
						{Name: "VolumeBinding"},
					},
				},
			},
			wantPlugins: map[string][]config.Plugin{
				"QueueSortPlugin": {
					{Name: "PrioritySort"},
				},
				"BindPlugin": {{Name: "DefaultBinder"}},
			},
		},
		{
			// Disabling "*" then re-enabling must honor the user-specified
			// order and weights, not the default ones.
			name: "reverse default plugins order with changing score weight",
			plugins: config.Plugins{
				QueueSort: &config.PluginSet{
					Enabled: []config.Plugin{
						{Name: "PrioritySort"},
					},
					Disabled: []config.Plugin{
						{Name: "*"},
					},
				},
				PreFilter: &config.PluginSet{
					Enabled: []config.Plugin{
						{Name: "InterPodAffinity"},
						{Name: "NodePorts"},
						{Name: "NodeResourcesFit"},
					},
					Disabled: []config.Plugin{
						{Name: "*"},
					},
				},
				Filter: &config.PluginSet{
					Enabled: []config.Plugin{
						{Name: "InterPodAffinity"},
						{Name: "VolumeZone"},
						{Name: "VolumeBinding"},
						{Name: "AzureDiskLimits"},
						{Name: "NodeVolumeLimits"},
						{Name: "GCEPDLimits"},
						{Name: "EBSLimits"},
						{Name: "TaintToleration"},
						{Name: "VolumeRestrictions"},
						{Name: "NodeAffinity"},
						{Name: "NodePorts"},
						{Name: "NodeName"},
						{Name: "NodeResourcesFit"},
						{Name: "NodeUnschedulable"},
					},
					Disabled: []config.Plugin{
						{Name: "*"},
					},
				},
				PreScore: &config.PluginSet{
					Enabled: []config.Plugin{
						{Name: "TaintToleration"},
						{Name: "DefaultPodTopologySpread"},
						{Name: "InterPodAffinity"},
					},
					Disabled: []config.Plugin{
						{Name: "*"},
					},
				},
				Score: &config.PluginSet{
					Enabled: []config.Plugin{
						{Name: "TaintToleration", Weight: 24},
						{Name: "DefaultPodTopologySpread", Weight: 24},
						{Name: "NodePreferAvoidPods", Weight: 24},
						{Name: "NodeAffinity", Weight: 24},
						{Name: "NodeResourcesLeastAllocated", Weight: 24},
						{Name: "InterPodAffinity", Weight: 24},
						{Name: "ImageLocality", Weight: 24},
						{Name: "NodeResourcesBalancedAllocation", Weight: 24},
					},
					Disabled: []config.Plugin{
						{Name: "*"},
					},
				},
				Bind: &config.PluginSet{
					Enabled:  []config.Plugin{{Name: "DefaultBinder"}},
					Disabled: []config.Plugin{{Name: "*"}},
				},
			},
			wantPlugins: map[string][]config.Plugin{
				"QueueSortPlugin": {
					{Name: "PrioritySort"},
				},
				"PreFilterPlugin": {
					{Name: "InterPodAffinity"},
					{Name: "NodePorts"},
					{Name: "NodeResourcesFit"},
				},
				"FilterPlugin": {
					{Name: "InterPodAffinity"},
					{Name: "VolumeZone"},
					{Name: "VolumeBinding"},
					{Name: "AzureDiskLimits"},
					{Name: "NodeVolumeLimits"},
					{Name: "GCEPDLimits"},
					{Name: "EBSLimits"},
					{Name: "TaintToleration"},
					{Name: "VolumeRestrictions"},
					{Name: "NodeAffinity"},
					{Name: "NodePorts"},
					{Name: "NodeName"},
					{Name: "NodeResourcesFit"},
					{Name: "NodeUnschedulable"},
				},
				"PreScorePlugin": {
					{Name: "TaintToleration"},
					{Name: "DefaultPodTopologySpread"},
					{Name: "InterPodAffinity"},
				},
				"ScorePlugin": {
					{Name: "TaintToleration", Weight: 24},
					{Name: "DefaultPodTopologySpread", Weight: 24},
					{Name: "NodePreferAvoidPods", Weight: 24},
					{Name: "NodeAffinity", Weight: 24},
					{Name: "NodeResourcesLeastAllocated", Weight: 24},
					{Name: "InterPodAffinity", Weight: 24},
					{Name: "ImageLocality", Weight: 24},
					{Name: "NodeResourcesBalancedAllocation", Weight: 24},
				},
				"ReservePlugin":   {{Name: "VolumeBinding"}},
				"UnreservePlugin": {{Name: "VolumeBinding"}},
				"PreBindPlugin":   {{Name: "VolumeBinding"}},
				"BindPlugin":      {{Name: "DefaultBinder"}},
				"PostBindPlugin":  {{Name: "VolumeBinding"}},
			},
			wantPluginConfig: nil,
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			client := fake.NewSimpleClientset()
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			recorderFactory := profile.NewRecorderFactory(events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")}))
			sched, err := scheduler.New(
				client,
				informerFactory,
				informerFactory.Core().V1().Pods(),
				recorderFactory,
				make(chan struct{}),
				scheduler.WithProfiles(config.KubeSchedulerProfile{
					SchedulerName: v1.DefaultSchedulerName,
					Plugins:       &tc.plugins,
					PluginConfig:  tc.pluginConfig,
				}),
				// The capturer sees the profile after defaulting/merging, so it
				// is where the resulting PluginConfig is checked.
				scheduler.WithBuildFrameworkCapturer(func(p config.KubeSchedulerProfile) {
					if p.SchedulerName != v1.DefaultSchedulerName {
						t.Errorf("unexpected scheduler name (want %q, got %q)", v1.DefaultSchedulerName, p.SchedulerName)
					}
					if diff := cmp.Diff(tc.wantPluginConfig, p.PluginConfig); diff != "" {
						t.Errorf("unexpected plugins diff (-want, +got): %s", diff)
					}
				}),
			)
			if err != nil {
				t.Fatalf("Error constructing: %v", err)
			}
			defProf := sched.Profiles[v1.DefaultSchedulerName]
			gotPlugins := defProf.ListPlugins()
			if diff := cmp.Diff(tc.wantPlugins, gotPlugins); diff != "" {
				t.Errorf("unexpected plugins diff (-want, +got): %s", diff)
			}
		})
	}
}
|
package library
import (
"fmt"
devworkspace "github.com/devfile/api/pkg/apis/workspaces/v1alpha2"
"io/ioutil"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"os"
"sigs.k8s.io/controller-runtime"
"strings"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
	// scheme aggregates the API types that clients built by this package
	// can encode/decode: core Kubernetes types plus the DevWorkspace types.
	scheme = runtime.NewScheme()
)

// init registers the client-go core types and the DevWorkspace API types
// into the shared scheme; utilruntime.Must panics at startup if either
// registration fails.
func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(devworkspace.AddToScheme(scheme))
}
// GetCurrentNamespace returns the namespace this container is running in,
// read from the service-account mount that Kubernetes injects into pods.
// The returned name has surrounding whitespace trimmed.
//
// If the file does not exist (i.e. we are not running inside a pod), the
// returned error carries an explanatory message and wraps the underlying
// error so callers can still detect it with errors.Is(err, fs.ErrNotExist).
func GetCurrentNamespace() (string, error) {
	nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
	if err != nil {
		if os.IsNotExist(err) {
			// Wrap instead of discarding the cause: the message stays
			// descriptive while the original error remains inspectable.
			return "", fmt.Errorf("could not read namespace from mounted serviceaccount info: %w", err)
		}
		return "", err
	}
	ns := strings.TrimSpace(string(nsBytes))
	return ns, nil
}
// GetK8sClient builds a controller-runtime client from the ambient
// configuration (as resolved by controllerruntime.GetConfig) using the
// package-level scheme, so DevWorkspace and core types are both usable.
func GetK8sClient() (client.Client, error) {
	cfg, err := controllerruntime.GetConfig()
	if err != nil {
		return nil, err
	}
	k8sClient, err := client.New(cfg, client.Options{Scheme: scheme})
	if err != nil {
		return nil, err
	}
	return k8sClient, nil
}
|
package goxtremio
import xms "github.com/emccode/goxtremio/api/v3"
// InitiatorGroupFolder is a pointer alias for the XMS API's IGFolder content.
type InitiatorGroupFolder *xms.IGFolder

// GetIGFolder returns a specific initiator group folder by name or ID.
func (c *Client) GetIGFolder(id string, name string) (InitiatorGroupFolder, error) {
	igf, err := c.api.GetIGFolder(id, name)
	if err != nil {
		return nil, err
	}
	return InitiatorGroupFolder(igf.Content), nil
}
// GetIGFolders returns refs to all initiator group folders.
func (c *Client) GetIGFolders() (Refs, error) {
	igFolders, err := c.api.GetIGFolders()
	if err != nil {
		return nil, err
	}
	return igFolders.Folders, nil
}
|
package shardkv
// Field names must start with capital letters,
// otherwise RPC will break.

//
// additional state to include in arguments to PutAppend RPC.
//
type PutAppendArgsImpl struct {
	// RequestID identifies the client request — presumably used by the
	// server to deduplicate retried RPCs; verify against the handler.
	RequestID int64
}
//
// additional state to include in arguments to Get RPC.
//
// Get currently needs no extra state beyond the common arguments.
type GetArgsImpl struct {
}
//
// for new RPCs that you add, declare types for arguments and reply
//

// ReceiveDataArgs carries one shard's data during shard hand-off: the shard
// index, its key/value pairs, and the request IDs already applied to that
// shard (so the receiver can keep deduplicating retried client requests).
type ReceiveDataArgs struct {
	RPCID      int64             // identifies this transfer RPC
	ShardIndex int               // which shard I give you
	Key2val    map[string]string // the shard's key/value data
	RequestIDs []int64           // all requests ID in this shard
}
type ReceiveDataReply struct{
} |
package actions
import (
"path/filepath"
"testing"
"github.com/gobuffalo/suite"
"github.com/gomods/athens/pkg/config"
)
var (
	// testConfigFile points at the repository's development config file,
	// relative to this package's directory.
	testConfigFile = filepath.Join("..", "..", "..", "config.dev.toml")
)

// ActionSuite embeds the buffalo action suite so this package can attach
// its own test helpers to the suite type.
type ActionSuite struct {
	*suite.Action
}
// Test_ActionSuite boots the application from the development config and
// runs the buffalo action suite against it.
func Test_ActionSuite(t *testing.T) {
	cfg, err := config.GetConf(testConfigFile)
	if err != nil {
		t.Fatalf("Unable to parse config file: %s", err.Error())
	}
	application, err := App(cfg)
	if err != nil {
		t.Fatal(err)
	}
	actionSuite := &ActionSuite{suite.NewAction(application)}
	suite.Run(t, actionSuite)
}
|
package stack
// Stack is a simple LIFO container for values of any type.
type Stack struct {
	arr []interface{}
}

// NewStack returns an empty stack ready for use.
func NewStack() *Stack {
	return &Stack{arr: []interface{}{}}
}

// Push places data on top of the stack.
func (s *Stack) Push(data interface{}) {
	s.arr = append(s.arr, data)
}

// Pop removes and returns the top element.
// On an empty stack it returns the empty string "".
func (s *Stack) Pop() interface{} {
	n := len(s.arr)
	if n == 0 {
		return ""
	}
	top := s.arr[n-1]
	s.arr = s.arr[:n-1]
	return top
}

// Top returns the top element without removing it.
// On an empty stack it returns the empty string "".
func (s *Stack) Top() interface{} {
	if n := len(s.arr); n > 0 {
		return s.arr[n-1]
	}
	return ""
}

// IsEmpty reports whether the stack holds no elements.
func (s *Stack) IsEmpty() bool {
	return len(s.arr) == 0
}
|
// Kubeaware brings cloud-native awareness to applications that do not
// support it out of the box. This matters once companies start porting
// their legacy systems onto platforms like Kubernetes.
package main
|
package utils
import (
"golang.org/x/crypto/bcrypt"
)
// HashPassword hashes the provided password.
// The hashing uses bcrypt with a default cost of 10.
// It panics if bcrypt fails to produce a hash.
func HashPassword(password string) string {
	// bcrypt.DefaultCost is 10.
	hashed, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	return string(hashed)
}
// CheckPassword compares whether or not the provided password matches the saved password hash for the provided username.
// It returns true only when the plaintext password matches the bcrypt hash.
func CheckPassword(hash string, password string) bool {
	// CompareHashAndPassword returns nil only on a successful match.
	return bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) == nil
}
|
package planets
import "errors"
// Sentinel errors for planet persistence; callers can compare against them
// (e.g. with errors.Is) to distinguish failure modes.
var (
	ErrPlanetNotAdded          = errors.New("could not save the planet")
	ErrPlanetsNotFound         = errors.New("planets not found")
	ErrPlanetNotFound          = errors.New("planet not found")
	ErrPlanetNotRemoved        = errors.New("could not delete the planet")
	ErrPlanetAlreadyRegistered = errors.New("planet already registered")
)
// Planet is the persistence model for a Star Wars planet (BSON-tagged for
// MongoDB storage).
type Planet struct {
	ID interface{} `bson:"_id,omitempty"`
	Name string `bson:"name"`
	Climate string `bson:"climate"`
	Terrain string `bson:"terrain"`
	Movies int `bson:"movies"`
}

// NewPlanet builds a Planet with the given attributes. The ID is left
// unset (zero value) so the database can assign one on insert.
func NewPlanet(name, climate, terrain string, movies int) Planet {
	planet := Planet{}
	planet.Name = name
	planet.Climate = climate
	planet.Terrain = terrain
	planet.Movies = movies
	return planet
}
// GetPlanetResponse is the JSON representation of a planet returned by the
// read endpoints (same fields as Planet, JSON-tagged instead of BSON).
type GetPlanetResponse struct {
	ID interface{} `json:"id"`
	Name string `json:"name"`
	Climate string `json:"climate"`
	Terrain string `json:"terrain"`
	Movies int `json:"movies"`
}
|
package bmmongo
import (
"github.com/alfredyang1986/blackmirror/bmmodel/request"
)
// BMMongo is the core persistence contract: insert, request-driven update
// and single-record lookup.
type BMMongo interface {
	InsertBMObject() error
	UpdateBMObject(request.Request) error
	FindOne(request.Request) error
}

// BMMongoMulti finds multiple records matching the request.
type BMMongoMulti interface {
	FindMulti(req request.Request) error
}

// BmMongoCover replaces ("covers") an existing stored object.
type BmMongoCover interface {
	CoverBMObject() error
}

// BMMongoDel deletes a single record matching the request.
type BMMongoDel interface {
	DeleteOne(request.Request) error
}

// BMMongoDelAll deletes all records matching the request.
type BMMongoDelAll interface {
	DeleteAll(request.Request) error
}

// BMMongoCount counts the records matching the request.
type BMMongoCount interface {
	FindCount(request.Request) (int, error)
}
|
/*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/hyperledger/fabric-contract-api-go/contractapi"
)
// SmartContract is the chaincode implementation for the roadbike supply
// chain: part maker (Shimano) -> bike maker (Specialized) -> store -> buyer.
type SmartContract struct {
	contractapi.Contract
}

// Shimano models the part manufacturer: per-part stock lists of serial
// numbers plus a running serial-number counter.
type Shimano struct {
	TrStock []string `json:"tStock"`
	BrStock []string `json:"bStock"`
	WhStock []string `json:"wStock"`
	SN int `json:"sn"`
}

// Specialized models the bike manufacturer: stocks of parts bought from
// Shimano, finished bikes, and its own serial-number counter.
type Specialized struct {
	TrStock []string `json:"tStock"`
	BrStock []string `json:"bStock"`
	WhStock []string `json:"wStock"`
	BikeStock []string `json:"bikeStock"`
	SN int `json:"sn"`
}

// BikeStore is a retail store holding finished bikes by serial number.
type BikeStore struct {
	StoreName string `json:"storeName"`
	BikeStock []string `json:"bikeStock"`
}

// Roadbike is the ledger record of a single bike, including its component
// serial numbers and full trade history.
type Roadbike struct {
	SerialNumber string `json:"serialNumber"`
	Model string `json:"model"`
	Colour string `json:"colour"`
	Owner string `json:"owner"`
	PlaceOfProduction string `json:"production"`
	DateOfProduction string `json:"productionDate"`
	BikeStore string `json:"bikeStore"`
	DateOfWarehousing string `json:"dateOfWarehousing"`
	DateOfPurchase string `json:"dateOfPurchase"`
	TradeHistory []Trade `json:"tradeHistory"`
	Transmission string `json:"transmission"`
	Brake string `json:"brake"`
	Wheel string `json:"wheel"`
	Price int `json:"price"`
}

// Part is the ledger record of a single manufactured component.
type Part struct {
	SerialNumber string `json:"serialNumber"`
	Model string `json:"model"`
	PlaceOfProduction string `json:"placeofProduction"`
	DateOfProduction string `json:"dateOfProduction"`
	Price int `json:"price"`
}

// Trade records one (attempted) sale of a bike, from opening to signing.
type Trade struct {
	BikeSN string `json:"bikeSN"`
	DateOfOpen string `json:"date"`
	DateOfSign string `json:"dateOfSign"`
	Seller string `json:"seller"`
	Buyer string `json:"buyer"`
	OriginalPrice int `json:"originalPrice"`
	LastPrice int `json:"lastPrice"`
	CurrentPrice int `json:"currentPrice"`
	State string `json:"state"`
}
// makeTrade opens a new trade for the roadbike stored under _sn, appends it
// to the global "TradeList" record and to the bike's own trade history, and
// returns the new trade.
func (s *SmartContract) makeTrade(ctx contractapi.TransactionContextInterface, _sn string, _seller string) (Trade, error) {
	// Bug fix: "2006-01-02" is Go's reference date layout; the original
	// "2020-01-01" is not a valid layout and produced a garbled date string.
	timeNow := time.Now().Format("2006-01-02")
	var roadbike Roadbike
	roadbikeBytes, err := ctx.GetStub().GetState(_sn)
	if err != nil {
		return Trade{}, fmt.Errorf("fail to read roadbike %s: %s", _sn, err.Error())
	}
	json.Unmarshal(roadbikeBytes, &roadbike)
	price := roadbike.Price
	trade := Trade{BikeSN: _sn, DateOfOpen: timeNow, DateOfSign: "", Seller: _seller, Buyer: "", OriginalPrice: price, LastPrice: price, CurrentPrice: price, State: "Open"}
	// Append the new trade to the global trade list and persist it.
	var tradeList []Trade
	tradeListBytes, _ := ctx.GetStub().GetState("TradeList")
	json.Unmarshal(tradeListBytes, &tradeList)
	tradeList = append(tradeList, trade)
	tradeListBytes, _ = json.Marshal(tradeList)
	err = ctx.GetStub().PutState("TradeList", tradeListBytes)
	if err != nil {
		return Trade{}, fmt.Errorf("fail to save trade list: %s", err.Error())
	}
	// Append the trade to the bike's own history and persist the bike.
	roadbike.TradeHistory = append(roadbike.TradeHistory, trade)
	// Bug fix: the original marshalled roadbikeBytes (the stale raw bytes,
	// which json.Marshal encodes as a base64 string) instead of the updated
	// roadbike struct, so the new trade was never persisted on the bike.
	roadbikeBytes, _ = json.Marshal(roadbike)
	err = ctx.GetStub().PutState(_sn, roadbikeBytes)
	if err != nil {
		return Trade{}, fmt.Errorf("fail to save roadbike %s: %s", _sn, err.Error())
	}
	return trade, nil
}
// signTrade is a placeholder for signing an open trade of bike _sn by
// _buyer; not yet implemented.
func signTrade(ctx contractapi.TransactionContextInterface, _sn string, _buyer string) {
}
// producePart mints one new Shimano part of the given kind ("transmission",
// "brake" or "wheel"), saves it under its serial number, records the serial
// number in Shimano's matching stock list and persists Shimano's state.
// An unknown partName leaves part empty and writes a zero-value record
// under the empty key (unchanged from the original behavior).
func (s *SmartContract) producePart(ctx contractapi.TransactionContextInterface, partName string) error {
	var shimano Shimano
	shimanoBytes, _ := ctx.GetStub().GetState("Shimano")
	_ = json.Unmarshal(shimanoBytes, &shimano)
	// Advance the manufacturer-wide serial counter.
	snInt := shimano.SN + 1
	shimano.SN = snInt
	snStr := ""
	// Bug fix: "2006-01-02" is Go's reference date layout; the original
	// "2020-01-01" is not a valid layout and produced a garbled date.
	timeNow := time.Now().Format("2006-01-02")
	var part Part
	switch partName {
	case "transmission":
		snStr = "SHTRJP" + strconv.Itoa(snInt)
		part = Part{SerialNumber: snStr, Model: "Transmission_Shimano", PlaceOfProduction: "Factory_Japan", DateOfProduction: timeNow, Price: 500000}
		// Bug fix: record the produced part in stock; previously the stock
		// lists never grew, so purchasePart popped from an empty list.
		shimano.TrStock = append(shimano.TrStock, snStr)
	case "brake":
		snStr = "SHBRJP" + strconv.Itoa(snInt)
		part = Part{SerialNumber: snStr, Model: "Brake_Shimano", PlaceOfProduction: "Factory_Japan", DateOfProduction: timeNow, Price: 200000}
		shimano.BrStock = append(shimano.BrStock, snStr)
	case "wheel":
		snStr = "SHWHJP" + strconv.Itoa(snInt)
		part = Part{SerialNumber: snStr, Model: "Wheel_Shimano", PlaceOfProduction: "Factory_Japan", DateOfProduction: timeNow, Price: 300000}
		shimano.WhStock = append(shimano.WhStock, snStr)
	}
	partBytes, _ := json.Marshal(part)
	err := ctx.GetStub().PutState(part.SerialNumber, partBytes)
	if err != nil {
		return fmt.Errorf("fail to produce")
	}
	shimanoBytes, _ = json.Marshal(shimano)
	err = ctx.GetStub().PutState("Shimano", shimanoBytes)
	if err != nil {
		return fmt.Errorf("fail to change serialNumber")
	}
	return nil
}
// pop removes and returns the first element of arr along with the remaining
// slice. On an empty (or nil) slice it returns "" and the slice unchanged
// instead of panicking (robustness fix: the original indexed arr[0]
// unconditionally).
func pop(arr []string) (string, []string) {
	if len(arr) == 0 {
		return "", arr
	}
	return arr[0], arr[1:]
}
// purchasePart moves one part of the given kind ("transmission", "brake" or
// "wheel") from Shimano's stock into Specialized's stock and persists both
// manufacturers' states.
// NOTE(review): assumes the matching Shimano stock list is non-empty —
// confirm pop's behavior on an empty list before relying on this.
func (s *SmartContract) purchasePart(ctx contractapi.TransactionContextInterface, partName string) error {
	// Shimano (seller) state.
	var shimano Shimano
	shimanoBytes, _ := ctx.GetStub().GetState("Shimano")
	err := json.Unmarshal(shimanoBytes, &shimano)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	// Specialized (buyer) state.
	var specialized Specialized
	specializedBytes, _ := ctx.GetStub().GetState("Specialized")
	err = json.Unmarshal(specializedBytes, &specialized)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	sn := ""
	// Transfer the serial number from seller stock to buyer stock.
	switch partName {
	case "transmission":
		sn, shimano.TrStock = pop(shimano.TrStock)
		specialized.TrStock = append(specialized.TrStock, sn)
	case "brake":
		sn, shimano.BrStock = pop(shimano.BrStock)
		specialized.BrStock = append(specialized.BrStock, sn)
	case "wheel":
		sn, shimano.WhStock = pop(shimano.WhStock)
		specialized.WhStock = append(specialized.WhStock, sn)
	}
	// Read the part record back only to validate that it exists and
	// unmarshals; the decoded value is not otherwise used.
	var part Part
	partBytes, _ := ctx.GetStub().GetState(sn)
	err = json.Unmarshal(partBytes, &part)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	specializedBytes, _ = json.Marshal(specialized)
	_ = ctx.GetStub().PutState("Specialized", specializedBytes)
	shimanoBytes, _ = json.Marshal(shimano)
	_ = ctx.GetStub().PutState("Shimano", shimanoBytes)
	return nil
}
// produceRoadbike assembles a new Allez_2020 roadbike from one transmission,
// one brake and one wheel out of Specialized's stock, stores the bike under
// its serial number, adds it to Specialized's bike stock and persists the
// updated Specialized state. It returns the new bike's serial number.
func (s *SmartContract) produceRoadbike(ctx contractapi.TransactionContextInterface) (string, error) {
	fmt.Println("GenerateRoadbike !")
	// Bug fix: "2006-01-02" is Go's reference date layout; the original
	// "2020-01-01" is not a valid layout and produced a garbled date.
	timeNow := time.Now().Format("2006-01-02")
	// Specialized (manufacturer) state.
	var specialized Specialized
	specializedBytes, _ := ctx.GetStub().GetState("Specialized")
	err := json.Unmarshal(specializedBytes, &specialized)
	if err != nil {
		return "", fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	snInt := specialized.SN + 1
	specialized.SN = snInt
	var snTr, snBr, snWh string
	snTr, specialized.TrStock = pop(specialized.TrStock)
	// Bug fix: the brake and wheel serial numbers were popped from TrStock
	// instead of BrStock/WhStock.
	snBr, specialized.BrStock = pop(specialized.BrStock)
	snWh, specialized.WhStock = pop(specialized.WhStock)
	tradeHistory := []Trade{}
	roadbike := Roadbike{
		SerialNumber: "SPALAM" + strconv.Itoa(snInt),
		Model: "Allez_2020",
		Colour: "Red",
		Owner: "",
		PlaceOfProduction: "Factory_USA",
		DateOfProduction: timeNow,
		BikeStore: "",
		DateOfWarehousing: "",
		TradeHistory: tradeHistory,
		Transmission: snTr,
		Brake: snBr,
		Wheel: snWh,
		Price: 1500000}
	roadbikeAsBytes, _ := json.Marshal(roadbike)
	err = ctx.GetStub().PutState(roadbike.SerialNumber, roadbikeAsBytes)
	if err != nil {
		return "", err
	}
	// Bug fix: record the finished bike in stock and persist Specialized;
	// previously the SN bump and the part pops were silently lost.
	specialized.BikeStock = append(specialized.BikeStock, roadbike.SerialNumber)
	specializedBytes, _ = json.Marshal(specialized)
	if err := ctx.GetStub().PutState("Specialized", specializedBytes); err != nil {
		return "", err
	}
	fmt.Println(roadbike.Model + " generated !")
	fmt.Println("S/N : " + roadbike.SerialNumber)
	return roadbike.SerialNumber, nil
}
// purchaseRoadbike moves one bike from Specialized's stock into the retail
// store's stock and stamps the bike record with the store name.
func (s *SmartContract) purchaseRoadbike(ctx contractapi.TransactionContextInterface) error {
	// Specialized (manufacturer) state.
	var specialized Specialized
	specializedBytes, _ := ctx.GetStub().GetState("Specialized")
	err := json.Unmarshal(specializedBytes, &specialized)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	// Bike store state. Bug fix: the store record is stored under the key
	// "RealBikeShop" (see InitLedger, getBikeList and purchaseMyBike); the
	// original read and wrote the never-initialized key "BikeStore".
	var bikeStore BikeStore
	bikeStoreBytes, _ := ctx.GetStub().GetState("RealBikeShop")
	err = json.Unmarshal(bikeStoreBytes, &bikeStore)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	// Transfer one serial number from manufacturer stock to store stock.
	sn := ""
	sn, specialized.BikeStock = pop(specialized.BikeStock)
	bikeStore.BikeStock = append(bikeStore.BikeStock, sn)
	// Stamp the bike record with its new store.
	var roadbike Roadbike
	roadbikeBytes, _ := ctx.GetStub().GetState(sn)
	err = json.Unmarshal(roadbikeBytes, &roadbike)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	roadbike.BikeStore = bikeStore.StoreName
	roadbikeBytes, _ = json.Marshal(roadbike)
	// Bug fix: the three write errors below were previously discarded.
	if err = ctx.GetStub().PutState(roadbike.SerialNumber, roadbikeBytes); err != nil {
		return fmt.Errorf("Failed to write world state. %s", err.Error())
	}
	bikeStoreBytes, _ = json.Marshal(bikeStore)
	if err = ctx.GetStub().PutState("RealBikeShop", bikeStoreBytes); err != nil {
		return fmt.Errorf("Failed to write world state. %s", err.Error())
	}
	specializedBytes, _ = json.Marshal(specialized)
	if err = ctx.GetStub().PutState("Specialized", specializedBytes); err != nil {
		return fmt.Errorf("Failed to write world state. %s", err.Error())
	}
	return nil
}
// getBikeList returns the raw JSON records of every bike currently stocked
// by the "RealBikeShop" store, one JSON string per bike.
func (s *SmartContract) getBikeList(ctx contractapi.TransactionContextInterface) ([]string, error) {
	// Load the store record that holds the stocked serial numbers.
	var store BikeStore
	storeBytes, _ := ctx.GetStub().GetState("RealBikeShop")
	if err := json.Unmarshal(storeBytes, &store); err != nil {
		return nil, fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	// Fetch each bike's raw record by serial number.
	bikes := make([]string, 0, len(store.BikeStock))
	for _, sn := range store.BikeStock {
		raw, _ := ctx.GetStub().GetState(sn)
		bikes = append(bikes, string(raw))
	}
	return bikes, nil
}
// purchaseMyBike sells bike _sn from the RealBikeShop store to _owner: it
// stamps the owner and purchase date onto the bike record and removes the
// serial number from the store's stock.
func (s *SmartContract) purchaseMyBike(ctx contractapi.TransactionContextInterface, _owner string, _sn string) error {
	// Bug fix: "2006-01-02" is Go's reference date layout; the original
	// "2020-01-01" is not a valid layout and produced a garbled date.
	timeNow := time.Now().Format("2006-01-02")
	// Bike store state.
	var bikeStore BikeStore
	bikeStoreBytes, _ := ctx.GetStub().GetState("RealBikeShop")
	err := json.Unmarshal(bikeStoreBytes, &bikeStore)
	// Bug fix: this error was previously overwritten unchecked by the
	// second Unmarshal below.
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	// Bike state.
	var roadbike Roadbike
	roadbikeBytes, _ := ctx.GetStub().GetState(_sn)
	err = json.Unmarshal(roadbikeBytes, &roadbike)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	roadbike.Owner = _owner
	roadbike.DateOfPurchase = timeNow
	roadbikeBytes, _ = json.Marshal(roadbike)
	// Bug fix: the bike write error was previously discarded.
	if err = ctx.GetStub().PutState(_sn, roadbikeBytes); err != nil {
		return fmt.Errorf("Failed to write world state. %s", err.Error())
	}
	// Remove the sold bike from the store's stock.
	arr := []string{}
	for _, bikeSN := range bikeStore.BikeStock {
		if bikeSN != _sn {
			arr = append(arr, bikeSN)
		}
	}
	bikeStore.BikeStock = arr
	bikeStoreBytes, _ = json.Marshal(bikeStore)
	err = ctx.GetStub().PutState("RealBikeShop", bikeStoreBytes)
	if err != nil {
		return fmt.Errorf("Failed to write world state. %s", err.Error())
	}
	return nil
}
// changeOwner re-assigns a roadbike record from _owner1 to _owner2.
// (Original Korean note "여기 볼 차례" — "this one is next to review".)
// NOTE(review): the record is read under key _owner1 and written under key
// _owner2, which only works if bikes are keyed by owner name; the stale
// record under _owner1 is never deleted — confirm this is intended.
func (s *SmartContract) changeOwner(ctx contractapi.TransactionContextInterface, _owner1 string, _owner2 string) error {
	// bike state
	var roadbike Roadbike
	roadbikeBytes, _ := ctx.GetStub().GetState(_owner1)
	err := json.Unmarshal(roadbikeBytes, &roadbike)
	if err != nil {
		return fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	roadbike.Owner = _owner2
	roadbikeBytes, _ = json.Marshal(roadbike)
	// NOTE(review): the write error is discarded here — confirm intended.
	ctx.GetStub().PutState(_owner2, roadbikeBytes)
	return nil
}
// getBikeInfo loads and decodes the roadbike record stored under serial
// number _sn.
func (s *SmartContract) getBikeInfo(ctx contractapi.TransactionContextInterface, _sn string) (Roadbike, error) {
	var bike Roadbike
	raw, _ := ctx.GetStub().GetState(_sn)
	if err := json.Unmarshal(raw, &bike); err != nil {
		return bike, fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	return bike, nil
}
// getMyBikeInfo loads and decodes the roadbike record stored under the
// owner key _owner.
func (s *SmartContract) getMyBikeInfo(ctx contractapi.TransactionContextInterface, _owner string) (Roadbike, error) {
	var bike Roadbike
	raw, _ := ctx.GetStub().GetState(_owner)
	if err := json.Unmarshal(raw, &bike); err != nil {
		return bike, fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	return bike, nil
}
// changePart swaps one component (transmission, brake or wheel) on the
// roadbike record stored under _owner to the serial number _sn, persists
// the record and returns it. An unknown _part leaves the bike unchanged.
func (s *SmartContract) changePart(ctx contractapi.TransactionContextInterface, _owner string, _part string, _sn string) (Roadbike, error) {
	// bike state
	var roadbike Roadbike
	roadbikeBytes, _ := ctx.GetStub().GetState(_owner)
	err := json.Unmarshal(roadbikeBytes, &roadbike)
	if err != nil {
		return roadbike, fmt.Errorf("Failed to read world state. %s", err.Error())
	}
	switch _part {
	case "transmission":
		roadbike.Transmission = _sn
	case "brake":
		roadbike.Brake = _sn
	case "wheel":
		roadbike.Wheel = _sn
	}
	roadbikeBytes, _ = json.Marshal(roadbike)
	// Bug fix: the write error was previously discarded.
	if err := ctx.GetStub().PutState(_owner, roadbikeBytes); err != nil {
		return roadbike, fmt.Errorf("Failed to write world state. %s", err.Error())
	}
	return roadbike, nil
}
// func (s *SmartContract) makeTrade(ctx contractapi.TransactionContextInterface) {
// var trade Trade
// }
// signTrade is an unimplemented placeholder for the trade-signing flow on
// the contract (a free function of the same name also exists in this file).
func (s *SmartContract) signTrade() {
}
// InitLedger seeds the world state with empty Shimano, Specialized,
// RealBikeShop, BikeInfo and TradeList records. It returns the first
// write error encountered (previously all PutState errors were discarded).
func (s *SmartContract) InitLedger(ctx contractapi.TransactionContextInterface) error {
	fmt.Println("SmartContract Init")
	empty := []string{}
	// Part manufacturer.
	shimano := Shimano{TrStock: empty, BrStock: empty, WhStock: empty, SN: 0}
	shimanoBytes, _ := json.Marshal(shimano)
	if err := ctx.GetStub().PutState("Shimano", shimanoBytes); err != nil {
		return err
	}
	// Bike manufacturer.
	specialized := Specialized{TrStock: empty, BrStock: empty, WhStock: empty, BikeStock: empty, SN: 0}
	specializedBytes, _ := json.Marshal(specialized)
	if err := ctx.GetStub().PutState("Specialized", specializedBytes); err != nil {
		return err
	}
	// Retail store.
	bikeStore := BikeStore{StoreName: "RealBikeShop", BikeStock: empty}
	bikeStoreBytes, _ := json.Marshal(bikeStore)
	if err := ctx.GetStub().PutState("RealBikeShop", bikeStoreBytes); err != nil {
		return err
	}
	// Bike catalog.
	bikeInfo := make(map[string]Roadbike)
	bikeInfoBytes, _ := json.Marshal(bikeInfo)
	if err := ctx.GetStub().PutState("BikeInfo", bikeInfoBytes); err != nil {
		return err
	}
	// Open/closed trades.
	tradeList := []Trade{}
	tradeListBytes, _ := json.Marshal(tradeList)
	if err := ctx.GetStub().PutState("TradeList", tradeListBytes); err != nil {
		return err
	}
	return nil
}
// main registers the SmartContract chaincode with the Fabric contract API
// and starts serving transactions.
func main() {
	chaincode, err := contractapi.NewChaincode(new(SmartContract))
	if err != nil {
		fmt.Printf("Error create roadbike-scm chaincode: %s", err.Error())
		return
	}
	if err := chaincode.Start(); err != nil {
		fmt.Printf("Error starting roadbike-scm chaincode: %s", err.Error())
	}
}
|
/*
Create a function that takes a sentence and turns every "i" into "wi" and "e" into "we", and add "owo" at the end.
Notes
Don't forget to return the value!
There's a space in front of owo!
uwu
*/
package main
import (
"bytes"
"fmt"
)
// main prints the owo-fied version of a handful of song lyrics.
func main() {
	fmt.Println(owofied("I'm gonna ride 'til I can't no more"))
	fmt.Println(owofied("Do you ever feel like a plastic bag"))
	fmt.Println(owofied("Cause baby you're a firework"))
	fmt.Println(owofied("Never gonna give you up"))
	fmt.Println(owofied("We've known each other for so long"))
	fmt.Println(owofied("Never gonna let you down"))
	fmt.Println(owofied("Shine bright like a diamond"))
}
// owofied returns s with every lowercase 'i' turned into "wi" and every
// lowercase 'e' turned into "we" (a 'w' is inserted before the original
// rune), and with " owo" appended at the end. Uppercase letters pass
// through untouched.
func owofied(s string) string {
	var out bytes.Buffer
	for _, ch := range s {
		if ch == 'i' || ch == 'e' {
			out.WriteRune('w')
		}
		out.WriteRune(ch)
	}
	out.WriteString(" owo")
	return out.String()
}
|
package solver
import "github.com/truggeri/go-sudoku/cmd/go-sudoku/puzzle"
// solveTechnique bundles the pair of accessors a set-based elimination pass
// needs: set maps a square's (x, y) grid position to the puzzle.Set to
// scan, and index maps (x, y) to that square's index within that set.
type solveTechnique struct {
	set func(int, int) puzzle.Set
	index func(int, int) int
}

// solution records a newly solved square and its (x, y) position in the grid.
type solution struct {
	x, y int
	square puzzle.Square
}
// Solve returns the given puzzle with all solvable elements filled in.
// It repeatedly recomputes candidate possibilities and tries each solving
// technique in order, committing the first solution found per pass, until
// a full pass yields no progress.
func Solve(puz puzzle.Puzzle) puzzle.Puzzle {
	// Idiom fix: `for` instead of `for true` (staticcheck S1006).
	for {
		puz = puz.CalculatePossibilities()
		newSolution := false
		var answer solution
		techniques := [4]func(puzzle.Puzzle) (bool, solution){solveUniques, solveRows, solveColumns, solveCubes}
		for _, technique := range techniques {
			newSolution, answer = technique(puz)
			if newSolution {
				puz[answer.x][answer.y] = answer.square
				break
			}
		}
		if !newSolution {
			// No technique made progress; the puzzle is as solved as it gets.
			break
		}
	}
	return puz
}
// solveUniques scans the grid for the first unsolved square whose
// candidate set has exactly one remaining possibility and returns it as a
// solution; the bool reports whether such a square was found.
func solveUniques(puz puzzle.Puzzle) (bool, solution) {
	for x := 0; x < puzzle.LineWidth; x++ {
		for y := 0; y < puzzle.LineWidth; y++ {
			if puz[x][y].Solved() {
				continue
			}
			onlyOne, value := onlyOnePossibility(puz[x][y].Possibilities)
			if onlyOne {
				return true, solution{x: x, y: y, square: puzzle.CreatePuzzleSquare(value)}
			}
		}
	}
	return false, solution{}
}
// onlyOnePossibility reports whether exactly one possibility is set in
// poss and, if so, returns its 1-based value.
func onlyOnePossibility(poss puzzle.Possibilies) (bool, int) {
	counter, onlyOption := 0, 0
	for i, v := range poss {
		if v {
			onlyOption = i + 1
			counter++
			if counter > 1 {
				return false, 0
			}
		}
	}
	// Bug fix: the original returned true even when no possibility was set
	// (counter == 0), yielding the invalid value 0.
	return counter == 1, onlyOption
}
// solveRows looks for a square that can be solved by elimination within
// its row.
func solveRows(puz puzzle.Puzzle) (bool, solution) {
	technique := solveTechnique{
		set:   func(x, _ int) puzzle.Set { return puz.GetRow(x) },
		index: func(_, y int) int { return y },
	}
	return solveByElement(puz, technique)
}
// solveByElement walks every unsolved square, extracts its containing set
// (row, column or cube, per the given technique) and checks whether set
// elimination pins the square's value. It returns the first solution found.
func solveByElement(puz puzzle.Puzzle, st solveTechnique) (bool, solution) {
	for x := 0; x < puzzle.LineWidth; x++ {
		for y := 0; y < puzzle.LineWidth; y++ {
			if puz[x][y].Solved() {
				continue
			}
			updated, result := solveSet(st.set(x, y), st.index(x, y))
			if updated {
				return true, solution{x: x, y: y, square: result}
			}
		}
	}
	return false, solution{}
}
// solveSet checks whether square i of set can be solved by elimination: if
// the square has a candidate value that is impossible everywhere else in
// the set, that value must be its answer.
func solveSet(set puzzle.Set, i int) (bool, puzzle.Square) {
	if set[i].Solved() {
		return false, set[i]
	}
	// set.Possibilities(i) — presumably the combined possibilities of the
	// set excluding index i; confirm semantics in the puzzle package.
	setOptions := set.Possibilities(i)
	for j := range setOptions {
		// Candidate j+1 is open for this square but closed for the rest.
		if set[i].Possibilities[j] && !setOptions[j] {
			return true, puzzle.CreatePuzzleSquare(j + 1)
		}
	}
	return false, set[i]
}
// solveColumns looks for a square that can be solved by elimination within
// its column.
func solveColumns(puz puzzle.Puzzle) (bool, solution) {
	technique := solveTechnique{
		set:   func(_, y int) puzzle.Set { return puz.GetColumn(y) },
		index: func(x, _ int) int { return x },
	}
	return solveByElement(puz, technique)
}
// solveCubes looks for a square that can be solved by elimination within
// its 3x3 cube, using puzzle.PositionInCube to locate the square inside it.
func solveCubes(puz puzzle.Puzzle) (bool, solution) {
	set := func(x, y int) puzzle.Set {
		return puz.GetCube(x, y)
	}
	index := puzzle.PositionInCube
	return solveByElement(puz, solveTechnique{set, index})
}
|
/*
GoLang code created by Jirawat Harnsiriwatanakit https://github.com/kazekim
*/
package kbank
import (
"fmt"
"github.com/kazekim/thaibankclient-go/thcerror"
)
// testSSLError is an SSL-test error carrying a status code and an optional
// English message (nil means no message was provided).
type testSSLError struct {
	StatusCode string
	ErrorMsg *string
}
// NewTestSSLError builds a thcerror.Error from a status code and an
// optional English message.
// NOTE(review): code is dereferenced without a nil check — a nil code
// pointer panics here; confirm callers always pass a non-nil code.
func NewTestSSLError(code, errorMsg *string) thcerror.Error {
	return &testSSLError{
		*code,
		errorMsg,
	}
}
// StatusCodeValue returns the stored status code.
func (e *testSSLError) StatusCodeValue() string {
	return e.StatusCode
}
// MessageTHValue returns the Thai-language error message. No Thai text is
// stored on testSSLError, so it falls back to the English message.
// Bug fix: the original called itself recursively, overflowing the stack
// on every invocation.
func (e *testSSLError) MessageTHValue() string {
	return e.MessageENValue()
}
// MessageENValue returns the English error message, or "-" when none was
// provided.
func (e *testSSLError) MessageENValue() string {
	if e.ErrorMsg == nil {
		return "-"
	}
	return *e.ErrorMsg
}
// Error implements the error interface, combining the status code and the
// English message.
func (e *testSSLError) Error() string {
	return fmt.Sprintf("code: %v - %v", e.StatusCode, e.MessageENValue())
}
package format
import (
"github.com/plandem/xlsx/internal/ml/primitives"
)
// List of all possible values for FontFamilyType.
const (
	_ primitives.FontFamilyType = iota // 0 — reserved/unspecified
	FontFamilyRoman // 1
	FontFamilySwiss // 2
	FontFamilyModern // 3
	FontFamilyScript // 4
	FontFamilyDecorative // 5
)
|
package main
import "fmt"
// test invokes the callback f when it is non-nil; a nil callback is
// silently ignored.
func test(f func()) {
	if f == nil {
		return
	}
	f()
}
// main demonstrates test with both a real callback and a nil callback.
func main() {
	test(func() { fmt.Println("hoge") })
	test(nil)
}
|
// 고루틴(GoRoutine)
/*
//고루틴(Goroutine)은 함수를 동시에 실행시키는 기능입니다.
//다른 언어의 스레드 생성 방법보다 문법이 간단하고,
//스레드보다 운영체제의 리소스를 적게 사용하므로 많은 수의 고루틴을 쉽게 생성할 수 있습니다.
//'go 함수명'
package main
import (
"fmt"
"math/rand"
"time"
)
//(TIP)
//시간표현
const (
Nanosecond Duration = 1
Microsecond = 1000 * Nanosecond
Millisecond = 1000 * Microsecond
Second = 1000 * Millisecond
Minute = 60 * Second
Hour = 60 * Minute
)
func hello() {
fmt.Println("Hellow, world!")
}
func hello2(n int) {
r := rand.Intn(100) // 랜덤한 숫자 생성
time.Sleep(time.Duration(r)) // 랜덤한 시간 동안 대기
fmt.Println(n)
}
func main() {
go hello() // 함수를 고루틴으로 실행
for i := 0; i < 100; i++ { // 100번 반복하여
go hello2(i) // 고루틴 100개 생성
}
fmt.Scanln() // main 함수가 종료되지 않도록 대기
}
*/
/*
// 멀티코어 활용하기
//Go 언어는 CPU 코어를 한 개만 사용하도록 설정되어 있습니다.
//다음은 시스템의 모든 CPU 코어를 사용하는 방법입니다.
package main
import (
"fmt"
"runtime"
)
func main() {
runtime.GOMAXPROCS(runtime.NumCPU()) //CPU 개수를 구한 뒤 사용할 최대 CPU 개수 설정
fmt.Println(runtime.GOMAXPROCS(0)) //설정 값 출력 (함수에 0을 넣으면 설정 값은 바꾸지 않으며 현재 설정 값만 리턴합니다.)
s := "Hello, world"
for i := 0; i < 100; i++ {
go func(n int) {
fmt.Println(s, n)
}(i)
}
fmt.Scanln()
}
*/
// 클로저를 고루틴으로 실행하기
//함수 안에서 클로저를 정의한 뒤 고루틴으로 실행할 수 있습니다.
//예제의 출력 결과를 좀 더 확실하게 표현하기 위해 CPU 코어는 하나만 사용하도록 설정합니다.
package main
import (
"fmt"
"runtime"
)
// main demonstrates running closures as goroutines, pinned to a single CPU
// core so the output ordering is easier to observe.
func main() {
	runtime.GOMAXPROCS(1) // use only one CPU core
	s := "Hello, world!"
	for i := 0; i < 100; i++ {
		go func(n int) { // run an anonymous function (closure) as a goroutine
			fmt.Println(s, n) // print s and the value received via parameter n
		}(i) // pass the loop variable as an argument so each goroutine gets its own copy
	}
	// Ordinary closures run in order inside the loop, but closures launched
	// as goroutines only run after the loop has finished.
	for i := 0; i < 100; i++ {
		go func() {
			fmt.Println(s, i) // the loop variable is captured directly by
			// the closure (shared across iterations before Go 1.22)
		}()
	}
	fmt.Scanln() // block so main does not exit before the goroutines run
	// To guarantee goroutine execution order in Go, use synchronization such
	// as synchronous channels (see section 34.1 of the source text).
}
|
package planets
import (
"log"
"net/http"
"github.com/flaviogf/star_wars_backend/internal/planets"
"github.com/gorilla/mux"
)
// RemovePlanetHandler deletes the planet identified by the "id" path
// variable. It responds 404 when the planet could not be removed
// (ErrPlanetNotRemoved), 500 on any other failure and 204 on success.
func RemovePlanetHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	handler := planets.NewRemovePlanetHandler(MongoRepository{})
	err := handler.Execute(r.Context(), vars["id"])
	// Idiom fix: the sentinel comparison already implies err != nil, so the
	// original `err != nil &&` guard was redundant.
	if err == planets.ErrPlanetNotRemoved {
		log.Println(err)
		w.WriteHeader(http.StatusNotFound)
		return
	}
	if err != nil {
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
|
package client
import (
"io/ioutil"
"net/http"
"sync"
"time"
)
// HttpClient abstracts a minimal HTTP client capable of GET requests.
type HttpClient interface {
	GET(url string) ([]byte, error)
}

var once sync.Once
var defaultHttp defaultHttpClient

// defaultHttpClient is the package's singleton HttpClient implementation.
type defaultHttpClient struct {
	client http.Client
}

// NewDefaultHttpClient returns the process-wide HttpClient singleton,
// lazily initializing its underlying http.Client (3 second timeout)
// exactly once.
func NewDefaultHttpClient() HttpClient {
	once.Do(func() {
		defaultHttp = defaultHttpClient{
			client: http.Client{Timeout: 3 * time.Second},
		}
	})
	return &defaultHttp
}

// GET issues an HTTP GET to url and returns the full response body.
func (d *defaultHttpClient) GET(url string) ([]byte, error) {
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	response, err := d.client.Do(request)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	data, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}
	return data, nil
}
|
// Command json2csv converts JSON to CSV.
//
// For example, the following JSON input:
//
// [
// {"key1": a, "key2": b},
// {"key1": x, "key2": y}
// ]
//
// is converted to the following CSV:
//
// key1 key2
// a b
// x y
//
// The input must be a JSON list of objects, with each object having the
// same set (or subset) of keys. Values such as a, b, x, and y in the
// example are formatted using "%v". The command does not validate that
// the input meets the expected format.
//
// The default field delimiter in the CSV output is "\t".
package main
import (
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"unicode/utf8"
)
// Command-line flags.
var (
	delim = flag.String("d", "\t", "CSV field delimiter rune")
	header = flag.Bool("h", true, "include CSV header")
)
// usage prints the command synopsis and flag defaults to stderr.
func usage() {
	fmt.Fprintf(os.Stderr, "usage: json2csv [flags] [file]\n")
	fmt.Fprintf(os.Stderr, "\n")
	fmt.Fprintf(os.Stderr, "Flags:\n")
	flag.PrintDefaults()
}
// main reads a JSON list of objects from stdin (or a single file argument)
// and writes it as CSV on stdout, using the -d delimiter and optionally a
// -h header row.
func main() {
	log.SetFlags(0)
	log.SetPrefix("json2csv: ")
	flag.Usage = usage
	flag.Parse()
	var in io.ReadCloser
	switch flag.NArg() {
	case 0:
		in = ioutil.NopCloser(os.Stdin)
	case 1:
		var err error
		in, err = os.Open(flag.Arg(0))
		if err != nil {
			log.Fatalf("error opening file: %s", err)
		}
	default:
		usage()
		os.Exit(2)
	}
	defer in.Close() // ok to ignore error: file is read-only
	comma, _ := utf8.DecodeRuneInString(*delim)
	if comma == utf8.RuneError {
		log.Fatalf("invalid value for -d")
	}
	var l []map[string]interface{}
	if err := json.NewDecoder(in).Decode(&l); err != nil {
		log.Fatalf("error decoding json: %s", err)
	}
	if len(l) == 0 {
		return
	}
	// Bug fix: derive the column order once from the first object and use
	// it for every row. The original ranged over each row's map
	// independently, and Go map iteration order is randomized, so values
	// could land under the wrong header column.
	keys := make([]string, 0, len(l[0]))
	for k := range l[0] {
		keys = append(keys, k)
	}
	var records [][]string
	if *header {
		records = append(records, keys)
	}
	for _, m := range l {
		// Keys missing from a row render as "<nil>" (the command assumes
		// all objects share the first object's key set).
		row := make([]string, 0, len(keys))
		for _, k := range keys {
			row = append(row, fmt.Sprintf("%v", m[k]))
		}
		records = append(records, row)
	}
	w := csv.NewWriter(os.Stdout)
	w.Comma = comma
	if err := w.WriteAll(records); err != nil {
		log.Fatalf("error writing csv: %s", err)
	}
	if err := w.Error(); err != nil {
		log.Fatalf("%s", err)
	}
}
|
package spacesaving
// StreamManager tracks item counts in a data stream and reports the
// current top items.
type StreamManager interface {
	// Offer records increment additional occurrences of item.
	Offer(item string, increment int)
	// Top returns the current highest-ranked results.
	Top() []Result
	// Name identifies the implementation.
	Name() string
}
|
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/metrics"
velametrics "github.com/kubevela/pkg/monitor/metrics"
)
var (
	// StepDurationHistogram reports the workflow step execution duration in
	// milliseconds (per the metric name), labeled by controller and step type.
	StepDurationHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "step_duration_ms",
		Help: "step latency distributions.",
		Buckets: velametrics.FineGrainedBuckets,
		ConstLabels: prometheus.Labels{},
	}, []string{"controller", "step_type"})
)
// collectorGroup lists every Prometheus collector this package registers
// with the controller-runtime registry in init. Collectors other than
// StepDurationHistogram are declared elsewhere in this package.
var collectorGroup = []prometheus.Collector{
	AppReconcileStageDurationHistogram,
	StepDurationHistogram,
	ListResourceTrackerCounter,
	ApplicationReconcileTimeHistogram,
	ApplyComponentTimeHistogram,
	WorkflowFinishedTimeHistogram,
	ApplicationPhaseCounter,
	WorkflowStepPhaseGauge,
	ClusterIsConnectedGauge,
	ClusterWorkerNumberGauge,
	ClusterMasterNumberGauge,
	ClusterMemoryCapacityGauge,
	ClusterCPUCapacityGauge,
	ClusterPodCapacityGauge,
	ClusterMemoryAllocatableGauge,
	ClusterCPUAllocatableGauge,
	ClusterPodAllocatableGauge,
	ClusterMemoryUsageGauge,
	ClusterCPUUsageGauge,
}
// init registers every collector in collectorGroup with the
// controller-runtime metrics registry; registration failures are logged
// and skipped rather than aborting startup.
func init() {
	for _, collector := range collectorGroup {
		if err := metrics.Registry.Register(collector); err != nil {
			klog.Error(err)
		}
	}
}
|
package main
import (
"os"
"fmt"
"flag"
"os/signal"
"sync"
"github.com/infrawatch/lokean/pkg/logs"
"github.com/infrawatch/apputils/connector"
"github.com/infrawatch/apputils/logging"
"github.com/infrawatch/apputils/config"
)
// printUsage reports a missing required command-line argument on stderr
// and lists the supported flags.
func printUsage() {
	fmt.Fprintln(os.Stderr, `Required command line argument missing`)
	flag.PrintDefaults()
}
// spawnSignalHandler spawns a goroutine that waits for any of the watched
// interruption signals and ends lokean when the first one arrives by
// closing the finish channel (which every listener observes).
func spawnSignalHandler(finish chan bool, logger *logging.Logger, watchedSignals ...os.Signal) {
	interruptChannel := make(chan os.Signal, 1)
	signal.Notify(interruptChannel, watchedSignals...)
	go func() {
	signalLoop:
		for sig := range interruptChannel {
			logger.Metadata(map[string]interface{}{
				"signal": sig,
			})
			logger.Error("Stopping execution on caught signal")
			close(finish)
			break signalLoop
		}
	}()
}
// parseLogLevel converts a config string (DEBUG, INFO, WARNING or ERROR)
// to the corresponding logging.LogLevel. Unknown strings return ERROR
// together with a non-nil error.
func parseLogLevel(s string) (logging.LogLevel, error) {
	switch s {
	case "DEBUG":
		return logging.DEBUG, nil
	case "INFO":
		return logging.INFO, nil
	case "WARNING":
		// Bug fix: the original matched the misspelling "WANGING", so the
		// "WARNING" value accepted by the config validator (see
		// getConfigMetadata) could never be parsed.
		return logging.WARN, nil
	case "ERROR":
		return logging.ERROR, nil
	}
	return logging.ERROR, fmt.Errorf("Failed to parse the logLevel string: %s", s)
}
// getConfigMetadata declares the INI schema lokean accepts: the "default"
// section (logging), the "amqp1" section (source connection) and the
// "loki" section (destination connection), each with defaults and
// validators.
func getConfigMetadata() map[string][]config.Parameter {
	elements := map[string][]config.Parameter{
		"default": []config.Parameter{
			config.Parameter{Name: "logFile", Tag: "", Default: "/dev/stderr", Validators: []config.Validator{}},
			config.Parameter{Name: "logLevel", Tag: "", Default: "INFO", Validators: []config.Validator{config.StringOptionsValidatorFactory([]string{"DEBUG", "INFO", "WARNING", "ERROR"})}},
		},
		"amqp1": []config.Parameter{
			config.Parameter{Name: "connection", Tag: "", Default: "amqp://localhost:5672/lokean/logs", Validators: []config.Validator{}},
			config.Parameter{Name: "send_timeout", Tag: "", Default: 2, Validators: []config.Validator{config.IntValidatorFactory()}},
			config.Parameter{Name: "client_name", Tag: "", Default: "test", Validators: []config.Validator{}},
			config.Parameter{Name: "listen_channels", Tag: "", Default: "lokean/logs", Validators: []config.Validator{}},
		},
		"loki": []config.Parameter{
			config.Parameter{Name: "connection", Tag: "", Default: "http://localhost:3100", Validators: []config.Validator{}},
			config.Parameter{Name: "batch_size", Tag: "", Default: 20, Validators: []config.Validator{config.IntValidatorFactory()}},
			config.Parameter{Name: "max_wait_time", Tag: "", Default: 100, Validators: []config.Validator{config.IntValidatorFactory()}},
		},
	}
	return elements
}
// main wires the lokean pipeline together: it parses the config file,
// configures logging, connects an AMQP 1.0 receiver and a Loki sender, and
// forwards log messages between them until an interrupt signal arrives.
func main() {
	flag.Usage = printUsage
	fConfigLocation := flag.String("config", "", "Path to configuration file.")
	flag.Parse()
	if len(*fConfigLocation) == 0 {
		printUsage()
		os.Exit(1)
	}
	// init logger with temporary values until the correct ones
	// can be read from config
	logger, err := logging.NewLogger(logging.ERROR, "/dev/stderr")
	if err != nil {
		fmt.Printf("Failed to open tempLogger: %s\n", err.Error())
		os.Exit(1)
	}
	defer logger.Destroy()
	metadata := getConfigMetadata()
	conf := config.NewINIConfig(metadata, logger)
	err = conf.Parse(*fConfigLocation)
	if err != nil {
		logger.Metadata(map[string]interface{}{
			"error": err,
			"file": *fConfigLocation,
		})
		logger.Error("Failed to parse the config file")
		os.Exit(1)
	}
	// Reconfigure the logger with the values read from the config file.
	logFile := conf.Sections["default"].Options["logFile"].GetString()
	logLevelString := conf.Sections["default"].Options["logLevel"].GetString()
	logLevel, err := parseLogLevel(logLevelString)
	if err != nil {
		logger.Metadata(map[string]interface{}{
			"error": err,
			"logLevel": logLevelString,
		})
		logger.Error("Failed to parse log level from config file")
		os.Exit(1)
	}
	logger.SetLogLevel(logLevel)
	err = logger.SetFile(logFile, 0666)
	if err != nil {
		logger.Metadata(map[string]interface{}{
			"error": err,
			"logFile": logFile,
		})
		// Typo fix: the original message read "log ifle".
		logger.Error("Failed to set proper log file")
		os.Exit(1)
	}
	finish := make(chan bool)
	var wait sync.WaitGroup
	spawnSignalHandler(finish, logger, os.Interrupt)
	// AMQP side: the source of log messages.
	amqp, err := connector.NewAMQP10Connector(conf, logger)
	if err != nil {
		logger.Metadata(map[string]interface{}{
			"error": err,
		})
		logger.Error("Couldn't connect to AMQP")
		return
	}
	err = amqp.Connect()
	if err != nil {
		logger.Metadata(map[string]interface{}{
			"error": err,
		})
		logger.Error("Error while connecting to AMQP")
		return
	}
	amqp.CreateReceiver("lokean/logs", -1)
	amqpReceiver := make(chan interface{})
	amqpSender := make(chan interface{})
	amqp.Start(amqpReceiver, amqpSender)
	// Loki side: the destination for log messages.
	loki, err := connector.NewLokiConnector(conf, logger)
	if err != nil {
		logger.Metadata(map[string]interface{}{
			"error": err,
		})
		logger.Error("Couldn't connect to Loki")
		return
	}
	err = loki.Connect()
	if err != nil {
		logger.Metadata(map[string]interface{}{
			"error": err,
		})
		logger.Error("Couldn't connect to Loki")
		return
	}
	lokiReceiver := make(chan interface{})
	lokiSender := make(chan interface{})
	loki.Start(lokiReceiver, lokiSender)
	defer loki.Disconnect()
	defer amqp.Disconnect()
	// Forward messages from AMQP to Loki until finish is closed.
	logs.Run(amqpReceiver, lokiSender, logger, finish, &wait)
	wait.Wait()
}
|
//-----------------------------------------------Paquetes E Imports-----------------------------------------------------
package AnalisisYComandos
import (
"../Metodos"
"../Variables"
"bytes"
"fmt"
"github.com/gookit/color"
"path/filepath"
"strconv"
"strings"
)
//--------------------------------------------------------Métodos-------------------------------------------------------
// VerificarComandoMount validates the arguments of the "mount" command held in
// Variables.ArregloComandos. With no arguments it lists the mounted partitions;
// otherwise it requires -path-> (pointing at an existing file) and -name->, each
// given at most once and with no extra parameters, before dispatching to
// ComandoMount. Validation failures are reported to the console in red.
func VerificarComandoMount() {
	// Flags and duplicate counters used for parameter validation.
	var Path bool
	var Name bool
	var ParametroExtra bool
	var ListaDeParticiones bool
	var ArregloParametros []string
	var ContadorPath int
	var ContadorName int
	// Initial state: nothing seen yet; reset the parsed-parameter map.
	Path = false
	Name = false
	ParametroExtra = false
	ContadorPath = 0
	ContadorName = 0
	Variables.MapComandos = make(map[string]string)
	// Parameter verification: index 0 is the command itself, so start at 1.
	if len(Variables.ArregloComandos) > 1 {
		ListaDeParticiones = false
		for Contador := 1; Contador <= len(Variables.ArregloComandos)-1; Contador++ {
			// Split each argument into parameter name (index 0) and value (index 1).
			Variables.ArregloComandos[Contador] = Metodos.Trim(Variables.ArregloComandos[Contador])
			ArregloParametros = Metodos.SplitParametro(Variables.ArregloComandos[Contador])
			ArregloParametros[0] = strings.ToLower(ArregloParametros[0])
			ArregloParametros[0] = Metodos.Trim(ArregloParametros[0])
			switch ArregloParametros[0] {
			case "path":
				if ContadorPath == 0 {
					if len(ArregloParametros) > 1 {
						// The path is valid only when the file actually exists.
						ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1])
						ArregloParametros[1] = Metodos.Trim(ArregloParametros[1])
						Path = Metodos.ExisteRuta(ArregloParametros[1])
						if Path {
							Variables.MapComandos["path"] = ArregloParametros[1]
						}
						ContadorPath++
					} else {
						Path = false
					}
				} else {
					// Duplicate -path->; counted so the "too many parameters" branch fires.
					ContadorPath++
				}
			case "name":
				if ContadorName == 0 {
					if len(ArregloParametros) > 1 {
						ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1])
						ArregloParametros[1] = Metodos.Trim(ArregloParametros[1])
						Variables.MapComandos["name"] = Metodos.Trim(ArregloParametros[1])
						Name = true
						ContadorName++
					} else {
						Name = false
					}
				} else {
					// Duplicate -name->.
					ContadorName++
				}
			default:
				// Any parameter other than path/name is rejected.
				ParametroExtra = true
			}
		}
	} else {
		// Bare "mount": show the list of mounted partitions. The flags are forced
		// to their success values so the dispatch condition below passes.
		ListaDeParticiones = true
		Path = true
		Name = true
		ParametroExtra = false
		ContadorPath = 1
		ContadorName = 1
	}
	if Path && Name && !ParametroExtra && ContadorPath == 1 && ContadorName == 1 {
		if ListaDeParticiones {
			ComandoMountListaParticiones()
		} else {
			ComandoMount()
		}
	} else {
		// Report every validation failure that applies, not just the first.
		if ParametroExtra {
			color.HEX("#de4843", false).Println("Parametro Especificado No Valido")
			color.HEX("#de4843", false).Println("Parametros Validos: ")
			color.HEX("#de4843", false).Println("1). -path-> (Obligatorio)")
			color.HEX("#de4843", false).Println("2). -name-> (Obligatorio)")
			fmt.Println("")
		}
		if !Path {
			color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -path-> o")
			color.HEX("#de4843", false).Println("No Existe El Archivo")
			fmt.Println("")
		}
		if !Name {
			color.HEX("#de4843", false).Println("No Se Encuentra el Parametro -name-> o")
			color.HEX("#de4843", false).Println("Existe Error En La Sintaxis")
			fmt.Println("")
		}
		if ContadorPath > 1 || ContadorName > 1 {
			color.HEX("#de4843", false).Println("Existen Demasiados Parametros")
			fmt.Println("")
		}
	}
}
// VerificarNombreParticion reports whether a partition named MapComandos["name"]
// exists on the disk file at MapComandos["path"]. It scans the four MBR slots
// and, when an extended partition (type 'e') is present, every EBR inside it.
// Returns the found flag plus the matching EBR (set for logical partitions) and
// the matching MBR partition entry (set for primary/extended partitions); the
// struct that did not match is left zero-valued, which callers use to tell the
// two cases apart.
func VerificarNombreParticion() (bool, Variables.EBREstructura, Variables.ParticionEstructura) {
	var Nombre string
	var NombreArray1 string
	var NombreArray2 string
	var NombreArray3 string
	var NombreArray4 string
	var NombreExtendida string
	var Bandera bool
	var ExisteNombre bool
	var InicioExtendida int64
	var MBRAuxiliar Variables.MBREstructura
	var EBRAuxiliar Variables.EBREstructura
	var PartAuxiliar Variables.ParticionEstructura
	var ArregloEBR []Variables.EBREstructura
	// Load the MBR from the disk file; Bandera is false when the read fails.
	MBRAuxiliar, Bandera = Metodos.LeerArchivoBinarioArraglo(Variables.MapComandos["path"])
	Nombre = Metodos.Trim(strings.ToLower(Variables.MapComandos["name"]))
	ExisteNombre = false
	InicioExtendida = 0
	// Partition names are fixed-size byte arrays; strip the NUL padding before comparing.
	NombreArray1 = string(bytes.Trim(MBRAuxiliar.Particion1MBR.NamePart[:], "\x00"))
	NombreArray2 = string(bytes.Trim(MBRAuxiliar.Particion2MBR.NamePart[:], "\x00"))
	NombreArray3 = string(bytes.Trim(MBRAuxiliar.Particion3MBR.NamePart[:], "\x00"))
	NombreArray4 = string(bytes.Trim(MBRAuxiliar.Particion4MBR.NamePart[:], "\x00"))
	if Bandera {
		// Check each occupied MBR slot (SizePart != 0). While scanning, remember
		// where the extended partition starts so its EBR chain can be walked below.
		if MBRAuxiliar.Particion1MBR.SizePart != 0 {
			if strings.EqualFold(Nombre, NombreArray1) {
				ExisteNombre = true
				PartAuxiliar = MBRAuxiliar.Particion1MBR
			}
			if MBRAuxiliar.Particion1MBR.TipoPart == 'e' {
				InicioExtendida = MBRAuxiliar.Particion1MBR.InicioPart
			}
		}
		if MBRAuxiliar.Particion2MBR.SizePart != 0 {
			if strings.EqualFold(Nombre, NombreArray2) {
				ExisteNombre = true
				PartAuxiliar = MBRAuxiliar.Particion2MBR
			}
			if MBRAuxiliar.Particion2MBR.TipoPart == 'e' {
				InicioExtendida = MBRAuxiliar.Particion2MBR.InicioPart
			}
		}
		if MBRAuxiliar.Particion3MBR.SizePart != 0 {
			if strings.EqualFold(Nombre, NombreArray3) {
				ExisteNombre = true
				PartAuxiliar = MBRAuxiliar.Particion3MBR
			}
			if MBRAuxiliar.Particion3MBR.TipoPart == 'e' {
				InicioExtendida = MBRAuxiliar.Particion3MBR.InicioPart
			}
		}
		if MBRAuxiliar.Particion4MBR.SizePart != 0 {
			if strings.EqualFold(Nombre, NombreArray4) {
				ExisteNombre = true
				PartAuxiliar = MBRAuxiliar.Particion4MBR
			}
			if MBRAuxiliar.Particion4MBR.TipoPart == 'e' {
				InicioExtendida = MBRAuxiliar.Particion4MBR.InicioPart
			}
		}
		// Walk the logical partitions of the extended partition, if any.
		if InicioExtendida != 0 {
			ArregloEBR = ObtenerEBR(InicioExtendida)
			for Contador := 0; Contador < len(ArregloEBR); Contador++ {
				NombreExtendida = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00"))
				// NOTE(review): this compares the raw map value rather than the
				// trimmed/lowered Nombre used above; EqualFold covers the case
				// difference and the value was trimmed on parse, but confirm the
				// inconsistency is intentional.
				if strings.EqualFold(Variables.MapComandos["name"], NombreExtendida) {
					ExisteNombre = true
					EBRAuxiliar = ArregloEBR[Contador]
				}
			}
		}
	} else {
		color.HEX("#de4843", false).Println("Error Al Ejecutar El Comando mount")
		color.HEX("#de4843", false).Println("No Existe El Disco Indicado")
		fmt.Println("")
	}
	return ExisteNombre, EBRAuxiliar, PartAuxiliar
}
func GenerarIdentificadorDisco() string {
//Variables
var Archivo string
var NombreDisco string
var NuevoId string
var Existe bool
var ArrayDisco []string
var ArrayLetra [49]string
var IdAuxiliar Variables.IDEstructura
//Asignacion
Archivo = ""
NombreDisco = ""
NuevoId = ""
Existe = false
ArrayDisco = make([]string, 0)
IdAuxiliar = Variables.IDEstructura{}
ArrayLetra = [49]string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n" +
"o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "aa", "bb", "cc", "dd", "ee" +
"ff", "gg", "hh", "ii", "jj", "kk", "ll", "mm", "nn" +
"oo", "pp", "qq", "rr", "ss", "tt", "uu", "vv", "ww", "xx", "yy", "zz"}
//Obtener Nombre Archivo
_, Archivo = filepath.Split(Metodos.Trim(Variables.MapComandos["path"]))
//Obtener Nombre Del Disco
ArrayDisco = strings.Split(Archivo, ".")
NombreDisco = ArrayDisco[0]
//Buscar Clave
IdAuxiliar, Existe = Variables.MapIdentificador[NombreDisco]
//Verificar Si Ya Existe El Disco Montado
if Existe {
IdAuxiliar.ParticionID = IdAuxiliar.ParticionID + 1
NuevoId = "vd" + IdAuxiliar.LetraID + strconv.Itoa(IdAuxiliar.ParticionID)
Variables.MapIdentificador[NombreDisco] = IdAuxiliar
} else {
//Verificar Letras Utilizadas
for Disco := range Variables.MapIdentificador {
for Contador := 0; Contador < len(ArrayLetra); Contador++ {
if Variables.MapIdentificador[Disco].LetraID == ArrayLetra[Contador] {
ArrayLetra[Contador] = ""
}
}
}
for Contador := 0; Contador < len(ArrayLetra); Contador++ {
if ArrayLetra[Contador] != "" {
IdAuxiliar.LetraID = ArrayLetra[Contador]
IdAuxiliar.ParticionID = 1
NuevoId = "vd" + IdAuxiliar.LetraID + strconv.Itoa(IdAuxiliar.ParticionID)
Variables.MapIdentificador[NombreDisco] = IdAuxiliar
break
}
}
}
return NuevoId
}
// LlenarLista returns the padding needed to widen a column of the mounted
// partition listing to Size characters: Size-Largo spaces, or "" when the
// content already fills (or overflows) the column.
func LlenarLista(Largo, Size int) string {
	Diferencia := Size - Largo
	// Guard keeps the original loop's behavior (empty string) for non-positive
	// widths; strings.Repeat would panic on a negative count.
	if Diferencia <= 0 {
		return ""
	}
	return strings.Repeat(" ", Diferencia)
}
func ComandoMount(){
//Variables
var CodigoPart string
var ExisteNombre bool
var EstaMontada bool
var EBRAuxiliar Variables.EBREstructura
var PartAuxiliar Variables.ParticionEstructura
var PartMontada Variables.MountEstructura
//Asignacion
CodigoPart = ""
ExisteNombre = false
EstaMontada = false
EBRAuxiliar = Variables.EBREstructura{}
PartAuxiliar = Variables.ParticionEstructura{}
ExisteNombre, EBRAuxiliar, PartAuxiliar = VerificarNombreParticion()
// Verificar Montaje
for Contador := 0; Contador < len(Variables.ArregloParticionesMontadas); Contador++ {
if strings.EqualFold(Variables.MapComandos["name"], Variables.ArregloParticionesMontadas[Contador].NombreMount) && strings.EqualFold(Variables.MapComandos["path"], Variables.ArregloParticionesMontadas[Contador].RutaDiscoMount) {
EstaMontada = true
}
}
if ExisteNombre {
if !EstaMontada {
CodigoPart = GenerarIdentificadorDisco()
if CodigoPart != "" {
if EBRAuxiliar.SizeEBR != 0 {
PartMontada.IdentificadorMount = CodigoPart
PartMontada.EBRMount = EBRAuxiliar
PartMontada.ParticionMount = PartAuxiliar
PartMontada.NombreMount = Metodos.Trim(Variables.MapComandos["name"])
PartMontada.RutaDiscoMount = Metodos.Trim(Variables.MapComandos["path"])
PartMontada.TipoMount = "Logica"
} else {
PartMontada.IdentificadorMount = CodigoPart
PartMontada.EBRMount = EBRAuxiliar
PartMontada.ParticionMount = PartAuxiliar
PartMontada.NombreMount = Metodos.Trim(Variables.MapComandos["name"])
PartMontada.RutaDiscoMount = Metodos.Trim(Variables.MapComandos["path"])
PartMontada.TipoMount = "Primaria"
}
Variables.ArregloParticionesMontadas = append(Variables.ArregloParticionesMontadas, PartMontada)
color.Success.Println("Particion Montada Con Exito!")
fmt.Println("")
} else {
color.HEX("#de4843", false).Println("Error No Existe Espacio Para Montar La Particion")
fmt.Println("")
}
} else {
color.HEX("#de4843", false).Println("Error La Particion Ya Se Encuentra Montada")
fmt.Println("")
}
} else {
color.HEX("#de4843", false).Println("Error No Existe La Particion Indicada")
fmt.Println("")
}
}
// ComandoMountListaParticiones prints a fixed-width table of every mounted
// partition: identifier, disk file path and partition name. Columns are padded
// to 13 and 59 characters respectively with LlenarLista.
func ComandoMountListaParticiones() {
	fmt.Println("")
	fmt.Println("")
	color.HEX("#21C68A", false).Println(" " +
		"Lista De Particiones Montadas")
	fmt.Println("")
	// Table header.
	color.HEX("#2194C6", false).Println(" ------ID------", "--------------------" +
		"-------Ruta-----------------------------", "------------Nombre------------")
	for Contador := 0; Contador < len(Variables.ArregloParticionesMontadas); Contador++ {
		color.HEX("#3CE90D", false).Println(" ",
			Variables.ArregloParticionesMontadas[Contador].IdentificadorMount,
			LlenarLista(len(Variables.ArregloParticionesMontadas[Contador].IdentificadorMount), 13),
			Variables.ArregloParticionesMontadas[Contador].RutaDiscoMount,
			LlenarLista(len(Variables.ArregloParticionesMontadas[Contador].RutaDiscoMount), 59),
			Variables.ArregloParticionesMontadas[Contador].NombreMount)
	}
	fmt.Println("")
	fmt.Println("")
}
|
package appdynamics
import (
"errors"
"fmt"
"github.com/HarryEMartland/terraform-provider-appdynamics/appdynamics/client"
"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/stretchr/testify/assert"
"strings"
"testing"
)
// TestAccAppDTransactionRule_basicRule provisions a single transaction
// detection rule and verifies every configured attribute is reflected in
// Terraform state, plus (with retries) that the rule actually exists in
// AppDynamics. CheckDestroy confirms the rule is gone after teardown.
func TestAccAppDTransactionRule_basicRule(t *testing.T) {
	name := acctest.RandStringFromCharSet(11, acctest.CharSetAlphaNum)
	resourceName := "appdynamics_transaction_detection_rule.test_rule"
	agentType := "NODE_JS_SERVER"
	description := "Health rule created in automated acceptance tests for terraform"
	enabled := "true"
	entryPointType := "NODEJS_WEB"
	method := "GET"
	matchType := "EQUALS"
	resource.Test(t, resource.TestCase{
		Providers: map[string]terraform.ResourceProvider{
			"appdynamics": Provider(),
		},
		Steps: []resource.TestStep{
			{
				// Priority 36 and a single URI "/test".
				Config: transactionRule(name, agentType, description, enabled, 36, entryPointType, matchType, []string{"/test"}, method),
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttr(resourceName, "application_id", applicationIdS),
					resource.TestCheckResourceAttr(resourceName, "name", name),
					resource.TestCheckResourceAttr(resourceName, "agent_type", agentType),
					resource.TestCheckResourceAttr(resourceName, "account_id", accountId),
					resource.TestCheckResourceAttr(resourceName, "description", description),
					resource.TestCheckResourceAttr(resourceName, "enabled", enabled),
					resource.TestCheckResourceAttr(resourceName, "entry_point_type", entryPointType),
					resource.TestCheckResourceAttr(resourceName, "http_uris.#", "1"),
					resource.TestCheckResourceAttr(resourceName, "http_method", method),
					resource.TestCheckResourceAttr(resourceName, "http_uri_match_type", matchType),
					// Remote existence check is retried to absorb controller lag.
					RetryCheck(CheckTransactionRuleExists(resourceName)),
				),
			},
		},
		CheckDestroy: RetryCheck(CheckTransactionRuleDoesNotExist(resourceName)),
	})
}
// TestAccAppDTransactionRule_modify creates a transaction detection rule, then
// applies a second config that changes the description, the URI match type and
// the URI list, and verifies the updated attributes land in state.
func TestAccAppDTransactionRule_modify(t *testing.T) {
	name := acctest.RandStringFromCharSet(11, acctest.CharSetAlphaNum)
	resourceName := "appdynamics_transaction_detection_rule.test_rule"
	agentType := "NODE_JS_SERVER"
	description := "Health rule created in automated acceptance tests for terraform"
	updateDescription := "Health rule updated"
	enabled := "true"
	entryPointType := "NODEJS_WEB"
	method := "GET"
	matchType := "EQUALS"
	updatedMatchType := "CONTAINS"
	resource.Test(t, resource.TestCase{
		Providers: map[string]terraform.ResourceProvider{
			"appdynamics": Provider(),
		},
		Steps: []resource.TestStep{
			{
				// Step 1: initial create; full attribute verification.
				Config: transactionRule(name, agentType, description, enabled, 36, entryPointType, matchType, []string{"/test"}, method),
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttr(resourceName, "application_id", applicationIdS),
					resource.TestCheckResourceAttr(resourceName, "name", name),
					resource.TestCheckResourceAttr(resourceName, "agent_type", agentType),
					resource.TestCheckResourceAttr(resourceName, "account_id", accountId),
					resource.TestCheckResourceAttr(resourceName, "description", description),
					resource.TestCheckResourceAttr(resourceName, "enabled", enabled),
					resource.TestCheckResourceAttr(resourceName, "entry_point_type", entryPointType),
					resource.TestCheckResourceAttr(resourceName, "http_uris.#", "1"),
					resource.TestCheckResourceAttr(resourceName, "http_method", method),
					resource.TestCheckResourceAttr(resourceName, "http_uri_match_type", matchType),
					RetryCheck(CheckTransactionRuleExists(resourceName)),
				),
			},
			{
				// Step 2: in-place update; only the changed attributes are re-checked.
				Config: transactionRule(name, agentType, updateDescription, enabled, 36, entryPointType, updatedMatchType, []string{"/test2"}, method),
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttr(resourceName, "description", updateDescription),
					resource.TestCheckResourceAttr(resourceName, "http_uri_match_type", updatedMatchType),
					resource.TestCheckResourceAttr(resourceName, "http_uris.#", "1"),
				),
			},
		},
		CheckDestroy: RetryCheck(CheckTransactionRuleDoesNotExist(resourceName)),
	})
}
// TestAccAppDTransactionRule_multipleUris creates a rule with two URIs and
// verifies both are stored in state, and that the AppDynamics API represents
// them as a single comma-joined match string ("/health,/user").
func TestAccAppDTransactionRule_multipleUris(t *testing.T) {
	name := acctest.RandStringFromCharSet(11, acctest.CharSetAlphaNum)
	resourceName := "appdynamics_transaction_detection_rule.test_rule"
	resource.Test(t, resource.TestCase{
		Providers: map[string]terraform.ResourceProvider{
			"appdynamics": Provider(),
		},
		Steps: []resource.TestStep{
			{
				Config: basicTransactionRuleMultiple(name),
				Check: resource.ComposeAggregateTestCheckFunc(
					resource.TestCheckResourceAttr(resourceName, "application_id", applicationIdS),
					resource.TestCheckResourceAttr(resourceName, "name", name),
					resource.TestCheckResourceAttr(resourceName, "http_uris.#", "2"),
					// Inspect the remote rule directly: both URIs end up in one
					// comma-separated match string.
					RetryCheck(CheckTransactionRule(resourceName, func(rule *client.RuleScope) {
						assert.Equal(t, "/health,/user", rule.Rule.TxMatchRule.TxCustomRule.MatchConditions[0].HttpMatch.Uri.MatchStrings[0])
					})),
				),
			},
		},
		CheckDestroy: RetryCheck(CheckTransactionRuleDoesNotExist(resourceName)),
	})
}
// CheckTransactionRule returns a Terraform check that looks up the transaction
// detection rule backing resourceName and hands it to callback for custom
// assertions. It fails when the resource is missing from state, the API call
// errors, or the rule cannot be found remotely.
func CheckTransactionRule(resourceName string, callback func(scope *client.RuleScope)) func(state *terraform.State) error {
	return func(state *terraform.State) error {
		rs, ok := state.RootModule().Resources[resourceName]
		if !ok {
			return fmt.Errorf("not found: %s", resourceName)
		}
		rule, found, err := appDClient.GetTransactionDetectionRule(applicationIdI, rs.Primary.ID)
		switch {
		case err != nil:
			return err
		case !found:
			return errors.New("transaction rule not found")
		}
		callback(rule)
		return nil
	}
}
// CheckTransactionRuleDoesNotExist returns a Terraform check that fails if the
// transaction detection rule behind resourceName is still present in
// AppDynamics. It is used as the CheckDestroy step of the acceptance tests.
func CheckTransactionRuleDoesNotExist(resourceName string) func(state *terraform.State) error {
	return func(state *terraform.State) error {
		resourceState, ok := state.RootModule().Resources[resourceName]
		if !ok {
			return fmt.Errorf("not found: %s", resourceName)
		}
		_, found, err := appDClient.GetTransactionDetectionRule(applicationIdI, resourceState.Primary.ID)
		if err != nil {
			// BUGFIX: the original discarded err here and only reported the ID,
			// hiding the real failure cause; wrap it so it is visible and
			// inspectable with errors.Is/As.
			return fmt.Errorf("error finding transaction %s: %w", resourceState.Primary.ID, err)
		}
		if found {
			return fmt.Errorf("transaction rule found: %s", resourceState.Primary.ID)
		}
		return nil
	}
}
// CheckTransactionRuleExists returns a Terraform check that succeeds only when
// the transaction detection rule behind resourceName can be fetched from
// AppDynamics.
func CheckTransactionRuleExists(resourceName string) func(state *terraform.State) error {
	return func(state *terraform.State) error {
		rs, ok := state.RootModule().Resources[resourceName]
		if !ok {
			return fmt.Errorf("not found: %s", resourceName)
		}
		_, found, err := appDClient.GetTransactionDetectionRule(applicationIdI, rs.Primary.ID)
		if err != nil {
			return err
		}
		if found {
			return nil
		}
		return errors.New("transaction rule not found")
	}
}
// transactionRule renders the HCL for a single transaction detection rule with
// the given attributes, prefixed by the shared provider configuration from
// configureConfig(). uris are joined into an HCL list literal; accountId comes
// from the package-level test fixtures.
func transactionRule(name string, agentType string, description string, enabled string, priority int, entryPointType string, uriMatchType string, uris []string, httpMethod string) string {
	return fmt.Sprintf(`
%s
resource "appdynamics_transaction_detection_rule" "test_rule" {
  application_id = var.application_id
  scope_id = var.scope_id
  name = "%s"
  agent_type = "%s"
  account_id = "%s"
  description = "%s"
  enabled = %s
  priority = %d
  entry_point_type = "%s"
  http_uri_match_type = "%s"
  http_uris = ["%s"]
  http_method = "%s"
}
`, configureConfig(), name, agentType, accountId, description, enabled, priority, entryPointType, uriMatchType, strings.Join(uris, "\",\""), httpMethod)
}
// basicTransactionRuleMultiple renders fixed HCL for a rule with two URIs
// ("/health" and "/user"); only the rule name is parameterized. Used by the
// multiple-URI acceptance test.
func basicTransactionRuleMultiple(name string) string {
	return fmt.Sprintf(`
%s
resource "appdynamics_transaction_detection_rule" "test_rule" {
  application_id = var.application_id
  scope_id = var.scope_id
  name = "%s"
  agent_type = "NODE_JS_SERVER"
  account_id = "%s"
  description = "Health rule created in automated acceptance tests for terraform"
  enabled = true
  priority = 36
  entry_point_type = "NODEJS_WEB"
  http_uri_match_type = "EQUALS"
  http_uris = ["/health", "/user"]
  http_method = "GET"
}
`, configureConfig(), name, accountId)
}
|
package timestamp
import "encoding/asn1"
// PKIStatus contains the PKI status code of a time-stamp response
// (PKIStatusInfo, RFC 3161 section 2.4.2).
type PKIStatus int

// PKIStatus values; the iota ordering matches the RFC 3161 encoding (0-5).
const (
	PKIStatusGranted PKIStatus = iota
	PKIStatusGrantedWithMods
	PKIStatusRejection
	PKIStatusWaiting
	PKIStatusRevocationWarning
	PKIStatusRevocationNotification
)

// PKIFailureInfo contains error messages; it is an ASN.1 BIT STRING whose set
// bit positions are the failure codes below (RFC 3161 section 2.4.2).
type PKIFailureInfo asn1.BitString

// Bit positions for PKIFailureInfo.
const (
	PKIFailureInfoBadAlg              = 0  // unrecognized or unsupported Algorithm Identifier
	PKIFailureInfoBadRequest          = 2  // transaction not permitted or supported
	PKIFailureInfoBadDataFormat       = 5  // the data submitted has the wrong format
	PKIFailureInfoTimeNotAvailable    = 14 // the TSA's time source is not available
	PKIFailureInfoUnacceptedPolicy    = 15 // the requested TSA policy is not supported by the TSA.
	PKIFailureInfoUnacceptedExtension = 16 // the requested extension is not supported by the TSA.
	PKIFailureInfoAddInfoNotAvailable = 17 // the additional information requested could not be understood or is not available
	PKIFailureInfoSystemFailure       = 25 // the request cannot be handled due to system failure
)
|
package service
import (
"fmt"
"log"
"net/http"
"github.com/gorilla/mux"
"time"
)
// MiddleHandler wraps inner with CORS response headers and request logging.
// It logs the mux "username" path variable and the request URI both before and
// after the inner handler runs.
func MiddleHandler(inner http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		a := vars["username"]
		fmt.Println("username:", a)
		log.Println("middleware 1", r.RequestURI)
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		// Echo the request's Origin so browsers accept the response from any caller.
		// NOTE(review): this effectively allows every origin — confirm intended.
		w.Header().Set("Access-Control-Allow-Origin", r.Header.Get("Origin"))
		// BUGFIX: the list previously read "GET,POST,PUT,POST" — POST was
		// duplicated. The duplicate is dropped; if DELETE (or another verb) was
		// meant instead, add it here.
		w.Header().Set("Access-Control-Allow-Methods", "GET,POST,PUT")
		w.Header().Set("Access-Control-Allow-Headers", "content-type")
		inner.ServeHTTP(w, r)
		log.Println("middleware 2", r.RequestURI)
	})
}
// Logger wraps inner and logs the method, URI, handler name and elapsed time
// of every request after it has been served.
func Logger(inner http.Handler, name string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		began := time.Now()
		inner.ServeHTTP(w, r)
		log.Printf("%s %s %s %s", r.Method, r.RequestURI, name, time.Since(began))
	})
}
|
package main
import (
"io"
"io/ioutil"
"os"
"strings"
"testing"
)
// TestStartWithNoConfigs expects a non-zero exit code when Start receives no
// config file arguments at all.
func TestStartWithNoConfigs(t *testing.T) {
	if code := Start("test"); code == 0 {
		t.Fatalf("expected program to exit with non-zero, but got %d", code)
	}
}
// TestStartWithWrongPath expects a non-zero exit code when the config path
// points at a file that does not exist.
func TestStartWithWrongPath(t *testing.T) {
	if code := Start("test", "this-is-definitely-not-a-valid-config-file.yaml"); code == 0 {
		t.Fatalf("expected program to exit with non-zero, but got %d", code)
	}
}
// TestStartWithConfig writes a minimal valid config to a temp file and expects
// Start to exit with code 0.
func TestStartWithConfig(t *testing.T) {
	tmp, err := ioutil.TempFile("", "test_*.yaml")
	if err != nil {
		t.Fatal(err)
	}
	// LIFO: close the file first, then remove it.
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	yamlStr := `
jobs:
  job:
    steps:
      - run: echo "test"`
	if _, err := io.Copy(tmp, strings.NewReader(yamlStr)); err != nil {
		t.Fatal(err)
	}
	if code := Start("test", tmp.Name()); code != 0 {
		t.Fatalf("expected program to exit with 0, but got %d", code)
	}
}
// TestStartWithCircularDependencyError feeds Start a config whose two jobs
// need each other and expects a non-zero exit code.
func TestStartWithCircularDependencyError(t *testing.T) {
	tmp, err := ioutil.TempFile("", "test_*.yaml")
	if err != nil {
		t.Fatal(err)
	}
	// LIFO: close the file first, then remove it.
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	yamlStr := `
jobs:
  job1:
    needs:
      - job2
  job2:
    needs:
      - job1`
	if _, err := io.Copy(tmp, strings.NewReader(yamlStr)); err != nil {
		t.Fatal(err)
	}
	if code := Start("test", tmp.Name()); code == 0 {
		t.Fatalf("expected program to exit with non-zero, but got %d", code)
	}
}
// TestStartWithStepError feeds Start a config whose only step fails (exit 1)
// and expects a non-zero exit code.
func TestStartWithStepError(t *testing.T) {
	tmp, err := ioutil.TempFile("", "test_*.yaml")
	if err != nil {
		t.Fatal(err)
	}
	// LIFO: close the file first, then remove it.
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	yamlStr := `
jobs:
  job1:
    steps:
      - run: exit 1`
	if _, err := io.Copy(tmp, strings.NewReader(yamlStr)); err != nil {
		t.Fatal(err)
	}
	if code := Start("test", tmp.Name()); code == 0 {
		t.Fatalf("expected program to exit with non-zero, but got %d", code)
	}
}
|
package tumblr
import (
"github.com/mrjones/oauth"
"net/http"
)
// Tumblr OAuth 1.0a endpoint URLs.
// NOTE(review): these are plain http:// — confirm whether the https://
// endpoints should be used instead.
const (
	requestTokenURL   = "http://www.tumblr.com/oauth/request_token"
	authorizeTokenURL = "http://www.tumblr.com/oauth/authorize"
	accessTokenURL    = "http://www.tumblr.com/oauth/access_token"
)

// Client is tumblr api client. It keeps the consumer key for reference and an
// OAuth consumer that signs all requests.
type Client struct {
	consumerKey string
	oauthClient *oauth.Consumer
}
// NewClient returns a Client wired to Tumblr's OAuth 1.0a endpoints using the
// given consumer key/secret pair.
func NewClient(consumerKey string, consumerSecret string) *Client {
	provider := oauth.ServiceProvider{
		RequestTokenUrl:   requestTokenURL,
		AuthorizeTokenUrl: authorizeTokenURL,
		AccessTokenUrl:    accessTokenURL,
	}
	return &Client{
		consumerKey: consumerKey,
		oauthClient: oauth.NewConsumer(consumerKey, consumerSecret, provider),
	}
}
// Get sends an OAuth-signed GET request to the given Tumblr API url with
// userParams as query parameters.
func (c *Client) Get(url string, userParams map[string]string, token *oauth.AccessToken) (resp *http.Response, err error) {
	return c.oauthClient.Get(url, userParams, token)
}

// Post sends an OAuth-signed POST request to the given Tumblr API url with
// userParams as form parameters.
func (c *Client) Post(url string, userParams map[string]string, token *oauth.AccessToken) (resp *http.Response, err error) {
	return c.oauthClient.Post(url, userParams, token)
}

// GetConsumerKey returns the consumer key the client was created with.
// NOTE(review): Go convention would name this ConsumerKey, but renaming would
// break existing callers.
func (c *Client) GetConsumerKey() string {
	return c.consumerKey
}

// GetRequestTokenAndURL fetches an OAuth request token together with the URL
// the user must visit to authorize it; callbackURL is the post-login redirect.
func (c *Client) GetRequestTokenAndURL(callbackURL string) (rtoken *oauth.RequestToken, loginURL string, err error) {
	return c.oauthClient.GetRequestTokenAndUrl(callbackURL)
}

// AuthorizeToken exchanges an authorized request token and its verification
// code for a Tumblr access token.
func (c *Client) AuthorizeToken(rtoken *oauth.RequestToken, verificationCode string) (atoken *oauth.AccessToken, err error) {
	return c.oauthClient.AuthorizeToken(rtoken, verificationCode)
}
|
package service
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
)
// MysqlService is the shared MySQL pool instance used across the service package.
var MysqlService = new(MysqlPool)

// MysqlPool wraps a single xorm engine acting as the MySQL connection pool.
type MysqlPool struct {
	DBMysql *xorm.Engine
}
// InitMysqlPool creates the xorm engine from config["mysql"] (host, port,
// database, username, password), caps the pool at 5 open / 2 idle connections,
// and verifies connectivity with a ping.
func (M *MysqlPool) InitMysqlPool(config map[string]map[string]string) (err error) {
	mysqlConf := config["mysql"]
	// DSN: user:password@tcp(host:port)/database?charset=utf8
	dsn := mysqlConf["username"] + ":" + mysqlConf["password"] +
		"@tcp(" + mysqlConf["host"] + ":" + mysqlConf["port"] + ")/" +
		mysqlConf["database"] + "?charset=utf8"
	M.DBMysql, err = xorm.NewEngine("mysql", dsn)
	fmt.Println("初始化mysql引擎")
	if err != nil {
		return err
	}
	M.DBMysql.SetMaxOpenConns(5)
	M.DBMysql.SetMaxIdleConns(2)
	return M.DBMysql.Ping()
}
// GetClient returns the underlying xorm engine (nil until InitMysqlPool has
// succeeded).
func (M *MysqlPool) GetClient() *xorm.Engine {
	return M.DBMysql
}
|
package assembler
import (
"bufio"
"github.com/bonjourmalware/melody/internal/engine"
"github.com/bonjourmalware/melody/internal/events"
"github.com/google/gopacket"
"github.com/google/gopacket/tcpassembly"
"github.com/google/gopacket/tcpassembly/tcpreader"
"io"
"net/http"
)
// HTTPStreamFactory implements tcpassembly.StreamFactory, producing one
// HTTPStream per reassembled TCP flow.
type HTTPStreamFactory struct{}

// HTTPStream will handle the actual decoding of http requests for a single
// network/transport flow pair.
type HTTPStream struct {
	net, transport gopacket.Flow
	r              tcpreader.ReaderStream
}
// New creates a new HTTPStream for the given flow and starts draining it in
// the background before handing the reader side back to the assembler.
func (h *HTTPStreamFactory) New(net, transport gopacket.Flow) tcpassembly.Stream {
	stream := &HTTPStream{
		net:       net,
		transport: transport,
		r:         tcpreader.NewReaderStream(),
	}
	// Important: data from the reader stream must be consumed, so start the
	// reader goroutine before returning.
	go stream.run()
	// ReaderStream implements tcpassembly.Stream, so return a pointer to it.
	return &stream.r
}
// run consumes the reassembled TCP stream, parsing HTTP requests and emitting
// an event on engine.EventChan for each one. It must keep reading until EOF so
// tcpreader can release its buffers.
func (h *HTTPStream) run() {
	buf := bufio.NewReader(&h.r)
	for {
		req, err := http.ReadRequest(buf)
		if err == io.EOF {
			// We must read until we see an EOF... very important!
			return
		}
		if err != nil {
			// Malformed or non-HTTP data: skip it but keep draining the stream.
			// (The original code expressed this as an empty else-if branch.)
			continue
		}
		// The event error is intentionally ignored: one bad request must not
		// stop draining the stream.
		ev, _ := events.NewHTTPEvent(req, h.net, h.transport)
		engine.EventChan <- ev
	}
}
|
package main
/**
* Definition for a Node.
* type Node struct {
* Val int
* Children []*Node
* }
*/
// Node is an N-ary tree node holding a value and an ordered child list.
type Node struct {
	Val      int
	Children []*Node
}

// preorder returns the node values in root-first (preorder) order, implemented
// iteratively with an explicit stack instead of recursion.
func preorder(root *Node) []int {
	var res []int
	if root == nil {
		return res
	}
	stack := []*Node{root}
	for len(stack) > 0 {
		// Pop the top node and record its value.
		top := len(stack) - 1
		node := stack[top]
		stack = stack[:top]
		res = append(res, node.Val)
		// Push children right-to-left so the leftmost child is visited first.
		for i := len(node.Children) - 1; i >= 0; i-- {
			stack = append(stack, node.Children[i])
		}
	}
	return res
}
// main builds a small sample tree (1 -> [3 -> [5, 6], 2, 4]) and runs the
// preorder traversal over it.
func main() {
	tree := &Node{Val: 1, Children: []*Node{}}
	tree.Children = append(tree.Children,
		&Node{Val: 3, Children: []*Node{
			{Val: 5, Children: []*Node{}},
			{Val: 6, Children: []*Node{}},
		}},
		&Node{Val: 2, Children: []*Node{}},
		&Node{Val: 4, Children: []*Node{}},
	)
	preorder(tree)
}
|
package main
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
)
// FetchAWSData represents the set of methods used to interact with the AWS
// API; EC2Client satisfies it via GetSnapshots.
type FetchAWSData interface {
	GetSnapshots(awsFilters []Filters) (*ec2.DescribeSnapshotsOutput, error)
}
// NewEC2Client creates an AWS session from awsCreds and returns an EC2Client
// wrapping an initialized EC2 service client. Static credentials override the
// default chain when both keys are set, and RoleARN, when present, is assumed
// via STS before the client is built.
func (hub *Hub) NewEC2Client(awsCreds *AWSCreds) (*EC2Client, error) {
	cfg := aws.Config{
		Region: aws.String(awsCreds.Region),
	}
	if awsCreds.AccessKey != "" && awsCreds.SecretKey != "" {
		cfg.Credentials = credentials.NewStaticCredentials(awsCreds.AccessKey, awsCreds.SecretKey, "")
	}
	sess, err := session.NewSessionWithOptions(session.Options{Config: cfg})
	if err != nil {
		hub.logger.Errorf("Error creating AWS Session %s", err)
		return nil, fmt.Errorf("could not create aws session")
	}
	var svc *ec2.EC2
	if awsCreds.RoleARN == "" {
		svc = ec2.New(sess)
	} else {
		hub.logger.Debugf("Assuming Role: %v", awsCreds.RoleARN)
		svc = ec2.New(sess, &aws.Config{
			Credentials: stscreds.NewCredentials(sess, awsCreds.RoleARN),
		})
	}
	return &EC2Client{client: svc}, nil
}
// GetSnapshots converts awsFilters into EC2 API filters and returns the
// response of the DescribeSnapshots API call. With no filters the request is
// sent with an empty input (no Filters field set).
func (e *EC2Client) GetSnapshots(awsFilters []Filters) (*ec2.DescribeSnapshotsOutput, error) {
	params := &ec2.DescribeSnapshotsInput{}
	if len(awsFilters) > 0 {
		filters := make([]*ec2.Filter, 0, len(awsFilters))
		for _, f := range awsFilters {
			filters = append(filters, &ec2.Filter{
				Name:   aws.String(f.Name),
				Values: []*string{aws.String(f.Value)},
			})
		}
		params = &ec2.DescribeSnapshotsInput{Filters: filters}
	}
	return e.client.DescribeSnapshots(params)
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filesort
import (
"math/rand"
"os"
"testing"
"time"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/stretchr/testify/require"
)
// TestLessThan exercises lessThan over single- and two-column keys, covering
// both ascending and descending sort directions per column.
func TestLessThan(t *testing.T) {
	t.Parallel()
	sc := new(stmtctx.StatementContext)
	d0 := types.NewDatum(0)
	d1 := types.NewDatum(1)
	cases := []struct {
		i      []types.Datum
		j      []types.Datum
		byDesc []bool
		ret    bool
	}{
		{[]types.Datum{d0}, []types.Datum{d0}, []bool{false}, false},
		{[]types.Datum{d0}, []types.Datum{d1}, []bool{false}, true},
		{[]types.Datum{d1}, []types.Datum{d0}, []bool{false}, false},
		{[]types.Datum{d0}, []types.Datum{d0}, []bool{true}, false},
		{[]types.Datum{d0}, []types.Datum{d1}, []bool{true}, false},
		{[]types.Datum{d1}, []types.Datum{d0}, []bool{true}, true},
		{[]types.Datum{d0, d0}, []types.Datum{d1, d1}, []bool{false, false}, true},
		{[]types.Datum{d0, d1}, []types.Datum{d1, d1}, []bool{false, false}, true},
		{[]types.Datum{d0, d0}, []types.Datum{d1, d1}, []bool{false, false}, true},
		{[]types.Datum{d0, d0}, []types.Datum{d0, d1}, []bool{false, false}, true},
		{[]types.Datum{d0, d1}, []types.Datum{d0, d1}, []bool{false, false}, false},
		{[]types.Datum{d0, d1}, []types.Datum{d0, d0}, []bool{false, false}, false},
		{[]types.Datum{d1, d0}, []types.Datum{d0, d1}, []bool{false, false}, false},
		{[]types.Datum{d1, d1}, []types.Datum{d0, d1}, []bool{false, false}, false},
		{[]types.Datum{d1, d1}, []types.Datum{d0, d0}, []bool{false, false}, false},
	}
	for _, tc := range cases {
		got, err := lessThan(sc, tc.i, tc.j, tc.byDesc)
		require.NoError(t, err)
		require.Equal(t, tc.ret, got)
	}
}
// TestInMemory feeds fewer rows than one buffer can hold into a single-worker
// FileSorter (so no spill files are needed), drains the output, and verifies
// the keys come back in non-decreasing order under randomly chosen per-column
// sort directions.
func TestInMemory(t *testing.T) {
	t.Parallel()
	seed := rand.NewSource(time.Now().UnixNano())
	r := rand.New(seed)
	sc := new(stmtctx.StatementContext)
	keySize := r.Intn(10) + 1 // random int in range [1, 10]
	valSize := r.Intn(20) + 1 // random int in range [1, 20]
	bufSize := 40             // hold up to 40 items per file
	byDesc := make([]bool, keySize)
	for i := range byDesc {
		byDesc[i] = r.Intn(2) == 0
	}
	var (
		err    error
		fs     *FileSorter
		pkey   []types.Datum
		key    []types.Datum
		tmpDir string
		ret    bool
	)
	tmpDir, err = os.MkdirTemp("", "util_filesort_test")
	require.NoError(t, err)
	fsBuilder := new(Builder)
	fs, err = fsBuilder.SetSC(sc).SetSchema(keySize, valSize).SetBuf(bufSize).SetWorkers(1).SetDesc(byDesc).SetDir(tmpDir).Build()
	require.NoError(t, err)
	defer func() {
		err := fs.Close()
		require.NoError(t, err)
	}()
	// Strictly fewer rows than the buffer holds, so everything stays in memory.
	nRows := r.Intn(bufSize-1) + 1 // random int in range [1, bufSize - 1]
	for i := 1; i <= nRows; i++ {
		err = fs.Input(nextRow(r, keySize, valSize))
		require.NoError(t, err)
	}
	// Drain: each key must not compare less than its predecessor.
	pkey, _, _, err = fs.Output()
	require.NoError(t, err)
	for i := 1; i < nRows; i++ {
		key, _, _, err = fs.Output()
		require.NoError(t, err)
		ret, err = lessThan(sc, key, pkey, byDesc)
		require.NoError(t, err)
		require.False(t, ret)
		pkey = key
	}
}
// TestMultipleFiles sorts enough rows to spill across several external
// files, after first walking the Builder through every invalid
// configuration to pin down its validation error messages.
func TestMultipleFiles(t *testing.T) {
	t.Parallel()
	seed := rand.NewSource(time.Now().UnixNano())
	r := rand.New(seed)
	sc := new(stmtctx.StatementContext)
	keySize := r.Intn(10) + 1 // random int in range [1, 10]
	valSize := r.Intn(20) + 1 // random int in range [1, 20]
	bufSize := 40             // hold up to 40 items per file
	byDesc := make([]bool, keySize) // random sort direction per key column
	for i := range byDesc {
		byDesc[i] = r.Intn(2) == 0
	}
	var (
		err    error
		fs     *FileSorter
		pkey   []types.Datum // previously fetched key, for order checking
		key    []types.Datum
		tmpDir string
		ret    bool
	)
	tmpDir, err = os.MkdirTemp("", "util_filesort_test")
	require.NoError(t, err)
	fsBuilder := new(Builder)
	// Test for basic function.
	// Each Build below must fail until the missing configuration piece is
	// supplied; the order of checks inside Build is asserted implicitly.
	_, err = fsBuilder.Build()
	require.EqualError(t, err, "StatementContext is nil")
	fsBuilder.SetSC(sc)
	_, err = fsBuilder.Build()
	require.EqualError(t, err, "key size is not positive")
	fsBuilder.SetDesc(byDesc)
	_, err = fsBuilder.Build()
	require.EqualError(t, err, "mismatch in key size and byDesc slice")
	fsBuilder.SetSchema(keySize, valSize)
	_, err = fsBuilder.Build()
	require.EqualError(t, err, "buffer size is not positive")
	fsBuilder.SetBuf(bufSize)
	_, err = fsBuilder.Build()
	require.EqualError(t, err, "tmpDir does not exist")
	fsBuilder.SetDir(tmpDir)
	fs, err = fsBuilder.SetWorkers(1).Build()
	require.NoError(t, err)
	defer func() {
		err := fs.Close()
		require.NoError(t, err)
	}()
	// More rows than one buffer holds, forcing multiple external files.
	nRows := (r.Intn(bufSize) + 1) * (r.Intn(10) + 2)
	for i := 1; i <= nRows; i++ {
		err = fs.Input(nextRow(r, keySize, valSize))
		require.NoError(t, err)
	}
	// Verify rows come back in non-decreasing order w.r.t. byDesc.
	pkey, _, _, err = fs.Output()
	require.NoError(t, err)
	for i := 1; i < nRows; i++ {
		key, _, _, err = fs.Output()
		require.NoError(t, err)
		ret, err = lessThan(sc, key, pkey, byDesc)
		require.NoError(t, err)
		require.False(t, ret)
		pkey = key
	}
}
// TestMultipleWorkers is the multi-file sort scenario again, but with
// four concurrent sorting workers, checking that Output still yields a
// non-decreasing sequence under the chosen per-column directions.
func TestMultipleWorkers(t *testing.T) {
	t.Parallel()
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	sc := new(stmtctx.StatementContext)
	keySize := rng.Intn(10) + 1 // random int in range [1, 10]
	valSize := rng.Intn(20) + 1 // random int in range [1, 20]
	bufSize := 40               // hold up to 40 items per file
	byDesc := make([]bool, keySize)
	for i := range byDesc {
		byDesc[i] = rng.Intn(2) == 0
	}
	tmpDir, err := os.MkdirTemp("", "util_filesort_test")
	require.NoError(t, err)
	sorter, err := new(Builder).
		SetSC(sc).
		SetSchema(keySize, valSize).
		SetBuf(bufSize).
		SetWorkers(4).
		SetDesc(byDesc).
		SetDir(tmpDir).
		Build()
	require.NoError(t, err)
	defer func() {
		require.NoError(t, sorter.Close())
	}()
	nRows := (rng.Intn(bufSize) + 1) * (rng.Intn(10) + 2)
	for i := 0; i < nRows; i++ {
		require.NoError(t, sorter.Input(nextRow(rng, keySize, valSize)))
	}
	prev, _, _, err := sorter.Output()
	require.NoError(t, err)
	for i := 1; i < nRows; i++ {
		cur, _, _, err := sorter.Output()
		require.NoError(t, err)
		less, err := lessThan(sc, cur, prev, byDesc)
		require.NoError(t, err)
		require.False(t, less)
		prev = cur
	}
}
// TestClose verifies FileSorter.Close: it removes the temporary
// directory, subsequent Input/Output calls fail with a "closed" error,
// and calling Close a second time is a harmless no-op.
func TestClose(t *testing.T) {
	t.Parallel()
	seed := rand.NewSource(time.Now().UnixNano())
	r := rand.New(seed)
	sc := new(stmtctx.StatementContext)
	keySize := 2
	valSize := 2
	bufSize := 40
	byDesc := []bool{false, false}
	var (
		err     error
		fs0     *FileSorter
		fs1     *FileSorter
		tmpDir0 string
		tmpDir1 string
		errmsg  = "FileSorter has been closed"
	)
	// Prepare two FileSorter instances for tests
	fsBuilder := new(Builder)
	tmpDir0, err = os.MkdirTemp("", "util_filesort_test")
	require.NoError(t, err)
	fs0, err = fsBuilder.SetSC(sc).SetSchema(keySize, valSize).SetBuf(bufSize).SetWorkers(1).SetDesc(byDesc).SetDir(tmpDir0).Build()
	require.NoError(t, err)
	defer func() {
		err := fs0.Close()
		require.NoError(t, err)
	}()
	tmpDir1, err = os.MkdirTemp("", "util_filesort_test")
	require.NoError(t, err)
	fs1, err = fsBuilder.SetSC(sc).SetSchema(keySize, valSize).SetBuf(bufSize).SetWorkers(1).SetDesc(byDesc).SetDir(tmpDir1).Build()
	require.NoError(t, err)
	defer func() {
		err := fs1.Close()
		require.NoError(t, err)
	}()
	// 1. Close after some Input
	err = fs0.Input(nextRow(r, keySize, valSize))
	require.NoError(t, err)
	err = fs0.Close()
	require.NoError(t, err)
	// The temporary directory must be gone after Close.
	_, err = os.Stat(tmpDir0)
	require.True(t, os.IsNotExist(err))
	_, _, _, err = fs0.Output()
	require.EqualError(t, err, errmsg)
	err = fs0.Input(nextRow(r, keySize, valSize))
	require.EqualError(t, err, errmsg)
	// Double Close does not error.
	err = fs0.Close()
	require.NoError(t, err)
	// 2. Close after some Output
	err = fs1.Input(nextRow(r, keySize, valSize))
	require.NoError(t, err)
	err = fs1.Input(nextRow(r, keySize, valSize))
	require.NoError(t, err)
	_, _, _, err = fs1.Output()
	require.NoError(t, err)
	err = fs1.Close()
	require.NoError(t, err)
	_, err = os.Stat(tmpDir1)
	require.True(t, os.IsNotExist(err))
	_, _, _, err = fs1.Output()
	require.EqualError(t, err, errmsg)
	err = fs1.Input(nextRow(r, keySize, valSize))
	require.EqualError(t, err, errmsg)
	err = fs1.Close()
	require.NoError(t, err)
}
// TestMismatchedUsage verifies API misuse handling: Output after all rows
// are consumed returns a nil key without error, while Input after Output
// fails with "call input after output".
func TestMismatchedUsage(t *testing.T) {
	t.Parallel()
	seed := rand.NewSource(time.Now().UnixNano())
	r := rand.New(seed)
	sc := new(stmtctx.StatementContext)
	keySize := 2
	valSize := 2
	bufSize := 40
	byDesc := []bool{false, false}
	var (
		err    error
		fs0    *FileSorter
		fs1    *FileSorter
		key    []types.Datum
		tmpDir string
		errmsg = "call input after output"
	)
	// Prepare two FileSorter instances for tests
	fsBuilder := new(Builder)
	tmpDir, err = os.MkdirTemp("", "util_filesort_test")
	require.NoError(t, err)
	fs0, err = fsBuilder.SetSC(sc).SetSchema(keySize, valSize).SetBuf(bufSize).SetWorkers(1).SetDesc(byDesc).SetDir(tmpDir).Build()
	require.NoError(t, err)
	defer func() {
		err := fs0.Close()
		require.NoError(t, err)
	}()
	tmpDir, err = os.MkdirTemp("", "util_filesort_test")
	require.NoError(t, err)
	fs1, err = fsBuilder.SetSC(sc).SetSchema(keySize, valSize).SetBuf(bufSize).SetWorkers(1).SetDesc(byDesc).SetDir(tmpDir).Build()
	require.NoError(t, err)
	defer func() {
		err := fs1.Close()
		require.NoError(t, err)
	}()
	// 1. call Output after fetched all rows
	err = fs0.Input(nextRow(r, keySize, valSize))
	require.NoError(t, err)
	key, _, _, err = fs0.Output()
	require.NoError(t, err)
	require.NotNil(t, key)
	// Exhausted: a nil key signals end of data, not an error.
	key, _, _, err = fs0.Output()
	require.NoError(t, err)
	require.Nil(t, key)
	// 2. call Input after Output
	err = fs1.Input(nextRow(r, keySize, valSize))
	require.NoError(t, err)
	key, _, _, err = fs1.Output()
	require.NoError(t, err)
	require.NotNil(t, key)
	err = fs1.Input(nextRow(r, keySize, valSize))
	require.EqualError(t, err, errmsg)
}
|
package middleware
import (
"bytes"
"net/http"
"testing"
"github.com/root-gg/utils"
"github.com/stretchr/testify/require"
"github.com/root-gg/plik/server/common"
"github.com/root-gg/plik/server/context"
)
// TestPaginateDefault checks that a request without paging parameters
// yields the default paging query: limit 20 and no order/cursors.
func TestPaginateDefault(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestOK(t, recorder)
	paging := ctx.GetPagingQuery()
	require.NotNil(t, paging, "missing paging query")
	require.Equal(t, 20, *paging.Limit, "invalid limit")
	require.Nil(t, paging.Order, "invalid order")
	require.Nil(t, paging.After, "invalid after")
	require.Nil(t, paging.Before, "invalid before")
}
// TestPaginate checks that limit, order and the after cursor are parsed
// from the URL query string.
func TestPaginate(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something?limit=1&order=asc&after=after", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestOK(t, recorder)
	paging := ctx.GetPagingQuery()
	require.NotNil(t, paging, "missing paging query")
	require.Equal(t, 1, *paging.Limit, "invalid limit")
	require.Equal(t, "asc", *paging.Order, "invalid order")
	require.Equal(t, "after", *paging.After, "invalid after")
	require.Nil(t, paging.Before, "invalid before")
}
// TestPaginateBefore checks that the before cursor is parsed from the
// URL query string and that the after cursor stays unset.
func TestPaginateBefore(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something?limit=1&order=asc&before=before", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestOK(t, recorder)
	paging := ctx.GetPagingQuery()
	require.NotNil(t, paging, "missing paging query")
	require.Equal(t, 1, *paging.Limit, "invalid limit")
	require.Equal(t, "asc", *paging.Order, "invalid order")
	require.Equal(t, "before", *paging.Before, "invalid before")
	require.Nil(t, paging.After, "invalid after")
}
// TestPaginateHeader checks that paging parameters supplied through the
// X-Plik-Paging header are honored.
func TestPaginateHeader(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	headerQuery := common.NewPagingQuery().WithLimit(1).WithOrder("asc").WithAfterCursor("after")
	request.Header.Set("X-Plik-Paging", utils.Sdump(headerQuery))
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestOK(t, recorder)
	paging := ctx.GetPagingQuery()
	require.NotNil(t, paging, "missing paging query")
	require.Equal(t, 1, *paging.Limit, "invalid limit")
	require.Equal(t, "asc", *paging.Order, "invalid order")
	require.Equal(t, "after", *paging.After, "invalid after")
	require.Nil(t, paging.Before, "invalid before")
}
// TestPaginateInvalidHeader checks that an unparsable X-Plik-Paging
// header is rejected with a 400.
func TestPaginateInvalidHeader(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	request.Header.Set("X-Plik-Paging", "blah blah blah")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestBadRequest(t, recorder, "invalid paging header")
}
// TestPaginateInvalidLimit checks that a negative limit is rejected
// with a 400.
func TestPaginateInvalidLimit(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something?limit=-1", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestBadRequest(t, recorder, "invalid limit")
}
// TestPaginateInvalidLimitParsing checks that a non-numeric limit is
// rejected with a 400.
func TestPaginateInvalidLimitParsing(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something?limit=limit", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestBadRequest(t, recorder, "invalid limit")
}
// TestPaginateInvalidOrder checks that an unknown sort order is rejected
// with a 400.
func TestPaginateInvalidOrder(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something?order=blah", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestBadRequest(t, recorder, "invalid order")
}
// TestPaginateBothCursors checks that supplying both the before and
// after cursors at once is rejected with a 400.
func TestPaginateBothCursors(t *testing.T) {
	ctx := newTestingContext(common.NewConfiguration())
	request, err := http.NewRequest("GET", "/something?before=before&after=after", &bytes.Buffer{})
	require.NoError(t, err, "unable to create new request")
	recorder := ctx.NewRecorder(request)
	Paginate(ctx, common.DummyHandler).ServeHTTP(recorder, request)
	context.TestBadRequest(t, recorder, "both before and after cursors set")
}
|
package main
import "fmt"
// main demonstrates Go arrays: a fixed size set at declaration and all
// elements sharing one type. Unassigned slots keep the zero value.
func main() {
	var nums [5]int
	nums[0] = 10
	nums[4] = 20
	fmt.Printf("%#v", nums)
}
|
/*
* @lc app=leetcode.cn id=1 lang=golang
*
* [1] 两数之和
*/
package solution
// @lc code=start
// twoSum returns the indices of the two numbers in nums that add up to
// target, earlier index first, or nil when no such pair exists.
// Single pass with a value→index map: O(n) time, O(n) space.
func twoSum(nums []int, target int) []int {
	seen := make(map[int]int, len(nums))
	for idx, v := range nums {
		if j, ok := seen[target-v]; ok {
			return []int{j, idx}
		}
		seen[v] = idx
	}
	return nil
}
// @lc code=end
|
package main
import (
"fmt"
"github.com/liuzl/unidecode"
)
// main prints a fixed banner, then ASCII transliterations of sample
// Unicode strings produced by the unidecode package.
func main() {
	fmt.Println("vim-go")
	for _, sample := range []string{`乾隆爷的乾儿子是谁?`, "multiply by?"} {
		fmt.Println(unidecode.Unidecode(sample))
	}
}
|
package cosmos
import "testing"
// getDummyClient builds a Client from a static test connection string,
// deliberately ignoring any construction error.
func getDummyClient() *Client {
	c, _ := New("AccountEndpoint=https://cosmos-url;AccountKey=abc")
	return c
}
// TestEmptyConnString verifies that New rejects an empty connection string.
func TestEmptyConnString(t *testing.T) {
	if _, err := New(""); err == nil {
		t.Fatal("error should not be nil")
	}
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package globalconn_test
import (
"fmt"
"math"
"runtime"
"sync"
"sync/atomic"
"testing"
"github.com/cznic/mathutil"
"github.com/pingcap/tidb/util/globalconn"
"github.com/stretchr/testify/assert"
)
// TestAutoIncPool checks AutoIncPool: sequential Get values with
// wrap-around at the pool size, exhaustion after TryCnt failed probes,
// and which explicitly Put IDs a subsequent Get can find again.
func TestAutoIncPool(t *testing.T) {
	assert := assert.New(t)
	const SizeInBits uint32 = 8
	const Size uint64 = 1 << SizeInBits
	const TryCnt = 4
	var (
		pool globalconn.AutoIncPool
		val  uint64
		ok   bool
		i    uint64
	)
	pool.InitExt(Size, true, TryCnt)
	assert.Equal(int(Size), pool.Cap())
	assert.Equal(0, pool.Len())
	// get all.
	for i = 1; i < Size; i++ {
		val, ok = pool.Get()
		assert.True(ok)
		assert.Equal(i, val)
	}
	val, ok = pool.Get()
	assert.True(ok)
	assert.Equal(uint64(0), val) // wrap around to 0
	assert.Equal(int(Size), pool.Len())
	_, ok = pool.Get() // exhausted. try TryCnt times, lastID is added to 0+TryCnt.
	assert.False(ok)
	// An ID released at TryCnt+1 is found again by Get...
	nextVal := uint64(TryCnt + 1)
	pool.Put(nextVal)
	val, ok = pool.Get()
	assert.True(ok)
	assert.Equal(nextVal, val)
	// ...as is one TryCnt-1 further on...
	nextVal += TryCnt - 1
	pool.Put(nextVal)
	val, ok = pool.Get()
	assert.True(ok)
	assert.Equal(nextVal, val)
	// ...but one TryCnt+1 further is beyond the probes and is missed.
	nextVal += TryCnt + 1
	pool.Put(nextVal)
	_, ok = pool.Get()
	assert.False(ok)
}
// TestLockFreePoolBasic exercises a pool initialized full: drain it,
// refill it to capacity, and drain it again, checking FIFO order and
// the full/empty boundary conditions each time.
func TestLockFreePoolBasic(t *testing.T) {
	assert := assert.New(t)
	const SizeInBits uint32 = 8
	const Size uint64 = 1<<SizeInBits - 1
	var pool globalconn.LockFreeCircularPool
	pool.InitExt(uint32(1<<SizeInBits), math.MaxUint32)
	assert.Equal(int(Size), pool.Cap())
	assert.Equal(int(Size), pool.Len())
	// drain pops every value, checks FIFO order, then confirms emptiness.
	drain := func() {
		for want := uint64(1); want <= Size; want++ {
			val, ok := pool.Get()
			assert.True(ok)
			assert.Equal(want, val)
		}
		_, ok := pool.Get()
		assert.False(ok)
		assert.Equal(0, pool.Len())
	}
	// Initially full: drain all values.
	drain()
	// Refill to capacity; one extra Put must fail.
	for v := uint64(1); v <= Size; v++ {
		assert.True(pool.Put(v))
	}
	assert.False(pool.Put(0))
	assert.Equal(int(Size), pool.Len())
	// Drain again after the manual refill.
	drain()
}
// TestLockFreePoolInitEmpty exercises a pool initialized empty
// (fillCount 0): fill it to capacity, then drain it in FIFO order.
func TestLockFreePoolInitEmpty(t *testing.T) {
	assert := assert.New(t)
	const SizeInBits uint32 = 8
	const Size uint64 = 1<<SizeInBits - 1 // one slot is reserved, so cap is 2^n-1
	var (
		pool globalconn.LockFreeCircularPool
		val  uint64
		ok   bool
		i    uint64
	)
	pool.InitExt(uint32(1<<SizeInBits), 0)
	assert.Equal(int(Size), pool.Cap())
	assert.Equal(0, pool.Len())
	// put to full.
	for i = 1; i <= Size; i++ {
		ok = pool.Put(i)
		assert.True(ok)
	}
	// One more Put must be rejected.
	ok = pool.Put(0)
	assert.False(ok)
	assert.Equal(int(Size), pool.Len())
	// get all.
	for i = 1; i <= Size; i++ {
		val, ok = pool.Get()
		assert.True(ok)
		assert.Equal(i, val)
	}
	_, ok = pool.Get()
	assert.False(ok)
	assert.Equal(0, pool.Len())
}
// Compile-time check that *LockBasedCircularPool satisfies IDPool.
var _ globalconn.IDPool = (*LockBasedCircularPool)(nil)

// LockBasedCircularPool implements IDPool by lock-based manner.
// For benchmark purpose.
//
// head and tail are free-running counters masked with cap-1 on access;
// one slot stays unused so head==tail means empty and tail-head==cap-1
// means full.
type LockBasedCircularPool struct {
	_    uint64 // align to 64bits
	head uint32 // first available slot
	_    uint32 // padding to avoid false sharing
	tail uint32 // first empty slot. `head==tail` means empty.
	_    uint32 // padding to avoid false sharing

	cap   uint32
	mu    *sync.Mutex // guards head, tail and slots
	slots []uint32
}
// Init initializes the pool with the given capacity and no pre-filled IDs.
func (p *LockBasedCircularPool) Init(size uint64) {
	p.InitExt(uint32(size), 0)
}
// InitExt initializes the pool with size slots and pre-fills it with
// fillCount sequential IDs starting from 1. fillCount is capped at
// size-1 because one slot must stay empty to distinguish full from
// empty; the remaining slots get MaxUint32 as a sentinel.
func (p *LockBasedCircularPool) InitExt(size uint32, fillCount uint32) {
	p.mu = &sync.Mutex{}
	p.cap = size
	p.slots = make([]uint32, p.cap)
	fillCount = mathutil.MinUint32(p.cap-1, fillCount)
	var i uint32
	for i = 0; i < fillCount; i++ {
		p.slots[i] = i + 1
	}
	for ; i < p.cap; i++ {
		p.slots[i] = math.MaxUint32
	}
	p.head = 0
	p.tail = fillCount
}
// Len returns the number of IDs currently available in the pool.
func (p *LockBasedCircularPool) Len() int {
	p.mu.Lock()
	defer p.mu.Unlock()
	return int(p.tail - p.head)
}
// Cap returns the usable capacity; one slot is reserved to tell a full
// pool from an empty one.
func (p *LockBasedCircularPool) Cap() int {
	return int(p.cap - 1)
}
// String renders a debugging snapshot of the pool state.
//
// Fixed to use a pointer receiver (consistent with every other method of
// this type) and to hold p.mu: the previous value-receiver version
// copied and read head/tail/slots without synchronization, a data race
// whenever producers or consumers were running.
func (p *LockBasedCircularPool) String() string {
	p.mu.Lock()
	defer p.mu.Unlock()
	head := p.head
	tail := p.tail
	headVal := p.slots[head&(p.cap-1)]
	tailVal := p.slots[tail&(p.cap-1)]
	length := tail - head
	return fmt.Sprintf("cap:%v, len:%v; head:%x, slot:{%x}; tail:%x, slot:{%x}",
		p.cap, length, head, headVal, tail, tailVal)
}
// Put appends val to the pool, returning false when the pool is full.
// The value is truncated to uint32 for storage.
func (p *LockBasedCircularPool) Put(val uint64) (ok bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.tail-p.head == p.cap-1 { // full
		return false
	}
	p.slots[p.tail&(p.cap-1)] = uint32(val)
	p.tail++
	return true
}
// Get pops the oldest ID from the pool. ok is false when the pool is
// empty, in which case val is IDPoolInvalidValue.
func (p *LockBasedCircularPool) Get() (val uint64, ok bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.head == p.tail { // empty
		return globalconn.IDPoolInvalidValue, false
	}
	val = uint64(p.slots[p.head&(p.cap-1)])
	p.head++
	return val, true
}
// prepareLockBasedPool builds a lock-based pool of 2^sizeInBits slots
// pre-filled with fillCount IDs, for the benchmark comparison.
func prepareLockBasedPool(sizeInBits uint32, fillCount uint32) globalconn.IDPool {
	var pool LockBasedCircularPool
	pool.InitExt(1<<sizeInBits, fillCount)
	return &pool
}
// prepareLockFreePool builds a lock-free pool of 2^sizeInBits slots
// pre-filled with fillCount IDs. A non-zero headPos additionally moves
// the internal counters (via InitForTest) so tests can exercise
// head/tail wrap-around.
func prepareLockFreePool(sizeInBits uint32, fillCount uint32, headPos uint32) globalconn.IDPool {
	var pool globalconn.LockFreeCircularPool
	pool.InitExt(1<<sizeInBits, fillCount)
	if headPos > 0 {
		pool.InitForTest(headPos, fillCount)
	}
	return &pool
}
// prepareConcurrencyTest spawns producer goroutines that push the values
// [0, requests) into pool and consumer goroutines that pop values and
// accumulate their sum into *total. All goroutines wait on `ready`;
// producers are joined via wgProducer, and consumers keep draining until
// `done` is closed (signalled after producers finish).
func prepareConcurrencyTest(pool globalconn.IDPool, producers int, consumers int, requests int, total *int64) (ready chan struct{}, done chan struct{}, wgProducer *sync.WaitGroup, wgConsumer *sync.WaitGroup) {
	ready = make(chan struct{})
	done = make(chan struct{})
	wgProducer = &sync.WaitGroup{}
	if producers > 0 {
		// Split the request range evenly (rounded up) across producers.
		reqsPerProducer := (requests + producers - 1) / producers
		wgProducer.Add(producers)
		for p := 0; p < producers; p++ {
			go func(p int) {
				defer wgProducer.Done()
				<-ready
				for i := p * reqsPerProducer; i < (p+1)*reqsPerProducer && i < requests; i++ {
					// Spin (yielding) until the pool has room.
					for !pool.Put(uint64(i)) {
						runtime.Gosched()
					}
				}
			}(p)
		}
	}
	wgConsumer = &sync.WaitGroup{}
	if consumers > 0 {
		wgConsumer.Add(consumers)
		for c := 0; c < consumers; c++ {
			go func(c int) {
				defer wgConsumer.Done()
				<-ready
				var sum int64
			Loop:
				for {
					val, ok := pool.Get()
					if ok {
						sum += int64(val)
						continue
					}
					// Pool empty: exit only once `done` is closed,
					// otherwise yield and retry.
					select {
					case <-done:
						break Loop
					default:
						runtime.Gosched()
					}
				}
				atomic.AddInt64(total, sum)
			}(c)
		}
	}
	return ready, done, wgProducer, wgConsumer
}
// doConcurrencyTest releases the goroutines created by
// prepareConcurrencyTest, waits for all producers to finish, then
// signals the consumers to stop draining and waits for them too.
func doConcurrencyTest(ready chan struct{}, done chan struct{}, wgProducer *sync.WaitGroup, wgConsumer *sync.WaitGroup) {
	// logutil.BgLogger().Info("Init", zap.Stringer("pool", q))
	close(ready)
	wgProducer.Wait()
	// logutil.BgLogger().Info("Snapshot on producing done", zap.Stringer("pool", q))
	close(done)
	wgConsumer.Wait()
	// logutil.BgLogger().Info("Finally", zap.Stringer("pool", q))
}
// expectedConcurrencyTestResult computes the sum the consumers should
// accumulate: 0+1+...+(requests-1) from the producers (when both sides
// run) plus 1+2+...+fillCount from the pre-filled IDs, with fillCount
// capped at the pool's usable capacity.
func expectedConcurrencyTestResult(poolSizeInBits uint32, fillCount uint32, producers int, consumers int, requests int) (expected int64) {
	if producers > 0 && consumers > 0 {
		n := int64(requests)
		expected += n * (n - 1) / 2
	}
	if fillCount > 0 {
		capped := int64(mathutil.MinUint32(1<<poolSizeInBits-1, fillCount))
		expected += capped * (capped + 1) / 2
	}
	return expected
}
// testLockFreePoolConcurrency runs one producer/consumer round against a
// lock-free pool and returns the expected and observed value sums.
func testLockFreePoolConcurrency(poolSizeInBits uint32, fillCount uint32, producers int, consumers int, requests int, headPos uint32) (expected, actual int64) {
	var total int64
	pool := prepareLockFreePool(poolSizeInBits, fillCount, headPos)
	ready, done, wgProducer, wgConsumer := prepareConcurrencyTest(pool, producers, consumers, requests, &total)
	doConcurrencyTest(ready, done, wgProducer, wgConsumer)
	expected = expectedConcurrencyTestResult(poolSizeInBits, fillCount, producers, consumers, requests)
	return expected, atomic.LoadInt64(&total)
}
// testLockBasedPoolConcurrency runs one producer/consumer round against
// the lock-based reference pool (always initialized empty) and returns
// the expected and observed value sums.
func testLockBasedPoolConcurrency(poolSizeInBits uint32, producers int, consumers int, requests int) (expected, actual int64) {
	var total int64
	pool := prepareLockBasedPool(poolSizeInBits, 0)
	ready, done, wgProducer, wgConsumer := prepareConcurrencyTest(pool, producers, consumers, requests, &total)
	doConcurrencyTest(ready, done, wgProducer, wgConsumer)
	expected = expectedConcurrencyTestResult(poolSizeInBits, 0, producers, consumers, requests)
	return expected, atomic.LoadInt64(&total)
}
// TestLockFreePoolBasicConcurrencySafety runs producers and consumers
// against the lock-free pool, then re-runs with the internal counters
// positioned just below the uint32 boundary to exercise overflow.
func TestLockFreePoolBasicConcurrencySafety(t *testing.T) {
	assert := assert.New(t)
	var (
		expected int64
		actual   int64
	)
	const (
		sizeInBits = 8
		fillCount  = 0
		producers  = 20
		consumers  = 20
		requests   = 1 << 20
		// Start position close to 2^32 so head/tail wrap during the run.
		headPos = uint32(0x1_0000_0000 - (1 << (sizeInBits + 8)))
	)
	expected, actual = testLockFreePoolConcurrency(sizeInBits, fillCount, producers, consumers, requests, 0)
	assert.Equal(expected, actual)
	// test overflow of head & tail
	expected, actual = testLockFreePoolConcurrency(sizeInBits, fillCount, producers, consumers, requests, headPos)
	assert.Equal(expected, actual)
}
// TestLockBasedPoolConcurrencySafety runs the same producer/consumer
// checksum check against the lock-based reference implementation.
func TestLockBasedPoolConcurrencySafety(t *testing.T) {
	var (
		expected int64
		actual   int64
	)
	const (
		sizeInBits = 8
		producers  = 20
		consumers  = 20
		requests   = 1 << 20
	)
	expected, actual = testLockBasedPoolConcurrency(sizeInBits, producers, consumers, requests)
	assert.Equal(t, expected, actual)
}
// poolConcurrencyTestCase describes one producer/consumer scenario for
// the concurrency-safety tests and benchmarks.
type poolConcurrencyTestCase struct {
	sizeInBits uint32 // pool holds 2^sizeInBits slots
	fillCount  uint32 // IDs pre-filled into the pool
	producers  int
	consumers  int
	requests   int64
}
// String describes the test case for failure and benchmark messages.
func (ta poolConcurrencyTestCase) String() string {
	return fmt.Sprintf("size:%v, fillCount:%v, producers:%v, consumers:%v, requests:%v",
		1<<ta.sizeInBits, ta.fillCount, ta.producers, ta.consumers, ta.requests)
}
// TestLockFreePoolConcurrencySafety runs the classic lock-free queue
// scenarios against LockFreeCircularPool and checks the value checksum.
func TestLockFreePoolConcurrencySafety(t *testing.T) {
	const (
		poolSizeInBits = 16
		requests       = 1 << 20
		concurrency    = 1000
	)
	// Test cases from Anthony Williams, "C++ Concurrency in Action, 2nd", 11.2.2 "Locating concurrency-related bugs by testing":
	cases := []poolConcurrencyTestCase{
		// #1 Multiple threads calling pop() on a partially full queue with insufficient items for all threads
		{sizeInBits: 4, fillCount: 1 << 3, producers: 0, consumers: 32, requests: requests},
		// #2 Multiple threads calling push() while one thread calls pop() on an empty queue
		{sizeInBits: poolSizeInBits, fillCount: 0, producers: concurrency, consumers: 1, requests: requests},
		// #3 Multiple threads calling push() while one thread calls pop() on a full queue
		{sizeInBits: poolSizeInBits, fillCount: 0xffff_ffff, producers: concurrency, consumers: 1, requests: requests},
		// #4 Multiple threads calling push() while multiple threads call pop() on an empty queue
		{sizeInBits: poolSizeInBits, fillCount: 0, producers: concurrency, consumers: concurrency, requests: requests},
		// #5 Multiple threads calling push() while multiple threads call pop() on a full queue
		{sizeInBits: poolSizeInBits, fillCount: 0xffff_ffff, producers: concurrency, consumers: concurrency, requests: requests},
	}
	for i, ca := range cases {
		// Fix: use the per-case request count; the loop previously passed
		// the shared `requests` constant, silently ignoring ca.requests
		// (identical today, but the field was dead).
		expected, actual := testLockFreePoolConcurrency(ca.sizeInBits, ca.fillCount, ca.producers, ca.consumers, int(ca.requests), 0)
		assert.Equalf(t, expected, actual, "case #%v: %v", i+1, ca)
	}
}
// BenchmarkPoolConcurrency compares the lock-based and lock-free pools
// under increasing producer/consumer counts, verifying the checksum on
// every iteration so a racy implementation fails loudly.
func BenchmarkPoolConcurrency(b *testing.B) {
	b.ReportAllocs()
	const (
		poolSizeInBits = 16
		requests       = 1 << 18
	)
	cases := []poolConcurrencyTestCase{
		{producers: 1, consumers: 1},
		{producers: 3, consumers: 3},
		{producers: 10, consumers: 10},
		{producers: 20, consumers: 20},
		{producers: 100, consumers: 100},
	}
	for _, ta := range cases {
		b.Run(fmt.Sprintf("LockBasedCircularPool: P:C: %v:%v", ta.producers, ta.consumers), func(b *testing.B) {
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				// Setup is excluded from the timed region.
				b.StopTimer()
				var total int64
				pool := prepareLockBasedPool(poolSizeInBits, 0)
				ready, done, wgProducer, wgConsumer := prepareConcurrencyTest(pool, ta.producers, ta.consumers, requests, &total)
				b.StartTimer()
				doConcurrencyTest(ready, done, wgProducer, wgConsumer)
				b.StopTimer()
				expected := expectedConcurrencyTestResult(poolSizeInBits, 0, ta.producers, ta.consumers, requests)
				actual := atomic.LoadInt64(&total)
				if expected != actual {
					b.Fatalf("concurrency safety fail, expected:%v, actual:%v", expected, actual)
				}
			}
		})
		b.Run(fmt.Sprintf("LockFreeCircularPool: P:C: %v:%v", ta.producers, ta.consumers), func(b *testing.B) {
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				b.StopTimer()
				var total int64
				pool := prepareLockFreePool(poolSizeInBits, 0, 0)
				ready, done, wgProducer, wgConsumer := prepareConcurrencyTest(pool, ta.producers, ta.consumers, requests, &total)
				b.StartTimer()
				doConcurrencyTest(ready, done, wgProducer, wgConsumer)
				b.StopTimer()
				expected := expectedConcurrencyTestResult(poolSizeInBits, 0, ta.producers, ta.consumers, requests)
				actual := atomic.LoadInt64(&total)
				if expected != actual {
					b.Fatalf("concurrency safety fail, expected:%v, actual:%v", expected, actual)
				}
			}
		})
	}
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kms"
)
//encryptFile takes in a file path and returns the KMS encrypted data
func encryptFile(targetFile *string, kmsID *string, client kms.KMS, pipe chan<- []byte) {
text, err := ioutil.ReadFile(*targetFile)
if err != nil {
log.Fatal("Cannot read file: ", *targetFile)
}
var input kms.EncryptInput
input.KeyId = kmsID
input.Plaintext = text
output, err := client.Encrypt(&kms.EncryptInput{
KeyId: aws.String(*kmsID),
Plaintext: text})
if err != nil {
fmt.Println(err)
}
fmt.Println(string(output.CiphertextBlob))
pipe <- output.CiphertextBlob
}
//writeEncryptedFile writes the encrypted data to disk and creates the folder to hold them
func writeEncryptedFile(outputFolder *string, osPerms *int, path *string, pipe chan []byte) {
file := <-pipe
perms := os.FileMode(*osPerms)
filename := *outputFolder + filepath.Base(*path)
err := ioutil.WriteFile(filename, file, perms)
if err != nil {
fmt.Println(os.Mkdir(filepath.Base(*outputFolder), perms))
}
}
// main parses flags and, for each file argument, encrypts the file with
// the configured KMS key (in a goroutine) and writes the ciphertext
// into the output folder.
func main() {
	outputFolder := flag.String("output", "./encrypted/", "folder to output encrytped files to")
	kmsID := flag.String("kms", "", "KMS Key to use to encrypt the file")
	region := flag.String("region", "us-west-1", "region with KMS key")
	flag.Parse()
	files := flag.Args()
	if len(files) < 1 {
		log.Fatal("usage: ./cfgcrpyt -o=./encryptedOutPut/ /path1/file1 /path2/file2")
	}
	sess := session.Must(session.NewSession())
	// NOTE(review): the client is dereferenced and copied by value
	// because encryptFile takes kms.KMS rather than *kms.KMS — consider
	// passing the pointer instead.
	client := *kms.New(sess, aws.NewConfig().WithRegion(*region))
	// NOTE(review): 0667 is an unusual mode (group/other write, no owner
	// execute) — confirm 0644 was not intended.
	osPerms := int(0667)
	pipe := make(chan []byte) // unbuffered: pairs each encryption with one write
	for x := range files {
		fmt.Println("Encrypting: ", files[x])
		go encryptFile(&files[x], kmsID, client, pipe)
		// Blocks until the goroutine above delivers the ciphertext.
		writeEncryptedFile(outputFolder, &osPerms, &files[x], pipe)
	}
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// main brute-forces the pair of inputs (noun, verb) at memory cells 1
// and 2 of the opcode program in Day2/in2.txt until running it leaves
// 19690720 in cell 0, then prints 100*noun + verb.
func main() {
	file, err := os.Open("Day2/in2.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Scan()
	s := strings.Split(scanner.Text(), ",")
	// NOTE(review): the program length is hard-coded to 276 cells —
	// confirm it matches the input file.
	var nums [276]int
	for a, b := range s {
		nums[a], _ = strconv.Atoi(b)
	}
out: // labeled so the inner loop can break out of both loops at once
	for noun := 0; noun < 100; noun++ {
		for verb := 0; verb < 100; verb++ {
			// Run each attempt on a fresh copy: execution mutates memory.
			var nums2 [len(nums)]int
			for i := range nums {
				nums2[i] = nums[i]
			}
			i := 0
			nums2[1] = noun
			nums2[2] = verb
			// Interpreter: opcode 1 adds, 2 multiplies, 99 halts; the
			// three values after an opcode are addresses (two sources,
			// one destination). Instructions are 4 cells apart.
			for {
				if nums2[i] == 1 {
					nums2[nums2[i+3]] = nums2[nums2[i+1]] + nums2[nums2[i+2]]
				} else if nums2[i] == 2 {
					nums2[nums2[i+3]] = nums2[nums2[i+1]] * nums2[nums2[i+2]]
				} else if nums2[i] == 99 {
					break
				}
				i += 4
			}
			if nums2[0] == 19690720 {
				fmt.Println(100*noun + verb)
				break out
			}
		}
	}
}
|
////////////////////////////////////////////////////////////////////////////////
// //
// Copyright 2019 Broadcom. The term Broadcom refers to Broadcom Inc. and/or //
// its subsidiaries. //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
////////////////////////////////////////////////////////////////////////////////
package main
import (
"fmt"
// "errors"
"flag"
"github.com/golang/glog"
"github.com/Azure/sonic-mgmt-common/translib/db"
"time"
"github.com/Azure/sonic-mgmt-common/translib/tlerr"
)
// main exercises the translib db package against ConfigDB: entry CRUD,
// key listing, table reads, transactions (commit and abort), DeleteKeys
// and the Tables2TableSpecs helper.
//
// Fix: the GetKeysPattern call used `keys, _ := ...`, redeclaring `keys`
// in the same scope — a compile error ("no new variables on left side
// of :="). Its banner was also a copy-paste of "Testing GetKeys".
func main() {
	var avalue, rvalue db.Value
	var akey, rkey db.Key
	var e error

	defer glog.Flush()
	flag.Parse()

	fmt.Println("Creating the DB ==============")
	d, _ := db.NewDB(db.Options{
		DBNo:               db.ConfigDB,
		InitIndicator:      "CONFIG_DB_INITIALIZED",
		TableNameSeparator: "|",
		KeySeparator:       "|",
	})

	// fmt.Println("key: CONFIG_DB_INITIALIZED value: ",
	//	d.Client.Get("CONFIG_DB_INITIALIZED").String())

	tsa := db.TableSpec{Name: "ACL_TABLE"}
	tsr := db.TableSpec{Name: "ACL_RULE"}

	// akey below aliases ca, so rewriting ca[0] retargets akey too.
	ca := make([]string, 1, 1)

	fmt.Println("Testing GetEntry error ==============")
	ca[0] = "MyACL1_ACL_IPVNOTEXIST"
	akey = db.Key{Comp: ca}
	avalue, e = d.GetEntry(&tsa, akey)
	fmt.Println("ts: ", tsa, " ", akey, ": ", avalue, " error: ", e)
	if _, ok := e.(tlerr.TranslibRedisClientEntryNotExist); ok {
		fmt.Println("Type is TranslibRedisClientEntryNotExist")
	}

	fmt.Println("Testing NoTransaction SetEntry ==============")
	ca[0] = "MyACL1_ACL_IPV4"
	akey = db.Key{Comp: ca}
	avalue = db.Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}}
	d.SetEntry(&tsa, akey, avalue)

	fmt.Println("Testing GetEntry ==============")
	avalue, _ = d.GetEntry(&tsa, akey)
	fmt.Println("ts: ", tsa, " ", akey, ": ", avalue)

	fmt.Println("Testing GetKeys ==============")
	keys, _ := d.GetKeys(&tsa)
	fmt.Println("ts: ", tsa, " keys: ", keys)

	fmt.Println("Testing GetKeysPattern ==============")
	keys, _ = d.GetKeysPattern(&tsa, akey)
	fmt.Println("ts: ", tsa, " keys: ", keys)

	fmt.Println("Testing NoTransaction DeleteEntry ==============")
	akey = db.Key{Comp: ca}
	d.DeleteEntry(&tsa, akey)
	avalue, e = d.GetEntry(&tsa, akey)
	if e == nil {
		fmt.Println("!!! ts: ", tsa, " ", akey, ": ", avalue)
	}

	fmt.Println("Testing 2 more ACLs ==============")
	ca[0] = "MyACL2_ACL_IPV4"
	avalue = db.Value{map[string]string{"ports@": "Ethernet0", "type": "MIRROR"}}
	d.SetEntry(&tsa, akey, avalue)
	ca[0] = "MyACL3_ACL_IPV4"
	d.SetEntry(&tsa, akey, avalue)

	ta, _ := d.GetTable(&tsa)
	fmt.Println("ts: ", tsa, " table: ", ta)
	tr, _ := d.GetTable(&tsr)
	fmt.Println("ts: ", tsr, " table: ", tr)

	fmt.Println("Testing Transaction =================")
	rkey = db.Key{Comp: []string{"MyACL2_ACL_IPV4", "RULE_1"}}
	rvalue = db.Value{Field: map[string]string{
		"priority":      "0",
		"packet_action": "DROP",
	},
	}
	// d.StartTx([]db.WatchKeys { {Ts: &tsr, Key: &rkey} })
	d.StartTx([]db.WatchKeys{{Ts: &tsr, Key: &rkey}},
		[]*db.TableSpec{&tsr, &tsa})
	fmt.Println("Sleeping 5...")
	time.Sleep(5 * time.Second)
	d.SetEntry(&tsr, rkey, rvalue)
	e = d.CommitTx()
	if e != nil {
		fmt.Println("Transaction Failed ======= e: ", e)
	}

	fmt.Println("Testing AbortTx =================")
	// d.StartTx([]db.WatchKeys { {Ts: &tsr, Key: &rkey} })
	d.StartTx([]db.WatchKeys{}, []*db.TableSpec{&tsr, &tsa})
	d.DeleteEntry(&tsa, rkey)
	d.AbortTx()
	// The aborted delete must not have removed the committed rule.
	avalue, e = d.GetEntry(&tsr, rkey)
	fmt.Println("ts: ", tsr, " ", akey, ": ", avalue)

	fmt.Println("Testing DeleteKeys =================")
	d.DeleteKeys(&tsr, db.Key{Comp: []string{"ToBeDeletedACLs*"}})

	fmt.Println("Testing GetTable")
	tr, _ = d.GetTable(&tsr)
	fmt.Println("ts: ", tsr, " table: ", tr)

	// d.DeleteTable(&ts)

	fmt.Println("Testing Tables2TableSpecs =================")
	var tables []string
	tables = []string{"ACL_TABLE", "ACL_RULE"}
	fmt.Println("Tables: ", tables)
	fmt.Println("TableSpecs: ")
	for _, tsi := range db.Tables2TableSpecs(tables) {
		fmt.Println("  ", *tsi)
	}
	fmt.Println("Empty TableSpecs: ")
	for _, tsi := range db.Tables2TableSpecs([]string{}) {
		fmt.Println("  ", *tsi)
	}

	d.DeleteDB()
}
|
package vac
import (
"bytes"
"errors"
"fmt"
"io"
"sort"
"strings"
"time"
venti "sigint.ca/venti2"
)
const (
	// TODO: use the fossil magic (+1)? and reverse botch logic
	MetaMagic      = 0x5656fc79 // meta block magic; MetaMagic+1 marks "unbotched" name ordering
	MetaHeaderSize = 12         // header bytes: magic(4) size(2) free(2) maxIndex(2) nIndex(2)
	MetaIndexSize  = 4          // bytes per index slot: offset(2) size(2)
	IndexEntrySize = 8
)
// Tuning constants for meta block sizing and cache flushing.
const (
	BytesPerEntry   = 100 // estimate of bytes per dir entries - determines number of index entries in the block
	FullPercentage  = 80  // don't allocate in block if more than this percentage full
	FlushSize       = 200 // number of blocks to flush
	DirtyPercentage = 50  // maximum percentage of dirty blocks
)
// MetaBlock is a fixed-size block of packed directory entries,
// addressed through a sorted index table that follows the 12-byte
// header at the front of buf.
type MetaBlock struct {
	size int // size used
	free int // free space within used size
	// index table
	maxIndex int  // entries allocated
	nIndex   int  // amount of table used
	unbotch  bool // toggle index search algorithm
	// the entire block
	buf []byte
}
// MetaEntry locates one packed DirEntry within a MetaBlock's buffer.
type MetaEntry struct {
	Offset int // byte offset of the entry within the block
	Size   int // length of the packed entry in bytes
}
// NewMetaBlock initializes an empty meta block backed by buf with room
// for the given number of index entries. The initial used size covers
// just the header plus the (empty) index table.
func NewMetaBlock(buf []byte, entries int) *MetaBlock {
	return &MetaBlock{
		buf:      buf,
		maxIndex: entries,
		size:     MetaHeaderSize + entries*MetaIndexSize,
	}
}
// UnpackMetaBlock parses the meta block header in buf and returns a
// MetaBlock backed by it, validating the magic, the recorded size, and
// that the index table fits in the buffer.
func UnpackMetaBlock(buf []byte) (*MetaBlock, error) {
	if len(buf) < MetaHeaderSize {
		return nil, fmt.Errorf("short buffer: %d < %d", len(buf), MetaHeaderSize)
	}
	rd := bytes.NewReader(buf)
	// TODO: check size here? what does the plan9 code do here?
	magic := readUint32(rd)
	if magic != MetaMagic && magic != MetaMagic+1 {
		return nil, fmt.Errorf("bad meta block magic %#x", magic)
	}
	// header fields follow the magic in this fixed order
	size := int(readUint16(rd))
	free := int(readUint16(rd))
	maxIndex := int(readUint16(rd))
	nIndex := int(readUint16(rd))
	out := &MetaBlock{
		buf:      buf,
		size:     size,
		free:     free,
		maxIndex: maxIndex,
		nIndex:   nIndex,
		unbotch:  magic == MetaMagic+1,
	}
	if out.size > len(buf) {
		return nil, fmt.Errorf("bad meta block size: %d > %d", out.size, len(buf))
	}
	if rd.Len() < out.maxIndex*MetaIndexSize {
		return nil, fmt.Errorf("truncated meta block: %d < %d", rd.Len(), out.maxIndex*MetaIndexSize)
	}
	return out, nil
}
// Pack serializes the meta block header into the front of the block's
// buffer and returns the buffer.
// NOTE(review): Pack always writes MetaMagic, even when mb.unbotch is
// set (i.e. the block was unpacked with magic MetaMagic+1) — confirm
// that repacking an unbotched block with the plain magic is intended,
// since the magic selects the index comparison routine on re-read.
func (mb *MetaBlock) Pack() []byte {
	p := mb.buf
	putUint32(p, MetaMagic)
	putUint16(p[4:], uint16(mb.size))
	putUint16(p[6:], uint16(mb.free))
	putUint16(p[8:], uint16(mb.maxIndex))
	putUint16(p[10:], uint16(mb.nIndex))
	return p
}
// Alloc returns the offset of a span of n currently unused bytes
// within the block, compacting the entry data if necessary. It does
// not mark the span as used; the caller does that by inserting an
// index entry covering it.
// (The comment previously referred to this method as "Grow".)
func (mb *MetaBlock) Alloc(n int) (offset int, err error) {
	// fast path: enough untouched space past the used size
	if len(mb.buf)-mb.size >= n {
		return mb.size, nil
	}
	// check if possible: trailing space plus freed holes must cover n
	if len(mb.buf)-mb.size+mb.free < n {
		return 0, errors.New("no space in meta block")
	}
	// chunks are MetaEntries sorted by the offset
	// of the DirEntry they point to in mb.
	mc := mb.chunks()
	// look for a hole between consecutive entries big enough for n
	o := MetaHeaderSize + mb.maxIndex*MetaIndexSize
	for i := 0; i < mb.nIndex; i++ {
		if int(mc[i].offset)-o >= n {
			return o, nil
		}
		o = int(mc[i].offset) + int(mc[i].size)
	}
	// hole after the last entry
	if len(mb.buf)-o >= n {
		return o, nil
	}
	// compact and return off the end
	mb.compact(mc)
	if len(mb.buf)-mb.size < n {
		panic("invariant failed")
	}
	return mb.size, nil
}
// metaChunk pairs an index slot with the offset/size it references;
// used for offset-sorted scans in Alloc and compact.
type metaChunk struct {
	offset uint16
	size   uint16
	index  uint16 // original index slot this chunk came from
}
// chunks reads the index table and returns its entries as metaChunks
// sorted by the offset of the entry data they reference. It panics if
// the block's accounting (entry ordering, used size, free total) is
// inconsistent.
func (mb *MetaBlock) chunks() []metaChunk {
	chunks := make([]metaChunk, mb.nIndex)
	p := mb.buf[MetaHeaderSize:]
	for i := 0; i < mb.nIndex; i++ {
		chunks[i] = metaChunk{
			offset: getUint16(p),
			size:   getUint16(p[2:]),
			index:  uint16(i),
		}
		p = p[MetaIndexSize:]
	}
	sort.Slice(chunks, func(i, j int) bool {
		return chunks[i].offset < chunks[j].offset
	})
	// check block looks ok
	oo := MetaHeaderSize + mb.maxIndex*MetaIndexSize
	o := oo
	n := 0
	for i := 0; i < mb.nIndex; i++ {
		o = int(chunks[i].offset)
		n = int(chunks[i].size)
		// entries must not overlap: each must start at or after the
		// end of its predecessor
		if o < oo {
			panic("invariant failed")
		}
		oo += n
	}
	// the last entry must end within the used portion of the block.
	// BUG FIX: the condition was inverted (<=), which panicked on every
	// valid block, including empty ones.
	if o+n > mb.size {
		panic("invariant failed")
	}
	// total hole space must match the recorded free count
	if mb.size-oo != mb.free {
		panic("invariant failed")
	}
	return chunks
}
// compact slides all entry data left so it is contiguous immediately
// after the index table, rewriting the offset of every moved entry in
// its original index slot. chunks must be sorted by offset, as
// returned by mb.chunks, so each move copies data leftward only.
func (mb *MetaBlock) compact(chunks []metaChunk) {
	dst := MetaHeaderSize + mb.maxIndex*MetaIndexSize
	for _, c := range chunks[:mb.nIndex] {
		src := int(c.offset)
		length := int(c.size)
		if src != dst {
			copy(mb.buf[dst:], mb.buf[src:src+length])
			putUint16(mb.buf[MetaHeaderSize+c.index*MetaIndexSize:], uint16(dst))
		}
		dst += length
	}
	mb.size = dst
	mb.free = 0
}
// slice returns the bytes of the block covered by me.
func (mb *MetaBlock) slice(me MetaEntry) []byte {
	start, end := me.Offset, me.Offset+me.Size
	return mb.buf[start:end]
}
// Delete deletes me from position i of the MetaBlock index. The
// entry's bytes are reclaimed either by shrinking the used size (when
// me is the last entry) or by adding them to the free total, leaving a
// hole for Alloc/compact to reuse later.
func (mb *MetaBlock) Delete(i int, me MetaEntry) {
	if i >= mb.nIndex {
		panic("invariant failed")
	}
	if me.Offset+me.Size == mb.size {
		// last entry in the index
		mb.size -= me.Size
	} else {
		// leave a gap
		mb.free += me.Size
	}
	// shift index slots [i+1, nIndex) down one and zero the stale last slot
	p := mb.buf[MetaHeaderSize+i*MetaIndexSize:]
	n := (mb.nIndex - i - 1) * MetaIndexSize
	copy(p, p[MetaIndexSize:MetaIndexSize+n])
	memset(p[n:n+MetaIndexSize], 0)
	mb.nIndex--
}
// Insert inserts me into position i of the MetaBlock index, shifting
// later slots up. The caller must have reserved the entry's bytes
// (via Alloc) and chosen i so the index remains sorted.
func (mb *MetaBlock) Insert(i int, me MetaEntry) {
	if mb.nIndex >= mb.maxIndex {
		panic("invariant failed")
	}
	if me.Offset+me.Size > mb.size {
		// append, possibly also using some trailing free space
		mb.free -= mb.size - me.Offset
		mb.size = me.Offset + me.Size
	} else {
		// insert strictly into free space
		mb.free -= me.Size
	}
	// shift slots [i, nIndex) up one (copy handles the overlap like
	// memmove) and write the new entry into slot i
	p := mb.buf[MetaHeaderSize+i*MetaIndexSize:]
	n := (mb.nIndex - i) * MetaIndexSize
	copy(p[MetaIndexSize:], p[:n])
	putUint16(p, uint16(me.Offset))
	putUint16(p[2:], uint16(me.Size))
	mb.nIndex++
}
// Search binary-searches the block's index for the entry whose elem
// name equals elem. On a hit it returns found=true, the matching slot
// i, and the unpacked MetaEntry. The comparison routine depends on
// mb.unbotch (see compare vs compareNew).
// NOTE(review): on a miss, the returned i is the last slot probed, not
// necessarily the insertion point (which is b==t at loop exit) —
// confirm callers do not rely on i when found is false.
func (mb *MetaBlock) Search(elem string) (found bool, i int, me MetaEntry, err error) {
	// binary search within block
	b := 0
	t := mb.nIndex
	for b < t {
		i = (b + t) >> 1
		me, err = mb.unpackMetaEntry(i)
		if err != nil {
			return
		}
		var x int
		if mb.unbotch {
			x = mb.compareNew(me, elem)
		} else {
			x = mb.compare(me, elem)
		}
		if x == 0 {
			found = true
			return
		}
		// entry sorts before elem: continue in the upper half
		if x < 0 {
			b = i + 1
		} else { // x > 0
			t = i
		}
	}
	if b != t {
		panic("invariant failed")
	}
	return
}
// compare orders the entry's elem name against s using the original
// "botched" ordering, where a name that is a strict prefix of a longer
// name sorts AFTER it. Returns -1, 0, or 1 for entry <, ==, > s.
func (mb *MetaBlock) compare(me MetaEntry, s string) int {
	p := mb.slice(me)
	// first 6 bytes are magic and version
	n := int(getUint16(p[6:]))
	p = p[8:]
	if n >= len(p) {
		panic("invariant failed")
	}
	r1 := bytes.NewReader(p[:n])
	r2 := strings.NewReader(s)
	for r1.Len() > 0 {
		if r2.Len() == 0 {
			// s is a strict prefix of the entry name: entry sorts
			// first (the botch)
			return -1
		}
		c1, _ := r1.ReadByte()
		c2, _ := r2.ReadByte()
		if c1 < c2 {
			return -1
		}
		if c1 > c2 {
			return 1
		}
	}
	if r2.Len() == 0 {
		return 0
	}
	// entry name is a strict prefix of s: entry sorts after (the botch)
	return 1
}
// compareNew orders the entry's elem name against s using conventional
// lexicographic byte ordering (a prefix sorts before the longer name).
// Returns -1, 0, or 1 for entry <, ==, > s. Used when mb.unbotch is set.
func (mb *MetaBlock) compareNew(me MetaEntry, s string) int {
	p := mb.slice(me)
	// first 6 bytes are magic and version
	n := int(getUint16(p[6:]))
	p = p[8:]
	if n >= len(p) {
		panic("invariant failed")
	}
	r1 := bytes.NewReader(p[:n])
	r2 := strings.NewReader(s)
	for r1.Len() > 0 {
		if r2.Len() == 0 {
			// s is a strict prefix of the entry name: entry sorts after
			return 1
		}
		c1, _ := r1.ReadByte()
		c2, _ := r2.ReadByte()
		if c1 < c2 {
			return -1
		}
		if c1 > c2 {
			return 1
		}
	}
	if r2.Len() == 0 {
		return 0
	}
	// entry name is a strict prefix of s: entry sorts first
	return -1
}
// unpackMetaEntry reads index slot i and validates that the entry it
// references lies within the used portion of the block and begins with
// a well-formed DirEntry header (DirMagic plus a complete elem name).
func (mb *MetaBlock) unpackMetaEntry(i int) (MetaEntry, error) {
	if i < 0 || i >= mb.nIndex {
		return MetaEntry{}, errors.New("bad meta entry index")
	}
	p := mb.buf[MetaHeaderSize+i*MetaIndexSize:]
	eo := int(getUint16(p))
	en := int(getUint16(p[2:]))
	// entry data must start after the index table
	if eo < MetaHeaderSize+mb.maxIndex*MetaIndexSize {
		return MetaEntry{}, errors.New("corrupted entry in meta block")
	}
	if eo+en > mb.size {
		return MetaEntry{}, fmt.Errorf("truncated meta block: %d < %d", mb.size, eo+en)
	}
	p = mb.buf[eo:]
	// make sure entry looks ok and includes an elem name
	if en < 8 || getUint32(p) != DirMagic || en < 8+int(getUint16(p[6:])) {
		return MetaEntry{}, errors.New("corrupted meta block entry")
	}
	me := MetaEntry{Offset: eo, Size: en}
	return me, nil
}
// unpackDirEntry decodes the packed DirEntry referenced by me. The
// on-disk layout is: magic(4) version(2) elem(string) entry(4)
// [gen(4) mentry(4) mgen(4) if version >= 9] qid(8) [score if
// version == 7] uid gid mid (strings) mtime(4) mcount(4) ctime(4)
// atime(4) mode(4), followed by optional typed extension records of
// the form type(1) length(2) payload(length). Versions 7-9 are
// accepted; anything else is rejected as corrupt.
func (mb *MetaBlock) unpackDirEntry(me MetaEntry) (*DirEntry, error) {
	var dir DirEntry
	var err error
	r := bytes.NewReader(mb.slice(me))
	// magic
	if r.Len() < 4 {
		return nil, errCorruptMeta
	}
	if readUint32(r) != DirMagic {
		return nil, errCorruptMeta
	}
	// version
	if r.Len() < 2 {
		return nil, errCorruptMeta
	}
	version := readUint16(r)
	if version < 7 || version > 9 {
		return nil, errCorruptMeta
	}
	// elem
	dir.Elem, err = readString(r)
	if err != nil {
		return nil, errCorruptMeta
	}
	// entry
	if r.Len() < 4 {
		return nil, errCorruptMeta
	}
	dir.Entry = int(readUint32(r))
	if version < 9 {
		// pre-9 entries have no gen counters; meta entry follows data entry
		dir.Gen = 0
		dir.Mentry = dir.Entry + 1
		dir.Mgen = 0
	} else {
		if r.Len() < 3*4 {
			return nil, errCorruptMeta
		}
		dir.Gen = int(readUint32(r))
		dir.Mentry = int(readUint32(r))
		dir.Mgen = int(readUint32(r))
	}
	// size is gotten from DirEntry
	// qid
	if r.Len() < 8 {
		return nil, errCorruptMeta
	}
	dir.Qid = uint64(readUint64(r))
	// skip replacement
	if version == 7 {
		// version 7 stored a venti score here; skip it
		if r.Len() < venti.ScoreSize {
			return nil, errCorruptMeta
		}
		r.Seek(venti.ScoreSize, io.SeekCurrent)
	}
	// uid
	dir.Uid, err = readString(r)
	if err != nil {
		return nil, errCorruptMeta
	}
	// gid
	dir.Gid, err = readString(r)
	if err != nil {
		return nil, errCorruptMeta
	}
	// mid
	dir.Mid, err = readString(r)
	if err != nil {
		return nil, errCorruptMeta
	}
	if r.Len() < 5*4 {
		return nil, errCorruptMeta
	}
	dir.Mtime = time.Unix(int64(readUint32(r)), 0)
	dir.Mcount = int(readUint32(r))
	dir.Ctime = time.Unix(int64(readUint32(r)), 0)
	dir.Atime = time.Unix(int64(readUint32(r)), 0)
	dir.Mode = readUint32(r)
	// optional meta data
	// NOTE(review): branches that `break` without reading (version >= 9
	// records, DirGenEntry, unknown types) do not consume the record's
	// nn payload bytes, so the reader desyncs and the final r.Len() != 0
	// check rejects the block — confirm this is intended rather than
	// skipping nn bytes as the C original does with pointer arithmetic.
	for r.Len() > 0 {
		if r.Len() < 3 {
			return nil, errCorruptMeta
		}
		t, _ := r.ReadByte()
		nn := int(readUint16(r))
		if r.Len() < nn {
			return nil, errCorruptMeta
		}
		switch t {
		case DirPlan9Entry:
			// not valid in version >= 9
			if version >= 9 {
				break
			}
			if dir.plan9 || nn != 12 {
				return nil, errCorruptMeta
			}
			dir.plan9 = true
			dir.p9Path = readUint64(r)
			dir.p9Version = int(readUint32(r))
			if dir.Mcount == 0 {
				dir.Mcount = dir.p9Version
			}
			break
		case DirGenEntry:
			// not valid in version >= 9
			if version >= 9 {
				break
			}
			break
		case DirQidSpaceEntry:
			if dir.qidSpace || nn != 16 {
				return nil, errCorruptMeta
			}
			dir.qidSpace = true
			dir.qidOffset = readUint64(r)
			dir.qidMax = readUint64(r)
			break
		}
	}
	// all bytes of the entry must have been consumed
	if r.Len() != 0 {
		return nil, errCorruptMeta
	}
	return &dir, nil
}
|
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package android
import (
"errors"
"fmt"
"reflect"
"strings"
"testing"
"github.com/google/blueprint/proptools"
)
// strsTestCase is a table entry for the validatePath/validateSafePath
// tests: input path components, expected joined output, and expected
// errors (nil when validation should succeed).
type strsTestCase struct {
	in  []string
	out string
	err []error
}
// commonValidatePathTestCases are shared between validatePath and
// validateSafePath: normal joins, "." handling, and rejection of paths
// that escape the directory via "..", absolute paths, or mixed joins.
var commonValidatePathTestCases = []strsTestCase{
	{
		in:  []string{""},
		out: "",
	},
	{
		in:  []string{"a/b"},
		out: "a/b",
	},
	{
		in:  []string{"a/b", "c"},
		out: "a/b/c",
	},
	{
		in:  []string{"a/.."},
		out: ".",
	},
	{
		in:  []string{"."},
		out: ".",
	},
	{
		in:  []string{".."},
		out: "",
		err: []error{errors.New("Path is outside directory: ..")},
	},
	{
		in:  []string{"../a"},
		out: "",
		err: []error{errors.New("Path is outside directory: ../a")},
	},
	{
		in:  []string{"b/../../a"},
		out: "",
		err: []error{errors.New("Path is outside directory: ../a")},
	},
	{
		in:  []string{"/a"},
		out: "",
		err: []error{errors.New("Path is outside directory: /a")},
	},
	{
		in:  []string{"a", "../b"},
		out: "",
		err: []error{errors.New("Path is outside directory: ../b")},
	},
	{
		in:  []string{"a", "b/../../c"},
		out: "",
		err: []error{errors.New("Path is outside directory: ../c")},
	},
	{
		in:  []string{"a", "./.."},
		out: "",
		err: []error{errors.New("Path is outside directory: ..")},
	},
}
// validateSafePathTestCases: the "safe" variant additionally allows
// paths containing make variables like $host.
var validateSafePathTestCases = append(commonValidatePathTestCases, []strsTestCase{
	{
		in:  []string{"$host/../$a"},
		out: "$a",
	},
}...)
// validatePathTestCases: the strict variant rejects any path containing
// a '$' character.
var validatePathTestCases = append(commonValidatePathTestCases, []strsTestCase{
	{
		in:  []string{"$host/../$a"},
		out: "",
		err: []error{errors.New("Path contains invalid character($): $host/../$a")},
	},
	{
		in:  []string{"$host/.."},
		out: "",
		err: []error{errors.New("Path contains invalid character($): $host/..")},
	},
}...)
// TestValidateSafePath runs the table of safe-path cases through
// validateSafePath, collecting any reported errors via a
// configErrorWrapper.
func TestValidateSafePath(t *testing.T) {
	for _, testCase := range validateSafePathTestCases {
		t.Run(strings.Join(testCase.in, ","), func(t *testing.T) {
			ctx := &configErrorWrapper{}
			out, err := validateSafePath(testCase.in...)
			if err != nil {
				reportPathError(ctx, err)
			}
			check(t, "validateSafePath", p(testCase.in), out, ctx.errors, testCase.out, testCase.err)
		})
	}
}
// TestValidatePath runs the table of strict-path cases through
// validatePath, collecting any reported errors via a
// configErrorWrapper.
func TestValidatePath(t *testing.T) {
	for _, testCase := range validatePathTestCases {
		t.Run(strings.Join(testCase.in, ","), func(t *testing.T) {
			ctx := &configErrorWrapper{}
			out, err := validatePath(testCase.in...)
			if err != nil {
				reportPathError(ctx, err)
			}
			check(t, "validatePath", p(testCase.in), out, ctx.errors, testCase.out, testCase.err)
		})
	}
}
// TestOptionalPath checks that both the zero value and
// OptionalPathForPath(nil) behave as invalid OptionalPaths.
func TestOptionalPath(t *testing.T) {
	var path OptionalPath
	checkInvalidOptionalPath(t, path)
	path = OptionalPathForPath(nil)
	checkInvalidOptionalPath(t, path)
}
// checkInvalidOptionalPath asserts that path behaves as invalid:
// Valid() is false, String() is empty, and Path() panics — the panic
// is caught by the deferred recover, which fails the test if no panic
// occurred.
func checkInvalidOptionalPath(t *testing.T, path OptionalPath) {
	t.Helper()
	if path.Valid() {
		t.Errorf("Uninitialized OptionalPath should not be valid")
	}
	if path.String() != "" {
		t.Errorf("Uninitialized OptionalPath String() should return \"\", not %q", path.String())
	}
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("Expected a panic when calling Path() on an uninitialized OptionalPath")
		}
	}()
	path.Path()
}
// check compares the actual output/errors of one test case against the
// expected output/errors and reports any mismatch, printing the test
// case header only once even when both comparisons fail.
func check(t *testing.T, testType, testString string,
	got interface{}, err []error,
	expected interface{}, expectedErr []error) {
	t.Helper()
	headerShown := false
	mismatch := func(what string, want, have interface{}) {
		t.Helper()
		if !headerShown {
			t.Errorf("test case %s: %s", testType, testString)
			headerShown = true
		}
		t.Errorf("incorrect %s", what)
		t.Errorf("  expected: %s", p(want))
		t.Errorf("  got: %s", p(have))
	}
	if !reflect.DeepEqual(expectedErr, err) {
		mismatch("errors:", expectedErr, err)
	}
	if !reflect.DeepEqual(expected, got) {
		mismatch("output:", expected, got)
	}
}
// p formats a value for test failure messages: a []interface{} is
// rendered as a bracketed, comma-separated list of %#v values, and
// anything else as plain %#v.
func p(in interface{}) string {
	// idiom fix: no else after a terminating return (keeps the happy
	// path left-aligned)
	if v, ok := in.([]interface{}); ok {
		s := make([]string, len(v))
		for i := range v {
			s[i] = fmt.Sprintf("%#v", v[i])
		}
		return "[" + strings.Join(s, ", ") + "]"
	}
	return fmt.Sprintf("%#v", in)
}
// moduleInstallPathContextImpl is a minimal ModuleInstallPathContext
// for tests: each flag selects an install location variant checked by
// PathForModuleInstall.
type moduleInstallPathContextImpl struct {
	baseModuleContext
	inData         bool
	inTestcases    bool
	inSanitizerDir bool
	inRamdisk      bool
	inRecovery     bool
	inRoot         bool
}
// Config returns the test config stored on the embedded baseModuleContext.
func (m moduleInstallPathContextImpl) Config() Config {
	return m.baseModuleContext.config
}
// AddNinjaFileDeps is a no-op stub to satisfy the context interface.
func (moduleInstallPathContextImpl) AddNinjaFileDeps(deps ...string) {}
// InstallInData reports the configured inData flag.
func (m moduleInstallPathContextImpl) InstallInData() bool {
	return m.inData
}
// InstallInTestcases reports the configured inTestcases flag.
func (m moduleInstallPathContextImpl) InstallInTestcases() bool {
	return m.inTestcases
}
// InstallInSanitizerDir reports the configured inSanitizerDir flag.
func (m moduleInstallPathContextImpl) InstallInSanitizerDir() bool {
	return m.inSanitizerDir
}
// InstallInRamdisk reports the configured inRamdisk flag.
func (m moduleInstallPathContextImpl) InstallInRamdisk() bool {
	return m.inRamdisk
}
// InstallInRecovery reports the configured inRecovery flag.
func (m moduleInstallPathContextImpl) InstallInRecovery() bool {
	return m.inRecovery
}
// InstallInRoot reports the configured inRoot flag.
func (m moduleInstallPathContextImpl) InstallInRoot() bool {
	return m.inRoot
}
// InstallBypassMake is always false in these tests.
func (m moduleInstallPathContextImpl) InstallBypassMake() bool {
	return false
}
// pathTestConfig returns a bare test Config rooted at buildDir.
func pathTestConfig(buildDir string) Config {
	return TestConfig(buildDir, nil, "", nil)
}
// TestPathForModuleInstall checks the install destination computed by
// PathForModuleInstall for host and device modules across partition
// kinds (system, vendor, odm, product, system_ext), data/native-test
// installs, root and recovery installs, and sanitized (asan) variants.
func TestPathForModuleInstall(t *testing.T) {
	testConfig := pathTestConfig("")
	hostTarget := Target{Os: Linux}
	deviceTarget := Target{Os: Android}
	testCases := []struct {
		name string
		ctx  *moduleInstallPathContextImpl
		in   []string
		out  string
	}{
		{
			name: "host binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     hostTarget.Os,
					target: hostTarget,
				},
			},
			in:  []string{"bin", "my_test"},
			out: "host/linux-x86/bin/my_test",
		},
		{
			name: "system binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
				},
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/system/bin/my_test",
		},
		{
			name: "vendor binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: socSpecificModule,
					},
				},
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/vendor/bin/my_test",
		},
		{
			name: "odm binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: deviceSpecificModule,
					},
				},
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/odm/bin/my_test",
		},
		{
			name: "product binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: productSpecificModule,
					},
				},
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/product/bin/my_test",
		},
		{
			name: "system_ext binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: systemExtSpecificModule,
					},
				},
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/system_ext/bin/my_test",
		},
		{
			name: "root binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
				},
				inRoot: true,
			},
			in:  []string{"my_test"},
			out: "target/product/test_device/root/my_test",
		},
		{
			name: "recovery binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
				},
				inRecovery: true,
			},
			in:  []string{"bin/my_test"},
			out: "target/product/test_device/recovery/root/system/bin/my_test",
		},
		{
			name: "recovery root binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
				},
				inRecovery: true,
				inRoot:     true,
			},
			in:  []string{"my_test"},
			out: "target/product/test_device/recovery/root/my_test",
		},
		{
			name: "system native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
				},
				inData: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/nativetest/my_test",
		},
		{
			name: "vendor native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: socSpecificModule,
					},
				},
				inData: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/nativetest/my_test",
		},
		{
			name: "odm native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: deviceSpecificModule,
					},
				},
				inData: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/nativetest/my_test",
		},
		{
			name: "product native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: productSpecificModule,
					},
				},
				inData: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/nativetest/my_test",
		},
		{
			name: "system_ext native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: systemExtSpecificModule,
					},
				},
				inData: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/nativetest/my_test",
		},
		{
			name: "sanitized system binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
				},
				inSanitizerDir: true,
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/data/asan/system/bin/my_test",
		},
		{
			name: "sanitized vendor binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: socSpecificModule,
					},
				},
				inSanitizerDir: true,
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/data/asan/vendor/bin/my_test",
		},
		{
			name: "sanitized odm binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: deviceSpecificModule,
					},
				},
				inSanitizerDir: true,
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/data/asan/odm/bin/my_test",
		},
		{
			name: "sanitized product binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: productSpecificModule,
					},
				},
				inSanitizerDir: true,
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/data/asan/product/bin/my_test",
		},
		{
			name: "sanitized system_ext binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: systemExtSpecificModule,
					},
				},
				inSanitizerDir: true,
			},
			in:  []string{"bin", "my_test"},
			out: "target/product/test_device/data/asan/system_ext/bin/my_test",
		},
		{
			name: "sanitized system native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
				},
				inData:         true,
				inSanitizerDir: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/asan/data/nativetest/my_test",
		},
		{
			name: "sanitized vendor native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: socSpecificModule,
					},
				},
				inData:         true,
				inSanitizerDir: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/asan/data/nativetest/my_test",
		},
		{
			name: "sanitized odm native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: deviceSpecificModule,
					},
				},
				inData:         true,
				inSanitizerDir: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/asan/data/nativetest/my_test",
		},
		{
			name: "sanitized product native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: productSpecificModule,
					},
				},
				inData:         true,
				inSanitizerDir: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/asan/data/nativetest/my_test",
		},
		{
			name: "sanitized system_ext native test binary",
			ctx: &moduleInstallPathContextImpl{
				baseModuleContext: baseModuleContext{
					os:     deviceTarget.Os,
					target: deviceTarget,
					earlyModuleContext: earlyModuleContext{
						kind: systemExtSpecificModule,
					},
				},
				inData:         true,
				inSanitizerDir: true,
			},
			in:  []string{"nativetest", "my_test"},
			out: "target/product/test_device/data/asan/data/nativetest/my_test",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tc.ctx.baseModuleContext.config = testConfig
			output := PathForModuleInstall(tc.ctx, tc.in...)
			if output.basePath.path != tc.out {
				t.Errorf("unexpected path:\n got: %q\nwant: %q\n",
					output.basePath.path,
					tc.out)
			}
		})
	}
}
// TestDirectorySortedPaths checks that PathsToDirectorySortedPaths
// yields paths in directory order regardless of input order, and that
// PathsInDirectory returns exactly the paths under a given directory.
func TestDirectorySortedPaths(t *testing.T) {
	config := TestConfig("out", nil, "", map[string][]byte{
		"Android.bp": nil,
		"a.txt":      nil,
		"a/txt":      nil,
		"a/b/c":      nil,
		"a/b/d":      nil,
		"b":          nil,
		"b/b.txt":    nil,
		"a/a.txt":    nil,
	})
	ctx := PathContextForTesting(config)
	makePaths := func() Paths {
		return Paths{
			PathForSource(ctx, "a.txt"),
			PathForSource(ctx, "a/txt"),
			PathForSource(ctx, "a/b/c"),
			PathForSource(ctx, "a/b/d"),
			PathForSource(ctx, "b"),
			PathForSource(ctx, "b/b.txt"),
			PathForSource(ctx, "a/a.txt"),
		}
	}
	expected := []string{
		"a.txt",
		"a/a.txt",
		"a/b/c",
		"a/b/d",
		"a/txt",
		"b",
		"b/b.txt",
	}
	paths := makePaths()
	reversePaths := ReversePaths(paths)
	sortedPaths := PathsToDirectorySortedPaths(paths)
	reverseSortedPaths := PathsToDirectorySortedPaths(reversePaths)
	if !reflect.DeepEqual(Paths(sortedPaths).Strings(), expected) {
		t.Fatalf("sorted paths:\n %#v\n != \n %#v", paths.Strings(), expected)
	}
	if !reflect.DeepEqual(Paths(reverseSortedPaths).Strings(), expected) {
		t.Fatalf("sorted reversed paths:\n %#v\n !=\n %#v", reversePaths.Strings(), expected)
	}
	expectedA := []string{
		"a/a.txt",
		"a/b/c",
		"a/b/d",
		"a/txt",
	}
	inA := sortedPaths.PathsInDirectory("a")
	if !reflect.DeepEqual(inA.Strings(), expectedA) {
		t.Errorf("FilesInDirectory(a):\n %#v\n != \n %#v", inA.Strings(), expectedA)
	}
	expectedA_B := []string{
		"a/b/c",
		"a/b/d",
	}
	inA_B := sortedPaths.PathsInDirectory("a/b")
	if !reflect.DeepEqual(inA_B.Strings(), expectedA_B) {
		t.Errorf("FilesInDirectory(a/b):\n %#v\n != \n %#v", inA_B.Strings(), expectedA_B)
	}
	expectedB := []string{
		"b/b.txt",
	}
	inB := sortedPaths.PathsInDirectory("b")
	if !reflect.DeepEqual(inB.Strings(), expectedB) {
		// fixed copy-paste bug: this message previously printed
		// inA/expectedA instead of the "b" directory values
		t.Errorf("FilesInDirectory(b):\n %#v\n != \n %#v", inB.Strings(), expectedB)
	}
}
// TestMaybeRel checks MaybeRel: it returns the relative path and true
// when target lies under base, and false for parents, unrelated paths,
// and mixed absolute/relative inputs.
func TestMaybeRel(t *testing.T) {
	testCases := []struct {
		name   string
		base   string
		target string
		out    string
		isRel  bool
	}{
		{
			name:   "normal",
			base:   "a/b/c",
			target: "a/b/c/d",
			out:    "d",
			isRel:  true,
		},
		{
			name:   "parent",
			base:   "a/b/c/d",
			target: "a/b/c",
			isRel:  false,
		},
		{
			name:   "not relative",
			base:   "a/b",
			target: "c/d",
			isRel:  false,
		},
		{
			name:   "abs1",
			base:   "/a",
			target: "a",
			isRel:  false,
		},
		{
			name:   "abs2",
			base:   "a",
			target: "/a",
			isRel:  false,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			ctx := &configErrorWrapper{}
			out, isRel := MaybeRel(ctx, testCase.base, testCase.target)
			if len(ctx.errors) > 0 {
				t.Errorf("MaybeRel(..., %s, %s) reported unexpected errors %v",
					testCase.base, testCase.target, ctx.errors)
			}
			if isRel != testCase.isRel || out != testCase.out {
				t.Errorf("MaybeRel(..., %s, %s) want %v, %v got %v, %v",
					testCase.base, testCase.target, testCase.out, testCase.isRel, out, isRel)
			}
		})
	}
}
// TestPathForSource runs the same case table against both
// pathForSource and safePathForSource: normal paths succeed, while
// absolute paths and paths inside the output directory return errors.
func TestPathForSource(t *testing.T) {
	testCases := []struct {
		name     string
		buildDir string
		src      string
		err      string
	}{
		{
			name:     "normal",
			buildDir: "out",
			src:      "a/b/c",
		},
		{
			name:     "abs",
			buildDir: "out",
			src:      "/a/b/c",
			err:      "is outside directory",
		},
		{
			name:     "in out dir",
			buildDir: "out",
			src:      "out/a/b/c",
			err:      "is in output",
		},
	}
	funcs := []struct {
		name string
		f    func(ctx PathContext, pathComponents ...string) (SourcePath, error)
	}{
		{"pathForSource", pathForSource},
		{"safePathForSource", safePathForSource},
	}
	for _, f := range funcs {
		t.Run(f.name, func(t *testing.T) {
			for _, test := range testCases {
				t.Run(test.name, func(t *testing.T) {
					testConfig := pathTestConfig(test.buildDir)
					ctx := &configErrorWrapper{config: testConfig}
					_, err := f.f(ctx, test.src)
					if len(ctx.errors) > 0 {
						t.Fatalf("unexpected errors %v", ctx.errors)
					}
					// the expected error (if any) is matched by substring
					if err != nil {
						if test.err == "" {
							t.Fatalf("unexpected error %q", err.Error())
						} else if !strings.Contains(err.Error(), test.err) {
							t.Fatalf("incorrect error, want substring %q got %q", test.err, err.Error())
						}
					} else {
						if test.err != "" {
							t.Fatalf("missing error %q", test.err)
						}
					}
				})
			}
		})
	}
}
// pathForModuleSrcTestModule is a test module that resolves its srcs
// and src properties through the path machinery and records the
// resulting paths, Rel() values, and missing dependencies.
type pathForModuleSrcTestModule struct {
	ModuleBase
	props struct {
		Srcs         []string `android:"path"`
		Exclude_srcs []string `android:"path"`
		Src *string `android:"path"`
		Module_handles_missing_deps bool
	}
	src string
	rel string
	srcs []string
	rels []string
	missingDeps []string
}
// pathForModuleSrcTestModuleFactory constructs a
// pathForModuleSrcTestModule for registration with the test context.
func pathForModuleSrcTestModuleFactory() Module {
	module := &pathForModuleSrcTestModule{}
	module.AddProperties(&module.props)
	InitAndroidModule(module)
	return module
}
// GenerateAndroidBuildActions resolves Srcs/Exclude_srcs (and the
// optional single Src), recording the string and Rel() forms plus any
// missing dependencies, then emits a trivial build rule.
func (p *pathForModuleSrcTestModule) GenerateAndroidBuildActions(ctx ModuleContext) {
	var srcs Paths
	if p.props.Module_handles_missing_deps {
		// module opts into collecting missing deps itself
		srcs, p.missingDeps = PathsAndMissingDepsForModuleSrcExcludes(ctx, p.props.Srcs, p.props.Exclude_srcs)
	} else {
		srcs = PathsForModuleSrcExcludes(ctx, p.props.Srcs, p.props.Exclude_srcs)
	}
	p.srcs = srcs.Strings()
	for _, src := range srcs {
		p.rels = append(p.rels, src.Rel())
	}
	if p.props.Src != nil {
		src := PathForModuleSrc(ctx, *p.props.Src)
		if src != nil {
			p.src = src.String()
			p.rel = src.Rel()
		}
	}
	if !p.props.Module_handles_missing_deps {
		p.missingDeps = ctx.GetMissingDependencies()
	}
	ctx.Build(pctx, BuildParams{
		Rule:   Touch,
		Output: PathForModuleOut(ctx, "output"),
	})
}
// pathForModuleSrcOutputFileProviderModule is a test module exposing
// generated output files (default and ".tagged") via OutputFiles.
type pathForModuleSrcOutputFileProviderModule struct {
	ModuleBase
	props struct {
		Outs   []string
		Tagged []string
	}
	outs   Paths
	tagged Paths
}
// pathForModuleSrcOutputFileProviderModuleFactory constructs a
// pathForModuleSrcOutputFileProviderModule for registration.
func pathForModuleSrcOutputFileProviderModuleFactory() Module {
	module := &pathForModuleSrcOutputFileProviderModule{}
	module.AddProperties(&module.props)
	InitAndroidModule(module)
	return module
}
// GenerateAndroidBuildActions materializes the configured Outs and
// Tagged property names as module-output paths.
func (p *pathForModuleSrcOutputFileProviderModule) GenerateAndroidBuildActions(ctx ModuleContext) {
	for _, out := range p.props.Outs {
		p.outs = append(p.outs, PathForModuleOut(ctx, out))
	}
	for _, tagged := range p.props.Tagged {
		p.tagged = append(p.tagged, PathForModuleOut(ctx, tagged))
	}
}
// OutputFiles returns the default outputs for the empty tag, the
// tagged outputs for ".tagged", and an error for any other tag.
func (p *pathForModuleSrcOutputFileProviderModule) OutputFiles(tag string) (Paths, error) {
	switch tag {
	case "":
		return p.outs, nil
	case ".tagged":
		return p.tagged, nil
	default:
		return nil, fmt.Errorf("unsupported tag %q", tag)
	}
}
// pathForModuleSrcTestCase describes one blueprint snippet and the
// expected resolved srcs/rels (plural) or src/rel (singular).
type pathForModuleSrcTestCase struct {
	name string
	bp   string
	srcs []string
	rels []string
	src  string
	rel  string
}
// testPathForModuleSrc runs each case's blueprint through a test
// context with a filegroup ("a") and an output-file provider ("b")
// available, then compares the "foo" module's recorded srcs/rels and
// src/rel against the expectations.
func testPathForModuleSrc(t *testing.T, buildDir string, tests []pathForModuleSrcTestCase) {
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ctx := NewTestContext()
			ctx.RegisterModuleType("test", pathForModuleSrcTestModuleFactory)
			ctx.RegisterModuleType("output_file_provider", pathForModuleSrcOutputFileProviderModuleFactory)
			ctx.RegisterModuleType("filegroup", FileGroupFactory)
			fgBp := `
				filegroup {
					name: "a",
					srcs: ["src/a"],
				}
			`
			ofpBp := `
				output_file_provider {
					name: "b",
					outs: ["gen/b"],
					tagged: ["gen/c"],
				}
			`
			mockFS := map[string][]byte{
				"fg/Android.bp":     []byte(fgBp),
				"foo/Android.bp":    []byte(test.bp),
				"ofp/Android.bp":    []byte(ofpBp),
				"fg/src/a":          nil,
				"foo/src/b":         nil,
				"foo/src/c":         nil,
				"foo/src/d":         nil,
				"foo/src/e/e":       nil,
				"foo/src_special/$": nil,
			}
			config := TestConfig(buildDir, nil, "", mockFS)
			ctx.Register(config)
			_, errs := ctx.ParseFileList(".", []string{"fg/Android.bp", "foo/Android.bp", "ofp/Android.bp"})
			FailIfErrored(t, errs)
			_, errs = ctx.PrepareBuildActions(config)
			FailIfErrored(t, errs)
			m := ctx.ModuleForTests("foo", "").Module().(*pathForModuleSrcTestModule)
			if g, w := m.srcs, test.srcs; !reflect.DeepEqual(g, w) {
				t.Errorf("want srcs %q, got %q", w, g)
			}
			if g, w := m.rels, test.rels; !reflect.DeepEqual(g, w) {
				t.Errorf("want rels %q, got %q", w, g)
			}
			if g, w := m.src, test.src; g != w {
				t.Errorf("want src %q, got %q", w, g)
			}
			if g, w := m.rel, test.rel; g != w {
				t.Errorf("want rel %q, got %q", w, g)
			}
		})
	}
}
// TestPathsForModuleSrc covers the plural srcs property: plain paths,
// globs, recursive globs, filegroup references, output-file-provider
// references (plain and tagged), and glob escaping of special
// characters.
func TestPathsForModuleSrc(t *testing.T) {
	tests := []pathForModuleSrcTestCase{
		{
			name: "path",
			bp: `
			test {
				name: "foo",
				srcs: ["src/b"],
			}`,
			srcs: []string{"foo/src/b"},
			rels: []string{"src/b"},
		},
		{
			name: "glob",
			bp: `
			test {
				name: "foo",
				srcs: [
					"src/*",
					"src/e/*",
				],
			}`,
			srcs: []string{"foo/src/b", "foo/src/c", "foo/src/d", "foo/src/e/e"},
			rels: []string{"src/b", "src/c", "src/d", "src/e/e"},
		},
		{
			name: "recursive glob",
			bp: `
			test {
				name: "foo",
				srcs: ["src/**/*"],
			}`,
			srcs: []string{"foo/src/b", "foo/src/c", "foo/src/d", "foo/src/e/e"},
			rels: []string{"src/b", "src/c", "src/d", "src/e/e"},
		},
		{
			name: "filegroup",
			bp: `
			test {
				name: "foo",
				srcs: [":a"],
			}`,
			srcs: []string{"fg/src/a"},
			rels: []string{"src/a"},
		},
		{
			name: "output file provider",
			bp: `
			test {
				name: "foo",
				srcs: [":b"],
			}`,
			srcs: []string{buildDir + "/.intermediates/ofp/b/gen/b"},
			rels: []string{"gen/b"},
		},
		{
			name: "output file provider tagged",
			bp: `
			test {
				name: "foo",
				srcs: [":b{.tagged}"],
			}`,
			srcs: []string{buildDir + "/.intermediates/ofp/b/gen/c"},
			rels: []string{"gen/c"},
		},
		{
			name: "special characters glob",
			bp: `
			test {
				name: "foo",
				srcs: ["src_special/*"],
			}`,
			srcs: []string{"foo/src_special/$"},
			rels: []string{"src_special/$"},
		},
	}
	testPathForModuleSrc(t, buildDir, tests)
}
// TestPathForModuleSrc covers the singular src property with the same
// variations as the plural test: plain path, glob, filegroup,
// output-file-provider (plain and tagged), and special characters.
func TestPathForModuleSrc(t *testing.T) {
	tests := []pathForModuleSrcTestCase{
		{
			name: "path",
			bp: `
			test {
				name: "foo",
				src: "src/b",
			}`,
			src: "foo/src/b",
			rel: "src/b",
		},
		{
			name: "glob",
			bp: `
			test {
				name: "foo",
				src: "src/e/*",
			}`,
			src: "foo/src/e/e",
			rel: "src/e/e",
		},
		{
			name: "filegroup",
			bp: `
			test {
				name: "foo",
				src: ":a",
			}`,
			src: "fg/src/a",
			rel: "src/a",
		},
		{
			name: "output file provider",
			bp: `
			test {
				name: "foo",
				src: ":b",
			}`,
			src: buildDir + "/.intermediates/ofp/b/gen/b",
			rel: "gen/b",
		},
		{
			name: "output file provider tagged",
			bp: `
			test {
				name: "foo",
				src: ":b{.tagged}",
			}`,
			src: buildDir + "/.intermediates/ofp/b/gen/c",
			rel: "gen/c",
		},
		{
			name: "special characters glob",
			bp: `
			test {
				name: "foo",
				src: "src_special/*",
			}`,
			src: "foo/src_special/$",
			rel: "src_special/$",
		},
	}
	testPathForModuleSrc(t, buildDir, tests)
}
// TestPathsForModuleSrc_AllowMissingDependencies verifies that when
// ALLOW_MISSING_DEPENDENCIES is set, unresolvable ":name" references are
// collected as missing dependencies instead of failing the build, and that a
// module which sets module_handles_missing_deps records them itself.
func TestPathsForModuleSrc_AllowMissingDependencies(t *testing.T) {
	bp := `
		test {
			name: "foo",
			srcs: [":a"],
			exclude_srcs: [":b"],
			src: ":c",
		}

		test {
			name: "bar",
			srcs: [":d"],
			exclude_srcs: [":e"],
			module_handles_missing_deps: true,
		}
	`

	config := TestConfig(buildDir, nil, bp, nil)
	config.TestProductVariables.Allow_missing_dependencies = proptools.BoolPtr(true)

	ctx := NewTestContext()
	ctx.SetAllowMissingDependencies(true)

	ctx.RegisterModuleType("test", pathForModuleSrcTestModuleFactory)

	ctx.Register(config)

	_, errs := ctx.ParseFileList(".", []string{"Android.bp"})
	FailIfErrored(t, errs)
	_, errs = ctx.PrepareBuildActions(config)
	FailIfErrored(t, errs)

	// "foo" does not handle its own missing deps, so the build system should
	// have recorded them and produced empty src results.
	foo := ctx.ModuleForTests("foo", "").Module().(*pathForModuleSrcTestModule)

	if g, w := foo.missingDeps, []string{"a", "b", "c"}; !reflect.DeepEqual(g, w) {
		t.Errorf("want foo missing deps %q, got %q", w, g)
	}

	if g, w := foo.srcs, []string{}; !reflect.DeepEqual(g, w) {
		t.Errorf("want foo srcs %q, got %q", w, g)
	}

	if g, w := foo.src, ""; g != w {
		t.Errorf("want foo src %q, got %q", w, g)
	}

	// "bar" opts in to handling missing deps itself.
	bar := ctx.ModuleForTests("bar", "").Module().(*pathForModuleSrcTestModule)

	if g, w := bar.missingDeps, []string{"d", "e"}; !reflect.DeepEqual(g, w) {
		t.Errorf("want bar missing deps %q, got %q", w, g)
	}

	if g, w := bar.srcs, []string{}; !reflect.DeepEqual(g, w) {
		t.Errorf("want bar srcs %q, got %q", w, g)
	}
}
// ExampleOutputPath_ReplaceExtension demonstrates that ReplaceExtension swaps
// the file extension while preserving the Rel() portion of the path.
func ExampleOutputPath_ReplaceExtension() {
	ctx := &configErrorWrapper{
		config: TestConfig("out", nil, "", nil),
	}
	p := PathForOutput(ctx, "system/framework").Join(ctx, "boot.art")
	p2 := p.ReplaceExtension(ctx, "oat")
	fmt.Println(p, p2)
	fmt.Println(p.Rel(), p2.Rel())

	// Output:
	// out/system/framework/boot.art out/system/framework/boot.oat
	// boot.art boot.oat
}
// ExampleOutputPath_FileInSameDir demonstrates that InSameDir builds a sibling
// path from extra components, and that the new components become the Rel() value.
func ExampleOutputPath_FileInSameDir() {
	ctx := &configErrorWrapper{
		config: TestConfig("out", nil, "", nil),
	}
	p := PathForOutput(ctx, "system/framework").Join(ctx, "boot.art")
	p2 := p.InSameDir(ctx, "oat", "arm", "boot.vdex")
	fmt.Println(p, p2)
	fmt.Println(p.Rel(), p2.Rel())

	// Output:
	// out/system/framework/boot.art out/system/framework/oat/arm/boot.vdex
	// boot.art oat/arm/boot.vdex
}
|
package main
import (
"fmt"
"math"
)
// main exercises divide on two sample quotients and then prints the results
// of a few shift expressions for comparison.
func main() {
	fmt.Println(divide(0, 1))
	fmt.Println(divide(-2147483648, 2))
	fmt.Print(2 >> 1)
	fmt.Print(2 << 0)
	fmt.Print(2 << 1)
}
// divide computes dividend/divisor (truncated toward zero) without using the
// division operator, doubling the divisor via shifts to subtract the largest
// possible multiples first. The result is clamped to the int32 range.
//
// Division by zero saturates to math.MaxInt32 (math.MinInt32 for a negative
// dividend) instead of looping forever, which the original code did.
func divide(dividend int, divisor int) int {
	if divisor == 0 {
		// Guard: without this, the search loop below never terminates.
		if dividend < 0 {
			return math.MinInt32
		}
		return math.MaxInt32
	}
	// Normalize both operands to non-negative values, tracking the sign of
	// the quotient. int is 64-bit on supported platforms, so negating
	// -2147483648 is safe here.
	sign := 1
	if dividend < 0 {
		dividend = -dividend
		sign = -sign
	}
	if divisor < 0 {
		divisor = -divisor
		sign = -sign
	}
	var count, i uint64
	for dividend >= divisor {
		if dividend < divisor<<i {
			// Overshot: subtract the previous (largest fitting) multiple,
			// credit 2^(i-1) to the quotient, and restart the doubling.
			dividend -= divisor << (i - 1)
			count += 1 << (i - 1)
			i = 0
		} else if dividend == divisor<<i {
			count += 1 << i
			break
		} else {
			i++
		}
	}
	// Clamp to int32 (e.g. MinInt32 / -1 overflows to MaxInt32).
	result := int(count) * sign
	if result > math.MaxInt32 {
		return math.MaxInt32
	} else if result < math.MinInt32 {
		return math.MinInt32
	}
	return result
}
|
// Package lifecycle contains life cycle utilities for autonomous components.
//
// The definitions in this package complement the tomb package.
package lifecycle
import (
"context"
"errors"
)
// Error variables related to AutonomousComponent.
var (
	// ErrStopSignalled reports that the component was asked to stop via
	// SignalStop rather than failing on its own.
	ErrStopSignalled = errors.New("stop signalled")
)
// AutonomousComponent models a long-running component with an explicit
// start/stop/wait life cycle, following the conventions of the tomb package.
type AutonomousComponent interface {
	// Start launches the component's work; ctx carries cancellation.
	Start(ctx context.Context)
	// SignalStop requests shutdown without waiting for it to complete.
	SignalStop()
	// Wait blocks until the component terminates and returns its final error.
	Wait() error
	// Err reports the component's error state (tomb-style semantics —
	// confirm against the implementing types).
	Err() error
	// Dead returns a channel that is closed once the component has terminated.
	Dead() <-chan struct{}
}
|
package errors_test
import (
"fmt"
"io"
"syscall"
"testing"
"github.com/kazhuravlev/options-gen/pkg/errors"
"github.com/stretchr/testify/assert"
)
// TestValidationErrors covers the collection type: an empty collection is not
// an error, nil field errors are dropped, duplicates are kept, and the
// collection can be recovered from the error chain with errors.As.
func TestValidationErrors(t *testing.T) {
	t.Parallel()

	errs := new(errors.ValidationErrors)
	// Empty collection: no error, empty message.
	assert.NoError(t, errs.AsError())
	assert.Equal(t, "", errs.Error())

	errs.Add(errors.NewValidationError("field1", syscall.ENOENT))
	errs.Add(errors.NewValidationError("field2", nil)) // nil errors are ignored
	errs.Add(errors.NewValidationError("field3-dupl", io.EOF))
	errs.Add(errors.NewValidationError("field3-dupl", io.EOF))

	assert.Error(t, errs.AsError())

	expErrStr := `ValidationErrors: (field1): no such file or directory; (field3-dupl): EOF; (field3-dupl): EOF`
	assert.Equal(t, expErrStr, errs.Error())
	assert.Len(t, errs.Errors(), 3)

	var err errors.ValidationErrors
	assert.ErrorAs(t, errs.AsError(), &err)
	assert.Len(t, err.Errors(), 3)
}
// TestValidationError covers a single field error: a nil cause yields a nil
// error, and a non-nil cause stays reachable through the wrap chain.
func TestValidationError(t *testing.T) {
	t.Parallel()

	err := errors.NewValidationError("field1", nil)
	// NOTE(kazhuravlev): We deliberately avoid assert.NoError here because
	// err has the concrete type *validationError; see the next assertion.
	assert.Nil(t, err)
	assert.NotEqual(t, (*errors.ValidationErrors)(nil), error(nil))

	err2 := errors.NewValidationError("field1", io.EOF)
	assert.ErrorIs(t, err2, io.EOF)

	err3 := errors.NewValidationError("field1", fmt.Errorf("some error is occurs: %w", io.EOF))
	assert.ErrorIs(t, err3, io.EOF)
}
|
package model
import (
"errors"
"fmt"
"io"
"math/big"
"strconv"
"time"
"github.com/99designs/gqlgen/graphql"
)
// Statistics is a GraphQL model tying a set of hash values to a coin, user
// and worker, with creation and last-update timestamps.
// NOTE(review): the meaning of Hashes (presumably hash-rate samples) should
// be confirmed against the GraphQL schema.
type Statistics struct {
	ID string `json:"id"`
	CoinID uint64 `json:"coinId"`
	UserID uint64 `json:"userId"`
	WorkerID string `json:"workerId"`
	DateTimeUpdate time.Time `json:"dateTimeUpdate"`
	DateAdded time.Time `json:"dateAdded"`
	Hashes []big.Int `json:"hashes"`
}
// MarshalTimestamp encodes t as a GraphQL scalar: whole seconds since the
// Unix epoch multiplied by 1000 (sub-second precision is dropped, matching
// the original behavior).
//
// The debug fmt.Println that fired on every marshal has been removed.
func MarshalTimestamp(t time.Time) graphql.Marshaler {
	millis := t.Unix() * 1000
	return graphql.WriterFunc(func(w io.Writer) {
		// graphql.WriterFunc provides no way to surface a write error, so the
		// io.WriteString result is intentionally discarded.
		io.WriteString(w, strconv.FormatInt(millis, 10))
	})
}
func UnmarshalTimestamp(v interface{}) (time.Time, error) {
fmt.Println(">>>>>>начала работу UnmarshalTimestamp")
if tmpStr, ok := v.(int); ok {
return time.Unix(int64(tmpStr), 0), nil
}
return time.Time{}, errors.New("Timestamp")
}
|
package main
import (
"log"
"net/http"
_ "github.com/GiG/go-swagger-ui/statik"
"github.com/rakyll/statik/fs"
)
// main serves the embedded swagger-ui assets under /swagger/ on localhost:8000.
func main() {
	statikFS, err := fs.New()
	if err != nil {
		panic(err)
	}

	// Strip the /swagger/ prefix so the embedded filesystem is addressed
	// relative to its root.
	swaggerHandler := http.StripPrefix("/swagger/", http.FileServer(statikFS))
	http.Handle("/swagger/", swaggerHandler)

	log.Println("Listening on localhost:8000")
	log.Fatal(http.ListenAndServe("localhost:8000", nil))
}
|
package gorm
import (
"fmt"
"testing"
"time"
"github.com/Jetereting/gorm"
_ "github.com/go-sql-driver/mysql"
)
var (
db *gorm.DB
err error
)
// init opens the shared MySQL connection used by every test in this file and
// tunes the connection pool. The DSN is a placeholder; on failure the error
// is only printed, leaving db nil — the tests below would then panic.
func init() {
	db, err = gorm.Open("mysql", "user:password@tcp(ip:port)/dbName?charset=utf8")
	if err != nil {
		fmt.Println(err)
		return
	}
	db.DB().SetMaxOpenConns(50)
	db.DB().SetMaxIdleConns(10)
	db.DB().SetConnMaxLifetime(time.Hour * 2)
	db.Debug()
}
// TestQuery exercises a raw SELECT through RawMap (smoke test; errors are
// only printed, not asserted).
func TestQuery(t *testing.T) {
	datas, e := db.RawMap("select * from users where user_id=?", 123)
	if e != nil {
		fmt.Println("err:", e)
	}
	fmt.Println("datas:", datas)
}
// TestIsCanInsert exercises a raw INSERT through RawMap (smoke test; errors
// are only printed, not asserted).
func TestIsCanInsert(t *testing.T) {
	_, e := db.RawMap("insert into users(user_id,user_name,user_tag) values (?,?,?)", 123, "testName", "testTag")
	if e != nil {
		fmt.Println("err:", e)
	}
	fmt.Println("It work!")
}
// TestIsCanUpdate exercises a raw UPDATE through RawMap (smoke test; errors
// are only printed, not asserted).
func TestIsCanUpdate(t *testing.T) {
	_, e := db.RawMap("update users set user_name=? where user_id=?", "testName2", 123)
	if e != nil {
		fmt.Println("err:", e)
	}
	fmt.Println("It work!")
}
// TestTX exercises a transaction: a normal update followed by one expected to
// fail (over-long value), rolling back on error.
//
// Bug fix: the original fell through to tx.Commit() (and the second update)
// even after tx.Rollback() had already ended the transaction; each error
// branch now returns.
func TestTX(t *testing.T) {
	tx := db.Begin()
	_, e := tx.RawMap("update users set user_name=? where user_id=?", "testName3", 123)
	if e != nil {
		fmt.Println("err:", e)
		tx.Rollback()
		return
	}
	_, e = tx.RawMap("update users set user_name=? where user_id=?", "long text....long text....long text....long text....long text....long text....long text....", 123)
	if e != nil {
		fmt.Println("err:", e)
		tx.Rollback()
		return
	}
	tx.Commit()
	fmt.Println("done!")
}
|
package controller
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/andygrunwald/perseus/config"
"github.com/andygrunwald/perseus/dependency"
"github.com/andygrunwald/perseus/dependency/repository"
"github.com/andygrunwald/perseus/downloader"
)
// AddController reflects the business logic and the Command interface to add a new package.
// This command is independent from a human interface (CLI, HTTP, etc.);
// the human interfaces interact with this command.
type AddController struct {
	// WithDependencies decides if the dependencies of an external package need to be mirrored as well
	WithDependencies bool
	// Package is the package to mirror
	Package string
	// Config is the main medusa configuration
	Config *config.Medusa
	// Log represents a logger to log messages
	Log logrus.FieldLogger
	// NumOfWorker is the number of workers used for concurrent actions (like resolving the dependency tree)
	NumOfWorker int
}
// downloadResult represents the result of a download: the package name and
// the error that occurred for it, if any.
type downloadResult struct {
	Package string
	Error error
}
// Run is the business logic of AddCommand: resolve the repository URL of
// c.Package (from configuration, or by asking Packagist), optionally resolve
// its dependency tree, mirror every collected package concurrently via the
// git downloader, and finally record the mirrored repositories in the Satis
// configuration.
func (c *AddController) Run() error {
	p, err := dependency.NewPackage(c.Package, "")
	if err != nil {
		return err
	}
	var satisRepositories []string
	downloadablePackages := []*dependency.Package{}
	// We don't respect the error here.
	// OH: "WTF? Why? You claim 'Serious error handling' in the README!"
	// Yep, you are right. And we still do.
	// In this case, it is okay, if p is not configured or no repositories are configured at all.
	// When this happens, we will ask Packagist for the repository url.
	// If this package is not available on packagist, this will be shifted to an error.
	p.Repository, _ = c.Config.GetRepositoryURLOfPackage(p)
	if p.Repository == nil {
		// Check if we should load the dependency also
		if c.WithDependencies {
			pUrl := "https://packagist.org/"
			c.Log.WithFields(logrus.Fields{
				"package": c.Package,
				"source": pUrl,
			}).Info("Loading dependencies")
			packagistClient, err := repository.NewPackagist(pUrl, nil)
			if err != nil {
				return err
			}
			// Lets get a dependency resolver.
			// If we can't bootstrap one, we are lost anyway.
			// We set the queue length to the number of workers + 1. Why?
			// With this every worker has work, when the queue is filled.
			// During the add command, this is enough in most of the cases.
			d, err := dependency.NewComposerResolver(c.NumOfWorker, packagistClient)
			if err != nil {
				return err
			}
			results := d.GetResultStream()
			go d.Resolve([]*dependency.Package{p})
			dependencyNames := []string{}
			// Finally we collect all the results of the work.
			// The stream is closed by the resolver when it is done.
			for v := range results {
				downloadablePackages = append(downloadablePackages, v.Package)
				dependencyNames = append(dependencyNames, v.Package.Name)
			}
			if l := len(dependencyNames); l == 0 {
				c.Log.WithFields(logrus.Fields{
					"amount": l,
					"package": c.Package,
					"source": pUrl,
				}).Info("No dependencies found")
			} else {
				c.Log.WithFields(logrus.Fields{
					"amount": l,
					"package": c.Package,
					"source": pUrl,
					"dependencies": strings.Join(dependencyNames, ", "),
				}).Info("Dependencies found")
			}
		} else {
			// It seems to be that we don't have an URL for the package
			// Lets ask packagist for it
			p, err = c.getURLOfPackageFromPackagist(p)
			if err != nil {
				return err
			}
			downloadablePackages = append(downloadablePackages, p)
		}
	} else {
		c.Log.WithFields(logrus.Fields{
			"package": p.Name,
			"repository": p.Repository,
		}).Info("Mirroring started")
		downloadablePackages = append(downloadablePackages, p)
	}
	// Okay, we have everything done here.
	// Resolved the dependencies (or not) and collected the packages.
	// I would say we can start with downloading them ....
	// Why we are talking? Lets do it!
	c.Log.WithFields(logrus.Fields{
		"amountPackages": len(downloadablePackages),
		"amountWorker": c.NumOfWorker,
	}).Info("Start concurrent download process")
	d, err := downloader.NewGitDownloader(c.NumOfWorker, c.Config.GetString("repodir"))
	if err != nil {
		return err
	}
	results := d.GetResultStream()
	d.Download(downloadablePackages)
	// Drain exactly one result per scheduled package.
	for i := 1; i <= len(downloadablePackages); i++ {
		v := <-results
		if v.Error != nil {
			// An "already exists" error means the mirror is on disk from an
			// earlier run; it is still added to the Satis repositories below.
			if os.IsExist(v.Error) {
				c.Log.WithFields(logrus.Fields{
					"package": v.Package.Name,
				}).Info("Package exists on disk. Try updating it instead. Skipping.")
			} else {
				c.Log.WithFields(logrus.Fields{
					"package": v.Package.Name,
				}).WithError(v.Error).Info("Error while mirroring package")
				// If we have an error, we don't need to add it to satis repositories
				continue
			}
		} else {
			c.Log.WithFields(logrus.Fields{
				"package": v.Package.Name,
			}).Info("Mirroring of package successful")
		}
		satisRepositories = append(satisRepositories, c.getLocalUrlForRepository(v.Package.Name))
	}
	d.Close()
	// And as a final step, write the satis configuration
	err = c.writeSatisConfig(satisRepositories...)
	return err
}
// writeSatisConfig appends the given repository URLs to the Satis
// configuration file named by the "satisconfig" setting and writes it back.
// A missing setting is not an error — the step is simply skipped.
func (c *AddController) writeSatisConfig(satisRepositories ...string) error {
	// Write Satis file
	satisConfig := c.Config.GetString("satisconfig")
	if len(satisConfig) == 0 {
		c.Log.Info("No Satis configuration specified. Skipping to write a satis configuration.")
		return nil
	}
	satisContent, err := ioutil.ReadFile(satisConfig)
	if err != nil {
		return fmt.Errorf("Can't read Satis configuration %s: %s", satisConfig, err)
	}
	j, err := config.NewJSONProvider(satisContent)
	if err != nil {
		return fmt.Errorf("Error while creating JSONProvider: %s", err)
	}
	s, err := config.NewSatis(j)
	if err != nil {
		return fmt.Errorf("Error while creating Satis object: %s", err)
	}
	s.AddRepositories(satisRepositories...)
	// 0644: owner read/write, world readable — the file is plain JSON config.
	err = s.WriteFile(satisConfig, 0644)
	if err != nil {
		return fmt.Errorf("Writing Satis configuration to %s failed: %s", satisConfig, err)
	}
	c.Log.WithFields(logrus.Fields{
		"path": satisConfig,
	}).Info("Satis configuration successful written")
	return nil
}
// getLocalUrlForRepository builds the URL under which the mirrored repository
// of package p is reachable: "<satisurl>/<p>.git" when a Satis URL is
// configured, otherwise a "file:///" URL rooted at the "repodir" setting.
func (c *AddController) getLocalUrlForRepository(p string) string {
	var r string
	satisURL := c.Config.GetString("satisurl")
	repoDir := c.Config.GetString("repodir")
	if len(satisURL) > 0 {
		r = fmt.Sprintf("%s/%s.git", satisURL, p)
	} else {
		t := fmt.Sprintf("%s/%s.git", repoDir, p)
		// Normalize and strip leading slashes so the file URL always carries
		// exactly three slashes after the scheme.
		t = strings.TrimLeft(filepath.Clean(t), "/")
		r = fmt.Sprintf("file:///%s", t)
	}
	return r
}
// getURLOfPackageFromPackagist asks Packagist for the repository URL of
// package p and overwrites p's name and repository with Packagist's answer.
// On failure, p is returned unchanged alongside the error.
func (c *AddController) getURLOfPackageFromPackagist(p *dependency.Package) (*dependency.Package, error) {
	packagistClient, err := repository.NewPackagist("https://packagist.org/", nil)
	if err != nil {
		return p, fmt.Errorf("Packagist client creation failed: %s", err)
	}
	packagistPackage, resp, err := packagistClient.GetPackageByName(p.Name)
	if err != nil {
		// Bug fix: resp can be nil when the request itself failed, so it must
		// not be dereferenced unconditionally while building the message.
		calledURL := "unknown URL"
		if resp != nil && resp.Request != nil && resp.Request.URL != nil {
			calledURL = resp.Request.URL.String()
		}
		return p, fmt.Errorf("Failed to retrieve information about package \"%s\" from Packagist. Called %s. Error: %s", p.Name, calledURL, err)
	}
	// Check if URL is empty
	if len(packagistPackage.Repository) == 0 {
		return p, fmt.Errorf("Received empty URL for package %s from Packagist", p.Name)
	}
	// Overwriting values from Packagist
	p.Name = packagistPackage.Name
	u, err := url.Parse(packagistPackage.Repository)
	if err != nil {
		return p, fmt.Errorf("URL conversion of %s to a net/url.URL object failed: %s", packagistPackage.Repository, err)
	}
	p.Repository = u
	return p, nil
}
|
// Magic 8 ball
package main
import (
"bufio"
"fmt"
"math/rand"
"os"
"time"
)
// RESPONSES holds the magic 8-ball's possible answers; one is picked at
// random per question.
var RESPONSES = []string{
	"No",
	"Yes",
	"Maybe",
	"Ask again later",
}
// rng is seeded once at startup. The original code called rand.Seed on every
// invocation, which makes rapid successive calls (same UnixNano tick) return
// identical values.
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

// randomInt returns a uniformly distributed int in the half-open range
// [low, high). It panics if high <= low (rand.Intn's contract).
func randomInt(low, high int) int {
	return rng.Intn(high-low) + low
}
func main() {
fmt.Print("What is your question? ")
reader := bufio.NewReader(os.Stdin)
reader.ReadString('\n') // no variables taken :p
randomNumber := randomInt(0, len(RESPONSES))
fmt.Println(RESPONSES[randomNumber])
}
|
package glman
// Model is the interface for a renderable 3D model object.
type Model interface {
	// Render draws the model (rendering target/state is implementation-defined).
	Render()
}
|
package tameshigiri
import "fmt"
import "runtime"
import "testing"
// NumberOfProcessedAssertion is a package-global running total of every
// assertion processed so far, across all Assertion instances.
var NumberOfProcessedAssertion uint = 0

// Assertion wraps *testing.T with a running assertion counter.
//
// When an assertion fails (an unexpected result), the most recent call
// stacks (up to 2KB) are logged along with the human-readable description —
// but only while stack dumping is enabled.
type Assertion struct {
	T *testing.T
	stackDumpEnabled bool
}

// NewAssertion returns an Assertion bound to t with stack dumps enabled.
func NewAssertion(t *testing.T) Assertion {
	a := Assertion{T: t}
	a.EnableStackDump()
	return a
}

// IsTrue checks that result is true, failing the test (when stack dumping is
// enabled) and returning false otherwise. Every call bumps the global counter.
func (a *Assertion) IsTrue(result bool, description string) bool {
	NumberOfProcessedAssertion++
	if result {
		return true
	}
	if a.stackDumpEnabled {
		a.T.Logf("#%d FAILED\n", NumberOfProcessedAssertion)
		a.T.Logf("#%d %s\n", NumberOfProcessedAssertion, description)
		a.T.FailNow()
	}
	return false
}

// IsFalse checks that result is false; it is IsTrue of the negation.
func (a *Assertion) IsFalse(result bool, description string) bool {
	return a.IsTrue(!result, description)
}

// Equals asserts that actual equals expected (interface equality). On a
// mismatch it reports both values, dumps the stack, and fails the test —
// again only while stack dumping is enabled.
func (a *Assertion) Equals(expected interface{}, actual interface{}, description string) bool {
	wasEnabled := a.stackDumpEnabled
	// Silence IsTrue temporarily so the mismatch is reported here, where both
	// values can be included.
	if wasEnabled {
		a.DisableStackDump()
	}
	ok := a.IsTrue(expected == actual, "")
	if wasEnabled {
		a.EnableStackDump()
	}
	if ok {
		return true
	}
	if wasEnabled {
		a.T.Logf("#%d FAILED\n", NumberOfProcessedAssertion)
		a.T.Logf("#%d %s\n", NumberOfProcessedAssertion, description)
		tag := fmt.Sprintf("#%d", NumberOfProcessedAssertion)
		a.T.Log(tag, "Expected:", expected)
		a.T.Log(tag, "Given:", actual)
		a.dumpStack()
		a.T.FailNow()
	}
	return false
}

// EnableStackDump turns stack dumping (and hard test failures) on.
func (a *Assertion) EnableStackDump() {
	a.stackDumpEnabled = true
}

// DisableStackDump turns stack dumping (and hard test failures) off.
func (a *Assertion) DisableStackDump() {
	a.stackDumpEnabled = false
}

// dumpStack logs up to 2KB of all goroutines' stacks, if dumping is enabled.
func (a *Assertion) dumpStack() {
	if !a.stackDumpEnabled {
		return
	}
	buffer := make([]byte, 2048) // cap the dump at 2KB
	runtime.Stack(buffer, true)
	a.T.Logf("#%d Detail: %s\n", NumberOfProcessedAssertion, buffer)
}
|
package main
import "fmt"
// main grows a slice one element at a time (so cap ends up at 4 after the
// usual 1→2→4 doubling), then shows how reverse5 mutates it through the
// shared backing array.
func main() {
	var s []int
	for _, v := range [3]int{1, 2, 3} {
		s = append(s, v)
	}
	fmt.Println(cap(s))
	reverse5(s)
	fmt.Println(s)
}
// reverse2 appends three sentinel values to its local copy of s and reverses
// the extended slice in place.
//
// NOTE(review): this is aliasing-demo code. The append may or may not write
// into the caller's backing array depending on spare capacity, and the
// caller's slice header (length) is never updated — so what the caller
// observes afterwards depends entirely on cap(s) at the call site.
func reverse2(s []int) {
	s = append(s, 999, 1000, 1001)
	// j is recomputed from i each iteration; combined with i < j this walks
	// the two indices toward the middle, swapping as it goes.
	for i, j := 0, len(s)-1; i < j; i++ {
		j = len(s) - (i + 1)
		s[i], s[j] = s[j], s[i]
	}
}
// reverse5 pads its local copy of s with increasing values (999, 1000, ...)
// until len == cap, then reverses the padded slice in place.
//
// NOTE(review): aliasing-demo code. Because the appends stop exactly at cap,
// they always write into the caller's backing array, so after the in-place
// reversal the caller's (shorter) view shows the reversed tail elements.
func reverse5(s []int) {
	newElem := 999
	for len(s) < cap(s) {
		fmt.Println("Adding an element:", newElem, "cap:", cap(s), "len:", len(s))
		s = append(s, newElem)
		newElem++
	}
	// Swap symmetric elements, recomputing j from i each iteration.
	for i, j := 0, len(s)-1; i < j; i++ {
		j = len(s) - (i + 1)
		s[i], s[j] = s[j], s[i]
	}
}
package namecheap
import (
"encoding/xml"
"fmt"
)
// DomainsResponse is the ApiResponse envelope for the namecheap
// domains.getList command.
type DomainsResponse struct {
	XMLName xml.Name `xml:"ApiResponse"`
	// Errors collects any <Errors><Error> entries returned by the API.
	Errors []struct {
		Message string `xml:",chardata"`
		Number string `xml:"Number,attr"`
	} `xml:"Errors>Error"`
	CommandResponse struct {
		Domains []Domain `xml:"DomainGetListResult>Domain"`
	} `xml:"CommandResponse"`
}

// RecordsResponse is the ApiResponse envelope for domains.dns.getHosts.
type RecordsResponse struct {
	XMLName xml.Name `xml:"ApiResponse"`
	Errors []struct {
		Message string `xml:",chardata"`
		Number string `xml:"Number,attr"`
	} `xml:"Errors>Error"`
	CommandResponse struct {
		Records []Record `xml:"DomainDNSGetHostsResult>host"`
	} `xml:"CommandResponse"`
}

// RecordsCreateResult is the ApiResponse envelope for domains.dns.setHosts.
type RecordsCreateResult struct {
	XMLName xml.Name `xml:"ApiResponse"`
	Errors []struct {
		Message string `xml:",chardata"`
		Number string `xml:"Number,attr"`
	} `xml:"Errors>Error"`
	CommandResponse struct {
		DomainDNSSetHostsResult struct {
			Domain string `xml:"Domain,attr"`
			IsSuccess bool `xml:"IsSuccess,attr"`
		} `xml:"DomainDNSSetHostsResult"`
	} `xml:"CommandResponse"`
}

// NSListResponse is the ApiResponse envelope for domains.dns.getList
// (the domain's nameservers).
type NSListResponse struct {
	XMLName xml.Name `xml:"ApiResponse"`
	Errors []struct {
		Message string `xml:",chardata"`
		Number string `xml:"Number,attr"`
	} `xml:"Errors>Error"`
	CommandResponse struct {
		DomainDNSGetListResult []string `xml:"DomainDNSGetListResult>Nameserver"`
	} `xml:"CommandResponse"`
}

// NSSetCustomRepsonse is the ApiResponse envelope for domains.dns.setCustom.
// NOTE(review): the name misspells "Response"; renaming it would break
// callers, so the typo is kept for compatibility.
type NSSetCustomRepsonse struct {
	XMLName xml.Name `xml:"ApiResponse"`
	Errors []struct {
		Message string `xml:",chardata"`
		Number string `xml:"Number,attr"`
	} `xml:"Errors>Error"`
	CommandResponse struct {
		DomainDNSSetCustomResult struct {
			Domain string `xml:"Domain,attr"`
			Updated bool `xml:"Updated,attr"`
		} `xml:"DomainDNSSetCustomResult"`
	} `xml:"CommandResponse"`
}

// NSSetDefaultResponse is the ApiResponse envelope for domains.dns.setDefault.
type NSSetDefaultResponse struct {
	XMLName xml.Name `xml:"ApiResponse"`
	Errors []struct {
		Message string `xml:",chardata"`
		Number string `xml:"Number,attr"`
	} `xml:"Errors>Error"`
	CommandResponse struct {
		DomainDNSSetDefaultResult struct {
			Domain string `xml:"Domain,attr"`
			Updated bool `xml:"Updated,attr"`
		} `xml:"DomainDNSSetDefaultResult"`
	} `xml:"CommandResponse"`
}
// Domain is used to represent a retrieved Domain. All properties
// are set as strings.
type Domain struct {
	ID string `xml:"ID,attr"`
	Name string `xml:"Name,attr"`
}
// Record is used to represent a retrieved Record. All properties
// are set as strings.
type Record struct {
Name string `xml:"Name,attr"`
FriendlyName string `xml:"FriendlyName,attr"`
Address string `xml:"Address,attr"`
MXPref int `xml:"MXPref,attr"`
AssociatedAppTitle string `xml:"AssociatedAppTitle,attr"`
Id int `xml:"HostId,attr"`
RecordType string `xml:"Type,attr"`
TTL int `xml:"TTL,attr"`
IsActive bool `xml:"IsActive,attr"`
IsDDNSEnabled bool `xml:"IsDDNSEnabled,attr"`
}
// return a map[string]string of differences between two Records
func (r *Record) diff(other *Record) map[string]string {
out := make(map[string]string, 0)
if r.Name != other.Name {
out["Name"] = fmt.Sprintf("%q vs %q", r.Name, other.Name)
}
if r.Address != other.Address {
out["Address"] = fmt.Sprintf("%q vs %q", r.Address, other.Address)
}
if r.MXPref != other.MXPref {
out["MXPref"] = fmt.Sprintf("%d vs %d", r.MXPref, other.MXPref)
}
if r.AssociatedAppTitle != other.AssociatedAppTitle {
out["AssociatedAppTitle"] = fmt.Sprintf("%q vs %q", r.AssociatedAppTitle, other.AssociatedAppTitle)
}
if r.RecordType != other.RecordType {
out["RecordType"] = fmt.Sprintf("%q vs %q", r.RecordType, other.RecordType)
}
if r.TTL != other.TTL {
out["TTL"] = fmt.Sprintf("%d vs %d", r.TTL, other.TTL)
}
if r.IsActive != other.IsActive {
out["IsActive"] = fmt.Sprintf("%v vs %v", r.IsActive, other.IsActive)
}
if r.IsDDNSEnabled != other.IsDDNSEnabled {
out["IsDDNSEnabled"] = fmt.Sprintf("%v vs %v", r.IsDDNSEnabled, other.IsDDNSEnabled)
}
return out
}
// "Equal" in the sense that clients would see them as the same
func (r *Record) Equal(other *Record) bool {
return len(r.diff(other)) == 0
}
|
package targets
import (
"fmt"
"os"
"../effects"
"../utils"
)
import . "../defs"
// OutputCallbacks writes the callback address table (one .dw word per
// callback) to outFile and returns the table's size in bytes.
func (cg *CodeGeneratorWla) OutputCallbacks(outFile *os.File) int {
	callbacksSize := 0

	outFile.WriteString("xpmp_callback_tbl:\n")
	for _, cb := range cg.itarget.GetCompilerItf().GetCallbacks() {
		outFile.WriteString(".dw " + cb + "\n")
		callbacksSize += 2 // one 16-bit word per entry
	}
	outFile.WriteString("\n")

	utils.INFO("Size of callback table: %d bytes", callbacksSize)

	return callbacksSize
}
// OutputEffectFlags emits one .DEFINE XPMP_CHN<n>_USES_<effect> directive for
// every channel that uses a given effect in at least one song, so the player
// code for unused effects can be assembled out.
func (cg *CodeGeneratorWla) OutputEffectFlags(outFile *os.File) {
	songs := cg.itarget.GetCompilerItf().GetSongs()
	numChannels := len(songs[0].GetChannels())
	for _, effName := range EFFECT_STRINGS {
		for c := 0; c < numChannels; c++ {
			for _, sng := range songs {
				channels := sng.GetChannels()
				if channels[c].IsUsingEffect(effName) {
					outFile.WriteString(fmt.Sprintf(".DEFINE XPMP_CHN%d_USES_", channels[c].GetNum()) + effName + "\n")
					// One define per channel/effect pair is enough.
					break
				}
			}
		}
	}
}
// OutputPatterns writes the pattern data (as .db byte rows, 16 values per
// line) followed by the pattern address table, and returns the total number
// of bytes emitted.
func (cg *CodeGeneratorWla) OutputPatterns(outFile *os.File) int {
	patSize := 0
	patterns := cg.itarget.GetCompilerItf().GetPatterns()
	for n, pat := range patterns {
		outFile.WriteString(fmt.Sprintf("xpmp_pattern%d:", n))
		cmds := pat.GetCommands()
		for j, cmd := range cmds {
			// Start a new .db row every 16 bytes.
			if (j % 16) == 0 {
				outFile.WriteString("\n.db ")
			}
			outFile.WriteString(fmt.Sprintf("$%02x", cmd & 0xFF))
			// Separate values with commas except at row ends / data end.
			if j < len(cmds)-1 && (j % 16) != 15 {
				outFile.WriteString(",")
			}
		}
		outFile.WriteString("\n")
		patSize += len(cmds)
	}

	outFile.WriteString("\nxpmp_pattern_tbl:\n")
	for n := range patterns {
		outFile.WriteString(fmt.Sprintf(".dw xpmp_pattern%d\n", n))
		patSize += 2 // one 16-bit address per pattern
	}
	outFile.WriteString("\n")

	return patSize
}
// OutputChannelData writes the per-channel command streams (notes, volume
// commands, effect invocations, etc.) for every non-virtual channel of every
// song, followed by the song table of channel addresses. It returns the
// total number of bytes emitted and prints per-channel size/tick statistics.
func (cg *CodeGeneratorWla) OutputChannelData(outFile *os.File) int {
	songDataSize := 0
	songs := cg.itarget.GetCompilerItf().GetSongs()
	for n, sng := range songs {
		channels := sng.GetChannels()
		if n > 0 {
			fmt.Printf("\n")
		}
		for _, chn := range channels {
			// Virtual channels carry no playable data.
			if chn.IsVirtual() {
				continue
			}
			outFile.WriteString(fmt.Sprintf("xpmp_s%d_channel_%s:", n, chn.GetName()))
			commands := chn.GetCommands()
			for j, cmd := range commands {
				// 16 bytes per .db row, comma-separated within a row.
				if (j % 16) == 0 {
					outFile.WriteString("\n.db ")
				}
				outFile.WriteString(fmt.Sprintf("$%02x", cmd & 0xFF))
				songDataSize++
				if j < len(commands)-1 && (j % 16) != 15 {
					outFile.WriteString(",")
				}
			}
			outFile.WriteString("\n")
			fmt.Printf("Song %d, Channel %s: %d bytes, %d / %d ticks\n",
				sng.GetNum(), chn.GetName(), len(commands), utils.Round2(float64(chn.GetTicks())), utils.Round2(float64(chn.GetLoopTicks())))
		}
	}

	outFile.WriteString("\nxpmp_song_tbl:\n")
	for n, sng := range songs {
		channels := sng.GetChannels()
		for _, chn := range channels {
			if chn.IsVirtual() {
				continue
			}
			outFile.WriteString(fmt.Sprintf(".dw xpmp_s%d_channel_%s\n", n, chn.GetName()))
			songDataSize += 2 // one 16-bit address per channel
		}
	}

	return songDataSize
}
// OutputTable writes an effect parameter table (e.g. arpeggio/vibrato data)
// named tblName: per-key data rows, then an address table, and — when
// canLoop is set — loop-entry labels and a loop address table. Data bytes
// are multiplied by scaling; any byte equal to loopDelim is bumped by one so
// loopDelim stays unambiguous as the end-of-data marker. Returns the number
// of data/table bytes written.
func (cg *CodeGeneratorWla) OutputTable(outFile *os.File, tblName string, effMap *effects.EffectMap, canLoop bool, scaling int, loopDelim int) int {
	var bytesWritten, dat int

	bytesWritten = 0
	hexPrefix := "$"
	byteDecl := ".db"
	wordDecl := ".dw"

	if effMap.Len() > 0 {
		for _, key := range effMap.GetKeys() {
			outFile.WriteString(fmt.Sprintf(tblName + "_%d:", key))
			effectData := effMap.GetData(key)
			for j, param := range effectData.MainPart {
				dat = (param.(int) * scaling) & 0xFF
				// Keep loopDelim reserved as the terminator value.
				if canLoop && (dat == loopDelim) {
					dat++
				}
				if canLoop && j == len(effectData.MainPart)-1 && len(effectData.LoopedPart) == 0 {
					// Last byte with no looped part: terminate the main data
					// and make the loop label point at this final byte.
					if j > 0 {
						outFile.WriteString(fmt.Sprintf(", %s%02x", hexPrefix, loopDelim))
					}
					outFile.WriteString(fmt.Sprintf("\n" + tblName + "_%d_loop:\n", key))
					outFile.WriteString(fmt.Sprintf("%s %s%02x, %s%02x", byteDecl, hexPrefix, dat, hexPrefix, loopDelim))
					bytesWritten += 3
				} else if j == 0 {
					outFile.WriteString(fmt.Sprintf("\n%s %s%02x", byteDecl, hexPrefix, dat))
					bytesWritten += 1
				} else {
					outFile.WriteString(fmt.Sprintf(", %s%02x", hexPrefix, dat))
					bytesWritten += 1
				}
			}
			if canLoop && len(effectData.LoopedPart) > 0 {
				// Separate the main part from the looped part with the delimiter.
				if len(effectData.MainPart) > 0 {
					outFile.WriteString(fmt.Sprintf(", %s%02x", hexPrefix, loopDelim))
					bytesWritten += 1
				}
				outFile.WriteString(fmt.Sprintf("\n" + tblName + "_%d_loop:\n", key))
				for j, param := range effectData.LoopedPart {
					dat = (param.(int) * scaling) & 0xFF
					if dat == loopDelim && canLoop {
						dat++
					}
					if j == 0 {
						outFile.WriteString(fmt.Sprintf("%s %s%02x", byteDecl, hexPrefix, dat))
					} else {
						outFile.WriteString(fmt.Sprintf(", %s%02x", hexPrefix, dat))
					}
					bytesWritten += 1
				}
				outFile.WriteString(fmt.Sprintf(", %s%02x", hexPrefix, loopDelim))
				bytesWritten += 1
			}
			outFile.WriteString("\n")
		}
		// Address table(s): one word per key, plus loop entries if looping.
		outFile.WriteString(tblName + "_tbl:\n")
		for _, key := range effMap.GetKeys() {
			outFile.WriteString(fmt.Sprintf("%s " + tblName + "_%d\n", wordDecl, key))
			bytesWritten += 2
		}
		if canLoop {
			outFile.WriteString(tblName + "_loop_tbl:\n")
			for _, key := range effMap.GetKeys() {
				outFile.WriteString(fmt.Sprintf("%s " + tblName + "_%d_loop\n", wordDecl, key))
				bytesWritten += 2
			}
		}
		outFile.WriteString("\n")
	} else {
		// No data: still emit the (empty) table labels so references assemble.
		outFile.WriteString(tblName + "_tbl:\n")
		if canLoop {
			outFile.WriteString(tblName + "_loop_tbl:\n")
		}
		outFile.WriteString("\n")
	}

	return bytesWritten
}
|
package main
import (
"context"
"fmt"
_ "github.com/denisenkom/go-mssqldb"
)
// GetState fetches a single State row by its StateId. When no row matches, it
// returns a zero-valued State and a nil error (preserving prior behavior).
func GetState(stateId int) (*State, error) {
	ctx := context.Background()

	// Check if database is alive.
	err := db.PingContext(ctx)
	if err != nil {
		return nil, err
	}

	// Building SQL with Sprintf is safe here only because stateId is an int;
	// never extend this pattern to string parameters — use query arguments.
	tsql := fmt.Sprintf(`SELECT
		s.StateId
		,s.Name
		,s.CreatedOn
	FROM dbo.State s
	WHERE s.StateId = %d`, stateId)

	// Execute query
	rows, err := db.QueryContext(ctx, tsql)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	s := State{}
	// Iterate through the result set (at most one row for a key lookup).
	for rows.Next() {
		if err := rows.Scan(&s.StateId, &s.Name, &s.CreatedOn); err != nil {
			return nil, err
		}
	}
	// Bug fix: surface any error that terminated the iteration; previously a
	// failed iteration was indistinguishable from "no rows".
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return &s, nil
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01100104 is the XML document envelope for the caaa.011.001.04
// (AcceptorBatchTransfer) ISO 20022 message.
type Document01100104 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.011.001.04 Document"`
	Message *AcceptorBatchTransferV04 `xml:"AccptrBtchTrf"`
}

// AddMessage allocates the document's message payload and returns it for
// further population.
func (d *Document01100104) AddMessage() *AcceptorBatchTransferV04 {
	d.Message = new(AcceptorBatchTransferV04)
	return d.Message
}
// AcceptorBatchTransferV04 is sent by an acceptor (or its agent) to transfer
// the financial data of a collection of transactions to the acquirer (or its
// agent).
type AcceptorBatchTransferV04 struct {

	// Batch capture message management information.
	Header *iso20022.Header12 `xml:"Hdr"`

	// Card payment transactions from one or several data set of transactions.
	BatchTransfer *iso20022.CardPaymentBatchTransfer3 `xml:"BtchTrf"`

	// Trailer of the message containing a MAC or a digital signature.
	SecurityTrailer *iso20022.ContentInformationType12 `xml:"SctyTrlr"`
}

// AddHeader allocates and returns the message header.
func (a *AcceptorBatchTransferV04) AddHeader() *iso20022.Header12 {
	a.Header = new(iso20022.Header12)
	return a.Header
}

// AddBatchTransfer allocates and returns the batch transfer payload.
func (a *AcceptorBatchTransferV04) AddBatchTransfer() *iso20022.CardPaymentBatchTransfer3 {
	a.BatchTransfer = new(iso20022.CardPaymentBatchTransfer3)
	return a.BatchTransfer
}

// AddSecurityTrailer allocates and returns the security trailer.
func (a *AcceptorBatchTransferV04) AddSecurityTrailer() *iso20022.ContentInformationType12 {
	a.SecurityTrailer = new(iso20022.ContentInformationType12)
	return a.SecurityTrailer
}
|
package inmemory
import (
"errors"
"sync"
"time"
"github.com/Tinee/go-graphql-chat/domain"
)
var (
	// ErrProfileNotFound is returned by Find when no profile matches the id.
	ErrProfileNotFound = errors.New("profile not found in memory")
)

// profileInMemory is an in-memory Profile store guarded by mtx.
// NOTE(review): mtx is a *sync.Mutex, so the zero value of this struct is
// unusable — the constructor (not visible here) must supply a non-nil mutex;
// confirm, or consider an embedded sync.Mutex value instead.
type profileInMemory struct {
	mtx *sync.Mutex
	profiles []domain.Profile
}
// Create stores p — stamped with a fresh ID and creation time — and returns
// the stored copy. The error result is always nil.
func (m *profileInMemory) Create(p domain.Profile) (domain.Profile, error) {
	p.ID = generateID()
	p.CreatedAt = time.Now()

	m.mtx.Lock()
	defer m.mtx.Unlock()
	m.profiles = append(m.profiles, p)

	return p, nil
}
// FindMany returns up to take profiles starting at offset. An offset past the
// end yields nil; a take that is never reached (e.g. 0 or negative) yields
// everything from offset on, matching the original behavior.
func (m *profileInMemory) FindMany(take int, offset int) []domain.Profile {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	var res []domain.Profile
	if offset >= len(m.profiles) {
		return res
	}
	for i := offset; i < len(m.profiles); i++ {
		res = append(res, m.profiles[i])
		if len(res) == take {
			break
		}
	}
	return res
}
// Find returns a pointer to a copy of the profile with the given id, or
// ErrProfileNotFound when no profile matches.
func (m *profileInMemory) Find(id string) (*domain.Profile, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	for i := range m.profiles {
		if m.profiles[i].ID != id {
			continue
		}
		// Return a copy so callers cannot mutate the stored slice element.
		p := m.profiles[i]
		return &p, nil
	}
	return nil, ErrProfileNotFound
}
|
package main
import (
"newfeed/flatform/newfeed"
"newfeed/httpd/handler"
"github.com/gin-gonic/gin"
)
func main() {
feed := newfeed.New()
r := gin.Default()
r.GET("/ping", handler.PingGet())
r.GET("/get_feeds", handler.NewFeedGet(feed))
r.POST("/new_feeds", handler.NewFeedPost(feed))
r.Run(":3000")
}
|
package main
// arithmeticSum returns the sum of the n-term arithmetic progression that
// starts at a1 and advances by increment, using the closed form
// n*(first+last)/2.
func arithmeticSum(n int, a1 int, increment int) int {
	last := a1 + (n-1)*increment
	return n * (a1 + last) / 2
}
// getDivisors returns all divisors of n (including 1 and n itself); for
// n <= 1 it returns an empty slice. The order is 1, n, then divisor pairs
// ascending by the smaller factor; callers needing sorted output must sort.
func getDivisors(n int) []int {
	// divisors maps a found divisor to its complement, and doubles as a
	// "seen" set so a pair is never emitted twice.
	divisors := make(map[int]int)
	divisors[1] = n
	factors := make([]int, 0)
	if n > 1 {
		factors = append(factors, 1)
		factors = append(factors, n)
		// Bug fix: the loop must include n/2 itself (i <= n/2, originally
		// i < n/2); getDivisors(4) used to miss the divisor 2.
		for i := 2; i <= n/2; i++ {
			if n%i == 0 && divisors[n/i] == 0 {
				divisors[i] = n / i
				factors = append(factors, i)
				// Perfect squares: don't append the complement twice.
				if divisors[i] != i {
					factors = append(factors, divisors[i])
				}
			}
		}
	}
	return factors
}
|
//
// Copyright (c) Telefonica I+D. All rights reserved.
//
package svc
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/http/httputil"
"os"
"strings"
"sync"
"time"
)
// level is the internal numeric severity of a log entry.
type level int

// Severity levels, ordered from most verbose to most severe; the numeric
// value doubles as an index into levelNames.
const (
	debugLevel level = iota
	infoLevel
	warnLevel
	errorLevel
	fatalLevel
)

// levelNames holds the wire name of each level, indexed by its value.
var levelNames = []string{"DEBUG", "INFO", "WARN", "ERROR", "FATAL"}

// levelByName maps a case-insensitive level name to its level value,
// defaulting to infoLevel for unknown names.
func levelByName(levelName string) level {
	upper := strings.ToUpper(levelName)
	for i := range levelNames {
		if levelNames[i] == upper {
			return level(i)
		}
	}
	return infoLevel
}
// Logger writes one JSON document per log call to out, filtered by logLevel.
// mutex serializes writes so concurrent log lines are not interleaved.
// NOTE(review): the struct embeds a sync.Mutex, so Logger values must not be
// copied after first use — always pass *Logger.
type Logger struct {
	out io.Writer
	logLevel level
	// context holds optional global fields merged into every log document.
	context interface{}
	mutex sync.Mutex
}
// NewLogger creates a Logger that writes to stdout at info level.
func NewLogger() *Logger {
	l := &Logger{out: os.Stdout}
	l.logLevel = infoLevel
	return l
}
// SetLogContext sets the global context whose fields are merged into every
// log document. Not safe for concurrent use with logging calls.
func (l *Logger) SetLogContext(context interface{}) {
	l.context = context
}
// GetLogContext returns the global context previously set with SetLogContext.
func (l *Logger) GetLogContext() interface{} {
	return l.context
}
// SetLevel sets the log level by name (case-insensitive); unknown names fall
// back to info level (see levelByName).
func (l *Logger) SetLevel(levelName string) {
	l.logLevel = levelByName(levelName)
}
// GetLevel returns the current log level's canonical (uppercase) name.
func (l *Logger) GetLevel() string {
	return levelNames[l.logLevel]
}
// log formats one JSON log document and writes it to l.out if logLevel passes
// the logger's threshold. The document is fully built in a local buffer first
// and written in a single call under the mutex, keeping lines atomic.
func (l *Logger) log(logLevel level, context interface{}, message string, args ...interface{}) {
	if logLevel >= l.logLevel {
		var buf bytes.Buffer
		writeDoc(&buf, time.Now(), levelNames[logLevel], l.context, context, fmt.Sprintf(message, args...))
		bytes := buf.Bytes()
		l.mutex.Lock()
		defer l.mutex.Unlock()
		l.out.Write(bytes)
	}
}
// writeDoc assembles one JSON log document into buf:
//
//	{"time":...,"lvl":...,<global context fields>,<custom context fields>,"msg":...}\n
//
// Context fields come from writeFields, which only emits something when the
// context marshals to a non-empty JSON object — hence the conditional commas.
func writeDoc(buf *bytes.Buffer, time time.Time, level string, context, customContext interface{}, message string) {
	buf.WriteByte('{')
	writeField(buf, "time", time)
	buf.WriteByte(',')
	writeField(buf, "lvl", level)
	buf.WriteByte(',')
	// Only write the separating comma when the context actually contributed
	// fields; otherwise the document would contain ",," or ",}".
	if length := writeFields(buf, context); length > 0 {
		buf.WriteByte(',')
	}
	if length := writeFields(buf, customContext); length > 0 {
		buf.WriteByte(',')
	}
	writeField(buf, "msg", message)
	buf.WriteByte('}')
	buf.WriteByte('\n')
}
func writeField(buf *bytes.Buffer, key string, value interface{}) {
buf.WriteByte('"')
buf.WriteString(key)
buf.WriteByte('"')
buf.WriteByte(':')
if jsonValue, err := json.Marshal(value); err == nil {
buf.Write(jsonValue)
}
}
func writeFields(buf *bytes.Buffer, fields interface{}) int {
if fields == nil {
return 0
}
if jsonFields, err := json.Marshal(fields); err == nil {
if len(jsonFields) > 2 && jsonFields[0] == '{' && jsonFields[len(jsonFields)-1] == '}' {
if _, err := buf.Write(jsonFields[1 : len(jsonFields)-1]); err == nil {
return len(jsonFields) - 2
}
}
}
return 0
}
// Debug logs a formatted message at debug level.
func (l *Logger) Debug(format string, args ...interface{}) {
	l.log(debugLevel, nil, format, args...)
}

// DebugC logs a formatted message at debug level with a custom context.
func (l *Logger) DebugC(context interface{}, format string, args ...interface{}) {
	l.log(debugLevel, context, format, args...)
}

// Info logs a formatted message at info level.
func (l *Logger) Info(format string, args ...interface{}) {
	l.log(infoLevel, nil, format, args...)
}

// InfoC logs a formatted message at info level with a custom context.
func (l *Logger) InfoC(context interface{}, format string, args ...interface{}) {
	l.log(infoLevel, context, format, args...)
}

// Warn logs a formatted message at warn level.
func (l *Logger) Warn(format string, args ...interface{}) {
	l.log(warnLevel, nil, format, args...)
}

// WarnC logs a formatted message at warn level with a custom context.
func (l *Logger) WarnC(context interface{}, format string, args ...interface{}) {
	l.log(warnLevel, context, format, args...)
}

// Error logs a formatted message at error level.
func (l *Logger) Error(format string, args ...interface{}) {
	l.log(errorLevel, nil, format, args...)
}

// ErrorC logs a formatted message at error level with a custom context.
func (l *Logger) ErrorC(context interface{}, format string, args ...interface{}) {
	l.log(errorLevel, context, format, args...)
}

// Fatal logs a formatted message at fatal level. Note: it does not exit.
func (l *Logger) Fatal(format string, args ...interface{}) {
	l.log(fatalLevel, nil, format, args...)
}

// FatalC logs a formatted message at fatal level with a custom context.
func (l *Logger) FatalC(context interface{}, format string, args ...interface{}) {
	l.log(fatalLevel, context, format, args...)
}
// DebugResponse dumps the HTTP response at debug level.
func (l *Logger) DebugResponse(message string, r *http.Response) {
	l.DebugResponseC(nil, message, r)
}

// DebugResponseC dumps the HTTP response at debug level with a custom context.
// Dump failures are silently ignored, as in logging best-effort fashion.
func (l *Logger) DebugResponseC(context interface{}, message string, r *http.Response) {
	if r == nil || l.logLevel > debugLevel {
		return
	}
	dump, err := httputil.DumpResponse(r, true)
	if err != nil {
		return
	}
	l.DebugC(context, "%s. %s", message, dump)
}

// DebugRequest dumps the incoming HTTP request at debug level.
func (l *Logger) DebugRequest(message string, r *http.Request) {
	l.DebugRequestC(nil, message, r)
}

// DebugRequestC dumps the incoming HTTP request at debug level with a custom context.
func (l *Logger) DebugRequestC(context interface{}, message string, r *http.Request) {
	if r == nil || l.logLevel > debugLevel {
		return
	}
	dump, err := httputil.DumpRequest(r, true)
	if err != nil {
		return
	}
	l.DebugC(context, "%s. %s", message, dump)
}

// DebugRequestOut dumps an outgoing HTTP request at debug level.
func (l *Logger) DebugRequestOut(message string, r *http.Request) {
	l.DebugRequestOutC(nil, message, r)
}

// DebugRequestOutC dumps an outgoing HTTP request at debug level with a custom context.
func (l *Logger) DebugRequestOutC(context interface{}, message string, r *http.Request) {
	if r == nil || l.logLevel > debugLevel {
		return
	}
	dump, err := httputil.DumpRequestOut(r, true)
	if err != nil {
		return
	}
	l.DebugC(context, "%s. %s", message, dump)
}
// writer bridges the standard library logger to our Logger by treating every
// written chunk as an error-level message.
type writer struct {
	l *Logger
}

// Write implements io.Writer for the bridge.
func (w *writer) Write(p []byte) (int, error) {
	w.l.Error(string(p))
	return len(p), nil
}

// NewStdLogger returns a standard *log.Logger whose output is routed through
// the given custom Logger.
func NewStdLogger(l *Logger) *log.Logger {
	return log.New(&writer{l: l}, "", 0)
}

// NewStdLoggerC returns a standard *log.Logger routed through a fresh custom
// Logger carrying the given global context.
func NewStdLoggerC(context interface{}) *log.Logger {
	custom := NewLogger()
	custom.SetLogContext(context)
	return NewStdLogger(custom)
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"bytes"
"fmt"
txttmpl "text/template"
"time"
rice "github.com/GeertJohan/go.rice"
brokerclient "github.com/CS-SI/SafeScale/broker/client"
"github.com/CS-SI/SafeScale/deploy/install"
"github.com/CS-SI/SafeScale/system"
"github.com/CS-SI/SafeScale/utils/template"
)
const (
tempFolder = "/var/tmp/"
)
var (
// systemTemplateBox ...
systemTemplateBox *rice.Box
// bashLibraryContent contains the script containing bash library
bashLibraryContent *string
)
// ExecuteScript renders the script template 'tmplName' from 'box' with the
// given data, uploads it to the target host (identified by hostID) and runs
// it there with sudo. It returns the exit code, stdout, stderr and any error.
func ExecuteScript(
	box *rice.Box, funcMap map[string]interface{}, tmplName string, data map[string]interface{},
	hostID string,
) (int, string, string, error) {
	// Inject the shared bash library under the reserved template variable.
	bashLibrary, err := system.GetBashLibrary()
	if err != nil {
		return 0, "", "", err
	}
	data["reserved_BashLibrary"] = bashLibrary
	path, err := UploadTemplateToFile(box, funcMap, tmplName, data, hostID, tmplName)
	if err != nil {
		return 0, "", "", err
	}
	var cmd string
	//if debug
	// NOTE(review): this toggle is hard-coded to true, so the debug branch
	// always runs and the uploaded script is never removed from the host;
	// confirm whether the cleanup branch should be re-enabled.
	if true {
		cmd = fmt.Sprintf("sudo bash %s", path)
	} else {
		cmd = fmt.Sprintf("sudo bash %s; rc=$?; rm %s; exit $rc", path, path)
	}
	// Runs with the broker's default connection timeout and a 20 minute
	// execution timeout.
	return brokerclient.New().Ssh.Run(hostID, cmd, brokerclient.DefaultConnectionTimeout, time.Duration(20)*time.Minute)
}
// UploadTemplateToFile renders the template named 'tmplName' from rice 'box'
// with 'data' and uploads the result to /var/tmp/<fileName> on the remote
// host identified by hostID. It returns the remote path of the uploaded file.
func UploadTemplateToFile(
	box *rice.Box, funcMap map[string]interface{}, tmplName string, data map[string]interface{},
	hostID string, fileName string,
) (string, error) {
	if box == nil {
		panic("box is nil!")
	}
	broker := brokerclient.New()
	host, err := broker.Host.Inspect(hostID, brokerclient.DefaultExecutionTimeout)
	// BUG FIX: this previously tested `err != err`, which is always false and
	// silently ignored Inspect failures, continuing with a zero-value host.
	if err != nil {
		return "", fmt.Errorf("failed to get host information: %s", err)
	}
	tmplString, err := box.String(tmplName)
	if err != nil {
		return "", fmt.Errorf("failed to load template: %s", err.Error())
	}
	tmplCmd, err := txttmpl.New(fileName).Funcs(template.MergeFuncs(funcMap, false)).Parse(tmplString)
	if err != nil {
		return "", fmt.Errorf("failed to parse template: %s", err.Error())
	}
	// Render the template into memory before shipping it to the host.
	dataBuffer := bytes.NewBufferString("")
	err = tmplCmd.Execute(dataBuffer, data)
	if err != nil {
		return "", fmt.Errorf("failed to realize template: %s", err.Error())
	}
	cmd := dataBuffer.String()
	remotePath := tempFolder + fileName
	err = install.UploadStringToRemoteFile(cmd, host, remotePath, "", "", "")
	if err != nil {
		return "", err
	}
	return remotePath, nil
}
|
package rest
import (
"fmt"
"time"
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
analysispb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/analysis/v1"
"github.com/kataras/iris/v12"
)
// GetWeeklyReportBody is the request body of the weekly report endpoint.
type GetWeeklyReportBody struct {
	Language Language `json:"language"` // report language requested by the client
}
// GetWeeklyReport handles the weekly report endpoint: it aggregates the last
// seven days of measurement data for the user, asks the analysis RPC service
// for a weekly analyze result, and returns the assembled report.
func (h *v2Handler) GetWeeklyReport(ctx iris.Context) {
	userID, err := ctx.Params().GetInt("user_id")
	if err != nil {
		writeError(ctx, wrapError(ErrInvalidValue, "", err), false)
		return
	}
	// The seamless client is explicitly barred from this API.
	if ctx.Values().GetString(ClientIDKey) == seamlessClient {
		writeError(
			ctx,
			wrapError(ErrDeniedToAccessAPI, "", fmt.Errorf("%s is denied to access this API", seamlessClient)),
			false,
		)
		return
	}
	var body GetWeeklyReportBody
	errReadJSON := ctx.ReadJSON(&body)
	if errReadJSON != nil {
		writeError(ctx, wrapError(ErrParsingRequestFailed, "", errReadJSON), false)
		return
	}
	language, err := mapRestLanguageToProto(body.Language)
	if err != nil {
		writeError(ctx, wrapError(ErrValueRequired, "", err), false)
		return
	}
	timeZone := getTimeZone(ctx)
	// Weekly history window, evaluated in the caller's time zone.
	location, _ := time.LoadLocation(timeZone)
	now := time.Now().In(location)
	// Window start: 23:59:59 of today, minus seven days.
	weekStart := time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, 0, location).AddDate(0, 0, -7)
	// Window end: 23:59:59 of today.
	weekEnd := time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, 0, location).AddDate(0, 0, 0)
	statData, err := h.getWeekOrMonthStatData(ctx, userID, weekStart.UTC(), weekEnd.UTC(), WeekStatData)
	if err != nil {
		writeError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	// A non-empty error message means the records are incomplete; return the
	// message to the client instead of a report.
	if statData.ErrorMessage != "" {
		rest.WriteOkJSON(ctx, WeeklyOrMonthlyReportResponse{
			ErrorMessage: statData.ErrorMessage,
		})
		return
	}
	// Build the analysis request from the averaged meridian values (C0..C7).
	req := new(analysispb.GetWeeklyAnalyzeResultRequest)
	req.UserId = int32(userID)
	req.Language = language
	req.Cid = rest.GetCidFromContext(ctx)
	req.CInfo = &analysispb.CInfo{
		C0: statData.AverageMeridian.C0,
		C1: statData.AverageMeridian.C1,
		C2: statData.AverageMeridian.C2,
		C3: statData.AverageMeridian.C3,
		C4: statData.AverageMeridian.C4,
		C5: statData.AverageMeridian.C5,
		C6: statData.AverageMeridian.C6,
		C7: statData.AverageMeridian.C7,
	}
	req.PhysicalDialectics = statData.PhysicalDialectics
	resp, err := h.rpcAnalysisSvc.GetWeeklyAnalyzeResult(
		newRPCContext(ctx), req,
	)
	if err != nil {
		writeRPCInternalError(ctx, err, false)
		return
	}
	// Modules derived from the engine's analysis result.
	analysisReportContent, err := getAnalysisModules(resp.GetReport().GetModules())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	// User profile module.
	userProfileModule, err := getUserProfileModule(resp.GetReport().GetUserProfile())
	if err != nil {
		writeRPCInternalError(ctx, wrapError(ErrRPCInternal, "", err), false)
		return
	}
	analysisReportContent.UserProfile = userProfileModule
	rest.WriteOkJSON(ctx, WeeklyOrMonthlyReportResponse{
		ReportVersion: resp.ReportVersion,
		ReportContent: analysisReportContent,
		StartTime:     weekStart,
		EndTime:       weekEnd,
	})
}
|
package main
import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strconv"
	"strings"
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/guregu/dynamo"
)
const workers = 80
// main streams flight records from ./results/part-00000 into the DynamoDB
// "trips" table using a pool of concurrent writers.
func main() {
	db := dynamo.New(session.New(), &aws.Config{
		Region:      aws.String("us-east-1"),
		Credentials: credentials.NewEnvCredentials(),
	})
	table := db.Table("trips")

	file, err := os.Open("./results/part-00000")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	c := make(chan flight, workers)
	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i != workers; i++ {
		go func() {
			defer wg.Done()
			for t := range c {
				// Best effort: log the failed put and keep going.
				if err := table.Put(t).Run(); err != nil {
					fmt.Println(err)
				}
			}
		}()
	}

	// Parse each CSV line into a flight and hand it to the workers.
	scanner := bufio.NewScanner(file)
	i := 0
	for scanner.Scan() {
		i++
		parts := strings.Split(scanner.Text(), ",")
		hour, _ := strconv.ParseInt(parts[6], 10, 64)
		delay, _ := strconv.ParseFloat(strings.Trim(parts[7], " \t\r\n"), 64)
		f := flight{
			Id:           fmt.Sprintf("%s_%s_%s_%s", parts[0], parts[5], parts[1], parts[2]),
			AmPm:         parts[0],
			FlightNumber: parts[4],
			Origin:       parts[1],
			Dest:         parts[2],
			Carrier:      parts[3],
			Day:          parts[5],
			Hour:         hour,
			Delay:        delay,
		}
		c <- f
		if i%100 == 0 {
			log.Println(i)
		}
	}
	close(c)
	// BUG FIX: previously main returned immediately after close(c), so any
	// items still buffered in the channel (and in-flight puts) were dropped.
	wg.Wait()
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
// flight is one record of the "trips" DynamoDB table, parsed from a CSV line
// in main (column indices noted below).
type flight struct {
	Id           string  // composite key "<ampm>_<day>_<origin>_<dest>" built in main
	AmPm         string  // column 0
	FlightNumber string  // column 4
	Origin       string  // column 1
	Dest         string  // column 2
	Carrier      string  // column 3
	Day          string  // column 5
	Hour         int64   // column 6, parsed as int
	Delay        float64 // column 7, parsed as float
}
|
package db
import (
"database/sql"
"errors"
_ "github.com/mattn/go-sqlite3"
)
// DB wraps a sqlite DB with specific calls for wish list.
type DB struct {
	db *sql.DB // underlying sqlite connection pool
}
// WishListRow is a json annotated representation of the db schema
// (wishlist table: userid, booktitle, deleted).
type WishListRow struct {
	UserId    int    `json:"userId"`    // owner of the wish list entry
	BookTitle string `json:"bookTitle"` // title of the wished book
	Deleted   bool   `json:"deleted"`   // soft-delete flag
}
// InitDB opens a sqlite database at filepath (or :memory:) and returns back
// the pointer to the wrapped DB.
func InitDB(filepath string) (*DB, error) {
	handle, err := sql.Open("sqlite3", filepath)
	if err != nil {
		return nil, err
	}
	if handle == nil {
		return nil, errors.New("db couldn't be read")
	}
	return &DB{handle}, nil
}
// DeleteWishList removes the wishlist record with the given id, returning an
// error when no record was deleted.
func (d *DB) DeleteWishList(id int) error {
	stmt, err := d.db.Prepare(`DELETE FROM wishlist WHERE id = ?`)
	if err != nil {
		return err
	}
	defer stmt.Close()

	rs, err := stmt.Exec(id)
	if err != nil {
		return err
	}
	if rs == nil {
		return errors.New("wish list record not found")
	}
	rows, err := rs.RowsAffected()
	if err != nil {
		return err
	}
	if rows == 0 {
		return errors.New("wish list record not found")
	}
	return nil
}
// GetWishList returns back the search endpoints values plus the marshaled db row.
// ie) if a row is "1984" -> you get back /search?q=1984 + {.., "bookTitle":"1984".. }
// A missing row yields the friendly "wish list record not found" error.
func (d *DB) GetWishList(id int) (*WishListRow, error) {
	sqlSelect := `SELECT userId, booktitle, deleted FROM wishlist WHERE id = ?`
	stmt, err := d.db.Prepare(sqlSelect)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	// BUG FIX: QueryRow never returns nil, so the old `rs == nil` check was
	// dead code and a missing row surfaced as a raw sql.ErrNoRows from Scan.
	// Detect ErrNoRows explicitly and map it to the intended error message.
	w := WishListRow{}
	err = stmt.QueryRow(id).Scan(&w.UserId, &w.BookTitle, &w.Deleted)
	if err == sql.ErrNoRows {
		return nil, errors.New("wish list record not found")
	}
	if err != nil {
		return nil, err
	}
	return &w, nil
}
// InsertRow validates the required fields of row and, if valid, inserts it
// into the wishlist table.
func (d *DB) InsertRow(row WishListRow) error {
	switch {
	case row.BookTitle == "":
		return errors.New("bookTitle cannot be empty")
	case row.UserId == 0:
		return errors.New("userId cannot be empty")
	}
	stmt, err := d.db.Prepare(`INSERT INTO wishlist (userid, booktitle) VALUES (?, ?)`)
	if err != nil {
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(row.UserId, row.BookTitle)
	return err
}
|
package _1
import (
"bufio"
"os"
"strconv"
)
// PartOne returns n * (2020 - n) for the first value n of numbers that also
// appears in setNumbers, or 0 when there is no such value.
//
// BUG FIX: `for n := range numbers` iterated slice INDICES, so the function
// compared and multiplied positions instead of the actual numbers. We now
// iterate the values.
func PartOne(numbers []int, setNumbers []int) int {
	for _, n := range numbers {
		for _, sn := range setNumbers {
			if n == sn {
				return n * (2020 - n)
			}
		}
	}
	return 0
}
// PartTwo returns n * (2020 - n - n2) * n2 for the first pair (n, n2) of
// values from numbers whose complement to 2020 appears in setNumbers, or 0
// when there is no such pair.
//
// BUG FIX: both `range numbers` loops iterated slice INDICES instead of the
// values; we now iterate the values.
func PartTwo(numbers []int, setNumbers []int) int {
	for _, n := range numbers {
		for _, n2 := range numbers {
			for _, sn := range setNumbers {
				if 2020-n-n2 == sn {
					return n * (2020 - n - n2) * n2
				}
			}
		}
	}
	return 0
}
func main(fileName string) error {
if fileName != "" {
file, err := os.Open(fileName)
if err != nil {
return err
}
defer file.Close()
scanner := bufio.NewReader(file)
var line string
for {
line, err = scanner.ReadString('\n')
if err != nil {
return err
}
conv, _ := strconv.Atoi(line)
//todo fix
PartOne(conv, conv)
PartTwo(conv, conv)
}
}
return nil
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package label
import (
"encoding/hex"
"encoding/json"
"fmt"
"slices"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/codec"
"gopkg.in/yaml.v2"
)
const (
	// IDPrefix is the prefix for label rule ID.
	IDPrefix = "schema"
	// ruleType is the only rule type produced by this package.
	ruleType = "key-range"
)

const (
	// RuleIndexDefault is the default index for a rule.
	RuleIndexDefault int = iota
	// RuleIndexDatabase is the index for a rule of database.
	RuleIndexDatabase
	// RuleIndexTable is the index for a rule of table.
	RuleIndexTable
	// RuleIndexPartition is the index for a rule of partition.
	RuleIndexPartition
)

var (
	// TableIDFormat is the format of the label rule ID for a table.
	// The format follows "schema/database_name/table_name".
	TableIDFormat = "%s/%s/%s"
	// PartitionIDFormat is the format of the label rule ID for a partition.
	// The format follows "schema/database_name/table_name/partition_name".
	PartitionIDFormat = "%s/%s/%s/%s"
)
// Rule is used to establish the relationship between labels and a key range.
type Rule struct {
	ID       string        `json:"id"`        // built from TableIDFormat / PartitionIDFormat in Reset
	Index    int           `json:"index"`     // one of the RuleIndex* constants
	Labels   Labels        `json:"labels"`    // labels attached to the key range
	RuleType string        `json:"rule_type"` // set to ruleType ("key-range") by Reset
	Data     []interface{} `json:"data"`      // hex-encoded start_key/end_key ranges
}
// NewRule creates an empty rule.
func NewRule() *Rule {
	r := new(Rule)
	return r
}
// ApplyAttributesSpec will transfer attributes defined in AttributesSpec to
// the labels. A DEFAULT spec clears the labels.
func (r *Rule) ApplyAttributesSpec(spec *ast.AttributesSpec) error {
	if spec.Default {
		r.Labels = []Label{}
		return nil
	}
	// Wrap the raw attribute string in brackets so it parses as a YAML list
	// of strings; strict unmarshalling rejects malformed input.
	attributes := []string{}
	if err := yaml.UnmarshalStrict([]byte("["+spec.Attributes+"]"), &attributes); err != nil {
		return err
	}
	var err error
	r.Labels, err = NewLabels(attributes)
	return err
}
// String implements fmt.Stringer. It returns the JSON encoding of the rule,
// or the empty string if marshalling fails.
func (r *Rule) String() string {
	encoded, err := json.Marshal(r)
	if err != nil {
		return ""
	}
	return string(encoded)
}
// Clone clones a rule.
// NOTE(review): this is a shallow copy — the Labels and Data slices are
// shared with the receiver; confirm callers do not mutate them after cloning.
func (r *Rule) Clone() *Rule {
	newRule := NewRule()
	*newRule = *r
	return newRule
}
// Reset will reset the label rule for a table/partition with a given ID and
// names: it rebuilds the rule ID, ensures db/table/partition labels are
// present and up to date, and regenerates the key ranges for the given IDs.
func (r *Rule) Reset(dbName, tableName, partName string, ids ...int64) *Rule {
	isPartition := partName != ""
	if isPartition {
		r.ID = fmt.Sprintf(PartitionIDFormat, IDPrefix, dbName, tableName, partName)
	} else {
		r.ID = fmt.Sprintf(TableIDFormat, IDPrefix, dbName, tableName)
	}
	// A rule without labels is left inert: only the ID is refreshed.
	if len(r.Labels) == 0 {
		return r
	}
	// Update any existing db/table/partition labels in place...
	var hasDBKey, hasTableKey, hasPartitionKey bool
	for i := range r.Labels {
		switch r.Labels[i].Key {
		case dbKey:
			r.Labels[i].Value = dbName
			hasDBKey = true
		case tableKey:
			r.Labels[i].Value = tableName
			hasTableKey = true
		case partitionKey:
			if isPartition {
				r.Labels[i].Value = partName
				hasPartitionKey = true
			}
		default:
		}
	}
	// ...and append the ones that were missing.
	if !hasDBKey {
		r.Labels = append(r.Labels, Label{Key: dbKey, Value: dbName})
	}
	if !hasTableKey {
		r.Labels = append(r.Labels, Label{Key: tableKey, Value: tableName})
	}
	if isPartition && !hasPartitionKey {
		r.Labels = append(r.Labels, Label{Key: partitionKey, Value: partName})
	}
	r.RuleType = ruleType
	r.Data = []interface{}{}
	slices.Sort(ids)
	// Each ID contributes one [GenTablePrefix(id), GenTablePrefix(id+1))
	// range, codec-encoded and then hex-encoded.
	for i := 0; i < len(ids); i++ {
		data := map[string]string{
			"start_key": hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTablePrefix(ids[i]))),
			"end_key":   hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTablePrefix(ids[i]+1))),
		}
		r.Data = append(r.Data, data)
	}
	// We may support more types later.
	r.Index = RuleIndexTable
	if isPartition {
		r.Index = RuleIndexPartition
	}
	return r
}
// RulePatch is the patch to update the label rules.
type RulePatch struct {
	SetRules    []*Rule  `json:"sets"`    // rules to create or update
	DeleteRules []string `json:"deletes"` // IDs of rules to delete
}
// NewRulePatch returns a patch of rules which need to be set or deleted.
func NewRulePatch(setRules []*Rule, deleteRules []string) *RulePatch {
	patch := &RulePatch{}
	patch.SetRules = setRules
	patch.DeleteRules = deleteRules
	return patch
}
|
package problem0623
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// addOneRow inserts a row of nodes with value val at the given depth
// (LeetCode 623). At depth 1 a new root is created whose left child is the
// old root; otherwise each node at depth-1 gets a new left child (adopting
// its old left subtree) and a new right child (adopting its old right
// subtree). If depth exceeds the tree height, the tree is returned unchanged.
func addOneRow(root *TreeNode, val int, depth int) *TreeNode {
	if depth == 1 {
		return &TreeNode{Val: val, Left: root}
	}
	// Walk down to the level at depth-1 level by level.
	level := []*TreeNode{root}
	for d := 1; d < depth-1 && len(level) > 0; d++ {
		next := make([]*TreeNode, 0, 2*len(level))
		for _, node := range level {
			if node.Left != nil {
				next = append(next, node.Left)
			}
			if node.Right != nil {
				next = append(next, node.Right)
			}
		}
		level = next
	}
	// Splice the new row beneath every node of that level.
	for _, node := range level {
		node.Left = &TreeNode{Val: val, Left: node.Left}
		node.Right = &TreeNode{Val: val, Right: node.Right}
	}
	return root
}
|
package main
import "fmt"
// Add returns the sum of a and b.
func Add(a, b int) int {
	return a + b
}

func main() {
	x, y := 111, 111
	fmt.Println(Add(x, y))
	// fmt.Println("Hello MT.Qomolangma!")
}
|
package email
// Button holds the styling, label and target link of a button used in an
// email body.
type Button struct {
	Color     string // button background color
	TextColor string // label text color
	Text      string // visible label
	Link      string // target URL the button points at
}
|
package common
/*
Generated using mavgen - https://github.com/ArduPilot/pymavlink/
Copyright 2020 queue-b <https://github.com/queue-b>
Permission is hereby granted, free of charge, to any person obtaining a copy
of the generated software (the "Generated Software"), to deal
in the Generated Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Generated Software, and to permit persons to whom the Generated
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Generated Software.
THE GENERATED SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE GENERATED SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE GENERATED SOFTWARE.
*/
import (
"bytes"
"encoding/binary"
"fmt"
"text/tabwriter"
"github.com/queue-b/go-mavlink2"
"github.com/queue-b/go-mavlink2/util"
)
/*SysStatus The general system state. If the system is following the MAVLink standard, the system state is mainly defined by three orthogonal states/modes: The system mode, which is either LOCKED (motors shut down and locked), MANUAL (system under RC control), GUIDED (system with autonomous position control, position setpoint controlled manually) or AUTO (system guided by path/waypoint planner). The NAV_MODE defined the current flight state: LIFTOFF (often an open-loop maneuver), LANDING, WAYPOINTS or VECTOR. This represents the internal navigation state machine. The system status shows whether the system is currently active or not and if an emergency occurred. During the CRITICAL and EMERGENCY states the MAV is still considered to be active, but should start emergency procedures autonomously. After a failure occurred it should first move from active to critical to allow manual intervention and then move to emergency after a certain timeout. */
type SysStatus struct {
	/*OnboardControlSensorsPresent Bitmap showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. */
	OnboardControlSensorsPresent uint32
	/*OnboardControlSensorsEnabled Bitmap showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. */
	OnboardControlSensorsEnabled uint32
	/*OnboardControlSensorsHealth Bitmap showing which onboard controllers and sensors have an error (or are operational). Value of 0: error. Value of 1: healthy. */
	OnboardControlSensorsHealth uint32
	/*Load Maximum usage in percent of the mainloop time. Values: [0-1000] - should always be below 1000 */
	Load uint16
	/*VoltageBattery Battery voltage, UINT16_MAX: Voltage not sent by autopilot */
	VoltageBattery uint16
	/*CurrentBattery Battery current, -1: Current not sent by autopilot */
	CurrentBattery int16
	/*DropRateComm Communication drop rate, (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) */
	DropRateComm uint16
	/*ErrorsComm Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) */
	ErrorsComm uint16
	/*ErrorsCount1 Autopilot-specific errors */
	ErrorsCount1 uint16
	/*ErrorsCount2 Autopilot-specific errors */
	ErrorsCount2 uint16
	/*ErrorsCount3 Autopilot-specific errors */
	ErrorsCount3 uint16
	/*ErrorsCount4 Autopilot-specific errors */
	ErrorsCount4 uint16
	/*BatteryRemaining Battery energy remaining, -1: Battery remaining energy not sent by autopilot */
	BatteryRemaining int8
	/*HasExtensionFieldValues indicates if this message has any extensions. It is carried through binary encode/decode as one extra trailing byte (see Read/Write and getIOSlice). */
	HasExtensionFieldValues bool
}
// String renders the message as an aligned, human-readable field table using
// a tabwriter. Idiom fix: the final conversion used string(buffer.Bytes()),
// which copies; bytes.Buffer.String() is the direct equivalent.
func (m *SysStatus) String() string {
	format := ""
	var buffer bytes.Buffer

	writer := tabwriter.NewWriter(&buffer, 0, 0, 2, ' ', 0)

	format += "Name:\t%v/%v\n"
	// Output field values based on the decoded message type
	format += "OnboardControlSensorsPresent:\t%v \n"
	format += "OnboardControlSensorsEnabled:\t%v \n"
	format += "OnboardControlSensorsHealth:\t%v \n"
	format += "Load:\t%v \n"
	format += "VoltageBattery:\t%v [mV]\n"
	format += "CurrentBattery:\t%v [cA]\n"
	format += "DropRateComm:\t%v \n"
	format += "ErrorsComm:\t%v \n"
	format += "ErrorsCount1:\t%v \n"
	format += "ErrorsCount2:\t%v \n"
	format += "ErrorsCount3:\t%v \n"
	format += "ErrorsCount4:\t%v \n"
	format += "BatteryRemaining:\t%v \n"

	fmt.Fprintf(
		writer,
		format,
		m.GetDialect(),
		m.GetMessageName(),
		m.OnboardControlSensorsPresent,
		m.OnboardControlSensorsEnabled,
		m.OnboardControlSensorsHealth,
		m.Load,
		m.VoltageBattery,
		m.CurrentBattery,
		m.DropRateComm,
		m.ErrorsComm,
		m.ErrorsCount1,
		m.ErrorsCount2,
		m.ErrorsCount3,
		m.ErrorsCount4,
		m.BatteryRemaining,
	)

	writer.Flush()
	return buffer.String()
}
// GetVersion gets the MAVLink version of the Message contents; messages
// carrying extension field values are MAVLink 2.
func (m *SysStatus) GetVersion() int {
	if !m.HasExtensionFieldValues {
		return 1
	}
	return 2
}

// GetDialect gets the name of the dialect that defines the Message.
func (m *SysStatus) GetDialect() string {
	return "common"
}

// GetMessageName gets the name of the Message.
func (m *SysStatus) GetMessageName() string {
	return "SysStatus"
}

// GetID gets the ID of the Message.
func (m *SysStatus) GetID() uint32 {
	return 1
}

// HasExtensionFields returns true if the message definition contained extensions; false otherwise.
func (m *SysStatus) HasExtensionFields() bool {
	return false
}

// getV1Length returns the fixed MAVLink V1 payload length in bytes.
func (m *SysStatus) getV1Length() int {
	return 31
}

// getIOSlice returns a scratch buffer sized for the payload plus one extra
// trailing byte that carries HasExtensionFieldValues through binary decoding.
func (m *SysStatus) getIOSlice() []byte {
	buf := make([]byte, 31+1)
	return buf
}
// Read sets the field values of the message from the raw message payload.
// Only frame versions 1 and 2 are accepted, and V2-only message IDs (>255)
// cannot be decoded from a V1 frame.
func (m *SysStatus) Read(frame mavlink2.Frame) (err error) {
	version := frame.GetVersion()

	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}

	// Don't attempt to Read V2 messages from V1 frames
	if m.GetID() > 255 && version < 2 {
		err = mavlink2.ErrDecodeV2MessageV1Frame
		return
	}

	// binary.Read can panic; swallow the panic and return a sane error
	defer func() {
		if r := recover(); r != nil {
			err = mavlink2.ErrPrivateField
		}
	}()

	// Get a slice of bytes long enough for the all the SysStatus fields
	// binary.Read requires enough bytes in the reader to read all fields, even if
	// the fields are just zero values. This also simplifies handling MAVLink2
	// extensions and trailing zero truncation.
	ioSlice := m.getIOSlice()

	copy(ioSlice, frame.GetMessageBytes())

	// The extra trailing byte records whether extension field values were
	// decoded; it is only set for V2 frames of messages with extensions.
	if version == 2 && m.HasExtensionFields() {
		ioSlice[len(ioSlice)-1] = 1
	}

	reader := bytes.NewReader(ioSlice)

	err = binary.Read(reader, binary.LittleEndian, m)
	return
}
// Write encodes the field values of the message to a byte array suitable for
// inclusion in a frame of the given MAVLink version (1 or 2).
func (m *SysStatus) Write(version int) (output []byte, err error) {
	var buffer bytes.Buffer

	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}

	// Don't attempt to Write V2 messages to V1 bodies
	if m.GetID() > 255 && version < 2 {
		err = mavlink2.ErrEncodeV2MessageV1Frame
		return
	}

	err = binary.Write(&buffer, binary.LittleEndian, *m)
	if err != nil {
		return
	}

	output = buffer.Bytes()

	// V1 uses fixed message lengths and does not include any extension fields
	// Truncate the byte slice to the correct length
	// This also removes the trailing extra byte written for HasExtensionFieldValues
	if version == 1 {
		output = output[:m.getV1Length()]
	}

	// V2 uses variable message lengths and includes extension fields
	// The variable length is caused by truncating any trailing zeroes from
	// the end of the message before it is added to a frame
	if version == 2 {
		// Set HasExtensionFieldValues to zero so that it doesn't interfere with V2 truncation
		output[len(output)-1] = 0
		output = util.TruncateV2(buffer.Bytes())
	}

	return
}
|
package auth_test
import (
"context"
"fmt"
"testing"
auth "github.com/gofor-little/aws-auth"
"github.com/stretchr/testify/require"
)
// TestSignUp exercises auth.SignUp against the configured test environment
// for each email/password pair.
func TestSignUp(t *testing.T) {
	setup(t)
	defer teardown(t)

	testCases := []struct {
		emailAddress string
		password     string
	}{
		{emailAddress: "john@example.com", password: "test-Password1234!!"},
	}

	for i, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("TestSignUp_%d", i), func(t *testing.T) {
			_, err := auth.SignUp(context.Background(), tc.emailAddress, tc.password)
			require.NoError(t, err)
		})
	}
}
|
package router
import (
"testing"
"github.com/stretchr/testify/assert"
)
// inputAndOutput pairs a request path with the Params expected from matching
// it against a route pattern.
type inputAndOutput struct {
	input  string // request path fed to parseURL
	output Params // expected captured parameters (empty when no match/captures)
}
// testData maps each route pattern to the paths matched against it and the
// Params expected from parseURL for each path. Composite-literal element
// types and indices are elided; the entries are unchanged.
var testData = map[string][]inputAndOutput{
	"*": {
		{input: "/", output: Params{}},
		{input: "/asdf", output: Params{}},
		{input: "/123/dvaks", output: Params{}},
	},
	"/asdf/*": {
		{input: "/asdf/0x11", output: Params{}},
		{input: "/asdf/false/1213", output: Params{}},
	},
	"/asdf/:id/*": {
		{input: "/asdf/000/gkdj", output: Params{"id": "000"}},
	},
	"/path/:aa/*/:bb": {
		{input: "/path/1/path2/2", output: Params{"aa": "1", "bb": "2"}},
		{input: "/path/1/path222/2", output: Params{"aa": "1", "bb": "2"}},
		{input: "/path/1/path2/3/fff", output: Params{}},
	},
	"/": {
		{input: "/", output: Params{}},
		{input: "/test", output: Params{}},
		{input: "/2", output: Params{}},
		{input: "/test/1", output: Params{}},
		{input: "//", output: Params{}},
	},
	"/test": {
		{input: "/", output: Params{}},
		{input: "/test", output: Params{}},
		{input: "/test/1", output: Params{}},
	},
	"/:id": {
		{input: "/", output: Params{"id": ""}},
		{input: "/2/1", output: Params{}},
		{input: "/1", output: Params{"id": "1"}},
		{input: "/test", output: Params{"id": "test"}},
		{input: "/false", output: Params{"id": "false"}},
	},
	"/:param": {
		{input: "/", output: Params{"param": ""}},
		{input: "/param", output: Params{"param": "param"}},
		{input: "/param/234", output: Params{}},
	},
	"/path1/:param/path2": {
		{input: "/path1", output: Params{}},
		{input: "/path1/111", output: Params{}},
		{input: "/path2/111/path2", output: Params{}},
		{input: "/path1/111/path2", output: Params{"param": "111"}},
		{input: "/path1/1111/path3", output: Params{}},
		{input: "/path1/11111/path2/haha", output: Params{}},
	},
}
func TestParseURL(t *testing.T) {
for pattern, inputAndOutputs := range testData {
for _, inputAndOutput := range inputAndOutputs {
input := inputAndOutput.input
expected := inputAndOutput.output
actual := parseURL(pattern, input)
assert.Equal(t, expected, actual)
}
}
} |
package main
import "fmt"
/*
func main() {
var var0, var1, var2, var4 int
var0 = 1
var1 = 983
if var0 == 1 {
var1 += 10550400
var0 = 0
}
var2 = 1
for {
var4 = 1
for {
if (var2 * var4) == var1 {
var0 += var2
}
var4++
if var4 > var1 {
break
}
}
var2++
if var2 > var1 {
break // EXIT
}
}
fmt.Println(var0)
}
*/
// main reproduces the commented-out program above: starting from 983,
// it adds 10550400 (the guarded branch always fires) and prints the
// sum of all positive divisors of the result. The original nested-loop
// pair search was O(n^2); this version computes the same value in
// O(sqrt(n)).
func main() {
	var0 := 1
	var1 := 983
	if var0 == 1 {
		var1 += 10550400
		var0 = 0
	}
	_ = var0 // kept for fidelity with the original program shape
	fmt.Println(sumDivisors(var1))
}

// sumDivisors returns the sum of all positive divisors of n, including
// 1 and n itself (the sigma function). It returns 0 for n < 1.
func sumDivisors(n int) int {
	if n < 1 {
		return 0
	}
	sum := 0
	// Each divisor i <= sqrt(n) pairs with the cofactor n/i; add both,
	// taking care not to double-count a perfect-square root.
	for i := 1; i*i <= n; i++ {
		if n%i == 0 {
			sum += i
			if j := n / i; j != i {
				sum += j
			}
		}
	}
	return sum
}
|
package observe
import (
"fmt"
"github.com/nokamoto/grpc-proxy/yaml"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/codes"
"sort"
"time"
)
// Prom represents a collection of collectors which observes request count, latency, and request/response size.
type Prom interface {
	// Observe records one RPC: method name, status code, request size
	// in bytes, response size in bytes, and elapsed time.
	Observe(string, codes.Code, int, int, time.Duration) error
	// Destroy unregisters the underlying collectors from the default
	// Prometheus registry.
	Destroy()
}
// NewProm returns Prom from the yaml configuration. It registers one
// request counter and three histograms (latency seconds, request
// bytes, response bytes) with the default Prometheus registry, all
// labeled by method and status. If any registration fails, every
// collector registered so far is unregistered again, so a failed call
// leaves the registry untouched (the original version leaked earlier
// registrations on a later failure).
func NewProm(c yaml.Prom) (Prom, error) {
	labels := []string{"method", "status"}

	// Collectors registered so far; removed again by rollback if a
	// later registration fails.
	var registered []prometheus.Collector
	rollback := func() {
		for _, col := range registered {
			prometheus.Unregister(col)
		}
	}

	counter := prometheus.NewCounterVec(prometheus.CounterOpts{Name: fmt.Sprintf("%s_request_count", c.Name)}, labels)
	if err := prometheus.Register(counter); err != nil {
		return nil, err
	}
	registered = append(registered, counter)

	hist := func(name string, buckets []float64) (*prometheus.HistogramVec, error) {
		// Histogram buckets must be in increasing order; sorts in
		// place, as the original helper did.
		sort.Float64s(buckets)
		h := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fmt.Sprintf("%s_%s", c.Name, name), Buckets: buckets}, labels)
		if err := prometheus.Register(h); err != nil {
			return nil, err
		}
		registered = append(registered, h)
		return h, nil
	}

	latency, err := hist("latency_seconds", c.Buckets.LatencySeconds)
	if err != nil {
		rollback()
		return nil, err
	}
	req, err := hist("request_bytes", c.Buckets.RequestBytes)
	if err != nil {
		rollback()
		return nil, err
	}
	res, err := hist("response_bytes", c.Buckets.ResponseBytes)
	if err != nil {
		rollback()
		return nil, err
	}
	return &prom{counter: counter, hist: hists{req: req, res: res, latency: latency}}, nil
}
// prom is the default Prom implementation backed by collectors
// registered with the default Prometheus registry.
type prom struct {
	// counter counts requests per (method, status) label pair.
	counter *prometheus.CounterVec
	// hist holds the size and latency histograms.
	hist hists
}
// hists groups the three histogram vectors recorded per RPC.
type hists struct {
	// req records request payload sizes in bytes.
	req *prometheus.HistogramVec
	// res records response payload sizes in bytes.
	res *prometheus.HistogramVec
	// latency records elapsed time in seconds.
	latency *prometheus.HistogramVec
}
// Observe records a single RPC observation for the (method, status)
// label pair: it increments the request counter and feeds the request
// size, response size, and latency histograms. It returns an error if
// any collector rejects the label values; in that case nothing is
// recorded (all metrics are resolved before any is updated).
func (p *prom) Observe(method string, code codes.Code, req int, res int, nanos time.Duration) error {
	labels := []string{method, code.String()}
	c, err := p.counter.GetMetricWithLabelValues(labels...)
	if err != nil {
		return err
	}
	hreq, err := p.hist.req.GetMetricWithLabelValues(labels...)
	if err != nil {
		return err
	}
	hres, err := p.hist.res.GetMetricWithLabelValues(labels...)
	if err != nil {
		return err
	}
	hlatency, err := p.hist.latency.GetMetricWithLabelValues(labels...)
	if err != nil {
		return err
	}
	c.Inc()
	hreq.Observe(float64(req))
	hres.Observe(float64(res))
	// Duration.Seconds() is the idiomatic, numerically identical form
	// of float64(nanos) / (1000 * 1000 * 1000).
	hlatency.Observe(nanos.Seconds())
	return nil
}
// Destroy removes every collector owned by this prom from the default
// Prometheus registry, allowing a later NewProm with the same name.
func (p *prom) Destroy() {
	collectors := []prometheus.Collector{
		p.counter,
		p.hist.latency,
		p.hist.req,
		p.hist.res,
	}
	for _, c := range collectors {
		prometheus.Unregister(c)
	}
}
|
package main
import (
"code.google.com/p/portaudio-go/portaudio"
"fmt"
"github.com/rynlbrwn/oregon/knob"
"github.com/rynlbrwn/oregon/polysynth"
"math"
"os"
"time"
)
// Audio stream parameters.
const (
	// rate is the output sample rate in Hz.
	rate = 44100
	// channels is the number of output channels (mono).
	channels = 1
	// framesPerBuffer is the portaudio buffer size in frames.
	framesPerBuffer = 2048
	// int16Max is the largest int16 value, used to scale normalized
	// [-1, 1] samples to PCM.
	int16Max = 1<<15 - 1
)
// ps is the shared synthesizer: configured in main and rendered from
// the audio callback.
var ps = polysynth.NewPolySynth()
// main configures a three-oscillator saw patch, starts three voices,
// and plays them through the default portaudio output for five
// seconds. Fatal setup errors are printed and terminate the process.
func main() {
	ps.Volume = 0.2

	// Oscillator 1: plain saw at the played pitch.
	vco1 := ps.VCOs[0]
	vco1.Wave = polysynth.Saw
	vco1.Octave = 0
	vco1.Semitone = 0
	vco1.Cents = 0

	// Oscillator 2: same saw detuned by 10 cents for a chorus effect.
	vco2 := ps.VCOs[1]
	vco2.Wave = polysynth.Saw
	vco2.Octave = 0
	vco2.Semitone = 0
	vco2.Cents = 10

	// Oscillator 3: saw one octave below for body.
	vco3 := ps.VCOs[2]
	vco3.Wave = polysynth.Saw
	vco3.Octave = -1
	vco3.Semitone = 0
	vco3.Cents = 0

	ps.AddVoice(110.0)
	ps.AddVoice(440.0 * math.Exp2(7.0/12.0)) // seven semitones above A4
	ps.AddVoice(880.0)

	// A knob-printing failure is informational only; keep playing.
	if err := knob.PrintKnobs(ps); err != nil {
		fmt.Println(err)
	}

	exitOnErr(portaudio.Initialize())
	defer portaudio.Terminate()

	stream, err := portaudio.OpenDefaultStream(0, channels, rate, framesPerBuffer, audioCallback)
	exitOnErr(err)
	defer stream.Close()

	exitOnErr(stream.Start())
	defer stream.Stop()

	time.Sleep(5 * time.Second)
}

// exitOnErr prints err and exits with status 1 when err is non-nil;
// it replaces the repeated print-and-exit boilerplate in main.
func exitOnErr(err error) {
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// audioCallback renders one buffer of audio from the shared
// synthesizer, clamps each sample to [-1, 1], and converts it to
// 16-bit PCM.
func audioCallback(out []int16) {
	samples := make([]float64, len(out))
	ps.Add(samples, rate)
	for i, s := range samples {
		// Clamp before scaling so overdriven voices saturate instead
		// of wrapping around in the int16 conversion.
		if s > 1.0 {
			s = 1.0
		} else if s < -1.0 {
			s = -1.0
		}
		out[i] = int16(s * int16Max)
	}
}
|
package gin
import (
"strconv"
)
// ServiceCheckHandle returns middleware that resolves the caller's
// identity. For internal URLs the user id is taken from the "user_id"
// query parameter; a malformed id makes the handler return without
// setting c.UserID. For external URLs a "token" query parameter is
// read but not yet validated.
func ServiceCheckHandle() HandlerFunc {
	return func(c *Context) {
		if c.IsInternalURL() {
			u := c.Query("user_id")
			if u == "" {
				return
			}
			id, err := strconv.ParseUint(u, 10, 64)
			if err != nil {
				return
			}
			c.UserID = id
			return
		}
		token := c.Query("token")
		if token != "" {
			// TODO(review): the token is read but never validated;
			// confirm whether external-token checking is still pending.
		}
	}
}
|
package router
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"testing"
"github.com/dennor/go-paddle/events/alerts"
"github.com/dennor/go-paddle/events/subscription"
"github.com/dennor/go-paddle/mime"
"github.com/stretchr/testify/mock"
)
// The mock types below implement the router's per-event handler
// interfaces. Each records its ServeHTTP invocation (event, writer,
// request) through testify's mock.Mock so tests can set expectations
// on the exact event value that was dispatched.
type mockAlertHighRiskTransactionCreated struct {
	mock.Mock
}

func (m *mockAlertHighRiskTransactionCreated) ServeHTTP(e *alerts.HighRiskTransactionCreated, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertHighRiskTransactionUpdated struct {
	mock.Mock
}

func (m *mockAlertHighRiskTransactionUpdated) ServeHTTP(e *alerts.HighRiskTransactionUpdated, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertLockerProcessed struct {
	mock.Mock
}

func (m *mockAlertLockerProcessed) ServeHTTP(e *alerts.LockerProcessed, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertNewAudienceMember struct {
	mock.Mock
}

func (m *mockAlertNewAudienceMember) ServeHTTP(e *alerts.NewAudienceMember, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertPaymentDisputeClosed struct {
	mock.Mock
}

func (m *mockAlertPaymentDisputeClosed) ServeHTTP(e *alerts.PaymentDisputeClosed, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertPaymentDisputeCreated struct {
	mock.Mock
}

func (m *mockAlertPaymentDisputeCreated) ServeHTTP(e *alerts.PaymentDisputeCreated, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertPaymentRefunded struct {
	mock.Mock
}

func (m *mockAlertPaymentRefunded) ServeHTTP(e *alerts.PaymentRefunded, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertPaymentSucceeded struct {
	mock.Mock
}

func (m *mockAlertPaymentSucceeded) ServeHTTP(e *alerts.PaymentSucceeded, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertTransferCreated struct {
	mock.Mock
}

func (m *mockAlertTransferCreated) ServeHTTP(e *alerts.TransferCreated, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertTransferPaid struct {
	mock.Mock
}

func (m *mockAlertTransferPaid) ServeHTTP(e *alerts.TransferPaid, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockAlertUpdateAudienceMember struct {
	mock.Mock
}

func (m *mockAlertUpdateAudienceMember) ServeHTTP(e *alerts.UpdateAudienceMember, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockSubscriptionCancelled struct {
	mock.Mock
}

func (m *mockSubscriptionCancelled) ServeHTTP(e *subscription.Cancelled, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockSubscriptionCreated struct {
	mock.Mock
}

func (m *mockSubscriptionCreated) ServeHTTP(e *subscription.Created, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockSubscriptionPaymentFailed struct {
	mock.Mock
}

func (m *mockSubscriptionPaymentFailed) ServeHTTP(e *subscription.PaymentFailed, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockSubscriptionPaymentRefunded struct {
	mock.Mock
}

func (m *mockSubscriptionPaymentRefunded) ServeHTTP(e *subscription.PaymentRefunded, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockSubscriptionPaymentSucceeded struct {
	mock.Mock
}

func (m *mockSubscriptionPaymentSucceeded) ServeHTTP(e *subscription.PaymentSucceeded, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}

type mockSubscriptionUpdated struct {
	mock.Mock
}

func (m *mockSubscriptionUpdated) ServeHTTP(e *subscription.Updated, rw http.ResponseWriter, req *http.Request) {
	m.Called(e, rw, req)
}
// mockResponseWriter is a testify-backed http.ResponseWriter used to
// observe what the router writes to the client.
type mockResponseWriter struct {
	mock.Mock
}

// Header returns whatever header map the expectation was configured
// with. The nil check avoids a panic from type-asserting a nil
// interface value.
func (m *mockResponseWriter) Header() http.Header {
	h := m.Called().Get(0)
	if h == nil {
		return nil
	}
	return h.(http.Header)
}

// Write reports the byte count and error configured on the mock.
func (m *mockResponseWriter) Write(b []byte) (int, error) {
	called := m.Called(b)
	return called.Int(0), called.Error(1)
}

// WriteHeader records the status code passed by the router.
func (m *mockResponseWriter) WriteHeader(i int) {
	m.Called(i)
}
// TestRouter checks that the router dispatches each webhook payload —
// identified by its alert_name form field — to the handler configured
// for that event type, with the event struct populated from the form.
//
// NOTE(review): the mock expectations set via m.On are never verified
// with m.AssertExpectations(t), so a router that silently dropped an
// event would still pass; presumably testify's strict argument
// matching is being relied on instead — confirm intent.
func TestRouter(t *testing.T) {
	t.Run("CallsServeHTTPWithEvent", func(t *testing.T) {
		// One case per supported event: a router configured with a
		// single expecting mock plus the form body that should be
		// routed to it.
		data := []struct {
			router Router
			query  []byte
		}{
			{
				router: Router{
					Config: Config{
						AlertHighRiskTransactionCreated: func() AlertHighRiskTransactionCreated {
							m := &mockAlertHighRiskTransactionCreated{}
							m.On("ServeHTTP", &alerts.HighRiskTransactionCreated{
								AlertName: "high_risk_transaction_created",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=high_risk_transaction_created"),
			},
			{
				router: Router{
					Config: Config{
						AlertHighRiskTransactionUpdated: func() AlertHighRiskTransactionUpdated {
							m := &mockAlertHighRiskTransactionUpdated{}
							m.On("ServeHTTP", &alerts.HighRiskTransactionUpdated{
								AlertName: "high_risk_transaction_updated",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=high_risk_transaction_updated"),
			},
			{
				router: Router{
					Config: Config{
						AlertLockerProcessed: func() AlertLockerProcessed {
							m := &mockAlertLockerProcessed{}
							m.On("ServeHTTP", &alerts.LockerProcessed{
								AlertName: "locker_processed",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=locker_processed"),
			},
			{
				router: Router{
					Config: Config{
						AlertNewAudienceMember: func() AlertNewAudienceMember {
							m := &mockAlertNewAudienceMember{}
							m.On("ServeHTTP", &alerts.NewAudienceMember{
								AlertName: "new_audience_member",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=new_audience_member"),
			},
			{
				router: Router{
					Config: Config{
						AlertPaymentDisputeClosed: func() AlertPaymentDisputeClosed {
							m := &mockAlertPaymentDisputeClosed{}
							m.On("ServeHTTP", &alerts.PaymentDisputeClosed{
								AlertName: "payment_dispute_closed",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=payment_dispute_closed"),
			},
			{
				router: Router{
					Config: Config{
						AlertPaymentDisputeCreated: func() AlertPaymentDisputeCreated {
							m := &mockAlertPaymentDisputeCreated{}
							m.On("ServeHTTP", &alerts.PaymentDisputeCreated{
								AlertName: "payment_dispute_created",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=payment_dispute_created"),
			},
			{
				router: Router{
					Config: Config{
						AlertPaymentRefunded: func() AlertPaymentRefunded {
							m := &mockAlertPaymentRefunded{}
							m.On("ServeHTTP", &alerts.PaymentRefunded{
								AlertName: "payment_refunded",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=payment_refunded"),
			},
			{
				router: Router{
					Config: Config{
						AlertPaymentSucceeded: func() AlertPaymentSucceeded {
							m := &mockAlertPaymentSucceeded{}
							m.On("ServeHTTP", &alerts.PaymentSucceeded{
								AlertName: "payment_succeeded",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=payment_succeeded"),
			},
			{
				router: Router{
					Config: Config{
						AlertTransferCreated: func() AlertTransferCreated {
							m := &mockAlertTransferCreated{}
							m.On("ServeHTTP", &alerts.TransferCreated{
								AlertName: "transfer_created",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=transfer_created"),
			},
			{
				router: Router{
					Config: Config{
						AlertTransferPaid: func() AlertTransferPaid {
							m := &mockAlertTransferPaid{}
							m.On("ServeHTTP", &alerts.TransferPaid{
								AlertName: "transfer_paid",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=transfer_paid"),
			},
			{
				router: Router{
					Config: Config{
						AlertUpdateAudienceMember: func() AlertUpdateAudienceMember {
							m := &mockAlertUpdateAudienceMember{}
							m.On("ServeHTTP", &alerts.UpdateAudienceMember{
								AlertName: "update_audience_member",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=update_audience_member"),
			},
			{
				router: Router{
					Config: Config{
						SubscriptionCancelled: func() SubscriptionCancelled {
							m := &mockSubscriptionCancelled{}
							m.On("ServeHTTP", &subscription.Cancelled{
								AlertName: "subscription_cancelled",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=subscription_cancelled"),
			},
			{
				router: Router{
					Config: Config{
						SubscriptionCreated: func() SubscriptionCreated {
							m := &mockSubscriptionCreated{}
							m.On("ServeHTTP", &subscription.Created{
								AlertName: "subscription_created",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=subscription_created"),
			},
			{
				router: Router{
					Config: Config{
						SubscriptionPaymentFailed: func() SubscriptionPaymentFailed {
							m := &mockSubscriptionPaymentFailed{}
							m.On("ServeHTTP", &subscription.PaymentFailed{
								AlertName: "subscription_payment_failed",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=subscription_payment_failed"),
			},
			{
				router: Router{
					Config: Config{
						SubscriptionPaymentRefunded: func() SubscriptionPaymentRefunded {
							m := &mockSubscriptionPaymentRefunded{}
							m.On("ServeHTTP", &subscription.PaymentRefunded{
								AlertName: "subscription_payment_refunded",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=subscription_payment_refunded"),
			},
			{
				router: Router{
					Config: Config{
						SubscriptionPaymentSucceeded: func() SubscriptionPaymentSucceeded {
							m := &mockSubscriptionPaymentSucceeded{}
							m.On("ServeHTTP", &subscription.PaymentSucceeded{
								AlertName: "subscription_payment_succeeded",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=subscription_payment_succeeded"),
			},
			{
				router: Router{
					Config: Config{
						SubscriptionUpdated: func() SubscriptionUpdated {
							m := &mockSubscriptionUpdated{}
							m.On("ServeHTTP", &subscription.Updated{
								AlertName: "subscription_updated",
							}, mock.AnythingOfType("*router.mockResponseWriter"), mock.AnythingOfType("*http.Request"))
							return m
						}(),
					},
				},
				query: []byte("alert_name=subscription_updated"),
			},
		}
		for _, tt := range data {
			// Build a minimal form-encoded request carrying the case's
			// alert_name payload and dispatch it through the router.
			req := &http.Request{
				Header: make(http.Header),
			}
			req.Header.Set(mime.ContentTypeHeader, mime.ApplicationForm)
			req.Body = ioutil.NopCloser(bytes.NewReader(tt.query))
			mockWriter := new(mockResponseWriter)
			tt.router.Handler().ServeHTTP(mockWriter, req)
		}
	})
}
// The bench* types below are no-op handlers for every supported event
// type; BenchmarkRouter uses them so the measurements capture only
// parsing and dispatch, not handler work.
type benchAlertHighRiskTransactionCreated struct{}

func (*benchAlertHighRiskTransactionCreated) ServeHTTP(e *alerts.HighRiskTransactionCreated, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertHighRiskTransactionUpdated struct {
}

func (*benchAlertHighRiskTransactionUpdated) ServeHTTP(e *alerts.HighRiskTransactionUpdated, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertLockerProcessed struct {
}

func (*benchAlertLockerProcessed) ServeHTTP(e *alerts.LockerProcessed, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertNewAudienceMember struct {
}

func (*benchAlertNewAudienceMember) ServeHTTP(e *alerts.NewAudienceMember, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertPaymentDisputeClosed struct {
}

func (*benchAlertPaymentDisputeClosed) ServeHTTP(e *alerts.PaymentDisputeClosed, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertPaymentDisputeCreated struct {
}

func (*benchAlertPaymentDisputeCreated) ServeHTTP(e *alerts.PaymentDisputeCreated, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertPaymentRefunded struct {
}

func (*benchAlertPaymentRefunded) ServeHTTP(e *alerts.PaymentRefunded, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertPaymentSucceeded struct {
}

func (*benchAlertPaymentSucceeded) ServeHTTP(e *alerts.PaymentSucceeded, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertTransferCreated struct {
}

func (*benchAlertTransferCreated) ServeHTTP(e *alerts.TransferCreated, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertTransferPaid struct {
}

func (*benchAlertTransferPaid) ServeHTTP(e *alerts.TransferPaid, rw http.ResponseWriter, req *http.Request) {
}

type benchAlertUpdateAudienceMember struct {
}

func (*benchAlertUpdateAudienceMember) ServeHTTP(e *alerts.UpdateAudienceMember, rw http.ResponseWriter, req *http.Request) {
}

type benchSubscriptionCancelled struct {
}

func (*benchSubscriptionCancelled) ServeHTTP(e *subscription.Cancelled, rw http.ResponseWriter, req *http.Request) {
}

type benchSubscriptionCreated struct {
}

func (*benchSubscriptionCreated) ServeHTTP(e *subscription.Created, rw http.ResponseWriter, req *http.Request) {
}

type benchSubscriptionPaymentFailed struct {
}

func (*benchSubscriptionPaymentFailed) ServeHTTP(e *subscription.PaymentFailed, rw http.ResponseWriter, req *http.Request) {
}

type benchSubscriptionPaymentRefunded struct {
}

func (*benchSubscriptionPaymentRefunded) ServeHTTP(e *subscription.PaymentRefunded, rw http.ResponseWriter, req *http.Request) {
}

type benchSubscriptionPaymentSucceeded struct {
}

func (*benchSubscriptionPaymentSucceeded) ServeHTTP(e *subscription.PaymentSucceeded, rw http.ResponseWriter, req *http.Request) {
}

type benchSubscriptionUpdated struct {
}

func (*benchSubscriptionUpdated) ServeHTTP(e *subscription.Updated, rw http.ResponseWriter, req *http.Request) {
}
// benchResponseWriter is a no-op http.ResponseWriter that keeps
// response handling out of the benchmark measurements.
type benchResponseWriter struct{}

// Header reports no header map; the benchmarks never inspect headers.
func (b *benchResponseWriter) Header() http.Header { return nil }

// Write claims the whole buffer was written and never fails.
func (b *benchResponseWriter) Write(p []byte) (int, error) { return len(p), nil }

// WriteHeader discards the status code.
func (b *benchResponseWriter) WriteHeader(statusCode int) {}
type reader struct {
bytes.Reader
}
func (r *reader) Close() error { return nil }
func (r *reader) Reset() { r.Seek(0, io.SeekStart) }
// BenchmarkRouter measures per-event routing overhead (form parsing,
// event construction, dispatch) using no-op handlers and a rewindable
// request body, reporting allocations per operation.
func BenchmarkRouter(b *testing.B) {
	// A router with a no-op handler for every supported event type.
	router := Router{
		Config: Config{
			AlertHighRiskTransactionCreated: &benchAlertHighRiskTransactionCreated{},
			AlertHighRiskTransactionUpdated: &benchAlertHighRiskTransactionUpdated{},
			AlertLockerProcessed:            &benchAlertLockerProcessed{},
			AlertNewAudienceMember:          &benchAlertNewAudienceMember{},
			AlertPaymentDisputeClosed:       &benchAlertPaymentDisputeClosed{},
			AlertPaymentDisputeCreated:      &benchAlertPaymentDisputeCreated{},
			AlertPaymentRefunded:            &benchAlertPaymentRefunded{},
			AlertPaymentSucceeded:           &benchAlertPaymentSucceeded{},
			AlertTransferCreated:            &benchAlertTransferCreated{},
			AlertTransferPaid:               &benchAlertTransferPaid{},
			AlertUpdateAudienceMember:       &benchAlertUpdateAudienceMember{},
			SubscriptionCancelled:           &benchSubscriptionCancelled{},
			SubscriptionCreated:             &benchSubscriptionCreated{},
			SubscriptionPaymentFailed:       &benchSubscriptionPaymentFailed{},
			SubscriptionPaymentRefunded:     &benchSubscriptionPaymentRefunded{},
			SubscriptionPaymentSucceeded:    &benchSubscriptionPaymentSucceeded{},
			SubscriptionUpdated:             &benchSubscriptionUpdated{},
		},
	}
	// One rewindable form payload per event type; the benchmark name
	// mirrors the event being routed.
	data := []struct {
		r             reader
		benchmarkName string
	}{
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=subscription_cancelled")),
			},
			benchmarkName: "SubscriptionCancelled",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=subscription_created")),
			},
			benchmarkName: "SubscriptionCreated",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=subscription_payment_failed")),
			},
			benchmarkName: "SubscriptionPaymentFailed",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=subscription_payment_refunded")),
			},
			benchmarkName: "SubscriptionPaymentRefunded",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=subscription_payment_succeeded")),
			},
			benchmarkName: "SubscriptionPaymentSucceeded",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=subscription_updated")),
			},
			benchmarkName: "SubscriptionUpdated",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=high_risk_transaction_created")),
			},
			benchmarkName: "HighRiskTransactionCreated",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=high_risk_transaction_updated")),
			},
			benchmarkName: "HighRiskTransactionUpdated",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=locker_processed")),
			},
			benchmarkName: "LockerProcessed",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=new_audience_member")),
			},
			benchmarkName: "NewAudienceMember",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=payment_dispute_closed")),
			},
			benchmarkName: "PaymentDisputeClosed",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=payment_dispute_created")),
			},
			benchmarkName: "PaymentDisputeCreated",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=payment_refunded")),
			},
			benchmarkName: "PaymentRefunded",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=payment_succeeded")),
			},
			benchmarkName: "PaymentSucceeded",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=transfer_created")),
			},
			benchmarkName: "TransferCreated",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=transfer_paid")),
			},
			benchmarkName: "TransferPaid",
		},
		{
			r: reader{
				Reader: *bytes.NewReader([]byte("alert_name=update_audience_member")),
			},
			benchmarkName: "UpdateAudienceMember",
		},
	}
	for _, tt := range data {
		// The same request value is reused across iterations; the
		// reader's Reset rewinds the body before each dispatch.
		req := &http.Request{
			Header: make(http.Header),
		}
		req.Header.Set(mime.ContentTypeHeader, mime.ApplicationForm)
		req.Body = ioutil.NopCloser(&tt.r)
		rw := benchResponseWriter{}
		handler := router.Handler()
		b.Run(tt.benchmarkName, func(b *testing.B) {
			b.ReportAllocs()
			for n := 0; n < b.N; n++ {
				tt.r.Reset()
				handler.ServeHTTP(&rw, req)
			}
		})
	}
}
|
package chapter5
import "testing"
// TestPowerSet exercises PowerSet on empty, single-element, and
// multi-element inputs. The test is currently skipped.
func TestPowerSet(t *testing.T) {
	t.Skip("Skipping PowerSet tests...")
	cases := [][]int{{}, {1}, {1, 2, 3}}
	for _, input := range cases {
		PowerSet(input)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.