text
stringlengths 11
4.05M
|
|---|
package admission
import (
"context"
"fmt"
"path/filepath"
"strings"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
chimerav1alpha1 "github.com/chimera-kube/chimera-controller/api/v1alpha1"
"github.com/chimera-kube/chimera-controller/internal/pkg/constants"
)
// reconcileAdmissionRegistration ensures the ValidatingWebhookConfiguration
// for the given policy exists; an "already exists" response from the API
// server is treated as success.
func (r *AdmissionReconciler) reconcileAdmissionRegistration(ctx context.Context, admissionPolicy *chimerav1alpha1.AdmissionPolicy, admissionSecret *corev1.Secret) error {
	if err := r.Client.Create(ctx, r.admissionRegistration(admissionPolicy, admissionSecret)); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
// admissionRegistration builds the ValidatingWebhookConfiguration that points
// the API server at the policy server's /validate/<policy-name> endpoint,
// using the CA bundle stored in the policy-server secret. Empty API
// groups/versions default to "*", and an unknown failure policy is forced to
// Fail (fail closed).
func (r *AdmissionReconciler) admissionRegistration(admissionPolicy *chimerav1alpha1.AdmissionPolicy, admissionSecret *corev1.Secret) *admissionregistrationv1.ValidatingWebhookConfiguration {
	admissionPath := filepath.Join("/validate", admissionPolicy.Name)
	admissionPort := int32(constants.PolicyServerPort)
	service := admissionregistrationv1.ServiceReference{
		Namespace: r.DeploymentsNamespace,
		Name:      constants.PolicyServerServiceName,
		Path:      &admissionPath,
		Port:      &admissionPort,
	}
	operationTypes := r.operationTypes(admissionPolicy)
	var failurePolicy admissionregistrationv1.FailurePolicyType
	switch admissionPolicy.Spec.FailurePolicy {
	case string(admissionregistrationv1.Fail):
		failurePolicy = admissionregistrationv1.Fail
	case string(admissionregistrationv1.Ignore):
		failurePolicy = admissionregistrationv1.Ignore
	default:
		// Unrecognized value: log it and fall back to Fail.
		r.Log.Info("admissionRegistration",
			"unknown failurePolicy", admissionPolicy.Spec.FailurePolicy,
			"forcing mode", admissionregistrationv1.Fail,
		)
		failurePolicy = admissionregistrationv1.Fail
	}
	sideEffects := admissionregistrationv1.SideEffectClassNone
	// Empty selectors mean "match everything".
	apiGroups := admissionPolicy.Spec.APIGroups
	if len(apiGroups) == 0 {
		apiGroups = []string{"*"}
	}
	apiVersions := admissionPolicy.Spec.APIVersions
	if len(apiVersions) == 0 {
		apiVersions = []string{"*"}
	}
	return &admissionregistrationv1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: admissionPolicy.Name,
			Labels: map[string]string{
				"chimera": "true",
			},
		},
		Webhooks: []admissionregistrationv1.ValidatingWebhook{
			{
				Name: fmt.Sprintf("%s.chimera.admission", admissionPolicy.Name),
				ClientConfig: admissionregistrationv1.WebhookClientConfig{
					Service:  &service,
					CABundle: admissionSecret.Data[constants.PolicyServerCASecretKeyName],
				},
				Rules: []admissionregistrationv1.RuleWithOperations{
					{
						Operations: operationTypes,
						Rule: admissionregistrationv1.Rule{
							APIGroups:   apiGroups,
							APIVersions: apiVersions,
							Resources:   admissionPolicy.Spec.Resources,
						},
					},
				},
				FailurePolicy:           &failurePolicy,
				SideEffects:             &sideEffects,
				AdmissionReviewVersions: []string{"v1"},
			},
		},
	}
}
// operationTypes maps the policy's operation names (case-insensitively)
// onto admissionregistration OperationType values; unrecognized names are
// silently skipped.
func (r *AdmissionReconciler) operationTypes(admissionPolicy *chimerav1alpha1.AdmissionPolicy) []admissionregistrationv1.OperationType {
	result := []admissionregistrationv1.OperationType{}
	for _, operation := range admissionPolicy.Spec.Operations {
		switch strings.ToUpper(operation) {
		case "*":
			result = append(result, admissionregistrationv1.OperationAll)
		case "CREATE":
			result = append(result, admissionregistrationv1.Create)
		case "UPDATE":
			result = append(result, admissionregistrationv1.Update)
		case "DELETE":
			result = append(result, admissionregistrationv1.Delete)
		case "CONNECT":
			result = append(result, admissionregistrationv1.Connect)
		}
	}
	return result
}
|
package main
// this is a front end to check-procs. it allows us to build the check
// as a stand alone function
import (
"fmt"
"log"
"os"
"plugins"
"plugins/metrics"
"strings"
)
// the_metric is the static plugin configuration used to run the check as a
// standalone metric; Command and Args are filled in from os.Args in main.
var the_metric = plugins.PluginConfig{
	Type:       "metric",
	Command:    "",
	Handlers:   []string{},
	Standalone: true,
	Interval:   15,
}
// main runs the TcpStats metric plugin as a standalone command: initialize
// it from the static config, gather one round of results, and print them to
// stdout. Exit codes: 1 = init failure, 2 = gather failure.
func main() {
	m := new(metrics.TcpStats)
	the_metric.Command = strings.Join(os.Args, " ")
	the_metric.Args = os.Args
	_, err := m.Init(the_metric)
	if nil != err {
		fmt.Println(err)
		os.Exit(1)
	}
	// Send log output to stderr so stdout carries only the metric lines.
	log.SetOutput(os.Stderr)
	r := new(plugins.Result)
	err = m.Gather(r)
	if nil != err {
		fmt.Println("Error:", err)
		os.Exit(2)
	}
	fmt.Println(strings.Join(r.OutputAsStrings(), "\n"))
}
|
package msutil
import x "github.com/dearcj/golangproj/network"
// XServerDataMsg pairs a mutable network ServerData message with a pristine
// backup copy, so the live data can be written to and later restored via
// Reset. The changed flag records whether WriteToMsg has handed out the
// mutable data since the last Reset.
type XServerDataMsg struct {
	UniqueID []byte
	changed  bool
	data     *x.ServerData
	backup   *x.ServerData
}
// WriteToMsg marks the message as changed and returns the mutable
// ServerData for the caller to write into.
func (n *XServerDataMsg) WriteToMsg() *x.ServerData {
	n.changed = true
	return n.data
}

// Reset restores data from the backup copy and clears the changed flag.
func (n *XServerDataMsg) Reset() {
	*n.data = *n.backup
	n.changed = false
}

// IsChanged reports whether WriteToMsg has been called since the last Reset.
func (n *XServerDataMsg) IsChanged() bool {
	return n.changed
}

// CreateXServerData allocates an empty ServerData value.
func CreateXServerData() *x.ServerData {
	return &x.ServerData{}
}

// CreateXServerDataMsg builds an XServerDataMsg for the given unique ID with
// freshly allocated data and backup buffers.
func CreateXServerDataMsg(UniqueID []byte) XServerDataMsg {
	return XServerDataMsg{
		UniqueID: UniqueID,
		backup:   CreateXServerData(),
		data:     CreateXServerData()}
}
|
package main
import (
"bufio"
"fmt"
"log"
"math/rand"
"os"
"time"
)
var (
	// userAgents is the pool of User-Agent strings; filled by loadUserAgents.
	userAgents []string
	// random picks entries from the pool; seeded in loadUserAgents.
	random *rand.Rand
	source rand.Source
)
// loadUserAgents fills the userAgents pool from *userAgentFile (one agent
// per line) and seeds the random generator. If the file cannot be opened,
// a single default user agent is used instead.
func loadUserAgents() {
	file, err := os.Open(*userAgentFile)
	if err == nil {
		defer file.Close()
		scanner := bufio.NewScanner(file)
		for scanner.Scan() {
			userAgents = append(userAgents, scanner.Text())
		}
		if scanErr := scanner.Err(); scanErr != nil {
			log.Fatal(scanErr)
		}
	} else {
		// File not found, or whatever: fall back to the default UA.
		userAgents = append(userAgents, "Tsunami Flooder (https://github.com/ammar/tsunami)")
		fmt.Println(err)
	}
	// Initiate random number generator.
	source = rand.NewSource(time.Now().UnixNano())
	random = rand.New(source)
}
func getRandomUserAgent() string {
index := int(random.Uint32()) % len(userAgents)
return userAgents[index]
}
|
package db
import (
"testing"
"github.com/GoAdminGroup/go-admin/modules/config"
_ "github.com/GoAdminGroup/go-admin/modules/db/drivers/sqlite"
)
// driverTestSQLiteConn is the shared connection used by every SQLite test
// below; InitSqlite must run before the tests do.
var driverTestSQLiteConn Connection

// InitSqlite opens the shared SQLite test connection against /admin.db.
func InitSqlite() {
	driverTestSQLiteConn = testConn(DriverSqlite, config.Database{File: "/admin.db"})
}

// Each test below exercises one query-builder operation against the shared
// SQLite connection via the driver-agnostic helpers defined elsewhere in
// this package.
func TestSQLiteSQL_WhereIn(t *testing.T)    { testSQLWhereIn(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Count(t *testing.T)      { testSQLCount(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Select(t *testing.T)     { testSQLSelect(t, driverTestSQLiteConn) }
func TestSQLiteSQL_OrderBy(t *testing.T)    { testSQLOrderBy(t, driverTestSQLiteConn) }
func TestSQLiteSQL_GroupBy(t *testing.T)    { testSQLGroupBy(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Skip(t *testing.T)       { testSQLSkip(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Take(t *testing.T)       { testSQLTake(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Where(t *testing.T)      { testSQLWhere(t, driverTestSQLiteConn) }
func TestSQLiteSQL_WhereNotIn(t *testing.T) { testSQLWhereNotIn(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Find(t *testing.T)       { testSQLFind(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Sum(t *testing.T)        { testSQLSum(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Max(t *testing.T)        { testSQLMax(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Min(t *testing.T)        { testSQLMin(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Avg(t *testing.T)        { testSQLAvg(t, driverTestSQLiteConn) }
func TestSQLiteSQL_WhereRaw(t *testing.T)   { testSQLWhereRaw(t, driverTestSQLiteConn) }
func TestSQLiteSQL_UpdateRaw(t *testing.T)  { testSQLUpdateRaw(t, driverTestSQLiteConn) }
func TestSQLiteSQL_LeftJoin(t *testing.T)   { testSQLLeftJoin(t, driverTestSQLiteConn) }
func TestSQLiteSQL_WithTransaction(t *testing.T) { testSQLWithTransaction(t, driverTestSQLiteConn) }
func TestSQLiteSQL_WithTransactionByLevel(t *testing.T) {
	testSQLWithTransactionByLevel(t, driverTestSQLiteConn)
}
func TestSQLiteSQL_First(t *testing.T)       { testSQLFirst(t, driverTestSQLiteConn) }
func TestSQLiteSQL_All(t *testing.T)         { testSQLAll(t, driverTestSQLiteConn) }
func TestSQLiteSQL_ShowColumns(t *testing.T) { testSQLShowColumns(t, driverTestSQLiteConn) }
func TestSQLiteSQL_ShowTables(t *testing.T)  { testSQLShowTables(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Update(t *testing.T)      { testSQLUpdate(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Delete(t *testing.T)      { testSQLDelete(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Exec(t *testing.T)        { testSQLExec(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Insert(t *testing.T)      { testSQLInsert(t, driverTestSQLiteConn) }
func TestSQLiteSQL_Wrap(t *testing.T)        { testSQLWrap(t, driverTestSQLiteConn) }
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package external
import (
"context"
"encoding/binary"
"io"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
// kvReader decodes length-prefixed key/value pairs from an external storage
// file through a buffered byteReader.
type kvReader struct {
	byteReader *byteReader
}
// newKVReader opens the named file on the external storage, seeks to
// initFileOffset, and wraps it in a buffered byte reader for KV decoding.
// On failure the underlying storage reader is closed so the handle does
// not leak.
func newKVReader(
	ctx context.Context,
	name string,
	store storage.ExternalStorage,
	initFileOffset uint64,
	bufSize int,
) (*kvReader, error) {
	sr, err := openStoreReaderAndSeek(ctx, store, name, initFileOffset)
	if err != nil {
		return nil, err
	}
	br, err := newByteReader(ctx, sr, bufSize)
	if err != nil {
		// newByteReader failed, so br may be nil; the previous
		// unconditional br.Close() here could panic. Close whichever
		// handle we actually hold.
		if br != nil {
			_ = br.Close()
		} else {
			_ = sr.Close()
		}
		return nil, err
	}
	return &kvReader{
		byteReader: br,
	}, nil
}
// nextKV reads one length-prefixed KV pair: an 8-byte big-endian key length,
// the key bytes, an 8-byte big-endian value length, then the value bytes.
// io.EOF before the first length prefix signals a clean end of stream;
// EOF anywhere later means the file is truncated and is converted to
// io.ErrUnexpectedEOF. The returned slices come from the byteReader's
// internal buffers — presumably valid only until the next call's reset();
// confirm against byteReader's contract.
func (r *kvReader) nextKV() (key, val []byte, err error) {
	r.byteReader.reset()
	lenBytes, err := r.byteReader.readNBytes(8)
	if err != nil {
		return nil, nil, err
	}
	keyLen := int(binary.BigEndian.Uint64(*lenBytes))
	keyPtr, err := r.byteReader.readNBytes(keyLen)
	if err != nil {
		// EOF mid-record: the file ended inside a KV entry.
		return nil, nil, noEOF(err)
	}
	lenBytes, err = r.byteReader.readNBytes(8)
	if err != nil {
		return nil, nil, noEOF(err)
	}
	valLen := int(binary.BigEndian.Uint64(*lenBytes))
	valPtr, err := r.byteReader.readNBytes(valLen)
	if err != nil {
		return nil, nil, noEOF(err)
	}
	return *keyPtr, *valPtr, nil
}
// noEOF converts the EOF error to io.ErrUnexpectedEOF, logging a warning
// when the conversion happens; every other error passes through unchanged.
func noEOF(err error) error {
	if err != io.EOF {
		return err
	}
	logutil.BgLogger().Warn("unexpected EOF", zap.Error(errors.Trace(err)))
	return io.ErrUnexpectedEOF
}
// Close closes the underlying byteReader.
func (r *kvReader) Close() error {
	return r.byteReader.Close()
}
|
package main
import (
"rpc/utils"
"fmt"
"log"
"net/rpc"
)
// main demonstrates a synchronous and an asynchronous RPC call against a
// MathService exposed over HTTP on localhost:8080.
func main() {
	var serverAddress = "localhost"
	client, err := rpc.DialHTTP("tcp", serverAddress+":8080")
	if err != nil {
		log.Fatal("Fail", err)
	}
	args := &utils.Args{10, 10}
	var reply int
	err = client.Call("MathService.Multiply", args, &reply)
	if err != nil {
		log.Fatal("调用远程方法 MathService.Multiply 失败:", err)
	}
	fmt.Printf("%d*%d=%d\n", args.A, args.B, reply)
	// Asynchronous call: wait directly on the Done channel instead of
	// spinning in a single-case select loop, and check the call's error
	// instead of printing a possibly bogus result.
	divideCall := client.Go("MathService.Divide", args, &reply, nil)
	call := <-divideCall.Done
	if call.Error != nil {
		log.Fatal("调用远程方法 MathService.Divide 失败:", call.Error)
	}
	fmt.Printf("%d/%d=%d\n", args.A, args.B, reply)
}
|
package main
import (
"context"
"encoding/json"
"io/ioutil"
"log"
"net"
"net/http"
"strings"
)
// Client is a way of interacting with the Orbit unix socket.
type Client struct {
	client *http.Client
}

// NewClient creates a new instance of the orbit socket client. Every
// request is dialed over the unix socket at /var/run/orbit.sock regardless
// of the URL's host portion.
func NewClient() *Client {
	dial := func(_ context.Context, _, _ string) (net.Conn, error) {
		return net.Dial("unix", "/var/run/orbit.sock")
	}
	transport := &http.Transport{DialContext: dial}
	return &Client{client: &http.Client{Transport: transport}}
}
// Get makes a GET request to the Orbit socket and returns the raw response
// body. Any transport or read error is fatal.
func (c *Client) Get(path string) []byte {
	url := "http://unix/" + strings.TrimPrefix(path, "/")
	res, err := c.client.Get(url)
	if err != nil {
		log.Fatalf("Could not query Orbit socket: %s", err)
	}
	// Close the body so the underlying connection can be reused; the
	// previous version leaked it on every call.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatalf("Could not read HTTP response from Orbit socket: %s", err)
	}
	return body
}
// Router is a logical Orbit router.
type Router struct {
	ID            string `json:"id"`
	Domain        string `json:"domain"`
	AppID         string `json:"app_id"`
	CertificateID string `json:"certificate_id"`
	WWWRedirect   bool   `json:"www_redirect"`
}

// Challenge is a LetsEncrypt challenge path. We must serve the "token" string
// at the "path" URL.
type Challenge struct {
	Path  string `json:"path"`
	Token string `json:"token"`
}

// Certificate is a logical certificate: its ID, full-chain and private-key
// bytes, and any outstanding challenges.
type Certificate struct {
	ID         string      `json:"id"`
	FullChain  []byte      `json:"full_chain"`
	PrivateKey []byte      `json:"private_key"`
	Challenges []Challenge `json:"challenges"`
}
// GetRouters retrieves all of the routers from the Orbit socket.
func (c *Client) GetRouters() []Router {
	routers := make([]Router, 0)
	payload := c.Get("/routers")
	if err := json.Unmarshal(payload, &routers); err != nil {
		log.Fatalf("Could not parse response from Orbit socket: %s", err)
	}
	return routers
}
// GetCertificates retrieves all of the certificates from the Orbit socket.
func (c *Client) GetCertificates() []Certificate {
	certificates := make([]Certificate, 0)
	payload := c.Get("/certificates")
	if err := json.Unmarshal(payload, &certificates); err != nil {
		log.Fatalf("Could not parse response from Orbit socket: %s", err)
	}
	return certificates
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"context"
"testing"
"github.com/stretchr/testify/require"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
oamutil "github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
// TestParseOverridePolicyRelatedDefinitions checks that the override-policy
// parser resolves the component/trait definitions referenced by a policy's
// properties, and that malformed or empty policies surface the expected
// error substrings. The fake client pre-seeds one ComponentDefinition (in
// the system definition namespace) and one TraitDefinition (in "test").
func TestParseOverridePolicyRelatedDefinitions(t *testing.T) {
	cli := fake.NewClientBuilder().WithScheme(common.Scheme).WithObjects(&v1beta1.ComponentDefinition{
		ObjectMeta: v1.ObjectMeta{Name: "comp", Namespace: oam.SystemDefinitionNamespace},
	}, &v1beta1.TraitDefinition{
		ObjectMeta: v1.ObjectMeta{Name: "trait", Namespace: "test"},
	}).Build()
	r := require.New(t)
	app := &v1beta1.Application{}
	app.SetNamespace("test")
	ctx := oamutil.SetNamespaceInCtx(context.Background(), "test")
	testCases := map[string]struct {
		Policy        v1beta1.AppPolicy
		ComponentDefs []*v1beta1.ComponentDefinition // expected resolved component definitions
		TraitDefs     []*v1beta1.TraitDefinition     // expected resolved trait definitions
		Error         string                         // substring expected in err; empty means success
	}{
		"normal": {
			Policy:        v1beta1.AppPolicy{Properties: &runtime.RawExtension{Raw: []byte(`{"components":[{"type":"comp","traits":[{"type":"trait"}]}]}`)}},
			ComponentDefs: []*v1beta1.ComponentDefinition{{ObjectMeta: v1.ObjectMeta{Name: "comp", Namespace: oam.SystemDefinitionNamespace}}},
			TraitDefs:     []*v1beta1.TraitDefinition{{ObjectMeta: v1.ObjectMeta{Name: "trait", Namespace: "test"}}},
		},
		"invalid-override-policy": {
			Policy: v1beta1.AppPolicy{Properties: &runtime.RawExtension{Raw: []byte(`{bad value}`)}},
			Error:  "invalid override policy spec",
		},
		"comp-def-not-found": {
			Policy: v1beta1.AppPolicy{Properties: &runtime.RawExtension{Raw: []byte(`{"components":[{"type":"comp-404","traits":[{"type":"trait"}]}]}`)}},
			Error:  "failed to get component definition",
		},
		"trait-def-not-found": {
			Policy: v1beta1.AppPolicy{Properties: &runtime.RawExtension{Raw: []byte(`{"components":[{"type":"comp","traits":[{"type":"trait-404"}]}]}`)}},
			Error:  "failed to get trait definition",
		},
		"empty-policy": {
			Policy:        v1beta1.AppPolicy{Properties: nil},
			ComponentDefs: nil,
			TraitDefs:     nil,
			Error:         "have empty properties",
		},
	}
	for name, tt := range testCases {
		t.Run(name, func(t *testing.T) {
			compDefs, traitDefs, err := ParseOverridePolicyRelatedDefinitions(ctx, cli, app, tt.Policy)
			if tt.Error != "" {
				r.NotNil(err)
				r.Contains(err.Error(), tt.Error)
			} else {
				// On success the resolved definitions must match the
				// expectations pairwise by name and namespace.
				r.NoError(err)
				r.Equal(len(tt.ComponentDefs), len(compDefs))
				for i := range tt.ComponentDefs {
					r.Equal(tt.ComponentDefs[i].Name, compDefs[i].Name)
					r.Equal(tt.ComponentDefs[i].Namespace, compDefs[i].Namespace)
				}
				r.Equal(len(tt.TraitDefs), len(traitDefs))
				for i := range tt.TraitDefs {
					r.Equal(tt.TraitDefs[i].Name, traitDefs[i].Name)
					r.Equal(tt.TraitDefs[i].Namespace, traitDefs[i].Namespace)
				}
			}
		})
	}
}
|
package main
// Product is anything the factory can build; Size reports the product's
// relative size.
type Product interface {
	Size() int
}

// Car is the size-1 product.
type Car struct {
}

// Bus is the size-2 product.
type Bus struct {
}

// Size returns the fixed size of a Car.
func (c *Car) Size() int {
	return 1
}

// Size returns the fixed size of a Bus.
func (b *Bus) Size() int {
	return 2
}

// Factory builds Products by numeric kind.
type Factory struct {
}

// NewFactory returns a ready-to-use Factory.
func NewFactory() *Factory {
	return &Factory{}
}

// MakeProduct returns a Car for kind 1 and a Bus for any other kind.
func (f *Factory) MakeProduct(kind int) Product {
	switch kind {
	case 1:
		return &Car{}
	default:
		return &Bus{}
	}
}
|
package websocket
import (
"KServer/library/kiface/iwebsocket"
"KServer/library/websocket/utils"
"fmt"
"strconv"
)
// MsgHandle dispatches websocket requests to per-message-ID handlers,
// optionally through a pool of worker goroutines.
type MsgHandle struct {
	Handle         map[uint32]iwebsocket.IHandle // handler registered for each message ID
	WorkerPoolSize uint32                        // number of workers in the business worker pool
	TaskQueue      []chan iwebsocket.IRequest    // per-worker queues the workers consume from
	CustomHandle   iwebsocket.IHandle            // fallback handler for IDs with no registration
	//Response map[uint32]ziface.IResponse
}
// NewMsgHandle builds a MsgHandle sized from the global websocket
// configuration, with one task queue slot per worker.
func NewMsgHandle() *MsgHandle {
	return &MsgHandle{
		Handle:         make(map[uint32]iwebsocket.IHandle),
		WorkerPoolSize: utils.GlobalObject.WorkerPoolSize,
		// one queue per worker
		TaskQueue: make([]chan iwebsocket.IRequest, utils.GlobalObject.WorkerPoolSize),
		//Response: make(map[uint32]ziface.IResponse),
	}
}
// SendMsgToTaskQueue hands the request to a worker's task queue for
// asynchronous processing.
func (mh *MsgHandle) SendMsgToTaskQueue(request iwebsocket.IRequest) {
	// Pick the worker responsible for this connection: ConnID modulo the
	// pool size, so one connection always maps to the same worker.
	workerID := request.GetConnection().GetConnID() % mh.WorkerPoolSize
	//fmt.Println("Add ConnID=", request.GetConnection().GetConnID()," request msgID=", request.GetMsgID(), "to workerID=", workerID)
	// Push the request onto that worker's queue (blocks when the queue is full).
	mh.TaskQueue[workerID] <- request
}
// DoMsgHandler executes the handler bound to the request's message ID
// immediately (no queueing). Unknown IDs fall back to the custom handler
// when one is set; otherwise the client is notified and disconnected.
func (mh *MsgHandle) DoMsgHandler(request iwebsocket.IRequest) {
	handler, ok := mh.Handle[request.GetMessage().GetId()]
	if !ok {
		// No handler for this ID: run the custom fallback if registered.
		if mh.CustomHandle != nil {
			fmt.Println("执行自定义头")
			mh.CustomHandle.PreHandle(request)
			mh.CustomHandle.PostHandle(request)
			return
		}
		// No fallback either: tell the client there is no such service
		// and stop the connection.
		request.GetConnection().SendBuffMsg([]byte("无服务"))
		request.GetConnection().Stop()
		return
	}
	// Run the registered handler's pre/post phases.
	handler.PreHandle(request)
	//handler.RunMsg(request.GetMsgID())
	handler.PostHandle(request)
}
// AddCustomHandle registers the fallback handler used for message IDs that
// have no registered handler.
func (mh *MsgHandle) AddCustomHandle(handle iwebsocket.IHandle) {
	mh.CustomHandle = handle
	fmt.Println("Socket Add CustomHandle ")
}

// AddHandle binds a handler to a message ID; registering the same ID twice
// panics.
func (mh *MsgHandle) AddHandle(id uint32, handle iwebsocket.IHandle) {
	// 1. Refuse duplicate registrations for the same message ID.
	if _, ok := mh.Handle[id]; ok {
		panic("repeated Agreement , Id = " + strconv.Itoa(int(id)))
	}
	// 2. Record the ID -> handler binding.
	mh.Handle[id] = handle
	fmt.Println("Socket Add Handle = ", id)
}
// StartOneWorker runs one worker loop: it blocks on its task queue and
// dispatches each request as it arrives. Ranging over the channel replaces
// the previous single-case select inside for{} (staticcheck S1000), and
// additionally lets the worker exit cleanly if the queue is ever closed
// instead of spinning on zero values.
func (mh *MsgHandle) StartOneWorker(workerID int, taskQueue chan iwebsocket.IRequest) {
	fmt.Println("Worker ID = ", workerID, " is started.")
	// Block waiting for requests on this worker's queue.
	for request := range taskQueue {
		mh.DoMsgHandler(request)
	}
}
// StartWorkerPool allocates each worker's task queue and launches the
// worker goroutines.
func (mh *MsgHandle) StartWorkerPool() {
	// Start WorkerPoolSize workers, one per queue slot.
	for i := 0; i < int(mh.WorkerPoolSize); i++ {
		// Allocate this worker's buffered task queue.
		mh.TaskQueue[i] = make(chan iwebsocket.IRequest, utils.GlobalObject.MaxWorkerTaskLen)
		// Launch the worker; it blocks waiting for requests on its queue.
		go mh.StartOneWorker(i, mh.TaskQueue[i])
	}
}
|
package main
import (
"fmt"
"net/http"
)
type handlerType string
func (h handlerType) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "request handled")
}
// main serves handlerType on :8080. ListenAndServe only returns on
// failure, so report its error instead of silently dropping it.
func main() {
	var handy handlerType
	if err := http.ListenAndServe(":8080", handy); err != nil {
		fmt.Println(err)
	}
}
|
package main
import (
"fmt"
"github.com/araddon/dateparse"
)
// main is a scratch program: it parses one hard-coded US-style timestamp
// with dateparse.ParseAny and prints both the parsed time and any error.
func main() {
	fmt.Println("vim-go")
	t, err := dateparse.ParseAny("6/10/2021, 8:26:03 AM")
	fmt.Printf("%+v, %+v\n", t, err)
}
|
/*
Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Kubernetes
kubernetes - related commands:
init - Return json in kubernetes format for use as init script responce
cfy-go kubernetes init
mount - Return json in kubernetes format for use as mount script responce
cfy-go kubernetes mount /tmp/someunxists '{"kubernetes.io/fsType":"ext4",... "volumegroup":"kube_vg"}' -deployment slave -instance kubenetes_slave_*
unmount - Return json in kubernetes format for use as unmount script responce
cfy-go kubernetes unmount /tmp/someunxists -deployment slave -instance kubenetes_slave_*
*/
package main
import (
"fmt"
kubernetes "github.com/cloudify-incubator/cloudify-rest-go-client/kubernetes"
)
// KubernetesOptions implementation of kubernetes subcommand: it parses the
// -deployment/-instance flags and forwards the remaining arguments
// (init/mount/unmount) to the kubernetes helper package. Returns 0 on
// success, 1 on usage or execution errors.
func KubernetesOptions(args, options []string) int {
	defaultError := "init/mount/unmount subcommand is required"
	// args[0]/args[1] are the binary and "kubernetes"; a subcommand must follow.
	if len(args) < 3 {
		fmt.Println(defaultError)
		return 1
	}
	operFlagSet := basicOptions("kubernetes")
	var deployment string
	operFlagSet.StringVar(&deployment, "deployment", "",
		"The unique identifier for the deployment")
	var instance string
	operFlagSet.StringVar(&instance, "instance", "",
		"The unique identifier for the instance")
	operFlagSet.Parse(options)
	cl := getQuietClient()
	// Delegate the actual init/mount/unmount work; any non-zero status is
	// reported as a usage error.
	if kubernetes.Run(cl, args[2:], deployment, instance) != 0 {
		fmt.Println(defaultError)
		return 1
	}
	return 0
}
|
package main
import (
"fmt"
"jblee.net/adventofcode2018/utils"
)
// findSimilarString scans every pair of lines for two strings that differ
// at exactly one position and returns that string with the differing
// character removed. Returns "" when no such pair exists. Lines are
// assumed to all have the same length.
func findSimilarString(lines []string) string {
	for i, first := range lines {
		for _, second := range lines[i+1:] {
			mismatch := -1
			single := true
			for pos := 0; pos < len(first); pos++ {
				if first[pos] == second[pos] {
					continue
				}
				if mismatch >= 0 {
					// Second difference: this pair does not qualify.
					single = false
					break
				}
				mismatch = pos
			}
			if single && mismatch >= 0 {
				return first[:mismatch] + first[mismatch+1:]
			}
		}
	}
	return ""
}
// main reads the puzzle input and prints the common letters of the two
// lines that differ by exactly one character.
func main() {
	lines := utils.ReadLinesOrDie("input.txt")
	answer := findSimilarString(lines)
	fmt.Printf("answer: %s\n", answer)
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
)
var primes = []int64{2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101}
// main runs both puzzle parts over the same input file.
func main() {
	lines := read()
	part1(lines)
	part2(lines)
}
// part1 counts the lines that contain no repeated word and prints the count.
func part1(lines []string) {
	count := 0
	for _, line := range lines {
		used := map[string]bool{}
		ok := true
		for _, word := range strings.Split(line, " ") {
			if used[word] {
				ok = false
			}
			used[word] = true
		}
		if ok {
			count++
		}
	}
	fmt.Println(count)
}
// part2 counts the lines that contain no two words that are anagrams of
// each other (compared via prime-product hashes) and prints the count.
func part2(lines []string) {
	count := 0
	for _, line := range lines {
		used := map[int64]bool{}
		ok := true
		for _, word := range strings.Split(line, " ") {
			key := hash(word)
			if used[key] {
				ok = false
			}
			used[key] = true
		}
		if ok {
			count++
		}
	}
	fmt.Println(count)
}
// hash maps a lowercase word to the product of one prime per letter, so two
// words hash equal iff they are anagrams. Only 'a'..'z' input is supported;
// anything else would index primes out of range.
func hash(s string) int64 {
	product := int64(1)
	for i := 0; i < len(s); i++ {
		product *= primes[s[i]-'a']
	}
	return product
}
// read loads input.txt and returns its lines, trimming surrounding
// whitespace before splitting; panics if the file cannot be read.
func read() []string {
	data, err := ioutil.ReadFile("input.txt")
	if err != nil {
		panic(err)
	}
	return strings.Split(strings.TrimSpace(string(data)), "\n")
}
|
package db
import (
"fmt"
"time"
"github.com/jinzhu/gorm"
)
//BootstrapData sets up initial data: seed teams, then heats.
func BootstrapData(db *gorm.DB) {
	bootstrapTeams(db)
	bootstrapHeats(db)
}
// bootstrapTeams seeds ten placeholder teams, skipping the insert when any
// team rows already exist. It now uses the db handle it is given — the
// previous version ignored its parameter and went through the package
// global DB, which defeats the point of passing a handle in.
func bootstrapTeams(db *gorm.DB) {
	teams := []Team{
		{Slug: "team1", Description: "The first team1"},
		{Slug: "team2", Description: "The first team2"},
		{Slug: "team3", Description: "The first team3"},
		{Slug: "team4", Description: "The first team4"},
		{Slug: "team5", Description: "The first team5"},
		{Slug: "team6", Description: "The first team6"},
		{Slug: "team7", Description: "The first team7"},
		{Slug: "team8", Description: "The first team8"},
		{Slug: "team9", Description: "The first team9"},
		{Slug: "team10", Description: "The first team10"},
	}
	var count int
	db.Model(&Team{}).Count(&count)
	if count > 0 {
		fmt.Println("Skipping init of data - already present")
		return
	}
	for _, team := range teams {
		db.Create(&team)
	}
}
// bootstrapHeats seeds three placeholder heats, skipping the insert when
// any heat rows already exist. As with bootstrapTeams, it now uses the db
// handle it is given instead of the package global DB, and the local slice
// is named heats (it held Heat values but was called "teams").
func bootstrapHeats(db *gorm.DB) {
	heats := []Heat{
		{Slug: "heat1",
			Description: "Daintree Level 3",
			StartTime:   time.Date(2018, 3, 20, 13, 0, 0, 0, time.Local).Unix(),
			EndTime:     time.Date(2018, 3, 20, 14, 0, 0, 0, time.Local).Unix(),
		},
		{Slug: "heat2",
			Description: "Uluru Level 4",
			StartTime:   time.Date(2018, 3, 20, 13, 0, 0, 0, time.Local).Unix(),
			EndTime:     time.Date(2018, 3, 20, 14, 0, 0, 0, time.Local).Unix(),
		},
		{Slug: "heat3",
			Description: "Boardroom Level 1",
			StartTime:   time.Date(2018, 3, 20, 13, 0, 0, 0, time.Local).Unix(),
			EndTime:     time.Date(2018, 3, 20, 14, 0, 0, 0, time.Local).Unix(),
		},
	}
	var count int
	db.Model(&Heat{}).Count(&count)
	if count > 0 {
		fmt.Println("Skipping init of heat data - already present")
		return
	}
	for _, heat := range heats {
		db.Create(&heat)
	}
}
|
package main
import (
"github.com/gorilla/mux"
"github.com/itsmeadi/cart/src/entities/config"
"github.com/itsmeadi/cart/src/interfaces/db/cache"
"github.com/itsmeadi/cart/src/interfaces/db/mysql"
"github.com/itsmeadi/cart/src/interfaces/product"
"github.com/itsmeadi/cart/src/interfaces/productByCategory"
"github.com/itsmeadi/cart/src/interfaces/web/api"
"github.com/itsmeadi/cart/src/usecase/cart"
"github.com/itsmeadi/cart/src/usecase/product_usecase"
"github.com/itsmeadi/cart/src/usecase/user_usecase"
"log"
"net/http"
)
// main wires the cart service together — database, product cache/services,
// use cases, and the HTTP API — then serves on :9090.
func main() {
	conf := config.CF
	mysql.InitDb(conf.DB)
	productCache := cache.InitProductCache(conf.PrdCache.Timeout, conf.PrdCache.CacheResetTimeOut)
	productSer := product.InitService(conf.Product.Url, conf.Product.Timeout, productCache)
	productList := productByCategory.InitService(conf.ProductList.Url, conf.ProductList.Timeout)
	db := mysql.GetDb()
	// The same DB handle backs all three cart repositories.
	cartUC := cart.InitCartUseCase(cart.CartUseCase{
		CartItemsRepo:     db,
		CartXCartItemRepo: db,
		CartRepo:          db,
		Products:          &productSer,
	})
	userUC := user_usecase.InitUsecase(db)
	productUC := product_usecase.InitProductUseCase(&productSer)
	productListUC := product_usecase.InitProductListUseCase(&productList)
	apiStr := api.API{
		Interactor: &api.Interactor{
			Cart:              &cartUC,
			Product:           &productUC,
			ProductByCategory: &productListUC,
			User:              userUC,
		},
	}
	// NOTE(review): this local `api` shadows the imported api package from
	// here on — rename it if more api-package calls are ever added below.
	api := api.New(&apiStr)
	api.InitRoutes(mux.NewRouter())
	// NOTE(review): routes are registered on a fresh gorilla router that is
	// never handed to ListenAndServe (nil means http.DefaultServeMux) —
	// verify InitRoutes registers on the default mux, otherwise none of
	// these routes are actually served.
	log.Fatal(http.ListenAndServe(":9090", nil))
}
|
package volume
import (
"errors"
"fmt"
"strconv"
"time"
"github.com/Huawei/eSDK_K8S_Plugin/src/storage/fusionstorage/client"
"github.com/Huawei/eSDK_K8S_Plugin/src/utils"
"github.com/Huawei/eSDK_K8S_Plugin/src/utils/log"
"github.com/Huawei/eSDK_K8S_Plugin/src/utils/taskflow"
)
const (
	// notSupportSnapShotSpace disables snapshot space accounting on quotas
	// (used as "snap_space_switch" in createQuota).
	notSupportSnapShotSpace = 0
	// spaceQuotaUnitMB selects megabytes as the quota size unit
	// (used as "space_unit_type" in createQuota).
	spaceQuotaUnitMB = 2
	// quotaTargetFilesystem marks the quota as targeting a filesystem
	// directory (used as "directory_quota_target" in createQuota).
	quotaTargetFilesystem = 1
)
// NAS provisions and tears down FusionStorage NFS filesystem volumes
// through the storage REST client.
type NAS struct {
	cli *client.Client
}

// NewNAS wraps the given client in a NAS volume manager.
func NewNAS(cli *client.Client) *NAS {
	return &NAS{
		cli: cli,
	}
}
// preCreate validates and normalizes creation params in place: it requires
// a non-empty "authclient", resolves "storagepool" to a numeric pool ID,
// and rewrites "name"/"clonefrom" into internal filesystem names.
func (p *NAS) preCreate(params map[string]interface{}) error {
	authclient, exist := params["authclient"].(string)
	if !exist || authclient == "" {
		// Plain string literal — the previous fmt.Sprintf had no format
		// arguments (staticcheck S1039).
		msg := "authclient must be provided for filesystem"
		log.Errorln(msg)
		return errors.New(msg)
	}
	if v, exist := params["storagepool"].(string); exist {
		pool, err := p.cli.GetPoolByName(v)
		if err != nil {
			return err
		}
		if pool == nil {
			return fmt.Errorf("Storage pool %s doesn't exist", v)
		}
		// The REST client decodes numbers as float64; store the pool ID as int64.
		params["poolId"] = int64(pool["poolId"].(float64))
	}
	name := params["name"].(string)
	params["name"] = utils.GetFileSystemName(name)
	if v, exist := params["clonefrom"].(string); exist {
		params["clonefrom"] = utils.GetFileSystemName(v)
	}
	return nil
}
// Create provisions a complete NFS volume as a four-step task flow
// (filesystem, quota, share, share access); if any step fails, the
// completed steps are reverted.
func (p *NAS) Create(params map[string]interface{}) error {
	err := p.preCreate(params)
	if err != nil {
		return err
	}
	createTask := taskflow.NewTaskFlow("Create-FileSystem-Volume")
	createTask.AddTask("Create-FS", p.createFS, p.revertFS)
	createTask.AddTask("Create-Quota", p.createQuota, p.revertQuota)
	createTask.AddTask("Create-Share", p.createShare, p.revertShare)
	createTask.AddTask("Allow-Share-Access", p.allowShareAccess, nil)
	_, err = createTask.Run(params)
	if err != nil {
		createTask.Revert()
		return err
	}
	return nil
}
// createFS ensures the filesystem named in params exists (creating or
// cloning it when absent), waits until it is running, and returns its ID
// for the later task-flow steps.
func (p *NAS) createFS(params, taskResult map[string]interface{}) (map[string]interface{}, error) {
	fsName := params["name"].(string)
	fs, err := p.cli.GetFileSystemByName(fsName)
	if err != nil {
		log.Errorf("Get filesystem %s error: %v", fsName, err)
		return nil, err
	}
	if fs == nil {
		_, exist := params["clonefrom"]
		if exist {
			fs, err = p.clone(params)
		} else {
			fs, err = p.cli.CreateFileSystem(params)
		}
	}
	if err != nil {
		log.Errorf("Create filesystem %s error: %v", fsName, err)
		return nil, err
	}
	err = p.waitFilesystemCreated(fsName)
	if err != nil {
		return nil, err
	}
	// "id" arrives as a float64 from the JSON client; render it as a
	// decimal string for the rest of the flow.
	return map[string]interface{}{
		"fsID": strconv.FormatInt(int64(fs["id"].(float64)), 10),
	}, nil
}

// clone would create the filesystem from the "clonefrom" source; it is not
// implemented yet and always errors.
func (p *NAS) clone(params map[string]interface{}) (map[string]interface{}, error) {
	return nil, fmt.Errorf("unimplemented")
}
// revertFS is the rollback for createFS: it deletes the filesystem recorded
// in the task result, or does nothing when none was created.
func (p *NAS) revertFS(taskResult map[string]interface{}) error {
	fsID, exist := taskResult["fsID"].(string)
	if !exist {
		return nil
	}
	return p.deleteFS(fsID)
}

// deleteFS removes the filesystem by ID, logging and returning any error.
func (p *NAS) deleteFS(fsID string) error {
	err := p.cli.DeleteFileSystem(fsID)
	if err != nil {
		log.Errorf("Delete filesystem %s error: %v", fsID, err)
	}
	return err
}
// createQuota ensures a directory space quota exists for the filesystem
// created in the previous step; an existing quota is left untouched.
func (p *NAS) createQuota(params, taskResult map[string]interface{}) (map[string]interface{}, error) {
	fsID, _ := taskResult["fsID"].(string)
	quota, err := p.cli.GetQuotaByFileSystem(fsID)
	if err != nil {
		log.Errorf("Get filesystem %s quota error: %v", fsID, err)
		return nil, err
	}
	if quota == nil {
		// NOTE(review): params["capacity"] is assumed to be an int64 set
		// by an earlier stage; the assertion panics if it is missing or a
		// different type — confirm against the callers.
		quotaParams := map[string]interface{}{
			"parent_id":              fsID,
			"parent_type":            "40",
			"quota_type":             "1",
			"space_hard_quota":       params["capacity"].(int64),
			"snap_space_switch":      notSupportSnapShotSpace,
			"space_unit_type":        spaceQuotaUnitMB,
			"directory_quota_target": quotaTargetFilesystem,
		}
		err := p.cli.CreateQuota(quotaParams)
		if err != nil {
			log.Errorf("Create filesystem quota %v error: %v", quotaParams, err)
			return nil, err
		}
	}
	return nil, nil
}

// revertQuota is the rollback for createQuota: it removes the filesystem's
// quota when a filesystem was created, otherwise does nothing.
func (p *NAS) revertQuota(taskResult map[string]interface{}) error {
	fsID, exist := taskResult["fsID"].(string)
	if !exist {
		return nil
	}
	return p.deleteQuota(fsID)
}
// deleteQuota looks up and deletes the quota attached to the given
// filesystem; a missing quota is not an error.
func (p *NAS) deleteQuota(fsID string) error {
	quota, err := p.cli.GetQuotaByFileSystem(fsID)
	if err != nil {
		log.Errorf("Get filesystem %s quota error: %v", fsID, err)
		return err
	}
	if quota != nil {
		quotaId := quota["id"].(string)
		err := p.cli.DeleteQuota(quotaId)
		if err != nil {
			log.Errorf("Delete filesystem quota %s error: %v", quotaId, err)
			return err
		}
	}
	return nil
}
// createShare ensures an NFS share exists at the filesystem's share path
// and returns the share ID for the access-granting step.
func (p *NAS) createShare(params, taskResult map[string]interface{}) (map[string]interface{}, error) {
	fsName := params["name"].(string)
	sharePath := utils.GetFSSharePath(fsName)
	share, err := p.cli.GetNfsShareByPath(sharePath)
	if err != nil {
		log.Errorf("Get nfs share by path %s error: %v", sharePath, err)
		return nil, err
	}
	if share == nil {
		shareParams := map[string]interface{}{
			"sharepath":   sharePath,
			"fsid":        taskResult["fsID"].(string),
			"description": "Created from Kubernetes Provisioner",
		}
		share, err = p.cli.CreateNfsShare(shareParams)
		if err != nil {
			log.Errorf("Create nfs share %v error: %v", shareParams, err)
			return nil, err
		}
	}
	return map[string]interface{}{
		"shareID": share["id"].(string),
	}, nil
}
// waitFilesystemCreated polls (every 5s, for up to 6h) until the named
// filesystem reports running_status 0 (ok). The status is read with a
// checked type assertion: the previous unchecked fs["running_status"].(float64)
// panicked when the filesystem lookup returned nil or the field was
// missing; such states now simply keep the poll waiting.
func (p *NAS) waitFilesystemCreated(fsName string) error {
	err := utils.WaitUntil(func() (bool, error) {
		fs, err := p.cli.GetFileSystemByName(fsName)
		if err != nil {
			return false, err
		}
		status, ok := fs["running_status"].(float64)
		if !ok {
			// Filesystem not visible yet (or status missing): keep waiting.
			return false, nil
		}
		return status == 0, nil // 0 means the filesystem is ok
	}, time.Hour*6, time.Second*5)
	return err
}
func (p *NAS) revertShare(taskResult map[string]interface{}) error {
shareID, exist := taskResult["shareID"].(string)
if !exist {
return nil
}
return p.deleteShare(shareID)
}
// deleteShare removes the NFS share identified by shareID.
func (p *NAS) deleteShare(shareID string) error {
	if err := p.cli.DeleteNfsShare(shareID); err != nil {
		log.Errorf("Delete share %s error: %v", shareID, err)
		return err
	}
	return nil
}
// allowShareAccess grants NFS access on the share created earlier in the
// task (taskResult["shareID"]) to the client named in params["authclient"].
// It always returns a nil result map; only the error matters to callers.
func (p *NAS) allowShareAccess(params, taskResult map[string]interface{}) (map[string]interface{}, error) {
	accessParams := map[string]interface{}{
		"name":      params["authclient"].(string),
		"shareid":   taskResult["shareID"].(string),
		"accessval": 1,
	}
	if err := p.cli.AllowNfsShareAccess(accessParams); err != nil {
		log.Errorf("Allow nfs share access %v error: %v", accessParams, err)
		return nil, err
	}
	return nil, nil
}
// Delete removes the NFS share (if present) and the backing filesystem,
// including its quota, for the volume identified by name. It is
// idempotent: a missing share and a missing filesystem are not errors.
//
// Fix: the original duplicated the quota+filesystem deletion sequence in
// both branches and used else-after-return; resolve fsID in each branch
// and share a single deletion tail.
func (p *NAS) Delete(name string) error {
	sharePath := utils.GetFSSharePath(name)
	share, err := p.cli.GetNfsShareByPath(sharePath)
	if err != nil {
		log.Errorf("Get nfs share by path %s error: %v", sharePath, err)
		return err
	}

	var fsID string
	if share == nil {
		log.Infof("Share %s to delete does not exist, continue to delete filesystem", sharePath)
		fsName := utils.GetFileSystemName(name)
		fs, err := p.cli.GetFileSystemByName(fsName)
		if err != nil {
			log.Errorf("Get filesystem %s error: %v", fsName, err)
			return err
		}
		if fs == nil {
			log.Infof("Filesystem %s to delete does not exist", fsName)
			return nil
		}
		fsID = strconv.FormatInt(int64(fs["id"].(float64)), 10)
	} else {
		shareID := share["id"].(string)
		if err := p.cli.DeleteNfsShare(shareID); err != nil {
			log.Errorf("Delete nfs share %s error: %v", shareID, err)
			return err
		}
		fsID = share["file_system_id"].(string)
	}

	// Common tail: drop the quota first, then the filesystem itself.
	if err := p.deleteQuota(fsID); err != nil {
		log.Errorf("Delete filesystem %s quota error: %v", fsID, err)
		return err
	}
	if err := p.deleteFS(fsID); err != nil {
		log.Errorf("Delete filesystem %s error: %v", fsID, err)
		return err
	}
	return nil
}
|
package dpos
import (
"testing"
"time"
"github.com/aergoio/aergo/consensus/impl/dpos/slot"
"github.com/aergoio/aergo/types"
"github.com/stretchr/testify/assert"
)
const (
	nSlots     = 5 // NOTE(review): unused in the visible tests — confirm before removing
	bpInterval = 1 // block-producer interval (seconds) passed to slot.Init
)

// TestDposFutureBlock checks that VerifyTimestamp rejects a block whose
// timestamp lies in the future and accepts present/past timestamps.
func TestDposFutureBlock(t *testing.T) {
	slot.Init(bpInterval)
	dpos := &DPoS{}
	block := types.NewBlock(nil, nil, nil, nil, nil, time.Now().Add(3*time.Second).UnixNano())
	assert.True(t, !dpos.VerifyTimestamp(block), "future block check failed")
	block = types.NewBlock(nil, nil, nil, nil, nil, time.Now().UnixNano())
	assert.True(t, dpos.VerifyTimestamp(block), "future block check failed")
	block = types.NewBlock(nil, nil, nil, nil, nil, time.Now().Add(-time.Second).UnixNano())
	assert.True(t, dpos.VerifyTimestamp(block), "future block check failed")
}

// TestDposPastBlock checks that once a LIB (last irreversible block) is
// set, a block that does not build past it fails verification.
func TestDposPastBlock(t *testing.T) {
	slot.Init(bpInterval)
	dpos := &DPoS{}
	block0 := types.NewBlock(nil, nil, nil, nil, nil, time.Now().UnixNano())
	assert.True(t, dpos.VerifyTimestamp(block0), "invalid timestamp")
	time.Sleep(time.Second)
	now := time.Now().UnixNano()
	block1 := types.NewBlock(block0, nil, nil, nil, nil, now)
	assert.True(t, dpos.VerifyTimestamp(block1), "invalid timestamp")
	// Add LIB, manually.
	dpos.Status = &Status{libState: &libStatus{}}
	dpos.Status.libState.Lib = newBlockInfo(block1)
	block2 := types.NewBlock(block0, nil, nil, nil, nil, now)
	// Test whether a block number error is raised or not by checking the
	// return value.
	assert.True(t, !dpos.VerifyTimestamp(block2), "block number error must be raised")
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-06-12 10:08
# @File : list.go
# @Description :
# @Attention :
*/
package array_list
// List is a minimal ordered collection abstraction.
type List interface {
	// Add appends data to the list.
	Add(data interface{})
	// RemoveByIndex removes and returns the element at index;
	// presumably it returns an error for an out-of-range index —
	// confirm against the implementations.
	RemoveByIndex(index int) (interface{}, error)
	// Show returns an iterator function; each call yields the next
	// element and a flag reporting whether the value is valid.
	Show() func() (interface{}, bool)
	// Size reports the number of elements.
	Size() int
}

// DoublyList is a List that additionally supports insertion at both ends.
type DoublyList interface {
	List
	InsertHead(data interface{})
	InsertTail(data interface{})
}
|
package q_test
import (
"github.com/elliotchance/gedcom"
"github.com/elliotchance/gedcom/q"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
)
// TestNodesWithTagPathExpr_Evaluate exercises tag-path evaluation over a
// document with two individuals: each statement in args selects child
// nodes by tag, applied level by level (BIRT, then DATE, etc.). The
// expected output is the GEDCOM rendering of the matched nodes; an empty
// string means no nodes matched.
func TestNodesWithTagPathExpr_Evaluate(t *testing.T) {
	engine := &q.Engine{}
	doc := gedcom.NewDocument()
	individual := doc.AddIndividual("P1")
	individual.AddBirthDate("16 Apr 1973")
	individual2 := doc.AddIndividual("P2")
	individual2.AddBirthDate("8 Mar 1884")
	for testName, test := range map[string]struct {
		input          interface{}
		args           []*q.Statement
		expectedGEDCOM string
	}{
		"Nil": {
			input:          nil,
			args:           nil,
			expectedGEDCOM: "",
		},
		"IndividualNone": {
			input:          individual,
			args:           nil,
			expectedGEDCOM: "",
		},
		"IndividualBIRT": {
			input: individual,
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "BIRT"}}},
			},
			expectedGEDCOM: "0 BIRT\n1 DATE 16 Apr 1973\n",
		},
		// DATE is not a direct child of the individual, so no match.
		"IndividualDATE": {
			input: individual,
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "DATE"}}},
			},
			expectedGEDCOM: "",
		},
		"IndividualBIRTDATE": {
			input: individual,
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "BIRT"}}},
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "DATE"}}},
			},
			expectedGEDCOM: "0 DATE 16 Apr 1973\n",
		},
		"IndividualBIRTDEAT": {
			input: individual,
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "BIRT"}}},
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "DEAT"}}},
			},
			expectedGEDCOM: "",
		},
		// The same paths evaluated over all individuals in the document.
		"IndividualsBIRT": {
			input: doc.Individuals(),
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "BIRT"}}},
			},
			expectedGEDCOM: "0 BIRT\n1 DATE 16 Apr 1973\n0 BIRT\n1 DATE 8 Mar 1884\n",
		},
		"IndividualsDATE": {
			input: doc.Individuals(),
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "DATE"}}},
			},
			expectedGEDCOM: "",
		},
		"IndividualsBIRTDATE": {
			input: doc.Individuals(),
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "BIRT"}}},
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "DATE"}}},
			},
			expectedGEDCOM: "0 DATE 16 Apr 1973\n0 DATE 8 Mar 1884\n",
		},
		"IndividualsBIRTDEAT": {
			input: doc.Individuals(),
			args: []*q.Statement{
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "BIRT"}}},
				{Expressions: []q.Expression{&q.ConstantExpr{Value: "DEAT"}}},
			},
			expectedGEDCOM: "",
		},
	} {
		t.Run(testName, func(t *testing.T) {
			actual, err := (&q.NodesWithTagPathExpr{}).Evaluate(engine, test.input, test.args)
			require.NoError(t, err)
			assert.Equal(t, test.expectedGEDCOM, gedcom.NewDocumentWithNodes(actual.(gedcom.Nodes)).String())
		})
	}
}
|
package buildah
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"github.com/werf/werf/pkg/buildah/types"
"github.com/werf/werf/pkg/werf"
)
// Package-level defaults; NOTE(review): their consumers are outside this
// file — confirm usage before changing values.
const (
	DefaultShmSize              = "65536k"
	BuildahImage                = "ghcr.io/werf/buildah:v1.22.3-1"
	BuildahStorageContainerName = "werf-buildah-storage"
)
// CommonOpts carries options shared by every Buildah operation.
type CommonOpts struct {
	LogWriter io.Writer // destination for the operation's log output
}

// BuildFromDockerfileOpts configures BuildFromDockerfile.
type BuildFromDockerfileOpts struct {
	CommonOpts
	ContextTar io.Reader // tar stream with the build context
}

// RunCommandOpts configures RunCommand.
type RunCommandOpts struct {
	CommonOpts
	BuildArgs []string
}

// FromCommandOpts configures FromCommand.
type FromCommandOpts struct {
	CommonOpts
}

// PullOpts configures Pull.
type PullOpts struct {
	CommonOpts
}

// PushOpts configures Push.
type PushOpts struct {
	CommonOpts
}

// TagOpts configures Tag.
type TagOpts struct {
	CommonOpts
}

// RmiOpts configures Rmi.
type RmiOpts struct {
	CommonOpts
	Force bool // remove the image even if in use — TODO confirm exact semantics in the backends
}
// Buildah abstracts the image build/publish operations werf needs; it is
// implemented by the native-rootless and docker-with-fuse backends
// created in NewBuildah.
type Buildah interface {
	Tag(ctx context.Context, ref, newRef string, opts TagOpts) error
	Push(ctx context.Context, ref string, opts PushOpts) error
	BuildFromDockerfile(ctx context.Context, dockerfile []byte, opts BuildFromDockerfileOpts) (string, error)
	RunCommand(ctx context.Context, container string, command []string, opts RunCommandOpts) error
	FromCommand(ctx context.Context, container string, image string, opts FromCommandOpts) error
	Pull(ctx context.Context, ref string, opts PullOpts) error
	Inspect(ctx context.Context, ref string) (*types.BuilderInfo, error)
	Rmi(ctx context.Context, ref string, opts RmiOpts) error
}
// Mode selects the buildah execution backend.
type Mode string

const (
	// ModeAuto picks a concrete backend based on the host OS; see ResolveMode.
	ModeAuto           Mode = "auto"
	ModeNativeRootless Mode = "native-rootless"
	ModeDockerWithFuse Mode = "docker-with-fuse"
)
// ProcessStartupHook runs the startup hook for the resolved mode. Only
// the native-rootless backend has one; the returned bool is whatever the
// hook reports (false for docker-with-fuse). Unknown modes are an error.
func ProcessStartupHook(mode Mode) (bool, error) {
	resolved := ResolveMode(mode)
	if resolved == ModeNativeRootless {
		return NativeRootlessProcessStartupHook(), nil
	}
	if resolved == ModeDockerWithFuse {
		return false, nil
	}
	return false, fmt.Errorf("unsupported mode %q", mode)
}
// CommonBuildahOpts holds construction settings shared by all backends.
type CommonBuildahOpts struct {
	TmpDir   string // scratch directory; NewBuildah defaults it to <werf home>/buildah/tmp
	Insecure bool
}

// NativeRootlessModeOpts holds native-rootless-specific settings (none yet).
type NativeRootlessModeOpts struct{}

// DockerWithFuseModeOpts holds docker-with-fuse-specific settings (none yet).
type DockerWithFuseModeOpts struct{}

// BuildahOpts aggregates all construction options for NewBuildah.
type BuildahOpts struct {
	CommonBuildahOpts
	DockerWithFuseModeOpts
	NativeRootlessModeOpts
}
// NewBuildah constructs the backend selected by mode (after ResolveMode).
// TmpDir defaults to <werf home>/buildah/tmp when unset. It panics if
// the native-rootless backend is requested on a non-Linux OS.
//
// Fix: wrap the constructor error with %w so callers can inspect the
// cause via errors.Is/As; flatten the redundant inner GOOS switch.
func NewBuildah(mode Mode, opts BuildahOpts) (b Buildah, err error) {
	if opts.CommonBuildahOpts.TmpDir == "" {
		opts.CommonBuildahOpts.TmpDir = filepath.Join(werf.GetHomeDir(), "buildah", "tmp")
	}
	switch ResolveMode(mode) {
	case ModeNativeRootless:
		if runtime.GOOS != "linux" {
			panic("ModeNativeRootless can't be used on this OS")
		}
		b, err = NewNativeRootlessBuildah(opts.CommonBuildahOpts, opts.NativeRootlessModeOpts)
		if err != nil {
			return nil, fmt.Errorf("unable to create new Buildah instance with mode %q: %w", mode, err)
		}
	case ModeDockerWithFuse:
		b, err = NewDockerWithFuseBuildah(opts.CommonBuildahOpts, opts.DockerWithFuseModeOpts)
		if err != nil {
			return nil, fmt.Errorf("unable to create new Buildah instance with mode %q: %w", mode, err)
		}
	default:
		return nil, fmt.Errorf("unsupported mode %q", mode)
	}
	return b, nil
}
// ResolveMode maps ModeAuto to a concrete backend for the host OS
// (native rootless on Linux, docker-with-fuse elsewhere); any other mode
// is returned unchanged.
func ResolveMode(mode Mode) Mode {
	if mode != ModeAuto {
		return mode
	}
	if runtime.GOOS == "linux" {
		return ModeNativeRootless
	}
	return ModeDockerWithFuse
}
// debug reports whether buildah debug output is enabled via the
// WERF_BUILDAH_DEBUG environment variable.
func debug() bool {
	v := os.Getenv("WERF_BUILDAH_DEBUG")
	return v == "1"
}
|
// Copyright 2019 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package nfs
import (
"errors"
"path"
"strings"
log "github.com/golang/glog"
. "github.com/sodafoundation/dock/contrib/drivers/utils/config"
"github.com/sodafoundation/dock/pkg/model"
pb "github.com/sodafoundation/dock/pkg/model/proto"
"github.com/sodafoundation/dock/pkg/utils"
"github.com/sodafoundation/dock/pkg/utils/config"
uuid "github.com/satori/go.uuid"
)
// Driver defaults and device naming conventions.
const (
	defaultTgtConfDir = "/etc/tgt/conf.d"
	defaultTgtBindIp  = "127.0.0.1"
	defaultConfPath   = "/etc/opensds/driver/nfs.yaml"
	FileSharePrefix   = "fileshare-"
	snapshotPrefix    = "_snapshot-"
	blocksize         = 4096
	sizeShiftBit      = 30 // NOTE(review): presumably a GiB shift; consumers are outside this file
	opensdsnvmepool   = "opensds-nvmegroup"
	nvmeofAccess      = "nvmeof"
	iscsiAccess       = "iscsi"
)

// Metadata keys and access levels used in share/snapshot specs.
const (
	KLvPath            = "lvPath"
	KLvsPath           = "lvsPath"
	KFileshareName     = "nfsFileshareName"
	KFileshareID       = "nfsFileshareID"
	KFileshareSnapName = "snapshotName"
	KFileshareSnapID   = "snapshotID"
	AccessLevelRo      = "ro"
	AccessLevelRw      = "rw"
	MountPath          = "/mnt"
)
// NFSConfig is the YAML-backed driver configuration loaded in Setup.
type NFSConfig struct {
	TgtBindIp      string                    `yaml:"tgtBindIp"` // server IP used to build export locations
	TgtConfDir     string                    `yaml:"tgtConfDir"`
	EnableChapAuth bool                      `yaml:"enableChapAuth"`
	Pool           map[string]PoolProperties `yaml:"pool,flow"` // pools keyed by volume-group name
}

// Driver implements the NFS file share dock driver.
type Driver struct {
	conf *NFSConfig // parsed configuration
	cli  *Cli       // shell/command backend client
}
// Setup loads the NFS driver configuration (falling back to the default
// config path) and initializes the CLI client.
func (d *Driver) Setup() error {
	d.conf = &NFSConfig{TgtBindIp: defaultTgtBindIp, TgtConfDir: defaultTgtConfDir}
	confPath := config.CONF.OsdsDock.Backends.NFS.ConfigPath
	if confPath == "" {
		confPath = defaultConfPath
	}
	if _, err := Parse(d.conf, confPath); err != nil {
		return err
	}
	cli, err := NewCli()
	if err != nil {
		return err
	}
	d.cli = cli
	return nil
}
func (*Driver) Unset() error { return nil }
// CreateFileShareAcl grants access on the file share named opt.Name to
// the client in opt.AccessTo. Write access is granted when any requested
// capability is "write" (case-insensitive); otherwise read-only access
// is granted. The "execute" capability is invalid and aborts the call.
//
// BUG FIX: the original compared the lower-cased capability against the
// mixed-case literal "Execute", which can never match after
// strings.ToLower, so "execute" was silently accepted and granted ro
// access. Compare the lower-cased form instead. (The original's
// behavior of returning (nil, nil) on an invalid permission is kept.)
func (d *Driver) CreateFileShareAcl(opt *pb.CreateFileShareAclOpts) (*model.FileShareAclSpec, error) {
	accessTo := opt.GetAccessTo()
	accessCapability := opt.GetAccessCapability()
	fname := opt.Name

	writePermissions := []string{"write"}
	writeAccess := false
	for _, value := range accessCapability {
		value = strings.ToLower(value)
		if value == "execute" {
			log.Error("invalid permission:", value)
			return nil, nil
		}
		if utils.Contains(writePermissions, value) {
			writeAccess = true
		}
	}

	access := AccessLevelRo
	if writeAccess {
		access = AccessLevelRw
	}

	if err := d.cli.CreateAccess(accessTo, access, fname); err != nil {
		log.Errorf("grant access %s to %s failed %v", accessTo, fname, err)
		return nil, err
	}

	return &model.FileShareAclSpec{
		BaseModel: &model.BaseModel{
			Id: opt.Id,
		},
		FileShareId:      opt.FileshareId,
		Type:             opt.Type,
		AccessCapability: accessCapability,
		AccessTo:         accessTo,
		Metadata:         map[string]string{},
	}, nil
}
// DeleteFileShareAcl revokes the access entry identified by opt.AccessTo
// from the file share named opt.Name.
func (d *Driver) DeleteFileShareAcl(opt *pb.DeleteFileShareAclOpts) error {
	accessTo := opt.GetAccessTo()
	fname := opt.Name
	err := d.cli.DeleteAccess(accessTo, fname)
	if err != nil {
		log.Error("cannot revoke access:", err)
	}
	return err
}
// CreateFileShare provisions an NFS file share. When opt.SnapshotId is
// set, the share is restored from an existing snapshot; otherwise a new
// logical volume is created, formatted, and mounted. Returns the share
// spec including its export location and the metadata later operations
// (delete, snapshot) rely on.
func (d *Driver) CreateFileShare(opt *pb.CreateFileShareOpts) (*model.FileShareSpec, error) {
	var fshare *model.FileShareSpec
	// Server IP used to build the export location.
	var server = d.conf.TgtBindIp
	// Fileshare name.
	var name = opt.GetName()
	// Volume group backing this share.
	var vg = opt.GetPoolName()
	// Directory the share will be mounted on.
	var dirName = path.Join(MountPath, name)
	// Logical volume device path for the new share.
	var lvPath = path.Join("/dev", vg, name)
	if err := d.cli.CreateDirectory(dirName); err != nil {
		log.Error("failed to create a directory:", err)
		return nil, err
	}
	if opt.SnapshotId != "" {
		// Restore path: create the fileshare from an existing snapshot.
		var existingFsName = opt.GetMetadata()[KFileshareName]
		var vg = opt.GetPoolName()
		// Device path of the snapshot logical volume.
		var lvPathForSnap = path.Join("/dev", vg, opt.SnapshotName)
		// Device path of the existing fileshare's logical volume.
		var lvPathExistingPath = path.Join("/dev", vg, existingFsName)
		// Directory where the existing fileshare is mounted.
		var olddirName = path.Join(MountPath, existingFsName)
		// Unmount the existing fileshare before restoring from the snapshot.
		if err := d.cli.UnMount(olddirName); err != nil {
			log.Error("failed to unmount a directory:", err)
			return nil, err
		}
		if err := d.cli.CreateFileShareFromSnapshot(lvPathForSnap); err != nil {
			log.Error("failed to create filesystem from given snapshot:", err)
			return nil, err
		}
		// NOTE(review): this mounts the EXISTING fileshare's device at the
		// new share's directory, not a snapshot-derived volume — confirm
		// this is the intended restore semantics.
		if err := d.cli.Mount(lvPathExistingPath, dirName); err != nil {
			log.Error("failed to mount a directory:", err)
			return nil, err
		}
	} else {
		if err := d.cli.CreateVolume(name, vg, opt.GetSize()); err != nil {
			return nil, err
		}
		// Roll back the new volume if any later step fails; fshare is
		// only assigned on full success, so it doubles as the error flag.
		defer func() {
			// using return value as the error flag
			if fshare == nil {
				if err := d.cli.Delete(name, vg); err != nil {
					log.Error("failed to remove volume fileshare:", err)
				}
			}
		}()
		// Create the filesystem on the new logical volume.
		if err := d.cli.CreateFileShare(lvPath); err != nil {
			log.Error("failed to create filesystem logic volume:", err)
			return nil, err
		}
		// Mount the volume at the share directory.
		if err := d.cli.Mount(lvPath, dirName); err != nil {
			log.Error("failed to mount a directory:", err)
			return nil, err
		}
	}
	// Set permissions on the share directory.
	if err := d.cli.SetPermission(dirName); err != nil {
		log.Error("failed to set permission:", err)
		return nil, err
	}
	// Export location of the fileshare.
	var location []string
	location = []string{d.cli.GetExportLocation(name, server)}
	// NOTE(review): location is always built with exactly one element, so
	// this emptiness check can never trigger; an empty string returned by
	// GetExportLocation would still pass.
	if len(location) == 0 {
		errMsg := errors.New("failed to get exportlocation: export location is empty!")
		log.Error(errMsg)
		return nil, errMsg
	}
	fshare = &model.FileShareSpec{
		BaseModel: &model.BaseModel{
			Id: opt.GetId(),
		},
		Name:             opt.GetName(),
		Size:             opt.GetSize(),
		Description:      opt.GetDescription(),
		AvailabilityZone: opt.GetAvailabilityZone(),
		PoolId:           opt.GetPoolId(),
		Protocols:        []string{NFSProtocol},
		ExportLocations:  location,
		Metadata: map[string]string{
			KFileshareName:     name,
			KFileshareSnapName: "",
			KFileshareID:       opt.GetId(),
			KLvPath:            lvPath,
		},
	}
	return fshare, nil
}
// ListPools reports the storage pools (volume groups) that are declared
// in the driver configuration, with capacity data taken from the device.
// Volume groups not present in the config are skipped.
func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
	vgs, err := d.cli.ListVgs()
	if err != nil {
		return nil, err
	}
	var pools []*model.StoragePoolSpec
	for _, vg := range *vgs {
		props, ok := d.conf.Pool[vg.Name]
		if !ok {
			// Not configured as a pool: ignore this volume group.
			continue
		}
		az := props.AvailabilityZone
		if az == "" {
			az = "default"
		}
		pools = append(pools, &model.StoragePoolSpec{
			BaseModel: &model.BaseModel{
				Id: uuid.NewV5(uuid.NamespaceOID, vg.UUID).String(),
			},
			Name:             vg.Name,
			TotalCapacity:    vg.TotalCapacity,
			FreeCapacity:     vg.FreeCapacity,
			StorageType:      props.StorageType,
			Extras:           props.Extras,
			AvailabilityZone: az,
		})
	}
	return pools, nil
}
// DeleteFileShare unmounts and removes the file share named in the
// request metadata, then deletes its mount directory. A share that no
// longer exists is treated as already deleted.
func (d *Driver) DeleteFileShare(opt *pb.DeleteFileShareOpts) error {
	// Fileshare name recorded at creation time.
	fname := opt.GetMetadata()[KFileshareName]
	if !d.cli.Exists(fname) {
		log.Warningf("fileshare(%s) does not exist, nothing to remove", fname)
		return nil
	}
	// Logical volume path recorded at creation time.
	lvPath := opt.GetMetadata()[KLvPath]
	// Directory where the fileshare is mounted.
	var dirName = path.Join(MountPath, fname)
	// Unmount before deleting the backing volume.
	if err := d.cli.UnMount(dirName); err != nil {
		log.Error("failed to unmount the directory:", err)
		return err
	}
	// NOTE(review): CreateFileShare's rollback calls Delete(name, vg),
	// but here the second argument is the lv path — confirm which form
	// Cli.Delete actually expects.
	if err := d.cli.Delete(fname, lvPath); err != nil {
		log.Error("failed to remove logic volume:", err)
		return err
	}
	// Remove the now-unused mount directory.
	if err := d.cli.DeleteDirectory(dirName); err != nil {
		log.Error("failed to delete the directory:", err)
		return err
	}
	return nil
}
// CreateFileShareSnapshot creates an LVM snapshot of the file share whose
// logical volume path is recorded in the request metadata (key "lvPath",
// expected to look like /dev/<vg>/<lv>).
//
// Fix: the original indexed fields[2] and fields[3] without checking the
// split length, so a malformed lvPath would panic; validate first and
// return an error instead.
func (d *Driver) CreateFileShareSnapshot(opt *pb.CreateFileShareSnapshotOpts) (*model.FileShareSnapshotSpec, error) {
	lvPath, ok := opt.GetMetadata()[KLvPath]
	if !ok {
		err := errors.New("can't find 'lvPath' in snapshot metadata")
		log.Error(err)
		return nil, err
	}
	fields := strings.Split(lvPath, "/")
	if len(fields) < 4 {
		err := errors.New("invalid 'lvPath' in snapshot metadata: " + lvPath)
		log.Error(err)
		return nil, err
	}
	vg, sourceLvName := fields[2], fields[3]
	snapName := opt.GetName()
	if err := d.cli.CreateLvSnapshot(snapName, sourceLvName, vg, opt.GetSize()); err != nil {
		log.Error("failed to create logic volume snapshot:", err)
		return nil, err
	}
	return &model.FileShareSnapshotSpec{
		BaseModel: &model.BaseModel{
			Id: opt.GetId(),
		},
		Name:         opt.GetName(),
		SnapshotSize: opt.GetSize(),
		Description:  opt.GetDescription(),
		Metadata: map[string]string{
			KFileshareSnapName: snapName,
			KFileshareSnapID:   opt.GetId(),
			KLvPath:            lvPath,
		},
	}, nil
}
// DeleteFileShareSnapshot removes the LVM snapshot recorded in the
// request metadata. Missing metadata or a missing snapshot is treated as
// already deleted (returns nil).
//
// Fixes: the error message claimed the missing key was 'lvsPath' while
// the key actually checked is KLvPath ("lvPath"), and contained the typo
// "ingnore"; also guard the fields[2] index so a malformed path returns
// an error instead of panicking.
func (d *Driver) DeleteFileShareSnapshot(opt *pb.DeleteFileShareSnapshotOpts) error {
	lvsPath, ok := opt.GetMetadata()[KLvPath]
	if !ok {
		err := errors.New("can't find 'lvPath' in snapshot metadata, ignore it!")
		log.Error(err)
		return nil
	}
	snapName := opt.GetMetadata()[KFileshareSnapName]
	fields := strings.Split(lvsPath, "/")
	if len(fields) < 3 {
		err := errors.New("invalid 'lvPath' in snapshot metadata: " + lvsPath)
		log.Error(err)
		return err
	}
	vg := fields[2]
	if !d.cli.Exists(snapName) {
		log.Warningf("Snapshot(%s) does not exist, nothing to remove", snapName)
		return nil
	}
	if err := d.cli.DeleteFileShareSnapshots(snapName, vg); err != nil {
		log.Error("failed to remove logic volume:", err)
		return err
	}
	return nil
}
|
package main
/*
* @lc app=leetcode.cn id=11 lang=golang
*
* [11] 盛最多水的容器
*/
// @lc code=start
// maxArea solves "container with most water" with the classic two-pointer
// scan: start at the widest span and always advance the pointer at the
// shorter line, since moving the taller one can never increase the area.
func maxArea(height []int) int {
	best := 0
	for lo, hi := 0, len(height)-1; lo < hi; {
		width := hi - lo
		var depth int
		if height[lo] < height[hi] {
			depth = height[lo]
			lo++
		} else {
			depth = height[hi]
			hi--
		}
		if area := width * depth; area > best {
			best = area
		}
	}
	return best
}
// @lc code=end
|
package main
//测试用客户端
import (
"bytes"
//"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// hust is not referenced anywhere in this test client — NOTE(review):
// presumably leftover scratch; confirm before removing.
type hust struct {
	s string
}
// main is a manual test client: it concatenates the test strings into a
// single request body, POSTs it to the local log endpoint, and prints
// the response body.
func main() {
	test := []string{"test1", "test2", "test3", "test4", "test5"}
	server := "http://127.0.0.1:9090/?action=log"
	var _test []byte
	for _, v := range test {
		_test = append(_test, []byte(v)...)
	}
	body := bytes.NewReader(_test)
	// Empty content type: the server is expected not to care here.
	resp, err := http.Post(server, "", body)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	// Best-effort read; the error is deliberately ignored in this test tool.
	txt, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(txt))
}
|
package kafka
import (
"log"
"logAgent/config"
"github.com/Shopify/sarama"
)
// InitKafka builds a synchronous Kafka producer for the configured broker
// list: it waits for acks from all in-sync replicas, partitions randomly,
// and reports successes. The process exits if creation fails.
func InitKafka() (kafkaServerClient sarama.SyncProducer) {
	conf := sarama.NewConfig()
	conf.Producer.RequiredAcks = sarama.WaitForAll
	conf.Producer.Partitioner = sarama.NewRandomPartitioner
	conf.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer(config.KafkaAddressList, conf)
	if err != nil {
		log.Fatalf("producer create failed, %s", err)
	}
	return producer
}
|
// generated by stringer -type=OpCode; DO NOT EDIT
package main
import "fmt"
// Concatenated OpCode names: Char, Match, Jump, Split, Save, Nop.
const _OpCode_name = "CharMatchJumpSplitSaveNop"

// _OpCode_index holds the start offset of each name within _OpCode_name.
var _OpCode_index = [...]uint8{0, 4, 9, 13, 18, 22, 25}

// String returns the OpCode's name, or "OpCode(n)" for out-of-range values.
// (Generated by stringer — prefer regenerating over hand-editing.)
func (i OpCode) String() string {
	if i+1 >= OpCode(len(_OpCode_index)) {
		return fmt.Sprintf("OpCode(%d)", i)
	}
	return _OpCode_name[_OpCode_index[i]:_OpCode_index[i+1]]
}
|
package httputil
import (
"encoding/json"
"log"
"net/http"
"github.com/asaskevich/govalidator"
"github.com/sirupsen/logrus"
)
// ContentType enumerates the request/response encodings Util understands.
type ContentType int

const (
	JSON ContentType = iota // JSON-encoded bodies
	Form                    // form-encoded bodies
	HTML                    // declared but not handled by the Decode/Encode switches below
)

// jsonResponse is the envelope written by the JSON response helpers.
type jsonResponse struct {
	StatusCode int         `json:"status_code"`
	Messages   []string    `json:"messages"`
	Data       interface{} `json:"data"`
}

// Util bundles request decoding and response encoding behavior along
// with the sentinel errors used by ErrorJSON.
type Util struct {
	requestContentType  ContentType
	responseContentType ContentType
	appError            error // sentinel mapped to an internal-server-error response
	decodeRequestError  error // sentinel returned when request decoding fails
}
// SetApplicationError overrides the sentinel error that ErrorJSON maps
// to an internal-server-error response.
func (u *Util) SetApplicationError(err error) {
	u.appError = err
}

// SetDecodeRequestError overrides the sentinel error DecodeRequest
// returns when decoding fails.
func (u *Util) SetDecodeRequestError(err error) {
	u.decodeRequestError = err
}

// SetRequestContentType selects how incoming request bodies are decoded.
func (u *Util) SetRequestContentType(contentType ContentType) {
	u.requestContentType = contentType
}
// DecodeRequest unmarshals the request into req according to the
// configured request content type. Decode failures are logged and
// reported as the configured decodeRequestError; content types without a
// decoder are silently accepted.
func (u *Util) DecodeRequest(r *http.Request, req interface{}) error {
	var err error
	switch u.requestContentType {
	case JSON:
		err = DecodeJSONRequest(r, req)
	case Form:
		err = DecodeFormRequest(r, req)
	default:
		return nil
	}
	if err != nil {
		logrus.Error(err)
		return u.decodeRequestError
	}
	return nil
}
// DecodeValidateRequest decodes the request into req and then validates
// the struct; it reports whether validation passed along with any
// decode or validation error (both are logged).
func (u *Util) DecodeValidateRequest(r *http.Request, req interface{}) (bool, error) {
	if err := u.DecodeRequest(r, req); err != nil {
		logrus.Error(err)
		return false, err
	}
	isValid, err := govalidator.ValidateStruct(req)
	if err != nil {
		logrus.Error(err)
	}
	return isValid, err
}
// EncodeResponse serializes resp according to the configured response
// content type; content types without an encoder yield (nil, nil).
func (u *Util) EncodeResponse(resp interface{}) ([]byte, error) {
	if u.responseContentType == JSON {
		return EncodeJSONResponse(resp)
	}
	return nil, nil
}
// ErrorJSON writes err as a JSON error response. The configured appError
// sentinel becomes a 500 with the canned payload; the decodeRequestError
// sentinel becomes a 400; anything else uses the supplied statusCode.
func (u *Util) ErrorJSON(w http.ResponseWriter, err error, statusCode int, data interface{}) {
	w.Header().Set("Content-Type", "application/json")
	if err == u.appError {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write(u.appJsonError())
		return
	}
	if err == u.decodeRequestError {
		statusCode = http.StatusBadRequest
	}
	w.WriteHeader(statusCode)
	w.Write(u.encodeJSONResponse(statusCode, []string{err.Error()}, data))
}
// JSON writes a jsonResponse envelope with the given status code,
// messages, and data as an application/json response.
func (u *Util) JSON(w http.ResponseWriter, statusCode int, messages []string, data interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	w.Write(u.encodeJSONResponse(statusCode, messages, data))
}
// encodeJSONResponse marshals a jsonResponse envelope; on marshal
// failure it logs the error and falls back to the canned
// internal-server-error payload.
func (u *Util) encodeJSONResponse(statusCode int, messages []string, data interface{}) []byte {
	payload, err := json.Marshal(&jsonResponse{
		StatusCode: statusCode,
		Messages:   messages,
		Data:       data,
	})
	if err != nil {
		log.Println(err)
		return u.appJsonError()
	}
	return payload
}
// appJsonError returns the canned JSON body for an internal server
// error; the marshal of this fixed struct cannot fail.
func (u *Util) appJsonError() []byte {
	bs, _ := json.Marshal(&jsonResponse{
		StatusCode: http.StatusInternalServerError,
		Messages:   []string{"Internal Server Error"},
		Data:       nil,
	})
	return bs
}
// New returns a Util configured for JSON requests and responses, using
// the package's default sentinel errors.
func New() *Util {
	return &Util{
		requestContentType:  JSON,
		responseContentType: JSON,
		appError:            ErrInternalServerError,
		decodeRequestError:  ErrDecodeRequest,
	}
}
|
package main
import "fmt"
// NOTE(review): `as` is not referenced anywhere in this file — presumably
// leftover scratch; confirm before removing.
var (
	as = new([3]int)
)
// main demonstrates bubble sort on a fixed array: each outer pass floats
// the largest remaining element of arr[0..i] into position i, then the
// sorted array and a bound-method call are printed.
func main() {
	arr := [10]int{4, 4, 1, 2, 12, 5, 6, 834, 3, 0}
	fmt.Println(arr)
	for i := len(arr) - 1; i >= 0; i-- {
		for j := 0; j < i; j++ {
			if arr[j] > arr[j+1] {
				arr[j], arr[j+1] = arr[j+1], arr[j]
			}
		}
	}
	fmt.Println(&arr)
	s := Stu{"string"}
	s.print()
}
// Stu is a minimal struct demonstrating a value-receiver method.
type Stu struct {
	text string
}

// print writes the wrapped text to standard output.
func (s Stu) print() {
	fmt.Println("text:", s.text)
}
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grumpy
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"testing"
)
// NOTE(review): these values are not referenced by the tests visible in
// this file — presumably used elsewhere; confirm before removing.
var (
	wantFileOperationRead  = 1
	wantFileOperationWrite = 2
)
// TestFileInit exercises file.__init__ with a valid path, missing
// arguments, an invalid mode string, and a nonexistent file.
func TestFileInit(t *testing.T) {
	f := newTestFile("blah blah")
	defer f.cleanup()
	cases := []invokeTestCase{
		{args: wrapArgs(newObject(FileType), f.path), want: None},
		{args: wrapArgs(newObject(FileType)), wantExc: mustCreateException(TypeErrorType, "'__init__' requires 2 arguments")},
		{args: wrapArgs(newObject(FileType), f.path, "abc"), wantExc: mustCreateException(ValueErrorType, `invalid mode string: "abc"`)},
		{args: wrapArgs(newObject(FileType), "nonexistent-file"), wantExc: mustCreateException(IOErrorType, "open nonexistent-file: no such file or directory")},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(FileType, "__init__", &cas); err != "" {
			t.Error(err)
		}
	}
}

// TestFileClosed checks the "closed" attribute for uninitialized, open,
// and out-of-band-closed file objects.
func TestFileClosed(t *testing.T) {
	f := newTestFile("foo\nbar")
	defer f.cleanup()
	closedFile := f.open("r")
	// This puts the file into an invalid state since Grumpy thinks
	// it's open even though the underlying file was closed.
	closedFile.file.Close()
	cases := []invokeTestCase{
		{args: wrapArgs(newObject(FileType)), want: True.ToObject()},
		{args: wrapArgs(f.open("r")), want: False.ToObject()},
		{args: wrapArgs(closedFile), want: False.ToObject()},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(FileType, "closed", &cas); err != "" {
			t.Error(err)
		}
	}
}

// TestFileCloseExit verifies close() and __exit__ behave the same way,
// including the error raised when the underlying file is already closed.
func TestFileCloseExit(t *testing.T) {
	f := newTestFile("foo\nbar")
	defer f.cleanup()
	for _, method := range []string{"close", "__exit__"} {
		closedFile := f.open("r")
		// This puts the file into an invalid state since Grumpy thinks
		// it's open even though the underlying file was closed.
		closedFile.file.Close()
		cases := []invokeTestCase{
			{args: wrapArgs(newObject(FileType)), want: None},
			{args: wrapArgs(f.open("r")), want: None},
			{args: wrapArgs(closedFile), wantExc: mustCreateException(IOErrorType, closedFile.file.Close().Error())},
		}
		for _, cas := range cases {
			if err := runInvokeMethodTestCase(FileType, method, &cas); err != "" {
				t.Error(err)
			}
		}
	}
}

// TestFileGetName checks the "name" attribute for open and
// uninitialized file objects.
func TestFileGetName(t *testing.T) {
	fun := wrapFuncForTest(func(f *Frame, file *File) (*Object, *BaseException) {
		return GetAttr(f, file.ToObject(), NewStr("name"), nil)
	})
	foo := newTestFile("foo")
	defer foo.cleanup()
	cases := []invokeTestCase{
		{args: wrapArgs(foo.open("r")), want: NewStr(foo.path).ToObject()},
		{args: wrapArgs(newObject(FileType)), want: NewStr("<uninitialized file>").ToObject()},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestFileIter checks line iteration over the fixture files in both
// plain ("r") and universal-newline ("rU") modes, plus the error paths
// for out-of-band-closed and uninitialized files.
func TestFileIter(t *testing.T) {
	files := makeTestFiles()
	defer files.cleanup()
	closedFile := files[0].open("r")
	closedFile.file.Close()
	// Capture the OS-level error message a read on the closed fd yields.
	_, closedFileReadError := closedFile.file.Read(make([]byte, 10))
	cases := []invokeTestCase{
		{args: wrapArgs(files[0].open("r")), want: newTestList("foo").ToObject()},
		{args: wrapArgs(files[0].open("rU")), want: newTestList("foo").ToObject()},
		{args: wrapArgs(files[1].open("r")), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[1].open("rU")), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r")), want: newTestList("foo\n", "bar").ToObject()},
		{args: wrapArgs(files[2].open("rU")), want: newTestList("foo\n", "bar").ToObject()},
		{args: wrapArgs(files[3].open("r")), want: newTestList("foo\r\n").ToObject()},
		{args: wrapArgs(files[3].open("rU")), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[4].open("r")), want: newTestList("foo\rbar").ToObject()},
		{args: wrapArgs(files[4].open("rU")), want: newTestList("foo\n", "bar").ToObject()},
		{args: wrapArgs(closedFile), wantExc: mustCreateException(IOErrorType, closedFileReadError.Error())},
		{args: wrapArgs(newObject(FileType)), wantExc: mustCreateException(ValueErrorType, "I/O operation on closed file")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(ListType.ToObject(), &cas); err != "" {
			t.Error(err)
		}
	}
}

// TestFileNext checks that next() returns the first line of each fixture
// file, honoring universal-newline translation in "rU" mode.
func TestFileNext(t *testing.T) {
	files := makeTestFiles()
	defer files.cleanup()
	closedFile := files[0].open("r")
	closedFile.file.Close()
	_, closedFileReadError := closedFile.file.Read(make([]byte, 10))
	cases := []invokeTestCase{
		{args: wrapArgs(files[0].open("r")), want: NewStr("foo").ToObject()},
		{args: wrapArgs(files[0].open("rU")), want: NewStr("foo").ToObject()},
		{args: wrapArgs(files[1].open("r")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[1].open("rU")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("rU")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[3].open("r")), want: NewStr("foo\r\n").ToObject()},
		{args: wrapArgs(files[3].open("rU")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[4].open("r")), want: NewStr("foo\rbar").ToObject()},
		{args: wrapArgs(files[4].open("rU")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "unbound method next() must be called with file instance as first argument (got nothing instead)")},
		{args: wrapArgs(closedFile), wantExc: mustCreateException(IOErrorType, closedFileReadError.Error())},
		{args: wrapArgs(newObject(FileType)), wantExc: mustCreateException(ValueErrorType, "I/O operation on closed file")},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(FileType, "next", &cas); err != "" {
			t.Error(err)
		}
	}
}

// TestFileRead checks read() with no limit, a partial limit, and a limit
// larger than the file, plus argument-validation and error paths.
func TestFileRead(t *testing.T) {
	f := newTestFile("foo\nbar")
	defer f.cleanup()
	closedFile := f.open("r")
	closedFile.file.Close()
	_, closedFileReadError := closedFile.file.Read(make([]byte, 10))
	cases := []invokeTestCase{
		{args: wrapArgs(f.open("r")), want: NewStr("foo\nbar").ToObject()},
		{args: wrapArgs(f.open("r"), 3), want: NewStr("foo").ToObject()},
		{args: wrapArgs(f.open("r"), 1000), want: NewStr("foo\nbar").ToObject()},
		{args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "unbound method read() must be called with file instance as first argument (got nothing instead)")},
		{args: wrapArgs(closedFile), wantExc: mustCreateException(IOErrorType, closedFileReadError.Error())},
		{args: wrapArgs(newObject(FileType)), wantExc: mustCreateException(ValueErrorType, "I/O operation on closed file")},
		{args: wrapArgs(newObject(FileType), "abc"), wantExc: mustCreateException(ValueErrorType, "invalid literal for int() with base 10: abc")},
		{args: wrapArgs(newObject(FileType), 123, 456), wantExc: mustCreateException(TypeErrorType, "'read' of 'file' requires 2 arguments")},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(FileType, "read", &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestFileReadLine checks readline() across the fixture files with and
// without a byte limit, in plain and universal-newline modes, plus
// argument-validation and error paths.
func TestFileReadLine(t *testing.T) {
	files := makeTestFiles()
	defer files.cleanup()
	closedFile := files[0].open("r")
	closedFile.file.Close()
	_, closedFileReadError := closedFile.file.Read(make([]byte, 10))
	// Consume the first line so the next readline starts mid-file.
	partialReadFile := files[5].open("rU")
	partialReadFile.readLine(-1)
	cases := []invokeTestCase{
		{args: wrapArgs(files[0].open("r")), want: NewStr("foo").ToObject()},
		{args: wrapArgs(files[0].open("rU")), want: NewStr("foo").ToObject()},
		{args: wrapArgs(files[1].open("r")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[1].open("rU")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("rU")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r"), 2), want: NewStr("fo").ToObject()},
		{args: wrapArgs(files[2].open("r"), 3), want: NewStr("foo").ToObject()},
		{args: wrapArgs(files[2].open("r"), 4), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r"), 5), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[3].open("r")), want: NewStr("foo\r\n").ToObject()},
		{args: wrapArgs(files[3].open("rU")), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[3].open("rU"), 3), want: NewStr("foo").ToObject()},
		{args: wrapArgs(files[3].open("rU"), 4), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[3].open("rU"), 5), want: NewStr("foo\n").ToObject()},
		{args: wrapArgs(files[4].open("r")), want: NewStr("foo\rbar").ToObject()},
		{args: wrapArgs(files[4].open("rU")), want: NewStr("foo\n").ToObject()},
		// Ensure that reading after a \r\n returns the requested
		// number of bytes when possible. Check that the trailing \n
		// does not count toward the bytes read.
		{args: wrapArgs(partialReadFile, 3), want: NewStr("bar").ToObject()},
		{args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "unbound method readline() must be called with file instance as first argument (got nothing instead)")},
		{args: wrapArgs(closedFile), wantExc: mustCreateException(IOErrorType, closedFileReadError.Error())},
		{args: wrapArgs(newObject(FileType)), wantExc: mustCreateException(ValueErrorType, "I/O operation on closed file")},
		{args: wrapArgs(newObject(FileType), "abc"), wantExc: mustCreateException(ValueErrorType, "invalid literal for int() with base 10: abc")},
		{args: wrapArgs(newObject(FileType), 123, 456), wantExc: mustCreateException(TypeErrorType, "'readline' of 'file' requires 2 arguments")},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(FileType, "readline", &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestFileReadLines exercises file.readlines over the same newline-variant
// fixtures as TestFileReadLine. The optional argument is a size hint: once at
// least that many bytes have been read, reading stops at the next line
// boundary (so small hints can still return a full first line).
func TestFileReadLines(t *testing.T) {
	files := makeTestFiles()
	defer files.cleanup()
	// A file whose OS descriptor is already closed; readlines must surface
	// the same error a raw os.File read produces.
	closedFile := files[0].open("r")
	closedFile.file.Close()
	_, closedFileReadError := closedFile.file.Read(make([]byte, 10))
	// Consume "foo\r\n" so the next read starts just after the pair.
	partialReadFile := files[5].open("rU")
	partialReadFile.readLine(-1)
	cases := []invokeTestCase{
		{args: wrapArgs(files[0].open("r")), want: newTestList("foo").ToObject()},
		{args: wrapArgs(files[0].open("rU")), want: newTestList("foo").ToObject()},
		{args: wrapArgs(files[1].open("r")), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[1].open("rU")), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r")), want: newTestList("foo\n", "bar").ToObject()},
		{args: wrapArgs(files[2].open("rU")), want: newTestList("foo\n", "bar").ToObject()},
		{args: wrapArgs(files[2].open("r"), 2), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r"), 3), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r"), 4), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[2].open("r"), 5), want: newTestList("foo\n", "bar").ToObject()},
		{args: wrapArgs(files[3].open("r")), want: newTestList("foo\r\n").ToObject()},
		{args: wrapArgs(files[3].open("rU")), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[3].open("rU"), 3), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[3].open("rU"), 4), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[3].open("rU"), 5), want: newTestList("foo\n").ToObject()},
		{args: wrapArgs(files[4].open("r")), want: newTestList("foo\rbar").ToObject()},
		{args: wrapArgs(files[4].open("rU")), want: newTestList("foo\n", "bar").ToObject()},
		// Ensure that reading after a \r\n returns the requested
		// number of bytes when possible. Check that the trailing \n
		// does not count toward the bytes read.
		{args: wrapArgs(partialReadFile, 3), want: newTestList("bar\n").ToObject()},
		{args: wrapArgs(), wantExc: mustCreateException(TypeErrorType, "unbound method readlines() must be called with file instance as first argument (got nothing instead)")},
		{args: wrapArgs(closedFile), wantExc: mustCreateException(IOErrorType, closedFileReadError.Error())},
		{args: wrapArgs(newObject(FileType)), wantExc: mustCreateException(ValueErrorType, "I/O operation on closed file")},
		{args: wrapArgs(newObject(FileType), "abc"), wantExc: mustCreateException(ValueErrorType, "invalid literal for int() with base 10: abc")},
		{args: wrapArgs(newObject(FileType), 123, 456), wantExc: mustCreateException(TypeErrorType, "'readlines' of 'file' requires 2 arguments")},
	}
	for _, cas := range cases {
		if err := runInvokeMethodTestCase(FileType, "readlines", &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestFileStrRepr checks that both str() and repr() of file objects match the
// CPython-style '<open|closed file "...", mode "..." at 0x...>' rendering.
// Each case supplies a file object and a regexp both renderings must match.
func TestFileStrRepr(t *testing.T) {
	fun := newBuiltinFunction("TestFileStrRepr", func(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
		if raised := checkFunctionArgs(f, "TestFileStrRepr", args, ObjectType, StrType); raised != nil {
			return nil, raised
		}
		o := args[0]
		re := regexp.MustCompile(toStrUnsafe(args[1]).Value())
		// str() must match the pattern...
		s, raised := ToStr(f, o)
		if raised != nil {
			return nil, raised
		}
		Assert(f, GetBool(re.MatchString(s.Value())).ToObject(), nil)
		// ...and repr() must match the same pattern.
		s, raised = Repr(f, o)
		if raised != nil {
			return nil, raised
		}
		Assert(f, GetBool(re.MatchString(s.Value())).ToObject(), nil)
		return None, nil
	}).ToObject()
	f := newTestFile("foo\nbar")
	defer f.cleanup()
	closedFile := f.open("r").ToObject()
	mustNotRaise(fileClose(NewRootFrame(), []*Object{closedFile}, nil))
	// Open a file for write.
	writeFile := f.open("wb")
	cases := []invokeTestCase{
		{args: wrapArgs(f.open("r"), `^<open file "[^"]+", mode "r" at \w+>$`), want: None},
		{args: wrapArgs(writeFile, `^<open file "[^"]+", mode "wb" at \w+>$`), want: None},
		{args: wrapArgs(newObject(FileType), `^<closed file "<uninitialized file>", mode "<uninitialized file>" at \w+>$`), want: None},
		{args: wrapArgs(closedFile, `^<closed file "[^"]+", mode "r" at \w+>$`), want: None},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestFileWrite verifies write semantics for every open mode: "w"/"w+"
// truncate, "a"/"a+" append, "r+" overwrites in place from the start, "r+"
// on a missing file fails, and writing to a read-only handle fails. The
// helper opens (path, mode), writes the payload, then returns the file's
// final on-disk contents for comparison.
func TestFileWrite(t *testing.T) {
	fun := newBuiltinFunction("TestFileWrite", func(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
		if raised := checkMethodArgs(f, "TestFileWrite", args, StrType, StrType, StrType); raised != nil {
			return nil, raised
		}
		// args[0] = path, args[1] = mode, args[2] = payload to write.
		writeFile, raised := FileType.Call(f, args[:2], nil)
		if raised != nil {
			return nil, raised
		}
		write, raised := GetAttr(f, writeFile, NewStr("write"), nil)
		if raised != nil {
			return nil, raised
		}
		if _, raised := write.Call(f, args[2:], nil); raised != nil {
			return nil, raised
		}
		// Read the file back with plain Go I/O to observe the real result.
		contents, err := ioutil.ReadFile(toStrUnsafe(args[0]).Value())
		if err != nil {
			return nil, f.RaiseType(RuntimeErrorType, fmt.Sprintf("error reading file: %s", err.Error()))
		}
		return NewStr(string(contents)).ToObject(), nil
	}).ToObject()
	// Create a temporary directory and cd to it.
	dir, err := ioutil.TempDir("", "TestFileWrite")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)
	oldWd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Getwd() failed: %s", err)
	}
	if err := os.Chdir(dir); err != nil {
		t.Fatalf("Chdir(%q) failed: %s", dir, err)
	}
	defer os.Chdir(oldWd)
	// Pre-create fixtures whose contents equal their own filename, so
	// append/overwrite results are easy to predict.
	for _, filename := range []string{"truncate.txt", "readonly.txt", "append.txt", "rplus.txt", "aplus.txt", "wplus.txt"} {
		if err := ioutil.WriteFile(filename, []byte(filename), 0644); err != nil {
			t.Fatalf("ioutil.WriteFile(%q) failed: %s", filename, err)
		}
	}
	cases := []invokeTestCase{
		{args: wrapArgs("noexist.txt", "w", "foo\nbar"), want: NewStr("foo\nbar").ToObject()},
		{args: wrapArgs("truncate.txt", "w", "new contents"), want: NewStr("new contents").ToObject()},
		{args: wrapArgs("append.txt", "a", "\nbar"), want: NewStr("append.txt\nbar").ToObject()},
		{args: wrapArgs("rplus.txt", "r+", "fooey"), want: NewStr("fooey.txt").ToObject()},
		{args: wrapArgs("noexistplus1.txt", "r+", "pooey"), wantExc: mustCreateException(IOErrorType, "open noexistplus1.txt: no such file or directory")},
		{args: wrapArgs("aplus.txt", "a+", "\napper"), want: NewStr("aplus.txt\napper").ToObject()},
		{args: wrapArgs("noexistplus3.txt", "a+", "snappbacktoreality"), want: NewStr("snappbacktoreality").ToObject()},
		{args: wrapArgs("wplus.txt", "w+", "destructo"), want: NewStr("destructo").ToObject()},
		{args: wrapArgs("noexistplus2.txt", "w+", "wapper"), want: NewStr("wapper").ToObject()},
		{args: wrapArgs("readonly.txt", "r", "foo"), wantExc: mustCreateException(IOErrorType, "write readonly.txt: bad file descriptor")},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(fun, &cas); err != "" {
			t.Error(err)
		}
	}
}
// testFile is a temporary on-disk fixture plus every *File handle opened on
// it, so file handles and the backing file can be released together.
type testFile struct {
	path  string  // absolute path of the temp file
	files []*File // handles returned by open(), closed by cleanup()
}
// newTestFile writes contents to a fresh temp file and returns a fixture
// tracking its path. Any setup failure panics, since tests cannot proceed
// without the fixture.
func newTestFile(contents string) *testFile {
	tmp, err := ioutil.TempFile("", "")
	if err != nil {
		panic(err)
	}
	_, err = tmp.WriteString(contents)
	if err != nil {
		panic(err)
	}
	if closeErr := tmp.Close(); closeErr != nil {
		panic(closeErr)
	}
	return &testFile{path: tmp.Name()}
}
// cleanup closes every opened handle and deletes the backing temp file.
func (f *testFile) cleanup() {
	for i := range f.files {
		f.files[i].file.Close()
	}
	os.Remove(f.path)
}
// open opens the fixture's path in the given mode through the Python file
// constructor, records the handle for cleanup, and returns it. It panics if
// the constructor does not yield a file object.
func (f *testFile) open(mode string) *File {
	callArgs := wrapArgs(f.path, mode)
	result := mustNotRaise(FileType.Call(NewRootFrame(), callArgs, nil))
	if result == nil || !result.isInstance(FileType) {
		panic(fmt.Sprintf("file%v = %v, want file object", callArgs, result))
	}
	opened := toFileUnsafe(result)
	f.files = append(f.files, opened)
	return opened
}
// testFileSlice groups fixtures so a test can release them all with a single
// deferred cleanup call.
type testFileSlice []*testFile
// makeTestFiles creates one fixture per newline convention exercised by the
// read tests: no terminator, trailing \n, embedded \n, \r\n, bare \r, and a
// multi-line \r\n file.
func makeTestFiles() testFileSlice {
	contents := []string{
		"foo",
		"foo\n",
		"foo\nbar",
		"foo\r\n",
		"foo\rbar",
		"foo\r\nbar\r\nbaz",
	}
	fixtures := make(testFileSlice, 0, len(contents))
	for _, c := range contents {
		fixtures = append(fixtures, newTestFile(c))
	}
	return fixtures
}
// cleanup releases every fixture in the slice.
func (files testFileSlice) cleanup() {
	for i := range files {
		files[i].cleanup()
	}
}
|
package controllers
import (
"net/http"
"github.com/gin-gonic/gin"
)
// statusMessage maps HTTP status codes to localized (Chinese) user-facing
// messages. Codes not listed fall back to http.StatusText — see GetStatusMsg.
var statusMessage = map[int]string{
	http.StatusBadRequest:          "参数有误",
	http.StatusUnauthorized:        "缺少认证信息",
	http.StatusForbidden:           "无权限",
	http.StatusMethodNotAllowed:    "服务器未实现的请求方法",
	http.StatusInternalServerError: "服务器出错",
	http.StatusNotImplemented:      "服务器未实现",
}
// ResponseJson writes the standard JSON envelope {code, message, data} with
// the given HTTP status code; the message is derived via GetStatusMsg.
func ResponseJson(c *gin.Context, httpCode int, data interface{}) {
	// Delegate to ResponseJsonMore with no extra fields so the envelope is
	// built in exactly one place. Ranging over a nil map is a no-op, so the
	// response is byte-identical to the previous inline construction.
	ResponseJsonMore(c, httpCode, data, nil)
}
// ResponseJsonMore writes the standard JSON envelope {code, message, data}
// and merges any additional key/value pairs from moreInfo into the top-level
// response object (moreInfo keys can shadow the standard ones).
func ResponseJsonMore(c *gin.Context, httpCode int, data interface{}, moreInfo map[string]interface{}) {
	payload := map[string]interface{}{
		"code":    httpCode,
		"message": GetStatusMsg(httpCode),
		"data":    data,
	}
	for key, value := range moreInfo {
		payload[key] = value
	}
	c.JSON(httpCode, payload)
}
// GetStatusMsg returns the localized message for a known status code,
// falling back to the standard English http.StatusText otherwise.
func GetStatusMsg(code int) string {
	if msg, ok := statusMessage[code]; ok {
		return msg
	}
	return http.StatusText(code)
}
// Ping is a health-check handler that always answers 200 with a nil data
// payload.
func Ping(c *gin.Context) {
	ResponseJson(c, http.StatusOK, nil)
}
|
package main
import (
"fmt"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
// HouseChange is one row of the house_change table: a record of a customer
// moving from one house to another, including the reason and any charge.
type HouseChange struct {
	Recordid       int64  `gorm:"primary_key;type:int(11) auto_increment;not null"`
	Customerid     int64  `gorm:"type:int(11)"`
	OldHouseid     int64  `gorm:"type:int(11)"`
	PresentHouseid int64  `gorm:"type:int(11)"`
	Reason         string `gorm:"type:varchar(128)"`
	Charge         int    `gorm:"type:int"` // NOTE(review): unit (currency?) not established here — confirm
	Date           time.Time
}
// DaoHouseChange wraps a gorm.DB handle for the house_change table.
// (Doc fixed: the previous comment was copy-pasted from a "DaoIdea" type.)
type DaoHouseChange struct {
	db *gorm.DB
}
// NewDaoHouseChange opens a MySQL connection for the house_change table and
// returns a DAO wrapping it. SQL statement logging is enabled when
// DBSqlPrint == 1.
func NewDaoHouseChange() (d *DaoHouseChange) {
	d = &DaoHouseChange{}
	var err error
	// BUG FIX: the error from gorm.Open was silently discarded with "_",
	// leaving an unusable handle with no diagnostic. Report it instead.
	d.db, err = gorm.Open("mysql", DBAddress)
	if err != nil {
		fmt.Printf("open database failed Error:%s", err.Error())
		return
	}
	if DBSqlPrint == 1 {
		d.db.LogMode(true)
	}
	return
}
// Insert creates the house_change table on first use (InnoDB, utf8) and
// inserts the given record.
func (d *DaoHouseChange) Insert(u *HouseChange) {
	if !d.db.HasTable(&HouseChange{}) {
		err := d.db.Set("gorm:table_options", "ENGINE=InnoDB DEFAULT CHARSET=utf8").CreateTable(&HouseChange{}).Error
		if err != nil {
			fmt.Printf("创建表失败 Error:%s", err.Error())
			return
		}
	}
	// BUG FIX: the insert error was silently dropped; report it so failed
	// writes are visible (matches the existing print-and-continue style).
	if err := d.db.Create(u).Error; err != nil {
		fmt.Printf("insert house_change failed Error:%s", err.Error())
	}
}
// PrintAll prints up to 1000 rows of the house_change table, preceded by a
// header of the struct's field names. It does nothing if the table does not
// exist yet.
func (d *DaoHouseChange) PrintAll() {
	if !d.db.HasTable(&HouseChange{}) {
		return
	}
	list := make([]*HouseChange, 0)
	err := d.db.Model(&HouseChange{}).Offset(0).Limit(1000).Find(&list).Error
	if err != nil {
		// BUG FIX: a query failure was previously indistinguishable from an
		// empty table (both printed nothing); report it explicitly.
		fmt.Printf("query house_change failed Error:%s", err.Error())
		return
	}
	if len(list) > 0 {
		PrintWithColor("正在输出house_change表全部查询结果:\n")
		PrintStructField(HouseChange{})
		for _, item := range list {
			fmt.Printf("%v\n", item)
		}
	}
}
// PrintByCustomerid prints up to 1000 house-change records for the given
// customer id, or a "no records" notice when the table is missing or nothing
// matches. (Doc fixed: the previous comment said "Print all".)
// NOTE(review): a query error is reported identically to "no records" —
// consider distinguishing the two.
func (this *DaoHouseChange) PrintByCustomerid(customerid int64) {
	if !this.db.HasTable(&HouseChange{}) {
		PrintWithColor("该顾客无换房记录\n")
		return
	}
	list := make([]*HouseChange, 0)
	err := this.db.Model(&HouseChange{}).Where("customerid = ?", customerid).Limit(1000).Find(&list).Error
	if err == nil && len(list) > 0 {
		PrintWithColor("正在输出该顾客的换房记录:\n")
		PrintStructField(HouseChange{})
		for _, item := range list {
			fmt.Printf("%v\n", item)
		}
	} else {
		PrintWithColor("该顾客无换房记录\n")
	}
}
// Close releases the underlying database connection, if one was opened.
func (d *DaoHouseChange) Close() {
	if d.db == nil {
		return
	}
	d.db.Close()
}
|
package main
import (
"github.com/hashicorp/terraform/helper/schema"
)
// resourceGCPAccount defines the Terraform resource schema for a GCP service
// account credential. The attributes mirror the keys of a GCP
// service-account JSON key file.
func resourceGCPAccount() *schema.Resource {
	return &schema.Resource{
		Create: resourceGCPAccountCreate,
		Read:   resourceGCPAccountRead,
		Update: resourceGCPAccountUpdate,
		Delete: resourceGCPAccountDelete,
		Schema: map[string]*schema.Schema{
			// BUG FIX: Terraform attribute names must contain only lowercase
			// alphanumerics and underscores; helper/schema's validation
			// rejects camelCase keys like "projectId" at provider init.
			"type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"project_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"private_key_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"private_key": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				// Key material must not be echoed in plan/apply output.
				Sensitive: true,
			},
			"client_email": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"client_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"auth_uri": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"token_uri": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
		},
	}
}
// resourceGCPAccountCreate is a no-op stub: no remote API call is made.
// NOTE(review): confirm whether the resource is intentionally state-only.
func resourceGCPAccountCreate(d *schema.ResourceData, m interface{}) error {
	return nil
}

// resourceGCPAccountRead is a no-op stub: state is never refreshed.
func resourceGCPAccountRead(d *schema.ResourceData, m interface{}) error {
	return nil
}

// resourceGCPAccountUpdate is a no-op stub.
func resourceGCPAccountUpdate(d *schema.ResourceData, m interface{}) error {
	return nil
}

// resourceGCPAccountDelete is a no-op stub; Terraform removes the resource
// from state after this returns nil.
func resourceGCPAccountDelete(d *schema.ResourceData, m interface{}) error {
	return nil
}
|
package state
import (
"io"
valuetransaction "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/kv/buffered"
)
// VirtualState is an interface to the mutable state of the smart contract.
type VirtualState interface {
	// BlockIndex returns the index of the current state. The state index is
	// incremented when a state transition occurs; index 0 means the origin
	// state.
	BlockIndex() uint32
	// ApplyBlockIndex sets the state index without applying updates.
	ApplyBlockIndex(uint32)
	// Timestamp returns the state's timestamp (int64; unit not fixed by this
	// interface — see implementations).
	Timestamp() int64
	// ApplyStateUpdate updates the state without changing the state index.
	ApplyStateUpdate(stateUpd StateUpdate)
	// ApplyBlock applies a block of state updates together with its state
	// index and timestamp.
	ApplyBlock(Block) error
	// CommitToDb saves the virtual state to the smart-contract DB, making it
	// persistent (solid).
	CommitToDb(batch Block) error
	// Hash returns the hash of the variable state: the root of the Merkle
	// chain of all state updates starting from the origin.
	Hash() hashing.HashValue
	// Variables returns the storage of variable/value pairs.
	Variables() buffered.BufferedKVStore
	Clone() VirtualState
	DangerouslyConvertToString() string
}
// StateUpdate represents one update to the variable state, calculated by the
// VM (in batches). State updates come in batches: all updates within one
// block share the same state index, state tx id, and block size, and each
// has a unique index within the block. A block is complete when it contains
// one state update for each index.
type StateUpdate interface {
	// RequestID identifies the request which resulted in this state update.
	RequestID() *coretypes.RequestID
	Timestamp() int64
	// WithTimestamp returns the update with the given timestamp set.
	WithTimestamp(int64) StateUpdate
	String() string
	// Mutations returns the payload: the sequence of variable/value changes.
	Mutations() buffered.MutationSequence
	Clone() StateUpdate
	Write(io.Writer) error
	Read(io.Reader) error
}
// Block is a sequence of state updates applicable to the variable state.
type Block interface {
	// ForEach visits each (index, update) pair; the callback returns false to
	// stop iteration early.
	ForEach(func(uint16, StateUpdate) bool)
	StateIndex() uint32
	WithBlockIndex(uint32) Block
	StateTransactionID() valuetransaction.ID
	WithStateTransaction(valuetransaction.ID) Block
	Timestamp() int64
	// Size returns the number of state updates in the block.
	Size() uint16
	RequestIDs() []*coretypes.RequestID
	// EssenceHash hashes the block contents except the state transaction id.
	EssenceHash() hashing.HashValue
	String() string
	Write(io.Writer) error
	Read(io.Reader) error
}
|
package main
import "fmt"
// Demonstrates Go's two character types:
//   - byte (alias of uint8): a single ASCII character.
//   - rune (alias of int32): a single UTF-8 code point, needed for Chinese,
//     Japanese, or other multi-byte characters.
func main(){
	var a byte = 'a'
	var b uint8 ='b'
	var c rune = '中'
	var d int32 = '国'
	// The %T verb prints a value's concrete type, showing that byte and rune
	// really are uint8 and int32.
	fmt.Printf("%d %T\n",a,a)
	fmt.Printf("%d %T\n",b,b)
	fmt.Printf("%d %T\n",c,c)
	fmt.Printf("%d %T\n",d,d)
	// Reference (Chinese): https://www.cnblogs.com/logo-fox/p/6473125.html
}
|
package notbearparser
// func TestQuery(t *testing.T) {
// queryStr := `div[data-pk="test_pk", hidden] p>.red`
// queryList, err := NewQueryList(queryStr)
// if err != nil {
// t.Fatal(err)
// }
// for _, query := range queryList {
// fmt.Println(query.NodeName, query.AttrList, query.Target)
// }
// }
|
// +build !windows
package fs
import (
"fmt"
"os"
"path/filepath"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"golang.org/x/sys/unix"
)
// Fslock is an advisory exclusive lock backed by a file and flock(2).
type Fslock struct {
	FileName string   // path of the lock file
	fd       *os.File // open handle holding the flock; nil until Lock succeeds
}
// Lock creates (or truncates) the lock file and takes an exclusive,
// non-blocking flock on it. The open handle is retained in f.fd so Unlock
// can release the lock by closing it.
func (f *Fslock) Lock() error {
	flockF, err := os.Create(f.FileName)
	if err != nil {
		// BUG FIX: the format string was "...: qs" — a stray "qs" instead of
		// "%s", so err was emitted as %!(EXTRA ...) noise.
		return fmt.Errorf("cannot create lock file %q: %s", f.FileName, err)
	}
	if err := unix.Flock(int(flockF.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
		// Don't leak the descriptor when the lock is already held elsewhere.
		flockF.Close()
		return fmt.Errorf("cannot acquire lock on file %q: %q", f.FileName, err)
	}
	// BUG FIX: the handle was never stored, so Unlock closed a nil *os.File
	// and the flock stayed held until the handle was garbage collected.
	f.fd = flockF
	return nil
}
// Unlock releases the lock by closing the underlying lock file (closing the
// descriptor drops the flock). It is a no-op if the lock was never acquired.
func (f *Fslock) Unlock() error {
	// BUG FIX: previously f.fd was closed unconditionally (nil before Lock)
	// and the Close error was discarded.
	if f.fd == nil {
		return nil
	}
	err := f.fd.Close()
	f.fd = nil
	return err
}
// CreateFlockFile creates a flock.lock file in the directory dir, acquires
// an exclusive lock on it, and returns the handler to the file.
func CreateFlockFile(dir string) (*Fslock, error) {
	f := &Fslock{FileName: filepath.Join(dir, "flock.lock")}
	if err := f.Lock(); err != nil {
		// Return a nil handle on failure so callers cannot accidentally use
		// a half-initialized lock.
		return nil, err
	}
	return f, nil
}
// MustGetFreeSpace returns the free space, in bytes, available to
// unprivileged users on the filesystem containing path. It panics via the
// logger on any failure.
func MustGetFreeSpace(path string) uint64 {
	dir, err := os.Open(path)
	if err != nil {
		logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err)
	}
	defer MustClose(dir)
	var stats unix.Statfs_t
	if err := unix.Fstatfs(int(dir.Fd()), &stats); err != nil {
		logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err)
	}
	// Bavail is blocks available to non-root; multiply by the block size.
	return uint64(stats.Bavail) * uint64(stats.Bsize)
}
|
package render
import (
"testing"
"github.com/sh0rez/docsonnet/pkg/docsonnet"
"github.com/stretchr/testify/assert"
)
// TestSortFields verifies the rendering order of docsonnet fields: functions
// come before objects, each group alphabetical — with "new"/"newNamed"
// apparently pinned first (see sortFields for the exact rule — TODO confirm).
func TestSortFields(t *testing.T) {
	api := docsonnet.Fields{
		"new":      dfn(),
		"newNamed": dfn(),
		"aaa":      dfn(),
		"bbb":      dobj(),
		"ccc":      dfn(),
		"metadata": dobj(),
	}
	sorted := []string{
		"new",
		"newNamed",
		"aaa",
		"ccc",
		"bbb",
		"metadata",
	}
	res := sortFields(api)
	assert.Equal(t, sorted, res)
}
// dobj returns a minimal docsonnet field representing an object.
func dobj() docsonnet.Field {
	return docsonnet.Field{
		Object: &docsonnet.Object{},
	}
}

// dfn returns a minimal docsonnet field representing a function.
func dfn() docsonnet.Field {
	return docsonnet.Field{
		Function: &docsonnet.Function{},
	}
}
|
// Copyright (C) 2021 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vpplink
import (
"fmt"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/interface_types"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/session"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/types"
)
// enableDisableSessionLayer toggles the VPP session layer via the
// session.SessionEnableDisable binary API.
func (v *VppLink) enableDisableSessionLayer(isEnable bool) error {
	client := session.NewServiceClient(v.GetConnection())
	_, err := client.SessionEnableDisable(v.GetContext(), &session.SessionEnableDisable{
		IsEnable: isEnable,
	})
	if err != nil {
		return fmt.Errorf("failed to %s session: %w", strEnableDisable[isEnable], err)
	}
	return nil
}
// EnableSessionLayer turns the VPP session layer on.
func (v *VppLink) EnableSessionLayer() error {
	return v.enableDisableSessionLayer(true)
}

// DisableSessionLayer turns the VPP session layer off.
func (v *VppLink) DisableSessionLayer() error {
	return v.enableDisableSessionLayer(false)
}
// enableDisableSessionSAPILayer toggles the session-layer SAPI (app socket
// API) via session.SessionSapiEnableDisable.
func (v *VppLink) enableDisableSessionSAPILayer(isEnable bool) error {
	client := session.NewServiceClient(v.GetConnection())
	_, err := client.SessionSapiEnableDisable(v.GetContext(), &session.SessionSapiEnableDisable{
		IsEnable: isEnable,
	})
	if err != nil {
		return fmt.Errorf("failed to %s SAPI session: %w", strEnableDisable[isEnable], err)
	}
	return nil
}
// EnableSessionSAPI turns the session-layer SAPI on.
func (v *VppLink) EnableSessionSAPI() error {
	return v.enableDisableSessionSAPILayer(true)
}

// DisableSessionSAPI turns the session-layer SAPI off.
func (v *VppLink) DisableSessionSAPI() error {
	return v.enableDisableSessionSAPILayer(false)
}
// addDelSessionAppNamespace adds or removes a session app namespace via the
// AppNamespaceAddDelV3 API, mapping the typed namespace onto the binding's
// wire struct.
func (v *VppLink) addDelSessionAppNamespace(namespace *types.SessionAppNamespace, isAdd bool) error {
	client := session.NewServiceClient(v.GetConnection())
	_, err := client.AppNamespaceAddDelV3(v.GetContext(), &session.AppNamespaceAddDelV3{
		Secret:      namespace.Secret,
		NamespaceID: namespace.NamespaceId,
		Netns:       namespace.Netns,
		SockName:    namespace.SocketName,
		SwIfIndex:   interface_types.InterfaceIndex(namespace.SwIfIndex),
		IsAdd:       isAdd,
	})
	if err != nil {
		return fmt.Errorf("failed to %s session namespace: %w", strAddRemove[isAdd], err)
	}
	return nil
}
// AddSessionAppNamespace registers the given session app namespace in VPP.
func (v *VppLink) AddSessionAppNamespace(namespace *types.SessionAppNamespace) error {
	return v.addDelSessionAppNamespace(namespace, true /* isAdd */)
}

// DelSessionAppNamespace removes the given session app namespace from VPP.
func (v *VppLink) DelSessionAppNamespace(namespace *types.SessionAppNamespace) error {
	return v.addDelSessionAppNamespace(namespace, false /* isAdd */)
}
|
package log
import (
"io"
"os"
"path/filepath"
"runtime"
"github.com/Sirupsen/logrus"
)
/*************************************************
Debug Level Setting
- debug
- info
- warning
- error
- fatal
- panic
*************************************************/
// Structured-field keys attached to log entries when caller information is
// recorded.
const (
	fileTag = "file"
	lineTag = "line"
	funcTag = "func"
)
// logConf is the runtime log feature configuration.
type logConf struct {
	showFileInfo bool // whether to attach file name, func name, and line number to entries
}

// rtLogConf is the package-wide active configuration, set by InitLog.
var rtLogConf logConf
//InitLog config the log
func InitLog(level logrus.Level, filename string, MultiWriter bool, showFileInfo bool) {
logrus.SetLevel(level)
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: false, FullTimestamp: true})
err := os.MkdirAll(filepath.Dir(filename), 0744)
if err != nil {
Error("error folder create : ", err)
os.Exit(1)
}
f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
Error("error opening file: ", err)
os.Exit(1)
}
if MultiWriter {
logrus.SetOutput(io.MultiWriter(f, os.Stdout))
} else {
logrus.SetOutput(f)
}
rtLogConf.showFileInfo = showFileInfo
}
// Init configures logging from string settings. environment selects the
// layout: "development" tees output to stdout and records caller info,
// "production" writes to the file only. Any other environment leaves logging
// unconfigured (preserved behavior).
func Init(environment string, filename string, level string) {
	debugLV, err := logrus.ParseLevel(level)
	if err != nil {
		// BUG FIX: an unparsable level string was silently ignored, and the
		// zero value logrus.PanicLevel suppressed almost all output. Fall
		// back to Info instead.
		debugLV = logrus.InfoLevel
	}
	switch environment {
	case "development":
		InitLog(debugLV, filename, true, true)
	case "production":
		InitLog(debugLV, filename, false, false)
	}
}
// getBaseName strips directory prefixes from a source-file path and a fully
// qualified function name, returning only their final path elements.
func getBaseName(fileName string, funcName string) (string, string) {
	shortFile := filepath.Base(fileName)
	shortFunc := filepath.Base(funcName)
	return shortFile, shortFunc
}
// Println logs a message at level Debug, mirroring Debug's signature.
func Println(args ...interface{}) {
	// BUG FIX: the arguments were passed as one []interface{} value
	// ("Debug(args)"), so the log line showed the slice's string form
	// ("[a b]") instead of the individual values. Spread them instead.
	// NOTE(review): when showFileInfo is on, the caller fields still point at
	// this wrapper, because Debug uses runtime.Caller(1).
	Debug(args...)
}
// Debug logs a message at level Debug on the standard logger. When
// rtLogConf.showFileInfo is set, the caller's file, line, and function name
// are attached as structured fields.
func Debug(args ...interface{}) {
	if !rtLogConf.showFileInfo {
		logrus.Debug(args...)
		return
	}
	// runtime.Caller(1) resolves the immediate caller of this function.
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Debug(args...)
	} else {
		logrus.Debug(args...)
	}
}

// Debugf logs a formatted message at level Debug on the standard logger,
// attaching caller fields when rtLogConf.showFileInfo is set.
func Debugf(msg string, args ...interface{}) {
	if !rtLogConf.showFileInfo {
		logrus.Debugf(msg, args...)
		return
	}
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Debugf(msg, args...)
	} else {
		logrus.Debugf(msg, args...)
	}
}
// Info logs a message at level Info on the standard logger, attaching caller
// file/line/func fields when rtLogConf.showFileInfo is set.
func Info(args ...interface{}) {
	if !rtLogConf.showFileInfo {
		logrus.Info(args...)
		return
	}
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Info(args...)
	} else {
		logrus.Info(args...)
	}
}

// Infof logs a formatted message at level Info on the standard logger,
// attaching caller fields when rtLogConf.showFileInfo is set.
func Infof(msg string, args ...interface{}) {
	if !rtLogConf.showFileInfo {
		logrus.Infof(msg, args...)
		return
	}
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Infof(msg, args...)
	} else {
		logrus.Infof(msg, args...)
	}
}
// Warn logs a message at level Warn on the standard logger, attaching caller
// file/line/func fields when rtLogConf.showFileInfo is set.
func Warn(msg ...interface{}) {
	if !rtLogConf.showFileInfo {
		logrus.Warn(msg...)
		return
	}
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Warn(msg...)
	} else {
		logrus.Warn(msg...)
	}
}

// Warnf logs a formatted message at level Warn on the standard logger,
// attaching caller fields when rtLogConf.showFileInfo is set.
func Warnf(msg string, args ...interface{}) {
	if !rtLogConf.showFileInfo {
		logrus.Warnf(msg, args...)
		return
	}
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Warnf(msg, args...)
	} else {
		logrus.Warnf(msg, args...)
	}
}
// Error logs a message at level Error on the standard logger. Unlike the
// Debug/Info/Warn family, caller file/line/func fields are ALWAYS attached,
// regardless of rtLogConf.showFileInfo.
func Error(msg ...interface{}) {
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Error(msg...)
	} else {
		logrus.Error(msg...)
	}
}

// Errorf logs a formatted message at level Error on the standard logger,
// always attaching caller fields.
func Errorf(msg string, args ...interface{}) {
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Errorf(msg, args...)
	} else {
		logrus.Errorf(msg, args...)
	}
}
// Fatal logs a message at level Fatal on the standard logger and then exits
// the process (logrus calls os.Exit(1)). Caller fields are always attached.
func Fatal(msg ...interface{}) {
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Fatal(msg...)
	} else {
		logrus.Fatal(msg...)
	}
}

// Fatalf logs a formatted message at level Fatal and then exits the process.
// Caller fields are always attached.
func Fatalf(msg string, args ...interface{}) {
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Fatalf(msg, args...)
	} else {
		logrus.Fatalf(msg, args...)
	}
}
// Panic logs a message at level Panic on the standard logger and then panics
// (logrus behavior). Caller fields are always attached.
func Panic(msg ...interface{}) {
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Panic(msg...)
	} else {
		logrus.Panic(msg...)
	}
}

// Panicf logs a formatted message at level Panic and then panics. Caller
// fields are always attached.
func Panicf(msg string, args ...interface{}) {
	if pc, file, line, ok := runtime.Caller(1); ok {
		fileName, funcName := getBaseName(file, runtime.FuncForPC(pc).Name())
		logrus.WithField(fileTag, fileName).WithField(lineTag, line).WithField(funcTag, funcName).Panicf(msg, args...)
	} else {
		logrus.Panicf(msg, args...)
	}
}
|
package number_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/ywardhana/golib/number"
)
// TestToString table-tests number.ToString across every numeric kind it
// accepts, including 64-bit values large enough to catch truncation bugs.
func TestToString(t *testing.T) {
	tests := []struct {
		Title    string
		Value    interface{}
		Expected string
	}{
		{
			Title:    "Test int",
			Value:    123,
			Expected: "123",
		},
		{
			Title:    "Test int64",
			Value:    int64(999999999999999999),
			Expected: "999999999999999999",
		},
		{
			Title:    "Test uint",
			Value:    uint(123),
			Expected: "123",
		},
		{
			Title:    "Test uint8",
			Value:    uint8(123),
			Expected: "123",
		},
		{
			Title:    "Test uint16",
			Value:    uint16(123),
			Expected: "123",
		},
		{
			Title:    "Test uint32",
			Value:    uint32(123),
			Expected: "123",
		},
		{
			Title:    "Test uint64",
			Value:    uint64(999999999999999999),
			Expected: "999999999999999999",
		},
		{
			Title:    "Test float32",
			Value:    float32(1.2),
			Expected: "1.2",
		},
		{
			Title:    "Test float64",
			Value:    1.2,
			Expected: "1.2",
		},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.Expected, number.ToString(tt.Value), tt.Title)
	}
}
// TestToHexString table-tests number.ToHexString against fmt's %x rendering
// of the same value, for every accepted integer kind.
func TestToHexString(t *testing.T) {
	tests := []struct {
		Title    string
		Value    interface{}
		Expected string
	}{
		{
			Title:    "Test int",
			Value:    123,
			Expected: fmt.Sprintf("%x", 123),
		},
		{
			Title:    "Test int64",
			Value:    int64(999999999999999999),
			Expected: fmt.Sprintf("%x", 999999999999999999),
		},
		{
			Title:    "Test uint",
			Value:    uint(123),
			Expected: fmt.Sprintf("%x", 123),
		},
		{
			Title:    "Test uint8",
			Value:    uint8(123),
			Expected: fmt.Sprintf("%x", 123),
		},
		{
			Title:    "Test uint16",
			Value:    uint16(123),
			Expected: fmt.Sprintf("%x", 123),
		},
		{
			Title:    "Test uint32",
			Value:    uint32(123),
			Expected: fmt.Sprintf("%x", 123),
		},
		{
			Title:    "Test uint64",
			Value:    uint64(999999999999999999),
			Expected: fmt.Sprintf("%x", 999999999999999999),
		},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.Expected, number.ToHexString(tt.Value), tt.Title)
	}
}
// TestToBinaryString table-tests number.ToBinaryString against fmt's %b
// rendering of the same value, for every accepted integer kind.
func TestToBinaryString(t *testing.T) {
	tests := []struct {
		Title    string
		Value    interface{}
		Expected string
	}{
		{
			Title:    "Test int",
			Value:    123,
			Expected: fmt.Sprintf("%b", 123),
		},
		{
			Title:    "Test int64",
			Value:    int64(999999999999999999),
			Expected: fmt.Sprintf("%b", 999999999999999999),
		},
		{
			Title:    "Test uint",
			Value:    uint(123),
			Expected: fmt.Sprintf("%b", 123),
		},
		{
			Title:    "Test uint8",
			Value:    uint8(123),
			Expected: fmt.Sprintf("%b", 123),
		},
		{
			Title:    "Test uint16",
			Value:    uint16(123),
			Expected: fmt.Sprintf("%b", 123),
		},
		{
			Title:    "Test uint32",
			Value:    uint32(123),
			Expected: fmt.Sprintf("%b", 123),
		},
		{
			Title:    "Test uint64",
			Value:    uint64(999999999999999999),
			Expected: fmt.Sprintf("%b", 999999999999999999),
		},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.Expected, number.ToBinaryString(tt.Value), tt.Title)
	}
}
|
package utils
import (
"io"
"strings"
)
// ShowPaged displays text through the shared paging helper, writing to w.
// NOTE(review): actual paging behavior lives in showPagedReader (not visible
// in this chunk) — confirm semantics there.
func ShowPaged(w io.Writer, text string) error {
	return showPagedReader(w, strings.NewReader(text))
}

// ShowPagedReader is the io.Reader variant of ShowPaged.
func ShowPagedReader(w io.Writer, r io.Reader) error {
	return showPagedReader(w, r)
}
|
// Package static adds a static string to i3bar. Its main purpose is
// demonstrating the module API of `i3gostatus` and it acts as a template for
// new modules.
package static
import (
"time"
"github.com/pelletier/go-toml"
"github.com/rumpelsepp/i3gostatus/lib/model"
)
const (
name = "static"
moduleName = "i3gostatus.modules." + name
)
// Config holds the module's settings; the static module only needs the
// shared base options (format string, period, etc.).
type Config struct {
	model.BaseConfig
}
// ParseConfig fills c from this module's TOML subtree using the common base
// parser keyed by the module name.
func (c *Config) ParseConfig(configTree *toml.TomlTree) {
	c.BaseConfig.Parse(name, configTree)
}
// Run emits the configured static Format string on every tick of Period.
// It never returns; the ticker is intentionally never stopped because the
// module runs for the lifetime of the process.
func (c *Config) Run(args *model.ModuleArgs) {
	outputBlock := model.NewBlock(moduleName, c.BaseConfig, args.Index)
	for range time.NewTicker(c.Period).C {
		outputBlock.FullText = c.Format
		args.OutCh <- outputBlock
	}
}
|
/*
* @lc app=leetcode id=901 lang=golang
*
* [901] Online Stock Span
*/
// @lc code=start
// Pair records a price together with the span computed for it; it is one
// entry of the monotonic stack kept by StockSpanner.
type Pair struct {
	Val int
	Res int
}

// StockSpanner answers "how many consecutive days, ending today, had a price
// less than or equal to today's" using a strictly decreasing monotonic stack.
type StockSpanner struct {
	Item []Pair
}

// Constructor returns an empty StockSpanner, as required by the LeetCode
// harness.
func Constructor() StockSpanner {
	return StockSpanner{Item: []Pair{}}
}

// Next records today's price and returns its span. Stack entries whose price
// does not exceed the new price are popped and their spans accumulated, so
// each price is pushed and popped at most once (amortized O(1) per call).
func (s *StockSpanner) Next(price int) int {
	span := 1
	for {
		top := len(s.Item) - 1
		if top < 0 || s.Item[top].Val > price {
			break
		}
		span += s.Item[top].Res
		s.Item = s.Item[:top]
	}
	s.Item = append(s.Item, Pair{Val: price, Res: span})
	return span
}
/**
* Your StockSpanner object will be instantiated and called as such:
* obj := Constructor();
* param_1 := obj.Next(price);
*/
// @lc code=end
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// Security type.
//
// Security reports the xpack security usage statistics. The Realms and
// RoleMapping maps are left nil by the zero value; UnmarshalJSON and
// NewSecurity allocate them.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/xpack/usage/types.ts#L434-L447
type Security struct {
	Anonymous          FeatureToggle               `json:"anonymous"`
	ApiKeyService      FeatureToggle               `json:"api_key_service"`
	Audit              Audit                       `json:"audit"`
	Available          bool                        `json:"available"`
	Enabled            bool                        `json:"enabled"`
	Fips140            FeatureToggle               `json:"fips_140"`
	Ipfilter           IpFilter                    `json:"ipfilter"`
	OperatorPrivileges Base                        `json:"operator_privileges"`
	Realms             map[string]XpackRealm       `json:"realms"`
	RoleMapping        map[string]XpackRoleMapping `json:"role_mapping"`
	Roles              SecurityRoles               `json:"roles"`
	Ssl                Ssl                         `json:"ssl"`
	SystemKey          *FeatureToggle              `json:"system_key,omitempty"`
	TokenService       FeatureToggle               `json:"token_service"`
}
// UnmarshalJSON decodes a Security from JSON by walking the token
// stream: each key token selects the field decoded next. "available"
// and "enabled" additionally accept their boolean encoded as a string
// (e.g. "true").
//
// FIX: the Decode errors for the "available"/"enabled" temporaries were
// previously ignored, silently leaving the zero value on malformed
// input. (This file is generated — mirror the fix upstream.)
func (s *Security) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))

	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		switch t {

		case "anonymous":
			if err := dec.Decode(&s.Anonymous); err != nil {
				return err
			}

		case "api_key_service":
			if err := dec.Decode(&s.ApiKeyService); err != nil {
				return err
			}

		case "audit":
			if err := dec.Decode(&s.Audit); err != nil {
				return err
			}

		case "available":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.Available = value
			case bool:
				s.Available = v
			}

		case "enabled":
			var tmp interface{}
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseBool(v)
				if err != nil {
					return err
				}
				s.Enabled = value
			case bool:
				s.Enabled = v
			}

		case "fips_140":
			if err := dec.Decode(&s.Fips140); err != nil {
				return err
			}

		case "ipfilter":
			if err := dec.Decode(&s.Ipfilter); err != nil {
				return err
			}

		case "operator_privileges":
			if err := dec.Decode(&s.OperatorPrivileges); err != nil {
				return err
			}

		case "realms":
			// Allocate lazily: the zero Security leaves the map nil.
			if s.Realms == nil {
				s.Realms = make(map[string]XpackRealm, 0)
			}
			if err := dec.Decode(&s.Realms); err != nil {
				return err
			}

		case "role_mapping":
			if s.RoleMapping == nil {
				s.RoleMapping = make(map[string]XpackRoleMapping, 0)
			}
			if err := dec.Decode(&s.RoleMapping); err != nil {
				return err
			}

		case "roles":
			if err := dec.Decode(&s.Roles); err != nil {
				return err
			}

		case "ssl":
			if err := dec.Decode(&s.Ssl); err != nil {
				return err
			}

		case "system_key":
			if err := dec.Decode(&s.SystemKey); err != nil {
				return err
			}

		case "token_service":
			if err := dec.Decode(&s.TokenService); err != nil {
				return err
			}

		}
	}
	return nil
}
// NewSecurity returns a Security with its map fields ready for use.
func NewSecurity() *Security {
	return &Security{
		Realms:      make(map[string]XpackRealm),
		RoleMapping: make(map[string]XpackRoleMapping),
	}
}
|
package language
import (
"html/template"
"testing"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/stretchr/testify/assert"
)
// TestAdd checks that registering an empty translation map succeeds.
func TestAdd(t *testing.T) {
	Add("cn", map[string]string{})
}

// TestGetWithScope verifies scoped lookups: a key stored under
// "user.table" is only found when matching scopes are supplied, and a
// miss echoes the key back unchanged.
func TestGetWithScope(t *testing.T) {
	config.Initialize(&config.Config{
		Language: CN,
	})
	cn["foo"] = "bar"
	assert.Equal(t, GetWithScope("foo"), "bar")
	cn["user.table.foo2"] = "bar"
	// Without the full scope chain the lookup misses and returns the key.
	assert.Equal(t, GetWithScope("foo2"), "foo2")
	assert.Equal(t, GetWithScope("foo2", "user"), "foo2")
	assert.Equal(t, GetWithScope("foo2", "user", "table"), "bar")
}

// TestGet verifies a plain, unscoped lookup.
func TestGet(t *testing.T) {
	config.Initialize(&config.Config{
		Language: CN,
	})
	cn["foo"] = "bar"
	assert.Equal(t, Get("foo"), "bar")
}

// TestWithScopes verifies that scopes are joined dot-separated before
// the key.
func TestWithScopes(t *testing.T) {
	assert.Equal(t, WithScopes("foo", "user", "table"), "user.table.foo")
}

// TestGetFromHtml verifies the scoped lookup variant returning
// template.HTML.
func TestGetFromHtml(t *testing.T) {
	config.Initialize(&config.Config{
		Language: CN,
	})
	cn["user.table.foo"] = "bar"
	assert.Equal(t, GetFromHtml("foo", "user", "table"), template.HTML("bar"))
}
|
package main
import (
	"fmt"
	"sync"
	"time"
)
// idMu serializes access to the ID counters below.
// FIX: workerID and publisherID were previously incremented from
// several goroutines with no synchronization — a data race under
// `go test -race`.
var idMu sync.Mutex

var workerID int
var publisherID int

func main() {
	input := make(chan string)
	go workerProcess(input)
	go workerProcess(input)
	go workerProcess(input)
	go publisher(input)
	go publisher(input)
	go publisher(input)
	go publisher(input)
	// Crude demo coordination: let the goroutines run briefly. A
	// WaitGroup or context would be deterministic.
	time.Sleep(1 * time.Millisecond)
}

// publisher pushes data into a channel, tagging each message with a
// unique publisher ID.
func publisher(out chan string) {
	idMu.Lock()
	publisherID++
	thisID := publisherID
	idMu.Unlock()
	dataID := 0
	for {
		dataID++
		fmt.Printf("publisher %d is pushing data \n", thisID)
		data := fmt.Sprintf("Data from publisher %d. Data %d", thisID, dataID)
		out <- data
	}
}

// workerProcess drains the channel forever, printing each value.
func workerProcess(in <-chan string) {
	idMu.Lock()
	workerID++
	thisID := workerID
	idMu.Unlock()
	for {
		fmt.Printf("%d: waiting for input...\n", thisID)
		input := <-in
		fmt.Printf("%d: input is: %s\n", thisID, input)
	}
}
/*
CHALLENGE #1
Is this fan out?
-> Yes
Are we "fanning out" work? Yes. We've launched several goroutines that are simultaneously publishing a
message onto our channel. The golang blog says, "Fan out means you have multiple functions reading from
the same channel until that channel is closed." Here we do have multiple functions reading from the same channel.
So, okay, we're fanning out.
CHALLENGE #2
Is this fan in?
-> No
What is being "fanned in" here? We have launched several goroutines of the same function: workerProcess.
What do those goroutines do? They are all reading from an unbuffered channel. If there was a tremendous
amount of processing that each "workerProcess" func executed, then all three of the "workerProcess"
funcs could be processing in parallel: pulling values off the channel and processing them, There is no
"fanning in" though here. Remember what the golang blog describes fan in: "A function can read from
multiple inputs and proceed until all are closed by multiplexing the input channels onto a single
channel that's closed when all the inputs are closed." We don't have many channels here converging into
one channel.
*/
|
// outer_events.go provides EventsAPI particular outer events
package slackevents
import (
"encoding/json"
)
// EventsAPIEvent is the base (outer) EventsAPI event envelope. Data
// and InnerEvent carry the decoded payload and are not populated from
// JSON directly (no tags).
type EventsAPIEvent struct {
	Token        string `json:"token"`
	TeamID       string `json:"team_id"`
	Type         string `json:"type"`
	APIAppID     string `json:"api_app_id"`
	EnterpriseID string `json:"enterprise_id"`
	Data         interface{}
	InnerEvent   EventsAPIInnerEvent
}

// EventsAPIURLVerificationEvent received when configuring a EventsAPI driven app
type EventsAPIURLVerificationEvent struct {
	Token     string `json:"token"`
	Challenge string `json:"challenge"`
	Type      string `json:"type"`
}

// ChallengeResponse is a response to a EventsAPIEvent URLVerification challenge.
// NOTE(review): Challenge has no json tag, so it marshals as
// "Challenge" — confirm the Slack verification handler expects this.
type ChallengeResponse struct {
	Challenge string
}

// EventsAPICallbackEvent is the main (outer) EventsAPI event. The
// inner event is kept raw so callers can decode it by its own type.
type EventsAPICallbackEvent struct {
	Type         string           `json:"type"`
	Token        string           `json:"token"`
	TeamID       string           `json:"team_id"`
	APIAppID     string           `json:"api_app_id"`
	EnterpriseID string           `json:"enterprise_id"`
	InnerEvent   *json.RawMessage `json:"event"`
	AuthedUsers  []string         `json:"authed_users"`
	AuthedTeams  []string         `json:"authed_teams"`
	EventID      string           `json:"event_id"`
	EventTime    int              `json:"event_time"`
	EventContext string           `json:"event_context"`
}

// EventsAPIAppRateLimited indicates your app's event subscriptions are being rate limited
type EventsAPIAppRateLimited struct {
	Type              string `json:"type"`
	Token             string `json:"token"`
	TeamID            string `json:"team_id"`
	MinuteRateLimited int    `json:"minute_rate_limited"`
	APIAppID          string `json:"api_app_id"`
}

const (
	// CallbackEvent is the "outer" event of an EventsAPI event.
	CallbackEvent = "event_callback"
	// URLVerification is an event used when configuring your EventsAPI app
	URLVerification = "url_verification"
	// AppRateLimited indicates your app's event subscriptions are being rate limited
	AppRateLimited = "app_rate_limited"
)

// EventsAPIEventMap maps OUTER Event API events to their corresponding struct
// implementations. The structs should be instances of the unmarshalling
// target for the matching event type.
var EventsAPIEventMap = map[string]interface{}{
	CallbackEvent:   EventsAPICallbackEvent{},
	URLVerification: EventsAPIURLVerificationEvent{},
	AppRateLimited:  EventsAPIAppRateLimited{},
}
|
package echo
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
)
// TestContext exercises Context end to end: request binding (JSON,
// form, unsupported content type), path parameters, the key/value
// store, and each response rendering helper.
func TestContext(t *testing.T) {
	b, _ := json.Marshal(u1)
	r, _ := http.NewRequest(POST, "/users/1", bytes.NewReader(b))
	c := &Context{
		Response: &response{Writer: httptest.NewRecorder()},
		Request:  r,
		params:   make(Params, 5),
		store:    make(store),
	}

	//**********//
	//   Bind   //
	//**********//

	// JSON
	r.Header.Set(HeaderContentType, MIMEJSON)
	u2 := new(user)
	if err := c.Bind(u2); err != nil {
		t.Error(err)
	}
	verifyUser(u2, t)

	// FORM
	r.Header.Set(HeaderContentType, MIMEForm)
	u2 = new(user)
	if err := c.Bind(u2); err != nil {
		t.Error(err)
	}
	// TODO: add verification

	// Unsupported content type must fail to bind.
	// FIX: the original called t.Error(err) with a nil err here,
	// producing a useless "<nil>" failure message.
	r.Header.Set(HeaderContentType, "")
	u2 = new(user)
	if err := c.Bind(u2); err == nil {
		t.Error("expected Bind to fail for unsupported content type")
	}
	// TODO: add verification

	//***********//
	//   Param   //
	//***********//

	// By id
	c.params = Params{{"id", "1"}}
	if c.P(0) != "1" {
		t.Error("param id should be 1")
	}

	// By name
	if c.Param("id") != "1" {
		t.Error("param id should be 1")
	}

	// Store
	c.Set("user", u1.Name)
	n := c.Get("user")
	if n != u1.Name {
		t.Error("user name should be Joe")
	}

	// JSON
	r.Header.Set(HeaderAccept, MIMEJSON)
	if err := c.JSON(http.StatusOK, u1); err != nil {
		t.Errorf("render json %v", err)
	}

	// String — reset committed so a second render is allowed.
	r.Header.Set(HeaderAccept, MIMEText)
	c.Response.committed = false
	if err := c.String(http.StatusOK, "Hello, World!"); err != nil {
		t.Errorf("render string %v", err)
	}

	// HTML
	r.Header.Set(HeaderAccept, MIMEHTML)
	c.Response.committed = false
	if err := c.HTML(http.StatusOK, "Hello, <strong>World!</strong>"); err != nil {
		t.Errorf("render html %v", err)
	}

	// Redirect
	c.Response.committed = false
	c.Redirect(http.StatusMovedPermanently, "http://labstack.github.io/echo")
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package result
import (
"os"
"sort"
"time"
"github.com/olekukonko/tablewriter"
tmv1beta1 "github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
)
// printStatusTable renders the steps, in order, as an ASCII table
// (name, step, phase, duration) on stdout.
func printStatusTable(steps []*tmv1beta1.StepStatus) {
	orderSteps(steps)
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Name", "Step", "Phase", "Duration"})
	for _, step := range steps {
		duration := time.Duration(step.Duration) * time.Second
		table.Append([]string{
			step.TestDefinition.Name,
			step.Position.Step,
			string(step.Phase),
			duration.String(),
		})
	}
	table.Render()
}
// orderSteps orders the steps by their start date.
// If the date is not defined the step statuses are ordered by their
// step name (see StepStatusList.Less).
func orderSteps(steps []*tmv1beta1.StepStatus) {
	sort.Sort(StepStatusList(steps))
}
// StepStatusList sorts step statuses by StartTime, pushing steps
// without a start time to the end (ordered among themselves by name).
type StepStatusList []*tmv1beta1.StepStatus

func (s StepStatusList) Less(a, b int) bool {
	aZero := s[a].StartTime.IsZero()
	bZero := s[b].StartTime.IsZero()
	switch {
	case aZero && bZero:
		// Neither has started: fall back to the step name.
		return s[a].Position.Step < s[b].Position.Step
	case aZero:
		return false
	case bZero:
		return true
	}
	return s[a].StartTime.Before(s[b].StartTime)
}

func (s StepStatusList) Len() int { return len(s) }

func (s StepStatusList) Swap(a, b int) { s[a], s[b] = s[b], s[a] }
|
package iproto
import "github.com/DmiAS/cube_cli/internal/app/models"
const cube_svc int32 = 0x00000002
// packRequest wraps token and scope into a request packet for network
// transmission: a marshalled body prefixed with a marshalled header,
// returned as a single byte slice.
func packRequest(token, scope string) ([]byte, error) {
	body, err := packBody(token, scope)
	if err != nil {
		return nil, err
	}
	header, err := packHeader(body)
	if err != nil {
		return nil, err
	}
	return append(header, body...), nil
}
// packHeader builds the wire header for body: the service ID, the body
// length, and a zero request ID.
func packHeader(body []byte) ([]byte, error) {
	return Marshal(&models.Header{
		SvcID:      cube_svc,
		BodyLength: int32(len(body)),
		RequestID:  0,
	})
}
func packBody(token, scope string) ([]byte, error) {
svcToken := strToProtoString(token)
svcScope := strToProtoString(scope)
return Marshal(&models.Request{
SvcMsg: svcMsg,
Token: svcToken,
Scope: svcScope,
})
}
|
package main
import ui "github.com/gizak/termui"
// OnInputFn is invoked with the current input line when the user
// presses enter.
type OnInputFn func(message string)

// OnCloseFn is invoked when the user quits the UI (escape key).
type OnCloseFn func()

// ChatWindow is a terminal chat UI: a scrollable message table above a
// one-line input box.
type ChatWindow struct {
	messages      [][]string // one {user, message} row per chat line
	messageWindow *ui.Table
	scrolledRows  int // rows scrolled up from the bottom of the history
}
// Height reports how many message rows fit inside the table widget.
func (chat *ChatWindow) Height() int {
	rows := chat.messageWindow.Height
	if chat.messageWindow.Border {
		rows -= 2 // top and bottom border lines
	}
	if chat.messageWindow.Separator {
		rows /= 2 // every row is followed by a separator line
	}
	return rows
}
// renderMessages redraws the table with the newest messages that fit,
// offset upwards by scrolledRows.
func (chat *ChatWindow) renderMessages() {
	visible := chat.Height()

	// Reset per-row colors to the window defaults.
	chat.messageWindow.FgColors = make([]ui.Attribute, visible)
	chat.messageWindow.BgColors = make([]ui.Attribute, visible)
	for i := range chat.messageWindow.FgColors {
		chat.messageWindow.FgColors[i] = chat.messageWindow.FgColor
		chat.messageWindow.BgColors[i] = chat.messageWindow.BgColor
	}

	total := len(chat.messages)
	start := total - visible - chat.scrolledRows
	if start < 0 {
		start = 0
	}
	chat.messageWindow.Rows = chat.messages[start : total-chat.scrolledRows]
	ui.Render(chat.messageWindow)
}
// AddMessage appends a (user, message) row and redraws the window.
func (chat *ChatWindow) AddMessage(user string, message string) {
	chat.messages = append(chat.messages, []string{user, message})
	chat.renderMessages()
}
// Start initializes termui, lays out the message table and input box,
// registers all keyboard handlers, and runs the event loop on a
// background goroutine. onInput fires on enter with the input text;
// onClose fires when the user presses escape.
func (chat *ChatWindow) Start(onInput OnInputFn, onClose OnCloseFn) {
	if err := ui.Init(); err != nil {
		panic(err)
	}

	ui.Handle("/sys/kbd/<escape>", func(ui.Event) {
		// quit
		ui.StopLoop()
		onClose()
	})

	// try to make a table with one row per message
	chat.messages = [][]string{}
	chat.messageWindow = ui.NewTable()
	chat.messageWindow.Rows = chat.messages // type [][]string
	chat.messageWindow.FgColor = ui.ColorWhite
	chat.messageWindow.BgColor = ui.ColorDefault
	// Leave 3 lines at the bottom of the terminal for the input box.
	chat.messageWindow.Height = ui.TermHeight() - 3
	chat.messageWindow.Width = ui.TermWidth()
	chat.messageWindow.Y = 0
	chat.messageWindow.X = 0
	chat.messageWindow.Border = true
	chat.messageWindow.Separator = true
	ui.Render(chat.messageWindow)

	inputBox := ui.NewPar("")
	inputBox.Height = 3
	inputBox.Width = ui.TermWidth()
	inputBox.TextFgColor = ui.ColorWhite
	inputBox.BorderLabel = "Input"
	inputBox.BorderFg = ui.ColorCyan
	inputBox.Y = chat.messageWindow.Y + chat.messageWindow.Height
	ui.Render(inputBox)

	// Enter submits the current line and clears the box.
	ui.Handle("/sys/kbd/<enter>", func(ui.Event) {
		onInput(inputBox.Text)
		inputBox.Text = ""
		ui.Render(inputBox)
	})

	ui.Handle("/sys/kbd/<up>", func(ui.Event) {
		// scroll up, but never past the oldest message
		if chat.scrolledRows < (len(chat.messages) - chat.Height()) {
			chat.scrolledRows += 1
			chat.renderMessages()
		}
	})

	ui.Handle("/sys/kbd/<down>", func(ui.Event) {
		// scroll down, clamped at the bottom
		chat.scrolledRows -= 1
		if chat.scrolledRows < 0 {
			chat.scrolledRows = 0
		}
		chat.renderMessages()
	})

	ui.Handle("/sys/kbd/C-8", func(ui.Event) {
		// backspace: drop the last typed character
		if length := len(inputBox.Text); length > 0 {
			inputBox.Text = inputBox.Text[:length-1]
			ui.Render(inputBox)
		}
	})

	ui.Handle("/sys/kbd/<space>", func(ui.Event) {
		inputBox.Text += " "
		ui.Render(inputBox)
	})

	// Fallback handler: append any single printable key to the input.
	ui.Handle("/sys/kbd/", func(e ui.Event) {
		event := e.Data.(ui.EvtKbd)
		if len(event.KeyStr) == 1 {
			inputBox.Text += event.KeyStr
			ui.Render(inputBox)
		}
	})

	// Run the event loop without blocking the caller.
	go func() {
		ui.Loop()
	}()
}
// Close shuts down termui and restores the terminal state.
func (chat *ChatWindow) Close() {
	ui.Close()
}
|
package main
import (
"flag"
"fmt"
"gocomp/compiler/lexer"
"io/ioutil"
"log"
"os"
)
// filepath is the input file path, set by the -f flag.
// NOTE(review): this name shadows the standard "path/filepath" package
// should it ever be imported here; consider renaming.
var (
	filepath string
)

// init registers the command-line flags before main runs.
func init() {
	flag.StringVar(&filepath, "f", "", "Path to needed file")
}
// readFile returns the entire contents of the file at path as a string.
// FIX: the original opened the file with os.Open and never closed it,
// leaking the file descriptor; ioutil.ReadFile opens, reads and closes
// in one call.
func readFile(path string) (string, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// main reads the file named by -f and lexes it, printing each token and
// its identifier until EOF.
func main() {
	flag.Parse()
	data, err := readFile(filepath)
	if err != nil {
		log.Fatal(err)
	}
	lex := lexer.NewLexer(data)
	// NOTE(review): the current token is printed before NextToken is
	// called, so this assumes NewLexer leaves the first token ready —
	// confirm against the lexer package.
	for lex.TokenIdentifier != lexer.TokenEOF {
		fmt.Println(lex.Token, "\t", lex.TokenIdentifier)
		lex.NextToken()
	}
}
|
package wrpc
import (
"sync"
"log"
"github.com/samuel/go-zookeeper/zk"
)
// Register publishes a Server's path into ZooKeeper and re-registers it
// as needed.
type Register struct {
	ser   *Server
	mutex sync.Mutex // serializes concurrent registration attempts
}

// NewRegister returns a Register for the given server.
func NewRegister(ser *Server) *Register {
	return &Register{ser: ser}
}
// registe registers this server's path in ZooKeeper as an ephemeral
// node. If the configured IP is empty it re-resolves the local IP and
// removes a stale node left under the old path.
// (The misspelled name is kept so existing callers stay valid.)
func (r *Register) registe() {
	// FIX: unlock via defer so the mutex is released even on panic.
	r.mutex.Lock()
	defer r.mutex.Unlock()

	zkc := r.ser.GetZkClient()
	conf := r.ser.GetConf()

	var path string
	if conf.GetIp() != "" {
		path = zkc.GetAbsolutePath(conf.GetPath())
	} else {
		oldPath := zkc.GetAbsolutePath(conf.GetPath())
		// Re-resolve the local IP; the registration path depends on it.
		conf.SetLocalIp()
		path = zkc.GetAbsolutePath(conf.GetPath())
		if path != oldPath {
			// The local IP changed: remove the stale registration node.
			existsOldPath, stat, _ := zkc.GetConn().Exists(oldPath)
			if existsOldPath {
				// FIX: the original deleted the NEW path here instead of
				// oldPath (while using oldPath's stat version).
				if derr := zkc.GetConn().Delete(oldPath, stat.Version); derr != nil {
					log.Println("ERROR- ", derr)
				} else {
					log.Println("delete old path: ", oldPath)
				}
			}
		}
	}

	log.Println("prelook register path: ", path)
	exists, _, _ := zkc.GetConn().Exists(path)
	if !exists {
		// Ephemeral: the node disappears when the session ends.
		if _, err := zkc.GetConn().Create(path, nil, zk.FlagEphemeral, zk.WorldACL(zk.PermAll)); err != nil {
			log.Println("ERROR- ", err)
		} else {
			log.Println("registe server success.")
		}
	}
}
// RegisteAndListen ensures the parent path exists, registers this
// server, and installs a zk event callback that re-registers on
// reconnect (recreating the ephemeral node). It is a no-op when no
// ZooKeeper client is configured.
func (r *Register) RegisteAndListen() {
	zkc := r.ser.GetZkClient()
	if zkc == nil {
		return
	}
	conf := r.ser.GetConf()
	zkc.EnsurePath(zkc.GetAbsolutePath(conf.GetParentPath()), 0)
	r.registe()
	// Log connection-state transitions; re-register after reconnect.
	fun := func(event zk.Event) {
		switch event.State {
		case zk.StateConnected:
			log.Println("Connection is connected.")
			go r.registe()
		case zk.StateDisconnected:
			log.Println("Connection is disconnected.")
		case zk.StateExpired:
			log.Println("Connection session is expired.")
		case zk.StateConnecting:
			log.Println("Connection is connecting.")
		default:
			log.Println("Connection is unknown.")
		}
	}
	// NOTE(review): zk.WithEventCallback is a connection option;
	// applying it to an already-established connection may not take
	// effect — verify against the go-zookeeper API.
	option := zk.WithEventCallback(fun)
	option(zkc.GetConn())
}
// Close stops the wrapped server, if one is present.
func (r *Register) Close() {
	if r.ser == nil {
		return
	}
	r.ser.Stop()
}
|
//判断链表是有存在环
package main
import (
"fmt"
)
// LNode is a singly linked list node.
type LNode struct {
	Data int
	Next *LNode
}

// IsLoop reports whether the list contains a cycle using Floyd's
// slow/fast pointers. It returns the node where the pointers meet
// (somewhere inside the cycle), or nil when there is no cycle.
//
// Both pointers start at head.Next, so FindLoopNode must start its
// second walk at head.Next as well.
func IsLoop(head *LNode) *LNode {
	// FIX: the original returned head itself for a single-node list,
	// which callers would misread as "has a loop".
	if head == nil || head.Next == nil {
		return nil
	}
	slow := head.Next
	fast := head.Next
	for fast != nil && fast.Next != nil {
		slow = slow.Next
		fast = fast.Next.Next
		if slow == fast {
			return slow
		}
	}
	return nil
}

// FindLoopNode returns the entry node of the cycle, given the meeting
// node produced by IsLoop. Returns nil when meetNode is nil (no cycle),
// instead of dereferencing nil as the original would.
func FindLoopNode(head *LNode, meetNode *LNode) *LNode {
	if head == nil || meetNode == nil {
		return nil
	}
	first := head.Next
	second := meetNode
	for first != second {
		fmt.Println(first.Data, second.Data)
		first = first.Next
		second = second.Next
	}
	return first
}
// main builds a list 0 -> 3 -> 4 -> ... -> 9 whose tail loops back to
// the node with value 5, then detects the cycle and locates its entry.
func main() {
	nodes := make([]*LNode, 7)
	for i := range nodes {
		nodes[i] = &LNode{Data: i + 3}
		if i > 0 {
			nodes[i-1].Next = nodes[i]
		}
	}
	// Close the cycle: last node points back to the third (value 5).
	nodes[6].Next = nodes[2]
	head := &LNode{Data: 0, Next: nodes[0]}
	meetNode := IsLoop(head)
	FindLoopNode(head, meetNode)
}
|
package cloudflare_test
import (
"context"
"encoding/json"
"fmt"
"log"
cloudflare "github.com/cloudflare/cloudflare-go"
)
// ExampleAPI_CreateLogpushJob creates a disabled Logpush job for a zone.
// FIX: the LogpullOptions string contained mojibake ("×tamps") where
// "&timestamps" belongs — compare the intact string in this file's
// ExampleLogpushJob_MarshalJSON Output.
func ExampleAPI_CreateLogpushJob() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	job, err := api.CreateLogpushJob(context.Background(), cloudflare.ZoneIdentifier(zoneID), cloudflare.CreateLogpushJobParams{
		Enabled:         false,
		Name:            "example.com",
		LogpullOptions:  "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
		DestinationConf: "s3://mybucket/logs?region=us-west-2",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", job)
}

// ExampleAPI_UpdateLogpushJob updates an existing Logpush job by ID.
func ExampleAPI_UpdateLogpushJob() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	err = api.UpdateLogpushJob(context.Background(), cloudflare.ZoneIdentifier(zoneID), cloudflare.UpdateLogpushJobParams{
		ID:              1,
		Enabled:         true,
		Name:            "updated.com",
		LogpullOptions:  "fields=RayID,ClientIP,EdgeStartTimestamp",
		DestinationConf: "gs://mybucket/logs",
	})
	if err != nil {
		log.Fatal(err)
	}
}

// ExampleAPI_ListLogpushJobs lists every Logpush job in a zone.
func ExampleAPI_ListLogpushJobs() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	jobs, err := api.ListLogpushJobs(context.Background(), cloudflare.ZoneIdentifier(zoneID), cloudflare.ListLogpushJobsParams{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", jobs)
	for _, r := range jobs {
		fmt.Printf("%+v\n", r)
	}
}

// ExampleAPI_GetLogpushJob fetches a single Logpush job by ID.
func ExampleAPI_GetLogpushJob() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	job, err := api.GetLogpushJob(context.Background(), cloudflare.ZoneIdentifier(zoneID), 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", job)
}

// ExampleAPI_DeleteLogpushJob deletes a Logpush job by ID.
func ExampleAPI_DeleteLogpushJob() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	err = api.DeleteLogpushJob(context.Background(), cloudflare.ZoneIdentifier(zoneID), 1)
	if err != nil {
		log.Fatal(err)
	}
}
// ExampleAPI_GetLogpushOwnershipChallenge requests an ownership
// challenge for a Logpush destination.
func ExampleAPI_GetLogpushOwnershipChallenge() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	ownershipChallenge, err := api.GetLogpushOwnershipChallenge(context.Background(), cloudflare.ZoneIdentifier(zoneID), cloudflare.GetLogpushOwnershipChallengeParams{DestinationConf: "destination_conf"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", ownershipChallenge)
}

// ExampleAPI_ValidateLogpushOwnershipChallenge validates a previously
// issued destination ownership challenge.
func ExampleAPI_ValidateLogpushOwnershipChallenge() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	isValid, err := api.ValidateLogpushOwnershipChallenge(context.Background(), cloudflare.ZoneIdentifier(zoneID), cloudflare.ValidateLogpushOwnershipChallengeParams{
		DestinationConf:    "destination_conf",
		OwnershipChallenge: "ownership_challenge",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", isValid)
}

// ExampleAPI_CheckLogpushDestinationExists checks whether a Logpush
// destination is already in use.
func ExampleAPI_CheckLogpushDestinationExists() {
	api, err := cloudflare.New(apiKey, user)
	if err != nil {
		log.Fatal(err)
	}
	zoneID, err := api.ZoneIDByName(domain)
	if err != nil {
		log.Fatal(err)
	}
	exists, err := api.CheckLogpushDestinationExists(context.Background(), cloudflare.ZoneIdentifier(zoneID), "destination_conf")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", exists)
}
// ExampleLogpushJob_MarshalJSON shows how a LogpushJob with filters
// serializes, the filter being embedded as an escaped JSON string.
// FIX: the LogpullOptions literal contained mojibake ("×tamps") where
// "&timestamps" belongs — the `// Output:` line below (which encodes
// "\u0026timestamps") proves the intended value.
func ExampleLogpushJob_MarshalJSON() {
	job := cloudflare.LogpushJob{
		Name:            "example.com static assets",
		LogpullOptions:  "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339&CVE-2021-44228=true",
		Dataset:         "http_requests",
		DestinationConf: "s3://<BUCKET_PATH>?region=us-west-2/",
		Filter: &cloudflare.LogpushJobFilters{
			Where: cloudflare.LogpushJobFilter{
				And: []cloudflare.LogpushJobFilter{
					{Key: "ClientRequestPath", Operator: cloudflare.Contains, Value: "/static\\"},
					{Key: "ClientRequestHost", Operator: cloudflare.Equal, Value: "example.com"},
				},
			},
		},
	}
	jobstring, err := json.Marshal(job)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", jobstring)
	// Output: {"filter":"{\"where\":{\"and\":[{\"key\":\"ClientRequestPath\",\"operator\":\"contains\",\"value\":\"/static\\\\\"},{\"key\":\"ClientRequestHost\",\"operator\":\"eq\",\"value\":\"example.com\"}]}}","dataset":"http_requests","enabled":false,"name":"example.com static assets","logpull_options":"fields=RayID,ClientIP,EdgeStartTimestamp\u0026timestamps=rfc3339\u0026CVE-2021-44228=true","destination_conf":"s3://\u003cBUCKET_PATH\u003e?region=us-west-2/"}
}
|
package registry
import (
"net"
"net/http"
"net/url"
"time"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/sockets"
"golang.docker.com/go-docker/api/types"
"golang.docker.com/go-docker/registry/auth"
"golang.docker.com/go-docker/registry/auth/challenge"
"golang.docker.com/go-docker/registry/transport"
)
// NewDefaultRegistry is the simplest way to instantiate a Registry.
// Useful when talking to server "https://registry-1.docker.io"
// If authConfig is nil, push is not authorized and only publicly available content can be pulled.
//
// FIX: the original passed the two-valued defaultTransport(...) call
// directly as an argument to NewRegistry, which does not compile in Go;
// the transport error must be handled first.
// NOTE(review): assumes NewRegistry has signature
// (string, http.RoundTripper) (Registry, error) — confirm.
func NewDefaultRegistry(server string, authConfig *types.AuthConfig) (Registry, error) {
	rt, err := defaultTransport(server, authConfig)
	if err != nil {
		return nil, err
	}
	return NewRegistry(server, rt)
}
// defaultTransport builds an authenticated RoundTripper for the given
// registry server. It pings the registry's /v2/ endpoint to collect
// auth challenges, then installs a token-based authorizer (with push
// scope and credentials when authConfig is non-nil).
//
// FIXES: the original referenced the undefined `baseURL` (now built
// from server), and carried leftover copied code referencing undefined
// `repoName`/`repoInfo`/`creds` plus an unused `scope` variable — all
// compile errors — which also appended a second, duplicate authorizer.
func defaultTransport(server string, authConfig *types.AuthConfig) (http.RoundTripper, error) {
	direct := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}

	// TODO(dmcgowan): Call close idle connections when complete and use keep alive
	base := &http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		Dial:                direct.Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     nil,
		DisableKeepAlives:   true,
	}

	// Best effort: prefer a proxy-aware dialer when one is configured.
	if proxyDialer, err := sockets.DialerFromEnvironment(direct); err == nil {
		base.Dial = proxyDialer.Dial
	}

	modifiers := []transport.RequestModifier{
		transport.NewHeaderRequestModifier(http.Header{
			"User-Agent": []string{"go-docker-v1"},
		}),
	}

	authTransport := transport.NewTransport(base, modifiers...)
	pingClient := &http.Client{
		Transport: authTransport,
		Timeout:   5 * time.Second,
	}

	// Ping /v2/ to learn the registry's authentication challenges.
	req, err := http.NewRequest("GET", server+"/v2/", nil)
	if err != nil {
		return nil, err
	}
	challengeManager := challenge.NewSimpleManager()
	resp, err := pingClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if err := challengeManager.AddResponse(resp); err != nil {
		return nil, err
	}

	tokenHandlerOptions := auth.TokenHandlerOptions{
		Transport: authTransport,
		Scopes:    []auth.Scope{"pull"},
		ClientID:  registry.AuthClientID,
	}
	if authConfig != nil {
		// Credentials enable push in addition to pull.
		tokenHandlerOptions.Scopes = append(tokenHandlerOptions.Scopes, "push")
		tokenHandlerOptions.Credentials = staticCredentialStore{authConfig}
	}
	tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, auth.NewBasicHandler(nil)))

	return transport.NewTransport(base, modifiers...), nil
}
// staticCredentialStore serves a fixed AuthConfig as registry
// credentials; all getters return zero values when no config is set.
type staticCredentialStore struct {
	auth *types.AuthConfig
}

// Basic returns the username/password pair for basic auth.
func (scs staticCredentialStore) Basic(*url.URL) (string, string) {
	if scs.auth != nil {
		return scs.auth.Username, scs.auth.Password
	}
	return "", ""
}

// RefreshToken returns the stored identity token.
func (scs staticCredentialStore) RefreshToken(*url.URL, string) string {
	if scs.auth != nil {
		return scs.auth.IdentityToken
	}
	return ""
}

// SetRefreshToken is a no-op: the store is static.
func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) {
}
|
package main
import (
"fmt"
)
// calc prints the operands labelled by index, then returns their sum.
func calc(index string, a, b int) int {
	sum := a + b
	fmt.Println(index, a, b, sum)
	return sum
}
// Users is a minimal demo struct holding a single name.
type Users struct {
	Name string
}
// main builds a Users value, prints it, then passes it through the
// interface type assertion demo in xx.
func main() {
	u := Users{"123"}
	fmt.Println(u)
	xx(u)
}
// xx demonstrates a checked type assertion: it prints whether x holds a
// Users value, then the (possibly zero) asserted result.
func xx(x interface{}) {
	u, ok := x.(Users)
	fmt.Println(ok)
	fmt.Println(u)
}
|
// Copyright 2013 Walter Schulze
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package funcs
import (
"fmt"
"reflect"
"strings"
"github.com/katydid/katydid/relapse/types"
)
// errTyp and funcTyp cache the reflected error and funcs.Func interface
// types used to validate registered constructors.
var errTyp = reflect.TypeOf((*error)(nil)).Elem()
var funcTyp = reflect.TypeOf((*Func)(nil)).Elem()
// Register registers a function as function that can composed.
//
// fnc must be a constructor function: its first return value must
// implement funcs.Func (i.e. have an Eval method), and it may return an
// error as an optional second value. Constructor parameters that
// themselves implement funcs.Func become the function's inputs; other
// parameters are skipped. Any violation panics, as registration is
// expected to happen at init time.
func Register(name string, fnc interface{}) {
	typ := reflect.TypeOf(fnc)
	if typ.Kind() != reflect.Func {
		panic(fmt.Sprintf("expecting constructor function for %s, but got %T", name, fnc))
	}
	// At most two return values: the function and an optional error.
	if typ.NumOut() == 2 {
		if !typ.Out(1).Implements(errTyp) {
			panic(fmt.Sprintf("the second return type of the constructor for %s is not an error", name))
		}
	}
	if typ.NumOut() > 2 {
		panic(fmt.Sprintf("the constructor for %s has more than 2 return values", name))
	}
	if typ.NumOut() == 0 {
		panic(fmt.Sprintf("the constructor for %s has no return values", name))
	}
	eval, ok := typ.Out(0).MethodByName("Eval")
	if !ok {
		panic(fmt.Sprintf("the constructor for %s returns a type without an Eval method", name))
	}
	if !typ.Out(0).Implements(funcTyp) {
		panic(fmt.Sprintf("the constructor for %s returns a type that does not implement funcs.Func", name))
	}
	// The registered function's output type is Eval's first result.
	returnType := eval.Type
	ins := typ.NumIn()
	fMaker := &Maker{
		Name:   name,
		Out:    types.FromGo(returnType.Out(0)),
		newfnc: fnc,
	}
	// Collect input types: only parameters with an Eval method count;
	// those that have one must also implement funcs.Func.
	for i := 0; i < ins; i++ {
		meth, ok := typ.In(i).MethodByName("Eval")
		if !ok {
			continue
		}
		if !typ.In(i).Implements(funcTyp) {
			panic(fmt.Sprintf("the constructor for %s has an input parameter (number %d) that does not implement funcs.Func", name, i))
		}
		fMaker.InConst = append(fMaker.InConst, IsConst(typ.In(i)))
		inType := types.FromGo(meth.Type.Out(0))
		fMaker.In = append(fMaker.In, inType)
	}
	globalFactory.register(fMaker)
}
// IsConst returns whether a reflected type is a function that is
// actually a constant value (one of the known typConst* types).
func IsConst(typ reflect.Type) bool {
	switch typ {
	case typConstDouble, typConstInt, typConstUint, typConstBool,
		typConstString, typConstBytes, typConstDoubles, typConstInts,
		typConstUints, typConstBools, typConstStrings, typConstListOfBytes:
		return true
	}
	return false
}
// GetMaker returns the Maker (function creator) of the function given
// the function name and parameter types.
func GetMaker(name string, ins ...types.Type) (*Maker, error) {
	return globalFactory.getMaker(name, ins...)
}
// errUnknownFunction reports a lookup for a function name/signature
// that was never registered.
type errUnknownFunction struct {
	f   string
	ins []string
}

// newErrUnknownFunction builds the error from a function name and the
// stringified parameter types.
func newErrUnknownFunction(name string, ins []types.Type) error {
	params := make([]string, len(ins))
	for i, typ := range ins {
		params[i] = typ.String()
	}
	return &errUnknownFunction{name, params}
}

// Error implements the error interface.
func (e *errUnknownFunction) Error() string {
	return "relapse/funcs: unknown function: " + e.f + "(" + strings.Join(e.ins, ", ") + ")"
}
// globalFactory holds every registered function maker.
var globalFactory = newFactory()

// Factory maps a function name to the makers registered under it — one
// per distinct parameter list.
type Factory map[string][]*Maker

// newFactory returns an empty Factory.
func newFactory() Factory {
	return make(map[string][]*Maker)
}

// register adds a maker under its name. Appending to the nil slice of a
// missing key allocates it, so no existence check is needed.
func (fac Factory) register(m *Maker) {
	fac[m.Name] = append(fac[m.Name], m)
}

// getMaker finds the maker whose name and input types match exactly.
func (fac Factory) getMaker(name string, ins ...types.Type) (*Maker, error) {
	candidates, ok := fac[name]
	if !ok {
		return nil, newErrUnknownFunction(name, ins)
	}
candidates:
	for _, m := range candidates {
		if len(m.In) != len(ins) {
			continue
		}
		for i := range m.In {
			if m.In[i] != ins[i] {
				continue candidates
			}
		}
		return m, nil
	}
	return nil, newErrUnknownFunction(name, ins)
}
// Maker describes a registered function: its name, input/output types,
// which inputs are constants, and the constructor (newfnc) used by New
// to instantiate it.
type Maker struct {
	Name    string
	In      []types.Type
	InConst []bool
	Out     types.Type
	newfnc  interface{}
}
// String renders the maker as a Go-like function signature.
// (Receiver renamed from the non-idiomatic `this`.)
func (f *Maker) String() string {
	params := make([]string, len(f.In))
	for i, typ := range f.In {
		params[i] = typ.String()
	}
	return fmt.Sprintf("func %v(%v) %v", f.Name, strings.Join(params, ","), f.Out.String())
}
// New calls the registered constructor with the given argument values
// and returns the resulting function instance, together with the
// constructor's error when it supplied a non-nil one.
func (f *Maker) New(values ...interface{}) (interface{}, error) {
	ctor := reflect.ValueOf(f.newfnc)
	args := make([]reflect.Value, len(values))
	for i, v := range values {
		args[i] = reflect.ValueOf(v)
	}
	out := ctor.Call(args)
	if len(out) == 2 && !out[1].IsNil() {
		return out[0].Interface(), out[1].Interface().(error)
	}
	return out[0].Interface(), nil
}
// IsFalse returns whether a function is a false constant.
func IsFalse(fn Bool) bool {
	v, ok := fn.(*constBool)
	if !ok {
		return false
	}
	// Idiomatic form of "v.v == false".
	return !v.v
}
// IsTrue returns whether a function is a true constant.
func IsTrue(fn Bool) bool {
	v, ok := fn.(*constBool)
	if !ok {
		return false
	}
	// Idiomatic form of "v.v == true".
	return v.v
}
// Equal reports whether two Comparable functions are equal, using the
// hash as a cheap negative check before the full Compare.
func Equal(l, r Comparable) bool {
	if l.Hash() != r.Hash() {
		return false
	}
	return l.Compare(r) == 0
}
// IsSimpleEqual returns whether the input function is a simple equal expression,
// where one argument is a constant and the other is a variable.
// Only string, int and uint equality nodes are considered simple.
func IsSimpleEqual(f Bool) bool {
	switch eq := f.(type) {
	case *stringEq:
		_, v := isVarConst(eq.V1, eq.V2)
		return v
	case *intEq:
		_, v := isVarConst(eq.V1, eq.V2)
		return v
	case *uintEq:
		_, v := isVarConst(eq.V1, eq.V2)
		return v
	}
	return false
}
// isVarConst reports whether exactly one of a/b is a constant and the
// other a variable, returning the constant when so. When a is itself
// a constant but b is not a variable, b is deliberately not inspected.
func isVarConst(a, b interface{}) (aConst, bool) {
	if c, ok := a.(aConst); ok {
		if _, isVar := b.(aVariable); isVar {
			return c, true
		}
		return nil, false
	}
	if c, ok := b.(aConst); ok {
		if _, isVar := a.(aVariable); isVar {
			return c, true
		}
	}
	return nil, false
}
// Hash calculates a hash for a function, given a name and its parameters,
// using the classic 17/31 polynomial accumulation scheme.
func Hash(name string, hs ...Hashable) uint64 {
	h := uint64(17)
	h = 31*h + deriveHashString(name)
	for _, hashable := range hs {
		h = 31*h + hashable.Hash()
	}
	return h
}
// hashWithId mixes a numeric id and the hashes of hs into one 64-bit
// value using the same 17/31 polynomial scheme as Hash.
func hashWithId(id uint64, hs ...Hashable) uint64 {
	acc := uint64(17)*31 + id
	for _, item := range hs {
		acc = 31*acc + item.Hash()
	}
	return acc
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//58. Length of Last Word
//Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string.
//If the last word does not exist, return 0.
//Note: A word is defined as a character sequence consists of non-space characters only.
//Example:
//Input: "Hello World"
//Output: 5
//func lengthOfLastWord(s string) int {
//}
// Time Is Money
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"path/filepath"
"strings"
)
// configPath is reserved for a configurable config root.
// NOTE(review): currently unused by the visible code.
var configPath string

// config caches passwords loaded from disk, keyed by request path.
var config = map[string]string{}
// authHandler authorizes a request against the password configured
// for its URL path.
// NOTE(review): when no config entry exists (or loading fails) the
// request is ALLOWED — fail-open, per the commented-out http.Error
// below. Confirm this is intended.
// NOTE(review): pw == providedPw is not a constant-time comparison.
func authHandler(w http.ResponseWriter, r *http.Request) {
	pw, err := getConfig(r.URL.Path)
	if err != nil || pw == "" {
		//http.Error(w, "no authentication config found for "+r.URL.Path+"!", http.StatusForbidden)
		fmt.Println("No authentication config found for " + r.URL.Path)
		fmt.Println("Authorization successful for " + r.URL.Path)
		fmt.Fprintf(w, "Authorization successful for "+r.URL.Path)
		return
	}
	// Stored passwords may carry newlines from the password file.
	pw = strings.Replace(pw, "\n", "", -1)
	r.ParseForm()
	providedPw := r.FormValue("pw")
	if pw == providedPw {
		fmt.Println("Authorization successful for " + r.URL.Path)
		fmt.Fprintf(w, "Authorization successful for "+r.URL.Path)
		return
	}
	fmt.Println("Authorization failed for "+r.URL.Path, http.StatusForbidden)
	http.Error(w, "Authorization failed for "+r.URL.Path, http.StatusForbidden)
}
// getConfig returns the password configured for key, consulting the
// in-memory cache first and loading from disk on a miss.
// Fix: the original returned "val, nil" unconditionally, silently
// swallowing load errors; the error is now propagated (the caller
// treats any error as "no config", so behavior there is unchanged).
func getConfig(key string) (string, error) {
	if entry, ok := config[key]; ok {
		return entry, nil
	}
	val, err := loadConfig(key)
	if err != nil {
		return "", err
	}
	config[key] = val
	return val, nil
}
// loadConfig reads the password file for key under /etc/auth_config.
// Security fix: the key (taken from the request URL path) is rooted
// before cleaning so that ".." segments cannot escape the config
// directory; filepath.Clean("/"+key) resolves any traversal within
// an absolute path, e.g. "/../../x" becomes "/x".
func loadConfig(key string) (string, error) {
	cleaned := filepath.Clean("/" + key)
	filename := filepath.Join("/etc/auth_config", cleaned, "password")
	fmt.Println("Loading config from " + filename)
	body, err := ioutil.ReadFile(filename)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// main starts the HTTP authentication server on port 80.
func main() {
	fmt.Println("Starting authentication server.")
	http.HandleFunc("/publish", authHandler)
	http.HandleFunc("/play", authHandler)
	// log.Fatal exits the process when ListenAndServe returns; the
	// original "Shutdown..." print after it was unreachable and has
	// been removed.
	log.Fatal(http.ListenAndServe(":80", nil))
}
|
package controllers
import (
"os"
"path/filepath"
. "mick/models"
"github.com/jinzhu/gorm"
)
// CheckImageFile scans ./images/ and records every regular .jpg/.png
// file that is not yet present in the photos table.
func CheckImageFile(db *gorm.DB) {
	dir, err := os.Open("./images/")
	CheckErr(err)
	defer dir.Close()

	entries, err := dir.Readdir(-1)
	CheckErr(err)

	var photo Photo
	for _, entry := range entries {
		if !entry.Mode().IsRegular() {
			continue
		}
		switch filepath.Ext(entry.Name()) {
		case ".jpg", ".png":
			if res := db.Where("name = ?", entry.Name()).Find(&photo); res.RecordNotFound() {
				db.Save(&Photo{Name: entry.Name()})
			}
		}
	}
}
|
package job_build
import (
"github.com/yangqinjiang/mycrontab/worker/common"
)
// JobResultPusher is implemented by components that publish the
// result of a finished job execution.
type JobResultPusher interface {
	PushResult(jobResult *common.JobExecuteResult)
}
|
package ads
import (
"encoding/json"
"fmt"
"log"
"regexp"
"sync"
"time"
)
// portOpen tracks whether the local ADS AMS port has been opened.
// NOTE(review): it is not referenced by the visible code.
var portOpen bool

// Connection represents one ADS connection to a PLC, together with
// its uploaded symbol and data-type tables.
type Connection struct {
	addr                *AmsAddr
	port                int
	symbolsLoaded       bool
	Symbols             map[string]*ADSSymbol              // symbol tree keyed by full name
	datatypes           map[string]ADSSymbolUploadDataType // uploaded data-type metadata
	handles             map[uint32]*ADSSymbol              // value handle -> symbol
	notificationHandles map[uint32]*ADSSymbol              // notification handle -> symbol
}
// ADSSymbol is one node in the PLC symbol tree: either a leaf value
// or a struct/array with children in Childs.
type ADSSymbol struct {
	Connection          *Connection
	Self                *ADSSymbol
	FullName            string
	LastUpdateTime      int64
	MinUpdateInterval   int64
	Name                string
	DataType            string
	Comment             string
	Handle              uint32 // value handle; 0 means not yet acquired
	NotificationHandle  uint32
	ChangedHandlers     []func(ADSSymbol) // Fix: doesn't allow change values
	Group               uint32
	Offset              uint32
	Length              uint32
	Value               string // last parsed value, as a string
	Valid               bool
	Changed             bool
	Parent              *ADSSymbol
	Childs              map[string]*ADSSymbol // child symbols, keyed by name
}
// lock serializes access to the package-level ADS state.
// The init() function was replaced with a direct initializer — same
// effect, less indirection, and the zero-delay init ordering concern
// goes away.
var lock = &sync.Mutex{}
// AddLocalConnection opens an ADS connection to the local PLC runtime
// (AMS port 851), loads its symbol tables and registers it in the
// global connections list.
func AddLocalConnection() (conn *Connection, err error) {
	localConnection := Connection{}
	open, err := adsAmsPortEnabled()
	if err != nil {
		return nil, err
	}
	if !open {
		adsPortOpen()
	}
	localConnection.addr = &AmsAddr{}
	// NOTE(review): any error from adsGetLocalAddress is ignored here.
	localConnection.adsGetLocalAddress()
	fmt.Printf("local connection at %d %d %d \n", localConnection.port, localConnection.addr.Port, localConnection.addr.NetId.B[0])
	// 851 — presumably the PLC runtime AMS port; TODO confirm.
	localConnection.addr.Port = 851
	localConnection.initializeConnection()
	err = localConnection.initializeConnVariables()
	if err != nil {
		return
	}
	connections = append(connections, &localConnection)
	conn = &localConnection
	return
}
// AddRemoteConnection opens an ADS connection to the PLC with the
// given AMS net ID, loads its symbol tables and registers it in the
// global connections list.
// Fix: the error from adsAmsPortEnabled is now checked BEFORE using
// open — previously a failed probe (open == false zero value) caused
// adsPortOpen to run before the error was returned. This also makes
// the flow consistent with AddLocalConnection.
func AddRemoteConnection(netID string) (conn *Connection, err error) {
	localConnection := Connection{}
	open, err := adsAmsPortEnabled()
	if err != nil {
		return nil, err
	}
	if !open {
		adsPortOpen()
	}
	localConnection.addr = &AmsAddr{}
	localConnection.setRemoteAddress(netID)
	fmt.Printf("remote connection at %d %d %d \n", localConnection.port, localConnection.addr.Port, localConnection.addr.NetId.B[0])
	localConnection.addr.Port = 851
	localConnection.initializeConnection()
	err = localConnection.initializeConnVariables()
	if err != nil {
		return
	}
	connections = append(connections, &localConnection)
	conn = &localConnection
	return conn, err
}
// initializeConnVariables uploads the PLC's data-type and symbol
// tables over the connection, failing on the first upload error.
func (localConnection *Connection) initializeConnVariables() error {
	uploadInfo, err := localConnection.getSymbolUploadInfo()
	if err != nil {
		return err
	}
	fmt.Println("uploadinfo loaded", uploadInfo.NDatatypeSize, uploadInfo.NSymSize)
	err = localConnection.uploadSymbolInfoDataTypes(uploadInfo.NDatatypeSize)
	if err != nil {
		return err
	}
	fmt.Println("uploadSymbolInfoDataTypes loaded")
	err = localConnection.uploadSymbolInfoSymbols(uploadInfo.NSymSize)
	if err != nil {
		return err
	}
	fmt.Println("uploadSymbolInfoSymbols loaded")
	return err
}
// initializeConnection allocates the connection's empty symbol,
// data-type and handle maps.
func (localConnection *Connection) initializeConnection() {
	localConnection.Symbols = make(map[string]*ADSSymbol)
	localConnection.datatypes = make(map[string]ADSSymbolUploadDataType)
	localConnection.handles = make(map[uint32]*ADSSymbol)
	localConnection.notificationHandles = make(map[uint32]*ADSSymbol)
}
// CloseAllConnections closes open connections and then closes the
// shared ADS port.
func CloseAllConnections() {
	for _, conn := range connections {
		conn.CloseConnection()
	}
	err := adsPortClose()
	if err != nil {
		log.Println(err)
	}
}
// CloseConnection releases every value handle and notification handle
// held by this connection.
// NOTE(review): the Printf format strings lack "\n", so successive
// messages run together on one line.
// NOTE(review): releasNotificationeHandle is a (misspelled) helper
// defined elsewhere in this package.
func (localConnection *Connection) CloseConnection() {
	for k := range localConnection.handles {
		err := localConnection.releaseHandle(k)
		if err != nil {
			fmt.Println(err)
		} else {
			fmt.Printf("deleted handle %d", k)
		}
	}
	for k := range localConnection.notificationHandles {
		err := localConnection.releasNotificationeHandle(k)
		if err != nil {
			fmt.Println(err)
		} else {
			fmt.Printf("deleted notification handle %d", k)
		}
	}
}
// showComments prints the data type's name followed, recursively, by
// the names of all of its children.
func showComments(info *ADSSymbolUploadDataType) {
	fmt.Println(info.Name)
	for _, child := range info.Childs {
		showComments(child)
	}
}
// showInfoComments prints the symbol's name followed, recursively, by
// the names of all of its children.
func showInfoComments(info *ADSSymbol) {
	fmt.Println(info.Name)
	for _, child := range info.Childs {
		showInfoComments(child)
	}
}
// AddNotification adds event notification to handle and registers the
// callback invoked on changes.
// NOTE(review): maxTime and cycleTime are passed in the opposite
// order from the parameter list — confirm against the signature of
// adsSyncAddDeviceNotificationReq. Also, time.Duration is nanoseconds
// and truncating it to uint32 overflows above ~4.29s — verify the
// expected unit.
func (node *ADSSymbol) AddNotification(mode uint32, cycleTime time.Duration, maxTime time.Duration, callback func(ADSSymbol)) {
	node.adsSyncAddDeviceNotificationReq(mode, uint32(maxTime), uint32(cycleTime))
	node.addCallback(callback)
}
// GetStringValue reads the symbol's current value from the PLC,
// updates node.Value via parse, and returns it in string form.
func (node *ADSSymbol) GetStringValue() (value string, err error) {
	// Lazily acquire a variable handle on first use.
	if node.Handle == 0 {
		err = node.getHandle()
	}
	if err != nil {
		return "", err
	}
	data, err := node.Connection.getValueByHandle(
		node.Handle,
		node.Length)
	if err != nil {
		return "", err
	}
	node.parse(data, 0)
	return node.Value, err
}
// Write writes value to the PLC symbol, lazily acquiring a handle on
// first use.
// Fix: the original ignored getHandle's error and then attempted the
// write with a zero handle; we now report the error (matching the
// fmt.Println style used elsewhere in this file) and skip the write.
func (node *ADSSymbol) Write(value string) {
	if node.Handle == 0 {
		if err := node.getHandle(); err != nil {
			fmt.Println(err)
			return
		}
	}
	node.writeToNode(value, 0)
}
// GetJSON renders the symbol tree as JSON. When onlyChanged is false
// the value is refreshed from the PLC first; when true, only fields
// marked Changed are included.
// Fix: a json.Marshal failure was silently swallowed (the function
// returned "", nil); the error is now propagated.
func (node *ADSSymbol) GetJSON(onlyChanged bool) (string, error) {
	if !onlyChanged {
		if _, err := node.GetStringValue(); err != nil {
			return "", err
		}
	}
	data := node.parseNode(onlyChanged)
	jsonData, err := json.Marshal(data)
	if err != nil {
		return "", err
	}
	return string(jsonData), nil
}
// parseNode converts the symbol subtree into a JSON-marshalable value:
// a leaf becomes its string Value, an inner node becomes a map of its
// children. Bracketed child names (array indices) are quoted so the
// resulting JSON keys are well-formed.
// NOTE(review): the two regexp branches below are identical except
// for the onlyChanged filter and compile their regexps on every
// child — candidates for deduplication and package-level compilation.
func (node *ADSSymbol) parseNode(onlyChanged bool) (rData interface{}) {
	if len(node.Childs) == 0 {
		rData = node.Value
		// node.Changed = false
	} else {
		// if strings.HasPrefix(node.DataType, "ARRAY") {
		// re := regexp.MustCompile(`\[.*\.\.(\d+)\]`)
		// arraySize, _ := strconv.Atoi(re.FindAllStringSubmatch(node.DataType, 1)[0][1])
		// arraySize++
		// localArray := make([]interface{}, arraySize)
		// for _, child := range node.Childs {
		// re := regexp.MustCompile(`\[(\d+)\]`)
		// arrayIndex, _ := strconv.Atoi(re.FindAllStringSubmatch(child.Name, 1)[0][1])
		// localArray[arrayIndex] = child.ParseNode()
		// }
		// rData = localArray
		// } else {
		localMap := make(map[string]interface{})
		for _, child := range node.Childs {
			if onlyChanged {
				if child.Changed {
					// Quote bracketed names: a[1] -> a"[1]".
					var re = regexp.MustCompile(`\[`)
					s := re.ReplaceAllString(child.Name, `"[`)
					re = regexp.MustCompile(`\]`)
					s = re.ReplaceAllString(s, `]"`)
					localMap[s] = child.parseNode(true)
					// child.Changed = false
				}
			} else {
				var re = regexp.MustCompile(`\[`)
				s := re.ReplaceAllString(child.Name, `"[`)
				re = regexp.MustCompile(`\]`)
				s = re.ReplaceAllString(s, `]"`)
				localMap[s] = child.parseNode(false)
			}
		}
		rData = localMap
		return
	}
	// if node.Parent == nil {
	// tempData := make(map[string]interface{})
	// tempData[node.Name] = rData
	// rData = tempData
	// }
	return
}
// return
// }
|
package main
import (
"fmt"
)
// romanToInt converts a Roman numeral to its integer value. When a
// smaller symbol precedes a larger one (the six subtractive cases,
// e.g. "IV"), its value is subtracted instead of added.
// Fix: guard against the empty string, which previously panicked on
// s[len(s)-1].
func romanToInt(s string) int {
	if len(s) == 0 {
		return 0
	}
	roman := map[byte]int{
		'I': 1,
		'V': 5,
		'X': 10,
		'L': 50,
		'C': 100,
		'D': 500,
		'M': 1000,
	}
	var ret int
	for i := 0; i < len(s)-1; i++ {
		if roman[s[i]] < roman[s[i+1]] {
			ret -= roman[s[i]]
		} else {
			ret += roman[s[i]]
		}
	}
	// The last symbol is always added.
	ret += roman[s[len(s)-1]]
	return ret
}
// main demonstrates romanToInt on a sample numeral (prints 58).
func main() {
	fmt.Println(romanToInt("LVIII"))
}
|
package base
import (
"hash/crc32"
"hash/crc64"
)
// HashTable maps a hash algorithm name to its implementation. Every
// result is widened to uint64 regardless of the algorithm's width.
// "crc64" is an alias for the ISO polynomial variant.
var HashTable = map[string]func(Buffer) uint64{
	"checksum8":  __hashChecksum8,
	"checksum16": __hashChecksum16,
	"checksum32": __hashChecksum32,
	"checksum64": __hashChecksum64,
	"crc32":      __hashCrc32,
	"crc64":      __hashCrc64ISO,
	"crc64.iso":  __hashCrc64ISO,
	"crc64.ecma": __hashCrc64ECMA,
}
// __hashChecksum8 sums all bytes of b modulo 256.
func __hashChecksum8(b Buffer) uint64 {
	var sum byte
	for _, v := range b {
		sum += v
	}
	return uint64(sum)
}
// __hashChecksum16 sums all bytes of b modulo 2^16.
func __hashChecksum16(b Buffer) uint64 {
	var sum uint16
	for _, v := range b {
		sum += uint16(v)
	}
	return uint64(sum)
}
// __hashChecksum32 sums all bytes of b modulo 2^32.
func __hashChecksum32(b Buffer) uint64 {
	var sum uint32
	for _, v := range b {
		sum += uint32(v)
	}
	return uint64(sum)
}
// __hashChecksum64 sums all bytes of b into a uint64.
// Fix: dropped the redundant uint64(r) conversion — r is already
// uint64.
func __hashChecksum64(b Buffer) uint64 {
	r := uint64(0)
	for _, v := range b {
		r += uint64(v)
	}
	return r
}
// __hashCrc32 computes the IEEE CRC-32 of b, widened to uint64.
// crc32.ChecksumIEEE is the one-shot equivalent of NewIEEE+Write+Sum32.
func __hashCrc32(b Buffer) uint64 {
	return uint64(crc32.ChecksumIEEE(b))
}
// crc64ISOTable is built once at package init: crc64.MakeTable
// allocates a table on every call, so rebuilding it per hash was
// wasted work on a hot path.
var crc64ISOTable = crc64.MakeTable(crc64.ISO)

// __hashCrc64ISO computes the CRC-64 (ISO polynomial) of b.
func __hashCrc64ISO(b Buffer) uint64 {
	return crc64.Checksum(b, crc64ISOTable)
}
// crc64ECMATable is built once at package init (see crc64ISOTable).
var crc64ECMATable = crc64.MakeTable(crc64.ECMA)

// __hashCrc64ECMA computes the CRC-64 (ECMA polynomial) of b.
func __hashCrc64ECMA(b Buffer) uint64 {
	return crc64.Checksum(b, crc64ECMATable)
}
|
// Copyright 2020 CUE Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/spf13/cobra"
)
// TODO: intersperse the examples at the end of the texts in the
// body of text to make things more concrete for the user early on?
// The current approach works well if users just print the text without
// "more" or "less", in which case the examples show more prominently.
// The user can then scroll up to get a more in-depth explanation. But is
// this how users use it?
// newHelpTopics returns the documentation-only subcommands that are
// registered under the given Command.
func newHelpTopics(c *Command) []*cobra.Command {
	topics := []*cobra.Command{
		inputsHelp,
		flagsHelp,
		filetypeHelp,
	}
	return topics
}
// inputsHelp documents how packages, patterns and files are specified
// on the command line. Fixes two typos in the user-facing help text:
// "denotes and external" -> "denotes an external" and the duplicated
// "also see also".
var inputsHelp = &cobra.Command{
	Use:   "inputs",
	Short: "package list, patterns, and files",
	Long: `Many commands apply to a set of inputs:
cue <command> [inputs]
The list [inputs] may specify CUE packages, CUE files, non-CUE
files or some combinations of those. An empty list specifies
the package in the current directory, provided there is a single
named package in this directory.
CUE packages are specified as an import path. An import path
that is a rooted path --one that begins with a "." or ".."
element-- is interpreted as a file system path and denotes the
package instance in that directory.
Otherwise, the import path P denotes an external package found
in cue.mod/{pkg|gen|usr}/P.
An import path may contain one or more "..." to match any
subdirectory: pkg/... matches all packages below pkg, including
pkg itself, while foo/.../bar matches all directories named bar
within foo. In all cases, directories containing cue.mod
directories are excluded from the result.
A package may also be specified as a list of .cue files.
The special symbol '-' denotes stdin or stdout and defaults to
the cue file type for stdin. For stdout, the default depends on
the cue command. A .cue file package may not be combined with
regular packages.
Non-cue files are interpreted based on their file extension or,
if present, an explicit file qualifier (see the "filetypes"
help topic). Non-cue files may be interpreted as concrete data
or schema. Schema are treated as single-file packages by default.
See the "filetypes" and "flags" help topics on how to combine
schema into a single package.
Data files can be combined into a single package, in which case
each file is unified into a defined location within this single
package. If a data file has multiple values, such as allowed
with JSON Lines or YAML, each value is interpreted as a separate
file.
The --schema/-d flag can be used to unify each data file against
a specific schema within a non-data package. For OpenAPI, the -d
flag specifies a schema name. For JSON Schema the -d flag
specifies a schema defined in "definitions". In all other cases,
the -d flag is a CUE expression that is evaluated within the
package.
Examples (see also the "flags" and "filetypes" help topics):
# Show the definition of each package named foo for each
# directory dir under path.
$ cue def ./path/.../dir:foo
# Unify each document in foo.yaml with the value Foo in pkg.
$ cue export ./pkg -d Foo foo.yaml
# Unify data.json with schema.json.
$ cue export data.json schema: schema.json
`,
}
// flagsHelp documents the flags used to combine files into packages.
// Fixes two typos in the user-facing help text: "a the --path" ->
// "the --path" and "-files" -> "--files".
var flagsHelp = &cobra.Command{
	Use:   "flags",
	Short: "common flags for composing packages",
	Long: `Non-CUE files are treated as individual files by
default, but can be combined into a single package using a
combination of the following flags.
Assigning values to a CUE path
The --path/-l flag can be used to specify a CUE path at which to
place a value. Each -l flag specifies either a CUE expression or
a CUE field (without the value following the colon), both of
which are evaluated within the value. Together, the -l flags
specify the path at increasingly deeper nesting. In the path
notation, path elements that end with a "::", instead of ":",
are created as definitions. An expression may refer to builtin
packages as long as the name can be uniquely identified.
The --with-context flag can be used to evaluate the label
expression within a struct of contextual data, instead of
within the value itself. This struct has the following fields:
{
// data holds the original source data
// (perhaps one of several records in a file).
data: _
// filename holds the full path to the file.
filename: string
// index holds the 0-based index element of the
// record within the file. For files containing only
// one record, this will be 0.
index: uint & <recordCount
// recordCount holds the total number of records
// within the file.
recordCount: int & >=1
}
Handling multiple documents or streams
To handle multi-document files, such as JSON Lines or YAML
files with document separators (---), the user must specify
the --path, --list, or --files flag.
The --path flag merges each element into a single package as
if each element was defined in a separate file. The --list flag
concatenates each entry in a file into a list.
Using --list flag in combination with the --path flag
concatenates entries with the same path into a list, instead of
unifying them.
Finally, the --files option causes each entry to be written to
a different file. The --files flag may only be used in
combination with the import command.
Examples:
# Put a value at a path based on its "kind" and "name" fields.
$ cue eval -l 'strings.ToLower(kind)' -l name foo.yaml
# Include a schema under the "myschema" field using the path notation.
$ cue eval -l myschema: schema: foo.json
# Base the path values on its kind and file name.
$ cue eval --with-context -l 'path.Base(filename)' -l data.kind foo.yaml
`,
}
// filetypeHelp documents the supported file types and qualifiers.
// Fixes typos in the user-facing text: "cue tools supports" ->
// "cue tool supports", "The user my override" -> "may override",
// and "an JSON Schema" -> "a JSON Schema".
var filetypeHelp = &cobra.Command{
	Use:   "filetypes",
	Short: "supported file types and qualifiers",
	Long: `The cue tool supports the following file types:
Tag Extensions Description
cue .cue CUE source files.
json .json JSON files.
yaml .yaml/.yml YAML files.
jsonl .jsonl/.ldjson Line-separated JSON values.
jsonschema JSON Schema.
openapi OpenAPI schema.
proto .proto Protocol Buffer definitions.
go .go Go source files.
text .txt Raw text file; the evaluated
value must be of type string.
OpenAPI, JSON Schema and Protocol Buffer definitions are
always interpreted as schema. YAML and JSON are always
interpreted as data. CUE and Go are interpreted as schema by
default, but may be selected to operate in data mode.
The cue tool will infer a file's type from its extension by
default. The user may override this behavior by using qualifiers.
A qualifier takes the form
<tag>{'+'<tag>}':'
For instance,
cue eval json: foo.data
specifies that 'foo.data' should be read as a JSON file. File
formats that do not have a default extension may be represented
in any data format using the same notation:
cue def jsonschema: bar.cue foo.yaml openapi+yaml: baz.def
interprets the files bar.cue and foo.yaml as data in the
respective formats encoding a JSON Schema, while 'baz.def' is
defined to be a YAML file which contents encode OpenAPI
definitions.
A qualifier applies to all files following it on the command line
until the next qualifier. The cue tool does not allow a ':' in
filenames.
The following tags can be used in qualifiers to further
influence input or output. For input these act as
restrictions, validating the input. For output these act
as filters, showing only the requested data and picking
defaults as requested.
Tag Description
data Require concrete input and output that does
not require any evaluation.
graph Like data, but allow references.
schema Export data and definitions.
Many commands also support the --out and --outfile/-o flags.
The --out flag specifies the output type using a qualifier
(without the ':'). The -o flag specifies an output file
possibly prefixed with a qualifier.
Examples:
# Interpret bar.cue and foo.yaml as OpenAPI data.
$ cue def openapi: bar.cue foo.yaml
# Write a CUE package as OpenAPI encoded as YAML, using
# an alternate file extension.
$ cue def -o openapi+yaml:foo.openapi
# Print the data for the current package as YAML.
$ cue export --out=yaml
# Print the string value of the "name" field as a string.
$ cue export -e name --out=text
# Write the string value of the "name" field to a text file.
$ cue export -e name -o=foo.txt
# Write the string value of the "name" field to a file foo.
$ cue export -e name -o=text:foo
`,
}
// TODO: tags
// - doc/nodoc
// - attr/noattr
// - id=<url>
// TODO: filetypes:
// - textpb
// - binpb
// TODO: document
// <tag>['='<value>]{'+'<tag>['='<value>]}':'
// TODO: cue.mod help topic
|
package handler
import (
"fmt"
"net/http"
"tpay_backend/cashier/internal/lang"
"tpay_backend/cashier/internal/svc"
"tpay_backend/utils"
"github.com/gin-gonic/gin"
)
// GetCurrentLang resolves the UI language for the request, starting
// from the application default and optionally honoring a language
// cookie.
// NOTE(review): the code that fills currentLangList from config is
// commented out below, so the list is always empty and the cookie
// branch's InSlice check can never match — the function currently
// always returns lang.DefaultLang with a nil list. Confirm whether
// that is intended.
func GetCurrentLang(c *gin.Context, svcCtx *svc.ServiceContext) (currentLang string, currentLangList []string) {
	// 1. Start from the application's built-in default language.
	currentLang = lang.DefaultLang
	//configModel := model.NewGlobalConfigModel(svcCtx.DbEngine)
	//
	//// 2.获取配置的语言列表
	//if list, err := configModel.CashierLangList(); err != nil {
	//	fmt.Printf("获取语言列表配置失败:%v\n", err)
	//} else {
	//	for _, v := range list {
	//		// 配置的语言必须在程序允许的范围内
	//		if utils.InSlice(v, lang.LangList) {
	//			currentLangList = append(currentLangList, v)
	//		}
	//	}
	//}
	//
	//// 3.获取配置的默认语言
	//if defaultLang, err := configModel.CashierDefaultLang(); err != nil {
	//	fmt.Printf("获取默认语言配置失败:%v\n", err)
	//} else {
	//	// 2.配置的默认语言不为空并且在允许的范围内,则为默认语言
	//	if defaultLang != "" && utils.InSlice(defaultLang, currentLangList) {
	//		currentLang = defaultLang
	//	}
	//}
	// 4. If the request carries a language cookie, prefer it when it
	// is in the allowed list.
	if cookieLang, err := c.Cookie(LangCookieName); err != nil {
		if err == http.ErrNoCookie {
			// Cookie absent: keep the default language.
		} else {
			fmt.Printf("获取cooke失败:%v\n", err)
		}
	} else {
		//fmt.Printf("lang_cookie:%v\n", cookieLang)
		if utils.InSlice(cookieLang, currentLangList) {
			currentLang = cookieLang
		}
	}
	return
}
|
package modules
import (
"encoding/json"
"time"
"github.com/fatih/structs"
"github.com/sirupsen/logrus"
)
// gen:qs
// NotificationType configures one kind of notification: which CRUD
// events it matches, how its title/message are templated, and an
// optional old/new value diff filter (see CheckDiff).
type NotificationType struct {
	ID               uint      `description:""`
	CreatedAt        time.Time `description:"등록일"`
	UpdatedAt        time.Time `description:"수정일"`
	Name             string    `description:"이름"`
	Description      string    `description:"설명" sql:"type:text"`
	CreatorID        uint      `description:"작성자"`
	UpdaterID        uint      `description:"최종수정자"`
	Group            string    `description:""`
	IsEnable         bool      `description:""`
	IsManual         bool      `description:""`
	ActionName       string    `description:"Action 이름"`
	ActionType       string    `description:"CRUD 타입"`
	ResourceName     string    `description:"이벤트 대상"`
	ResourceID       uint      `description:"이벤트 대상 ID"`
	TargetWhere      string    `description:""`
	TitleTemplate    string    `description:""`
	MessageTemplate  string    `description:"" gorm:"size:2500"`
	ListItemTemplate string    `description:"" gorm:"size:2500"`
	WebhookURLs      string    `description:"" gorm:"size:2500"`
	ReplaceText      string    `description:""`
	DiffMode         bool      `description:""`
	DiffKey          string    `description:""` // field name to diff; empty disables filtering
	DiffNewValue     string    `description:""` // required new value (empty = any)
	DiffOldValue     string    `description:""` // required old value (empty = any)
}
// CheckDiff reports whether crudEvent represents a change this
// notification type cares about, based on DiffKey and the optional
// DiffNewValue/DiffOldValue filters. With no DiffKey configured every
// event matches.
// Cleanup: the original re-tested m.DiffNewValue != "" (and the old
// value) inside an identical outer condition; the redundant nesting
// was collapsed, and len(...) <= 0 became == 0.
func (m *NotificationType) CheckDiff(crudEvent *CrudEvent) bool {
	if m.DiffKey == "" {
		return true
	}

	// UpdateProperty pairs the old and new value of one changed field.
	type UpdateProperty struct {
		Key      string
		OldValue string
		NewValue string
	}

	mapUpdateItem := map[string]interface{}{}
	if err := json.Unmarshal([]byte(crudEvent.UpdatedData), &mapUpdateItem); err != nil {
		logrus.WithError(err).Error("")
		return false
	}
	mapOldItem := map[string]interface{}{}
	if err := json.Unmarshal([]byte(crudEvent.OldData), &mapOldItem); err != nil {
		logrus.WithError(err).Error("")
		return false
	}

	// Collect the scalar (non-struct) fields present in the update.
	mapUpdateProperties := map[string]UpdateProperty{}
	for key, value := range mapUpdateItem {
		if structs.IsStruct(value) {
			continue
		}
		oldValue := ""
		if tempOldValue, ok := mapOldItem[key]; ok {
			oldValue = convInterface(tempOldValue)
		}
		mapUpdateProperties[key] = UpdateProperty{
			Key:      ToDBName(key),
			NewValue: convInterface(value),
			OldValue: oldValue,
		}
	}
	if len(mapUpdateProperties) == 0 {
		return false
	}

	updateProperty, ok := mapUpdateProperties[m.DiffKey]
	if !ok {
		return false
	}
	if m.DiffNewValue != "" && m.DiffNewValue != updateProperty.NewValue {
		return false
	}
	if m.DiffOldValue != "" && m.DiffOldValue != updateProperty.OldValue {
		return false
	}
	// Only notify when the value actually changed.
	return updateProperty.NewValue != updateProperty.OldValue
}
// AddNotificationType persists a new notification type and returns
// its generated ID.
func AddNotificationType(notificationType *NotificationType) (id uint, err error) {
	err = notificationType.Create(gGormDB)
	id = notificationType.ID
	return
}

// GetNotificationTypeByID loads one notification type by primary key.
func GetNotificationTypeByID(id uint) (notificationType *NotificationType, err error) {
	notificationType = &NotificationType{
		ID: id,
	}
	err = NewNotificationTypeQuerySet(gGormDB).One(notificationType)
	return
}

// GetAllNotificationType returns every notification type.
// NOTE(review): queryPage is currently unused — no paging is applied.
func GetAllNotificationType(queryPage *QueryPage) (notificationTypes []NotificationType, err error) {
	err = NewNotificationTypeQuerySet(gGormDB).All(&notificationTypes)
	return
}

// UpdateNotificationTypeByID updates only the Description column of
// the given notification type.
func UpdateNotificationTypeByID(notificationType *NotificationType) (err error) {
	err = notificationType.Update(gGormDB,
		NotificationTypeDBSchema.Description,
	)
	return
}

// DeleteNotificationType removes the notification type with id.
func DeleteNotificationType(id uint) (err error) {
	notificationType := &NotificationType{
		ID: id,
	}
	err = notificationType.Delete(gGormDB)
	return
}

// GetNotificationsTypes returns enabled notification types filtered
// by the manual/automatic flag.
func GetNotificationsTypes(isManual bool) (notificationTypes []NotificationType, err error) {
	err = NewNotificationTypeQuerySet(gGormDB).
		IsManualEq(isManual).
		IsEnableEq(true).
		All(&notificationTypes)
	return
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"path/filepath"
"gopkg.in/yaml.v2"
)
// routes is one redirect rule: requests for Path are redirected to URL.
// Field names match the YAML keys via yaml.v2's default lowercasing.
type routes struct {
	Path string
	URL  string
}
// parseYaml reads <file>.yml and decodes it into a slice of routes.
// Fix: the error from filepath.Abs was previously discarded with _;
// it is now checked like the other error paths.
func parseYaml(file string) ([]routes, error) {
	ymlFile, err := filepath.Abs(file + ".yml")
	if err != nil {
		return nil, err
	}
	ymlData, err := ioutil.ReadFile(ymlFile)
	if err != nil {
		return nil, err
	}
	var urlmap []routes
	if err := yaml.Unmarshal(ymlData, &urlmap); err != nil {
		return nil, err
	}
	return urlmap, nil
}
func mapHandler(paths map[string]string, fallback http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
url := paths[r.URL.Path]
if url != "" {
http.Redirect(w, r, url, 301)
return
}
fallback.ServeHTTP(w, r)
}
}
// buildMap converts the parsed route list into a path -> URL lookup map.
func buildMap(rs []routes) map[string]string {
	m := make(map[string]string, len(rs))
	for _, route := range rs {
		m[route.Path] = route.URL
	}
	return m
}
// yamlHandler loads the YAML routes file and wraps fallback with a
// redirect handler built from its entries.
func yamlHandler(yml string, fallback http.Handler) (http.HandlerFunc, error) {
	parsedYaml, err := parseYaml(yml)
	if err != nil {
		return nil, err
	}
	pathMap := buildMap(parsedYaml)
	return mapHandler(pathMap, fallback), nil
}
// defaultMux builds the fallback mux that answers every path with the
// greeting handler.
func defaultMux() *http.ServeMux {
	m := http.NewServeMux()
	m.HandleFunc("/", mate)
	return m
}
func mate(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Whoa mate!")
}
// main wires the YAML-driven redirect handler in front of the default
// mux and serves on :8080.
// Fixes: the error from yamlHandler was discarded, so a bad YAML file
// produced a nil handler that panicked on the first request — it is
// now fatal at startup. The redundant http.HandleFunc registration on
// the (unused) DefaultServeMux was removed, and the flag description
// no longer claims the file is a CSV.
func main() {
	yamlFile := flag.String("yml", "ymlData", "base name of the YAML routes file (without .yml)")
	flag.Parse()

	fallbackHandler := defaultMux()
	handler, err := yamlHandler(*yamlFile, fallbackHandler)
	if err != nil {
		log.Fatal("loading routes: ", err)
	}

	fmt.Println("Listening on port 8080")
	if err := http.ListenAndServe(":8080", handler); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package main
import (
"fmt"
)
// d7 defers three prints inside a loop. Each deferred closure
// receives the loop variable as an argument, so it captures the value
// i held at defer time (3, then 2, then 1). Deferred calls run in
// LIFO order when d7 returns, so the output is "1 2 3 ".
func d7() {
	for i := 3; i > 0; i-- {
		defer func(n int) {
			fmt.Print(n, " ")
		}(i)
	}
}
// main runs the d7 defer demonstration; expected output: "1 2 3 ".
// (The original comment referred to a nonexistent d8.)
func main() {
	d7()
}
// Due to the parameter of the anonymous function, each time the anonymous
// function is deferred, it gets and therefore uses the current value of i . As a result, each
// execution of the anonymous function has a different value to process, hence the generated
// output.
|
package module
import (
//"loger"
)
// SkillInfo describes a single skill record.
type SkillInfo struct {
	ID   int    `bson:"_id"`
	Name string `json:"name"`
}

// SkillInfoLst maps a skill ID to its SkillInfo.
type SkillInfoLst map[int]*SkillInfo

// SkillMgr manages a player's skill set.
type SkillMgr struct {
	moduleMgr *ModuleMgr
	skillLst  SkillInfoLst //! the player's skills, keyed by skill ID
}
//! Init wires the manager to its parent ModuleMgr and allocates an
//! empty skill table.
func (self *SkillMgr) Init(moduleMgr *ModuleMgr) {
	self.moduleMgr = moduleMgr
	self.skillLst = make(SkillInfoLst)
}
// NewSkillMgr allocates and initializes a SkillMgr bound to moduleMgr.
func NewSkillMgr(moduleMgr *ModuleMgr) *SkillMgr {
	mgr := &SkillMgr{}
	mgr.Init(moduleMgr)
	return mgr
}
|
package pbengine
import (
"io/ioutil"
"log"
"os"
"os/exec"
"github.com/vanishs/gwsrpc/swg"
"github.com/vanishs/gwsrpc/utils"
)
//GenFile GenFile
func GenFile(pkgname, filename string) {
//copy file
err := os.MkdirAll("./gwsrpcpbfile/"+pkgname, 0777)
if err != nil {
log.Fatalln(err)
}
//复制原来的文件改名pkgname.proto
err = utils.Copyfile("./gwsrpcpbfile/"+pkgname+"/"+pkgname+".proto", filename)
if err != nil {
log.Fatalln(err)
}
//gen and append rpc pb
genproto("./gwsrpcpbfile/"+pkgname+"/"+pkgname+".proto", filename)
//gen golang code
out, err := exec.Command("protoc",
"--gogofaster_out=plugins=grpc:.",
"./gwsrpcpbfile/"+pkgname+"/"+pkgname+".proto").Output()
if err != nil {
log.Fatalln("protoc",
"--gogofaster_out=plugins=grpc:.",
"./gwsrpcpbfile/"+pkgname+"/"+pkgname+".proto", err)
}
if string(out) != "" {
log.Println("protoc",
"--gogofaster_out=plugins=grpc:.",
"./gwsrpcpbfile/"+pkgname+"/"+pkgname+".proto", "Error:", out)
}
//make ts dir
err = os.MkdirAll("./src/providers/"+pkgname, 0777)
if err != nil {
log.Fatalln(err)
}
//gen pb json file and json string
outJSONbuf, err := exec.Command("pbjs", filename).Output()
if err != nil {
log.Fatalln("pbjs", filename, err)
}
outputJSONDir := "./src/providers/" + pkgname + "/" + pkgname + ".json"
ioutil.WriteFile(outputJSONDir, outJSONbuf, 0666)
//save json string to swg.Top
swg.Top.FileDatas[filename].PbJSONbackquote = "`" + string(outJSONbuf) + "`"
protobyte, err := ioutil.ReadFile(filename)
if err != nil {
log.Println("read proto file err.", err)
}
swg.Top.FileDatas[filename].PbPROTObackquote = "`" + string(protobyte) + "`"
//gen ts file
outTSbuf, err := exec.Command("proto2ts", "-c", "false", "-f", outputJSONDir).Output()
if err != nil {
log.Fatalln("proto2ts", "-c", "false", "-f", outputJSONDir, err)
}
outputDTSdir := "./src/providers/" + pkgname + "/" + pkgname + ".d.ts"
ioutil.WriteFile(outputDTSdir, []byte(outTSbuf), 0666)
//remove json file
os.Remove(outputJSONDir)
}
//GenMain generates the top-level project scaffolding: the websocket
//glue, the gateway, the TypeScript client stub, main.go and a
//Dockerfile.
func GenMain() {
	// Generated websocket package.
	err := os.MkdirAll("./gengateway/genws", 0777)
	if err != nil {
		log.Fatalln(err)
	}
	genws("./gengateway/genws/genws.go")
	// Generated gateway package.
	err = os.MkdirAll("./gengateway", 0777)
	if err != nil {
		log.Fatalln(err)
	}
	gengateway("./gengateway/gengateway.go")
	// TypeScript client under src/providers.
	err = os.MkdirAll("./src/providers", 0777)
	if err != nil {
		log.Fatalln(err)
	}
	gwsrpclits("./src/providers/gwsrpcli.ts")
	// Entry point.
	genmain("./main.go")
	// Container build file.
	gendocker("./Dockerfile")
}
|
package queue
import (
"io"
"os"
"sync"
)
// Queue is a file-backed, append-only message queue guarded by a
// read-write mutex. Each message is stored as an 8-byte length prefix
// (encoded by int64ToBytes, defined elsewhere) followed by the payload.
type Queue struct {
	file    *os.File
	rwMutex sync.RWMutex
}

// Element describes one stored message.
// NOTE(review): this type is unused by the visible code, and msg
// being a uintptr looks suspicious — confirm before relying on it.
type Element struct {
	size int64
	msg  uintptr
}

// ElementMetadataSize is the size in bytes of the int64 length prefix
// written before each message.
const ElementMetadataSize = 8
// Push appends a length-prefixed message to the queue file. The
// payload is preceded by an 8-byte size header so Pop can frame
// messages when reading back.
// Cleanup: the trailing "if err != nil { return err }; return nil"
// collapsed to a direct return.
func (q *Queue) Push(bytes []byte) error {
	size := int64(len(bytes))
	data := append(int64ToBytes(size), bytes...)

	q.rwMutex.Lock()
	defer q.rwMutex.Unlock()
	_, err := q.file.Write(data)
	return err
}
// Pop reads the length-prefixed message stored at offset and returns
// the offset of the next message. io.EOF is returned together with
// the original offset when no message exists there yet, so callers
// can retry with the same offset.
// NOTE(review): the payload ReadAt below runs OUTSIDE the read lock,
// unlike the header read — confirm this is safe alongside concurrent
// Push calls (the TODO suggests it was known to need work).
func (q *Queue) Pop(offset int64) (newOffset int64, msg []byte, err error) {
	sizeBytes := make([]byte, ElementMetadataSize)
	// TODO mix read at
	q.rwMutex.RLock()
	_, err = q.file.ReadAt(sizeBytes, offset)
	if err != nil {
		if err == io.EOF {
			q.rwMutex.RUnlock()
			return offset, nil, err
		}
		q.rwMutex.RUnlock()
		return 0, nil, err
	}
	q.rwMutex.RUnlock()
	size := bytesToInt64(sizeBytes)
	data := make([]byte, size)
	_, err = q.file.ReadAt(data, offset+ElementMetadataSize)
	if err != nil {
		return 0, nil, err
	}
	return offset + ElementMetadataSize + size, data, nil
}
// Close closes the underlying queue file.
func (q *Queue) Close() error {
	return q.file.Close()
}

// NewConsumer returns a consumer that starts reading this queue from
// offset 0.
func (q *Queue) NewConsumer() *Consumer {
	return &Consumer{
		queue:    q,
		consumAt: 0,
	}
}
|
package command
import (
Cli "github.com/ajpen/termsnippet/cli"
"gopkg.in/urfave/cli.v1"
)
// InstallCommand registers the given CLI command with the global
// application defined in the cli package.
func InstallCommand(c cli.Command) {
	Cli.App.Commands = append(Cli.App.Commands, c)
}
|
package store
import (
"time"
"github.com/go-ocf/cloud/cloud2cloud-connector/events"
"github.com/go-ocf/cloud/cloud2cloud-connector/store"
)
// Subscription captures one cloud2cloud event subscription together with
// the data needed to verify and dispatch its events.
type Subscription struct {
	ID string // Id
	URL string // href
	CorrelationID string // uuid
	Type store.Type
	ContentType string // application/json or application/vnd.ocf+cbor
	EventTypes []events.EventType
	DeviceID string // filled for device and resource events
	Href string // filled for resource events
	SequenceNumber uint64
	UserID string
	SigningSecret string // secret used to verify event signatures
}
// DevicesSubscription extends Subscription with the last observed device
// lists so repeated notifications can be detected and de-duplicated.
type DevicesSubscription struct {
	// EventTypes = [devices_registered, devices_unregistered, devices_online, devices_offline]
	Subscription
	AccessToken string
	LastDevicesRegistered events.DevicesRegistered
	LastDevicesOnline events.DevicesOnline
	LastDevicesOffline events.DevicesOffline
	LastCheck time.Time
}
|
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raftstore
import (
"bytes"
"fmt"
"math"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/golang/protobuf/proto"
"github.com/pingcap/badger"
"github.com/pingcap/badger/y"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/eraftpb"
"github.com/pingcap/kvproto/pkg/metapb"
rspb "github.com/pingcap/kvproto/pkg/raft_serverpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/dbreader"
"github.com/zhangjinpeng1987/raft"
)
// JobStatus represents a job status.
type JobStatus = uint32
// Job status values; a job normally goes Pending -> Running -> Finished,
// and may instead be cancelled or fail along the way.
const (
	JobStatusPending JobStatus = 0 + iota
	JobStatusRunning
	JobStatusCancelling
	JobStatusCancelled
	JobStatusFinished
	JobStatusFailed
)
// SnapStateType represents a snapshot state type.
type SnapStateType int
// Snapshot states for a peer: idle, generating a snapshot, applying one,
// or aborted while applying.
const (
	SnapStateRelax SnapStateType = 0 + iota
	SnapStateGenerating
	SnapStateApplying
	SnapStateApplyAborted
)
// SnapState represents a snapshot state.
type SnapState struct {
	StateType SnapStateType
	Status *JobStatus // shared with the apply worker; read atomically
	Receiver chan *eraftpb.Snapshot // delivers an asynchronously generated snapshot
}
// Raft log bootstrap constants and cache/fetch tuning knobs.
const (
	// When we create a region peer, we should initialize its log term/index > 0,
	// so that we can force the follower peer to sync the snapshot first.
	RaftInitLogTerm = 5
	RaftInitLogIndex = 5
	MaxSnapRetryCnt = 5 // max consecutive snapshot generation attempts
	raftLogMultiGetCnt = 8 // at or below this many entries, point-get instead of iterating
	MaxCacheCapacity = 1024 - 1 // max entries kept in an EntryCache
)
// CompactRaftLog discards all log entries prior to compactIndex. The caller
// must guarantee that compactIndex does not exceed the applied index. The
// actual log deletion happens later in an async task; here we only advance
// the truncated state.
func CompactRaftLog(tag string, state *applyState, compactIndex, compactTerm uint64) error {
	log.S().Debugf("%s compact log entries to prior to %d", tag, compactIndex)
	// Reject truncation of already-compacted entries.
	if compactIndex <= state.truncatedIndex {
		return errors.New("try to truncate compacted entries")
	}
	// Reject truncation beyond what has been applied.
	if compactIndex > state.appliedIndex {
		return errors.Errorf("compact index %d > applied index %d", compactIndex, state.appliedIndex)
	}
	state.truncatedIndex = compactIndex
	state.truncatedTerm = compactTerm
	return nil
}
// EntryCache keeps the tail of the raft log in memory so that recent
// entries can be served without touching the raft engine.
type EntryCache struct {
	cache []eraftpb.Entry // contiguous entries in ascending index order
}
// front returns the oldest cached entry; the cache must be non-empty.
func (ec *EntryCache) front() eraftpb.Entry {
	return ec.cache[0]
}
// back returns the newest cached entry; the cache must be non-empty.
func (ec *EntryCache) back() eraftpb.Entry {
	return ec.cache[len(ec.cache)-1]
}
// length returns the number of cached entries.
func (ec *EntryCache) length() int {
	return len(ec.cache)
}
// fetchEntriesTo appends cached entries in [begin, end) to ents, adding each
// entry's encoded size to *fetchSize. It stops early once *fetchSize exceeds
// maxSize, but always keeps at least one entry per call so progress is
// guaranteed. begin must be at or above the first cached index.
func (ec *EntryCache) fetchEntriesTo(begin, end, maxSize uint64, fetchSize *uint64, ents []eraftpb.Entry) []eraftpb.Entry {
	if begin >= end {
		return nil
	}
	y.Assert(ec.length() > 0)
	cacheLow := ec.front().Index
	y.Assert(begin >= cacheLow)
	cacheStart := int(begin - cacheLow)
	cacheEnd := int(end - cacheLow)
	if cacheEnd > ec.length() {
		cacheEnd = ec.length()
	}
	for i := cacheStart; i < cacheEnd; i++ {
		entry := ec.cache[i]
		y.AssertTruef(entry.Index == cacheLow+uint64(i), "%d %d %d", entry.Index, cacheLow, i)
		entrySize := uint64(entry.Size())
		*fetchSize += entrySize
		// Stop before appending once the budget is blown, unless this is the
		// very first entry counted (then *fetchSize == entrySize and we keep it).
		if *fetchSize != entrySize && *fetchSize > maxSize {
			break
		}
		ents = append(ents, entry)
	}
	return ents
}
// append adds entries to the cache, discarding any cached suffix whose
// indexes conflict with the new entries. It panics on a gap between the
// cached tail and the first new entry, and evicts from the front when the
// cache grows beyond MaxCacheCapacity.
func (ec *EntryCache) append(tag string, entries []eraftpb.Entry) {
	if len(entries) == 0 {
		return
	}
	if ec.length() > 0 {
		firstIndex := entries[0].Index
		cacheLastIndex := ec.back().Index
		if cacheLastIndex >= firstIndex {
			// New entries overlap the cached range: drop the conflicting
			// suffix (or the whole cache if everything is superseded).
			if ec.front().Index >= firstIndex {
				ec.cache = ec.cache[:0]
			} else {
				left := ec.length() - int(cacheLastIndex-firstIndex+1)
				ec.cache = ec.cache[:left]
			}
		} else if cacheLastIndex+1 < firstIndex {
			panic(fmt.Sprintf("%s unexpected hole %d < %d", tag, cacheLastIndex, firstIndex))
		}
	}
	ec.cache = append(ec.cache, entries...)
	if ec.length() > MaxCacheCapacity {
		extraSize := ec.length() - MaxCacheCapacity
		ec.cache = ec.cache[extraSize:]
	}
}
// compactTo drops every cached entry with index below idx. It is a no-op
// when the cache is empty or already starts past idx.
func (ec *EntryCache) compactTo(idx uint64) {
	if ec.length() == 0 {
		return
	}
	first := ec.front().Index
	if first > idx {
		return
	}
	// Never drop more than we actually hold.
	drop := mathutil.Min(int(idx-first), ec.length())
	ec.cache = ec.cache[drop:]
}
// ApplySnapResult defines a result of applying snapshot.
type ApplySnapResult struct {
	// PrevRegion is the region before snapshot applied
	PrevRegion *metapb.Region
	Region *metapb.Region
}
// InvokeContext is a scratch copy of a PeerStorage's persistent state.
// SaveReadyState mutates the copy and stages it into write batches;
// PostReadyPersistent copies it back into the PeerStorage once the
// batches have been flushed to disk.
type InvokeContext struct {
	RegionID uint64
	RaftState raftState
	ApplyState applyState
	lastTerm uint64
	SnapRegion *metapb.Region // non-nil iff a snapshot was applied in this round
}
// NewInvokeContext returns a new InvokeContext seeded from the storage's
// current in-memory state.
func NewInvokeContext(store *PeerStorage) *InvokeContext {
	ctx := &InvokeContext{
		RegionID: store.region.GetId(),
		RaftState: store.raftState,
		ApplyState: store.applyState,
		lastTerm: store.lastTerm,
	}
	return ctx
}
// hasSnapshot reports whether a snapshot was applied during this ready round.
func (ic *InvokeContext) hasSnapshot() bool {
	return ic.SnapRegion != nil
}
// saveRaftStateTo stages the context's raft state into the raft engine
// write batch.
func (ic *InvokeContext) saveRaftStateTo(wb *WriteBatch) {
	key := y.KeyWithTs(RaftStateKey(ic.RegionID), RaftTS)
	wb.Set(key, ic.RaftState.Marshal())
}
// saveApplyStateTo stages the context's apply state into the kv engine
// write batch.
func (ic *InvokeContext) saveApplyStateTo(wb *WriteBatch) {
	key := y.KeyWithTs(ApplyStateKey(ic.RegionID), KvTS)
	wb.Set(key, ic.ApplyState.Marshal())
}
// saveSnapshotRaftStateTo stages a copy of the raft state, with commit and
// last index forced to the snapshot index, into the kv engine write batch.
// See recoverFromApplyingState for why this kv-engine copy exists.
func (ic *InvokeContext) saveSnapshotRaftStateTo(snapshotIdx uint64, wb *WriteBatch) {
	snapshotRaftState := ic.RaftState
	snapshotRaftState.commit = snapshotIdx
	snapshotRaftState.lastIndex = snapshotIdx
	key := y.KeyWithTs(SnapshotRaftStateKey(ic.RegionID), KvTS)
	wb.Set(key, snapshotRaftState.Marshal())
}
// recoverFromApplyingState repairs the raft state after a restart that
// interrupted a snapshot apply: if the snapshot raft state saved in the kv
// engine is ahead of the state in the raft engine, the raft engine copy is
// overwritten through raftWB.
func recoverFromApplyingState(engines *Engines, raftWB *WriteBatch, regionID uint64) error {
	snapRaftStateKey := SnapshotRaftStateKey(regionID)
	snapRaftState := raftState{}
	val, err := getValue(engines.kv.DB, snapRaftStateKey)
	if err != nil {
		return errors.Errorf("region %d failed to get raftstate from kv engine when recover from applying state", regionID)
	}
	snapRaftState.Unmarshal(val)
	raftStateKey := RaftStateKey(regionID)
	raftState := raftState{}
	val, err = getValue(engines.raft, raftStateKey)
	// A missing key is fine here: the zero raftState stands in for it.
	if err != nil && err != badger.ErrKeyNotFound {
		return errors.WithStack(err)
	}
	raftState.Unmarshal(val)
	// if we recv append log when applying snapshot, last_index in raft_local_state will
	// larger than snapshot_index. since raft_local_state is written to raft engine, and
	// raft write_batch is written after kv write_batch, raft_local_state may wrong if
	// restart happen between the two write. so we copy raft_local_state to kv engine
	// (snapshot_raft_state), and set snapshot_raft_state.last_index = snapshot_index.
	// after restart, we need check last_index.
	if snapRaftState.lastIndex > raftState.lastIndex {
		raftWB.Set(y.KeyWithTs(raftStateKey, RaftTS), snapRaftState.Marshal())
	}
	return nil
}
// Compile-time check that PeerStorage satisfies raft.Storage.
var _ raft.Storage = new(PeerStorage)
// PeerStorage implements the raft.Storage interface.
type PeerStorage struct {
	Engines *Engines
	peerID uint64
	region *metapb.Region
	raftState raftState // persisted raft hard state plus last log index
	applyState applyState // persisted applied/truncated state
	appliedIndexTerm uint64
	lastTerm uint64 // term of the entry at raftState.lastIndex
	snapState SnapState
	genSnapTask *GenSnapTask
	regionSched chan<- task // scheduler channel for region destroy/apply tasks
	snapTriedCnt int // consecutive failed snapshot generation attempts
	cache *EntryCache
	stats *CacheQueryStats
	Tag string // log tag identifying this peer
}
// NewPeerStorage creates a new PeerStorage from the persisted raft and
// apply state of the given region, verifying that the raft log is at least
// as advanced as the applied index.
func NewPeerStorage(engines *Engines, region *metapb.Region, regionSched chan<- task, peerID uint64, tag string) (*PeerStorage, error) {
	log.S().Debugf("%s creating storage for %s", tag, region.String())
	raftState, err := initRaftState(engines.raft, region)
	if err != nil {
		return nil, err
	}
	applyState, err := initApplyState(engines.kv.DB, region)
	if err != nil {
		return nil, err
	}
	// The raft log can never be behind what has already been applied.
	if raftState.lastIndex < applyState.appliedIndex {
		panic(fmt.Sprintf("%s unexpected raft log index: lastIndex %d < appliedIndex %d",
			tag, raftState.lastIndex, applyState.appliedIndex))
	}
	lastTerm, err := initLastTerm(engines.raft, region, raftState, applyState)
	if err != nil {
		return nil, err
	}
	return &PeerStorage{
		Engines: engines,
		peerID: peerID,
		region: region,
		Tag: tag,
		raftState: raftState,
		applyState: applyState,
		lastTerm: lastTerm,
		regionSched: regionSched,
		cache: &EntryCache{},
		stats: &CacheQueryStats{},
	}, nil
}
// getMsg loads the value at key from the engine and unmarshals it into msg.
func getMsg(engine *badger.DB, key []byte, msg proto.Message) error {
	val, err := getValue(engine, key)
	if err != nil {
		return err
	}
	return proto.Unmarshal(val, msg)
}
// storageError is a simple string-backed error type for storage failures.
type storageError string
// Error implements the error interface.
func (e storageError) Error() string {
	return string(e)
}
// getRegionLocalState loads the persisted RegionLocalState of a region,
// translating any load failure into ErrRegionNotFound.
func getRegionLocalState(db *badger.DB, regionID uint64) (*rspb.RegionLocalState, error) {
	regionLocalState := new(rspb.RegionLocalState)
	if err := getMsg(db, RegionStateKey(regionID), regionLocalState); err != nil {
		return nil, &ErrRegionNotFound{regionID}
	}
	return regionLocalState, nil
}
// getApplyState loads the persisted apply state of a region from the kv engine.
func getApplyState(db *badger.DB, regionID uint64) (applyState, error) {
	applyState := applyState{}
	val, err := getValue(db, ApplyStateKey(regionID))
	if err != nil {
		return applyState, storageError(fmt.Sprintf("couldn't load raft state of region %d", regionID))
	}
	applyState.Unmarshal(val)
	return applyState, nil
}
// getRaftEntry loads one raft log entry of a region by index.
func getRaftEntry(db *badger.DB, regionID, idx uint64) (*eraftpb.Entry, error) {
	entry := new(eraftpb.Entry)
	if err := getMsg(db, RaftLogKey(regionID, idx), entry); err != nil {
		return nil, storageError(fmt.Sprintf("entry %d of %d not found", idx, regionID))
	}
	return entry, nil
}
// getValueTxn reads the value at key inside an existing transaction.
func getValueTxn(txn *badger.Txn, key []byte) ([]byte, error) {
	i, err := txn.Get(key)
	if err != nil {
		return nil, err
	}
	return i.Value()
}
// getValue reads the value stored at key in a read-only view transaction
// and returns a private copy of it.
func getValue(engine *badger.DB, key []byte) ([]byte, error) {
	var out []byte
	err := engine.View(func(txn *badger.Txn) error {
		item, err := txn.Get(key)
		if err != nil {
			return err
		}
		// ValueCopy detaches the bytes from the transaction's lifetime.
		out, err = item.ValueCopy(nil)
		return err
	})
	return out, err
}
// initRaftState loads the persisted raft state of a region, or — for a
// freshly split region that already has peers — initializes and persists
// the bootstrap state (RaftInitLogIndex / RaftInitLogTerm).
func initRaftState(raftEngine *badger.DB, region *metapb.Region) (raftState, error) {
	stateKey := RaftStateKey(region.Id)
	raftState := raftState{}
	val, err := getValue(raftEngine, stateKey)
	if err != nil && err != badger.ErrKeyNotFound {
		return raftState, err
	}
	if err == badger.ErrKeyNotFound {
		if len(region.Peers) > 0 {
			// new split region: seed and persist the bootstrap state
			raftState.lastIndex = RaftInitLogIndex
			raftState.term = RaftInitLogTerm
			raftState.commit = RaftInitLogIndex
			wb := new(WriteBatch)
			wb.Set(y.KeyWithTs(stateKey, RaftTS), raftState.Marshal())
			err = wb.WriteToRaft(raftEngine)
			if err != nil {
				return raftState, err
			}
		}
	} else {
		raftState.Unmarshal(val)
	}
	return raftState, nil
}
// initApplyState loads the persisted apply state of a region from the kv
// engine, or initializes in-memory bootstrap values for a fresh split
// region (without persisting them).
func initApplyState(kvEngine *badger.DB, region *metapb.Region) (applyState, error) {
	key := ApplyStateKey(region.Id)
	applyState := applyState{}
	val, err := getValue(kvEngine, key)
	if err != nil && err != badger.ErrKeyNotFound {
		return applyState, err
	}
	if err == badger.ErrKeyNotFound {
		if len(region.Peers) > 0 {
			// New split region: mirror the raft bootstrap index/term.
			applyState.appliedIndex = RaftInitLogIndex
			applyState.truncatedIndex = RaftInitLogIndex
			applyState.truncatedTerm = RaftInitLogTerm
		}
	} else {
		// The apply state serializes to exactly three uint64 fields (24 bytes).
		y.AssertTruef(len(val) == 24, "apply state val %v", val)
		applyState.Unmarshal(val)
	}
	return applyState, nil
}
// initLastTerm determines the term of the entry at raftState.lastIndex,
// using well-known values for the empty / bootstrap / truncation-boundary
// cases and falling back to reading the entry itself from the raft engine.
func initLastTerm(raftEngine *badger.DB, region *metapb.Region,
	raftState raftState, applyState applyState) (uint64, error) {
	lastIdx := raftState.lastIndex
	if lastIdx == 0 {
		return 0, nil
	} else if lastIdx == RaftInitLogIndex {
		return RaftInitLogTerm, nil
	} else if lastIdx == applyState.truncatedIndex {
		return applyState.truncatedTerm, nil
	} else {
		y.Assert(lastIdx > RaftInitLogIndex)
	}
	lastLogKey := RaftLogKey(region.Id, lastIdx)
	e := new(eraftpb.Entry)
	err := getMsg(raftEngine, lastLogKey, e)
	if err != nil {
		return 0, errors.Errorf("[region %s] entry at %d doesn't exist, may lost data.", region, lastIdx)
	}
	return e.Term, nil
}
// InitialState implements the raft.Storage InitialState method.
func (ps *PeerStorage) InitialState() (eraftpb.HardState, eraftpb.ConfState, error) {
	raftState := ps.raftState
	if raftState.commit == 0 && raftState.term == 0 && raftState.vote == 0 {
		// An all-zero hard state is only legal for an uninitialized peer.
		y.AssertTruef(!ps.isInitialized(),
			"peer for region %s is initialized but local state %s has empty hard state",
			ps.region, ps.raftState)
		return eraftpb.HardState{}, eraftpb.ConfState{}, nil
	}
	return eraftpb.HardState{
		Term: raftState.term,
		Vote: raftState.vote,
		Commit: raftState.commit,
	}, confStateFromRegion(ps.region), nil
}
// confStateFromRegion builds the raft ConfState (voters and learners)
// from the region's peer list.
func confStateFromRegion(region *metapb.Region) (confState eraftpb.ConfState) {
	for _, p := range region.Peers {
		if p.Role == metapb.PeerRole_Learner {
			confState.Learners = append(confState.Learners, p.GetId())
		} else {
			confState.Voters = append(confState.Voters, p.GetId())
		}
	}
	return
}
// isInitialized reports whether the region has any peers yet.
func (ps *PeerStorage) isInitialized() bool {
	return len(ps.region.Peers) > 0
}
// Region returns the region of the peer storage.
func (ps *PeerStorage) Region() *metapb.Region {
	return ps.region
}
// IsApplyingSnapshot returns whether the peer storage is applying a snapshot or not.
func (ps *PeerStorage) IsApplyingSnapshot() bool {
	return ps.snapState.StateType == SnapStateApplying
}
// Entries implements the raft.Storage Entries method: it returns log
// entries in [low, high), serving what it can from the in-memory cache and
// reading the rest from the raft engine, subject to the maxSize byte budget.
func (ps *PeerStorage) Entries(low, high, maxSize uint64) ([]eraftpb.Entry, error) {
	err := ps.checkRange(low, high)
	if err != nil {
		return nil, err
	}
	ents := make([]eraftpb.Entry, 0, high-low)
	if low == high {
		return ents, nil
	}
	regionID := ps.region.Id
	cacheLow := uint64(math.MaxUint64)
	if ps.cache.length() > 0 {
		cacheLow = ps.cache.front().Index
	}
	if high <= cacheLow {
		// No overlap with the cache: everything comes from the raft engine.
		ps.stats.miss++
		ents, _, err = fetchEntriesTo(ps.Engines.raft, regionID, low, high, maxSize, ents)
		return ents, err
	}
	var fetchedSize uint64
	beginIdx := low
	if low < cacheLow {
		// Lower part lives only on disk; the upper part is cached.
		ps.stats.miss++
		ents, fetchedSize, err = fetchEntriesTo(ps.Engines.raft, regionID, low, cacheLow, maxSize, ents)
		if err != nil {
			return ents, err
		}
		if fetchedSize > maxSize {
			// Size budget already exhausted by the on-disk portion.
			return ents, nil
		}
		beginIdx = cacheLow
	}
	ps.stats.hit++
	return ps.cache.fetchEntriesTo(beginIdx, high, maxSize, &fetchedSize, ents), nil
}
// Term implements the raft.Storage Term method. It answers from the
// truncated state or the cached last term when possible, and only falls
// back to fetching the entry itself otherwise.
func (ps *PeerStorage) Term(idx uint64) (uint64, error) {
	if idx == ps.truncatedIndex() {
		return ps.truncatedTerm(), nil
	}
	err := ps.checkRange(idx, idx+1)
	if err != nil {
		return 0, err
	}
	// If every entry after truncation shares lastTerm, or idx is the last
	// index itself, the answer is lastTerm without any disk access.
	if ps.truncatedTerm() == ps.lastTerm || idx == ps.raftState.lastIndex {
		return ps.lastTerm, nil
	}
	entries, err := ps.Entries(idx, idx+1, math.MaxUint64)
	if err != nil {
		return 0, err
	}
	return entries[0].Term, nil
}
// checkRange validates that [low, high) is a well-formed range of log
// indexes that the storage can still serve: not inverted, not already
// compacted, and not past the last index.
func (ps *PeerStorage) checkRange(low, high uint64) error {
	switch {
	case low > high:
		return errors.Errorf("low %d is greater than high %d", low, high)
	case low <= ps.truncatedIndex():
		return raft.ErrCompacted
	case high > ps.raftState.lastIndex+1:
		return errors.Errorf("entries' high %d is out of bound, lastIndex %d",
			high, ps.raftState.lastIndex)
	}
	return nil
}
// truncatedIndex returns the index of the last truncated (compacted) entry.
func (ps *PeerStorage) truncatedIndex() uint64 {
	return ps.applyState.truncatedIndex
}
// truncatedTerm returns the term of the last truncated entry.
func (ps *PeerStorage) truncatedTerm() uint64 {
	return ps.applyState.truncatedTerm
}
// LastIndex implements the raft.Storage LastIndex method.
func (ps *PeerStorage) LastIndex() (uint64, error) {
	return ps.raftState.lastIndex, nil
}
// AppliedIndex returns applied index of the peer storage.
func (ps *PeerStorage) AppliedIndex() uint64 {
	return ps.applyState.appliedIndex
}
// FirstIndex implements the raft.Storage FirstIndex method.
func (ps *PeerStorage) FirstIndex() (uint64, error) {
	return firstIndex(ps.applyState), nil
}
// firstIndex is the first index still present in the log: one past the
// truncated index.
func firstIndex(applyState applyState) uint64 {
	return applyState.truncatedIndex + 1
}
// validateSnap reports whether a generated snapshot is still usable for
// this peer: it must not predate the truncated index, must decode, and
// must not carry a stale configuration epoch.
func (ps *PeerStorage) validateSnap(snap *eraftpb.Snapshot) bool {
	idx := snap.GetMetadata().GetIndex()
	if idx < ps.truncatedIndex() {
		// Snapshot is older than what the log already compacted away.
		log.S().Infof("snapshot is stale, generate again, regionID: %d, peerID: %d, snapIndex: %d, truncatedIndex: %d", ps.region.GetId(), ps.peerID, idx, ps.truncatedIndex())
		return false
	}
	var snapData rspb.RaftSnapshotData
	if err := proto.UnmarshalMerge(snap.GetData(), &snapData); err != nil {
		log.S().Errorf("failed to decode snapshot, it may be corrupted, regionID: %d, peerID: %d, err: %v", ps.region.GetId(), ps.peerID, err)
		return false
	}
	snapEpoch := snapData.GetRegion().GetRegionEpoch()
	latestEpoch := ps.region.GetRegionEpoch()
	if snapEpoch.GetConfVer() < latestEpoch.GetConfVer() {
		log.S().Infof("snapshot epoch is stale, regionID: %d, peerID: %d, snapEpoch: %s, latestEpoch: %s", ps.region.GetId(), ps.peerID, snapEpoch, latestEpoch)
		return false
	}
	return true
}
// Snapshot implements the raft.Storage Snapshot method. Generation is
// asynchronous: the first call schedules a generation task and returns
// ErrSnapshotTemporarilyUnavailable; subsequent calls poll the receiver
// channel until a valid snapshot arrives or MaxSnapRetryCnt attempts fail.
func (ps *PeerStorage) Snapshot() (eraftpb.Snapshot, error) {
	var snap eraftpb.Snapshot
	if ps.snapState.StateType == SnapStateGenerating {
		select {
		case s := <-ps.snapState.Receiver:
			snap = *s
		default:
			// Generation still in progress; tell raft to retry later.
			return snap, raft.ErrSnapshotTemporarilyUnavailable
		}
		ps.snapState.StateType = SnapStateRelax
		if snap.GetMetadata() != nil {
			ps.snapTriedCnt = 0
			if ps.validateSnap(&snap) {
				return snap, nil
			}
		} else {
			log.S().Warnf("failed to try generating snapshot, regionID: %d, peerID: %d, times: %d", ps.region.GetId(), ps.peerID, ps.snapTriedCnt)
		}
	}
	if ps.snapTriedCnt >= MaxSnapRetryCnt {
		err := errors.Errorf("failed to get snapshot after %d times", ps.snapTriedCnt)
		ps.snapTriedCnt = 0
		return snap, err
	}
	log.S().Infof("requesting snapshot, regionID: %d, peerID: %d", ps.region.GetId(), ps.peerID)
	ps.snapTriedCnt++
	// Buffered so the generator never blocks delivering the result.
	ch := make(chan *eraftpb.Snapshot, 1)
	ps.snapState = SnapState{
		StateType: SnapStateGenerating,
		Receiver: ch,
	}
	ps.genSnapTask = newGenSnapTask(ps.region.GetId(), ch)
	return snap, raft.ErrSnapshotTemporarilyUnavailable
}
// Append the given entries to the raft log using previous last index or self.last_index.
// Return the new last index for later update. After we commit in engine, we can set last_index
// to the return one.
func (ps *PeerStorage) Append(invokeCtx *InvokeContext, entries []eraftpb.Entry, raftWB *WriteBatch) error {
	log.S().Debugf("%s append %d entries", ps.Tag, len(entries))
	prevLastIndex := invokeCtx.RaftState.lastIndex
	if len(entries) == 0 {
		return nil
	}
	lastEntry := entries[len(entries)-1]
	lastIndex := lastEntry.Index
	lastTerm := lastEntry.Term
	for _, entry := range entries {
		// NOTE(review): &entry is the address of the loop variable; this is only
		// safe if SetMsg serializes the message immediately — confirm it does.
		err := raftWB.SetMsg(y.KeyWithTs(RaftLogKey(ps.region.Id, entry.Index), RaftTS), &entry)
		if err != nil {
			return err
		}
	}
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		raftWB.Delete(y.KeyWithTs(RaftLogKey(ps.region.Id, i), RaftTS))
	}
	invokeCtx.RaftState.lastIndex = lastIndex
	invokeCtx.lastTerm = lastTerm
	// TODO: if the writebatch is failed to commit, the cache will be wrong.
	ps.cache.append(ps.Tag, entries)
	return nil
}
// CompactTo compacts the cache with the given index.
func (ps *PeerStorage) CompactTo(idx uint64) {
	ps.cache.compactTo(idx)
}
// MaybeGCCache tries to clear the entry cache when the cached range is no
// longer useful for catching up followers.
func (ps *PeerStorage) MaybeGCCache(replicatedIdx, appliedIdx uint64) {
	if replicatedIdx == appliedIdx {
		// The region is inactive, clear the cache immediately.
		ps.cache.compactTo(appliedIdx + 1)
	} else {
		if ps.cache.length() == 0 {
			return
		}
		cacheFirstIdx := ps.cache.front().Index
		if cacheFirstIdx > replicatedIdx+1 {
			// Catching up log requires accessing fs already, let's optimize for
			// the common case.
			// Maybe gc to second least replicated_idx is better.
			ps.cache.compactTo(appliedIdx + 1)
		}
	}
}
// clearMeta stages deletion of all of this peer's persisted metadata and logs.
func (ps *PeerStorage) clearMeta(kvWB, raftWB *WriteBatch) error {
	return ClearMeta(ps.Engines, kvWB, raftWB, ps.region.Id, ps.raftState.lastIndex)
}
// CacheQueryStats is used to record the status of cache querying.
type CacheQueryStats struct {
	hit uint64 // Entries calls served (at least partly) from the cache
	miss uint64 // Entries calls that had to read from the raft engine
}
// clearExtraData deletes all data that is not covered by `new_region`.
func (ps *PeerStorage) clearExtraData(newRegion *metapb.Region) {
oldStartKey, oldEndKey := RawStartKey(ps.region), RawEndKey(ps.region)
newStartKey, newEndKey := RawStartKey(newRegion), RawEndKey(newRegion)
regionID := newRegion.Id
if bytes.Compare(oldStartKey, newStartKey) < 0 {
ps.regionSched <- task{
tp: taskTypeRegionDestroy,
data: ®ionTask{
regionID: regionID,
startKey: oldStartKey,
endKey: newStartKey,
},
}
}
if bytes.Compare(newEndKey, oldEndKey) < 0 {
ps.regionSched <- task{
tp: taskTypeRegionDestroy,
data: ®ionTask{
regionID: regionID,
startKey: newEndKey,
endKey: oldEndKey,
},
}
}
}
// fetchEntriesTo reads raft log entries [low, high) of a region from the
// raft engine into buf, returning the buffer and the accumulated encoded
// size. Ranges of at most raftLogMultiGetCnt entries use point gets; larger
// ranges use an iterator. Fetching stops once maxSize is exceeded (at least
// one entry is always kept); gaps or missing entries yield raft.ErrUnavailable.
func fetchEntriesTo(engine *badger.DB, regionID, low, high, maxSize uint64, buf []eraftpb.Entry) ([]eraftpb.Entry, uint64, error) {
	var totalSize uint64
	nextIndex := low
	exceededMaxSize := false
	txn := engine.NewTransaction(false)
	defer txn.Discard()
	if high-low <= raftLogMultiGetCnt {
		// If election happens in inactive regions, they will just try
		// to fetch one empty log.
		for i := low; i < high; i++ {
			key := RaftLogKey(regionID, i)
			item, err := txn.Get(key)
			if err == badger.ErrKeyNotFound {
				return nil, 0, raft.ErrUnavailable
			} else if err != nil {
				return nil, 0, err
			}
			val, err := item.Value()
			if err != nil {
				return nil, 0, err
			}
			var entry eraftpb.Entry
			err = entry.Unmarshal(val)
			if err != nil {
				return nil, 0, err
			}
			y.Assert(entry.Index == i)
			totalSize += uint64(len(val))
			// Always keep the first entry, then respect the size budget.
			if len(buf) == 0 || totalSize <= maxSize {
				buf = append(buf, entry)
			}
			if totalSize > maxSize {
				break
			}
		}
		return buf, totalSize, nil
	}
	startKey := RaftLogKey(regionID, low)
	endKey := RaftLogKey(regionID, high)
	iter := dbreader.NewIterator(txn, false, startKey, endKey)
	defer iter.Close()
	for iter.Seek(startKey); iter.Valid(); iter.Next() {
		item := iter.Item()
		if bytes.Compare(item.Key(), endKey) >= 0 {
			break
		}
		val, err := item.Value()
		if err != nil {
			return nil, 0, err
		}
		var entry eraftpb.Entry
		err = entry.Unmarshal(val)
		if err != nil {
			return nil, 0, err
		}
		// May meet gap or has been compacted.
		if entry.Index != nextIndex {
			break
		}
		nextIndex++
		totalSize += uint64(len(val))
		exceededMaxSize = totalSize > maxSize
		if !exceededMaxSize || len(buf) == 0 {
			buf = append(buf, entry)
		}
		if exceededMaxSize {
			break
		}
	}
	// If we get the correct number of entries, returns,
	// or the total size almost exceeds max_size, returns.
	if len(buf) == int(high-low) || exceededMaxSize {
		return buf, totalSize, nil
	}
	// Here means we don't fetch enough entries.
	return nil, 0, raft.ErrUnavailable
}
// ClearMeta stages deletion of a region's persisted metadata: its region
// state and apply state from the kv engine, and its raft state plus all
// raft log entries from the raft engine. The raft engine is scanned first
// to find the real first log index so only existing keys are deleted.
func ClearMeta(engines *Engines, kvWB, raftWB *WriteBatch, regionID uint64, lastIndex uint64) error {
	start := time.Now()
	kvWB.Delete(y.KeyWithTs(RegionStateKey(regionID), KvTS))
	kvWB.Delete(y.KeyWithTs(ApplyStateKey(regionID), KvTS))
	firstIndex := lastIndex + 1
	beginLogKey := RaftLogKey(regionID, 0)
	endLogKey := RaftLogKey(regionID, firstIndex)
	err := engines.raft.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close()
		it.Seek(beginLogKey)
		// The first existing log key tells us where deletion must start.
		if it.Valid() && bytes.Compare(it.Item().Key(), endLogKey) < 0 {
			logIdx, err1 := RaftLogIndex(it.Item().Key())
			if err1 != nil {
				return err1
			}
			firstIndex = logIdx
		}
		return nil
	})
	if err != nil {
		return err
	}
	for i := firstIndex; i <= lastIndex; i++ {
		raftWB.Delete(y.KeyWithTs(RaftLogKey(regionID, i), RaftTS))
	}
	raftWB.Delete(y.KeyWithTs(RaftStateKey(regionID), RaftTS))
	log.S().Infof(
		"[region %d] clear peer 1 meta key 1 apply key 1 raft key and %d raft logs, takes %v",
		regionID,
		lastIndex+1-firstIndex,
		time.Since(start),
	)
	return nil
}
// WritePeerState adds the peer state to the WriteBatch. The region local
// state (state, region, optional merge state) is marshaled and staged
// under the region state key.
func WritePeerState(kvWB *WriteBatch, region *metapb.Region, state rspb.PeerState, mergeState *rspb.MergeState) {
	regionID := region.Id
	regionState := new(rspb.RegionLocalState)
	regionState.State = state
	regionState.Region = region
	if mergeState != nil {
		regionState.MergeState = mergeState
	}
	data, err := regionState.Marshal()
	if err != nil {
		// Marshal of a well-formed proto should never fail; if it does, log
		// and bail out rather than persist a nil value for the region state.
		log.S().Error(err)
		return
	}
	kvWB.Set(y.KeyWithTs(RegionStateKey(regionID), KvTS), data)
}
// ApplySnapshot applies the given snapshot to this peer: it validates the
// region id, clears the old data for an initialized peer, marks the region
// as Applying in the kv batch, and advances the context's raft/apply state
// to the snapshot's index and term.
func (ps *PeerStorage) ApplySnapshot(ctx *InvokeContext, snap *eraftpb.Snapshot, kvWB *WriteBatch, raftWB *WriteBatch) error {
	log.S().Infof("%v begin to apply snapshot", ps.Tag)
	snapData := new(rspb.RaftSnapshotData)
	if err := snapData.Unmarshal(snap.Data); err != nil {
		return err
	}
	if snapData.Region.Id != ps.region.Id {
		return fmt.Errorf("mismatch region id %v != %v", snapData.Region.Id, ps.region.Id)
	}
	if ps.isInitialized() {
		// we can only delete the old data when the peer is initialized.
		if err := ps.clearMeta(kvWB, raftWB); err != nil {
			return err
		}
	}
	WritePeerState(kvWB, snapData.Region, rspb.PeerState_Applying, nil)
	lastIdx := snap.Metadata.Index
	ctx.RaftState.lastIndex = lastIdx
	ctx.lastTerm = snap.Metadata.Term
	ctx.ApplyState.appliedIndex = lastIdx
	// The snapshot only contains log which index > applied index, so
	// here the truncate state's (index, term) is in snapshot metadata.
	ctx.ApplyState.truncatedIndex = lastIdx
	ctx.ApplyState.truncatedTerm = snap.Metadata.Term
	log.S().Debugf("%v apply snapshot for region %v with state %v ok", ps.Tag, snapData.Region, ctx.ApplyState)
	ctx.SnapRegion = snapData.Region
	return nil
}
// SaveReadyState saves memory states to disk.
//
// This function only write data to `ready_ctx`'s `WriteBatch`. It's caller's duty to write
// it explicitly to disk. If it's flushed to disk successfully, `post_ready` should be called
// to update the memory states properly.
// Do not modify ready in this function, this is a requirement to advance the ready object properly later.
func (ps *PeerStorage) SaveReadyState(kvWB, raftWB *WriteBatch, ready *raft.Ready) (*InvokeContext, error) {
	ctx := NewInvokeContext(ps)
	var snapshotIdx uint64
	if !raft.IsEmptySnap(&ready.Snapshot) {
		if err := ps.ApplySnapshot(ctx, &ready.Snapshot, kvWB, raftWB); err != nil {
			return nil, err
		}
		snapshotIdx = ctx.RaftState.lastIndex
	}
	if len(ready.Entries) != 0 {
		if err := ps.Append(ctx, ready.Entries, raftWB); err != nil {
			return nil, err
		}
	}
	// Last index is 0 means the peer is created from raft message
	// and has not applied snapshot yet, so skip persistent hard state.
	if ctx.RaftState.lastIndex > 0 {
		if !raft.IsEmptyHardState(ready.HardState) {
			ctx.RaftState.commit = ready.HardState.Commit
			ctx.RaftState.term = ready.HardState.Term
			ctx.RaftState.vote = ready.HardState.Vote
		}
	}
	// Only stage the raft state if it actually changed in this round.
	if ctx.RaftState != ps.raftState {
		ctx.saveRaftStateTo(raftWB)
		if snapshotIdx > 0 {
			// in case of restart happen when we just write region state to Applying,
			// but not write raft_local_state to raft rocksdb in time.
			// we write raft state to default rocksdb, with last index set to snap index,
			// in case of recv raft log after snapshot.
			ctx.saveSnapshotRaftStateTo(snapshotIdx, kvWB)
		}
	}
	// only when apply snapshot
	if ctx.ApplyState != ps.applyState {
		ctx.saveApplyStateTo(kvWB)
	}
	return ctx, nil
}
// PeerEqual returns a boolean value indicating whether two peers are equal
// (same id, store and role).
func PeerEqual(l, r *metapb.Peer) bool {
	return l.Id == r.Id && l.StoreId == r.StoreId && l.Role == r.Role
}
// RegionEqual returns a boolean value indicating whether two regions are equal
// (same id and region epoch); nil regions are never equal.
func RegionEqual(l, r *metapb.Region) bool {
	if l == nil || r == nil {
		return false
	}
	return l.Id == r.Id && l.RegionEpoch.Version == r.RegionEpoch.Version && l.RegionEpoch.ConfVer == r.RegionEpoch.ConfVer
}
// PostReadyPersistent updates the memory state after ready changes are flushed to disk successfully.
// It returns a non-nil ApplySnapResult only when a snapshot was applied in
// this round.
func (ps *PeerStorage) PostReadyPersistent(ctx *InvokeContext) *ApplySnapResult {
	ps.raftState = ctx.RaftState
	ps.applyState = ctx.ApplyState
	ps.lastTerm = ctx.lastTerm
	// If we apply snapshot ok, we should update some infos like applied index too.
	if ctx.SnapRegion == nil {
		return nil
	}
	// cleanup data before scheduling apply task
	if ps.isInitialized() {
		ps.clearExtraData(ps.region)
	}
	ps.ScheduleApplyingSnapshot()
	prevRegion := ps.region
	ps.region = ctx.SnapRegion
	ctx.SnapRegion = nil
	return &ApplySnapResult{
		PrevRegion: prevRegion,
		Region: ps.region,
	}
}
// ScheduleApplyingSnapshot schedules a task of applying snapshot.
func (ps *PeerStorage) ScheduleApplyingSnapshot() {
status := JobStatusPending
ps.snapState = SnapState{
StateType: SnapStateApplying,
Status: &status,
}
ps.regionSched <- task{
tp: taskTypeRegionApply,
data: ®ionTask{
regionID: ps.region.Id,
status: &status,
},
}
}
// SetRegion sets the region.
func (ps *PeerStorage) SetRegion(region *metapb.Region) {
	ps.region = region
}
// ClearData clears the data. It is currently a no-op placeholder.
func (ps *PeerStorage) ClearData() error {
	// Todo: currently it is a place holder
	return nil
}
// CancelApplyingSnap cancels a task of applying snapshot. It currently
// always reports success.
func (ps *PeerStorage) CancelApplyingSnap() bool {
	// Todo: currently it is a place holder
	return true
}
// CheckApplyingSnap checks if the storage is applying a snapshot, updating
// snapState according to the apply worker's shared status word. It panics
// if the apply job failed.
func (ps *PeerStorage) CheckApplyingSnap() bool {
	switch ps.snapState.StateType {
	case SnapStateApplying:
		switch atomic.LoadUint32(ps.snapState.Status) {
		case JobStatusFinished:
			ps.snapState = SnapState{StateType: SnapStateRelax}
		case JobStatusCancelled:
			ps.snapState = SnapState{StateType: SnapStateApplyAborted}
		case JobStatusFailed:
			panic(fmt.Sprintf("%v applying snapshot failed", ps.Tag))
		default:
			// Pending/Running/Cancelling: the apply is still in flight.
			return true
		}
	}
	return false
}
// createAndInitSnapshot builds an eraftpb.Snapshot for the given region
// snapshot: metadata (index, term, conf state) plus the serialized snapshot
// data produced by the snapshot builder.
func createAndInitSnapshot(snap *regionSnapshot, key SnapKey, mgr *SnapManager) (*eraftpb.Snapshot, error) {
	region := snap.regionState.GetRegion()
	confState := confStateFromRegion(region)
	snapshot := &eraftpb.Snapshot{
		Metadata: &eraftpb.SnapshotMetadata{
			Index: snap.index,
			Term: snap.term,
			ConfState: &confState,
		},
	}
	s, err := mgr.GetSnapshotForBuilding(key)
	if err != nil {
		return nil, err
	}
	// Set snapshot data
	snapshotData := &rspb.RaftSnapshotData{Region: region}
	snapshotStatics := SnapStatistics{}
	err = s.Build(snap, region, snapshotData, &snapshotStatics, mgr)
	if err != nil {
		return nil, err
	}
	snapshot.Data, err = snapshotData.Marshal()
	return snapshot, err
}
// getAppliedIdxTermForSnapshot returns the applied index and its term for
// building a snapshot: the term comes from the truncated state when the
// applied index sits at the truncation boundary, otherwise from the raft
// log entry itself.
func getAppliedIdxTermForSnapshot(raft *badger.DB, kv *badger.Txn, regionID uint64) (uint64, uint64, error) {
	applyState := applyState{}
	val, err := getValueTxn(kv, ApplyStateKey(regionID))
	if err != nil {
		return 0, 0, err
	}
	applyState.Unmarshal(val)
	idx := applyState.appliedIndex
	var term uint64
	if idx == applyState.truncatedIndex {
		term = applyState.truncatedTerm
	} else {
		entry, err := getRaftEntry(raft, regionID, idx)
		if err != nil {
			return 0, 0, err
		}
		term = entry.GetTerm()
	}
	return idx, term, nil
}
// doSnapshot generates a snapshot for a region: it opens a region snapshot
// at redoIdx, verifies the region is still in Normal state, registers the
// generation with the snapshot manager and builds the snapshot payload.
func doSnapshot(engines *Engines, mgr *SnapManager, regionID, redoIdx uint64) (*eraftpb.Snapshot, error) {
	log.S().Debugf("begin to generate a snapshot. [regionID: %d]", regionID)
	snap, err := engines.newRegionSnapshot(regionID, redoIdx)
	if err != nil {
		return nil, err
	}
	defer snap.txn.Discard()
	if snap.regionState.GetState() != rspb.PeerState_Normal {
		return nil, storageError(fmt.Sprintf("snap job %d seems stale, skip", regionID))
	}
	key := SnapKey{RegionID: regionID, Index: snap.index, Term: snap.term}
	mgr.Register(key, SnapEntryGenerating)
	defer mgr.Deregister(key, SnapEntryGenerating)
	return createAndInitSnapshot(snap, key, mgr)
}
|
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: use of this source code is governed by a BSD
// license that can be found in the LICENSE file. Alternatively, the CookieJar
// toolbox may be used in accordance with the terms and conditions contained
// in a signed written agreement between you and the author(s).
package set
import (
"math/rand"
"testing"
)
func TestSet(t *testing.T) {
	// Generate a batch of random values to work with
	const size = 65536
	data := make([]int, size)
	for i := range data {
		data[i] = rand.Int()
	}
	// Insert everything and confirm membership of each value
	set := New()
	for _, val := range data {
		set.Insert(val)
	}
	for _, val := range data {
		if !set.Exists(val) {
			t.Errorf("failed to locate element in set: %v in %v", val, set)
		}
	}
	// Drop a prefix of the data and make sure those values are gone
	rems := data[:1024]
	for _, val := range rems {
		set.Remove(val)
	}
	for _, val := range rems {
		if set.Exists(val) {
			t.Errorf("element exists after remove: %v in %v", val, set)
		}
	}
	// Calculate the sum of the remaining elements via iteration and compare
	// it against the expected total (all inserted minus all removed)
	sumSet := int64(0)
	set.Do(func(val interface{}) {
		sumSet += int64(val.(int))
	})
	sumDat := int64(0)
	for _, val := range data {
		sumDat += int64(val)
	}
	for _, val := range rems {
		sumDat -= int64(val)
	}
	if sumSet != sumDat {
		t.Errorf("iteration sum mismatch: have %v, want %v", sumSet, sumDat)
	}
	// Wipe the set and confirm nothing survives the reset
	set.Reset()
	for _, val := range data {
		if set.Exists(val) {
			t.Errorf("element exists after reset: %v in %v", val, set)
		}
	}
}
func BenchmarkInsert(b *testing.B) {
	// Pre-generate the values so that only Insert is timed
	data := make([]int, b.N)
	for i := range data {
		data[i] = rand.Int()
	}
	// Execute the benchmark
	b.ResetTimer()
	set := New()
	for i := 0; i < b.N; i++ {
		set.Insert(data[i])
	}
}
func BenchmarkRemove(b *testing.B) {
	// Seed the set with a permutation of [0, b.N)
	set := New()
	for _, val := range rand.Perm(b.N) {
		set.Insert(val)
	}
	// Time the removals, issued in an independent random order
	rems := rand.Perm(b.N)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		set.Remove(rems[i])
	}
}
func BenchmarkDo(b *testing.B) {
	// Fill a set with b.N random elements
	set := New()
	for i := 0; i < b.N; i++ {
		set.Insert(rand.Int())
	}
	// Time a full no-op iteration over the contents
	b.ResetTimer()
	set.Do(func(val interface{}) {
		// Do nothing
	})
}
|
package utils
import (
"os"
"path/filepath"
"strings"
terrascanUtils "github.com/accurics/terrascan/pkg/utils"
)
// FindAllSubDirectories finds all the sub directories in a path and filters would any directories specified in dirFilter
func FindAllSubDirectories(basePath string, dirFilter []string) ([]string, error) {
_, err := os.Stat(basePath)
if err != nil {
return nil, err
}
dirList := make([]string, 0)
err = filepath.Walk(basePath, func(filePath string, fileInfo os.FileInfo, err error) error {
// don't include the base path
if filePath == basePath {
return err
}
for i := range dirFilter {
if !strings.Contains(filePath, dirFilter[i]) && fileInfo != nil && fileInfo.IsDir() {
dirList = append(dirList, filePath)
}
}
return err
})
return dirList, err
}
// FindAllMetadaFilesInDir finds all rule metadata json files in the specified
// path and returns their full paths.
func FindAllMetadaFilesInDir(basePath string) ([]string, error) {
	matches, err := terrascanUtils.FindFilesBySuffixInDir(basePath, []string{"json"})
	if err != nil {
		return nil, err
	}
	fileList := make([]string, 0)
	for _, name := range matches {
		fileList = append(fileList, filepath.Join(basePath, *name))
	}
	return fileList, nil
}
// FileFileByPrefix finds file with specified prefix in a path
func FileFileByPrefix(basePath, prefix string) (string, error) {
dirEntries, err := os.ReadDir(basePath)
if err != nil {
return "", err
}
for i := range dirEntries {
if strings.HasPrefix(dirEntries[i].Name(), prefix) {
return dirEntries[i].Name(), nil
}
}
// file with prefix not found
return "", nil
}
|
package parked_domain
import (
"fmt"
"time"
"strings"
"encoding/hex"
"crypto/sha256"
"github.com/google/uuid"
)
var (
	// ParkedDomainConditions lists the predicates used to decide whether a
	// page body belongs to a parked domain; each takes the response body and
	// reports whether it matches one provider's parking page.
	ParkedDomainConditions = []func(body string) bool {
		GoDaddyDomainParked,
	}
)
// GoDaddyDomainParked reports whether the page body looks like a GoDaddy
// parked-domain page, by scanning it (case-insensitively) for a few
// well-known marker substrings.
func GoDaddyDomainParked(body string) bool {
	lowered := strings.ToLower(body)
	markers := []string{
		"this web page is parked free, courtesy of godaddy",
		"our domain broker service may be able to get it for you",
		"parking-lander",
	}
	for _, marker := range markers {
		if strings.Contains(lowered, marker) {
			return true
		}
	}
	return false
}
// generateNotificationHash derives a per-business, per-day hash (the date is
// embedded in the hashed string) for deduplicating notifications.
func generateNotificationHash(businessId uuid.UUID) string {
	day := time.Now().Format("01-02-2006")
	payload := fmt.Sprintf("%s:parked-domain-check:%s", businessId, day)
	digest := sha256.Sum256([]byte(payload))
	return hex.EncodeToString(digest[:])
}
|
// +build !race
package dsstore
import (
"context"
"testing"
"time"
"github.com/square/p2/pkg/replication"
"github.com/square/p2/pkg/util"
. "github.com/anthonybishopric/gotcha"
ds_fields "github.com/square/p2/pkg/ds/fields"
"github.com/square/p2/pkg/logging"
"github.com/square/p2/pkg/manifest"
pc_fields "github.com/square/p2/pkg/pc/fields"
"github.com/square/p2/pkg/store/consul"
"github.com/square/p2/pkg/store/consul/consulutil"
"github.com/square/p2/pkg/store/consul/transaction"
"github.com/square/p2/pkg/types"
klabels "k8s.io/kubernetes/pkg/labels"
)
// TestCreate exercises validation in store.Create: an empty pod id, an empty
// manifest id, and a pod id / manifest id mismatch must all fail.
func TestCreate(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	createDaemonSet(store, fixture.Client.KV(), t)
	// Create a bad DaemonSet: the pod id is empty
	podID := types.PodID("")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	// manifest whose id is also empty
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID("")
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	if _, err := store.Create(ctx, podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad pod id")
	}
	// A valid pod id paired with the empty-id manifest must still fail
	podID = types.PodID("pod_id")
	if _, err := store.Create(ctx, podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad manifest pod id")
	}
	// A manifest whose id differs from the pod id must fail as well
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID("different_pod_id")
	podManifest = manifestBuilder.GetManifest()
	if _, err := store.Create(ctx, podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on pod id and manifest pod id mismatch")
	}
}
// createDaemonSet creates a daemon set with fixed test values, commits the
// transaction via txner, sanity-checks every stored field, and returns it.
func createDaemonSet(store *ConsulStore, txner transaction.Txner, t *testing.T) ds_fields.DaemonSet {
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	manifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	ds, err := store.Create(ctx, manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx, txner)
	if err != nil {
		t.Fatalf("could not commit transaction to create daemon set: %s", err)
	}
	// (a redundant plain `if ds.ID == ""` check that duplicated the assertion
	// below was removed)
	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(ds.PodID, "", "Daemon set should have a pod id")
	Assert(t).AreEqual(ds.PodID, podID, "Daemon set pod id was not set correctly")
	Assert(t).AreEqual(ds.MinHealth, minHealth, "Daemon set minimum health was not set correctly")
	Assert(t).AreEqual(ds.Name, clusterName, "Daemon set cluster name was not set correctly")
	Assert(t).IsFalse(ds.Disabled, "Daemon set disabled field was not set correctly")
	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := ds.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}
	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := ds.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set manifest not set correctly")
	return ds
}
// TestDelete verifies delete semantics: deleting a missing daemon set is a
// no-op, deleting an existing one removes it, and deleting twice is harmless.
func TestDelete(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	ds := createDaemonSet(store, fixture.Client.KV(), t)
	// deleting an id that was never created should not error
	if err := store.Delete("bad_id"); err != nil {
		t.Error("Expected no errors while deleting a daemon set that does not exist")
	}
	if err := store.Delete(ds.ID); err != nil {
		t.Errorf("Unable to delete existing daemon set: %s", err)
	}
	// the deleted daemon set must no longer be retrievable
	if _, _, err := store.Get(ds.ID); err == nil {
		t.Error("Expected to encounter an error while getting a deleted daemon set")
	}
	// a second delete of the same id should also be a no-op
	if err := store.Delete(ds.ID); err != nil {
		t.Error("Expected no errors on while deleting a deleted daemon set")
	}
}
// TestGet creates a daemon set, fetches it back, and verifies every field
// round-trips; it also checks that getting an unknown id fails.
func TestGet(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	//
	// Create DaemonSet
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	manifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	ds, err := store.Create(ctx, manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")
	//
	// Get DaemonSet and verify it is the same
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Error retrieving created daemon set: %s", err)
	}
	Assert(t).AreNotEqual(getDS.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(getDS.PodID, "", "Daemon set should have a pod id")
	Assert(t).AreEqual(ds.ID, getDS.ID, "Daemon set should be equal ids")
	Assert(t).AreEqual(ds.PodID, getDS.PodID, "Daemon set should have equal pod ids")
	Assert(t).AreEqual(ds.MinHealth, getDS.MinHealth, "Daemon set should have equal minimum healths")
	Assert(t).AreEqual(ds.Name, getDS.Name, "Daemon set should have equal names")
	Assert(t).AreEqual(ds.Disabled, getDS.Disabled, "Daemon set should have same disabled fields")
	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}
	// the manifest stored with the daemon set must match the one we created
	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set shas were not equal")
	// Invalid get operation
	_, _, err = store.Get("bad_id")
	if err == nil {
		t.Error("Expected get operation to fail when getting a daemon set which does not exist")
	}
}
// TestList creates two daemon sets and verifies that List returns exactly
// both of them with the right pod ids.
func TestList(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	// Create first DaemonSet
	firstPodID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(firstPodID)
	firstManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	firstDS, err := store.Create(ctx, firstManifest, minHealth, clusterName, selector, firstPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("unable to commit daemon set: %s", err)
	}
	// Create second DaemonSet
	secondPodID := types.PodID("different_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(secondPodID)
	secondManifest := manifestBuilder.GetManifest()
	ctx2, cancelFunc2 := transaction.New(context.Background())
	defer cancelFunc2()
	secondDS, err := store.Create(ctx2, secondManifest, minHealth, clusterName, selector, secondPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx2, fixture.Client.KV())
	if err != nil {
		t.Fatalf("unable to commit daemon set: %s", err)
	}
	daemonSetList, err := store.List()
	if err != nil {
		t.Fatalf("Error getting list of daemon sets: %s", err)
	}
	Assert(t).AreEqual(len(daemonSetList), 2, "Unexpected number of daemon sets listed")
	for _, daemonSet := range daemonSetList {
		if daemonSet.ID == firstDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, firstPodID, "Listed daemon set pod ids were not equal")
		} else if daemonSet.ID == secondDS.ID {
			// BUGFIX: this branch previously matched on PodID equality, which
			// made the assertion below a tautology; match on the daemon set ID
			// like the first branch does
			Assert(t).AreEqual(daemonSet.PodID, secondPodID, "Listed daemon set pod ids were not equal")
		} else {
			t.Errorf("Unexpected daemon set listed: %v", daemonSet)
		}
	}
}
// TestMutate verifies MutateDS: mutators that error or change immutable
// fields (ID, PodID-without-manifest) must fail, and a valid mutation must be
// reflected both in the returned daemon set and in a subsequent Get.
func TestMutate(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	ds, err := store.Create(ctx, podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("unable to commit daemon set: %s", err)
	}
	//
	// Invalid mutates
	//
	// a mutator returning an error must propagate that error
	errorMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		return dsToMutate, util.Errorf("This is an error")
	}
	_, err = store.MutateDS(ds.ID, errorMutator)
	if err == nil {
		t.Error("Expected error when mutator produces an error")
	}
	// clearing the daemon set ID must be rejected
	badIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.ID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set ID")
	}
	// clearing PodID (so it no longer matches the manifest) must be rejected
	badPodIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.PodID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badPodIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set PodID to mismatch manifest")
	}
	//
	// A valid mutate followed by validation
	//
	someOtherDisabled := !ds.Disabled
	someOtherMinHealth := 42
	someOtherName := ds_fields.ClusterName("some_other_name")
	someOtherPodID := types.PodID("some_other_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()
	someOtherAZLabel := pc_fields.AvailabilityZone("some_other_zone")
	someOtherSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{someOtherAZLabel.String()})
	goodMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = someOtherDisabled
		dsToMutate.Manifest = someOtherManifest
		dsToMutate.MinHealth = someOtherMinHealth
		dsToMutate.Name = someOtherName
		dsToMutate.NodeSelector = someOtherSelector
		dsToMutate.PodID = someOtherPodID
		return dsToMutate, nil
	}
	someOtherDS, err := store.MutateDS(ds.ID, goodMutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}
	// the returned daemon set must reflect every mutated field
	Assert(t).AreEqual(someOtherDS.ID, ds.ID, "Daemon sets should be equal ids")
	Assert(t).AreEqual(someOtherDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids")
	Assert(t).AreEqual(someOtherDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths")
	Assert(t).AreEqual(someOtherDS.Name, someOtherName, "Daemon sets should have equal names")
	Assert(t).AreEqual(someOtherDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields")
	someOtherLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(),
	}
	if matches := someOtherDS.NodeSelector.Matches(someOtherLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}
	someOtherSHA, err := someOtherManifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	dsSHA, err := someOtherDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal")
	//
	// Validate daemon set from a get function
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Unable to get daemon set: %s", err)
	}
	// a fresh Get must observe the same mutated values
	Assert(t).AreEqual(getDS.ID, ds.ID, "Daemon sets should be equal ids")
	Assert(t).AreEqual(getDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids")
	Assert(t).AreEqual(getDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths")
	Assert(t).AreEqual(getDS.Name, someOtherName, "Daemon sets should have equal names")
	Assert(t).AreEqual(getDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields")
	someOtherLabels = klabels.Set{
		pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(someOtherLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}
	someOtherSHA, err = someOtherManifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	dsSHA, err = getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal")
}
// TestWatch verifies that Watch reports creations, deletions, and updates as
// separate categorized events.
func TestWatch(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	//
	// Create a new daemon set
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	ds, err := store.Create(ctx, podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not commit transaction to create daemon set: %s", err)
	}
	//
	// Create another new daemon set
	//
	someOtherPodID := types.PodID("some_other_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()
	ctx2, cancelFunc2 := transaction.New(context.Background())
	defer cancelFunc2()
	someOtherDS, err := store.Create(ctx2, someOtherManifest, minHealth, clusterName, selector, someOtherPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx2, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not commit transaction to create daemon set: %s", err)
	}
	//
	// Watch for changes: both creations should appear in the first event
	//
	quitCh := make(chan struct{})
	inCh := store.Watch(quitCh)
	defer close(quitCh)
	var watched WatchedDaemonSets
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.Created), 2, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(watched.Updated), 0, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(watched.Deleted), 0, "Unexpected number of deletes watched")
	for _, watchedDS := range watched.Created {
		if watchedDS.ID == ds.ID {
			Assert(t).AreEqual(watchedDS.PodID, ds.PodID, "Daemon sets should have equal pod ids")
		} else if watchedDS.ID == someOtherDS.ID {
			Assert(t).AreEqual(watchedDS.PodID, someOtherDS.PodID, "Daemon sets should have equal pod ids")
		} else {
			t.Errorf("Expected to find id '%s' among watch results, but was not present", watchedDS.ID)
		}
	}
	//
	// Make sure Watch reports exactly one Deleted entry when something gets deleted
	//
	err = store.Delete(someOtherDS.ID)
	if err != nil {
		t.Error("Unable to delete daemon set")
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.Created), 0, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(watched.Updated), 0, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(watched.Deleted), 1, "Unexpected number of deletes watched")
	Assert(t).AreEqual(someOtherDS.ID, watched.Deleted[0].ID, "Daemon sets should have equal ids")
	//
	// Make sure Watch outputs only 1 daemon set when something gets updated
	//
	mutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = !dsToMutate.Disabled
		return dsToMutate, nil
	}
	ds, err = store.MutateDS(ds.ID, mutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.Created), 0, "Unexpected number of creates watched")
	Assert(t).AreEqual(len(watched.Updated), 1, "Unexpected number of updates watched")
	Assert(t).AreEqual(len(watched.Deleted), 0, "Unexpected number of deletes watched")
	Assert(t).AreEqual(ds.ID, watched.Updated[0].ID, "Daemon sets should have equal ids")
	Assert(t).AreEqual(ds.PodID, watched.Updated[0].PodID, "Daemon sets should have equal pod ids")
}
// TestWatchAll verifies that WatchAll emits the full current list of daemon
// sets after each create, delete, and update.
func TestWatchAll(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	//
	// Create a new daemon set
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	ds, err := store.Create(ctx, podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not commit transaction to create daemon set: %s", err)
	}
	//
	// Create another new daemon set
	//
	someOtherPodID := types.PodID("some_other_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()
	ctx2, cancelFunc2 := transaction.New(context.Background())
	defer cancelFunc2()
	someOtherDS, err := store.Create(ctx2, someOtherManifest, minHealth, clusterName, selector, someOtherPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx2, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not commit transaction to create daemon set: %s", err)
	}
	//
	// Watch for create and verify: the snapshot should hold both daemon sets
	//
	quitCh := make(chan struct{})
	inCh := store.WatchAll(quitCh, 0)
	defer close(quitCh)
	var watched WatchedDaemonSetList
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.DaemonSets), 2, "Unexpected number of daemon sets watched")
	for _, watchedDS := range watched.DaemonSets {
		if watchedDS.ID == ds.ID {
			Assert(t).AreEqual(watchedDS.PodID, ds.PodID, "Daemon sets should have equal pod ids")
		} else if watchedDS.ID == someOtherDS.ID {
			Assert(t).AreEqual(watchedDS.PodID, someOtherDS.PodID, "Daemon sets should have equal pod ids")
		} else {
			t.Errorf("Expected to find id '%s' among watch results, but was not present", watchedDS.ID)
		}
	}
	//
	// Watch for delete and verify: only the surviving daemon set remains
	//
	err = store.Delete(someOtherDS.ID)
	if err != nil {
		t.Error("Unable to delete daemon set")
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched")
	Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids")
	//
	// Watch for update and verify: the list still holds one, updated, entry
	//
	mutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = !dsToMutate.Disabled
		return dsToMutate, nil
	}
	ds, err = store.MutateDS(ds.ID, mutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched")
	Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids")
	Assert(t).AreEqual(ds.PodID, watched.DaemonSets[0].PodID, "Daemon sets should have equal pod ids")
}
// TestEnableTxnAndDisableTxn disables and then re-enables a daemon set via
// transactions, confirming the Disabled flag after each commit.
func TestEnableTxnAndDisableTxn(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	store := newStore(fixture.Client.KV())
	//
	// Create a new daemon set
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ctx, cancelFunc := transaction.New(context.Background())
	defer cancelFunc()
	ds, err := store.Create(ctx, podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not commit transaction to create daemon set: %s", err)
	}
	// disable the daemon set in a fresh transaction and commit it
	ctx, cancelFunc = transaction.New(context.Background())
	defer cancelFunc()
	_, err = store.DisableTxn(ctx, ds.ID)
	if err != nil {
		t.Fatal(err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not commit transaction to disable daemon set: %s", err)
	}
	ds, _, err = store.Get(ds.ID)
	if err != nil {
		t.Fatal(err)
	}
	if !ds.Disabled {
		t.Fatal("daemon set should have been disabled")
	}
	// re-enable it in another transaction and verify the flag flips back
	ctx, cancelFunc = transaction.New(context.Background())
	defer cancelFunc()
	_, err = store.EnableTxn(ctx, ds.ID)
	if err != nil {
		t.Fatal(err)
	}
	err = transaction.MustCommit(ctx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("could not commit transaction to enable daemon set: %s", err)
	}
	ds, _, err = store.Get(ds.ID)
	if err != nil {
		t.Fatal(err)
	}
	if ds.Disabled {
		t.Fatal("daemon set should have been enabled")
	}
}
// TestLockForOwnershipTxn verifies transactional ownership locking: unlock
// and check-lock conflict before the lock is committed, a second session
// cannot acquire a held lock, and the lock becomes acquirable again after it
// is released.
func TestLockForOwnershipTxn(t *testing.T) {
	fixture := consulutil.NewFixture(t)
	defer fixture.Stop()
	rootCtx, rootCancel := context.WithCancel(context.Background())
	defer rootCancel()
	dsStore := newStore(fixture.Client.KV())
	kvStore := consul.NewConsulStore(fixture.Client)
	lockCtx, lockCancel := transaction.New(rootCtx)
	defer lockCancel()
	checkLockCtx, checkLockCancel := transaction.New(rootCtx)
	defer checkLockCancel()
	ds := createDaemonSet(dsStore, fixture.Client.KV(), t)
	session, errCh, err := kvStore.NewSession("ds-test-lock-for-ownership", nil)
	if err != nil {
		t.Fatal(err)
	}
	defer session.Destroy()
	// surface any session errors unless the test is shutting down
	go func() {
		select {
		case <-rootCtx.Done():
		case err, ok := <-errCh:
			if ok {
				t.Error(err)
			}
		}
	}()
	unlocker, err := dsStore.LockForOwnershipTxn(lockCtx, ds.ID, session)
	if err != nil {
		t.Fatal(err)
	}
	unlockCtx, unlockCancel := transaction.New(rootCtx)
	defer unlockCancel()
	err = unlocker.UnlockTxn(unlockCtx)
	if err != nil {
		t.Fatal(err)
	}
	// confirm that unlocking doesn't work prior to committing the lock transaction
	ok, _, err := transaction.Commit(unlockCtx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("unexpected error submitting unlock transaction (even though expected a conflict): %s", err)
	}
	if ok {
		t.Fatal("expected a transaction conflict when trying to unlock a lock we don't hold yet")
	}
	// refresh unlockCtx so we can use it again later
	unlockCtx, unlockCancel = transaction.New(unlockCtx)
	defer unlockCancel()
	err = unlocker.CheckLockedTxn(checkLockCtx)
	if err != nil {
		t.Fatal(err)
	}
	// confirm that checking for the lock fails in the same way as unlocking
	ok, _, err = transaction.Commit(checkLockCtx, fixture.Client.KV())
	if err != nil {
		t.Fatalf("unexpected error submitting check-lock transaction (even though expected a conflict): %s", err)
	}
	if ok {
		t.Fatal("expected a transaction conflict when trying to check a lock we don't hold yet")
	}
	// refresh checkLockCtx so we can use it again later
	checkLockCtx, checkLockCancel = transaction.New(checkLockCtx)
	defer checkLockCancel()
	// now acquire the lock
	err = transaction.MustCommit(lockCtx, fixture.Client.KV())
	if err != nil {
		t.Fatal(err)
	}
	// now make sure checking the lock succeeds
	err = transaction.MustCommit(checkLockCtx, fixture.Client.KV())
	if err != nil {
		t.Fatal(err)
	}
	// make sure another lock can't be acquired
	newLockCtx, newLockCancel := transaction.New(rootCtx)
	defer newLockCancel()
	newSession, newErrCh, err := kvStore.NewSession("ds-test-lock-for-ownership2", nil)
	if err != nil {
		t.Fatal(err)
	}
	go func() {
		select {
		case <-rootCtx.Done():
			newSession.Destroy()
		case err := <-newErrCh:
			t.Error(err)
		}
	}()
	_, err = dsStore.LockForOwnershipTxn(newLockCtx, ds.ID, newSession)
	if err != nil {
		t.Fatal(err)
	}
	// now we expect a failure because the lock is held by a different session
	err = transaction.MustCommit(newLockCtx, fixture.Client.KV())
	if err == nil {
		t.Fatal("expected a lock failure since the lock is already held")
	}
	// now unlock it
	err = transaction.MustCommit(unlockCtx, fixture.Client.KV())
	if err != nil {
		t.Fatal(err)
	}
	// now we should be able to acquire again, (but first refresh newLockCtx)
	newLockCtx, newLockCancel = transaction.New(rootCtx)
	defer newLockCancel()
	// BUGFIX: the lock operation must be added to the fresh transaction;
	// previously the commit below was of an empty transaction and proved nothing
	_, err = dsStore.LockForOwnershipTxn(newLockCtx, ds.ID, newSession)
	if err != nil {
		t.Fatal(err)
	}
	err = transaction.MustCommit(newLockCtx, fixture.Client.KV())
	if err != nil {
		t.Fatal(err)
	}
}
// newStore builds a ConsulStore backed by the given KV client, using the
// default logger and zero retries.
func newStore(kv consulKV) *ConsulStore {
	store := ConsulStore{
		kv:      kv,
		logger:  logging.DefaultLogger,
		retries: 0,
	}
	return &store
}
|
package scroll_test
import (
"fmt"
"github.com/fatlotus/scroll"
"golang.org/x/net/context"
)
// Mutation is a single operation that can be replayed against a Backend.
type Mutation interface {
	Update(b *Backend)
}
// AddItem and RemoveItem are the mutation types stored in the log; each
// carries the to-do item text it operates on.
type AddItem string
type RemoveItem string
// Key identifies a mutation so that duplicate operations are merged together
// by the log.
func (a AddItem) Key() string { return string(a) }
func (r RemoveItem) Key() string { return string(r) }
// Update applies an AddItem mutation: it logs the operation and appends
// the item to b.Todos unless an identical entry is already present.
func (a AddItem) Update(b *Backend) {
	fmt.Printf("AddItem(%s)\n", a)
	item := string(a)
	for _, existing := range b.Todos {
		if existing == item {
			return
		}
	}
	b.Todos = append(b.Todos, item)
}
// Update applies a RemoveItem mutation: it logs the operation and removes
// every occurrence of the item from b.Todos in place, preserving the
// relative order of the remaining entries.
func (r RemoveItem) Update(b *Backend) {
	fmt.Printf("RemoveItem(%s)\n", r)
	item := string(r)
	kept := b.Todos[:0]
	for _, todo := range b.Todos {
		if todo != item {
			kept = append(kept, todo)
		}
	}
	b.Todos = kept
}
// Define the in-memory representation of the application.
// Backend holds the materialized to-do list plus the scroll log it is
// built from and the cursor tracking how far the log has been applied.
type Backend struct {
	Todos  []string      // current to-do entries, in insertion order
	cursor scroll.Cursor // read position into the mutation log
	log    scroll.Log    // append-only mutation log
}
// Pull the latest versions from Scroll.
// Update drains the cursor, applying every pending mutation to b in log
// order. It stops cleanly when the cursor reports scroll.Done and
// propagates any other error.
// NOTE(review): the named return c is never assigned, so cursor.Next is
// invoked with a nil context (and nil is what callers get back) —
// confirm scroll tolerates a nil context here.
func (b *Backend) Update() (c context.Context, err error) {
	var m Mutation
	for {
		err = b.cursor.Next(c, &m)
		if err == scroll.Done {
			// end of log reached: not an error
			break
		} else if err != nil {
			return
		}
		m.Update(b)
	}
	return
}
// Define an interface for clients to use.
// Add appends an AddItem mutation to the log; it does not modify Todos
// until Update is called.
func (b *Backend) Add(c context.Context, item string) {
	b.log.Append(c, AddItem(item))
}

// Remove appends a RemoveItem mutation to the log; it does not modify
// Todos until Update is called.
func (b *Backend) Remove(c context.Context, item string) {
	b.log.Append(c, RemoveItem(item))
}
// NewBackend creates a Backend with an empty to-do list, backed by an
// in-memory scroll log and a cursor positioned at the start of that log.
func NewBackend() *Backend {
	backend := &Backend{Todos: []string{}}
	backend.log = scroll.MemoryLog()
	backend.cursor = backend.log.Cursor()
	return backend
}
// Add and remove a few items from the to-do list.
// Example demonstrates the full round trip: queueing mutations, then
// draining them with Update. The duplicate "bananas" add is coalesced
// (its Key matches), so it is applied — and printed — only once, as the
// expected output below shows.
func Example() {
	c := context.Background()
	b := NewBackend()
	b.Add(c, "apples")
	b.Add(c, "bananas")
	b.Add(c, "pears")
	b.Add(c, "bananas")
	b.Remove(c, "pears")
	b.Update()
	fmt.Printf("b.Todos: %s\n", b.Todos)
	// Output:
	// AddItem(apples)
	// AddItem(bananas)
	// RemoveItem(pears)
	// b.Todos: [apples bananas]
}
|
package server
import (
"encoding/json"
"github.com/golang/glog"
"io/ioutil"
"log"
"net/http"
)
// ConfigFilePagesData aggregates, for one (companyName, subcompany)
// pair, the number of active "dns_proxy" and "redirect" config entries
// (see ConfigFilePagesNew, which builds these).
type ConfigFilePagesData struct {
	Dns_proxy   int    `json:"dns_proxy"`
	Redirect    int    `json:"redirect"`
	Subcompany  string `json:"subcompany"`
	CompanyName string `json:"companyName"`
}
type reqConfigFilePagesNew struct {
Token string `json:"omitempty`
}
// ConfigFilePagesNew handles POST requests and returns, for every
// (companyName, subcompany) pair found active (estatus = 1) in
// proxypro.config_value1, a ConfigFilePagesData entry counting how many
// "dns_proxy" and how many "redirect" rows exist for that pair.
// Non-POST requests get a "Request Method error" JSON response.
func ConfigFilePagesNew(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w,req)
	if req.Method == "POST" {
		// NOTE(review): the body read error is silently discarded — confirm
		// this is acceptable for this internal API.
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqConfigFilePagesNew
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		// Load every active config row.
		// NOTE(review): a query error is only logged; Queryconfig would then
		// be nil and the deferred Close / Next below would panic — confirm
		// whether Db failures are considered fatal here.
		Queryconfig, err := Db.Query(`SELECT id, typeCode, configValue, companyName,
subcompany, platform FROM proxypro.config_value1 where estatus = 1`)
		if err != nil {
			log.Println(err)
		}
		defer Queryconfig.Close()
		data_slice := make([]interface{}, 0)
		// companyName -> list of row maps (one string map per DB row).
		companyName_map := make(map[string]([](map[string]string)), 0)
		for Queryconfig.Next() {
			var id string
			var typeCode string
			var configValue string
			var companyName string
			var subcompany string
			var platform string
			err := Queryconfig.Scan(&id, &typeCode, &configValue, &companyName, &subcompany, &platform)
			if err != nil {
				// Scan failure is logged; the zero-valued row still flows on.
				glog.Info(err)
				//continue
			}
			// Flatten the row into a string map keyed by column name.
			tmp_map := make(map[string]string, 0)
			tmp_map["id"] = id
			tmp_map["typeCode"] = typeCode
			tmp_map["configValue"] = configValue
			tmp_map["companyName"] = companyName
			tmp_map["subcompany"] = subcompany
			tmp_map["platform"] = platform
			glog.Info(typeCode)
			// Group the rows by company name.
			if _, ok := companyName_map[companyName]; ok {
				companyName_map[companyName] = append(companyName_map[companyName], tmp_map)
			} else {
				tmp_slice := make([]map[string]string, 0)
				tmp_slice = append(tmp_slice, tmp_map)
				companyName_map[companyName] = tmp_slice
			}
		}
		// NOTE(review): data_slice is appended to but never read afterwards.
		data_slice = append(data_slice, companyName_map)
		data_slice1 := make([](map[string]*ConfigFilePagesData), 0)
		glog.Info(len(companyName_map))
		for key, value := range companyName_map {
			tmp_map := make(map[string]*ConfigFilePagesData, 0) // keyed by subcompany name
			glog.Info(key, len(value), value)
			for _, v := range value {
				if _, ok := tmp_map[v["subcompany"]]; ok {
					// Existing subcompany entry: bump the matching counter.
					if v["typeCode"] == "dns_proxy" {
						t := (tmp_map[v["subcompany"]]).Dns_proxy
						(tmp_map[v["subcompany"]]).Dns_proxy = t + 1
					}
					if v["typeCode"] == "redirect" {
						t := (tmp_map[v["subcompany"]]).Redirect
						(tmp_map[v["subcompany"]]).Redirect = t + 1
					}
				} else {
					// First row for this subcompany: initialise, then count it.
					tmp_map[v["subcompany"]] = &ConfigFilePagesData{}
					(tmp_map[v["subcompany"]]).CompanyName = key
					(tmp_map[v["subcompany"]]).Dns_proxy = 0
					(tmp_map[v["subcompany"]]).Redirect = 0
					(tmp_map[v["subcompany"]]).Subcompany = v["subcompany"]
					if v["typeCode"] == "dns_proxy" {
						t := (tmp_map[v["subcompany"]]).Dns_proxy
						(tmp_map[v["subcompany"]]).Dns_proxy = t + 1
					}
					if v["typeCode"] == "redirect" {
						t := (tmp_map[v["subcompany"]]).Redirect
						(tmp_map[v["subcompany"]]).Redirect = t + 1
					}
				}
			}
			data_slice1 = append(data_slice1, tmp_map)
		}
		// Flatten the per-company maps into one slice for the response.
		tmp_slice := make([]interface{}, 0)
		for _, value := range data_slice1 {
			if len(value) > 0 {
				for _, value := range value {
					tmp_slice = append(tmp_slice, value)
				}
			}
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = tmp_slice
		// NOTE(review): Marshal/Write errors are ignored, as elsewhere in
		// this file.
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// reqConfigFilePagesNewEdit is the request payload for
// ConfigFilePagesNewEdit.
type reqConfigFilePagesNewEdit struct {
	Token       string // caller auth token (not currently validated by the handler)
	CompanyName string // optional filter; empty means "all companies"
}
// ConfigFilePagesNewEdit handles POST requests and returns the active
// (estatus = 1) rows of proxypro.config_value1 split into "redirect" and
// "dns_proxy" groups, each row annotated with its platform's TTL
// (loadTime from proxypro.config_version). When m.CompanyName is set,
// only that company's rows are returned.
// Non-POST requests get a "Request Method error" JSON response.
func ConfigFilePagesNewEdit(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w,req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqConfigFilePagesNewEdit
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		// Query the TTL (loadTime) per platform.
		ttl_map := make(map[int]int, 0)
		{
			QueryTTL, err := Db.Query(`SELECT platform,loadTime FROM proxypro.config_version `)
			if err != nil {
				glog.Info(err)
			}
			// NOTE(review): if the query failed, QueryTTL is nil and Next
			// would panic — confirm Db failures are treated as fatal here.
			for QueryTTL.Next() {
				var platform int
				var loadTime int
				err := QueryTTL.Scan(&platform, &loadTime)
				if err != nil {
					glog.Info(err)
				}
				ttl_map[platform] = loadTime
			}
			QueryTTL.Close()
		}
		glog.Info("ttl_map: ", ttl_map)
		// Load every active config row.
		Queryconfig, err := Db.Query(`SELECT id, typeCode, configValue, companyName,
subcompany, platform FROM proxypro.config_value1 where estatus = 1`)
		if err != nil {
			log.Println(err)
		}
		defer Queryconfig.Close()
		// companyName -> list of row maps (one map per DB row).
		companyName_map := make(map[string]([](map[string]interface{})), 0)
		for Queryconfig.Next() {
			var id int
			var typeCode string
			var configValue string
			var companyName string
			var subcompany string
			var platform int
			err := Queryconfig.Scan(&id, &typeCode, &configValue, &companyName, &subcompany, &platform)
			if err != nil {
				glog.Info(err)
			}
			tmp_map := make(map[string]interface{}, 0)
			tmp_map["id"] = id
			tmp_map["typeCode"] = typeCode
			tmp_map["configValue"] = configValue
			tmp_map["companyName"] = companyName
			tmp_map["subcompany"] = subcompany
			tmp_map["platform"] = platform
			// Attach the platform's TTL, defaulting to 0 when unknown.
			if _, ok := ttl_map[platform]; ok {
				tmp_map["ttl"] = ttl_map[platform]
			} else {
				tmp_map["ttl"] = 0
			}
			// Group the rows by company name.
			if _, ok := companyName_map[companyName]; ok {
				companyName_map[companyName] = append(companyName_map[companyName], tmp_map)
			} else {
				tmp_slice := make([]map[string]interface{}, 0)
				tmp_slice = append(tmp_slice, tmp_map)
				companyName_map[companyName] = tmp_slice
			}
		}
		// DataSlice groups the rows by typeCode for the response payload.
		type DataSlice struct {
			//LoadTime string `json:"load-time"`
			Redirect []interface{} `json:"redirect"`
			Dnsproxy []interface{} `json:"dns_proxy"`
		}
		ret_data := &DataSlice{}
		if len(companyName_map) > 0 {
			for _, value1 := range companyName_map {
				if len(m.CompanyName) > 0 && len(value1) > 0 {
					// Filtered: only rows for the requested company.
					for _, value2 := range value1 {
						if value2["typeCode"] == "dns_proxy" && value2["companyName"] == m.CompanyName {
							ret_data.Dnsproxy = append(ret_data.Dnsproxy, value2)
						}
						if value2["typeCode"] == "redirect" && value2["companyName"] == m.CompanyName {
							ret_data.Redirect = append(ret_data.Redirect, value2)
						}
					}
				} else if len(value1) > 0 {
					// Unfiltered: include every company's rows.
					for _, value2 := range value1 {
						if value2["typeCode"] == "dns_proxy" {
							ret_data.Dnsproxy = append(ret_data.Dnsproxy, value2)
						}
						if value2["typeCode"] == "redirect" {
							ret_data.Redirect = append(ret_data.Redirect, value2)
						}
					}
				}
			}
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = ret_data
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// reqGetCompanyName is the request payload for GetCompanyName.
type reqGetCompanyName struct {
	Token string // caller auth token (not currently validated by the handler)
}
// GetCompanyName handles POST requests and returns every known company
// mapped to the set of its subcompany names (represented as a
// map[subcompany]bool to deduplicate repeated rows).
// Non-POST requests get a "Request Method error" JSON response.
func GetCompanyName(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w, req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqGetCompanyName
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		// Load every active config row.
		// NOTE(review): a query error is only logged; Queryconfig would then
		// be nil and the deferred Close / Next below would panic.
		Queryconfig, err := Db.Query(`SELECT id, typeCode, configValue, companyName,
subcompany, platform FROM proxypro.config_value1 where estatus = 1`)
		if err != nil {
			glog.Info(err)
		}
		defer Queryconfig.Close()
		// companyName -> list of subcompany names (duplicates included).
		companyName_map := make(map[string]([]string), 0)
		for Queryconfig.Next() {
			var id string
			var typeCode string
			var configValue string
			var companyName string
			var subcompany string
			var platform string
			err := Queryconfig.Scan(&id, &typeCode, &configValue, &companyName, &subcompany, &platform)
			if err != nil {
				glog.Info(err)
			}
			companyName_map[companyName] = append(companyName_map[companyName], subcompany)
		}
		// Deduplicate each company's subcompany list by turning it into a set.
		tmp_companyName_map := make(map[string]interface{}, 0)
		if len(companyName_map) > 0 {
			for key, value1 := range companyName_map {
				tmp_map := make(map[string]bool, 0)
				if len(value1) > 0 {
					for _, value2 := range value1 {
						tmp_map[value2] = true
					}
				}
				tmp_companyName_map[key] = tmp_map
			}
		}
		glog.Info(tmp_companyName_map)
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = tmp_companyName_map
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// reqEditCompanyName is the payload shared by EditCompanyName,
// DeleteCompanyName and SvaeCompanyName. The Eidit*/Edit* fields carry
// the replacement values for an edit.
// NOTE(review): "EiditCompanyName" is misspelled, but the field name is
// part of the JSON wire contract (clients send this exact key), so it
// must not be renamed without coordinating a client-side change.
type reqEditCompanyName struct {
	Token            string // caller auth token (not currently validated by the handlers)
	CompanyName      string // existing company to match
	Subcompany       string // existing subcompany to match
	EiditCompanyName string // new company name (edit only)
	EditSubcompany   string // new subcompany name (edit only)
}
// EditCompanyName handles POST requests that rename a (companyName,
// subcompany) pair on active (estatus = 1) rows of
// proxypro.config_value1; the new values come from EiditCompanyName /
// EditSubcompany. Non-POST requests get a "Request Method error"
// JSON response.
//
// Fixes over the previous version: the Db.Prepare error is checked (it
// was ignored, so a failure left stmt nil and Exec panicked) and the
// prepared statement is now always closed (it used to leak).
func EditCompanyName(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w, req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqEditCompanyName
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		stmt, err := Db.Prepare(`update proxypro.config_value1 set companyName = ?, subcompany= ?
where companyName = ? and subcompany= ? and estatus = 1`)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		defer stmt.Close()
		res, err := stmt.Exec(m.EiditCompanyName, m.EditSubcompany, m.CompanyName, m.Subcompany)
		glog.Info("res", res)
		if err != nil {
			glog.Info(res)
			glog.Info(err)
			return
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// DeleteCompanyName handles POST requests that soft-delete every config
// row matching (CompanyName, Subcompany) by setting estatus = 0.
// Non-POST requests get a "Request Method error" JSON response.
//
// Fixes over the previous version: a bad request body now aborts instead
// of running the update with empty match values; the Db.Prepare error is
// checked (it was ignored, so a failure panicked on a nil stmt); and the
// prepared statement is always closed (it used to leak).
func DeleteCompanyName(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w, req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqEditCompanyName
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			// Abort: continuing with a zero-valued struct would match rows
			// whose companyName/subcompany are empty strings.
			return
		}
		stmt, err := Db.Prepare(`update proxypro.config_value1 set estatus = 0 where companyName = ? and subcompany= ? `)
		if err != nil {
			glog.Info(err)
			return
		}
		defer stmt.Close()
		res, err := stmt.Exec(m.CompanyName, m.Subcompany)
		glog.Info("res", res)
		if err != nil {
			glog.Info(res)
			glog.Info(err)
			return
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// SvaeCompanyName handles POST requests that create a new config row for
// (CompanyName, Subcompany) with an empty configValue.
// NOTE: the name is misspelled ("Svae" -> "Save") but is kept because the
// function is exported and may be referenced by route registrations
// elsewhere.
//
// Fixes over the previous version: a bad request body now aborts instead
// of inserting empty values; the Db.Prepare error is checked (it was
// ignored, so a failure panicked on a nil stmt); and the statement is
// closed on every path, not only on success.
func SvaeCompanyName(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w, req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqEditCompanyName
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			// Abort: continuing with a zero-valued struct would insert an
			// all-empty row.
			return
		}
		stmt, err := Db.Prepare(`INSERT INTO proxypro.config_value1 (companyName, subcompany,configValue) values (?, ?,?)`)
		if err != nil {
			glog.Info(err)
			return
		}
		defer stmt.Close()
		res, err := stmt.Exec(m.CompanyName, m.Subcompany, "")
		glog.Info("res", res)
		if err != nil {
			glog.Info(res)
			glog.Info(err)
			return
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// reqAddConfigFilePages is the request payload for AddConfigFilePages.
// URL and TO are joined into the stored configValue as "URL->TO".
type reqAddConfigFilePages struct {
	URL         string
	TO          string
	TypeCode    string // e.g. "dns_proxy" or "redirect" (the values counted elsewhere)
	CompanyName string
	Subcompany  string
}
// AddConfigFilePages handles POST requests that insert a new active
// (estatus = 1) config row whose configValue encodes the redirect as
// "URL->TO". Non-POST requests get a "Request Method error" JSON
// response.
//
// Fixes over the previous version: a bad request body now aborts instead
// of inserting empty values; the Db.Prepare error is checked (it was
// ignored, so a failure panicked on a nil stmt); and the statement is
// closed on every path, not only on success.
func AddConfigFilePages(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w, req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqAddConfigFilePages
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			return
		}
		// The stored value encodes the redirect as "URL->TO".
		configValue := m.URL + "->" + m.TO
		stmt, err := Db.Prepare(`INSERT INTO proxypro.config_value1 (typeCode, configValue, companyName, subcompany, estatus)
values (?, ?, ?, ?, ?)`)
		if err != nil {
			glog.Info(err)
			return
		}
		defer stmt.Close()
		res, err := stmt.Exec(m.TypeCode, configValue, m.CompanyName, m.Subcompany, 1)
		glog.Info("res", res)
		if err != nil {
			glog.Info(res)
			glog.Info(err)
			return
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// reqEidtConfigFilePages is the request payload for EidtConfigFilePages.
// URL and TO are joined into the stored configValue as "URL->TO".
type reqEidtConfigFilePages struct {
	Id          int // primary key of the row to update
	URL         string
	TO          string
	TypeCode    string // present in the payload but not used by the update
	CompanyName string
	Subcompany  string
}
// EidtConfigFilePages handles POST requests that update one active
// (estatus = 1) config row, matched by Id, with a new "URL->TO"
// configValue and new company/subcompany values. Non-POST requests get a
// "Request Method error" JSON response.
// NOTE: the name is misspelled ("Eidt" -> "Edit") but is kept because the
// function is exported and may be referenced by route registrations
// elsewhere.
//
// Fixes over the previous version: the Db.Prepare error is now checked —
// previously a failed Prepare left stmt nil and the deferred stmt.Close()
// panicked.
func EidtConfigFilePages(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w, req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("ConfigFilePagesEdit recv body:", string(result))
		var m reqEidtConfigFilePages
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		// The stored value encodes the redirect as "URL->TO".
		configValue := m.URL + "->" + m.TO
		stmt, err := Db.Prepare(`update proxypro.config_value1 set configValue = ?, companyName = ?, subcompany= ?
where id = ? and estatus = 1`)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		defer stmt.Close()
		res, err := stmt.Exec(configValue, m.CompanyName, m.Subcompany, m.Id)
		glog.Info("res", res)
		if err != nil {
			glog.Info(res)
			glog.Info(err)
			return
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// reqEidtTTL is the request payload for EidtTTL.
type reqEidtTTL struct {
	Token    string // caller auth token (not currently validated by the handler)
	Id       int    // primary key of the proxypro.config_version row
	Platform int    // present in the payload but not used by the update
	LoadTime int    // new TTL value
}
// EidtTTL handles POST requests that change the loadTime (TTL) of one
// proxypro.config_version row, bumps its version_code, and refreshes the
// config cache in the background. Non-POST requests get a
// "Request Method error" JSON response.
// NOTE: the name is misspelled ("Eidt" -> "Edit") but is kept because the
// function is exported and may be referenced by route registrations
// elsewhere.
//
// Fixes over the previous version: both Db.Prepare errors are checked
// (they were ignored, so a failure panicked on a nil stmt), the first
// statement is no longer closed twice, the second statement is closed on
// its error path as well, and the leftover "666..." debug log lines were
// removed.
func EidtTTL(w http.ResponseWriter, req *http.Request) {
	//SetHttpHeader(w, req)
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("reqEidtTTL recv body:", string(result))
		var m reqEidtTTL
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		{
			// Update the TTL itself.
			stmt, err := Db.Prepare(`update proxypro.config_version set loadTime = ? where id = ? `)
			if err != nil {
				glog.Info(err)
				retError(err, w)
				return
			}
			res, err := stmt.Exec(m.LoadTime, m.Id)
			stmt.Close()
			if err != nil {
				glog.Info(res)
				glog.Info(err)
				return
			}
			// Refresh the cached config whenever loadTime changes.
			go CacheConfigJM()
		}
		{
			// Bump version_code whenever loadTime changes.
			stmt2, err2 := Db.Prepare(`update proxypro.config_version set version_code = version_code+1 where id = ? `)
			if err2 != nil {
				glog.Info(err2)
				retError(err2, w)
				return
			}
			res2, err2 := stmt2.Exec(m.Id)
			stmt2.Close()
			if err2 != nil {
				glog.Info(res2)
				glog.Info(err2)
				return
			}
		}
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 200
		retValue.Message = "Success"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
// reqChangePlatform is the request payload for ChangePlatform.
type reqChangePlatform struct {
	Token    string // caller auth token (not currently validated by the handler)
	Platform int    // platform whose config_version rows are toggled
	Status   int    // new status value
}
// ChangePlatform handles POST requests that set the status flag of the
// proxypro.config_version rows for the given platform, then refreshes
// the config cache asynchronously. Non-POST requests get a
// "Request Method error" JSON response.
//
// Fixes over the previous version: the Db.Prepare error is checked (it
// was ignored, so a failure panicked on a nil stmt) and the statement is
// closed on every path, not only on success.
func ChangePlatform(w http.ResponseWriter, req *http.Request) {
	if req.Method == "POST" {
		result, _ := ioutil.ReadAll(req.Body)
		req.Body.Close()
		glog.Info("reqChangePlatform recv body:", string(result))
		var m reqChangePlatform
		err := json.Unmarshal([]byte(result), &m)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		stmt, err := Db.Prepare(`update proxypro.config_version set status = ? where platform = ? `)
		if err != nil {
			glog.Info(err)
			retError(err, w)
			return
		}
		defer stmt.Close()
		res, err := stmt.Exec(m.Status, m.Platform)
		if err != nil {
			glog.Info(res)
			glog.Info(err)
			return
		}
		// Refresh the cached config in the background.
		go CacheConfigJM()
		retValue := &ResponseData{}
		retValue.Success = true
		retValue.Code = 0
		retValue.Message = "Success"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	} else {
		retValue := &ResponseData{}
		retValue.Success = false
		retValue.Code = 0
		retValue.Message = "Request Method error: POST"
		retValue.Data = ""
		bytes, _ := json.Marshal(retValue)
		w.Write([]byte(bytes))
	}
}
|
package userinterface
import (
ui "github.com/gizak/termui/v3"
"github.com/gizak/termui/v3/widgets"
)
// ProgressGraph is the loading gauge shown while searching for an image.
// It embeds a termui Gauge and adds small convenience setters.
type ProgressGraph struct {
	*widgets.Gauge
}
// SetPercent updates the percentage shown by the gauge.
func (graph *ProgressGraph) SetPercent(percent int) {
	graph.Percent = percent
}

// SetTitle updates the title displayed above the gauge.
func (graph *ProgressGraph) SetTitle(title string) {
	graph.Title = title
}

// SetBarColor updates the color of the gauge bar.
func (graph *ProgressGraph) SetBarColor(barColor ui.Color) {
	graph.BarColor = barColor
}

// SetLabelStyle updates the label style, wrapping the color in a
// default-modifier style.
func (graph *ProgressGraph) SetLabelStyle(labelStyle ui.Color) {
	graph.LabelStyle = ui.NewStyle(labelStyle)
}

// SetBorderStyleFg updates the foreground color of the border.
func (graph *ProgressGraph) SetBorderStyleFg(borderStyle ui.Color) {
	graph.BorderStyle.Fg = borderStyle
}
// NewProgressGraph builds the gauge used to display search progress,
// pre-configured with the default title, colors, and a 0% reading.
func NewProgressGraph() *ProgressGraph {
	g := &ProgressGraph{widgets.NewGauge()}
	g.Title = "Progress Indicator"
	g.Percent = 0
	g.BarColor = ui.ColorYellow
	g.LabelStyle = ui.NewStyle(ui.ColorBlue)
	g.BorderStyle.Fg = ui.ColorWhite
	return g
}
|
package engine
import "net/http"
// ResponseWriter extends the base response writer with access to the
// HTTP/2 server-push API.
type ResponseWriter interface {
	responseWriterBase
	// Pusher returns the http.Pusher for server push, or nil when the
	// underlying writer does not support it (see the implementation below).
	Pusher() http.Pusher
}
// Pusher returns the wrapped writer's http.Pusher when it supports
// HTTP/2 server push, and nil otherwise.
func (w *responseWriter) Pusher() (pusher http.Pusher) {
	p, ok := w.ResponseWriter.(http.Pusher)
	if !ok {
		return nil
	}
	return p
}
|
package methodtest
// delOneFromArray returns a new slice containing every element of slice
// that is not equal to n, preserving order. The input is not modified,
// and the result is always non-nil (an empty, allocated slice when every
// element is removed), matching the previous behavior.
func delOneFromArray(slice []int, n int) []int {
	// Pre-size to the worst case so the loop never reallocates.
	result := make([]int, 0, len(slice))
	for _, v := range slice {
		if v != n {
			result = append(result, v)
		}
	}
	return result
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package proxy
import (
"encoding/binary"
"encoding/json"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
grpcproto "google.golang.org/grpc/encoding/proto"
"github.com/Tencent/bk-bcs/bcs-common/common/blog"
"github.com/Tencent/bk-bcs/bcs-common/pkg/auth/iam"
"github.com/Tencent/bk-bcs/bcs-common/pkg/auth/jwt"
"github.com/Tencent/bk-bcs/bcs-scenarios/bcs-gitops-manager/pkg/common"
"github.com/Tencent/bk-bcs/bcs-scenarios/bcs-gitops-manager/pkg/store"
)
// GitOpsOptions for reverse proxy (fixed typo: was "revese").
type GitOpsOptions struct {
	// backend gitops kubernetes service and port
	Service string
	// URL prefix like /gitopsmanager/proxy/
	PathPrefix string
	// storage interface for access gitops data
	Storage store.Store
	// JWTDecoder is the JWT client used for authentication.
	JWTDecoder *jwt.JWTClient
	// IAMClient is basic client
	IAMClient iam.PermClient
	// SecretOption configures the secret service endpoint.
	SecretOption *SecretOption
	// TraceOption configures the bkmonitor APM endpoint.
	TraceOption *TraceOption
}
// TraceOption defines the config of bkmonitor APM.
type TraceOption struct {
	Endpoint string // APM collector endpoint
	Token    string // APM data token
}

// SecretOption defines the config of the secret service.
type SecretOption struct {
	Address string // secret service address
	Port    string // secret service port
}
// Validate checks that the mandatory options — the backend service and
// the storage implementation — were supplied.
func (opt *GitOpsOptions) Validate() error {
	switch {
	case len(opt.Service) == 0:
		return fmt.Errorf("lost gitops system information")
	case opt.Storage == nil:
		return fmt.Errorf("lost gitops storage access")
	}
	return nil
}
// GitOpsProxy definition for all kinds of
// gitops solution
type GitOpsProxy interface {
	http.Handler
	// Init prepares the proxy before it starts serving.
	Init() error
}

// UserInfo wraps the decoded JWT claims used for token validation.
type UserInfo struct {
	*jwt.UserClaimsInfo
}
// GetUser returns the effective identity: the user name when present,
// otherwise the client ID, otherwise the empty string.
func (user *UserInfo) GetUser() string {
	switch {
	case user.UserName != "":
		return user.UserName
	case user.ClientID != "":
		return user.ClientID
	}
	return ""
}
const (
	// headerBKUserName carries the acting user's name, forwarded by
	// trusted admin clients (see GetJWTInfo).
	headerBKUserName = "bkUserName"
	// AdminClientUser is the client ID of the admin client.
	AdminClientUser = "admin"
	// AdminGitOpsUser is the client ID of the gitops manager itself.
	AdminGitOpsUser = "bcs-gitops-manager"
)
// GetJWTInfo decodes the user carried in the request's "Authorization"
// header. When the token belongs to one of the admin clients, the
// effective user name is taken from the bkUserName header instead, so
// admin components can act on behalf of real users.
func GetJWTInfo(req *http.Request, client *jwt.JWTClient) (*UserInfo, error) {
	raw := req.Header.Get("Authorization")
	user, err := GetJWTInfoWithAuthorization(raw, client)
	if err != nil {
		return nil, errors.Wrapf(err, "get authorization user failed")
	}
	// Admin clients forward the real operator in the bkUserName header.
	if user.ClientID == AdminGitOpsUser || user.ClientID == AdminClientUser {
		userName := req.Header.Get(headerBKUserName)
		user.UserName = userName
	}
	return user, nil
}
// GetJWTInfoWithAuthorization decodes the user information carried in the
// given "Authorization: Bearer <token>" header value. It returns an
// error when the header is missing, not Bearer-prefixed, fails JWT
// decoding, or carries no user/client identity.
func GetJWTInfoWithAuthorization(authorization string, client *jwt.JWTClient) (*UserInfo, error) {
	if len(authorization) == 0 {
		return nil, fmt.Errorf("lost 'Authorization' header")
	}
	if !strings.HasPrefix(authorization, "Bearer ") {
		// fixed user-facing typo: was "hader 'Authorization' malform"
		return nil, fmt.Errorf("header 'Authorization' malformed")
	}
	token := strings.TrimPrefix(authorization, "Bearer ")
	claim, err := client.JWTDecode(token)
	if err != nil {
		return nil, err
	}
	u := &UserInfo{claim}
	if u.GetUser() == "" {
		return nil, fmt.Errorf("lost user information")
	}
	return u, nil
}
// IsAdmin check if request comes from admin,
// only use for gitops command line.
// It compares the BCS client header against the short service name.
func IsAdmin(req *http.Request) bool {
	token := req.Header.Get(common.HeaderBCSClient)
	return token == common.ServiceNameShort
}
// JSONResponse convenient tool for response
func JSONResponse(w http.ResponseWriter, obj interface{}) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(http.StatusOK)
content, _ := json.Marshal(obj)
fmt.Fprintln(w, string(content))
}
// DirectlyResponse 对象本身就是个字符串,直接写入返回
func DirectlyResponse(w http.ResponseWriter, obj interface{}) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, obj)
}
var (
	// httpStatusCode maps HTTP status codes onto their gRPC equivalents;
	// returnGrpcCode falls back to codes.Unknown for any status missing
	// from this table.
	httpStatusCode = map[int]codes.Code{
		http.StatusOK:                  codes.OK,
		http.StatusBadRequest:          codes.InvalidArgument,
		http.StatusNotFound:            codes.NotFound,
		http.StatusUnauthorized:        codes.Unauthenticated,
		http.StatusInternalServerError: codes.Internal,
		http.StatusServiceUnavailable:  codes.Unavailable,
	}
)
// returnGrpcCode translates an HTTP status code into its gRPC
// counterpart, defaulting to codes.Unknown for unmapped statuses.
func returnGrpcCode(statusCode int) codes.Code {
	if code, ok := httpStatusCode[statusCode]; ok {
		return code
	}
	return codes.Unknown
}
// GRPCErrorResponse reports an error to a gRPC client: the HTTP status
// code is mapped onto a gRPC status code and carried, together with the
// error message, in the grpc-status / grpc-message headers.
// NOTE(review): the HTTP status written is deliberately 200 — gRPC
// transports the real status in the headers set above — and the body is
// intentionally empty despite the comment below; confirm clients do not
// expect a message body.
func GRPCErrorResponse(w http.ResponseWriter, statusCode int, err error) {
	grpcCode := returnGrpcCode(statusCode)
	w.Header().Set("Content-Type", "application/grpc+proto")
	w.Header().Set("grpc-status", strconv.Itoa(int(grpcCode)))
	w.Header().Set("grpc-message", err.Error())
	w.WriteHeader(http.StatusOK)
	// Write the error message as the response body
	_, _ = w.Write(nil)
}
var (
	// grpcSuffixBytes is the fixed gRPC trailer frame appended after the
	// proto body by GRPCResponse: a trailer flag byte (0x80), a 4-byte
	// big-endian length (54), then the literal text
	// "content-type: application/grpc+proto\r\ngrpc-status: 0\r\n".
	grpcSuffixBytes = []byte{128, 0, 0, 0, 54, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 58, 32, 97,
		112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 43, 112, 114, 111, 116, 111, 13, 10,
		103, 114, 112, 99, 45, 115, 116, 97, 116, 117, 115, 58, 32, 48, 13, 10}
)
// GRPCResponse writes obj back to the client in gRPC wire format.
// The gRPC proto framing is:
//   - byte 1 marks whether the message is compressed, see: compressed in
//     google.golang.org/grpc/rpc_util.go
//   - bytes 2-5 carry the body length, see: the recvMsg method in
//     google.golang.org/grpc/rpc_util.go; the length is big-endian
//   - the following bytes are the body + the content-type trailer
//
// After extracting the body bytes, grpc.encoding can unmarshal them.
func GRPCResponse(w http.ResponseWriter, obj interface{}) {
	w.Header().Set("Content-Type", "application/grpc+proto")
	w.Header().Set("grpc-status", "0")
	w.WriteHeader(http.StatusOK)
	bs, err := encoding.GetCodec(grpcproto.Name).Marshal(obj)
	if err != nil {
		// Encoding failure: log it and send an empty body; the grpc-status
		// header was already set to 0 above.
		blog.Errorf("grpc proto encoding marshal failed: %s", err.Error())
		_, _ = w.Write([]byte{})
		return
	}
	// 4-byte big-endian body length.
	header := make([]byte, 4)
	binary.BigEndian.PutUint32(header, uint32(len(bs)))
	result := make([]byte, 0, 5+len(bs)+len(grpcSuffixBytes))
	// The first byte of a gRPC frame marks whether it is compressed,
	// see: compressionNone in google.golang.org/grpc/rpc_util.go
	result = append(result, 0)
	result = append(result, header...)
	result = append(result, bs...)
	result = append(result, grpcSuffixBytes...)
	_, _ = w.Write(result)
}
// BUG21955Workaround ! copy from argocd.
// It wraps a handler and rewrites request paths so URL-escaped segments
// survive routing (see ServeHTTP below and golang/go#21955).
type BUG21955Workaround struct {
	Handler http.Handler // the handler to delegate to after the path fix
}
// Workaround for https://github.com/golang/go/issues/21955 to support escaped URLs in URL path.
// pathPatters lists the API paths whose final segment may contain
// URL-escaped characters. (Name note: "Patters" is a typo for
// "Patterns", kept because the variable is referenced elsewhere in this
// package.)
var pathPatters = []*regexp.Regexp{
	regexp.MustCompile(`/api/v1/clusters/[^/]+`),
	regexp.MustCompile(`/api/v1/repositories/[^/]+`),
	regexp.MustCompile(`/api/v1/repocreds/[^/]+`),
	regexp.MustCompile(`/api/v1/repositories/[^/]+/apps`),
	regexp.MustCompile(`/api/v1/repositories/[^/]+/apps/[^/]+`),
	regexp.MustCompile(`/settings/clusters/[^/]+`),
}
// ServeHTTP implementation: when the raw (still-escaped) path matches one
// of pathPatters, the decoded Path is replaced by RawPath so the escaped
// form survives downstream routing (golang/go#21955); the request is then
// delegated to the wrapped handler.
func (work *BUG21955Workaround) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	blog.Infof("proxy %s RequestURI %s, header: %+v", r.Method, r.URL.RequestURI(), r.Header)
	for _, pattern := range pathPatters {
		if pattern.MatchString(r.URL.RawPath) {
			// Keep the escaped form as the effective path.
			r.URL.Path = r.URL.RawPath
			blog.Warnf("proxy URL RawPath fix %s", r.URL.RawPath)
			break
		}
	}
	work.Handler.ServeHTTP(w, r)
}
|
package main
import (
"flag"
"omokServer"
)
// hostConf aggregates every command-line tunable for the omok game host:
// game/port limits, Redis connection settings, and the embedded
// omokServer configuration (see createHostConf for defaults).
type hostConf struct {
	maxGameCount             int
	startTcpPort             int
	RedisAddress             string
	RedisPoolSize            int
	RedisReqTaskChanCapacity int
	omokConf                 omokServer.OmokConf
}
//-c_maxGameCount=100 -c_startTcpPort=11021 -c_network=tcp4 -c_ipAddress=127.0.0.1
// createHostConf registers all command-line flags with their defaults,
// parses the process arguments, and returns the resulting configuration.
// Note: flag.Parse is called here, so any other flag registration must
// happen before this function runs.
func createHostConf() hostConf {
	var conf hostConf
	flag.IntVar(&conf.maxGameCount, "c_maxGameCount", 1, "int flag")
	flag.IntVar(&conf.startTcpPort, "c_startTcpPort", 11021, "int flag")
	flag.StringVar(&conf.RedisAddress, "c_redisAddress", "127.0.0.1:6379", "string flag")
	flag.IntVar(&conf.RedisPoolSize, "c_redisPoolSize", 8, "int flag")
	flag.IntVar(&conf.RedisReqTaskChanCapacity, "c_redisReqTaskChanCapacity", 32, "int flag")
	flag.StringVar(&conf.omokConf.Network, "c_network", "tcp4", "string flag")
	flag.StringVar(&conf.omokConf.IPAddress, "c_ipAddress", "127.0.0.1", "string flag")
	flag.IntVar(&conf.omokConf.MaxSessionCount, "c_maxSessionCount", 4, "int flag")
	flag.IntVar(&conf.omokConf.MaxPacketSize, "c_maxPacketSize", 1024, "int flag")
	flag.IntVar(&conf.omokConf.RecvPacketRingBufferMaxSize, "c_recvPacketRingBufferMaxSize", 1024*16, "int flag")
	flag.IntVar(&conf.omokConf.MaxNetMsgChanBufferCount, "c_maxNetMsgChanBufferCount", 128, "int flag")
	flag.IntVar(&conf.omokConf.RedisResChanCapacity, "c_redisResChanCapacity", 32, "int flag")
	flag.IntVar(&conf.omokConf.UserSendBufferSzie, "c_userSendBufferSzie", 1024*16, "int flag")
	flag.IntVar(&conf.omokConf.HeartbeatReqIntervalTimeMSec, "c_heartbeatReqIntervalTimeMSec", 3000, "int flag")
	flag.IntVar(&conf.omokConf.HeartbeatWaitTimeMSec, "c_heartbeatWaitTimeMSec", 3000, "int flag")
	flag.Parse()
	return conf
}
|
package nebulatest
import (
"fmt"
"strings"
"github.com/vesoft-inc/nebula-go/graph"
)
// Differ compares an execution response against an expected result
// representation.
type Differ interface {
	// Diff records/performs the comparison against the given result string.
	Diff(result string)
	// Error returns the comparison error, if any.
	Error() error
}

// DifferError holds the error produced by a diff run.
type DifferError struct {
	err error
}

// Error returns the wrapped error.
// NOTE(review): the name clashes with the builtin error interface's
// Error() string method signature convention; consider Err() in a future
// API revision.
func (d *DifferError) Error() error {
	return d.err
}
// NewDiffer returns a Differ for the given response. dType selects the
// implementation (case-insensitively): "json" (optionally
// order-sensitive via order) or "table". Any other dType yields an
// error.
func NewDiffer(resp *graph.ExecutionResponse, dType string, order bool) (Differ, error) {
	switch strings.ToLower(dType) {
	case "json":
		return &JsonDiffer{
			Response: resp,
			Order:    order,
		}, nil
	case "table":
		return &TableDiffer{
			Response: resp,
		}, nil
	default:
		// Error strings are lowercase per Go convention (was "Invalid ...").
		return nil, fmt.Errorf("invalid differ type: %s", dType)
	}
}
|
package command
import (
"flag"
"fmt"
"sort"
"rsc.io/getopt"
)
// Help is a "help" cli command and "-h"
type Help struct {
*command
}
// NewHelp creates an instance of Help bound to the given command pool.
func NewHelp(pool Pooler, name string) *Help {
	return &Help{newCommand(pool, name)}
}
// Run implements Commander. It understands -h/--help and -l/--list; with
// a command name argument it prints that command's usage, otherwise it
// prints the general usage text. The int result is the exit code.
//
// Fixes over the previous version: the names slice was created with
// make([]string, len(cmds)) and then appended to, so the listing started
// with len(cmds) empty lines; the list footer contained a stray `")` and
// the general help text said "too see".
func (c *Help) Run(args []string) (int, error) {
	var help, list bool
	fs := getopt.NewFlagSet(c.Name(), flag.ExitOnError)
	fs.BoolVar(&help, "help", false, "show help")
	fs.BoolVar(&list, "list", false, "list all available commands")
	fs.Alias("h", "help")
	fs.Alias("l", "list")
	fs.SetOutput(stderr)
	if err := fs.Parse(args); err != nil {
		return 1, err
	}
	if list {
		cmds := c.pool.Commands()
		// Collect and sort the command names for a stable listing.
		names := make([]string, 0, len(cmds))
		for name := range cmds {
			names = append(names, name)
		}
		sort.Strings(names)
		for _, name := range names {
			mark := ""
			if name == defaultCmd {
				mark = " default command"
			}
			fmt.Fprintf(stdout, " %s%s\n", name, mark)
		}
		fmt.Fprint(
			stdout,
			`
Run 'maze help [command]' to see help about specific command.
`,
		)
		return 0, nil
	}
	rest := fs.Args()
	if len(rest) > 0 {
		// Help for a specific command.
		name := rest[0]
		cmd := c.pool.Command(name)
		if cmd != nil {
			fmt.Fprint(stdout, cmd.Usage())
			return 0, nil
		}
		return 1, fmt.Errorf("unknown help topic %q", name)
	}
	fmt.Fprint(
		stdout,
		`maze [command] [options]
Run 'maze help -l' to see available commands.
Run 'maze help [command]' to see help about specific command.
The default command is 'gen'. Run 'maze help gen' to see help for generation.
TBW: ...
`,
	)
	return 0, nil
}
// Usage implements Commander. It documents both invocation forms of the
// help command and the --list option.
func (c *Help) Usage() string {
	return `maze help [options] [command]
maze (-h | --help) [options] [command]
Show help either about specified 'command' or common usage help.
Options:
-l, --list
List all available commands.
`
}
|
package descriptor
import (
"google.golang.org/grpc"
"testing"
)
// Test_descriptor_empty_package verifies the service name, metadata path,
// and first method parsed from a descriptor whose proto has no package.
func Test_descriptor_empty_package(t *testing.T) {
	desc, err := NewDescriptor("../testdata/protobuf/service/service.pb")
	if err != nil {
		t.Fatal(err)
	}
	sds := ServiceDescs(desc)
	service := "Service"
	if s := sds[0].ServiceName; s != service {
		t.Errorf("%s != %s", s, service)
	}
	metadata := "testdata/protobuf/service/service.proto"
	if s := sds[0].Metadata; s != metadata {
		t.Errorf("%s != %s", s, metadata)
	}
	method := "Call"
	if s := sds[0].Methods[0].MethodName; s != method {
		t.Errorf("%s != %s", s, method)
	}
}
// Test_descriptor_ping checks descriptor parsing for a packaged proto:
// qualified service name, unary method, and all three streaming variants
// (client, server, bidirectional).
func Test_descriptor_ping(t *testing.T) {
	desc, err := NewDescriptor("../testdata/protobuf/ping/ping.pb")
	if err != nil {
		t.Fatal(err)
	}
	sds := ServiceDescs(desc)
	// Fail fast instead of panicking with index-out-of-range below.
	if len(sds) == 0 {
		t.Fatal("ServiceDescs returned no services")
	}
	service := "ping.PingService"
	if s := sds[0].ServiceName; s != service {
		t.Errorf("%s != %s", s, service)
	}
	metadata := "testdata/protobuf/ping/ping.proto"
	if s := sds[0].Metadata; s != metadata {
		t.Errorf("%s != %s", s, metadata)
	}
	if len(sds[0].Methods) == 0 {
		t.Fatal("service has no methods")
	}
	method := "Send"
	if s := sds[0].Methods[0].MethodName; s != method {
		t.Errorf("%s != %s", s, method)
	}
	// checkStream asserts the stream's name and its client/server direction flags.
	checkStream := func(sd grpc.StreamDesc, name string, c bool, s bool) {
		if s := sd.StreamName; s != name {
			t.Errorf("%s != %s %v", s, name, sd)
		}
		if b := sd.ClientStreams; b != c {
			t.Errorf("%v != %v %v", b, c, sd)
		}
		if b := sd.ServerStreams; b != s {
			t.Errorf("%v != %v %v", b, s, sd)
		}
	}
	if len(sds[0].Streams) < 3 {
		t.Fatalf("expected at least 3 streams, got %d", len(sds[0].Streams))
	}
	checkStream(sds[0].Streams[0], "SendStreamC", true, false)
	checkStream(sds[0].Streams[1], "SendStreamS", false, true)
	checkStream(sds[0].Streams[2], "SendStreamB", true, true)
}
|
package main
import (
"bytes"
"errors"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
)
// TestMainFunc starts main in a background goroutine and sleeps briefly to
// let the server come up.
// NOTE(review): this recover runs in the test goroutine, so it cannot catch
// a panic raised inside the `go main()` goroutine — such a panic still
// crashes the whole test binary. Confirm whether this guard is intended.
func TestMainFunc(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Fatalf("Main panicked ??")
		}
	}()
	go main()
	time.Sleep(1 * time.Second)
}
// TestModifyAPI exercises the /modify image endpoint through an httptest
// server and checks the returned status codes.
func TestModifyAPI(t *testing.T) {
	testserver := httptest.NewServer(getHandlers())
	defer testserver.Close()
	// newreq builds a request or aborts the test on failure.
	newreq := func(method, url string, body io.Reader) *http.Request {
		r, err := http.NewRequest(method, url, body)
		if err != nil {
			t.Fatal(err)
		}
		return r
	}
	tests := []struct {
		name   string
		r      *http.Request
		status int
	}{
		{name: "1: test get", r: newreq("GET", testserver.URL+"/modify/img/input.png?mode=2", nil), status: 200},
		{name: "2: test get", r: newreq("GET", testserver.URL+"/modify/img/input.png?mode=2&number=100", nil), status: 200},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resp, err := http.DefaultClient.Do(tt.r)
			if err != nil {
				// Fatal, not Error: resp is nil on error, so continuing
				// would panic on resp.Body.Close below.
				t.Fatal(err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != tt.status {
				t.Error("error in debug api")
			}
		})
	}
}
// TestSingleModeAPI checks that the root endpoint responds with HTTP 200.
func TestSingleModeAPI(t *testing.T) {
	server := httptest.NewServer(getHandlers())
	defer server.Close()
	// mustRequest builds a request or aborts the test on failure.
	mustRequest := func(method, url string, body io.Reader) *http.Request {
		req, reqErr := http.NewRequest(method, url, body)
		if reqErr != nil {
			t.Fatal(reqErr)
		}
		return req
	}
	cases := []struct {
		name   string
		r      *http.Request
		status int
	}{
		{name: "1:/localhost:3000", r: mustRequest("GET", server.URL+"/", nil), status: 200},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			resp, doErr := http.DefaultClient.Do(tc.r)
			if doErr != nil {
				t.Fatal(doErr)
			}
			defer resp.Body.Close()
			if resp.StatusCode != tc.status {
				t.Error("error")
			}
		})
	}
}
// TestUploadAPI posts a multipart-encoded image to /upload and expects 200.
func TestUploadAPI(t *testing.T) {
	testserver := httptest.NewServer(getHandlers())
	defer testserver.Close()
	// request builds a multipart POST carrying ./img/input.png under the
	// "image" form field.
	request := func(method, url string) *http.Request {
		file, err := os.Open("./img/input.png")
		if err != nil {
			// Fatal, not Error: file is nil on failure and would panic below.
			t.Fatal(err)
		}
		defer file.Close() // the file handle was previously leaked
		body := &bytes.Buffer{}
		writer := multipart.NewWriter(body)
		part, err := writer.CreateFormFile("image", file.Name())
		if err != nil {
			t.Fatal("error in copy")
		}
		_, err = io.Copy(part, file)
		if err != nil {
			t.Fatal("error in copy")
		}
		err = writer.Close()
		if err != nil {
			t.Fatal("error in close writer")
		}
		r, err := http.NewRequest(method, url, body)
		if err != nil {
			t.Fatal(err)
		}
		r.Header.Set("Content-Type", writer.FormDataContentType())
		return r
	}
	tests := []struct {
		name   string
		r      *http.Request
		status int
	}{
		{name: "1: /Upload", r: request("POST", testserver.URL+"/upload"), status: 200},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resp, err := http.DefaultClient.Do(tt.r)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != tt.status {
				t.Error("error in debug api")
			}
		})
	}
}
// TestErrorResponse ensures errorResponse can write an error to a recorder
// without panicking.
func TestErrorResponse(t *testing.T) {
	recorder := httptest.NewRecorder()
	testErr := errors.New("error")
	errorResponse(recorder, testErr)
}
// TestUploadAPIError posts the image under a wrong form field name ("ima"
// instead of "image") and expects the handler to answer 500.
func TestUploadAPIError(t *testing.T) {
	testserver := httptest.NewServer(getHandlers())
	defer testserver.Close()
	request := func(method, url string) *http.Request {
		file, err := os.Open("./img/input.png")
		if err != nil {
			// Fatal, not Error: file is nil on failure and would panic below.
			t.Fatal(err)
		}
		defer file.Close() // the file handle was previously leaked
		body := &bytes.Buffer{}
		writer := multipart.NewWriter(body)
		// Deliberately wrong field name to trigger the error path.
		part, err := writer.CreateFormFile("ima", file.Name())
		if err != nil {
			t.Fatal("error in copy")
		}
		_, err = io.Copy(part, file)
		if err != nil {
			t.Fatal("error in copy")
		}
		err = writer.Close()
		if err != nil {
			t.Fatal("error in close writer")
		}
		r, err := http.NewRequest(method, url, body)
		if err != nil {
			t.Fatal(err)
		}
		r.Header.Set("Content-Type", writer.FormDataContentType())
		return r
	}
	tests := []struct {
		name   string
		r      *http.Request
		status int
	}{
		{name: "1: /Upload", r: request("POST", testserver.URL+"/upload"), status: 500},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resp, err := http.DefaultClient.Do(tt.r)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != tt.status {
				t.Error("error in debug api")
			}
		})
	}
}
// TestUploadAPIError1 posts a truncated multipart body (the writer is
// intentionally never closed, so the terminating boundary is missing) and
// expects the handler to answer 500.
func TestUploadAPIError1(t *testing.T) {
	testserver := httptest.NewServer(getHandlers())
	defer testserver.Close()
	request := func(method, url string) *http.Request {
		file, err := os.Open("./img/input.png")
		if err != nil {
			// Fatal, not Error: file is nil on failure and would panic below.
			t.Fatal(err)
		}
		defer file.Close() // the file handle was previously leaked
		body := &bytes.Buffer{}
		writer := multipart.NewWriter(body)
		part, err := writer.CreateFormFile("image", "input.pngg")
		if err != nil {
			t.Fatal("error in copy")
		}
		_, err = io.Copy(part, file)
		if err != nil {
			t.Fatal("error in copy")
		}
		// Intentionally NOT closing the writer: the missing final boundary
		// is what makes the server fail with 500.
		// err = writer.Close()
		// if err != nil {
		// 	t.Fatal("error in close writer")
		// }
		r, err := http.NewRequest(method, url, body)
		if err != nil {
			t.Fatal(err)
		}
		r.Header.Set("Content-Type", writer.FormDataContentType())
		return r
	}
	tests := []struct {
		name   string
		r      *http.Request
		status int
	}{
		{name: "1: /Upload", r: request("POST", testserver.URL+"/upload"), status: 500},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resp, err := http.DefaultClient.Do(tt.r)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != tt.status {
				t.Error("error in debug api")
			}
		})
	}
}
|
package sync
import (
"time"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/xormdb"
"xorm.io/xorm"
)
// delMftDb deletes all manifest (MFT) records whose filePath starts with
// filePathPrefix, together with their dependent rows, within the given
// transaction session. Child tables (file_hash, sia, aia) are deleted
// before the parent lab_rpki_mft rows. Returns nil when nothing matches.
func delMftDb(session *xorm.Session, filePathPrefix string) (err error) {
	start := time.Now()
	belogs.Debug("delMftDb():will delete lab_rpki_mft_*** by filePathPrefix :", filePathPrefix)
	// get mftIds: parent ids matched by LIKE 'prefix%'.
	mftIds := make([]int64, 0)
	err = session.SQL("select id from lab_rpki_mft Where filePath like ? ",
		filePathPrefix+"%").Find(&mftIds)
	if err != nil {
		belogs.Error("delMftDb(): get mftIds fail, filePathPrefix: ", filePathPrefix, err)
		return err
	}
	// Nothing to delete.
	if len(mftIds) == 0 {
		belogs.Debug("delMftDb(): len(mftIds)==0, filePathPrefix: ", filePathPrefix)
		return nil
	}
	// mftIdsStr is a "(id1,id2,...)" fragment built from int64 ids only,
	// so the string-concatenated SQL below is not injectable.
	mftIdsStr := xormdb.Int64sToInString(mftIds)
	belogs.Debug("delMftDb():will delete lab_rpki_mft len(mftIds):", len(mftIds), mftIdsStr,
		"   filePathPrefix:", filePathPrefix)
	// get filehashIds referencing the matched manifests
	fileHashIds, err := getIdsByParamIdsDb("lab_rpki_mft_file_hash", "mftId", mftIdsStr)
	if err != nil {
		belogs.Error("delMftDb(): get fileHashIds fail, filePathPrefix: ", filePathPrefix,
			"   mftIdsStr:", mftIdsStr, err)
		return err
	}
	belogs.Debug("delMftDb(): len(fileHashIds):", len(fileHashIds), "  filePathPrefix:", filePathPrefix,
		"   mftIdsStr:", mftIdsStr)
	// get siaIds referencing the matched manifests
	siaIds, err := getIdsByParamIdsDb("lab_rpki_mft_sia", "mftId", mftIdsStr)
	if err != nil {
		belogs.Error("delMftDb(): get siaIds fail, filePathPrefix: ", filePathPrefix,
			"   mftIdsStr:", mftIdsStr, err)
		return err
	}
	belogs.Debug("delMftDb(): len(siaIds):", len(siaIds), "  filePathPrefix:", filePathPrefix,
		"   mftIdsStr:", mftIdsStr)
	// get aiaIds referencing the matched manifests
	aiaIds, err := getIdsByParamIdsDb("lab_rpki_mft_aia", "mftId", mftIdsStr)
	if err != nil {
		belogs.Error("delMftDb(): get aiaIds fail, filePathPrefix: ", filePathPrefix,
			"   mftIdsStr:", mftIdsStr, err)
		return err
	}
	belogs.Debug("delMftDb(): len(aiaIds):", len(aiaIds), "  filePathPrefix:", filePathPrefix,
		"   mftIdsStr:", mftIdsStr)
	// del filehashIds
	fileHashIdsStr := xormdb.Int64sToInString(fileHashIds)
	if len(fileHashIdsStr) > 0 {
		_, err := session.Exec("delete from  lab_rpki_mft_file_hash  where id in " + fileHashIdsStr)
		if err != nil {
			belogs.Error("delMftDb():delete  from lab_rpki_mft_file_hash fail: fileHashIdsStr: ", fileHashIdsStr,
				"   filePathPrefix:", filePathPrefix, "   err:", err)
			return err
		}
	}
	// del siaIds
	siaIdsStr := xormdb.Int64sToInString(siaIds)
	if len(siaIdsStr) > 0 {
		_, err = session.Exec("delete from  lab_rpki_mft_sia  where id in " + siaIdsStr)
		if err != nil {
			belogs.Error("delMftDb():delete  from lab_rpki_mft_sia fail: siaIdsStr: ", siaIdsStr,
				"   filePathPrefix:", filePathPrefix, "   err:", err)
			return err
		}
	}
	// del aiaIds (comment previously said "del siaIds" — it deletes aia rows)
	aiaIdsStr := xormdb.Int64sToInString(aiaIds)
	if len(aiaIdsStr) > 0 {
		_, err = session.Exec("delete from  lab_rpki_mft_aia  where id in " + aiaIdsStr)
		if err != nil {
			belogs.Error("delMftDb():delete  from lab_rpki_mft_aia fail: aiaIdsStr: ", aiaIdsStr,
				"   filePathPrefix:", filePathPrefix, "   err:", err)
			return err
		}
	}
	// del mftIds: finally remove the parent rows
	_, err = session.Exec("delete from  lab_rpki_mft  where id in " + mftIdsStr)
	if err != nil {
		belogs.Error("delMftDb():delete  from lab_rpki_mft fail: mftIdsStr: ", mftIdsStr,
			"   filePathPrefix:", filePathPrefix, "   err:", err)
		return err
	}
	belogs.Info("delMftDb():delete lab_rpki_mft_*** ok, by filePathPrefix :", filePathPrefix,
		"  len(mftIds)", len(mftIds), "  time(s):", time.Since(start))
	return nil
}
|
// Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gatewayserver contains the structs and methods necessary to start a gRPC Gateway Server
package gatewayserver
import (
"context"
"fmt"
"net"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gogo/protobuf/types"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"go.thethings.network/lorawan-stack/pkg/auth/rights"
"go.thethings.network/lorawan-stack/pkg/cluster"
"go.thethings.network/lorawan-stack/pkg/component"
"go.thethings.network/lorawan-stack/pkg/encoding/lorawan"
"go.thethings.network/lorawan-stack/pkg/errors"
"go.thethings.network/lorawan-stack/pkg/events"
"go.thethings.network/lorawan-stack/pkg/frequencyplans"
"go.thethings.network/lorawan-stack/pkg/gatewayserver/io"
iogrpc "go.thethings.network/lorawan-stack/pkg/gatewayserver/io/grpc"
"go.thethings.network/lorawan-stack/pkg/gatewayserver/io/mqtt"
"go.thethings.network/lorawan-stack/pkg/gatewayserver/io/udp"
"go.thethings.network/lorawan-stack/pkg/gatewayserver/scheduling"
"go.thethings.network/lorawan-stack/pkg/log"
"go.thethings.network/lorawan-stack/pkg/rpcmetadata"
"go.thethings.network/lorawan-stack/pkg/rpcmiddleware/hooks"
"go.thethings.network/lorawan-stack/pkg/rpcmiddleware/rpclog"
"go.thethings.network/lorawan-stack/pkg/ttnpb"
"go.thethings.network/lorawan-stack/pkg/unique"
"google.golang.org/grpc"
)
// GatewayServer implements the Gateway Server component.
//
// The Gateway Server exposes the Gs, GtwGs and NsGs services and MQTT and UDP frontends for gateways.
type GatewayServer struct {
	*component.Component
	// ctx is the server-scoped context carrying the "gatewayserver" logging namespace.
	ctx context.Context
	io.Server
	// config holds the frontend configuration passed to New.
	config *Config
	// connections maps unique gateway IDs (unique.ID) to *io.Connection values.
	connections sync.Map
}
// Context returns the context of the Gateway Server.
func (gs *GatewayServer) Context() context.Context {
	return gs.ctx
}
// Errors returned while starting frontends and looking up connections.
var (
	errListenFrontend = errors.DefineFailedPrecondition(
		"listen_frontend",
		"failed to start frontend listener `{protocol}` on address `{address}`",
	)
	errNotConnected = errors.DefineNotFound("not_connected", "gateway `{gateway_uid}` not connected")
)
// New returns new *GatewayServer.
//
// It starts the configured UDP frontends (each optionally bound to a
// fallback frequency plan), starts the MQTT frontends (protobuf v3 and v2,
// each over TCP and TLS where configured), registers NsGs RPC hooks and
// registers gs as a gRPC service on the component. Any frontend setup
// failure is returned as errListenFrontend.
func New(c *component.Component, conf *Config) (gs *GatewayServer, err error) {
	gs = &GatewayServer{
		Component: c,
		ctx:       log.NewContextWithField(c.Context(), "namespace", "gatewayserver"),
		config:    conf,
	}
	ctx, cancel := context.WithCancel(gs.Context())
	// Cancel the frontends' context only when setup fails; on success the
	// frontends keep running on ctx.
	defer func() {
		if err != nil {
			cancel()
		}
	}()
	// Start one UDP frontend per configured listener.
	for addr, fallbackFrequencyPlanID := range conf.UDP.Listeners {
		var conn *net.UDPConn
		conn, err = gs.ListenUDP(addr)
		if err != nil {
			return nil, errListenFrontend.WithCause(err).WithAttributes(
				"protocol", "udp",
				"address", addr,
			)
		}
		lisCtx := ctx
		// Bind the listener's fallback frequency plan to its context, if any.
		if fallbackFrequencyPlanID != "" {
			lisCtx = frequencyplans.WithFallbackID(ctx, fallbackFrequencyPlanID)
		}
		udp.Start(lisCtx, gs, conn, conf.UDP.Config)
	}
	// Start MQTT frontends for both wire formats, each over plain TCP and
	// over TLS when the corresponding listen address is set.
	for _, version := range []struct {
		Format mqtt.Format
		Config MQTTConfig
	}{
		{
			Format: mqtt.Protobuf,
			Config: conf.MQTT,
		},
		{
			Format: mqtt.ProtobufV2,
			Config: conf.MQTTV2,
		},
	} {
		for _, lis := range []struct {
			Listen   string
			Protocol string
			Net      func(component.Listener) (net.Listener, error)
		}{
			{
				Listen:   version.Config.Listen,
				Protocol: "tcp",
				Net:      component.Listener.TCP,
			},
			{
				Listen:   version.Config.ListenTLS,
				Protocol: "tls",
				Net:      component.Listener.TLS,
			},
		} {
			if lis.Listen == "" {
				continue
			}
			var componentLis component.Listener
			var netLis net.Listener
			componentLis, err = gs.ListenTCP(lis.Listen)
			if err == nil {
				netLis, err = lis.Net(componentLis)
			}
			if err != nil {
				return nil, errListenFrontend.WithCause(err).WithAttributes(
					"protocol", lis.Protocol,
					"address", lis.Listen,
				)
			}
			mqtt.Start(ctx, gs, netLis, version.Format, lis.Protocol)
		}
	}
	hooks.RegisterUnaryHook("/ttn.lorawan.v3.NsGs", rpclog.NamespaceHook, rpclog.UnaryNamespaceHook("gatewayserver"))
	hooks.RegisterUnaryHook("/ttn.lorawan.v3.NsGs", cluster.HookName, c.ClusterAuthUnaryHook())
	c.RegisterGRPC(gs)
	return gs, nil
}
// RegisterServices registers services provided by gs at s.
func (gs *GatewayServer) RegisterServices(s *grpc.Server) {
	ttnpb.RegisterGsServer(s, gs)
	ttnpb.RegisterNsGsServer(s, gs)
	ttnpb.RegisterGtwGsServer(s, iogrpc.New(gs))
}
// RegisterHandlers registers gRPC handlers.
// NOTE(review): the error returned by RegisterGsHandler is discarded here —
// confirm this is intentional.
func (gs *GatewayServer) RegisterHandlers(s *runtime.ServeMux, conn *grpc.ClientConn) {
	ttnpb.RegisterGsHandler(gs.Context(), s, conn)
}
// Roles returns the roles that the Gateway Server fulfills.
func (gs *GatewayServer) Roles() []ttnpb.PeerInfo_Role {
	return []ttnpb.PeerInfo_Role{ttnpb.PeerInfo_GATEWAY_SERVER}
}
// CustomContextFromIdentifier returns a derived context based on the given identifiers to use for the connection.
// It is an optional package-level hook; when non-nil it is applied in FillGatewayContext.
var CustomContextFromIdentifier func(context.Context, ttnpb.GatewayIdentifiers) (context.Context, error)
// Errors returned while resolving gateway identifiers.
var (
	errEntityRegistryNotFound = errors.DefineNotFound(
		"entity_registry_not_found",
		"Entity Registry not found",
	)
	errGatewayEUINotRegistered = errors.DefineNotFound(
		"gateway_eui_not_registered",
		"gateway EUI `{eui}` is not registered",
	)
	errEmptyIdentifiers = errors.Define("empty_identifiers", "empty identifiers")
)
// FillGatewayContext fills the given context and identifiers.
//
// When only the EUI is known, the gateway ID is resolved through the Entity
// Registry. Unregistered EUIs get a generated "eui-<hex>" ID unless
// RequireRegisteredGateways is set, in which case an error is returned.
// If the CustomContextFromIdentifier hook is set, it derives the final context.
func (gs *GatewayServer) FillGatewayContext(ctx context.Context, ids ttnpb.GatewayIdentifiers) (context.Context, ttnpb.GatewayIdentifiers, error) {
	ctx = gs.FillContext(ctx)
	if ids.IsZero() {
		return nil, ttnpb.GatewayIdentifiers{}, errEmptyIdentifiers
	}
	if ids.GatewayID == "" {
		er := gs.GetPeer(ctx, ttnpb.PeerInfo_ENTITY_REGISTRY, nil)
		if er == nil {
			return nil, ttnpb.GatewayIdentifiers{}, errEntityRegistryNotFound
		}
		extIDs, err := ttnpb.NewGatewayRegistryClient(er.Conn()).GetIdentifiersForEUI(ctx, &ttnpb.GetGatewayIdentifiersForEUIRequest{
			EUI: *ids.EUI,
		}, gs.WithClusterAuth())
		if err == nil {
			ids = *extIDs
		} else if errors.IsNotFound(err) {
			if gs.config.RequireRegisteredGateways {
				return nil, ttnpb.GatewayIdentifiers{}, errGatewayEUINotRegistered.WithAttributes("eui", *ids.EUI).WithCause(err)
			}
			// Derive a stable identifier from the EUI for unregistered gateways.
			ids.GatewayID = fmt.Sprintf("eui-%v", strings.ToLower(ids.EUI.String()))
		} else {
			return nil, ttnpb.GatewayIdentifiers{}, err
		}
	}
	if filler := CustomContextFromIdentifier; filler != nil {
		var err error
		if ctx, err = filler(ctx, ids); err != nil {
			return nil, ttnpb.GatewayIdentifiers{}, err
		}
	}
	return ctx, ids, nil
}
// Errors returned while resolving the gateway in the Entity Registry.
var (
	errGatewayNotRegistered = errors.DefineNotFound(
		"gateway_not_registered",
		"gateway `{gateway_uid}` is not registered",
	)
	errNoFallbackFrequencyPlan = errors.DefineNotFound(
		"no_fallback_frequency_plan",
		"gateway `{gateway_uid}` is not registered and no fallback frequency plan defined",
	)
)
// Connect connects a gateway by its identifiers to the Gateway Server, and returns a io.Connection for traffic and
// control.
//
// The caller must hold RIGHT_GATEWAY_LINK. The gateway is fetched from the
// Entity Registry with the caller's forwarded credentials, falling back to
// cluster auth when the call is unauthenticated. Unregistered gateways are
// admitted only when RequireRegisteredGateways is false and the context
// carries a fallback frequency plan. A goroutine handling the connection's
// upstream traffic is started before returning.
func (gs *GatewayServer) Connect(ctx context.Context, protocol string, ids ttnpb.GatewayIdentifiers) (*io.Connection, error) {
	if err := rights.RequireGateway(ctx, ids, ttnpb.RIGHT_GATEWAY_LINK); err != nil {
		return nil, err
	}
	uid := unique.ID(ctx, ids)
	logger := log.FromContext(ctx).WithField("gateway_uid", uid)
	ctx = events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:conn:%s", events.NewCorrelationID()))
	er := gs.GetPeer(ctx, ttnpb.PeerInfo_ENTITY_REGISTRY, nil)
	if er == nil {
		return nil, errEntityRegistryNotFound
	}
	var err error
	var callOpt grpc.CallOption
	callOpt, err = rpcmetadata.WithForwardedAuth(ctx, gs.AllowInsecureForCredentials())
	// Fall back to cluster auth when the request carries no credentials.
	if errors.IsUnauthenticated(err) {
		callOpt = gs.WithClusterAuth()
	} else if err != nil {
		return nil, err
	}
	gtw, err := ttnpb.NewGatewayRegistryClient(er.Conn()).Get(ctx, &ttnpb.GetGatewayRequest{
		GatewayIdentifiers: ids,
		FieldMask: types.FieldMask{
			Paths: []string{
				"frequency_plan_id",
				"schedule_downlink_late",
				"enforce_duty_cycle",
				"downlink_path_constraint",
			},
		},
	}, callOpt)
	if errors.IsNotFound(err) {
		if gs.config.RequireRegisteredGateways {
			return nil, errGatewayNotRegistered.WithAttributes("gateway_uid", uid).WithCause(err)
		}
		fpID, ok := frequencyplans.FallbackIDFromContext(ctx)
		if !ok {
			return nil, errNoFallbackFrequencyPlan.WithAttributes("gateway_uid", uid)
		}
		logger.Warn("Connect unregistered gateway")
		// Synthesize a minimal gateway using the listener's fallback frequency plan.
		gtw = &ttnpb.Gateway{
			GatewayIdentifiers:     ids,
			FrequencyPlanID:        fpID,
			EnforceDutyCycle:       true,
			DownlinkPathConstraint: ttnpb.DOWNLINK_PATH_CONSTRAINT_NONE,
		}
	} else if err != nil {
		return nil, err
	}
	fp, err := gs.FrequencyPlans.GetByID(gtw.FrequencyPlanID)
	if err != nil {
		return nil, err
	}
	scheduler, err := scheduling.NewScheduler(ctx, fp, gtw.EnforceDutyCycle, nil)
	if err != nil {
		return nil, err
	}
	conn := io.NewConnection(ctx, protocol, gtw, fp, scheduler)
	gs.connections.Store(uid, conn)
	registerGatewayConnect(ctx, ids)
	logger.Info("Connected")
	go gs.handleUpstream(conn)
	return conn, nil
}
// GetConnection returns the *io.Connection for the given gateway. If not found, this method returns nil, false.
func (gs *GatewayServer) GetConnection(ctx context.Context, ids ttnpb.GatewayIdentifiers) (*io.Connection, bool) {
	uid := unique.ID(ctx, ids)
	value, ok := gs.connections.Load(uid)
	if !ok {
		return nil, false
	}
	return value.(*io.Connection), true
}
// errNoNetworkServer is returned when no Network Server peer can be resolved for an uplink.
var errNoNetworkServer = errors.DefineNotFound("no_network_server", "no Network Server found to handle message")
var (
	// maxUpstreamHandlers is the maximum number of goroutines per gateway connection to handle upstream messages.
	maxUpstreamHandlers = int32(1 << 5)
	// upstreamHandlerIdleTimeout is the duration after which an idle upstream handler stops to save resources.
	upstreamHandlerIdleTimeout = (1 << 6) * time.Millisecond
)
// handleUpstream pumps uplink, status and TX acknowledgment messages from
// the gateway connection into a dynamically sized pool of handler
// goroutines (at most maxUpstreamHandlers; idle handlers exit after
// upstreamHandlerIdleTimeout). Uplinks are forwarded to the Network Server;
// statuses and TX acks are registered as events only. When the connection
// context ends, the connection is removed from the registry and the
// downlink claim is released.
func (gs *GatewayServer) handleUpstream(conn *io.Connection) {
	ctx := conn.Context()
	logger := log.FromContext(ctx)
	// Tear-down on connection context cancellation.
	defer func() {
		ids := conn.Gateway().GatewayIdentifiers
		gs.connections.Delete(unique.ID(ctx, ids))
		gs.UnclaimDownlink(ctx, ids)
		registerGatewayDisconnect(ctx, ids)
		logger.Info("Disconnected")
	}()
	wg := &sync.WaitGroup{}
	handlers := int32(0)
	handleCh := make(chan interface{})
	// handleFn is one worker: it processes items from handleCh until the
	// context ends or it has been idle for upstreamHandlerIdleTimeout.
	handleFn := func() {
		defer wg.Done()
		defer atomic.AddInt32(&handlers, -1)
		for {
			select {
			case <-ctx.Done():
				return
			case <-time.After(upstreamHandlerIdleTimeout):
				// Idle; let this handler exit to save resources.
				return
			case item := <-handleCh:
				switch msg := item.(type) {
				case *ttnpb.UplinkMessage:
					ctx := events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:uplink:%s", events.NewCorrelationID()))
					msg.CorrelationIDs = append(msg.CorrelationIDs, events.CorrelationIDsFromContext(ctx)...)
					registerReceiveUplink(ctx, conn.Gateway(), msg)
					// drop records a dropped uplink with whatever device
					// identifiers are available.
					drop := func(ids ttnpb.EndDeviceIdentifiers, err error) {
						logger := logger.WithError(err)
						if ids.JoinEUI != nil && !ids.JoinEUI.IsZero() {
							logger = logger.WithField("join_eui", *ids.JoinEUI)
						}
						if ids.DevEUI != nil && !ids.DevEUI.IsZero() {
							logger = logger.WithField("dev_eui", *ids.DevEUI)
						}
						if ids.DevAddr != nil && !ids.DevAddr.IsZero() {
							logger = logger.WithField("dev_addr", *ids.DevAddr)
						}
						logger.Debug("Drop message")
						registerDropUplink(ctx, ids, conn.Gateway(), msg, err)
					}
					ids, err := lorawan.GetUplinkMessageIdentifiers(msg)
					if err != nil {
						drop(ttnpb.EndDeviceIdentifiers{}, err)
						break
					}
					ns := gs.GetPeer(ctx, ttnpb.PeerInfo_NETWORK_SERVER, ids)
					if ns == nil {
						drop(ids, errNoNetworkServer)
						break
					}
					if _, err := ttnpb.NewGsNsClient(ns.Conn()).HandleUplink(ctx, msg, gs.WithClusterAuth()); err != nil {
						drop(ids, err)
						break
					}
					registerForwardUplink(ctx, ids, conn.Gateway(), msg, ns.Name())
				case *ttnpb.GatewayStatus:
					ctx := events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:status:%s", events.NewCorrelationID()))
					registerReceiveStatus(ctx, conn.Gateway(), msg)
				case *ttnpb.TxAcknowledgment:
					ctx := events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:tx_ack:%s", events.NewCorrelationID()))
					msg.CorrelationIDs = append(msg.CorrelationIDs, events.CorrelationIDsFromContext(ctx)...)
					if msg.Result == ttnpb.TxAcknowledgment_SUCCESS {
						registerSuccessDownlink(ctx, conn.Gateway())
					} else {
						registerFailDownlink(ctx, conn.Gateway(), msg)
					}
				}
			}
		}
	}
	defer wg.Wait()
	for {
		var item interface{}
		select {
		case <-ctx.Done():
			return
		case item = <-conn.Up():
		case item = <-conn.Status():
		case item = <-conn.TxAck():
		}
		// Try a non-blocking hand-off first; if no handler is free, spawn a
		// new one (up to the limit) and then block until one takes the item.
		select {
		case handleCh <- item:
		default:
			if atomic.LoadInt32(&handlers) < maxUpstreamHandlers {
				atomic.AddInt32(&handlers, 1)
				wg.Add(1)
				go handleFn()
			}
			handleCh <- item
		}
	}
}
// GetFrequencyPlan gets the specified frequency plan by the gateway identifiers.
//
// The frequency plan ID is fetched from the Entity Registry using the
// caller's forwarded credentials (or cluster auth when unauthenticated).
// For unregistered gateways the context's fallback frequency plan ID is
// used when available; otherwise the not-found error is returned.
func (gs *GatewayServer) GetFrequencyPlan(ctx context.Context, ids ttnpb.GatewayIdentifiers) (*frequencyplans.FrequencyPlan, error) {
	er := gs.GetPeer(ctx, ttnpb.PeerInfo_ENTITY_REGISTRY, nil)
	if er == nil {
		return nil, errEntityRegistryNotFound
	}
	var err error
	var callOpt grpc.CallOption
	callOpt, err = rpcmetadata.WithForwardedAuth(ctx, gs.AllowInsecureForCredentials())
	// Fall back to cluster auth when the request carries no credentials.
	if errors.IsUnauthenticated(err) {
		callOpt = gs.WithClusterAuth()
	} else if err != nil {
		return nil, err
	}
	gtw, err := ttnpb.NewGatewayRegistryClient(er.Conn()).Get(ctx, &ttnpb.GetGatewayRequest{
		GatewayIdentifiers: ids,
		FieldMask:          types.FieldMask{Paths: []string{"frequency_plan_id"}},
	}, callOpt)
	var fpID string
	if err == nil {
		fpID = gtw.FrequencyPlanID
	} else if errors.IsNotFound(err) {
		var ok bool
		fpID, ok = frequencyplans.FallbackIDFromContext(ctx)
		if !ok {
			return nil, err
		}
	} else {
		return nil, err
	}
	return gs.FrequencyPlans.GetByID(fpID)
}
// ClaimDownlink claims the downlink path for the given gateway.
// It delegates to the component's ClaimIDs.
func (gs *GatewayServer) ClaimDownlink(ctx context.Context, ids ttnpb.GatewayIdentifiers) error {
	return gs.ClaimIDs(ctx, ids)
}
// UnclaimDownlink releases the claim of the downlink path for the given gateway.
// It delegates to the component's UnclaimIDs.
func (gs *GatewayServer) UnclaimDownlink(ctx context.Context, ids ttnpb.GatewayIdentifiers) error {
	return gs.UnclaimIDs(ctx, ids)
}
|
package cache
import (
"fmt"
"time"
"project/common/global"
"project/utils"
"project/utils/config"
"github.com/go-redis/redis/v7"
)
// GetUserCache fetches the given cache keys for a user in one Redis
// transaction pipeline. Each key in *keys is suffixed with userId to form
// the Redis key; the returned map is keyed by the original (unsuffixed)
// key. Pipeline errors (including redis.Nil for missing keys) are
// deliberately discarded; callers inspect each StringCmd individually.
func GetUserCache(keys *[]string, userId int) (cacheMap map[string]*redis.StringCmd) {
	cacheMap = make(map[string]*redis.StringCmd, len(*keys))
	pipe := global.Rdb.TxPipeline()
	for _, k := range *keys {
		cacheMap[k] = pipe.Get(fmt.Sprintf("%s%d", k, userId))
	}
	_, _ = pipe.Exec()
	return
}
// SetUserCache serializes data to JSON and stores it under cacheKey+userId
// with a TTL equal to the configured JWT timeout (in seconds).
// NOTE(review): marshal failures and the Set result are silently dropped —
// the cache write appears to be best-effort; confirm this is intended.
func SetUserCache(userId int, data interface{}, cacheKey string) {
	res, err := utils.StructToJson(data)
	if err != nil {
		return
	}
	global.Rdb.Set(fmt.Sprintf("%s%d", cacheKey, userId), res, time.Duration(config.JwtConfig.Timeout) * time.Second)
}
|
package main
import (
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
"gorm_project/modules/relation_tables"
)
// main demonstrates gorm one-to-many (User2 -> Article) operations:
// create (commented out), three query styles (Preload, Related,
// Association), updates of associated rows, and a delete.
func main() {
	connStr := "root:Kaka@2019@/gorm_project?charset=utf8&parseTime=True&loc=Local"
	db, err := gorm.Open("mysql", connStr)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Create, approach 1: insert the parent together with nested articles.
	//var user2 = relation_tables.User2{
	//	Name:    "dlzeng",
	//	Age:     18,
	//	Address: "深圳",
	//	Article: []relation_tables.Article{
	//		{
	//			Title:   "Beego详解1",
	//			Content: "Beego内容1,xxx",
	//			Desc:    "beego详解描述1",
	//		},
	//		{
	//			Title:   "Beego详解2",
	//			Content: "Beego内容2,xxx",
	//			Desc:    "beego详解描述2",
	//		},
	//	},
	//}
	// Create, approach 2: build the articles first, then attach them.
	//article3 := relation_tables.Article{
	//	Title:   "Beego详解3",
	//	Content: "Beego内容3,xxx",
	//	Desc:    "beego详解描述3",
	//}
	//article4 := relation_tables.Article{
	//	Title:   "Beego详解4",
	//	Content: "Beego内容4,xxx",
	//	Desc:    "beego详解描述4",
	//}
	//article5 := relation_tables.Article{
	//	Title:   "Beego详解5",
	//	Content: "Beego内容5,xxx",
	//	Desc:    "beego详解描述5",
	//}
	//var user2 = relation_tables.User2{
	//	Name:    "dlzeng",
	//	Age:     18,
	//	Address: "深圳",
	//	Article: []relation_tables.Article{
	//		article3, article5,
	//	},
	//}
	//db.Create(&user2)
	// Query style 1: eager-load articles via Preload.
	var user2 relation_tables.User2
	db.Debug().Preload("Article").Find(&user2, 1)
	fmt.Println(user2)
	// Query style 2: load the parent, then its articles via Related.
	var user2_1 relation_tables.User2
	db.First(&user2_1, 2)
	db.Model(&user2_1).Related(&user2_1.Article, "Article")
	fmt.Println(user2_1)
	// Query style 3: load articles through the Association API.
	var user2_2 relation_tables.User2
	db.First(&user2_2, 3)
	db.Model(&user2_2).Association("Article").Find(&user2_2.Article)
	fmt.Println(user2_2)
	// Update: retitle two of user 2's articles by id.
	var user2_4 relation_tables.User2
	//db.First(&user2_4,3)
	db.Preload("Article").Find(&user2_4, 2)
	db.Model(&user2_4.Article).Where("id=?", 1).
		Update("title", "Gin详解1")
	db.Model(&user2_4.Article).Where("id=?", 2).
		Update("title", "GoWeb详解1")
	// Delete: remove article id 7 belonging to user 4.
	var user3_5 relation_tables.User2
	db.First(&user3_5, 4)
	db.Debug().Model(&user3_5).Related(&user3_5.Article, "Article").Where("id=?", 7).Delete(&user3_5.Article)
}
|
package utils
import (
"net/http"
)
// createCookie builds a path-rooted cookie for the given domain. An empty
// value marks the cookie for deletion (MaxAge -1); otherwise it is kept
// alive for a long period (MaxAge 99999 seconds).
func createCookie(name, value, domain string) *http.Cookie {
	age := 99999
	if value == "" {
		age = -1
	}
	return &http.Cookie{
		Name:   name,
		Value:  value,
		Domain: domain,
		Path:   "/",
		MaxAge: age,
		// HttpOnly: true,
	}
}
// SaveCookie writes a long-lived cookie for name/value/domain to the response.
func SaveCookie(w http.ResponseWriter, name, value, domain string) {
	cookie := createCookie(name, value, domain)
	http.SetCookie(w, cookie)
}
func GetCookieValue(r *http.Request, name string) (string, error) {
var cookie, err = r.Cookie(name)
if err != nil {
return "", err
}
if cookie.Value == "" {
return "", err
}
return cookie.Value, nil
}
// DeleteCookie writes an expired cookie (empty value yields MaxAge -1),
// instructing the browser to drop the named cookie for the domain.
func DeleteCookie(w http.ResponseWriter, name, domain string) {
	expired := createCookie(name, "", domain)
	http.SetCookie(w, expired)
}
|
package validator
import (
"context"
"github.com/go-playground/validator/v10"
"github.com/qiniu/qmgo/operator"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/bson"
"testing"
)
// User contains user information.
// Validation constraints are declared through the `validate` struct tags.
type User struct {
	FirstName      string     `bson:"fname"`
	LastName       string     `bson:"lname"`
	Age            uint8      `bson:"age" validate:"gte=0,lte=130"`
	Email          string     `bson:"e-mail" validate:"required,email"`
	FavouriteColor string     `bson:"favouriteColor" validate:"hexcolor|rgb|rgba"`
	Addresses      []*Address `bson:"addresses" validate:"required,dive,required"` // a person can have a home and cottage...
}
// Address houses a users address information.
// Every field is required by validation.
type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
	Planet string `validate:"required"`
	Phone  string `validate:"required"`
}
// CustomRule use custom rule.
// The "foo" rule is registered in TestValidator and only accepts the value "bar".
type CustomRule struct {
	Name string `validate:"required,foo"`
}
// TestValidator exercises the Do hook against struct, pointer, slice and
// bson inputs, covering skipped operations, success and failure paths,
// and a custom validation rule.
func TestValidator(t *testing.T) {
	ast := require.New(t)
	ctx := context.Background()
	user := &User{}
	// Operations other than insert/upsert are not validated, so even an
	// empty (invalid) user passes.
	ast.NoError(Do(ctx, user, operator.BeforeRemove))
	ast.NoError(Do(ctx, user, operator.AfterInsert))
	// check success: a fully valid user, as pointer, value and slice.
	address := &Address{
		Street: "Eavesdown Docks",
		Planet: "Persphone",
		Phone:  "none",
		City:   "Unknown",
	}
	user = &User{
		FirstName:      "",
		LastName:       "",
		Age:            45,
		Email:          "1234@gmail.com",
		FavouriteColor: "#000",
		Addresses:      []*Address{address, address},
	}
	ast.NoError(Do(ctx, user, operator.BeforeInsert))
	ast.NoError(Do(ctx, user, operator.BeforeUpsert))
	ast.NoError(Do(ctx, *user, operator.BeforeUpsert))
	users := []*User{user, user, user}
	ast.NoError(Do(ctx, users, operator.BeforeInsert))
	// check failure: each violated tag must produce an error.
	user.Age = 150
	ast.Error(Do(ctx, user, operator.BeforeInsert))
	user.Age = 22
	user.Email = "1234@gmail" // invalid email
	ast.Error(Do(ctx, user, operator.BeforeInsert))
	user.Email = "1234@gmail.com"
	user.Addresses[0].City = "" // string tag use default value
	ast.Error(Do(ctx, user, operator.BeforeInsert))
	// input slice: the nested invalid address must fail the whole batch.
	users = []*User{user, user, user}
	ast.Error(Do(ctx, users, operator.BeforeInsert))
	useris := []interface{}{user, user, user}
	ast.Error(Do(ctx, useris, operator.BeforeInsert))
	user.Addresses[0].City = "shanghai"
	users = []*User{user, user, user}
	ast.NoError(Do(ctx, users, operator.BeforeInsert))
	us := []User{*user, *user, *user}
	ast.NoError(Do(ctx, us, operator.BeforeInsert))
	ast.NoError(Do(ctx, &us, operator.BeforeInsert))
	// all bson type: raw bson documents pass through without validation errors.
	mdoc := []interface{}{bson.M{"name": "", "age": 12}, bson.M{"name": "", "age": 12}}
	ast.NoError(Do(ctx, mdoc, operator.BeforeInsert))
	adoc := bson.A{"Alex", "12"}
	ast.NoError(Do(ctx, adoc, operator.BeforeInsert))
	edoc := bson.E{"Alex", "12"}
	ast.NoError(Do(ctx, edoc, operator.BeforeInsert))
	ddoc := bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
	ast.NoError(Do(ctx, ddoc, operator.BeforeInsert))
	// nil ptr: nil inputs are accepted without validation.
	user = nil
	ast.NoError(Do(ctx, user, operator.BeforeInsert))
	ast.NoError(Do(ctx, nil, operator.BeforeInsert))
	// use custom rules: register "foo" (only "bar" passes) on a fresh
	// validator instance and install it via SetValidate.
	customRule := &CustomRule{Name: "bar"}
	v := validator.New()
	_ = v.RegisterValidation("foo", func(fl validator.FieldLevel) bool {
		return fl.Field().String() == "bar"
	})
	SetValidate(v)
	ast.NoError(Do(ctx, customRule, operator.BeforeInsert))
}
|
package scheduler
import (
"container/heap"
"time"
"types"
"github.com/golang/glog"
)
// Package-level scheduling state shared by DispatchPods and Schedule.
// NOTE(review): usersPresent and usersPodsQ are plain maps written by both
// functions; if those run on different goroutines this is a data race —
// confirm the intended concurrency model.
var (
	usersPriorityQ types.PriorityQueue
	usersPresent   map[string]bool //userPresent[Uid] == true means that the Uid has been in usersPriorityQ.
	usersActiveQ   chan string
	usersPodsQ     map[string]chan types.Pod
	highPriorityCh chan types.Pod
)
// init allocates the maps and buffered channels above; usersPriorityQ
// starts as its zero value.
func init() {
	usersPresent = make(map[string]bool)
	usersActiveQ = make(chan string, 10)
	usersPodsQ = make(map[string]chan types.Pod)
	highPriorityCh = make(chan types.Pod, 10)
}
// DispatchPods drains pendingPodCh, routing each pod into its owner's
// per-user queue (created on demand with capacity 20). A user whose queue
// transitions from empty to non-empty is announced on usersActiveQ so
// Schedule picks the user up.
func DispatchPods() {
	for pod := range pendingPodCh {
		value, ok := usersPodsQ[pod.Uid]
		if !ok {
			usersPodsQ[pod.Uid] = make(chan types.Pod, 20)
			value = usersPodsQ[pod.Uid]
		}
		// Announce the user only when its queue was empty, to avoid
		// flooding usersActiveQ.
		if len(value) == 0 {
			usersActiveQ <- pod.Uid
		}
		value <- pod
	}
}
// Schedule is the main scheduling loop. Each iteration it first drains any
// high-priority pods, then promotes newly active users into the priority
// queue, then schedules one pod for the top-priority user and requeues
// that user with a recomputed share.
func Schedule() {
	for {
		// schedule pod in highPriorityCh at first
		select {
		case pod := <-highPriorityCh:
			schedulePod(pod)
			continue
		default:
		}
		// fix usersPriorityQ: push users announced on usersActiveQ that are
		// not already present in the queue.
		usersActiveQLen := len(usersActiveQ)
		for i := 0; i < usersActiveQLen; i++ {
			uid := <-usersActiveQ
			present, ok := usersPresent[uid]
			if ok && present {
				continue
			} else {
				usersPresent[uid] = true
				user := &types.User{
					Uid:      uid,
					Priority: getUserShare(uid),
				}
				heap.Push(&usersPriorityQ, user)
			}
		}
		// schedule local pod for the user with the best priority.
		if len(usersPriorityQ) > 0 {
			topUser := heap.Pop(&usersPriorityQ).(*types.User)
			select {
			case firstPod := <-usersPodsQ[topUser.Uid]:
				glog.Info("=============================")
				glog.Info("Before Schedule()")
				printShare()
				weight := schedulePod(firstPod)
				// Recompute the user's share after placement and requeue it.
				topUser.Priority = fixUserShare(firstPod, weight)
				heap.Push(&usersPriorityQ, topUser)
				glog.Info("After Schedule()")
				printShare()
				glog.Info("=============================")
				userData := types.UserData{
					Uid:         firstPod.Uid,
					CurrentTime: time.Now().Unix(),
					Share:       topUser.Priority,
					Resource:    usersAllocatedRes[firstPod.Uid],
				}
				userDataQ <- userData
			default:
				// No pending pod for this user: mark inactive and leave it
				// out of the queue until it is re-announced.
				usersPresent[topUser.Uid] = false
			}
		}
		time.Sleep(time.Second)
	}
}
// schedulePod places pod on the first node with enough free CPU and
// memory, retrying every second until placement succeeds. When not in
// local-only mode and the cluster lacks capacity, the pod is outsourced
// via UploadPod and its weight is returned; locally placed pods return 0.
func schedulePod(pod types.Pod) float64 {
	for {
		nodes := getNodes()
		for _, node := range nodes {
			res := allocatedResource[node.Name]
			// First fit: both CPU and memory must fit on the node.
			if res.MilliCpu+pod.RequestMilliCpu <= node.MilliCpu && res.Memory+pod.RequestMemory <= node.Memory {
				schedulePodToNode(pod, node)
				Heartbeat()
				return 0
			}
		}
		if !local {
			// if the cluster doesn't have enough resources, outsource the pod.
			weight := UploadPod(pod)
			deletePodByName(pod.Name, pod.Uid)
			return weight
		}
		time.Sleep(time.Second)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.