text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"io"
"os"
)
// Logger is the minimal logging contract consumed by Service.
type Logger interface {
	Println(msg string)
	Printf(format string, args ...interface{})
}

// Service wires a logger together with a persistence hook.
type Service struct {
	logger     Logger
	repository interface {
		Save(string) bool
	}
	service int
}

// simpleLogger is a Logger implementation that writes to an io.Writer.
type simpleLogger struct {
	w io.Writer
}
// main creates (or truncates) logger.log and writes a test message to it.
func main() {
	// The original discarded this error; a failed create would then panic
	// with a nil-pointer write instead of a clear message.
	f, err := os.Create("logger.log")
	if err != nil {
		fmt.Fprintln(os.Stderr, "create logger.log:", err)
		return
	}
	defer f.Close() // the original leaked the file handle

	logger := simpleLogger{w: f} // fixed typo: "loger"
	logger.Println("test")
}
// Println writes msg to the underlying writer followed by a newline,
// matching the fmt.Println-style contract the method name implies.
// The original used fmt.Fprint, so consecutive log lines ran together.
func (l simpleLogger) Println(msg string) {
	fmt.Fprintln(l.w, msg)
}
func (l simpleLogger) Printf(format string, args ...interface{}){
fmt.Fprint(l.w, format)
} |
package main
import (
"fmt"
"log"
"net"
)
// main listens on a fixed TCP address and spawns one goroutine per client.
func main() {
	listener, err := net.Listen("tcp", "192.168.20.23:8888")
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		if err != nil {
			// The original called log.Fatal here, so one transient accept
			// failure took down the entire server. Log and keep serving.
			log.Println("accept:", err)
			continue
		}
		fmt.Printf("访问客户端信息: con=%v 客户端ip=%v\n", conn, conn.RemoteAddr().String())
		// Handle each client concurrently so Accept is never blocked.
		go handleConnection(conn)
	}
}
// handleConnection reads from one client connection in a loop and echoes
// whatever arrives to the server's terminal. It runs in its own goroutine
// and returns (closing the connection) when the client hangs up or a read
// fails.
func handleConnection(c net.Conn) {
	defer c.Close() // close the connection when this handler exits
	for {
		// 1. Wait for the client to send data over conn.
		// 2. If the client has not written anything, this goroutine blocks here.
		fmt.Printf("服务器在等待客户端%s 发送信息\n", c.RemoteAddr().String())
		buf := make([]byte, 1024)
		n, err := c.Read(buf)
		if err != nil {
			// The original used log.Fatal, which exits the whole process
			// whenever ANY client disconnects (and made the following break
			// unreachable). End only this connection's goroutine instead.
			log.Println("read:", err)
			return
		}
		// 3. Echo the client's bytes to the server terminal.
		fmt.Print(string(buf[:n]))
	}
}
|
package operation
import (
"testing"
)
// TestSquareMatrix verifies that squareMatrix and convertMatrixToInt reject
// malformed input with errInvalidMatrix. The original had five subtests
// sharing one name (confusing -run filtering and output), one subtest that
// was an exact duplicate, and two assertion styles; all subtests now have
// distinct names and use assertError consistently.
func TestSquareMatrix(t *testing.T) {
	t.Run("return error if number of cols are greater than rows", func(t *testing.T) {
		matrix := [][]string{
			{"1", "2", "3"},
			{"4", "5", "6"},
		}
		err := squareMatrix(matrix)
		assertError(t, err, errInvalidMatrix)
	})
	t.Run("return error if number of rows are greater than cols", func(t *testing.T) {
		matrix := [][]string{
			{"1", "2"},
			{"3", "4"},
			{"5", "6"},
		}
		err := squareMatrix(matrix)
		assertError(t, err, errInvalidMatrix)
	})
	t.Run("return error if the matrix has an empty row", func(t *testing.T) {
		matrix := [][]string{
			{},
		}
		err := squareMatrix(matrix)
		assertError(t, err, errInvalidMatrix)
	})
	t.Run("return error if a cell is not numeric", func(t *testing.T) {
		matrix := [][]string{
			{"j"},
		}
		_, err := convertMatrixToInt(matrix)
		assertError(t, err, errInvalidMatrix)
	})
	t.Run("return error if a cell is empty", func(t *testing.T) {
		matrix := [][]string{
			{""},
		}
		_, err := convertMatrixToInt(matrix)
		assertError(t, err, errInvalidMatrix)
	})
	t.Run("return error if a cell is only whitespace", func(t *testing.T) {
		matrix := [][]string{
			{" "},
		}
		_, err := convertMatrixToInt(matrix)
		assertError(t, err, errInvalidMatrix)
	})
}
func assertError(t *testing.T, got error, want error) {
t.Helper()
if got == nil {
t.Fatal("didn't get an error but wanted one")
}
if got != want {
t.Errorf("got %q, want %q", got, want)
}
}
|
package hashicups
import (
"strconv"
hc "github.com/hashicorp-demoapp/hashicups-client-go"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
// dataSourceOrder returns the schema for the order data source: a required
// numeric order "id" plus a computed "items" list, where each item nests a
// single-element "coffee" block alongside its "quantity".
func dataSourceOrder() *schema.Resource {
	// Fields of the nested coffee block; all are computed from the API.
	coffeeSchema := map[string]*schema.Schema{
		"id":          {Type: schema.TypeInt, Computed: true},
		"name":        {Type: schema.TypeString, Computed: true},
		"teaser":      {Type: schema.TypeString, Computed: true},
		"description": {Type: schema.TypeString, Computed: true},
		"price":       {Type: schema.TypeInt, Computed: true},
		"image":       {Type: schema.TypeString, Computed: true},
	}
	// One order item: a single coffee plus how many were ordered.
	itemSchema := map[string]*schema.Schema{
		"coffee": {
			Type:     schema.TypeList,
			MaxItems: 1,
			Computed: true,
			Elem:     &schema.Resource{Schema: coffeeSchema},
		},
		"quantity": {
			Type:     schema.TypeInt,
			Required: true,
			ForceNew: true,
		},
	}
	return &schema.Resource{
		Read: dataSourceOrderRead,
		Schema: map[string]*schema.Schema{
			"id": {
				Type:     schema.TypeInt,
				Required: true,
			},
			"items": {
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Resource{Schema: itemSchema},
			},
		},
	}
}
// dataSourceOrderRead fetches a single order by its "id" attribute and
// stores the flattened order items in Terraform state.
func dataSourceOrderRead(d *schema.ResourceData, m interface{}) error {
	client := m.(*hc.Client)
	orderID := strconv.Itoa(d.Get("id").(int))

	order, err := client.GetOrder(orderID)
	if err != nil {
		return err
	}

	if err := d.Set("items", flattenOrderItems(&order.Items)); err != nil {
		return err
	}
	d.SetId(orderID)
	return nil
}
// flattenOrderItems converts API order items into the generic []interface{}
// shape Terraform expects. A nil input yields an empty (non-nil) slice.
func flattenOrderItems(orderItems *[]hc.OrderItem) []interface{} {
	if orderItems == nil {
		return make([]interface{}, 0)
	}
	flattened := make([]interface{}, len(*orderItems))
	for i, item := range *orderItems {
		flattened[i] = map[string]interface{}{
			"coffee":   flattenCoffee(item.Coffee),
			"quantity": item.Quantity,
		}
	}
	return flattened
}
// flattenCoffee converts a coffee value into Terraform's nested-block shape:
// a single-element list wrapping a field map.
func flattenCoffee(coffee hc.Coffee) []interface{} {
	return []interface{}{
		map[string]interface{}{
			"id":          coffee.ID,
			"name":        coffee.Name,
			"teaser":      coffee.Teaser,
			"description": coffee.Description,
			"price":       coffee.Price,
			"image":       coffee.Image,
		},
	}
}
|
package queue
// SimpleQueue is a queue built by embedding a linked list, so the embedded
// list's methods are promoted onto SimpleQueue.
// NOTE(review): no import of a linkedList package is visible in this file,
// so this does not compile as shown — confirm the intended import (a local
// linkedList package, or the standard library's container/list).
type SimpleQueue struct {
 linkedList.List
}
|
// Package storage provide generic interface to interact with storage backend.
package storage
import (
"context"
"errors"
"strings"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/anypb"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/pkg/grpc/databroker"
)
// Errors
var (
ErrNotFound = errors.New("record not found")
ErrStreamDone = errors.New("record stream done")
ErrInvalidServerVersion = status.Error(codes.Aborted, "invalid server version")
)
// Backend is the interface required for a storage backend. Implementations
// persist databroker records and support versioned synchronization.
type Backend interface {
 // Close closes the backend and releases its resources.
 Close() error
 // Get is used to retrieve a record by type and id; implementations are
 // expected to return ErrNotFound when it does not exist.
 Get(ctx context.Context, recordType, id string) (*databroker.Record, error)
 // GetOptions gets the options for a type.
 GetOptions(ctx context.Context, recordType string) (*databroker.Options, error)
 // Lease acquires a lease, or renews an existing one. If the lease is acquired true is returned.
 Lease(ctx context.Context, leaseName, leaseID string, ttl time.Duration) (bool, error)
 // ListTypes lists all the known record types.
 ListTypes(ctx context.Context) ([]string, error)
 // Put is used to insert or update records, returning the server version
 // the records were written at.
 Put(ctx context.Context, records []*databroker.Record) (serverVersion uint64, err error)
 // SetOptions sets the options for a type.
 SetOptions(ctx context.Context, recordType string, options *databroker.Options) error
 // Sync syncs record changes after the specified version.
 Sync(ctx context.Context, recordType string, serverVersion, recordVersion uint64) (RecordStream, error)
 // SyncLatest syncs all the records, optionally filtered by recordType and
 // filter expression.
 SyncLatest(ctx context.Context, recordType string, filter FilterExpression) (serverVersion, recordVersion uint64, stream RecordStream, err error)
}
// MatchAny searches any field of the wrapped proto message for the query
// string, returning false for a nil or non-unmarshalable Any.
// The parameter was renamed from `any`, which shadowed Go's predeclared
// `any` identifier.
//
// NOTE(review): string matching lowercases the field side only — callers
// appear to pass an already-lowercased query; confirm.
func MatchAny(a *anypb.Any, query string) bool {
	if a == nil {
		return false
	}
	msg, err := a.UnmarshalNew()
	if err != nil {
		// ignore invalid any types
		log.Error(context.TODO()).Err(err).Msg("storage: invalid any type")
		return false
	}
	// search by query
	return matchProtoMessage(msg.ProtoReflect(), query)
}
// matchProtoMessage reports whether any populated field of msg matches query.
func matchProtoMessage(msg protoreflect.Message, query string) bool {
	fields := msg.Descriptor().Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		if msg.Has(fd) && matchProtoValue(fd, msg.Get(fd), query) {
			return true
		}
	}
	return false
}
// matchProtoValue dispatches on the field's cardinality: lists and maps are
// searched element-wise, anything else as a single value.
func matchProtoValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, query string) bool {
	if fd.IsList() {
		return matchProtoListValue(fd, v.List(), query)
	}
	if fd.IsMap() {
		return matchProtoMapValue(fd, v.Map(), query)
	}
	return matchProtoSingularValue(fd, v, query)
}
// matchProtoSingularValue matches one scalar value: message fields are
// searched recursively, string fields by case-insensitive substring; all
// other kinds never match.
func matchProtoSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, query string) bool {
	switch fd.Kind() {
	case protoreflect.MessageKind:
		return matchProtoMessage(v.Message(), query)
	case protoreflect.StringKind:
		return strings.Contains(strings.ToLower(v.String()), query)
	default:
		return false
	}
}
// matchProtoListValue reports whether any element of l matches query.
func matchProtoListValue(fd protoreflect.FieldDescriptor, l protoreflect.List, query string) bool {
	for i, n := 0, l.Len(); i < n; i++ {
		if matchProtoSingularValue(fd, l.Get(i), query) {
			return true
		}
	}
	return false
}
// matchProtoMapValue reports whether any value of m matches query. Range
// stops early: the callback returns false as soon as a match is found.
func matchProtoMapValue(fd protoreflect.FieldDescriptor, m protoreflect.Map, query string) bool {
	found := false
	m.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
		if matchProtoSingularValue(fd, v, query) {
			found = true
		}
		return !found
	})
	return found
}
// IsNotFound returns true if the error is because a record was not found,
// either as the storage sentinel or as a gRPC NotFound status.
func IsNotFound(err error) bool {
	if errors.Is(err, ErrNotFound) {
		return true
	}
	return status.Code(err) == codes.NotFound
}
|
package sort
// Interface mirrors the standard library's sort.Interface: any type
// implementing these three methods can be sorted with sort.Sort.
type Interface interface {
 Len() int
 Less(i, j int) bool // i, j are indices of sequence elements
 Swap(i, j int)
}
/*
When sorting another data type with sort.Sort(param), that type only needs
to define these three methods, as shown in stringsort.go.
*/
|
package cron
// CronInst is the shared package-level cron instance.
// NOTE(review): mutable package-level state — confirm no caller reassigns it.
var CronInst Cron
// init eagerly creates and starts the shared scheduler, so merely importing
// this package has the side effect of running cron jobs.
func init() {
 CronInst = NewCron()
 CronInst.Start()
}
|
package con
import (
"github.com/GanymedeNil/shorturl/config"
"github.com/garyburd/redigo/redis"
"log"
)
// redisConn caches a single lazily-dialed connection for the process.
// NOTE(review): access is unsynchronized and a dead connection is never
// re-dialed — confirm callers are single-goroutine or switch to redis.Pool.
var redisConn *redis.Conn

// Redis returns the shared Redis connection, dialing the address from the
// "redis.default" config key on first use. It panics if the dial fails.
func Redis() redis.Conn {
	if redisConn != nil {
		return *redisConn
	}
	address := config.Get("redis.default")
	c, err := redis.Dial("tcp", address.(string))
	if err != nil {
		// The original message "data conn error" discarded the underlying
		// error, making dial failures undiagnosable; include the cause.
		log.Panicf("redis conn error: %v", err)
	}
	redisConn = &c
	return *redisConn
}
|
package main
// lastSum accumulates the running total of node values already visited by
// the reverse in-order walk. Being package-level state, it makes convertBST
// non-reentrant and unsafe for concurrent calls.
var lastSum int
// convertBST rewrites a BST in place so every node holds its original value
// plus the sum of all greater values (LeetCode 538), returning the same root.
func convertBST(root *TreeNode) *TreeNode {
 lastSum = 0 // reset so repeated invocations start from a clean state
 convertBSTExec(root)
 return root
}
// convertBSTExec walks the tree in reverse in-order (right, node, left), so
// nodes are visited in descending value order; when a node is updated,
// lastSum holds exactly the sum of all values greater than it.
func convertBSTExec(root *TreeNode) {
 if root == nil {
 return
 }
 convertBSTExec(root.Right)
 root.Val += lastSum
 lastSum = root.Val
 convertBSTExec(root.Left)
}
/*
Problem link:
https://leetcode-cn.com/problems/convert-bst-to-greater-tree/comments/ — Convert BST to Greater Tree
*/
/*
Summary
1. This problem is simply an in-order traversal performed in reverse.
*/ |
package models
import "time"
// Thread models a forum discussion thread. Every field is a pointer so that
// JSON (un)marshalling can distinguish "absent" from the zero value; fields
// tagged omitempty are dropped from output when nil.
// NOTE(review): Author_id and Forum_id break Go naming convention (AuthorID,
// ForumID), but renaming exported fields would break existing callers.
type Thread struct {
 Slug *string `json:"slug,omitempty"`
 Author *string `json:"author"`
 Author_id *int `json:"author_id,omitempty"`
 Created *time.Time `json:"created,omitempty"`
 Forum *string `json:"forum"`
 Forum_id *int `json:"forum_id,omitempty"`
 Id *int `json:"id"`
 Message *string `json:"message"`
 Title *string `json:"title"`
 Votes *int `json:"votes,omitempty"`
}
// Threads is a list of thread pointers.
type Threads []*Thread
package app
import (
"fmt"
"net/http"
)
// StreamService serves HLS playlists and media segments over HTTP.
type StreamService struct {
}

// NewStreamService returns a ready-to-use StreamService value.
func NewStreamService() StreamService {
	return StreamService{}
}
// ServeHlsM3u8 writes the named .m3u8 playlist for videoId to the response
// with the HLS playlist content type.
func (s *StreamService) ServeHlsM3u8(w http.ResponseWriter, r *http.Request, videoId string, m3u8Name string) {
	playlist := fmt.Sprintf("%s\\%s", s.getMediaBase(videoId), m3u8Name)
	w.Header().Set("Content-Type", "application/x-mpegURL")
	http.ServeFile(w, r, playlist)
}
// ServeHlsTs serves a single .ts media segment for videoId.
func (s *StreamService) ServeHlsTs(w http.ResponseWriter, r *http.Request, segName, videoId string) {
	mediaBase := s.getMediaBase(videoId)
	mediaFile := fmt.Sprintf("%s\\%s", mediaBase, segName)
	// Headers must be set before the body is written: the original set
	// Content-Type AFTER http.ServeFile had already written the response,
	// so the header never took effect.
	w.Header().Set("Content-Type", "video/MP2T")
	http.ServeFile(w, r, mediaFile)
}
// getMediaBase returns the on-disk directory holding a video's media files.
// NOTE(review): the path uses a Windows-style separator — confirm this
// service only runs on Windows.
func (s *StreamService) getMediaBase(id string) string {
	return "content\\" + id
}
|
package main
import (
"fmt"
"time"
"math/rand"
)
// UnThreadedMergeSorted merges two ascending slices into one ascending slice.
func UnThreadedMergeSorted(left []int, right []int) []int {
	var merged []int
	for len(left) > 0 && len(right) > 0 {
		if right[0] < left[0] {
			merged = append(merged, right[0])
			right = right[1:]
		} else {
			merged = append(merged, left[0])
			left = left[1:]
		}
	}
	// At most one of these still has elements; appending an empty slice
	// is a no-op, so no guards are needed.
	merged = append(merged, left...)
	merged = append(merged, right...)
	return merged
}
// UnThreadedMergeSort sorts the slice ascending with a sequential merge sort.
func UnThreadedMergeSort(unsorted []int) []int {
	if len(unsorted) <= 1 {
		return unsorted
	}
	mid := len(unsorted) / 2
	left := UnThreadedMergeSort(unsorted[:mid])
	right := UnThreadedMergeSort(unsorted[mid:])
	return UnThreadedMergeSorted(left, right)
}
// MergeSorted merges two ascending slices and delivers the result on ch.
func MergeSorted(left []int, right []int, ch chan []int) {
	var merged []int
	for len(left) > 0 && len(right) > 0 {
		if right[0] < left[0] {
			merged = append(merged, right[0])
			right = right[1:]
		} else {
			merged = append(merged, left[0])
			left = left[1:]
		}
	}
	// Drain whichever side still has elements (appending empty is a no-op).
	merged = append(merged, left...)
	merged = append(merged, right...)
	ch <- merged
}
// MergeSort recursively sorts unsorted, handing each merge to a goroutine
// that reports its result back on ch.
//
// NOTE(review): despite the goroutine, execution is effectively serial —
// both recursive calls complete before MergeSorted is launched, and the
// caller immediately blocks receiving from the shared unbuffered channel,
// so only one send is ever outstanding (which is also why results pair up
// with the right caller). Confirm the "threaded" benchmark in main is
// actually measuring concurrency.
func MergeSort(unsorted []int,ch chan []int) []int{
 if(len(unsorted) <= 1){
 return unsorted
 }else{
 left := MergeSort(unsorted[:len(unsorted)/2], ch)
 right := MergeSort(unsorted[len(unsorted)/2:], ch)
 go MergeSorted(left, right, ch)
 }
 return <- ch
}
func main() {
var A []int
for len(A) < 1000000{
A = append(A, rand.Intn(10000000))
}
timer := time.Now()
ch := make(chan []int)
B := A
fmt.Println("LET THE RACE BEGIN\n","+-+-+-+-+-+-+-+-+-+")
MergeSort(A,ch)
meTime := time.Since(timer)
defer fmt.Println("Threaded Time to Sort: ",meTime)
timer = time.Now()
B = UnThreadedMergeSort(B)
stupidGoLangTime := time.Since(timer)
defer fmt.Println("Unthreaded Time to Sort: ",stupidGoLangTime)
if meTime < stupidGoLangTime {
fmt.Println("Finally I've won!")
}else{
fmt.Println("Whatever, I don't care. Nobody won.")
}
} |
package install
import (
"fmt"
"reflect"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/wrappers"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/certs"
listerfakes "github.com/operator-framework/operator-lifecycle-manager/pkg/fakes/client-go/listers"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient/operatorclientmocks"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister/operatorlisterfakes"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
)
// keyPair generates a throwaway CA key pair with the given expiration,
// recording a test failure if generation errors.
func keyPair(t *testing.T, expiration time.Time) *certs.KeyPair {
	pair, err := certs.GenerateCA(expiration, Organization)
	assert.NoError(t, err)
	return pair
}
// selector parses a label-selector string, recording a test failure if the
// string is malformed.
func selector(t *testing.T, selector string) *metav1.LabelSelector {
	parsed, err := metav1.ParseToLabelSelector(selector)
	assert.NoError(t, err)
	return parsed
}
// staticCerts memoizes the first serving pair created by staticCertGenerator
// so every subsequent call returns identical keys.
var staticCerts *certs.KeyPair

// staticCertGenerator replaces the CertGenerator to get consistent keys for
// testing: the first call creates a signed serving pair, later calls reuse it.
func staticCertGenerator(notAfter time.Time, organization string, ca *certs.KeyPair, hosts []string) (*certs.KeyPair, error) {
	if staticCerts == nil {
		pair, err := certs.CreateSignedServingPair(notAfter, organization, ca, hosts)
		if err != nil {
			return nil, err
		}
		staticCerts = pair
	}
	return staticCerts, nil
}
// fakeState configures the canned objects and errors handed back by the
// fake listers built in newFakeLister. For each resource the pair works as
// Get's return values: a nil object with a non-nil error simulates a
// missing or failing lookup.
type fakeState struct {
 existingService *corev1.Service
 getServiceError error
 existingSecret *corev1.Secret
 getSecretError error
 existingRole *rbacv1.Role
 getRoleError error
 existingRoleBinding *rbacv1.RoleBinding
 getRoleBindingError error
 existingClusterRoleBinding *rbacv1.ClusterRoleBinding
 getClusterRoleBindingError error
}
// newFakeLister builds a FakeOperatorLister whose core/v1 and rbac/v1
// listers return the canned objects and errors described by state. Each
// resource follows the same wiring pattern: top-level lister -> namespaced
// lister -> stubbed Get.
func newFakeLister(state fakeState) *operatorlisterfakes.FakeOperatorLister {
 fakeLister := &operatorlisterfakes.FakeOperatorLister{}
 fakeCoreV1Lister := &operatorlisterfakes.FakeCoreV1Lister{}
 fakeRbacV1Lister := &operatorlisterfakes.FakeRbacV1Lister{}
 fakeLister.CoreV1Returns(fakeCoreV1Lister)
 fakeLister.RbacV1Returns(fakeRbacV1Lister)
 // Services
 fakeServiceLister := &listerfakes.FakeServiceLister{}
 fakeCoreV1Lister.ServiceListerReturns(fakeServiceLister)
 fakeServiceNamespacedLister := &listerfakes.FakeServiceNamespaceLister{}
 fakeServiceLister.ServicesReturns(fakeServiceNamespacedLister)
 fakeServiceNamespacedLister.GetReturns(state.existingService, state.getServiceError)
 // Secrets
 fakeSecretLister := &listerfakes.FakeSecretLister{}
 fakeCoreV1Lister.SecretListerReturns(fakeSecretLister)
 fakeSecretNamespacedLister := &listerfakes.FakeSecretNamespaceLister{}
 fakeSecretLister.SecretsReturns(fakeSecretNamespacedLister)
 fakeSecretNamespacedLister.GetReturns(state.existingSecret, state.getSecretError)
 // Roles
 fakeRoleLister := &listerfakes.FakeRoleLister{}
 fakeRbacV1Lister.RoleListerReturns(fakeRoleLister)
 fakeRoleNamespacedLister := &listerfakes.FakeRoleNamespaceLister{}
 fakeRoleLister.RolesReturns(fakeRoleNamespacedLister)
 fakeRoleNamespacedLister.GetReturns(state.existingRole, state.getRoleError)
 // RoleBindings
 fakeRoleBindingLister := &listerfakes.FakeRoleBindingLister{}
 fakeRbacV1Lister.RoleBindingListerReturns(fakeRoleBindingLister)
 fakeRoleBindingNamespacedLister := &listerfakes.FakeRoleBindingNamespaceLister{}
 fakeRoleBindingLister.RoleBindingsReturns(fakeRoleBindingNamespacedLister)
 fakeRoleBindingNamespacedLister.GetReturns(state.existingRoleBinding, state.getRoleBindingError)
 // ClusterRoleBindings are cluster-scoped, so there is no namespaced layer.
 fakeClusterRoleBindingLister := &listerfakes.FakeClusterRoleBindingLister{}
 fakeRbacV1Lister.ClusterRoleBindingListerReturns(fakeClusterRoleBindingLister)
 fakeClusterRoleBindingLister.GetReturns(state.existingClusterRoleBinding, state.getClusterRoleBindingError)
 return fakeLister
}
func TestInstallCertRequirementsForDeployment(t *testing.T) {
owner := ownerutil.Owner(&v1alpha1.ClusterServiceVersion{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.ClusterServiceVersionKind,
APIVersion: v1alpha1.ClusterServiceVersionAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: "owner",
Namespace: "test-namespace",
UID: "123-uid",
},
})
ca := keyPair(t, time.Now().Add(time.Hour))
caPEM, _, err := ca.ToPEM()
assert.NoError(t, err)
caHash := certs.PEMSHA256(caPEM)
type fields struct {
owner ownerutil.Owner
previousStrategy Strategy
templateAnnotations map[string]string
initializers DeploymentInitializerFuncChain
apiServiceDescriptions []certResource
webhookDescriptions []certResource
}
type args struct {
deploymentName string
ca *certs.KeyPair
rotateAt time.Time
depSpec appsv1.DeploymentSpec
ports []corev1.ServicePort
}
type expectedExternalFunc func(clientInterface *operatorclientmocks.MockClientInterface, fakeLister *operatorlisterfakes.FakeOperatorLister, namespace string, args args)
tests := []struct {
name string
mockExternal expectedExternalFunc
state fakeState
fields fields
args args
want *appsv1.DeploymentSpec
wantErr bool
}{
{
name: "adds certs to deployment spec",
mockExternal: func(mockOpClient *operatorclientmocks.MockClientInterface, fakeLister *operatorlisterfakes.FakeOperatorLister, namespace string, args args) {
mockOpClient.EXPECT().DeleteService(namespace, "test-service", &metav1.DeleteOptions{}).Return(nil)
service := corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
OwnerReferences: []metav1.OwnerReference{
ownerutil.NonBlockingOwner(&v1alpha1.ClusterServiceVersion{}),
},
},
Spec: corev1.ServiceSpec{
Ports: args.ports,
Selector: selector(t, "test=label").MatchLabels,
},
}
mockOpClient.EXPECT().CreateService(&service).Return(&service, nil)
hosts := []string{
fmt.Sprintf("%s.%s", service.GetName(), namespace),
fmt.Sprintf("%s.%s.svc", service.GetName(), namespace),
}
servingPair, err := certGenerator.Generate(args.rotateAt, Organization, args.ca, hosts)
require.NoError(t, err)
// Create Secret for serving cert
certPEM, privPEM, err := servingPair.ToPEM()
require.NoError(t, err)
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service-cert",
Namespace: namespace,
Annotations: map[string]string{OLMCAHashAnnotationKey: caHash},
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Data: map[string][]byte{
"tls.crt": certPEM,
"tls.key": privPEM,
OLMCAPEMKey: caPEM,
},
Type: corev1.SecretTypeTLS,
}
mockOpClient.EXPECT().UpdateSecret(secret).Return(secret, nil)
secretRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: secret.GetName(),
Namespace: namespace,
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
APIGroups: []string{""},
Resources: []string{"secrets"},
ResourceNames: []string{secret.GetName()},
},
},
}
mockOpClient.EXPECT().UpdateRole(secretRole).Return(secretRole, nil)
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: secret.GetName(),
Namespace: namespace,
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "test-sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: secretRole.GetName(),
},
}
mockOpClient.EXPECT().UpdateRoleBinding(roleBinding).Return(roleBinding, nil)
authDelegatorClusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: service.GetName() + "-system:auth-delegator",
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "test-sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
}
mockOpClient.EXPECT().UpdateClusterRoleBinding(authDelegatorClusterRoleBinding).Return(authDelegatorClusterRoleBinding, nil)
authReaderRoleBinding := &rbacv1.RoleBinding{
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: args.depSpec.Template.Spec.ServiceAccountName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
}
authReaderRoleBinding.SetName(service.GetName() + "-auth-reader")
authReaderRoleBinding.SetNamespace(KubeSystem)
authReaderRoleBinding.SetLabels(map[string]string{OLMManagedLabelKey: OLMManagedLabelValue})
mockOpClient.EXPECT().UpdateRoleBinding(authReaderRoleBinding).Return(authReaderRoleBinding, nil)
},
state: fakeState{
existingService: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
ownerutil.NonBlockingOwner(&v1alpha1.ClusterServiceVersion{}),
},
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{},
},
existingRole: &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{},
},
existingRoleBinding: &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{},
},
existingClusterRoleBinding: &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{},
},
},
fields: fields{
owner: &v1alpha1.ClusterServiceVersion{},
previousStrategy: nil,
templateAnnotations: nil,
initializers: nil,
apiServiceDescriptions: []certResource{},
webhookDescriptions: []certResource{},
},
args: args{
deploymentName: "test",
ca: ca,
rotateAt: time.Now().Add(time.Hour),
ports: []corev1.ServicePort{},
depSpec: appsv1.DeploymentSpec{
Selector: selector(t, "test=label"),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
ServiceAccountName: "test-sa",
},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"foo": "bar",
},
},
},
},
},
want: &appsv1.DeploymentSpec{
Selector: selector(t, "test=label"),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"foo": "bar",
OLMCAHashAnnotationKey: caHash},
},
Spec: corev1.PodSpec{
ServiceAccountName: "test-sa",
Volumes: []corev1.Volume{
{
Name: "apiservice-cert",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "test-service-cert",
Items: []corev1.KeyToPath{
{
Key: "tls.crt",
Path: "apiserver.crt",
},
{
Key: "tls.key",
Path: "apiserver.key",
},
},
},
},
},
{
Name: "webhook-cert",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "test-service-cert",
Items: []corev1.KeyToPath{
{
Key: "tls.crt",
Path: "tls.crt",
},
{
Key: "tls.key",
Path: "tls.key",
},
},
},
},
},
},
},
},
},
},
{
name: "doesn't add duplicate service ownerrefs",
mockExternal: func(mockOpClient *operatorclientmocks.MockClientInterface, fakeLister *operatorlisterfakes.FakeOperatorLister, namespace string, args args) {
mockOpClient.EXPECT().DeleteService(namespace, "test-service", &metav1.DeleteOptions{}).Return(nil)
service := corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Namespace: owner.GetNamespace(),
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
OwnerReferences: []metav1.OwnerReference{
ownerutil.NonBlockingOwner(owner),
},
},
Spec: corev1.ServiceSpec{
Ports: args.ports,
Selector: selector(t, "test=label").MatchLabels,
},
}
mockOpClient.EXPECT().CreateService(&service).Return(&service, nil)
hosts := []string{
fmt.Sprintf("%s.%s", service.GetName(), namespace),
fmt.Sprintf("%s.%s.svc", service.GetName(), namespace),
}
servingPair, err := certGenerator.Generate(args.rotateAt, Organization, args.ca, hosts)
require.NoError(t, err)
// Create Secret for serving cert
certPEM, privPEM, err := servingPair.ToPEM()
require.NoError(t, err)
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service-cert",
Namespace: namespace,
Annotations: map[string]string{OLMCAHashAnnotationKey: caHash},
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Data: map[string][]byte{
"tls.crt": certPEM,
"tls.key": privPEM,
OLMCAPEMKey: caPEM,
},
Type: corev1.SecretTypeTLS,
}
mockOpClient.EXPECT().UpdateSecret(secret).Return(secret, nil)
secretRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: secret.GetName(),
Namespace: namespace,
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
APIGroups: []string{""},
Resources: []string{"secrets"},
ResourceNames: []string{secret.GetName()},
},
},
}
mockOpClient.EXPECT().UpdateRole(secretRole).Return(secretRole, nil)
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: secret.GetName(),
Namespace: namespace,
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "test-sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: secretRole.GetName(),
},
}
mockOpClient.EXPECT().UpdateRoleBinding(roleBinding).Return(roleBinding, nil)
authDelegatorClusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: service.GetName() + "-system:auth-delegator",
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "test-sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
}
mockOpClient.EXPECT().UpdateClusterRoleBinding(authDelegatorClusterRoleBinding).Return(authDelegatorClusterRoleBinding, nil)
authReaderRoleBinding := &rbacv1.RoleBinding{
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: args.depSpec.Template.Spec.ServiceAccountName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
}
authReaderRoleBinding.SetName(service.GetName() + "-auth-reader")
authReaderRoleBinding.SetNamespace(KubeSystem)
authReaderRoleBinding.SetLabels(map[string]string{OLMManagedLabelKey: OLMManagedLabelValue})
mockOpClient.EXPECT().UpdateRoleBinding(authReaderRoleBinding).Return(authReaderRoleBinding, nil)
},
state: fakeState{
existingService: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: owner.GetNamespace(),
OwnerReferences: []metav1.OwnerReference{
ownerutil.NonBlockingOwner(owner),
},
},
},
existingSecret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{},
},
existingRole: &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{},
},
existingRoleBinding: &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{},
},
existingClusterRoleBinding: &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{},
},
},
fields: fields{
owner: owner,
previousStrategy: nil,
templateAnnotations: nil,
initializers: nil,
apiServiceDescriptions: []certResource{},
webhookDescriptions: []certResource{},
},
args: args{
deploymentName: "test",
ca: ca,
rotateAt: time.Now().Add(time.Hour),
ports: []corev1.ServicePort{},
depSpec: appsv1.DeploymentSpec{
Selector: selector(t, "test=label"),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
ServiceAccountName: "test-sa",
},
},
},
},
want: &appsv1.DeploymentSpec{
Selector: selector(t, "test=label"),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{OLMCAHashAnnotationKey: caHash},
},
Spec: corev1.PodSpec{
ServiceAccountName: "test-sa",
Volumes: []corev1.Volume{
{
Name: "apiservice-cert",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "test-service-cert",
Items: []corev1.KeyToPath{
{
Key: "tls.crt",
Path: "apiserver.crt",
},
{
Key: "tls.key",
Path: "apiserver.key",
},
},
},
},
},
{
Name: "webhook-cert",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "test-service-cert",
Items: []corev1.KeyToPath{
{
Key: "tls.crt",
Path: "tls.crt",
},
{
Key: "tls.key",
Path: "tls.key",
},
},
},
},
},
},
},
},
},
},
{
name: "labels an unlabelled secret if present",
mockExternal: func(mockOpClient *operatorclientmocks.MockClientInterface, fakeLister *operatorlisterfakes.FakeOperatorLister, namespace string, args args) {
mockOpClient.EXPECT().DeleteService(namespace, "test-service", &metav1.DeleteOptions{}).Return(nil)
service := corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service",
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
OwnerReferences: []metav1.OwnerReference{
ownerutil.NonBlockingOwner(&v1alpha1.ClusterServiceVersion{}),
},
},
Spec: corev1.ServiceSpec{
Ports: args.ports,
Selector: selector(t, "test=label").MatchLabels,
},
}
mockOpClient.EXPECT().CreateService(&service).Return(&service, nil)
hosts := []string{
fmt.Sprintf("%s.%s", service.GetName(), namespace),
fmt.Sprintf("%s.%s.svc", service.GetName(), namespace),
}
servingPair, err := certGenerator.Generate(args.rotateAt, Organization, args.ca, hosts)
require.NoError(t, err)
// Create Secret for serving cert
certPEM, privPEM, err := servingPair.ToPEM()
require.NoError(t, err)
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-service-cert",
Namespace: namespace,
Annotations: map[string]string{OLMCAHashAnnotationKey: caHash},
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
OwnerReferences: []metav1.OwnerReference{
ownerutil.NonBlockingOwner(&v1alpha1.ClusterServiceVersion{}),
},
},
Data: map[string][]byte{
"tls.crt": certPEM,
"tls.key": privPEM,
OLMCAPEMKey: caPEM,
},
Type: corev1.SecretTypeTLS,
}
// secret already exists, but without label
mockOpClient.EXPECT().CreateSecret(secret).Return(nil, errors.NewAlreadyExists(schema.GroupResource{
Group: "",
Resource: "secrets",
}, "test-service-cert"))
// update secret with label
mockOpClient.EXPECT().UpdateSecret(secret).Return(secret, nil)
secretRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: secret.GetName(),
Namespace: namespace,
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"get"},
APIGroups: []string{""},
Resources: []string{"secrets"},
ResourceNames: []string{secret.GetName()},
},
},
}
mockOpClient.EXPECT().UpdateRole(secretRole).Return(secretRole, nil)
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: secret.GetName(),
Namespace: namespace,
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "test-sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: secretRole.GetName(),
},
}
mockOpClient.EXPECT().UpdateRoleBinding(roleBinding).Return(roleBinding, nil)
authDelegatorClusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: service.GetName() + "-system:auth-delegator",
Labels: map[string]string{OLMManagedLabelKey: OLMManagedLabelValue},
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: "test-sa",
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
}
mockOpClient.EXPECT().UpdateClusterRoleBinding(authDelegatorClusterRoleBinding).Return(authDelegatorClusterRoleBinding, nil)
authReaderRoleBinding := &rbacv1.RoleBinding{
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
APIGroup: "",
Name: args.depSpec.Template.Spec.ServiceAccountName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
}
authReaderRoleBinding.SetName(service.GetName() + "-auth-reader")
authReaderRoleBinding.SetNamespace(KubeSystem)
authReaderRoleBinding.SetLabels(map[string]string{OLMManagedLabelKey: OLMManagedLabelValue})
mockOpClient.EXPECT().UpdateRoleBinding(authReaderRoleBinding).Return(authReaderRoleBinding, nil)
},
state: fakeState{
existingService: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
ownerutil.NonBlockingOwner(&v1alpha1.ClusterServiceVersion{}),
},
},
},
// unlabelled secret won't be in cache
getSecretError: errors.NewNotFound(schema.GroupResource{
Group: "",
Resource: "Secret",
}, "nope"),
existingRole: &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{},
},
existingRoleBinding: &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{},
},
existingClusterRoleBinding: &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{},
},
},
fields: fields{
owner: &v1alpha1.ClusterServiceVersion{},
previousStrategy: nil,
templateAnnotations: nil,
initializers: nil,
apiServiceDescriptions: []certResource{},
webhookDescriptions: []certResource{},
},
args: args{
deploymentName: "test",
ca: ca,
rotateAt: time.Now().Add(time.Hour),
ports: []corev1.ServicePort{},
depSpec: appsv1.DeploymentSpec{
Selector: selector(t, "test=label"),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
ServiceAccountName: "test-sa",
},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"foo": "bar",
},
},
},
},
},
want: &appsv1.DeploymentSpec{
Selector: selector(t, "test=label"),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"foo": "bar",
OLMCAHashAnnotationKey: caHash},
},
Spec: corev1.PodSpec{
ServiceAccountName: "test-sa",
Volumes: []corev1.Volume{
{
Name: "apiservice-cert",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "test-service-cert",
Items: []corev1.KeyToPath{
{
Key: "tls.crt",
Path: "apiserver.crt",
},
{
Key: "tls.key",
Path: "apiserver.key",
},
},
},
},
},
{
Name: "webhook-cert",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "test-service-cert",
Items: []corev1.KeyToPath{
{
Key: "tls.crt",
Path: "tls.crt",
},
{
Key: "tls.key",
Path: "tls.key",
},
},
},
},
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
certGenerator = certs.CertGeneratorFunc(staticCertGenerator)
mockOpClient := operatorclientmocks.NewMockClientInterface(ctrl)
fakeLister := newFakeLister(tt.state)
tt.mockExternal(mockOpClient, fakeLister, tt.fields.owner.GetNamespace(), tt.args)
client := wrappers.NewInstallStrategyDeploymentClient(mockOpClient, fakeLister, tt.fields.owner.GetNamespace())
i := &StrategyDeploymentInstaller{
strategyClient: client,
owner: tt.fields.owner,
previousStrategy: tt.fields.previousStrategy,
templateAnnotations: tt.fields.templateAnnotations,
initializers: tt.fields.initializers,
apiServiceDescriptions: tt.fields.apiServiceDescriptions,
webhookDescriptions: tt.fields.webhookDescriptions,
}
got, _, err := i.installCertRequirementsForDeployment(tt.args.deploymentName, tt.args.ca, tt.args.rotateAt, tt.args.depSpec, tt.args.ports)
if (err != nil) != tt.wantErr {
t.Errorf("installCertRequirementsForDeployment() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("installCertRequirementsForDeployment() \n got = %v \n want = %v\n diff=%s\n", got, tt.want, cmp.Diff(got, tt.want))
}
})
}
}
|
package ratelimit
import (
"log"
"github.com/prometheus/client_golang/prometheus"
"go.bmvs.io/ynab"
)
// Prometheus gauges describing the YNAB API rate limit as reported by
// the most recent API response.
var (
	// rateLimitUsed is the number of requests consumed in the current window.
	rateLimitUsed = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "rate_limit_used",
		Help: "Rate limit used of YNAB API",
	})
	// rateLimitTotal is the total number of requests allowed per window.
	rateLimitTotal = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "rate_limit_total",
		Help: "Rate limit total of YNAB API",
	})
)
// init registers both rate-limit gauges with the default Prometheus
// registry. MustRegister is variadic, so a single call covers both and
// still panics on the first registration failure, as before.
func init() {
	prometheus.MustRegister(rateLimitUsed, rateLimitTotal)
}
// StartMetrics reads the rate-limit counters from the client's most
// recent API response and publishes them to the Prometheus gauges.
func StartMetrics(c ynab.ClientServicer) {
	// Fixed typo in the log message ("limting" -> "limiting").
	log.Print("Getting Rate limiting metrics...")
	rateLimitUsed.Set(float64(c.RateLimit().Used()))
	rateLimitTotal.Set(float64(c.RateLimit().Total()))
}
|
package main
import (
"fmt"
intcode "github.com/seizethedave/advent2019/advent02"
)
const (
	// target is the value memory cell 0 must hold after the intcode
	// program runs; main searches noun/verb inputs until it is produced.
	target = 19690720
)
// main brute-forces the noun (cell 1) and verb (cell 2) inputs of the
// intcode program until executing it leaves target in cell 0, then
// prints 100*noun+verb and exits.
func main() {
	memory := []intcode.Word{1, 0, 0, 3, 1, 1, 2, 3, 1, 3, 4, 3, 1, 5, 0, 3, 2, 13,
		1, 19, 1, 19, 10, 23, 1, 23, 13, 27, 1, 6, 27, 31, 1, 9, 31, 35, 2, 10,
		35, 39, 1, 39, 6, 43, 1, 6, 43, 47, 2, 13, 47, 51, 1, 51, 6, 55, 2, 6,
		55, 59, 2, 59, 6, 63, 2, 63, 13, 67, 1, 5, 67, 71, 2, 9, 71, 75, 1, 5,
		75, 79, 1, 5, 79, 83, 1, 83, 6, 87, 1, 87, 6, 91, 1, 91, 5, 95, 2, 10,
		95, 99, 1, 5, 99, 103, 1, 10, 103, 107, 1, 107, 9, 111, 2, 111, 10, 115,
		1, 115, 9, 119, 1, 13, 119, 123, 1, 123, 9, 127, 1, 5, 127, 131, 2, 13,
		131, 135, 1, 9, 135, 139, 1, 2, 139, 143, 1, 13, 143, 0, 99, 2, 0, 14, 0}
	// scratch is reused for every trial so the pristine program is kept intact.
	scratch := make([]intcode.Word, len(memory))
	for noun := intcode.Word(0); noun <= 99; noun++ {
		for verb := intcode.Word(0); verb <= 99; verb++ {
			copy(scratch, memory)
			scratch[1], scratch[2] = noun, verb
			if err := intcode.Exec(scratch); err != nil {
				panic(err)
			}
			if scratch[0] == target {
				fmt.Println(100*noun + verb)
				return
			}
		}
	}
}
|
// Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/
package aiven
import (
"fmt"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
// datasourceServiceIntegrationEndpoint declares the read-only Terraform
// data source for an Aiven service integration endpoint, keyed by
// project and endpoint_name.
func datasourceServiceIntegrationEndpoint() *schema.Resource {
	return &schema.Resource{
		Read:   datasourceServiceIntegrationEndpointRead,
		Schema: resourceSchemaAsDatasourceSchema(aivenServiceIntegrationEndpointSchema, "project", "endpoint_name"),
	}
}
// datasourceServiceIntegrationEndpointRead lists all integration
// endpoints of the given project, finds the one matching endpoint_name,
// sets the resource ID and copies its properties into Terraform state.
// It returns an error when no endpoint with that name exists.
func datasourceServiceIntegrationEndpointRead(d *schema.ResourceData, m interface{}) error {
	client := m.(*aiven.Client)
	projectName := d.Get("project").(string)
	endpointName := d.Get("endpoint_name").(string)

	endpoints, err := client.ServiceIntegrationEndpoints.List(projectName)
	if err != nil {
		return err
	}

	for _, endpoint := range endpoints {
		if endpoint.EndpointName == endpointName {
			d.SetId(buildResourceID(projectName, endpoint.EndpointID))
			return copyServiceIntegrationEndpointPropertiesFromAPIResponseToTerraform(d, endpoint, projectName)
		}
	}

	// Error strings are lowercase per Go convention; %q quotes the name.
	return fmt.Errorf("endpoint %q not found", endpointName)
}
|
package main
import "fmt"
// main demonstrates how append grows a slice's capacity: first on a
// slice created with an explicit cap (3, 5), then on one created with
// length only.
func main() {
	report := func(s []int) {
		fmt.Printf("len = %d, cap = %d, slice = %v\n", len(s), cap(s), s)
	}

	fmt.Println("------事先声明cap的情况------")
	numbers := make([]int, 3, 5)
	report(numbers)
	// Append 1, 2, 3 one at a time, printing the state after each.
	for _, v := range []int{1, 2, 3} {
		numbers = append(numbers, v)
		report(numbers)
	}

	fmt.Println("------没有事先声明cap的情况------")
	numbers2 := make([]int, 3)
	report(numbers2)
	// A single append past the original length forces a reallocation.
	numbers2 = append(numbers2, 3)
	report(numbers2)
}
|
// TODO exec this main function by another main
package main
import (
"testing"
// ref: https://github.com/golang/go/blob/bb998747d6c5213e3a366936c482e149dce62720/src/cmd/go/internal/load/test.go#L616
// Todo import path
//{{if .ImportTest}}
//{{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}}
//{{end}}
//{{if .ImportXtest}}
//{{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}}
//{{end}}
//{{if .Cover}}
//{{range $i, $p := .Cover.Vars}}
//_cover{{$i}} {{$p.Package.ImportPath | printf "%q"}}
//{{end}}
//{{end}}
)
// tests is the list of test functions handed to the testing driver.
// It is empty for now: the entries are meant to be generated from the
// commented template below.
var tests = []testing.InternalTest{
	// Todo gather tests
	//{{range .Tests}}
	//{"{{.Name}}", {{.Package}}.{{.Name}}},
	//{{end}}
}
// benchmarks is the list of benchmark functions for the driver; like
// tests, it is populated from a template that is not yet wired up.
var benchmarks = []testing.InternalBenchmark{
	// Todo gather benchmarks
	//{{range .Benchmarks}}
	//{"{{.Name}}", {{.Package}}.{{.Name}}},
	//{{end}}
}
// examples is the list of example functions (with expected output) for
// the driver; also template-generated, currently empty.
var examples = []testing.InternalExample{
	// Todo gather examples
	//{{range .Examples}}
	//{"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
	//{{end}}
}
// init is reserved for wiring the package import path into the test
// dependency shim; the assignment is still templated out.
func init() {
	// Todo import path
	//testdeps.ImportPath = {{.ImportPath | printf "%q"}}
}
// Todo coverage report
//{{if .Cover}}
//
//// Only updated by init functions, so no need for atomicity.
//var (
// coverCounters = make(map[string][]uint32)
// coverBlocks = make(map[string][]testing.CoverBlock)
//)
//
//func init() {
// {{range $i, $p := .Cover.Vars}}
// {{range $file, $cover := $p.Vars}}
// coverRegisterFile({{printf "%q" $cover.File}}, _cover{{$i}}.{{$cover.Var}}.Count[:], _cover{{$i}}.{{$cover.Var}}.Pos[:], _cover{{$i}}.{{$cover.Var}}.NumStmt[:])
// {{end}}
// {{end}}
//}
//
//func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
// if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
// panic("coverage: mismatched sizes")
// }
// if coverCounters[fileName] != nil {
// // Already registered.
// return
// }
// coverCounters[fileName] = counter
// block := make([]testing.CoverBlock, len(counter))
// for i := range counter {
// block[i] = testing.CoverBlock{
// Line0: pos[3*i+0],
// Col0: uint16(pos[3*i+2]),
// Line1: pos[3*i+1],
// Col1: uint16(pos[3*i+2]>>16),
// Stmts: numStmts[i],
// }
// }
// coverBlocks[fileName] = block
//}
//{{end}}
// main is the would-be generated test entry point. Every step (coverage
// registration, testing.MainStart, TestMain dispatch) is still a
// commented template, so running it currently does nothing.
func main() {
	// TODO coverage report
	//{{if .Cover}}
	//testing.RegisterCover(testing.Cover{
	//	Mode: {{printf "%q" .Cover.Mode}},
	//	Counters: coverCounters,
	//	Blocks: coverBlocks,
	//	CoveredPackages: {{printf "%q" .Covered}},
	//})
	//{{end}}
	//m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)
	//{{with .TestMain}}
	//{{.Package}}.{{.Name}}(m)
	// Fixme only support TestMain
	//os.Exit(int(reflect.ValueOf(m).Elem().FieldByName("exitCode").Int()))
	//{{else}}
	//os.Exit(m.Run())
	//{{end}}
}
|
package models
// INFOTEMP is a gorm model holding a per-task aggregate row count.
type INFOTEMP struct {
	// TASKID is the crawl task identifier (varchar(128) column).
	TASKID string `gorm:"type:varchar(128);"`
	// COUNT is the aggregated count for the task.
	COUNT int
	//COLLECTED_COUNT int
	//INSERT_COUNT int
	//PAGE_COUNT int
	//COUNT_NULL int
}
// INFO is a gorm model for a collected article row. Only TASK_ID and
// COUNT are active; the remaining fields are kept commented out for
// reference.
//
// Fix: the gorm struct-tag key was misspelled "cloumn", which gorm
// silently ignores (falling back to default column naming). All tags,
// including the commented ones, now use the correct "column" key.
type INFO struct {
	//ID int `gorm:"column:id"`
	//URL string `gorm:"column:url"`
	//TITLE string `gorm:"column:title"`
	//AUTHOR string `gorm:"column:author"`
	//SOURCE string `gorm:"column:source"`
	//RELEASE_DATETIME string `gorm:"column:release_datetime"`
	//CONTENT string `gorm:"column:content"`
	//MEDIA_TYPE string `gorm:"column:media_type"`
	//ORIGINAL_TITLE string `gorm:"column:original_title"`
	//EDITOR string `gorm:"column:editor"`
	//REPORTER string `gorm:"column:reporter"`
	//CONTENTS string `gorm:"column:contents"`
	//READING_TIMES string `gorm:"column:reading_times"`
	//ABSTRACT_DATA string `gorm:"column:abstract_data"`
	//MEDIA string `gorm:"column:media"`
	//MEDIA_CHANNEL string `gorm:"column:media_channel"`
	//LOCATION string `gorm:"column:location"`
	//LOCATION_PATH string `gorm:"column:location_path"`
	//COLLECTION_TOOL string `gorm:"column:collection_tool"`
	//USER string `gorm:"column:user"`
	//SITE_URL string `gorm:"column:site_url"`
	//LEAF_ID string `gorm:"column:leaf_id"`
	TASK_ID string `gorm:"column:task_id"`
	//TASK_NAME string `gorm:"column:task_name"`
	//GET_TIME string `gorm:"column:get_time"`
	//KEYWORD string `gorm:"column:keyword"`
	//PUB_TIME string `gorm:"column:pub_time"`
	//PROJECT_ID string `gorm:"column:project_id"`
	//ERROR_MSG string `gorm:"column:error_msg"`
	COUNT int
}
// TASKINFOS is a projection of a crawl task row; only the identifying
// and reporting fields are active, the rest remain commented out.
type TASKINFOS struct {
	//ID int
	TASK_ID string
	//CONFIG_INFO string
	CREATE_TIME string
	//IS_INCREMENTAL int
	//IS_TIMER_START int
	NAME string
	//NEED_SCREENSHOT int
	//PROJECT_ID string
	//PROJECT_NAME string
	//OWNER_ID int
	OWNER_NAME string
	//START_TIME time.Time
	//STATUS int
	//STOP_TIME time.Time
	//TYPE_REPEAT int
	//TYPE_TASK int
	//UPDATE_TIME time.Time
	//VERSION int
	//IS_FAVORITE int
	//STEP_PROGRESS int
	//IS_START int
	//HAS_STOP_TIME int
	//IS_KEEP_LAST_DATA int
	//RUN_PERIODICALLY int
	//INTERVAL int
	//INTERVAL_TYPE int
	//RULE_INFO string
	//REMARKS string
	//CHANNEL string
	//NEED_SREENSHOT int
	ENTRY_LINK string
	//PAGE_COUNT string
	//COLLECTED_COUNT string
	//INSERT_COUNT string
	COUNT string
}
// TASKLOGS is a projection of a task-run log row carrying the per-run
// page/collect/insert counters; timing fields are commented out.
type TASKLOGS struct {
	//ID int
	//OWNER_ID int
	TASK_ID string
	//TASK_NAME string
	//PROJECT_ID string
	PAGE_COUNT      string
	COLLECTED_COUNT string
	INSERT_COUNT    string
	//SERVICE_NAME string
	//START_TIME string
	//FINISH_TIME string
	//UPDATE_TIME string
}
|
package tpl
import (
"crypto/rand"
"encoding/hex"
"fmt"
"os"
)
// CheckIfError prints err in red to stdout and terminates the process
// with exit code 1. It is a no-op when err is nil.
func CheckIfError(err error) {
	if err != nil {
		fmt.Printf("\x1b[31;1m%s\x1b[0m\n", fmt.Sprintf("error: %s", err))
		os.Exit(1)
	}
}
// randomHex returns n cryptographically random bytes encoded as a
// 2n-character lowercase hexadecimal string, or the read error.
func randomHex(n int) (string, error) {
	buf := make([]byte, n)
	_, err := rand.Read(buf)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
|
package controllers
import (
"encoding/json"
"github.com/kataras/iris/context"
"gocherry-api-gateway/components/common_enum"
"gocherry-api-gateway/components/etcd_client"
"gocherry-api-gateway/components/utils"
)
// ClusterSaveReq is both the request payload for cluster save/list
// endpoints and the JSON document stored per cluster in etcd.
type ClusterSaveReq struct {
	ClusterName string `json:"cluster_name"`
	Title       string `json:"title"`
	Detail      string `json:"detail"`
	// AppName scopes the cluster under an application; required on input.
	AppName    string `json:"app_name" validate:"required"`
	UpdateTime string `json:"update_time"`
}
// GetList returns every cluster stored under the requested app: it
// scans the etcd prefix ETCD_KEYS_APP_CLUSTER_LIST/<app> and unmarshals
// each value into a ClusterSaveReq. Renders an error when none exist.
func (c *ClusterController) GetList(ctx context.Context) {
	var req ClusterSaveReq
	c.GetRequest(ctx, &req)
	var clusterList []ClusterSaveReq
	// NOTE(review): oneCluster is reused across iterations, so fields
	// missing from a later JSON value would keep the previous value's
	// data — confirm whether that is intended.
	var oneCluster ClusterSaveReq
	clusterKey := common_enum.ETCD_KEYS_APP_CLUSTER_LIST + req.AppName
	// NOTE(review): the etcd error is ignored; a failed read is
	// indistinguishable from "no data" here.
	list, _ := etcd_client.GetKvPrefix(clusterKey)
	for _, value := range list.Kvs {
		_ = json.Unmarshal([]byte(value.Value), &oneCluster)
		clusterList = append(clusterList, oneCluster)
	}
	if len(clusterList) > 0 {
		c.RenderJson(ctx, clusterList)
	} else {
		c.RenderError(ctx, common_enum.ComError{Msg: "无数据"})
	}
}
// Save upserts a cluster document at
// ETCD_KEYS_APP_CLUSTER_LIST/<app>/<cluster>, stamping UpdateTime with
// the current time before writing.
func (c *ClusterController) Save(ctx context.Context) {
	var req ClusterSaveReq
	c.GetRequest(ctx, &req)
	appKey := common_enum.ETCD_KEYS_APP_CLUSTER_LIST + req.AppName + "/" + req.ClusterName
	appExists, _ := etcd_client.GetKv(appKey)
	// NOTE(review): this existence check is dead code — the branch body
	// is empty, so an existing cluster is silently overwritten. Confirm
	// whether duplicates should be rejected here.
	if appExists != nil {
	}
	req.UpdateTime = utils.GetNowTimeFormat()
	newCluster, _ := json.Marshal(req)
	_, _ = etcd_client.PutKv(appKey, string(newCluster))
	c.RenderJson(ctx, "服务创建成功")
}
|
package main
import "math"
// Max returns the larger of a and b.
func Max(a int, b int) int {
	if a < b {
		return b
	}
	return a
}
// Min returns the smaller of a and b.
func Min(a int, b int) int {
	if a > b {
		return b
	}
	return a
}
// MaxElement returns the index of the first occurrence of the largest
// value in list. For an empty list it returns 0 (callers must not use
// the result as an index in that case), matching the old behavior.
//
// Fix: the previous version seeded the maximum with math.MinInt32,
// which yields a wrong index when every element is below MinInt32
// (possible since int is 64-bit on most platforms). Comparing against
// list[idx] removes the sentinel entirely.
func MaxElement(list []int) int {
	idx := 0
	for i, e := range list {
		if e > list[idx] {
			idx = i
		}
	}
	return idx
}
// MinElement returns the index of the first occurrence of the smallest
// value in list, or 0 for an empty list (callers must not use the
// result as an index then), matching the old behavior.
//
// Fix: the previous version seeded the minimum with math.MaxInt32,
// which yields a wrong index when every element exceeds MaxInt32
// (possible with 64-bit int). Comparing against list[idx] removes the
// sentinel entirely.
func MinElement(list []int) int {
	idx := 0
	for i, e := range list {
		if e < list[idx] {
			idx = i
		}
	}
	return idx
}
// MinMaxElement returns the indices of the first smallest and first
// largest elements of list in a single pass (0, 0 for an empty list).
// NOTE(review): min/max are seeded with math.MaxInt32/math.MinInt32,
// so lists whose values all lie outside the int32 range yield wrong
// indices on 64-bit int — consider seeding from the first element.
func MinMaxElement(list []int) (minIdx int, maxIdx int) {
	minIdx, maxIdx = 0, 0
	min, max := math.MaxInt32, math.MinInt32
	for i, e := range list {
		if e < min {
			min = e
			minIdx = i
		}
		if e > max {
			max = e
			maxIdx = i
		}
	}
	return minIdx, maxIdx
}
// LexicographicalCompare reports whether a sorts strictly before b in
// shortlex order: a shorter string always sorts first, and strings of
// equal length are compared lexicographically. (The length shortcut is
// the original contract; true lexicographic order would put "ab"
// before "b".)
//
// Fix: the old implementation indexed []rune(a) with the *byte* offset
// produced by ranging over b, which panics or compares the wrong runes
// for multibyte UTF-8 input. For equal-byte-length valid UTF-8, plain
// string comparison orders by code point, so a < b is the correct and
// safe replacement.
func LexicographicalCompare(a string, b string) bool {
	if len(a) != len(b) {
		return len(a) < len(b)
	}
	return a < b
}
// IsPermutation reports whether b is a rearrangement of a (same
// multiset of values).
//
// Fix: the old version only verified that no count went negative while
// consuming b, so surplus elements in a were never detected — e.g.
// IsPermutation([]int{1, 1}, []int{1}) wrongly returned true. With the
// added length check, "no count goes negative" is sufficient. The
// explicit existence test before m[k]++ was also redundant (a missing
// key reads as 0).
func IsPermutation(a []int, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[int]int, len(a))
	for _, k := range a {
		counts[k]++
	}
	for _, k := range b {
		counts[k]--
		if counts[k] < 0 {
			return false
		}
	}
	return true
}
// NextPermutation rearranges list into the next permutation in
// lexicographic order and returns true. If list is already the last
// permutation, it is reversed back to the first one and false is
// returned (lists of length 0 or 1 are left untouched).
func NextPermutation(list []int) bool {
	n := len(list)
	if n < 2 {
		return false
	}
	// Find the rightmost ascent: largest i with list[i] < list[i+1].
	i := n - 2
	for i >= 0 && list[i] >= list[i+1] {
		i--
	}
	if i < 0 {
		// Fully descending: wrap around to the first permutation.
		Reverse(list)
		return false
	}
	// Swap list[i] with the rightmost element greater than it, then
	// reverse the (descending) suffix to make it ascending.
	k := n - 1
	for list[k] <= list[i] {
		k--
	}
	list[i], list[k] = list[k], list[i]
	Reverse(list[i+1:])
	return true
}
// PrevPermutation rearranges list into the previous permutation in
// lexicographic order and returns true. If list is already the first
// permutation, it is reversed to the last one and false is returned
// (lists of length 0 or 1 are left untouched).
func PrevPermutation(list []int) bool {
	n := len(list)
	if n < 2 {
		return false
	}
	// Find the rightmost descent: largest i with list[i] > list[i+1].
	i := n - 2
	for i >= 0 && list[i] <= list[i+1] {
		i--
	}
	if i < 0 {
		// Fully ascending: wrap around to the last permutation.
		Reverse(list)
		return false
	}
	// Swap list[i] with the rightmost element smaller than it, then
	// reverse the suffix.
	k := n - 1
	for list[k] >= list[i] {
		k--
	}
	list[i], list[k] = list[k], list[i]
	Reverse(list[i+1:])
	return true
}
|
package products
const (
	// SparkPastaName identifies the spark pasta product.
	SparkPastaName = "SparkPastaName"
	// SparkPastaValue is its unit price.
	SparkPastaValue = 35
)
var (
	// SparkPasta is a ready-made product instance built from the
	// SparkPasta constants above.
	SparkPasta = &Product{
		Name:  SparkPastaName,
		Value: SparkPastaValue,
	}
)
// Product is a sellable item with a display name and a unit value.
type Product struct {
	Name  string
	Value int
}
// Repository manages product stock: adding units, listing all stock,
// checking availability by product, and consuming a unit on purchase.
type Repository interface {
	Add(*Product) error
	List() map[string][]*Product
	Check(*Product) int
	Purchase(*Product) bool
}
// repository is the in-memory Repository implementation; stock is kept
// as a slice of units per product name.
type repository struct {
	AvailableProducts map[string][]*Product
}
// New builds an empty in-memory product repository.
func New() Repository {
	repo := &repository{
		AvailableProducts: map[string][]*Product{},
	}
	return repo
}
// Add appends one unit of product to the stock kept under its name.
// Validation is still TODO, so it always succeeds.
func (r *repository) Add(product *Product) error {
	//TODO product validation
	stock := r.AvailableProducts[product.Name]
	r.AvailableProducts[product.Name] = append(stock, product)
	return nil
}
// List returns the live stock index keyed by product name (not a copy;
// callers share the repository's backing map).
func (r *repository) List() map[string][]*Product {
	return r.AvailableProducts
}
// Check returns how many units of product are currently in stock.
// Indexing a missing key yields a nil slice whose len is 0, so the
// previous explicit existence test (a second map lookup) was redundant.
func (r *repository) Check(product *Product) int {
	return len(r.AvailableProducts[product.Name])
}
// Purchase removes one unit of product from stock and reports whether
// a unit was available.
//
// The receiver is now *repository for consistency with the other
// methods; the map mutation worked through the old value receiver too
// (maps are reference types), but mixed receiver kinds on one type are
// a lint smell, and *repository still satisfies Repository.
func (r *repository) Purchase(product *Product) bool {
	if r.Check(product) == 0 {
		return false
	}
	stock := r.AvailableProducts[product.Name]
	r.AvailableProducts[product.Name] = stock[:len(stock)-1]
	return true
}
|
package mqops
import (
"encoding/json"
"github.com/matscus/Hamster/Guns/busM5/errors"
)
// init registers the GetDepositList operation at package load time.
func init() {
	GetDepositList()
}
// getDepositListJSON is the JSON request envelope produced for the
// getDepositList MQ operation.
type getDepositListJSON struct {
	Data struct {
		// Hid is the identifier copied from the pooled input data.
		Hid string `json:"hid"`
	} `json:"data"`
}
// GetDepositList builds the "GetDepositList" operation descriptor,
// starts the goroutine that produces its JSON request bodies, and
// appends the operation to the package-level MQOps list.
func GetDepositList() {
	getDepositList := New()
	getDepositList.Name = "GetDepositList"
	getDepositList.PoolCh = NewCh(10)
	stringProperty := make(map[string]string)
	// NOTE(review): "autorization" looks like a typo for "authorization",
	// but downstream consumers may rely on the misspelled key — confirm
	// before changing it.
	stringProperty["autorization"] = "Bearer"
	stringProperty["esfl_methodName"] = "getDepositList"
	stringProperty["src_systemID"] = "test_systemID"
	stringProperty["src_channel"] = "test_channel"
	getDepositList.StringProperty = stringProperty
	go getDepositListJSONBody(getDepositList.PoolCh)
	MQOps = append(MQOps, getDepositList)
}
// getDepositListJSONBody loops forever, building a getDepositList JSON
// body from each pooled data item and sending the serialized result on
// ch. Intended to run as a goroutine (started by GetDepositList).
func getDepositListJSONBody(ch chan string) {
	var d getDepositListJSON
	for {
		// NOTE(review): input is read from the package-level PoolCh, not
		// from a channel tied to the ch parameter this goroutine sends
		// on — confirm this cross-wiring is intended.
		data := <-PoolCh
		d.Data.Hid = data.Hid
		e, err := json.Marshal(d)
		// Fixed typo in the error message ("boby" -> "body").
		errors.CheckError(err, "Error marshal body")
		ch <- string(e)
	}
}
|
package query
import (
"time"
"github.com/gofrs/uuid"
)
// AreaReadQuery reads a single area projection by its UID.
// All query interfaces in this package return results asynchronously
// on a receive-only QueryResult channel.
type AreaReadQuery interface {
	FindByID(areaUID uuid.UUID) <-chan QueryResult
}

// CropQuery looks up crops by batch ID, farm, or area.
type CropQuery interface {
	FindByBatchID(batchID string) <-chan QueryResult
	FindAllCropsByFarm(farmUID uuid.UUID) <-chan QueryResult
	FindAllCropsByArea(areaUID uuid.UUID) <-chan QueryResult
}

// CropEventQuery streams the events recorded for one crop.
type CropEventQuery interface {
	FindAllByCropID(uid uuid.UUID) <-chan QueryResult
}

// CropReadQuery serves the crop read model: lookups, paginated farm
// listings and archives, counts, and summary information.
type CropReadQuery interface {
	FindByID(uid uuid.UUID) <-chan QueryResult
	FindByBatchID(batchID string) <-chan QueryResult
	FindAllCropsByFarm(farmUID uuid.UUID, status string, page, limit int) <-chan QueryResult
	CountAllCropsByFarm(farmUID uuid.UUID, status string) <-chan QueryResult
	FindAllCropsByArea(areaUID uuid.UUID) <-chan QueryResult
	FindAllCropsArchives(farmUID uuid.UUID, page, limit int) <-chan QueryResult
	CountAllArchivedCropsByFarm(farmUID uuid.UUID) <-chan QueryResult
	FindCropsInformation(farmUID uuid.UUID) <-chan QueryResult
	CountTotalBatch(farmUID uuid.UUID) <-chan QueryResult
}

// CropActivityQuery reads crop activities, optionally filtered by
// activity type.
type CropActivityQuery interface {
	FindAllByCropID(uid uuid.UUID) <-chan QueryResult
	FindByCropIDAndActivityType(uid uuid.UUID, activityType interface{}) <-chan QueryResult
}

// MaterialReadQuery reads material projections by UID or by plant type
// and name.
type MaterialReadQuery interface {
	FindByID(inventoryUID uuid.UUID) <-chan QueryResult
	FindMaterialByPlantTypeCodeAndName(plantType string, name string) <-chan QueryResult
}

// FarmReadQuery reads a farm projection by its UID.
type FarmReadQuery interface {
	FindByID(farmUID uuid.UUID) <-chan QueryResult
}

// TaskReadQuery reads a task projection by its UID.
type TaskReadQuery interface {
	FindByID(taskUID uuid.UUID) <-chan QueryResult
}
// QueryResult is the envelope delivered on every query channel: either
// Result holds the payload or Error explains the failure.
type QueryResult struct {
	Result interface{}
	Error  error
}

// CropMaterialQueryResult is the material projection embedded in crop
// query responses.
type CropMaterialQueryResult struct {
	UID           uuid.UUID `json:"uid"`
	TypeCode      string    `json:"type"`
	PlantTypeCode string    `json:"plant_type"`
	Name          string    `json:"name"`
}

// CropAreaQueryResult is the area projection embedded in crop query
// responses.
type CropAreaQueryResult struct {
	UID  uuid.UUID `json:"uid"`
	Name string    `json:"name"`
	Size struct {
		Value  float32 `json:"value"`
		Symbol string  `json:"symbol"`
	} `json:"size"`
	Type     string    `json:"type"`
	Location string    `json:"location"`
	FarmUID  uuid.UUID `json:"farm_uid"`
}

// CropAreaByAreaQueryResult describes one crop batch as seen from a
// particular area, including its container and seeding date.
type CropAreaByAreaQueryResult struct {
	UID         uuid.UUID `json:"uid"`
	BatchID     string    `json:"batch_id"`
	Inventory   Inventory `json:"inventory"`
	CreatedDate time.Time `json:"seeding_date"`
	Area        Area      `json:"area"`
	Container   Container `json:"container"`
}

// Area carries per-area crop quantities and movement/watering dates.
type Area struct {
	UID             uuid.UUID   `json:"uid"`
	Name            string      `json:"name"`
	InitialQuantity int         `json:"initial_quantity"`
	CurrentQuantity int         `json:"current_quantity"`
	InitialArea     InitialArea `json:"initial_area"`
	// LastWatered is nil when the crop has never been watered here.
	LastWatered *time.Time `json:"last_watered"`
	MovingDate  time.Time  `json:"moving_date"`
}

// InitialArea identifies the area a crop started in.
type InitialArea struct {
	UID         uuid.UUID `json:"uid"`
	Name        string    `json:"name"`
	CreatedDate time.Time `json:"created_date"`
}

// Container describes what the crop grows in.
type Container struct {
	Type     string `json:"type"`
	Quantity int    `json:"quantity"`
	Cell     int    `json:"cell"`
}

// Inventory identifies the seed/material used by a crop.
type Inventory struct {
	UID       uuid.UUID `json:"uid"`
	PlantType string    `json:"plant_type"`
	Name      string    `json:"name"`
}

// CropInformationQueryResult summarizes a farm's harvest output and
// plant variety count.
type CropInformationQueryResult struct {
	TotalHarvestProduced float32 `json:"total_harvest_produced"`
	TotalPlantVariety    int     `json:"total_plant_variety"`
}

// CropFarmQueryResult identifies a farm in crop query responses.
type CropFarmQueryResult struct {
	UID  uuid.UUID
	Name string
}

// CountTotalBatchQueryResult is a per-variety batch count.
type CountTotalBatchQueryResult struct {
	VarietyName string `json:"variety_name"`
	TotalBatch  int    `json:"total_batch"`
}

// CropTaskQueryResult is the task projection returned for crop-related
// task queries.
type CropTaskQueryResult struct {
	UID         uuid.UUID
	Title       string
	Description string
	Category    string
	Status      string
	Domain      string
	AssetUID    uuid.UUID
	MaterialUID uuid.UUID
	AreaUID     uuid.UUID
}
|
package main
import (
"fmt"
"log"
"net/http"
)
/**
* author: will fan
* created: 2019/9/1 14:33
* description:
*/
// messageHandler serves one fixed message for every request it receives.
type messageHandler struct {
	message string
}

// ServeHTTP writes the configured message verbatim.
//
// Fix: fmt.Fprintf was called with m.message as the format string, so
// any '%' in the message would be interpreted as a format verb (go vet
// printf check). fmt.Fprint writes the string as-is.
func (m *messageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, m.message)
}
// main mounts two fixed-message handlers on /welcome and /message and
// serves HTTP on :8080.
//
// Fix: the error from http.ListenAndServe (which only returns on
// failure, e.g. the port being taken) was silently discarded; it is
// now fatal so startup failures are visible.
func main() {
	mux := http.NewServeMux()
	mh1 := &messageHandler{"Welcome to Go Web Development"}
	mux.Handle("/welcome", mh1)
	mh2 := &messageHandler{"net/http is awesome"}
	mux.Handle("/message", mh2)
	log.Println("Listening...")
	log.Fatal(http.ListenAndServe(":8080", mux))
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
package schematracker_test
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/pingcap/tidb/ddl/schematracker"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/stretchr/testify/require"
)
// execCreate parses sql as a single CREATE TABLE statement and applies
// it to the tracker, failing the test on any parse or DDL error.
func execCreate(t *testing.T, tracker schematracker.SchemaTracker, sql string) {
	stmt, err := parser.New().ParseOneStmt(sql, "", "")
	require.NoError(t, err)
	require.NoError(t, tracker.CreateTable(mock.NewContext(), stmt.(*ast.CreateTableStmt)))
}
// TestNoNumLimit checks that the schema tracker does not enforce
// column-count or index-count limits: it accepts a 12000-column table,
// a 100-index table, and a further ALTER on the huge table.
func TestNoNumLimit(t *testing.T) {
	// Build "create table test.t_too_large (a1 double, ... d3000 double);"
	sql := "create table test.t_too_large ("
	cnt := 3000
	for i := 1; i <= cnt; i++ {
		sql += fmt.Sprintf("a%d double, b%d double, c%d double, d%d double", i, i, i, i)
		if i != cnt {
			sql += ","
		}
	}
	sql += ");"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)

	// 100 columns, each with its own secondary index.
	sql = "create table test.t_too_many_indexes ("
	for i := 0; i < 100; i++ {
		if i != 0 {
			sql += ","
		}
		sql += fmt.Sprintf("c%d int", i)
	}
	for i := 0; i < 100; i++ {
		sql += ","
		sql += fmt.Sprintf("key k%d(c%d)", i, i)
	}
	sql += ");"
	execCreate(t, tracker, sql)

	sql = "alter table test.t_too_large add column alter_new_col int"
	execAlter(t, tracker, sql)
}
// TestCreateTableLongIndex checks the tracker accepts an index prefix
// length (555555) far beyond TiDB's normal limit without error.
func TestCreateTableLongIndex(t *testing.T) {
	sql := "create table test.t (c1 int, c2 blob, c3 varchar(64), index idx_c2(c2(555555)));"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)
}
// execAlter parses sql as a single ALTER TABLE statement and applies it
// to the tracker, failing the test on any parse or DDL error.
func execAlter(t *testing.T, tracker schematracker.SchemaTracker, sql string) {
	stmt, err := parser.New().ParseOneStmt(sql, "", "")
	require.NoError(t, err)
	require.NoError(t, tracker.AlterTable(context.Background(), mock.NewContext(), stmt.(*ast.AlterTableStmt)))
}
// mustTableByName fetches the TableInfo for schema.table from the
// tracker, failing the test if the lookup errors.
func mustTableByName(t *testing.T, tracker schematracker.SchemaTracker, schema, table string) *model.TableInfo {
	info, err := tracker.TableByName(model.NewCIStr(schema), model.NewCIStr(table))
	require.NoError(t, err)
	return info
}
// TestAlterPK drops and re-adds a primary key, checking the tracked
// index count after each step and that a previously fetched TableInfo
// snapshot is not mutated in place.
func TestAlterPK(t *testing.T) {
	sql := "create table test.t (c1 int primary key, c2 blob);"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)

	tblInfo := mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 1, len(tblInfo.Indices))

	sql = "alter table test.t drop primary key;"
	execAlter(t, tracker, sql)
	// TableInfo should be immutable.
	require.Equal(t, 1, len(tblInfo.Indices))
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 0, len(tblInfo.Indices))

	sql = "alter table test.t add primary key(c1);"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 1, len(tblInfo.Indices))

	sql = "alter table test.t drop primary key;"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 0, len(tblInfo.Indices))
}
// TestDropColumn checks index bookkeeping when columns are dropped:
// dropping a single-column indexed column removes its index, while
// dropping one column of a two-column index leaves the index in place
// (per the assertions below).
func TestDropColumn(t *testing.T) {
	sql := "create table test.t(a int, b int auto_increment, c int, key(b))"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)

	tblInfo := mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 1, len(tblInfo.Indices))

	sql = "alter table test.t drop column b"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 0, len(tblInfo.Indices))

	sql = "alter table test.t add index idx_2_col(a, c)"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 1, len(tblInfo.Indices))

	sql = "alter table test.t drop column c"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, 1, len(tblInfo.Indices))
	require.Equal(t, 1, len(tblInfo.Columns))
}
// TestFullTextIndex checks that a FULLTEXT key is accepted without
// error.
func TestFullTextIndex(t *testing.T) {
	sql := "create table test.t (a text, fulltext key (a))"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)
}
// checkShowCreateTable renders tblInfo through SHOW CREATE TABLE and
// asserts the output equals expected.
func checkShowCreateTable(t *testing.T, tblInfo *model.TableInfo, expected string) {
	var buf bytes.Buffer
	buf.Grow(512)
	err := executor.ConstructResultOfShowCreateTable(mock.NewContext(), tblInfo, autoid.Allocators{}, &buf)
	require.NoError(t, err)
	require.Equal(t, expected, buf.String())
}
// TestIndexLength mirrors TestIndexLength from db_integration_test.go:
// prefix indexes at the length limits (768 for utf8mb4 text, 3072 for
// ascii text and blob) must render identically in SHOW CREATE TABLE
// whether declared inline in CREATE TABLE or added via ALTER TABLE.
func TestIndexLength(t *testing.T) {
	// copy TestIndexLength in db_integration_test.go
	sql := "create table test.t(a text, b text charset ascii, c blob, index(a(768)), index (b(3072)), index (c(3072)));"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)

	tblInfo := mustTableByName(t, tracker, "test", "t")
	expected := "CREATE TABLE `t` (\n" +
		"  `a` text DEFAULT NULL,\n" +
		"  `b` text CHARACTER SET ascii COLLATE ascii_bin DEFAULT NULL,\n" +
		"  `c` blob DEFAULT NULL,\n" +
		"  KEY `a` (`a`(768)),\n" +
		"  KEY `b` (`b`(3072)),\n" +
		"  KEY `c` (`c`(3072))\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
	checkShowCreateTable(t, tblInfo, expected)

	err := tracker.DeleteTable(model.NewCIStr("test"), model.NewCIStr("t"))
	require.NoError(t, err)

	// Same end state, built with ALTER TABLE ADD INDEX instead.
	sql = "create table test.t(a text, b text charset ascii, c blob);"
	execCreate(t, tracker, sql)
	sql = "alter table test.t add index (a(768))"
	execAlter(t, tracker, sql)
	sql = "alter table test.t add index (b(3072))"
	execAlter(t, tracker, sql)
	sql = "alter table test.t add index (c(3072))"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	checkShowCreateTable(t, tblInfo, expected)
}
// TestIssue5092 mirrors TestIssue5092 from db_integration_test.go: it
// exercises ADD COLUMN with AFTER/FIRST position modifiers and
// IF NOT EXISTS, then verifies the resulting column order through
// SHOW CREATE TABLE.
func TestIssue5092(t *testing.T) {
	// copy TestIssue5092 in db_integration_test.go
	sql := "create table test.t (a int)"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)

	sql = "alter table test.t add column (b int, c int)"
	execAlter(t, tracker, sql)
	// IF NOT EXISTS on already-present columns must be a no-op.
	sql = "alter table test.t add column if not exists (b int, c int)"
	execAlter(t, tracker, sql)
	sql = "alter table test.t add column b1 int after b, add column c1 int after c"
	execAlter(t, tracker, sql)
	sql = "alter table test.t add column d int after b, add column e int first, add column f int after c1, add column g int, add column h int first"
	execAlter(t, tracker, sql)
	sql = "alter table test.t add column if not exists (d int, e int), add column ff text"
	execAlter(t, tracker, sql)
	sql = "alter table test.t add column b2 int after b1, add column c2 int first"
	execAlter(t, tracker, sql)

	tblInfo := mustTableByName(t, tracker, "test", "t")
	expected := "CREATE TABLE `t` (\n" +
		"  `c2` int(11) DEFAULT NULL,\n" +
		"  `h` int(11) DEFAULT NULL,\n" +
		"  `e` int(11) DEFAULT NULL,\n" +
		"  `a` int(11) DEFAULT NULL,\n" +
		"  `b` int(11) DEFAULT NULL,\n" +
		"  `d` int(11) DEFAULT NULL,\n" +
		"  `b1` int(11) DEFAULT NULL,\n" +
		"  `b2` int(11) DEFAULT NULL,\n" +
		"  `c` int(11) DEFAULT NULL,\n" +
		"  `c1` int(11) DEFAULT NULL,\n" +
		"  `f` int(11) DEFAULT NULL,\n" +
		"  `g` int(11) DEFAULT NULL,\n" +
		"  `ff` text DEFAULT NULL\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
	checkShowCreateTable(t, tblInfo, expected)
}
// TestBitDefaultValues checks that the tracker accepts a CREATE TABLE
// covering every common column type declared as `null default null`
// (notably BIT, which historically had default-value issues).
// Only successful creation is asserted.
func TestBitDefaultValues(t *testing.T) {
	// copy TestBitDefaultValues in db_integration_test.go
	sql := `create table test.testalltypes2 (
field_1 bit null default null,
field_2 tinyint null default null,
field_3 tinyint unsigned null default null,
field_4 bigint null default null,
field_5 bigint unsigned null default null,
field_6 mediumblob null default null,
field_7 longblob null default null,
field_8 blob null default null,
field_9 tinyblob null default null,
field_10 varbinary(255) null default null,
field_11 binary(255) null default null,
field_12 mediumtext null default null,
field_13 longtext null default null,
field_14 text null default null,
field_15 tinytext null default null,
field_16 char(255) null default null,
field_17 numeric null default null,
field_18 decimal null default null,
field_19 integer null default null,
field_20 integer unsigned null default null,
field_21 int null default null,
field_22 int unsigned null default null,
field_23 mediumint null default null,
field_24 mediumint unsigned null default null,
field_25 smallint null default null,
field_26 smallint unsigned null default null,
field_27 float null default null,
field_28 double null default null,
field_29 double precision null default null,
field_30 real null default null,
field_31 varchar(255) null default null,
field_32 date null default null,
field_33 time null default null,
field_34 datetime null default null,
field_35 timestamp null default null
);`
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)
}
// TestAddExpressionIndex checks expression-index DDL in the tracker:
// adding/dropping expression and mixed indexes, unique expression indexes,
// ALTER INDEX ... INVISIBLE, auto-generated `expression_index[_N]` names
// for anonymous expression keys, and expression indexes on a partitioned
// table. Each state is verified via SHOW CREATE TABLE text.
func TestAddExpressionIndex(t *testing.T) {
	sql := "create table test.t (a int, b real);"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)
	sql = "alter table test.t add index idx((a+b))"
	execAlter(t, tracker, sql)
	// Mixed index: two expression parts plus a plain column part.
	sql = "alter table test.t add index idx_multi((a+b),(a+1), b);"
	execAlter(t, tracker, sql)
	tblInfo := mustTableByName(t, tracker, "test", "t")
	expected := "CREATE TABLE `t` (\n" +
		" `a` int(11) DEFAULT NULL,\n" +
		" `b` double DEFAULT NULL,\n" +
		" KEY `idx` ((`a` + `b`)),\n" +
		" KEY `idx_multi` ((`a` + `b`),(`a` + 1),`b`)\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
	checkShowCreateTable(t, tblInfo, expected)
	// Dropping both indexes restores the original definition.
	sql = "alter table test.t drop index idx;"
	execAlter(t, tracker, sql)
	sql = "alter table test.t drop index idx_multi;"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	expected = "CREATE TABLE `t` (\n" +
		" `a` int(11) DEFAULT NULL,\n" +
		" `b` double DEFAULT NULL\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
	checkShowCreateTable(t, tblInfo, expected)
	sql = "create table test.t2 (a varchar(10), b varchar(10));"
	execCreate(t, tracker, sql)
	sql = "alter table test.t2 add unique index ei_ab ((concat(a, b)));"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t2")
	expected = "CREATE TABLE `t2` (\n" +
		" `a` varchar(10) DEFAULT NULL,\n" +
		" `b` varchar(10) DEFAULT NULL,\n" +
		" UNIQUE KEY `ei_ab` ((concat(`a`, `b`)))\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
	checkShowCreateTable(t, tblInfo, expected)
	// Invisible indexes are rendered inside a version-gated comment.
	sql = "alter table test.t2 alter index ei_ab invisible;"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t2")
	expected = "CREATE TABLE `t2` (\n" +
		" `a` varchar(10) DEFAULT NULL,\n" +
		" `b` varchar(10) DEFAULT NULL,\n" +
		" UNIQUE KEY `ei_ab` ((concat(`a`, `b`))) /*!80000 INVISIBLE */\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
	checkShowCreateTable(t, tblInfo, expected)
	// Anonymous expression keys get generated names; note the counter skips
	// the explicitly named `idx` but keeps incrementing.
	sql = "create table test.t3(a int, key((a+1)), key((a+2)), key idx((a+3)), key((a+4)), UNIQUE KEY ((a * 2)));"
	execCreate(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t3")
	expected = "CREATE TABLE `t3` (\n" +
		" `a` int(11) DEFAULT NULL,\n" +
		" KEY `expression_index` ((`a` + 1)),\n" +
		" KEY `expression_index_2` ((`a` + 2)),\n" +
		" KEY `idx` ((`a` + 3)),\n" +
		" KEY `expression_index_3` ((`a` + 4)),\n" +
		" UNIQUE KEY `expression_index_4` ((`a` * 2))\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin"
	checkShowCreateTable(t, tblInfo, expected)
	// Expression index on a range-partitioned table.
	sql = `create table test.t4(
a int,
b varchar(100),
c int)
PARTITION BY RANGE ( a ) (
PARTITION p0 VALUES LESS THAN (6),
PARTITION p1 VALUES LESS THAN (11),
PARTITION p2 VALUES LESS THAN (16),
PARTITION p3 VALUES LESS THAN (21)
);`
	execCreate(t, tracker, sql)
	sql = "alter table test.t4 add index idx((a+c));"
	execAlter(t, tracker, sql)
	tblInfo = mustTableByName(t, tracker, "test", "t4")
	expected = "CREATE TABLE `t4` (\n" +
		" `a` int(11) DEFAULT NULL,\n" +
		" `b` varchar(100) DEFAULT NULL,\n" +
		" `c` int(11) DEFAULT NULL,\n" +
		" KEY `idx` ((`a` + `c`))\n" +
		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
		"PARTITION BY RANGE (`a`)\n" +
		"(PARTITION `p0` VALUES LESS THAN (6),\n" +
		" PARTITION `p1` VALUES LESS THAN (11),\n" +
		" PARTITION `p2` VALUES LESS THAN (16),\n" +
		" PARTITION `p3` VALUES LESS THAN (21))"
	checkShowCreateTable(t, tblInfo, expected)
}
// TestAtomicMultiSchemaChange checks that a multi-change ALTER is atomic:
// when one sub-change fails (adding a duplicate column `a`), none of the
// sub-changes may be applied, so the column count stays unchanged.
func TestAtomicMultiSchemaChange(t *testing.T) {
	sql := "create table test.t (a int);"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)
	sql = "alter table test.t add b int, add c int;"
	execAlter(t, tracker, sql)
	tblInfo := mustTableByName(t, tracker, "test", "t")
	require.Len(t, tblInfo.Columns, 3)
	// `add d` would succeed on its own, but `add a` duplicates an existing
	// column — the whole statement must fail without adding d.
	sql = "alter table test.t add d int, add a int;"
	ctx := context.Background()
	sctx := mock.NewContext()
	p := parser.New()
	stmt, err := p.ParseOneStmt(sql, "", "")
	require.NoError(t, err)
	err = tracker.AlterTable(ctx, sctx, stmt.(*ast.AlterTableStmt))
	require.True(t, infoschema.ErrColumnExists.Equal(err))
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Len(t, tblInfo.Columns, 3)
}
// TestImmutableTableInfo checks that the TableInfo returned by the tracker
// is an immutable snapshot: ALTERs executed afterwards must not mutate a
// previously fetched struct, and the new values are only visible after
// re-fetching the table.
func TestImmutableTableInfo(t *testing.T) {
	sql := "create table test.t (a varchar(20)) charset latin1;"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)
	tblInfo := mustTableByName(t, tracker, "test", "t")
	require.Equal(t, "", tblInfo.Comment)
	sql = "alter table test.t comment = '123';"
	execAlter(t, tracker, sql)
	// The snapshot fetched before the ALTER must still show the old comment.
	require.Equal(t, "", tblInfo.Comment)
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, "123", tblInfo.Comment)
	require.Equal(t, "latin1", tblInfo.Charset)
	require.Equal(t, "latin1_bin", tblInfo.Collate)
	require.Equal(t, "latin1", tblInfo.Columns[0].GetCharset())
	require.Equal(t, "latin1_bin", tblInfo.Columns[0].GetCollate())
	sql = "alter table test.t convert to character set utf8mb4 collate utf8mb4_general_ci;"
	execAlter(t, tracker, sql)
	// Again: the pre-conversion snapshot is frozen at latin1.
	require.Equal(t, "latin1", tblInfo.Charset)
	require.Equal(t, "latin1_bin", tblInfo.Collate)
	require.Equal(t, "latin1", tblInfo.Columns[0].GetCharset())
	require.Equal(t, "latin1_bin", tblInfo.Columns[0].GetCollate())
	tblInfo = mustTableByName(t, tracker, "test", "t")
	require.Equal(t, "utf8mb4", tblInfo.Charset)
	require.Equal(t, "utf8mb4_general_ci", tblInfo.Collate)
	require.Equal(t, "utf8mb4", tblInfo.Columns[0].GetCharset())
	require.Equal(t, "utf8mb4_general_ci", tblInfo.Columns[0].GetCollate())
}
// Compile-time check that mockRestrictedSQLExecutor implements
// sqlexec.RestrictedSQLExecutor.
var _ sqlexec.RestrictedSQLExecutor = (*mockRestrictedSQLExecutor)(nil)

// mockRestrictedSQLExecutor wraps a sessionctx.Context and adds no-op
// RestrictedSQLExecutor methods, for tests where the tracker needs an
// executor capable of "checking data" but no real data access happens.
type mockRestrictedSQLExecutor struct {
	sessionctx.Context
}

// ParseWithParams is a no-op stub; it always returns (nil, nil).
func (m mockRestrictedSQLExecutor) ParseWithParams(ctx context.Context, sql string, args ...interface{}) (ast.StmtNode, error) {
	return nil, nil
}

// ExecRestrictedStmt is a no-op stub; it always returns empty results.
func (m mockRestrictedSQLExecutor) ExecRestrictedStmt(ctx context.Context, stmt ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ([]chunk.Row, []*ast.ResultField, error) {
	return nil, nil, nil
}

// ExecRestrictedSQL is a no-op stub; it always returns empty results.
func (m mockRestrictedSQLExecutor) ExecRestrictedSQL(ctx context.Context, opts []sqlexec.OptionFuncAlias, sql string, args ...interface{}) ([]chunk.Row, []*ast.ResultField, error) {
	return nil, nil, nil
}
// TestModifyFromNullToNotNull checks that MODIFY COLUMN ... NOT NULL
// succeeds when the caller supplies a RestrictedSQLExecutor (required
// because tightening NULL -> NOT NULL involves a data check).
func TestModifyFromNullToNotNull(t *testing.T) {
	sql := "create table test.t (a int, b int);"
	tracker := schematracker.NewSchemaTracker(2)
	tracker.CreateTestDB()
	execCreate(t, tracker, sql)
	sql = "alter table test.t modify column a int not null;"
	ctx := context.Background()
	sctx := mock.NewContext()
	p := parser.New()
	stmt, err := p.ParseOneStmt(sql, "", "")
	require.NoError(t, err)
	// converting from NULL to NOT NULL needs to check data, so caller should provide a RestrictedSQLExecutor
	executorCtx := mockRestrictedSQLExecutor{sctx}
	err = tracker.AlterTable(ctx, executorCtx, stmt.(*ast.AlterTableStmt))
	require.NoError(t, err)
	tblInfo := mustTableByName(t, tracker, "test", "t")
	require.Len(t, tblInfo.Columns, 2)
}
|
// +build ignore
package statequery
import (
"encoding/json"
"fmt"
"time"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/kv"
"github.com/iotaledger/wasp/packages/kv/buffered"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/kv/collections"
"github.com/iotaledger/wasp/packages/webapi/model"
)
// Request describes a state query: optional general chain data plus a list
// of per-key queries.
type Request struct {
	QueryGeneralData bool
	KeyQueries       []*KeyQuery
}

// Results holds the answers to a Request.
type Results struct {
	KeyQueryResults []*QueryResult
	byKey           map[kv.Key]*QueryResult // lazy index over KeyQueryResults, built by Get
	// returned only when QueryGeneralData = true
	StateIndex uint32
	Timestamp  time.Time
	StateHash  *hashing.HashValue
	StateTxId  model.ValueTxID
	Requests   []*coretypes.RequestID
}

// KeyQuery is a single query against one key of the chain state.
type KeyQuery struct {
	Key    []byte
	Type   ValueType
	Params json.RawMessage // one of MapQueryParams, ArrayQueryParams, ...
}

// ValueType selects how the value under a key is interpreted and queried.
type ValueType string

// Supported query/value kinds; each maps to a case in KeyQuery.Execute.
const (
	ValueTypeScalar        = ValueType("scalar")
	ValueTypeArray         = ValueType("array")
	ValueTypeMap           = ValueType("map")
	ValueTypeMapElement    = ValueType("map-elem")
	ValueTypeTLogSlice     = ValueType("tlog_slice")
	ValueTypeTLogSliceData = ValueType("tlog_slice_data")
)
// TLogSliceQueryParams request slice of timestamped log. If FromTs or ToTs is 0,
// it is considered 'earliest' and 'latest' respectively.
// For 0,0 slice corresponds to the whole log
type TLogSliceQueryParams struct {
	FromTs int64
	ToTs   int64
}

// TLogSliceDataQueryParams request data for the slice
type TLogSliceDataQueryParams struct {
	FromIndex  uint32
	ToIndex    uint32
	Descending bool
}

// MapQueryParams limits how many entries of a map are returned.
type MapQueryParams struct {
	Limit uint32
}

// MapElementQueryParams selects a single element of a map by its key.
type MapElementQueryParams struct {
	Key []byte
}

// ArrayQueryParams selects the half-open index range [From, To) of an array.
type ArrayQueryParams struct {
	From uint16
	To   uint16
}

// QueryResult is the answer to one KeyQuery; Value is JSON whose shape
// depends on Type.
type QueryResult struct {
	Key   []byte
	Type  ValueType
	Value json.RawMessage // one of []byte, MapResult, ArrayResult, ...
}

// KeyValuePair is one map entry in a MapResult.
type KeyValuePair struct {
	Key   []byte
	Value []byte
}

// MapResult carries the total map length and up to Limit entries.
type MapResult struct {
	Len     uint32
	Entries []KeyValuePair
}

// MapElementResult carries the value of one map element.
type MapElementResult struct {
	Value []byte
}

// ArrayResult carries the total array length and the requested values.
type ArrayResult struct {
	Len    uint16
	Values [][]byte
}

// TLogSliceResult describes a slice of a timestamped log; index/timestamp
// fields are meaningful only when IsNotEmpty is true.
type TLogSliceResult struct {
	IsNotEmpty bool
	FirstIndex uint32
	LastIndex  uint32
	Earliest   int64
	Latest     int64
}

// TLogSliceDataResult carries the raw records of a timestamped-log slice.
type TLogSliceDataResult struct {
	Values [][]byte
}
// NewRequest returns an empty state query request.
func NewRequest() *Request {
	return new(Request)
}

// AddGeneralData asks the responder to include general state information
// (index, timestamp, hash, tx id, requests) in the results.
func (q *Request) AddGeneralData() {
	q.QueryGeneralData = true
}

// AddScalar queues a query for the plain value stored under key.
func (q *Request) AddScalar(key kv.Key) {
	query := &KeyQuery{Key: []byte(key), Type: ValueTypeScalar}
	q.KeyQueries = append(q.KeyQueries, query)
}
// AddArray queues a query for array elements in the index range [from, to).
func (q *Request) AddArray(key kv.Key, from uint16, to uint16) {
	p := &ArrayQueryParams{From: from, To: to}
	params, _ := json.Marshal(p) // marshal of a plain struct; error ignored
	q.KeyQueries = append(q.KeyQueries, &KeyQuery{
		Key:    []byte(key),
		Type:   ValueTypeArray,
		Params: json.RawMessage(params),
	})
}

// AddMap queues a query for up to limit entries of the map stored under key.
func (q *Request) AddMap(key kv.Key, limit uint32) {
	p := &MapQueryParams{Limit: limit}
	params, _ := json.Marshal(p) // marshal of a plain struct; error ignored
	q.KeyQueries = append(q.KeyQueries, &KeyQuery{
		Key:    []byte(key),
		Type:   ValueTypeMap,
		Params: json.RawMessage(params),
	})
}

// AddMapElement queues a query for a single element of the map stored under
// mapKey, selected by elemKey.
func (q *Request) AddMapElement(mapKey kv.Key, elemKey []byte) {
	p := &MapElementQueryParams{Key: elemKey}
	params, _ := json.Marshal(p) // marshal of a plain struct; error ignored
	q.KeyQueries = append(q.KeyQueries, &KeyQuery{
		Key:    []byte(mapKey),
		Type:   ValueTypeMapElement,
		Params: json.RawMessage(params),
	})
}

// AddTLogSlice queues a query for the [fromTs, toTs] slice of the
// timestamped log under key (0 means earliest/latest respectively).
func (q *Request) AddTLogSlice(key kv.Key, fromTs, toTs int64) {
	p := TLogSliceQueryParams{
		FromTs: fromTs,
		ToTs:   toTs,
	}
	params, _ := json.Marshal(p) // marshal of a plain struct; error ignored
	q.KeyQueries = append(q.KeyQueries, &KeyQuery{
		Key:    []byte(key),
		Type:   ValueTypeTLogSlice,
		Params: json.RawMessage(params),
	})
}

// AddTLogSliceData queues a query for the raw records of the timestamped
// log under key, in the index range [fromIndex, toIndex].
func (q *Request) AddTLogSliceData(key kv.Key, fromIndex, toIndex uint32, descending bool) {
	p := TLogSliceDataQueryParams{
		FromIndex:  fromIndex,
		ToIndex:    toIndex,
		Descending: descending,
	}
	params, _ := json.Marshal(p) // marshal of a plain struct; error ignored
	q.KeyQueries = append(q.KeyQueries, &KeyQuery{
		Key:    []byte(key),
		Type:   ValueTypeTLogSliceData,
		Params: json.RawMessage(params),
	})
}
// MustBytes decodes the result value as a raw byte slice; panics on
// malformed JSON.
func (r *QueryResult) MustBytes() []byte {
	var b []byte
	err := json.Unmarshal(r.Value, &b)
	if err != nil {
		panic(err)
	}
	return b
}

// MustInt64 decodes the result as an int64; the bool reports presence.
// Panics on a decode error.
func (r *QueryResult) MustInt64() (int64, bool) {
	n, ok, err := codec.DecodeInt64(r.MustBytes())
	if err != nil {
		panic(err)
	}
	return n, ok
}

// MustString decodes the result as a string; the bool reports presence.
// NOTE(review): unlike the other Must* helpers, the decode error is
// silently dropped here — confirm this asymmetry is intended.
func (r *QueryResult) MustString() (string, bool) {
	s, ok, _ := codec.DecodeString(r.MustBytes())
	return s, ok
}

// MustAddress decodes the result as an address; panics on a decode error.
func (r *QueryResult) MustAddress() address.Address {
	v, _, err := codec.DecodeAddress(r.MustBytes())
	if err != nil {
		panic(err)
	}
	return v
}

// MustHashValue decodes the result as a hash value; panics on a decode error.
func (r *QueryResult) MustHashValue() *hashing.HashValue {
	v, _, err := codec.DecodeHashValue(r.MustBytes())
	if err != nil {
		panic(err)
	}
	return v
}

// MustArrayResult decodes the result as an ArrayResult; panics on
// malformed JSON.
func (r *QueryResult) MustArrayResult() *ArrayResult {
	var ar ArrayResult
	err := json.Unmarshal(r.Value, &ar)
	if err != nil {
		panic(err)
	}
	return &ar
}

// MustMapResult decodes the result as a MapResult; panics on malformed JSON.
func (r *QueryResult) MustMapResult() *MapResult {
	var dr MapResult
	err := json.Unmarshal(r.Value, &dr)
	if err != nil {
		panic(err)
	}
	return &dr
}

// MustMapElementResult decodes the result as a MapElementResult and returns
// its value, or nil when the element was absent. Panics on malformed JSON.
func (r *QueryResult) MustMapElementResult() []byte {
	var dr *MapElementResult
	err := json.Unmarshal(r.Value, &dr)
	if err != nil {
		panic(err)
	}
	if dr == nil {
		return nil
	}
	return dr.Value
}

// MustTLogSliceResult decodes the result as a TLogSliceResult; panics on
// malformed JSON.
func (r *QueryResult) MustTLogSliceResult() *TLogSliceResult {
	var sr TLogSliceResult
	err := json.Unmarshal(r.Value, &sr)
	if err != nil {
		panic(err) // TODO panicing on wrong external data?
	}
	return &sr
}

// MustTLogSliceDataResult decodes the result as a TLogSliceDataResult;
// panics on malformed JSON.
func (r *QueryResult) MustTLogSliceDataResult() *TLogSliceDataResult {
	var sr TLogSliceDataResult
	err := json.Unmarshal(r.Value, &sr)
	if err != nil {
		panic(err)
	}
	return &sr
}
// Get returns the query result recorded for key, or nil when there is none.
// The key->result index is built lazily on the first call.
func (r *Results) Get(key kv.Key) *QueryResult {
	if r.byKey == nil {
		index := make(map[kv.Key]*QueryResult, len(r.KeyQueryResults))
		for _, res := range r.KeyQueryResults {
			index[kv.Key(res.Key)] = res
		}
		r.byKey = index
	}
	return r.byKey[key]
}
// Execute runs the query against vars and returns a JSON-encoded
// QueryResult whose shape matches q.Type (see the ValueType constants).
// Returns (nil, nil) for an absent map element; returns an error for an
// unknown type or any storage/decoding failure.
func (q *KeyQuery) Execute(vars buffered.BufferedKVStore) (*QueryResult, error) {
	key := kv.Key(q.Key)
	switch q.Type {
	case ValueTypeScalar:
		// Raw value under the key, as-is.
		value, err := vars.Get(key)
		if err != nil {
			return nil, err
		}
		return q.makeResult(value)
	case ValueTypeArray:
		var params ArrayQueryParams
		err := json.Unmarshal(q.Params, &params)
		if err != nil {
			return nil, err
		}
		arr := collections.NewArray(vars, string(key))
		size, err := arr.Len()
		if err != nil {
			return nil, err
		}
		// Collect elements in [From, min(size, To)); Len reports the full
		// array size regardless of the requested window.
		values := make([][]byte, 0)
		for i := params.From; i < size && i < params.To; i++ {
			v, err := arr.GetAt(i)
			if err != nil {
				return nil, err
			}
			values = append(values, v)
		}
		return q.makeResult(ArrayResult{Len: size, Values: values})
	case ValueTypeMap:
		var params MapQueryParams
		err := json.Unmarshal(q.Params, &params)
		if err != nil {
			return nil, err
		}
		m := collections.NewMap(vars, string(key))
		entries := make([]KeyValuePair, 0)
		// Iterate until Limit entries are collected; Len still reports the
		// full map size.
		err = m.Iterate(func(elemKey []byte, value []byte) bool {
			entries = append(entries, KeyValuePair{Key: elemKey, Value: value})
			return len(entries) < int(params.Limit)
		})
		if err != nil {
			return nil, err
		}
		n, err := m.Len()
		if err != nil {
			return nil, err
		}
		return q.makeResult(MapResult{Len: n, Entries: entries})
	case ValueTypeMapElement:
		var params MapElementQueryParams
		err := json.Unmarshal(q.Params, &params)
		if err != nil {
			return nil, err
		}
		m := collections.NewMap(vars, string(key))
		v, err := m.GetAt(params.Key)
		if err != nil {
			return nil, err
		}
		// Absent element: no result and no error.
		if v == nil {
			return nil, nil
		}
		return q.makeResult(MapElementResult{Value: v})
	case ValueTypeTLogSlice:
		var params TLogSliceQueryParams
		err := json.Unmarshal(q.Params, &params)
		if err != nil {
			return nil, err
		}
		tlog := collections.NewTimestampedLog(vars, key)
		tsl, err := tlog.TakeTimeSlice(params.FromTs, params.ToTs)
		if err != nil {
			return nil, err
		}
		if tsl.IsEmpty() {
			// Zero-value result: IsNotEmpty == false signals an empty slice.
			return q.makeResult(TLogSliceResult{})
		}
		ret := TLogSliceResult{
			IsNotEmpty: true,
			Earliest:   tsl.Earliest(),
			Latest:     tsl.Latest(),
		}
		ret.FirstIndex, ret.LastIndex = tsl.FromToIndices()
		return q.makeResult(ret)
	case ValueTypeTLogSliceData:
		var params TLogSliceDataQueryParams
		err := json.Unmarshal(q.Params, &params)
		if err != nil {
			return nil, err
		}
		tlog := collections.NewTimestampedLog(vars, key)
		ret := TLogSliceDataResult{}
		ret.Values, err = tlog.LoadRecordsRaw(params.FromIndex, params.ToIndex, params.Descending)
		if err != nil {
			return nil, err
		}
		return q.makeResult(ret)
	}
	return nil, fmt.Errorf("No handler for type %s", q.Type)
}
// makeResult JSON-encodes value and wraps it in a QueryResult that echoes
// the query's key and type.
func (q *KeyQuery) makeResult(value interface{}) (*QueryResult, error) {
	encoded, err := json.Marshal(value)
	if err != nil {
		return nil, err
	}
	res := &QueryResult{Key: q.Key, Type: q.Type}
	res.Value = json.RawMessage(encoded)
	return res, nil
}
|
package main
import (
"fmt"
)
// main demonstrates rob on a small example.
func main() {
	houses := []int{2, 1, 2, 4}
	fmt.Println(rob(houses))
}
// rob returns the maximum sum obtainable from nums when no two adjacent
// elements may both be chosen (house-robber DP, O(n) time, O(1) space).
func rob(nums []int) int {
	switch len(nums) {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	// prev = best up to i-2, best = best up to i-1.
	prev := nums[0]
	best := nums[0]
	if nums[1] > best {
		best = nums[1]
	}
	for _, v := range nums[2:] {
		take := prev + v
		prev = best
		if take > best {
			best = take
		}
	}
	return best
}
// max returns the larger of x and y.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
|
package main
import "fmt"
// printSlice reports a slice's length, capacity and contents on stdout.
func printSlice(x []int) {
	length, capacity := len(x), cap(x)
	fmt.Printf("len=%d cap=%d slice=%v\n", length, capacity, x)
}
// main demonstrates slice basics: literals, sub-slicing, make with
// capacity, append growth, copy, and iterating with range.
func main() {
	/* create a slice */
	numbers := []int{0, 1, 2, 3, 4, 5, 6, 7, 8}
	printSlice(numbers)
	/* print the original slice */
	fmt.Println("numbers ==", numbers)
	/* print the sub-slice from index 1 (inclusive) to 4 (exclusive) */
	fmt.Println("numbers[1:4] ==", numbers[1:4])
	/* the lower bound defaults to 0 */
	fmt.Println("numbers[:3] ==", numbers[:3])
	/* the upper bound defaults to len(s) */
	fmt.Println("numbers[4:] ==", numbers[4:])
	numbers1 := make([]int, 0, 5)
	printSlice(numbers1)
	/* print the sub-slice from index 0 (inclusive) to 2 (exclusive) */
	number2 := numbers[:2]
	printSlice(number2)
	/* print the sub-slice from index 2 (inclusive) to 5 (exclusive) */
	number3 := numbers[2:5]
	printSlice(number3)
	/* append elements to the slice one at a time */
	numbers = append(numbers, 13234234)
	numbers = append(numbers, 23)
	numbers = append(numbers, 6767)
	numbers = append(numbers, 23423)
	numbers = append(numbers, 1222)
	printSlice(numbers)
	/* create slice numbers3 with twice the capacity of the previous slice */
	numbers3 := make([]int, len(numbers), (cap(numbers))*2)
	/* copy the contents of numbers into numbers3 */
	copy(numbers3, numbers)
	printSlice(numbers3)
	// Sum a slice using range; arrays work the same way.
	nums := []int{2, 3, 4}
	sum := 0
	for _, num := range nums {
		sum += num
	}
	fmt.Println("sum:", sum)
	// range over a slice yields both index and value. Above we dropped the
	// index with the blank identifier "_"; here we keep it because we need it.
	for i, num := range nums {
		if num == 3 {
			fmt.Println("index:", i)
		}
	}
}
|
// Package types contains generic data types for use with SQL
package types
|
package cloud
import (
"bytes"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/devinmcgloin/sail/pkg/slog"
)
const (
	// bucketName is the object-storage bucket all sketches are uploaded to.
	bucketName = "sail-content"
)

// Upload stores the given sketch as a PNG object under path in the
// sail-content bucket using the S3 API. Errors from session creation or the
// upload itself are logged via slog and returned to the caller.
// NOTE(review): the doc previously said "DO Spaces", but the config targets
// AWS region us-east-1 with no custom endpoint — confirm which backend is
// intended.
func Upload(sketch *bytes.Buffer, path string) error {
	config := &aws.Config{Region: aws.String("us-east-1")}
	sess, err := session.NewSession(config)
	if err != nil {
		slog.ErrorPrintf("error while constructing new aws session %s", err)
		return err
	}
	svc := s3.New(sess)
	fileBytes := bytes.NewReader(sketch.Bytes())
	params := &s3.PutObjectInput{
		Bucket:        aws.String(bucketName),
		Key:           aws.String(path),
		Body:          fileBytes,
		ContentLength: aws.Int64(int64(sketch.Len())),
		ContentType:   aws.String("image/png"),
	}
	// (A dead `if err != nil` check used to sit here: err was necessarily
	// nil at this point because the session error already returned above.)
	if _, err = svc.PutObject(params); err != nil {
		slog.ErrorPrintf("Error while uploading to aws %s", err)
		return err
	}
	return nil
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bufio"
"bytes"
"context"
"fmt"
"os"
"runtime/pprof"
"strings"
"testing"
"time"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/terror"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// parseLog runs retriever.parseSlowLog over reader and returns the rows of
// the first task it produces. Returns (nil, nil) when the task channel is
// closed without yielding a task.
func parseLog(retriever *slowQueryRetriever, sctx sessionctx.Context, reader *bufio.Reader) ([][]types.Datum, error) {
	retriever.taskList = make(chan slowLogTask, 100)
	ctx := context.Background()
	// 64: log-parsing batch size passed to parseSlowLog — presumably a
	// tuning value; any positive batch size should work for these tests.
	retriever.parseSlowLog(ctx, sctx, reader, 64)
	task, ok := <-retriever.taskList
	if !ok {
		return nil, nil
	}
	var rows [][]types.Datum
	var err error
	result := <-task.resultCh
	rows, err = result.rows, result.err
	return rows, err
}
// newSlowQueryRetriever builds a slowQueryRetriever whose output columns
// are taken from the INFORMATION_SCHEMA.SLOW_QUERY table definition of an
// otherwise empty infoschema.
func newSlowQueryRetriever() (*slowQueryRetriever, error) {
	newISBuilder, err := infoschema.NewBuilder(nil, nil).InitWithDBInfos(nil, nil, nil, 0)
	if err != nil {
		return nil, err
	}
	is := newISBuilder.Build()
	tbl, err := is.TableByName(util.InformationSchemaName, model.NewCIStr(infoschema.TableSlowQuery))
	if err != nil {
		return nil, err
	}
	return &slowQueryRetriever{outputCols: tbl.Meta().Columns}, nil
}
// parseSlowLog is the test entry point: it builds a retriever, initializes
// it with sctx, and parses the slow log supplied via reader into datum rows.
func parseSlowLog(sctx sessionctx.Context, reader *bufio.Reader) ([][]types.Datum, error) {
	retriever, err := newSlowQueryRetriever()
	if err != nil {
		return nil, err
	}
	// Ignore the error is ok for test.
	terror.Log(retriever.initialize(context.Background(), sctx))
	rows, err := parseLog(retriever, sctx, reader)
	return rows, err
}
// TestParseSlowLogPanic checks that a panic injected into the parser (via
// the errorMockParseSlowLogPanic failpoint) is recovered and surfaced to
// the caller as the error "panic test" instead of crashing.
func TestParseSlowLogPanic(t *testing.T) {
	slowLogStr :=
		`# Time: 2019-04-28T15:24:04.309074+08:00
# Txn_start_ts: 405888132465033227
# User@Host: root[root] @ localhost [127.0.0.1]
# Query_time: 0.216905
# Cop_time: 0.38 Process_time: 0.021 Request_count: 1 Total_keys: 637 Processed_keys: 436
# Is_internal: true
# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772
# Stats: t1:1,t2:2
# Cop_proc_avg: 0.1 Cop_proc_p90: 0.2 Cop_proc_max: 0.03 Cop_proc_addr: 127.0.0.1:20160
# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160
# Mem_max: 70724
# Disk_max: 65536
# Plan_from_cache: true
# Plan_from_binding: true
# Succ: false
# Plan_digest: 60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4
# Prev_stmt: update t set i = 1;
use test;
select * from t;`
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/errorMockParseSlowLogPanic", `return(true)`))
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/errorMockParseSlowLogPanic"))
	}()
	reader := bufio.NewReader(bytes.NewBufferString(slowLogStr))
	loc, err := time.LoadLocation("Asia/Shanghai")
	require.NoError(t, err)
	sctx := mock.NewContext()
	sctx.GetSessionVars().TimeZone = loc
	_, err = parseSlowLog(sctx, reader)
	require.Error(t, err)
	require.Equal(t, err.Error(), "panic test")
}
// datumRowToString renders one parsed slow-log row as a comma-separated
// string of its datum values, failing the test on conversion errors.
func datumRowToString(t *testing.T, row []types.Datum) string {
	var sb strings.Builder
	for i, value := range row {
		str, err := value.ToString()
		require.NoError(t, err)
		if i > 0 {
			sb.WriteString(",")
		}
		sb.WriteString(str)
	}
	return sb.String()
}

// TestParseSlowLogFile checks end-to-end parsing of a full slow-log entry,
// repeated parsing of the same input (issue 20928), SQL text containing
// '# ', both supported time formats, and warning generation for a field
// that fails to parse.
func TestParseSlowLogFile(t *testing.T) {
	slowLogStr :=
		`# Time: 2019-04-28T15:24:04.309074+08:00
# Txn_start_ts: 405888132465033227
# User@Host: root[root] @ localhost [127.0.0.1]
# Session_alias: alias123
# Exec_retry_time: 0.12 Exec_retry_count: 57
# Query_time: 0.216905
# Cop_time: 0.38 Process_time: 0.021 Request_count: 1 Total_keys: 637 Processed_keys: 436
# Rocksdb_delete_skipped_count: 10 Rocksdb_key_skipped_count: 10 Rocksdb_block_cache_hit_count: 10 Rocksdb_block_read_count: 10 Rocksdb_block_read_byte: 100
# Is_internal: true
# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772
# Stats: t1:1,t2:2
# Cop_proc_avg: 0.1 Cop_proc_p90: 0.2 Cop_proc_max: 0.03 Cop_proc_addr: 127.0.0.1:20160
# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160
# Cop_backoff_regionMiss_total_times: 200 Cop_backoff_regionMiss_total_time: 0.2 Cop_backoff_regionMiss_max_time: 0.2 Cop_backoff_regionMiss_max_addr: 127.0.0.1 Cop_backoff_regionMiss_avg_time: 0.2 Cop_backoff_regionMiss_p90_time: 0.2
# Cop_backoff_rpcPD_total_times: 200 Cop_backoff_rpcPD_total_time: 0.2 Cop_backoff_rpcPD_max_time: 0.2 Cop_backoff_rpcPD_max_addr: 127.0.0.1 Cop_backoff_rpcPD_avg_time: 0.2 Cop_backoff_rpcPD_p90_time: 0.2
# Cop_backoff_rpcTiKV_total_times: 200 Cop_backoff_rpcTiKV_total_time: 0.2 Cop_backoff_rpcTiKV_max_time: 0.2 Cop_backoff_rpcTiKV_max_addr: 127.0.0.1 Cop_backoff_rpcTiKV_avg_time: 0.2 Cop_backoff_rpcTiKV_p90_time: 0.2
# Mem_max: 70724
# Disk_max: 65536
# Plan_from_cache: true
# Plan_from_binding: true
# Succ: false
# IsExplicitTxn: true
# Plan_digest: 60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4
# Prev_stmt: update t set i = 1;
use test;
select * from t;`
	reader := bufio.NewReader(bytes.NewBufferString(slowLogStr))
	loc, err := time.LoadLocation("Asia/Shanghai")
	require.NoError(t, err)
	ctx := mock.NewContext()
	ctx.GetSessionVars().TimeZone = loc
	rows, err := parseSlowLog(ctx, reader)
	require.NoError(t, err)
	require.Len(t, rows, 1)
	expectRecordString := `2019-04-28 15:24:04.309074,` +
		`405888132465033227,root,localhost,0,alias123,57,0.12,0.216905,` +
		`0,0,0,0,0,0,0,0,0,0,0,0,,0,0,0,0,0,0,0.38,0.021,0,0,0,1,637,0,10,10,10,10,100,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,` +
		`0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,65536,0,0,0,0,0,,` +
		`Cop_backoff_regionMiss_total_times: 200 Cop_backoff_regionMiss_total_time: 0.2 Cop_backoff_regionMiss_max_time: 0.2 Cop_backoff_regionMiss_max_addr: 127.0.0.1 Cop_backoff_regionMiss_avg_time: 0.2 Cop_backoff_regionMiss_p90_time: 0.2 Cop_backoff_rpcPD_total_times: 200 Cop_backoff_rpcPD_total_time: 0.2 Cop_backoff_rpcPD_max_time: 0.2 Cop_backoff_rpcPD_max_addr: 127.0.0.1 Cop_backoff_rpcPD_avg_time: 0.2 Cop_backoff_rpcPD_p90_time: 0.2 Cop_backoff_rpcTiKV_total_times: 200 Cop_backoff_rpcTiKV_total_time: 0.2 Cop_backoff_rpcTiKV_max_time: 0.2 Cop_backoff_rpcTiKV_max_addr: 127.0.0.1 Cop_backoff_rpcTiKV_avg_time: 0.2 Cop_backoff_rpcTiKV_p90_time: 0.2,` +
		`0,0,1,0,1,1,0,,60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4,` +
		`,update t set i = 1;,select * from t;`
	require.Equal(t, expectRecordString, datumRowToString(t, rows[0]))
	// Issue 20928: parsing the identical input again must produce the same row.
	reader = bufio.NewReader(bytes.NewBufferString(slowLogStr))
	rows, err = parseSlowLog(ctx, reader)
	require.NoError(t, err)
	require.Len(t, rows, 1)
	require.Equal(t, expectRecordString, datumRowToString(t, rows[0]))
	// fix sql contain '# ' bug
	slowLog := bytes.NewBufferString(
		`# Time: 2019-04-28T15:24:04.309074+08:00
select a# from t;
# Time: 2019-01-24T22:32:29.313255+08:00
# Txn_start_ts: 405888132465033227
# Query_time: 0.216905
# Process_time: 0.021 Request_count: 1 Total_keys: 637 Processed_keys: 436
# Is_internal: true
# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772
# Stats: t1:1,t2:2
# Succ: false
select * from t;
`)
	reader = bufio.NewReader(slowLog)
	_, err = parseSlowLog(ctx, reader)
	require.NoError(t, err)
	// test for time format compatibility.
	slowLog = bytes.NewBufferString(
		`# Time: 2019-04-28T15:24:04.309074+08:00
select * from t;
# Time: 2019-04-24-19:41:21.716221 +0800
select * from t;
`)
	reader = bufio.NewReader(slowLog)
	rows, err = parseSlowLog(ctx, reader)
	require.NoError(t, err)
	require.Len(t, rows, 2)
	t0Str, err := rows[0][0].ToString()
	require.NoError(t, err)
	require.Equal(t, t0Str, "2019-04-28 15:24:04.309074")
	t1Str, err := rows[1][0].ToString()
	require.NoError(t, err)
	require.Equal(t, t1Str, "2019-04-24 19:41:21.716221")
	// Add parse error check.
	slowLog = bytes.NewBufferString(
		`# Time: 2019-04-28T15:24:04.309074+08:00
# Succ: abc
select * from t;
`)
	reader = bufio.NewReader(slowLog)
	_, err = parseSlowLog(ctx, reader)
	require.NoError(t, err)
	warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
	require.Len(t, warnings, 1)
	require.Equal(t, warnings[0].Err.Error(), "Parse slow log at line 2, failed field is Succ, failed value is abc, error is strconv.ParseBool: parsing \"abc\": invalid syntax")
}
// TestParseSlowLogFileSerial checks the over-long-line guard in the slow
// log parser. It changes the global variable.MaxOfMaxAllowedPacket, so it
// must be stayed in SerialSuite.
func TestParseSlowLogFileSerial(t *testing.T) {
	loc, err := time.LoadLocation("Asia/Shanghai")
	require.NoError(t, err)
	ctx := mock.NewContext()
	ctx.GetSessionVars().TimeZone = loc
	// test for bufio.Scanner: token too long.
	slowLog := bytes.NewBufferString(
		`# Time: 2019-04-28T15:24:04.309074+08:00
select * from t;
# Time: 2019-04-24-19:41:21.716221 +0800
`)
	originValue := variable.MaxOfMaxAllowedPacket
	variable.MaxOfMaxAllowedPacket = 65536
	// Restore the global even if a require below aborts the test; the
	// previous version only restored it on the success path.
	defer func() {
		variable.MaxOfMaxAllowedPacket = originValue
	}()
	sql := strings.Repeat("x", int(variable.MaxOfMaxAllowedPacket+1))
	slowLog.WriteString(sql)
	reader := bufio.NewReader(slowLog)
	_, err = parseSlowLog(ctx, reader)
	require.Error(t, err)
	require.EqualError(t, err, "single line length exceeds limit: 65536")
	// With the original (larger) limit the same line parses cleanly.
	variable.MaxOfMaxAllowedPacket = originValue
	reader = bufio.NewReader(slowLog)
	_, err = parseSlowLog(ctx, reader)
	require.NoError(t, err)
}
// TestSlowLogParseTime checks that ParseTime handles the zoned slow-log
// time format, that it agrees with time.ParseInLocation on the same
// instant, and that formatting round-trips back to the original string.
func TestSlowLogParseTime(t *testing.T) {
	t1Str := "2019-01-24T22:32:29.313255+08:00"
	t2Str := "2019-01-24T22:32:29.313255"
	t1, err := ParseTime(t1Str)
	require.NoError(t, err)
	loc, err := time.LoadLocation("Asia/Shanghai")
	require.NoError(t, err)
	t2, err := time.ParseInLocation("2006-01-02T15:04:05.999999999", t2Str, loc)
	require.NoError(t, err)
	require.Equal(t, t1.Unix(), t2.Unix())
	// Round trip: formatting in the same zone must reproduce the input.
	t1Format := t1.In(loc).Format(logutil.SlowLogTimeFormat)
	require.Equal(t, t1Format, t1Str)
}
// TestFixParseSlowLogFile bugfix
// sql select * from INFORMATION_SCHEMA.SLOW_QUERY limit 1;
// ERROR 1105 (HY000): string "2019-05-12-11:23:29.61474688" doesn't has a prefix that matches format "2006-01-02-15:04:05.999999999 -0700", err: parsing time "2019-05-12-11:23:29.61474688" as "2006-01-02-15:04:05.999999999 -0700": cannot parse "" as "-0700"
// TestFixParseSlowLogFile: a "# Time:" value in the
// "2006-01-02-15:04:05.999999999 -0700" layout must parse, and a malformed
// field value must produce a warning rather than a hard error.
func TestFixParseSlowLogFile(t *testing.T) {
	slowLog := bytes.NewBufferString(
		`# Time: 2019-05-12-11:23:29.614327491 +0800
# Txn_start_ts: 405888132465033227
# Query_time: 0.216905
# Process_time: 0.021 Request_count: 1 Total_keys: 637 Processed_keys: 436
# Is_internal: true
# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772
# Stats: t1:1,t2:2
# Cop_proc_avg: 0.1 Cop_proc_p90: 0.2 Cop_proc_max: 0.03
# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8
# Mem_max: 70724
select * from t
# Time: 2019-05-12-11:23:29.614327491 +0800
# Txn_start_ts: 405888132465033227
# Query_time: 0.216905
# Process_time: 0.021 Request_count: 1 Total_keys: 637 Processed_keys: 436
# Is_internal: true
# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772
# Stats: t1:1,t2:2
# Cop_proc_avg: 0.1 Cop_proc_p90: 0.2 Cop_proc_max: 0.03
# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8
# Mem_max: 70724
# Plan_digest: 60e9378c746d9a2be1c791047e008967cf252eb6de9167ad3aa6098fa2d523f4
select * from t;`)
	scanner := bufio.NewReader(slowLog)
	loc, err := time.LoadLocation("Asia/Shanghai")
	require.NoError(t, err)
	ctx := mock.NewContext()
	ctx.GetSessionVars().TimeZone = loc
	_, err = parseSlowLog(ctx, scanner)
	require.NoError(t, err)
	// Test parser error.
	// A bad Txn_start_ts value is surfaced as a warning, not an error.
	slowLog = bytes.NewBufferString(
		`# Time: 2019-05-12-11:23:29.614327491 +0800
# Txn_start_ts: 405888132465033227#
select * from t;
`)
	scanner = bufio.NewReader(slowLog)
	_, err = parseSlowLog(ctx, scanner)
	require.NoError(t, err)
	warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
	require.Len(t, warnings, 1)
	require.Equal(t, warnings[0].Err.Error(), "Parse slow log at line 2, failed field is Txn_start_ts, failed value is 405888132465033227#, error is strconv.ParseUint: parsing \"405888132465033227#\": invalid syntax")
}
// TestSlowQueryRetriever checks that initialize selects exactly the
// slow-log files whose content overlaps the extractor's [startTime,
// endTime] range, and that parsing the first selected file yields the
// expected queries in order.
func TestSlowQueryRetriever(t *testing.T) {
	logData0 := ""
	logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;`
	logData2 := `
# Time: 2020-02-16T18:00:01.000000+08:00
select 3;
# Time: 2020-02-16T18:00:05.000000+08:00
select 4;`
	logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 5;
# Time: 2020-02-17T18:00:05.000000+08:00
select 6;
# Time: 2020-04-15T18:00:05.299063744+08:00
select 7;`
	logData := []string{logData0, logData1, logData2, logData3}
	fileName0 := "tidb-slow-retriever-2020-02-14T19-04-05.01.log"
	fileName1 := "tidb-slow-retriever-2020-02-15T19-04-05.01.log"
	fileName2 := "tidb-slow-retriever-2020-02-16T19-04-05.01.log"
	fileName3 := "tidb-slow-retriever.log"
	defer config.RestoreFunc()()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.Log.SlowQueryFile = fileName3
	})
	fileNames := []string{fileName0, fileName1, fileName2, fileName3}
	prepareLogs(t, logData, fileNames)
	defer func() {
		removeFiles(fileNames)
	}()
	cases := []struct {
		startTime string
		endTime   string
		files     []string
		querys    []string
	}{
		{
			startTime: "2020-02-15T18:00:00.000000+08:00",
			endTime:   "2020-02-17T20:00:00.000000+08:00",
			files:     []string{fileName1, fileName2, fileName3},
			querys: []string{
				"select 1;",
				"select 2;",
				"select 3;",
				"select 4;",
				"select 5;",
				"select 6;",
			},
		},
		{
			startTime: "2020-02-15T18:00:02.000000+08:00",
			endTime:   "2020-02-16T20:00:00.000000+08:00",
			files:     []string{fileName1, fileName2, fileName3},
			querys: []string{
				"select 2;",
				"select 3;",
				"select 4;",
				"select 5;",
			},
		},
		{
			startTime: "2020-02-16T18:00:03.000000+08:00",
			endTime:   "2020-02-16T18:59:00.000000+08:00",
			files:     []string{fileName2},
			querys: []string{
				"select 4;",
			},
		},
		{
			startTime: "2020-02-16T18:00:03.000000+08:00",
			endTime:   "2020-02-16T20:00:00.000000+08:00",
			files:     []string{fileName2, fileName3},
			querys: []string{
				"select 4;",
				"select 5;",
			},
		},
		{
			startTime: "2020-02-16T19:00:00.000000+08:00",
			endTime:   "2020-02-17T17:00:00.000000+08:00",
			files:     []string{fileName3},
			querys: []string{
				"select 5;",
			},
		},
		// A range before any log entry selects no files.
		{
			startTime: "2010-01-01T00:00:00.000000+08:00",
			endTime:   "2010-01-01T01:00:00.000000+08:00",
			files:     []string{},
		},
		// startTime after endTime also selects no files.
		{
			startTime: "2020-03-01T00:00:00.000000+08:00",
			endTime:   "2010-03-01T01:00:00.000000+08:00",
			files:     []string{},
		},
		// Empty times disable the extractor: only the configured current
		// slow query file is read.
		{
			startTime: "",
			endTime:   "",
			files:     []string{fileName3},
			querys: []string{
				"select 5;",
				"select 6;",
				"select 7;",
			},
		},
		// A zero-width range still matches an entry at exactly that time.
		{
			startTime: "2020-04-15T18:00:05.299063744+08:00",
			endTime:   "2020-04-15T18:00:05.299063744+08:00",
			files:     []string{fileName3},
			querys: []string{
				"select 7;",
			},
		},
	}
	loc, err := time.LoadLocation("Asia/Shanghai")
	require.NoError(t, err)
	sctx := mock.NewContext()
	sctx.GetSessionVars().TimeZone = loc
	sctx.GetSessionVars().SlowQueryFile = fileName3
	for i, cas := range cases {
		extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0}
		if extractor.Enable {
			startTime, err := ParseTime(cas.startTime)
			require.NoError(t, err)
			endTime, err := ParseTime(cas.endTime)
			require.NoError(t, err)
			extractor.TimeRanges = []*plannercore.TimeRange{{StartTime: startTime, EndTime: endTime}}
		}
		retriever, err := newSlowQueryRetriever()
		require.NoError(t, err)
		retriever.extractor = extractor
		err = retriever.initialize(context.Background(), sctx)
		require.NoError(t, err)
		comment := fmt.Sprintf("case id: %v", i)
		require.Equal(t, len(retriever.files), len(cas.files), comment)
		if len(retriever.files) > 0 {
			// Only the first selected file is parsed; the expected querys
			// span all files because parseLog follows the file list.
			reader := bufio.NewReader(retriever.files[0].file)
			rows, err := parseLog(retriever, sctx, reader)
			require.NoError(t, err)
			require.Equal(t, len(rows), len(cas.querys), comment)
			for i, row := range rows {
				require.Equal(t, row[len(row)-1].GetString(), cas.querys[i], comment)
			}
		}
		for i, file := range retriever.files {
			require.Equal(t, file.file.Name(), cas.files[i])
			require.NoError(t, file.file.Close())
		}
		require.NoError(t, retriever.close())
	}
}
// TestSplitbyColon verifies splitByColon on empty input, value-only input,
// a simple pair, bracketed values (space- and comma-separated), and a
// timestamp value that itself contains colons.
func TestSplitbyColon(t *testing.T) {
	tests := []struct {
		input      string
		wantFields []string
		wantValues []string
	}{
		{"", []string{}, []string{}},
		{"123a", []string{}, []string{"123a"}},
		{"1a: 2b", []string{"1a"}, []string{"2b"}},
		{"1a: [2b 3c] 4d: 5e", []string{"1a", "4d"}, []string{"[2b 3c]", "5e"}},
		{"1a: [2b,3c] 4d: 5e", []string{"1a", "4d"}, []string{"[2b,3c]", "5e"}},
		{"Time: 2021-09-08T14:39:54.506967433+08:00", []string{"Time"}, []string{"2021-09-08T14:39:54.506967433+08:00"}},
	}
	for _, tt := range tests {
		gotFields, gotValues := splitByColon(tt.input)
		require.Equal(t, tt.wantFields, gotFields)
		require.Equal(t, tt.wantValues, gotValues)
	}
}
// TestBatchLogForReversedScan checks getBatchLogForReversedScan: with
// Desc=true the retriever returns log lines newest-first, including
// entries that straddle a file boundary (logData2 and logData4 begin with
// the statement belonging to the previous file's trailing "# Time:").
func TestBatchLogForReversedScan(t *testing.T) {
	logData0 := ""
	logData1 := `
# Time: 2020-02-15T18:00:01.000000+08:00
select 1;
# Time: 2020-02-15T19:00:05.000000+08:00
select 2;
# Time: 2020-02-15T20:00:05.000000+08:00`
	logData2 := `select 3;
# Time: 2020-02-16T18:00:01.000000+08:00
select 4;
# Time: 2020-02-16T18:00:05.000000+08:00
select 5;`
	logData3 := `
# Time: 2020-02-16T19:00:00.000000+08:00
select 6;
# Time: 2020-02-17T18:00:05.000000+08:00
select 7;
# Time: 2020-04-15T18:00:05.299063744+08:00`
	logData4 := `select 8;
# Time: 2020-04-15T19:00:05.299063744+08:00
select 9;`
	logData := []string{logData0, logData1, logData2, logData3, logData4}
	fileName0 := "tidb-slow-reverse-scan-2020-02-14T19-04-05.01.log"
	fileName1 := "tidb-slow-reverse-scan-2020-02-15T19-04-05.01.log"
	fileName2 := "tidb-slow-reverse-scan-2020-02-16T19-04-05.01.log"
	fileName3 := "tidb-slow-reverse-scan-2020-02-17T19-04-05.01.log"
	fileName4 := "tidb-slow-reverse-scan.log"
	defer config.RestoreFunc()()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.Log.SlowQueryFile = fileName4
	})
	fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
	prepareLogs(t, logData, fileNames)
	defer func() {
		removeFiles(fileNames)
	}()
	cases := []struct {
		startTime string
		endTime   string
		files     []string
		logs      [][]string
	}{
		{
			startTime: "2020-02-15T18:00:00.000000+08:00",
			endTime:   "2020-02-15T19:00:00.000000+08:00",
			files:     []string{fileName1},
			logs: [][]string{
				{"# Time: 2020-02-15T19:00:05.000000+08:00",
					"select 2;",
					"# Time: 2020-02-15T18:00:01.000000+08:00",
					"select 1;"},
			},
		},
		{
			startTime: "2020-02-15T20:00:05.000000+08:00",
			endTime:   "2020-02-17T19:00:00.000000+08:00",
			files:     []string{fileName1, fileName2, fileName3},
			logs: [][]string{
				{"# Time: 2020-02-17T18:00:05.000000+08:00",
					"select 7;",
					"# Time: 2020-02-16T19:00:00.000000+08:00",
					"select 6;",
					"# Time: 2020-02-16T18:00:05.000000+08:00",
					"select 5;",
					"# Time: 2020-02-16T18:00:01.000000+08:00",
					"select 4;",
					"# Time: 2020-02-16T18:00:01.000000+08:00",
					"select 3;"},
			},
		},
		{
			startTime: "2020-02-16T19:00:00.000000+08:00",
			endTime:   "2020-04-15T20:00:00.000000+08:00",
			files:     []string{fileName3, fileName4},
			logs: [][]string{
				{"# Time: 2020-04-15T19:00:05.299063744+08:00",
					"select 9;",
					// NOTE(review): unlike every sibling entry this expected
					// line has no "# " prefix — presumably matching how the
					// reversed scan reconstructs an entry split across a
					// file boundary; confirm against the implementation.
					"Time: 2020-04-15T18:00:05.299063744+08:00",
					"select 8;",
					"# Time: 2020-02-17T18:00:05.000000+08:00",
					"select 7;",
					"# Time: 2020-02-16T19:00:00.000000+08:00",
					"select 6;"},
			},
		},
	}
	loc, err := time.LoadLocation("Asia/Shanghai")
	require.NoError(t, err)
	sctx := mock.NewContext()
	sctx.GetSessionVars().TimeZone = loc
	sctx.GetSessionVars().SlowQueryFile = fileName3
	for i, cas := range cases {
		extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0, Desc: true}
		if extractor.Enable {
			startTime, err := ParseTime(cas.startTime)
			require.NoError(t, err)
			endTime, err := ParseTime(cas.endTime)
			require.NoError(t, err)
			extractor.TimeRanges = []*plannercore.TimeRange{{StartTime: startTime, EndTime: endTime}}
		}
		retriever, err := newSlowQueryRetriever()
		require.NoError(t, err)
		retriever.extractor = extractor
		sctx.GetSessionVars().SlowQueryFile = fileName4
		err = retriever.initialize(context.Background(), sctx)
		require.NoError(t, err)
		comment := fmt.Sprintf("case id: %v", i)
		if len(retriever.files) > 0 {
			reader := bufio.NewReader(retriever.files[0].file)
			offset := &offset{length: 0, offset: 0}
			rows, err := retriever.getBatchLogForReversedScan(context.Background(), reader, offset, 3)
			require.NoError(t, err)
			// Every returned row is compared against cas.logs[0] — each
			// case declares exactly one expected batch.
			for _, row := range rows {
				for j, log := range row {
					require.Equal(t, log, cas.logs[0][j], comment)
				}
			}
		}
		require.NoError(t, retriever.close())
	}
}
// TestCancelParseSlowLog verifies that cancelling the retriever's context
// makes the background parseSlowLog goroutine exit even when unprocessed
// tasks remain in its queue. The two signal channels are passed to the
// mockReadSlowLogSlow failpoint to pause/resume the reader.
func TestCancelParseSlowLog(t *testing.T) {
	fileName := "tidb-slow-2020-02-14T19-04-05.01.log"
	slowLog := `# Time: 2019-04-28T15:24:04.309074+08:00
select * from t;`
	prepareLogs(t, []string{slowLog}, []string{fileName})
	defer func() {
		removeFiles([]string{fileName})
	}()
	sctx := mock.NewContext()
	sctx.GetSessionVars().SlowQueryFile = fileName
	retriever, err := newSlowQueryRetriever()
	require.NoError(t, err)
	var signal1, signal2 = make(chan int, 1), make(chan int, 1)
	ctx := context.WithValue(context.Background(), signalsKey{}, []chan int{signal1, signal2})
	ctx, cancel := context.WithCancel(ctx)
	err = failpoint.Enable("github.com/pingcap/tidb/executor/mockReadSlowLogSlow", "return(true)")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockReadSlowLogSlow"))
	}()
	go func() {
		_, err := retriever.retrieve(ctx, sctx)
		require.Errorf(t, err, "context canceled")
	}()
	// Wait for parseSlowLog going to add tasks.
	<-signal1
	// Cancel the retriever and then dataForSlowLog exits.
	cancel()
	// Assume that there are already unprocessed tasks.
	retriever.taskList <- slowLogTask{}
	// Let parseSlowLog continue.
	signal2 <- 1
	// parseSlowLog should exit immediately.
	time.Sleep(1 * time.Second)
	require.False(t, checkGoroutineExists("parseSlowLog"))
}
// checkGoroutineExists reports whether the current goroutine profile
// contains keyword (typically a function-name substring). It panics if the
// profile cannot be written, which only indicates a broken test setup.
func checkGoroutineExists(keyword string) bool {
	var sb strings.Builder
	if err := pprof.Lookup("goroutine").WriteTo(&sb, 1); err != nil {
		panic(err)
	}
	return strings.Contains(sb.String(), keyword)
}
// prepareLogs writes each entry of logData to the file at the same index
// in fileNames, creating or truncating it with mode 0644. Any I/O failure
// aborts the test immediately.
func prepareLogs(t *testing.T, logData []string, fileNames []string) {
	for i, content := range logData {
		f, err := os.OpenFile(fileNames[i], os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
		require.NoError(t, err)
		_, err = f.Write([]byte(content))
		require.NoError(t, err)
		require.NoError(t, f.Close())
	}
}
func removeFiles(fileNames []string) {
for _, fileName := range fileNames {
os.Remove(fileName)
}
}
|
package main
// ----------------------- 方法1: 双栈实现 -----------------------
// INF is a sentinel larger than any stored value; it seeds the running
// minimum for an empty stack.
const INF = 1000000000000

// MinStack (two-stack variant): dataStack holds the values; for each of
// them minValueStack holds the minimum of the stack up to and including
// that element.
type MinStack struct {
	dataStack     *MyStack
	minValueStack *MyStack
}

// Constructor returns an empty MinStack.
func Constructor() MinStack {
	return MinStack{
		dataStack:     NewMyStack(),
		minValueStack: NewMyStack(),
	}
}

// Push stores x and records the new running minimum alongside it.
func (ms *MinStack) Push(x int) {
	// Compute the new minimum before pushing onto minValueStack.
	ms.minValueStack.Push(ms.minValueStack.GetMinValueBetweenTopAndReference(x))
	ms.dataStack.Push(x)
}

// Pop discards the top value together with its recorded minimum.
func (ms *MinStack) Pop() {
	ms.minValueStack.Pop()
	ms.dataStack.Pop()
}

// Top returns the most recently pushed value.
func (ms *MinStack) Top() int {
	return ms.dataStack.GetTop()
}

// GetMin returns the minimum of all values currently on the stack.
func (ms *MinStack) GetMin() int {
	return ms.minValueStack.GetTop()
}
// ----------------------- 方法2: 单栈实现 -----------------------
// NOTE(review): this "method 2" section redeclares INF, MinStack,
// Constructor, Push, Pop, Top and GetMin, all of which already exist in
// the "method 1" section above — a single Go package cannot compile with
// both. Presumably these are alternative solutions and one section should
// be kept (or renamed); confirm intent.
const INF = 1000000000000

// MinStack (single-stack variant): each logical element occupies two
// physical slots — the value, then the running minimum on top of it.
type MinStack struct {
	compositeStack *MyStack
}

// Constructor returns an empty MinStack.
func Constructor() MinStack {
	return MinStack{
		compositeStack: NewMyStack(),
	}
}

// Push stores x followed by the new running minimum.
func (ms *MinStack) Push(x int) {
	nextMinValue := ms.compositeStack.GetMinValueBetweenTopAndReference(x)
	ms.compositeStack.Push(x)
	ms.compositeStack.Push(nextMinValue)
}

// Pop removes both slots of the top logical element.
func (ms *MinStack) Pop() {
	ms.compositeStack.Pop()
	ms.compositeStack.Pop()
}

// Top returns the most recently pushed value.
func (ms *MinStack) Top() int {
	// The deferred call's argument is evaluated now: Pop() removes the
	// stored minimum immediately, GetTop then sees the real value, and
	// the deferred Push restores the minimum when Top returns.
	defer ms.compositeStack.Push(ms.compositeStack.Pop())
	return ms.compositeStack.GetTop()
	// Equivalent to:
	// minValue := ms.compositeStack.Pop()
	// realValue := ms.compositeStack.GetTop()
	// ms.compositeStack.Push(minValue)
	// return realValue
}

// GetMin returns the minimum of all stored values (the top physical slot).
func (ms *MinStack) GetMin() int {
	return ms.compositeStack.GetTop()
}
// ------------------- MyStack -------------------
// MyStack is a minimal int stack backed by a slice.
type MyStack struct {
	data []int
}

// NewMyStack returns an empty stack.
func NewMyStack() *MyStack {
	return &MyStack{}
}

// Push appends val to the top of the stack.
func (ms *MyStack) Push(val int) {
	ms.data = append(ms.data, val)
}

// Pop removes and returns the top element (panics when empty).
func (ms *MyStack) Pop() int {
	last := len(ms.data) - 1
	top := ms.data[last]
	ms.data = ms.data[:last]
	return top
}

// GetTop returns the top element without removing it (panics when empty).
func (ms *MyStack) GetTop() int {
	return ms.data[len(ms.data)-1]
}

// IsEmpty reports whether the stack holds no elements.
func (ms *MyStack) IsEmpty() bool {
	return len(ms.data) == 0
}

// GetSize returns the number of stored elements.
func (ms *MyStack) GetSize() int {
	return len(ms.data)
}

// GetMinValueBetweenTopAndReference returns the smaller of reference and
// the current top; INF stands in for the top when the stack is empty.
func (ms *MyStack) GetMinValueBetweenTopAndReference(reference int) int {
	if ms.IsEmpty() {
		return min(INF, reference)
	}
	return min(min(INF, ms.GetTop()), reference)
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if a <= b {
		return a
	}
	return b
}
/*
题目链接: https://leetcode-cn.com/problems/min-stack/
总结:
1. 还有另外一种编写方式,这种编写方式可以减少常数级别的空间。 那就是minValueStack存储一个结构体,这个结构体有两个字段
一个是最小值,一个是该最小值出现了多少次... 这样就可以用数字代表最小值的实体了,从而减少空间占用。
*/
|
package oss_test
import "testing"
// TestClient_PutObjectFromFile uploads local "file.go" under the object
// key "file.go" and fails the test on any client or upload error.
func TestClient_PutObjectFromFile(t *testing.T) {
	c, err := getClient()
	if err != nil {
		t.Error(err)
		return
	}
	err = c.PutObjectFromFile("file.go", "file.go")
	if err != nil {
		t.Error(err)
	}
}
// TestClient_GetObjectToFile downloads object "my-profile" into local
// file "p.jpg" and fails the test on any client or download error.
func TestClient_GetObjectToFile(t *testing.T) {
	c, err := getClient()
	if err != nil {
		t.Error(err)
		return
	}
	err = c.GetObjectToFile("my-profile", "p.jpg")
	if err != nil {
		t.Error(err)
	}
}
|
package opp
import (
"bytes"
"math/rand"
"strconv"
)
// CPUProFile show cpu info
// CPUProFile burns CPU (and allocates heavily) by formatting 100 million
// random integers into an in-memory buffer; it is a workload for CPU
// profiling demos. The buffer's contents are discarded and the function
// always returns nil.
func CPUProFile()error{
	max := 100000000
	var buf bytes.Buffer
	for j := 0;j<max;j++{
		num := rand.Int63n((int64(max)))
		str := strconv.FormatInt(num,10)
		buf.WriteString(str)
	}
	_ = buf.String()
	return nil
} |
package main
import "fmt"
// main builds a name→age map and prints it; fmt prints map keys in sorted
// order, so the output is deterministic.
func main() {
	studentsAge := make(map[string]int, 2)
	studentsAge["john"] = 32
	studentsAge["bob"] = 31
	fmt.Println(studentsAge)
}
|
package asciitransport
import (
"io"
"sync"
"time"
)
type AsciiTransportClient interface {
OutputEvent() <-chan *OutputEvent
Input([]byte)
InputFrom(io.Reader) error
Resize(uint, uint)
Done() <-chan struct{}
Close() error
}
// Client wraps conn in an AsciiTransport configured for the client role,
// applies opts in order, and starts the read/write goroutines before
// returning.
func Client(conn io.ReadWriteCloser, opts ...Opt) AsciiTransportClient {
	at := &AsciiTransport{
		conn:      conn,
		quit:      make(chan struct{}),
		closeonce: &sync.Once{},
		start:     time.Now(),
		iech:      make(chan *InputEvent),
		oech:      make(chan *OutputEvent),
		rech:      make(chan *ResizeEvent),
		isClient:  true,
	}
	for _, opt := range opts {
		opt(at)
	}
	pr, pw := io.Pipe()
	go func() {
		// Pump conn into the pipe read by goReadConn. When conn closes or
		// errors, io.Copy returns (its error is deliberately ignored) and
		// the whole transport is closed.
		io.Copy(pw, conn)
		at.Close()
	}()
	at.goReadConn(pr)
	at.goWriteConn(conn)
	return at
}
|
// mEmIFy is a meme library. Its practically useless. If you're using this, it's your fault.
package mEmIFy
import (
"errors"
"math/rand"
"regexp"
"strings"
"time"
)
// SpongebobCaseSeed taKes ThE OriGiNaL stRiNg anD ReTuRns It LiKE the SpOnGeBob MocK mEMe. It TaKeS A SEed FoR Y'All CoNTrOl FrEAks.
func SpongebobCaseSeed(s string, seed int64) string {
rand.Seed(seed)
spongebobString := ""
trimmed := strings.TrimSpace(s)
for _, v := range trimmed {
probs := rand.Intn(100)
if probs > 51 {
spongebobString = spongebobString + strings.ToUpper(string(v))
} else {
spongebobString = spongebobString + strings.ToLower(string(v))
}
}
return string(spongebobString)
}
// SpongebobCase iS JuSt A wRaPPeR to SpongebobCaseSeed BuT TAkeS CaRe Of SeEdiNg RaNDoM FoR You, YA NEwb
func SpongebobCase(s string) string {
	seed := time.Now().UnixNano()
	return SpongebobCaseSeed(s, seed)
}
// CCify translates words like protect to protecc
//
// Each space-separated word ending in "c?" (c plus one trailing non-word
// character, e.g. "protect.") or plain "c?" before its final letter gets a
// "cc" substitution. Returns an error only for an empty input string.
func CCify(s string) (string, error) {
	if s == "" {
		return "", errors.New("The string passed in is empty")
	}
	trimmed := strings.TrimSpace(s)
	split := strings.Split(trimmed, " ")
	regular, err := regexp.Compile(`[a-z]+\W`)
	if err != nil {
		return "", err
	}
	newString := make([]string, 0, len(split))
	for _, v := range split {
		keepSafe := v
		// Length guards fix a panic in the original: words shorter than
		// 3 (first branch) or 2 (second branch) bytes — e.g. "a", or the
		// empty token produced by a double space — indexed out of range.
		switch {
		case len(v) >= 3 && regular.MatchString(v) && string(v[len(v)-3]) == "c":
			// Word like "protect.": replace the "c" before the trailing
			// punctuation, keeping that punctuation.
			keepSafe = v[:len(v)-3] + "cc" + string(v[len(v)-1])
		case len(v) >= 2 && string(v[len(v)-2]) == "c":
			// Word like "protect": replace the final "ct" tail with "cc".
			keepSafe = v[:len(v)-2] + "cc"
		}
		newString = append(newString, keepSafe)
	}
	return strings.Join(newString, " "), nil
}
// Spacity adds a space between each letter in a word. A good example would be to look up nathanwpylestrangeplanet on instagram and look at the one word descriptions on a post
//
// Errors on an empty string or on input containing more than one word.
func Spacity(s string) (string, error) {
	if s == "" {
		return "", errors.New("The string passed in is empty")
	}
	word := strings.TrimSpace(s)
	if len(strings.Split(word, " ")) > 1 {
		return "", errors.New("The string should have one word")
	}
	chars := strings.Split(word, "")
	return strings.Join(chars, " "), nil
}
|
package cmd
import (
"github.com/gusandrioli/small-aes/aes"
"github.com/spf13/cobra"
)
// pdfEncryptCmd represents the pdfEncrypt command
// It passes the positional args straight to aes.EncryptPDF.
// NOTE(review): the short help claims "a 127 byte key"; AES keys are
// 16/24/32 bytes, so this figure looks wrong — confirm against
// aes.EncryptPDF. The long help is still the cobra boilerplate.
var pdfEncryptCmd = &cobra.Command{
	Use:   "pdfEncrypt",
	Short: "Encrypts pdf with AES and a 127 byte key",
	Long: `A longer description that spans multiple lines and likely contains examples
and usage of using your command. For example:
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
	Run: func(cmd *cobra.Command, args []string) {
		aes.EncryptPDF(args)
	},
}
// init registers pdfEncryptCmd on the root command so it is available as
// a subcommand.
func init() {
	rootCmd.AddCommand(pdfEncryptCmd)
}
|
package main
import (
//"math"
"fmt"
//"io/ioutil"
)
// main reads n (distance, direction) records from stdin and prints "YES"
// when the walk is valid: the position must stay within [0, 20000], the
// step at position 0 must start with 'S' and the step at 20000 with 'N'
// (only the first byte of the direction is inspected), and the walk must
// finish back at 0. Otherwise it prints "NO".
func main() {
	var (
		n       int
		dir     string
		dist    int
		pos     int
		invalid bool
	)
	fmt.Scan(&n)
	for i := 0; i < n; i++ {
		fmt.Scan(&dist, &dir)
		// At the boundaries only one direction is legal.
		if dir[0] != 'S' && pos == 0 {
			invalid = true
			break
		}
		if dir[0] != 'N' && pos == 20000 {
			invalid = true
			break
		}
		switch dir[0] {
		case 'S':
			pos += dist
		case 'N':
			pos -= dist
		}
		if pos < 0 || pos > 20000 {
			invalid = true
			break
		}
	}
	if !invalid && pos == 0 {
		fmt.Print("YES")
	} else {
		fmt.Print("NO")
	}
}
|
package database
import (
"database/sql"
"github.com/darkliquid/leader1/config"
_ "github.com/go-sql-driver/mysql"
"log"
"os"
)
var db *sql.DB
var logger *log.Logger
var cfg *config.DbSettings
// init creates the package logger, writing to stdout with a "[database]"
// prefix and standard timestamp flags.
func init() {
	logger = log.New(os.Stdout, "[database] ", log.LstdFlags)
}
// Config stores the database settings used when opening connections.
// It must be called before DB(): openDB dereferences cfg unconditionally.
func Config(dbCfg *config.DbSettings) {
	cfg = dbCfg
}
// DB returns the shared *sql.DB, opening it on first use and reopening it
// when the existing handle no longer responds to Ping.
// NOTE(review): the package-level db is read and written without a lock,
// so concurrent DB() calls race — confirm callers are single-threaded.
func DB() (*sql.DB, error) {
	var err error
	// No DB? Set it up!
	if db == nil {
		db, err = openDB()
	} else if err = db.Ping(); err != nil {
		// The cached connection is dead: close it and open a fresh one.
		db.Close()
		logger.Printf("MySQL (2) error: %s", err.Error())
		db, err = openDB()
		return db, err
	}
	return db, err
}
// openDB opens a new MySQL handle from cfg.DSN, applies the configured
// connection limits, and verifies connectivity with a Ping before
// returning. The local db deliberately shadows the package variable; the
// caller assigns the result.
func openDB() (*sql.DB, error) {
	logger.Printf("MySQL: setting up new connection")
	db, err := sql.Open("mysql", cfg.DSN)
	if err != nil {
		logger.Printf("MySQL (1) error: %s", err.Error())
		return db, err
	}
	// Set connection limits
	db.SetMaxOpenConns(cfg.MaxOpenConns)
	db.SetMaxIdleConns(cfg.MaxIdleConns)
	if err = db.Ping(); err != nil {
		db.Close()
		logger.Printf("MySQL (2) error: %s", err.Error())
		return db, err
	}
	logger.Printf("MySQL: connected")
	return db, err
}
|
package acronym
import (
"regexp"
"strings"
)
// Abbreviate - Returns the acronyms of 's'
// Words are the maximal `\w+` runs in s.
func Abbreviate(s string) string {
	matches := regexp.MustCompile(`\w+`).FindAllString(s, -1)
	return BuildAcro(matches, "")
}
// BuildAcro - builds an acronym by appending the upper-cased first byte of
// each word in 'words' onto 'acro'.
func BuildAcro(words []string, acro string) string {
	for _, word := range words {
		acro += strings.ToUpper(string(word[0]))
	}
	return acro
}
|
/*
Copyright 2015 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"github.com/crunchydata/crunchy-postgresql-manager-openshift/logit"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/util"
"github.com/robfig/cron"
)
// Command is the RPC request/reply envelope; Output carries the result of
// a call (e.g. the new status ID from AddStatus).
type Command struct {
	Output string
}

// BackupProfile identifies a named backup profile.
type BackupProfile struct {
	ID   string
	Name string
}

// BackupStatus records the state and metrics of one backup run.
type BackupStatus struct {
	ID            string
	ContainerName string
	StartTime     string
	BackupName    string
	ProfileName   string
	ServerName    string
	ServerIP      string
	ScheduleID    string
	Path          string
	ElapsedTime   string
	BackupSize    string
	Status        string
	UpdateDt      string
}

// BackupRequest describes the target of a backup job to provision.
type BackupRequest struct {
	ScheduleID    string
	ServerID      string
	ServerName    string
	ProfileName   string
	ServerIP      string
	ContainerName string
}

// BackupSchedule is a stored cron schedule for a backup job. The cron
// fields (Minutes through DayOfWeek) are kept as raw strings and joined
// by getCron; Enabled is the string "YES" for active schedules.
type BackupSchedule struct {
	ID            string
	ServerID      string
	ServerName    string
	ServerIP      string
	ContainerName string
	ProfileName   string
	Name          string
	Enabled       string
	Minutes       string
	Hours         string
	DayOfMonth    string
	Month         string
	DayOfWeek     string
	UpdateDt      string
}

//global cron instance that gets started, stopped, restarted
var CRONInstance *cron.Cron

// CLUSTERADMIN_DB is the admin database used for every connection in this
// package.
const CLUSTERADMIN_DB = "clusteradmin"
//called by backup jobs as they execute
// AddStatus inserts a new backup status row and returns its ID through
// reply.Output. On insert failure the error is logged and returned and
// reply.Output carries whatever ID the insert produced (typically empty).
func (t *Command) AddStatus(status *BackupStatus, reply *Command) error {
	logit.Info.Println("AddStatus called")
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		return err
	}
	defer dbConn.Close()
	var id string
	// Calls the package-level AddStatus helper (distinct from this method).
	id, err = AddStatus(dbConn, *status)
	if err != nil {
		logit.Error.Println("AddStatus error " + err.Error())
	}
	reply.Output = id
	return err
}
//called by backup jobs as they execute
// UpdateStatus updates an existing backup status row via the package-level
// UpdateStatus helper. Errors are logged and returned to the RPC caller.
func (t *Command) UpdateStatus(status *BackupStatus, reply *Command) error {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		return err
	}
	defer dbConn.Close()
	logit.Info.Println("UpdateStatus called")
	err = UpdateStatus(dbConn, *status)
	if err != nil {
		logit.Error.Println("UpdateStatus error " + err.Error())
		return err
	}
	return err
}
//called by admin do perform an adhoc backup job
// BackupNow provisions a one-off backup job for the target described by
// args, outside of any cron schedule.
func (t *Command) BackupNow(args *BackupRequest, reply *Command) error {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		return err
	}
	defer dbConn.Close()
	logit.Info.Println("BackupNow.impl called")
	err = ProvisionBackupJob(dbConn, args)
	if err != nil {
		logit.Error.Println("BackupNow.impl error:" + err.Error())
		return err
	}
	logit.Info.Println("BackupNow.impl completed")
	return err
}
//called by admin to cause a reload of the cron jobs
// Reload rebuilds the cron instance from the stored schedules. The
// schedule argument is unused; LoadSchedules re-reads everything.
func (t *Command) Reload(schedule *BackupSchedule, reply *Command) error {
	logit.Info.Println("Reload called")
	err := LoadSchedules()
	if err != nil {
		logit.Error.Println("Reload error " + err.Error())
		return err
	}
	return err
}
// LoadSchedules reads all backup schedules from the database, tears down
// any running cron instance, and starts a fresh one containing a job for
// every schedule whose Enabled field is "YES".
func LoadSchedules() error {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		return err
	}
	defer dbConn.Close()
	logit.Info.Println("LoadSchedules called")
	var schedules []BackupSchedule
	schedules, err = GetSchedules(dbConn)
	if err != nil {
		logit.Error.Println("LoadSchedules error " + err.Error())
		return err
	}
	if CRONInstance != nil {
		logit.Info.Println("stopping current cron instance...")
		CRONInstance.Stop()
	}
	//kill off the old cron, garbage collect it
	CRONInstance = nil
	//create a new cron
	logit.Info.Println("creating cron instance...")
	CRONInstance = cron.New()
	var cronexp string
	for i := 0; i < len(schedules); i++ {
		cronexp = getCron(schedules[i])
		logit.Info.Println("would have loaded schedule..." + cronexp)
		if schedules[i].Enabled == "YES" {
			logit.Info.Println("schedule " + schedules[i].ID + " was enabled so adding it")
			x := DefaultJob{}
			x.request = BackupRequest{}
			x.request.ScheduleID = schedules[i].ID
			x.request.ServerID = schedules[i].ServerID
			x.request.ServerName = schedules[i].ServerName
			x.request.ServerIP = schedules[i].ServerIP
			x.request.ContainerName = schedules[i].ContainerName
			x.request.ProfileName = schedules[i].ProfileName
			// NOTE(review): AddJob's error (e.g. a malformed cron
			// expression) is discarded here — confirm this is intended.
			CRONInstance.AddJob(cronexp, x)
		} else {
			logit.Info.Println("schedule " + schedules[i].ID + " NOT enabled so dropping it")
		}
	}
	logit.Info.Println("starting new CRONInstance")
	CRONInstance.Start()
	return err
}
// getCron renders s as a six-field cron expression:
// "0 <minutes> <hours> <day-of-month> <month> <day-of-week>".
func getCron(s BackupSchedule) string {
	//leave seconds field with 0 as a default
	return "0" + " " + s.Minutes + " " + s.Hours + " " + s.DayOfMonth + " " + s.Month + " " + s.DayOfWeek
}
|
package merchant
import (
"context"
"tpay_backend/adminapi/internal/common"
"tpay_backend/model"
"tpay_backend/utils"
"github.com/go-redis/redis/v8"
"tpay_backend/adminapi/internal/svc"
"tpay_backend/adminapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// EnableMerchantLogic handles admin enable/disable requests for merchants.
type EnableMerchantLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewEnableMerchantLogic builds an EnableMerchantLogic bound to the given
// request context and service context.
func NewEnableMerchantLogic(ctx context.Context, svcCtx *svc.ServiceContext) EnableMerchantLogic {
	return EnableMerchantLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}
// EnableMerchant enables or disables a merchant according to req.Enable.
// Disabling also cleans the merchant's login tokens so existing sessions
// cannot be reused. Returns InvalidParam for an unknown req.Enable value
// and SysDBUpdate when the database update fails.
func (l *EnableMerchantLogic) EnableMerchant(req types.EnableMerchantRequest) error {
	var err error
	switch req.Enable {
	case model.MerchantStatusEnable:
		err = model.NewMerchantModel(l.svcCtx.DbEngine).EnableMerchant(req.MerchantId)
	case model.MerchantStatusDisable:
		err = model.NewMerchantModel(l.svcCtx.DbEngine).DisableMerchant(req.MerchantId)
		// Clean the merchant's login tokens only after the disable
		// actually succeeded; previously they were cleaned even when the
		// database update failed.
		if err == nil {
			l.CleanMerchantLoginToken(req.MerchantId)
		}
	default:
		l.Errorf("操作不支持, req.Enable=%v", req.Enable)
		return common.NewCodeError(common.InvalidParam)
	}
	if err != nil {
		l.Errorf("启用|禁用商家失败, req.Enable=%v, err=%v", req.Enable, err)
		return common.NewCodeError(common.SysDBUpdate)
	}
	return nil
}
// CleanMerchantLoginToken removes all login tokens of the given merchant
// from the merchant-API Redis DB, forcing re-authentication. Failures are
// logged and swallowed: token cleanup is best-effort.
func (l *EnableMerchantLogic) CleanMerchantLoginToken(merchantId int64) {
	redisOptions := &redis.Options{
		Addr:     l.svcCtx.Config.Redis.Host,
		Password: l.svcCtx.Config.Redis.Pass, // no password set
		DB:       utils.RedisDbMerchantapi,   // merchant redis DB
	}
	// Connect to redis.
	redisObj := redis.NewClient(redisOptions)
	// Close the client when done; the original leaked a connection pool
	// on every call.
	defer redisObj.Close()
	// Verify the connection before touching the session store.
	if err := redisObj.Ping(context.Background()).Err(); err != nil {
		l.Errorf("redis连接失败:%+v, err:%v", redisOptions, err)
		return
	}
	merchantRedisSession := utils.NewRedisSession(redisObj, utils.RedisSessionConfig{
		KeyPrefix: common.MerchantLoginRedisKeyPrefix,
	})
	// Remove the merchant's login tokens.
	if errDel := merchantRedisSession.CleanOtherLogined(merchantId); errDel != nil {
		l.Errorf("删除商户[%v]的登录token失败", merchantId)
		return
	}
}
|
package main
import (
"fmt"
"os"
ldap "gopkg.in/ldap.v3"
)
// loginLDAPerror distinguishes credential-related LDAP failures (unknown
// login, wrong password) from infrastructure errors.
type loginLDAPerror struct {
	message string
}

// newLoginLDAPerror wraps message in a *loginLDAPerror.
func newLoginLDAPerror(message string) *loginLDAPerror {
	return &loginLDAPerror{message: message}
}

// Error implements the error interface.
func (e *loginLDAPerror) Error() string {
	return e.message
}
// ldapServer is the directory address (host:port); 3268 is commonly the
// Active Directory global catalog port.
const ldapServer = "ads.mc.asu.ru:3268"
// ldapAuth authenticates login/password against the directory: it binds
// with the read-only account from LDAP_LOGIN/LDAP_PASSWORD, finds the
// user's DN in the "billing" group by samAccountName, then re-binds as
// that DN with the supplied password. Credential failures come back as
// *loginLDAPerror; infrastructure failures as plain errors.
func ldapAuth(login, password string) error {
	l, err := ldap.Dial("tcp", ldapServer)
	if err != nil {
		return fmt.Errorf("Не удалось подключиться к ldap серверу")
	}
	defer l.Close()
	bindUsername := os.Getenv("LDAP_LOGIN")
	bindPassword := os.Getenv("LDAP_PASSWORD")
	err = l.Bind(bindUsername, bindPassword)
	if err != nil {
		return fmt.Errorf("Не удалось подключиться read only пользователем")
	}
	// NOTE(review): login is interpolated unescaped into the LDAP filter —
	// consider escaping (ldap.EscapeFilter) to avoid filter injection.
	searchRequest := ldap.NewSearchRequest(
		"dc=mc,dc=asu,dc=ru",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		fmt.Sprintf("(&(memberOf=cn=billing,ou=groups,ou=vc,dc=mc,dc=asu,dc=ru)(samAccountName=%s))", login),
		[]string{"dn"},
		nil,
	)
	sr, err := l.Search(searchRequest)
	if err != nil {
		// NOTE(review): a Search error is reported as "user not found",
		// though it may also be a transport failure — confirm intent.
		return newLoginLDAPerror("Не удалось найти пользователя с таким логином")
	}
	if len(sr.Entries) != 1 {
		return newLoginLDAPerror("Неверный логин")
	}
	// Bind as the found user to verify the password.
	userdn := sr.Entries[0].DN
	err = l.Bind(userdn, password)
	if err != nil {
		return newLoginLDAPerror("Неверный пароль")
	}
	return nil
}
|
package cache
import (
"crypto/tls"
"fmt"
"github.com/go-redis/redis/v7"
"strings"
"sync"
"time"
)
var (
onceCacheCluster sync.Once
cacheClusterClientSvc Service
)
// InitCacheClusterClientSvc initializes the package-level cluster cache
// service exactly once; later calls are no-ops. It panics when the
// connection cannot be established.
func InitCacheClusterClientSvc(cacheHost string, cachePort string, cachePassword string) {
	onceCacheCluster.Do(func() {
		cacheSvc, err := NewCacheService(true, cacheHost, cachePort, cachePassword)
		if err != nil {
			panic(err)
		}
		cacheClusterClientSvc = cacheSvc
	})
}
// GetCacheClusterClient returns the shared cluster cache service; it is
// nil until InitCacheClusterClientSvc has been called.
func GetCacheClusterClient() Service {
	return cacheClusterClientSvc
}
// ClusterClient adapts *redis.ClusterClient to the package's cache
// operations.
type ClusterClient struct {
	// onceCache is never used in this file — NOTE(review): confirm
	// whether it can be removed.
	onceCache sync.Once
	Client    *redis.ClusterClient
}
// NewClusterCacheClient dials the cluster at host:port (with optional
// password) and wraps the verified client in a ClusterClient.
func NewClusterCacheClient(host, port, password string) (*ClusterClient, error) {
	client, err := NewRedisClusterClient(host, port, password)
	if err != nil {
		return nil, err
	}
	cache := &ClusterClient{
		Client: client,
	}
	return cache, nil
}
// HSet sets the given field/value pairs on the hash at key.
func (cache *ClusterClient) HSet(key string, values ...interface{}) error {
	// Spread values into the variadic call. Passing the bare slice sent a
	// single []interface{} element instead of the field/value pairs, so
	// the command was malformed.
	return cache.Client.HSet(key, values...).Err()
}
// HSetNX sets field on the hash at key only if it does not already exist;
// when the field was set, the key's TTL is also set to expiration.
// Returns whether the field was set and any error from either step.
func (cache *ClusterClient) HSetNX(key string, field string, value interface{}, expiration time.Duration) (set bool, err error) {
	set, err = cache.Client.HSetNX(key, field, value).Result()
	// Check the command error explicitly before touching the TTL
	// (and use the idiomatic `if set` instead of `set == true`).
	if err != nil {
		return set, err
	}
	if set {
		err = cache.Expire(key, expiration)
	}
	return set, err
}
// Expire sets key's TTL to expiration.
func (cache *ClusterClient) Expire(key string, expiration time.Duration) error {
	return cache.Client.Expire(key, expiration).Err()
}

// HGet returns the value of field in the hash at key. A missing key or
// field (redis.Nil) is mapped to ("", nil) rather than an error.
func (cache *ClusterClient) HGet(key string, field string) (string, error) {
	data, err := cache.Client.HGet(key, field).Result()
	if err != nil && err.Error() == "redis: nil" {
		return data, nil
	}
	return data, err
}

// HGetAll returns all field/value pairs of the hash at key (an empty map
// on error).
func (cache *ClusterClient) HGetAll(key string) map[string]string {
	return cache.Client.HGetAll(key).Val()
}

// HDel deletes one field from the hash at key.
// NOTE(review): despite the plural parameter name, only a single field is
// accepted — confirm whether a variadic signature was intended.
func (cache *ClusterClient) HDel(key string, fields string) error {
	return cache.Client.HDel(key, fields).Err()
}

// Del removes key.
func (cache *ClusterClient) Del(key string) error {
	return cache.Client.Del(key).Err()
}

// Pipeline returns a new pipeline bound to the cluster client.
func (cache *ClusterClient) Pipeline() redis.Pipeliner {
	return cache.Client.Pipeline()
}
// NewRedisClusterClient connects to a Redis cluster and verifies the
// connection with a Ping. TLS is enabled only when a password is given.
// NOTE(review): the address list is built as Sprintf("%s:%s", host, port)
// and then split on "," — if redisHost is itself a comma-separated list,
// only the last entry gets the port appended; confirm the expected host
// format.
func NewRedisClusterClient(redisHost, cachePort, redisPassword string) (cacheClient *redis.ClusterClient, err error) {
	addrs := strings.Split(fmt.Sprintf("%s:%s", redisHost, cachePort), ",")
	if len(redisPassword) > 0 {
		cacheClient = redis.NewClusterClient(&redis.ClusterOptions{
			Addrs:    addrs,
			Password: redisPassword,
			// RootCAs nil: the host's default root CA set is used.
			TLSConfig: &tls.Config{
				RootCAs: nil,
			},
		})
	} else {
		cacheClient = redis.NewClusterClient(&redis.ClusterOptions{
			Addrs: addrs,
		})
	}
	if err := cacheClient.Ping().Err(); err != nil {
		fmt.Println("ERRO NO CLUSTER REDIS")
		return nil, err
	}
	return cacheClient, nil
}
|
package main
import (
"fmt"
"os"
"github.com/Cloud-Foundations/Dominator/imagepublishers/amipublisher"
libjson "github.com/Cloud-Foundations/Dominator/lib/json"
"github.com/Cloud-Foundations/Dominator/lib/log"
)
// listUnpackersSubcommand is the CLI entry point for listing unpackers;
// it wraps any failure with context. args is unused.
func listUnpackersSubcommand(args []string, logger log.DebugLogger) error {
	err := listUnpackers(logger)
	if err != nil {
		return fmt.Errorf("error listing unpackers: %s", err)
	}
	return nil
}
func listUnpackers(logger log.DebugLogger) error {
results, err := amipublisher.ListUnpackers(targets, skipTargets,
*instanceName, logger)
if err != nil {
return err
}
return libjson.WriteWithIndent(os.Stdout, " ", results)
}
|
package server
import (
"fmt"
"go-be-book/server/handler"
"net/http"
"sync"
"github.com/gorilla/mux"
)
func Run(wg *sync.WaitGroup) {
book_handler := handler.BookHandler{}
router := mux.NewRouter()
defer wg.Done()
router.HandleFunc("/", book_handler.ListBook).Methods("GET")
router.HandleFunc("/books", book_handler.CreateBook).Methods("POST")
router.HandleFunc("/book/{id}", book_handler.RetrieveBook).Methods("GET")
router.HandleFunc("/book/{id}/delete", book_handler.DeleteBook).Methods("DELETE")
router.HandleFunc("/book/{id}/update", book_handler.UpdateBook).Methods("PUT")
router.HandleFunc("/books/create-combo", book_handler.CreateComboBook).Methods("POST")
fmt.Println("Server started port 8000!")
http.ListenAndServe(":8000", router)
}
|
package main_test
import (
"log"
"net"
"os/exec"
"strconv"
"strings"
"time"
"xip/testhelper"
"xip/xip"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
var err error
var serverCmd *exec.Cmd
var serverSession *Session
var port = getFreePort()
var serverPath, _ = Build("main.go")
var _ = BeforeSuite(func() {
Expect(err).ToNot(HaveOccurred())
serverCmd = exec.Command(serverPath, "-port", strconv.Itoa(port), "-blocklistURL", "file://../../etc/blocklist.txt")
serverSession, err = Start(serverCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
// takes 0.455s to start up on macOS Big Sur 3.7 GHz Quad Core 22-nm Xeon E5-1620v2 processor (2013 Mac Pro)
// takes 1.312s to start up on macOS Big Sur 2.0GHz quad-core 10th-generation Intel Core i5 processor (2020 13" MacBook Pro)
// 10 seconds should be long enough for slow container-on-a-VM-with-shared-core
Eventually(serverSession.Err, 10).Should(Say(` version \d+\.\d+\.\d+ starting`))
Eventually(serverSession.Err, 10).Should(Say("Ready to answer queries"))
})
var _ = AfterSuite(func() {
serverSession.Terminate()
Eventually(serverSession).Should(Exit())
})
var _ = Describe("sslip.io-dns-server", func() {
//var stdin io.WriteCloser
var digCmd *exec.Cmd
var digSession *Session
var digArgs string
Describe("Integration tests", func() {
DescribeTable("when the DNS server is queried",
func(digArgs string, digResults string, serverLogMessage string) {
digArgs += " -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
// we want to make sure digSession has exited because we
// want to compare the _full_ contents of the stdout in the case
// of negative assertions (e.g. "^$")
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(digSession.Out.Contents())).Should(MatchRegexp(digResults))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(serverLogMessage))
},
Entry("A (customized) for sslip.io",
"@localhost sslip.io +short",
`\A78.46.204.247\n\z`,
`TypeA sslip.io. \? 78.46.204.247\n`),
Entry("A (or lack thereof) for example.com",
"@localhost example.com +short",
`\A\z`,
`TypeA example.com. \? nil, SOA example.com. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry("A for www-127-0-0-1.sslip.io",
"@localhost www-127-0-0-1.sslip.io +short",
`\A127.0.0.1\n\z`,
`TypeA www-127-0-0-1.sslip.io. \? 127.0.0.1\n`),
Entry("A for www.192.168.0.1.sslip.io",
"@localhost www.192.168.0.1.sslip.io +short",
`\A192.168.0.1\n\z`,
`TypeA www.192.168.0.1.sslip.io. \? 192.168.0.1\n`),
Entry("AAAA (customized) for sslip.io",
"@localhost sslip.io aaaa +short",
`\A2a01:4f8:c17:b8f::2\n\z`,
`TypeAAAA sslip.io. \? 2a01:4f8:c17:b8f::2\n`),
Entry("AAAA not found for example.com",
"@localhost example.com aaaa +short",
`\A\z`,
`TypeAAAA example.com. \? nil, SOA example.com. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry("AAAA for www-2601-646-100-69f0-1c09-bae7-aa42-146c.sslip.io",
"@localhost www-2601-646-100-69f0-1c09-bae7-aa42-146c.sslip.io aaaa +short",
`\A2601:646:100:69f0:1c09:bae7:aa42:146c\n\z`,
`TypeAAAA www-2601-646-100-69f0-1c09-bae7-aa42-146c.sslip.io. \? 2601:646:100:69f0:1c09:bae7:aa42:146c\n`),
Entry("ALL (ANY) is NOT implemented",
// `+notcp` required for dig 9.11.25-RedHat-9.11.25-2.fc32 to avoid "connection refused"
"@localhost sslip.io any +notcp",
` status: NOTIMP,`,
`TypeALL sslip.io. \? NotImplemented\n`),
Entry("CNAME (customized) for protonmail._domainkey.sslip.io",
"@localhost protonmail._domainkey.sslip.io cname +short",
`\Aprotonmail.domainkey.dw4gykv5i2brtkjglrf34wf6kbxpa5hgtmg2xqopinhgxn5axo73a.domains.proton.ch.\n\z`,
`TypeCNAME protonmail._domainkey.sslip.io. \? protonmail.domainkey.dw4gykv5i2brtkjglrf34wf6kbxpa5hgtmg2xqopinhgxn5axo73a.domains.proton.ch.\n`),
Entry("CNAME not found for example.com",
"@localhost example.com cname +short",
`\A\z`,
`TypeCNAME example.com. \? nil, SOA example.com. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry("MX for example.com",
"@localhost example.com mx +short",
`\A0 example.com.\n\z`,
`TypeMX example.com. \? 0 example.com.\n`),
Entry("SOA for sslip.io",
"@localhost sslip.io soa +short",
`\Asslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n\z`,
`TypeSOA sslip.io. \? sslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry("SOA for example.com",
"@localhost example.com soa +short",
`\Aexample.com. briancunnie.gmail.com. 2023031500 900 900 1800 180\n\z`,
`TypeSOA example.com. \? example.com. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry("SRV (or other record that we don't implement) for example.com",
"@localhost example.com srv +short",
`\A\z`,
`TypeSRV example.com. \? nil, SOA example.com. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry(`TXT for version.status.sslip.io is the version number of the xip software (which gets overwritten during linking)`,
"@127.0.0.1 version.status.sslip.io txt +short",
`\A"0.0.0"\n"0001/01/01-99:99:99-0800"\n"cafexxx"\n\z`,
`TypeTXT version.status.sslip.io. \? \["0.0.0"\], \["0001/01/01-99:99:99-0800"\], \["cafexxx"\]`),
Entry(`TXT is the querier's IPv4 address and the domain "ip.sslip.io"`,
"@127.0.0.1 ip.sslip.io txt +short",
`127.0.0.1`,
`TypeTXT ip.sslip.io. \? \["127.0.0.1"\]`),
Entry(`TXT is the querier's IPv4 address and the domain is NOT "ip.sslip.io"`,
"@127.0.0.1 example.com txt +short",
`\A\z`,
`TypeTXT example.com. \? nil, SOA example.com. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry(`get a PTR for 1.0.168.192.in-addr.arpa returns 192-168-0-1.sslip.io`,
"@127.0.0.1 ptr -x 192.168.0.1 +short",
`\A192-168-0-1.sslip.io.\n\z`,
`TypePTR 1.0.168.192.in-addr.arpa. \? 192-168-0-1.sslip.io.`),
Entry(`get a PTR for 1.0.0.127.blah.in-addr.arpa returns no records; "blah.in-addr.arpa is not a valid domain."`,
"@127.0.0.1 1.0.0.127.blah.in-addr.arpa ptr +short",
`\A\z`,
`TypePTR 1.0.0.127.blah.in-addr.arpa. \? nil, SOA sslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry(`get a PTR for blah.1.0.0.127.in-addr.arpa returns no records; "blah" isn't a valid subdomain' `,
"@127.0.0.1 blah.1.0.0.127.in-addr.arpa ptr +short",
`\A\z`,
`TypePTR blah.1.0.0.127.in-addr.arpa. \? nil, SOA sslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry(`get a PTR for 0.0.127.in-addr.arpa returns no records; should have 4 octets, not 3`,
"@127.0.0.1 0.0.127.in-addr.arpa ptr +short",
`\A\z`,
`TypePTR 0.0.127.in-addr.arpa. \? nil, SOA sslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry(`get a PTR for 2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa returns 2601-646-100-69f0-14ce-6eea-9204-bba2.sslip.io`,
"@127.0.0.1 ptr -x 2601:646:100:69f0:14ce:6eea:9204:bba2 +short",
`\A2601-646-100-69f0-14ce-6eea-9204-bba2.sslip.io.\n\z`,
`TypePTR 2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa. \? 2601-646-100-69f0-14ce-6eea-9204-bba2.sslip.io.`),
Entry(`get a PTR for 2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.blah.ip6.arpa returns no records; "blah isn't a valid subdomain'"`,
"@127.0.0.1 2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.blah.ip6.arpa ptr +short",
`\A\z`,
`TypePTR 2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.blah.ip6.arpa. \? nil, SOA sslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry(`get a PTR for b2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa returns no records; "b2" isn't a valid subdomain'`,
"@127.0.0.1 b2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa ptr +short",
`\A\z`,
`TypePTR b2.a.b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa. \? nil, SOA sslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
Entry(`get a PTR for b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa returns no records; has too few numbers`,
"@127.0.0.1 b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa ptr +short",
`\A\z`,
`TypePTR b.b.4.0.2.9.a.e.e.6.e.c.4.1.0.f.9.6.0.0.1.0.6.4.6.0.1.0.6.2.ip6.arpa. \? nil, SOA sslip.io. briancunnie.gmail.com. 2023031500 900 900 1800 180\n`),
)
})
Describe("for more complex assertions", func() {
When("our test is run on a machine which has IPv6", func() {
cmd := exec.Command("ping6", "-c", "1", "::1")
err := cmd.Run() // if the command succeeds, we have IPv6
if err == nil {
It("returns a TXT of the querier's IPv6 address when querying ip.sslip.io", func() {
digCmd = exec.Command("dig", "@::1", "ip.sslip.io", "txt", "+short", "-p", strconv.Itoa(port))
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(digSession.Out.Contents())).Should(MatchRegexp(`::1`))
Eventually(serverSession.Err).Should(Say(`TypeTXT ip\.sslip\.io\. \? \["::1"\]`))
Expect(digSession).To(Exit())
})
}
})
When("we do reverse lookups (PTR) on a random series of IPv6 addresses (fuzz testing)", func() {
It("should succeed every time", func() {
for i := 0; i < 100; i++ {
addr := testhelper.RandomIPv6Address()
digArgs = "@localhost -x " + addr.String() + " -p " + strconv.Itoa(port) + " +short"
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
expectedPtr := strings.ReplaceAll(addr.String(), ":", "-") + ".sslip.io."
Eventually(digSession).Should(Say(expectedPtr))
Eventually(digSession, 1).Should(Exit(0))
}
})
})
When("ns.sslip.io is queried", func() {
It("returns all the A records", func() {
digArgs = "@localhost ns.sslip.io +short -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`52.0.56.137`))
Eventually(digSession).Should(Say(`52.187.42.158`))
Eventually(digSession).Should(Say(`104.155.144.4`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeA ns.sslip.io. \? 52.0.56.137, 52.187.42.158, 104.155.144.4\n`))
})
It("returns all the AAAA records", func() {
digArgs = "@localhost aaaa ns.sslip.io +short -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`2600:1f18:aaf:6900::a`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeAAAA ns.sslip.io. \? 2600:1f18:aaf:6900::a\n`))
})
})
When("there are multiple MX records returned (e.g. sslip.io)", func() {
It("returns all the records", func() {
digArgs = "@localhost sslip.io mx +short -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`10 mail.protonmail.ch.`))
Eventually(digSession).Should(Say(`20 mailsec.protonmail.ch.\n$`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeMX sslip.io. \? 10 mail.protonmail.ch., 20 mailsec.protonmail.ch.\n`))
})
})
When("there are multiple NS records returned (e.g. almost any NS query)", func() {
It("returns all the records", func() {
digArgs = "@localhost example.com ns -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`flags: qr aa rd; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 4`))
Eventually(digSession).Should(Say(`;; ANSWER SECTION:`))
Eventually(digSession).Should(Say(`ns-aws.sslip.io.\n`))
Eventually(digSession).Should(Say(`ns-azure.sslip.io.\n`))
Eventually(digSession).Should(Say(`ns-gce.sslip.io.\n`))
Eventually(digSession).Should(Say(`;; ADDITIONAL SECTION:`))
Eventually(digSession).Should(Say(`ns-aws.sslip.io..*52.0.56.137\n`))
Eventually(digSession).Should(Say(`ns-aws.sslip.io..*2600:1f18:aaf:6900::a\n`))
Eventually(digSession).Should(Say(`ns-azure.sslip.io..*52.187.42.158\n`))
Eventually(digSession).Should(Say(`ns-gce.sslip.io..*104.155.144.4\n`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeNS example.com. \? ns-aws.sslip.io., ns-azure.sslip.io., ns-gce.sslip.io.\n`))
})
})
When(`there are multiple TXT records returned (e.g. SPF for sslip.io)`, func() {
It("returns the custom TXT records", func() {
digArgs = "@localhost sslip.io txt +short -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`"protonmail-verification=ce0ca3f5010aa7a2cf8bcc693778338ffde73e26"`))
Eventually(digSession).Should(Say(`"v=spf1 include:_spf.protonmail.ch mx ~all"`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeTXT sslip.io. \? \["protonmail-verification=ce0ca3f5010aa7a2cf8bcc693778338ffde73e26"\], \["v=spf1 include:_spf.protonmail.ch mx ~all"\]\n`))
})
})
When(`a record for an "_acme-challenge" domain is queried`, func() {
When(`it's an NS record`, func() {
It(`returns the NS record of the query with the "_acme-challenge." stripped`, func() {
digArgs = "@localhost _acme-challenge.fe80--.sslip.io ns -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`flags: qr rd; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 1`))
Eventually(digSession).Should(Say(`;; AUTHORITY SECTION:`))
Eventually(digSession).Should(Say(`fe80--.sslip.io.`))
Eventually(digSession).Should(Say(`;; ADDITIONAL SECTION:`))
Eventually(digSession).Should(Say(`fe80--.sslip.io..*fe80::\n`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeNS _acme-challenge.fe80--.sslip.io. \? nil, NS fe80--.sslip.io.\n`))
})
})
When(`it's a TXT record`, func() {
It(`returns the NS record of the query with the "_acme-challenge." stripped`, func() {
digArgs = "@localhost _acme-challenge.127-0-0-1.sslip.io txt -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`flags: qr rd; QUERY: 1, ANSWER: 0, AUTHORITY: 1,`))
Eventually(digSession).Should(Say(`;; AUTHORITY SECTION:\n`))
Eventually(digSession).Should(Say(`^_acme-challenge.127-0-0-1.sslip.io. 604800 IN NS 127-0-0-1.sslip.io.\n`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeTXT _acme-challenge.127-0-0-1.sslip.io. \? nil, NS 127-0-0-1.sslip.io.\n`))
})
})
When(`it's a A record`, func() {
It(`returns the NS record of the query with the "_acme-challenge." stripped`, func() {
digArgs = "@localhost _acme-challenge.127-0-0-1.sslip.io a -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
Eventually(digSession).Should(Say(`flags: qr rd; QUERY: 1, ANSWER: 0, AUTHORITY: 1,`))
Eventually(digSession).Should(Say(`;; AUTHORITY SECTION:\n`))
Eventually(digSession).Should(Say(`^_acme-challenge.127-0-0-1.sslip.io. 604800 IN NS 127-0-0-1.sslip.io.\n`))
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(serverSession.Err.Contents())).Should(MatchRegexp(`TypeA _acme-challenge.127-0-0-1.sslip.io. \? nil, NS 127-0-0-1.sslip.io.\n`))
})
})
})
When(`a TXT record for an "metrics.status.sslip.io" domain is repeatedly queried`, func() {
It("rate-limits the queries after some amount requests", func() {
// typically ~9 milliseconds / query, ~125 queries / sec on 4-core Xeon
var start, stop time.Time
throttled := false
// double the the number of queries to make sure we exhaust the channel's buffers
for i := 0; i < xip.MetricsBufferSize*2; i++ {
start = time.Now()
digArgs = "@localhost metrics.status.sslip.io txt -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
_, err := digCmd.Output()
Expect(err).ToNot(HaveOccurred())
stop = time.Now()
// we currently buffer at 250 milliseconds, so for our test we use a smidgen less because jitter
if stop.Sub(start) > 240*time.Millisecond {
throttled = true
break
}
}
Expect(throttled).To(BeTrue())
})
})
})
Describe(`The domain blocklist`, func() {
DescribeTable("when queried",
func(digArgs string, digResults string, serverLogMessage string) {
digArgs += " -p " + strconv.Itoa(port)
digCmd = exec.Command("dig", strings.Split(digArgs, " ")...)
digSession, err = Start(digCmd, GinkgoWriter, GinkgoWriter)
Expect(err).ToNot(HaveOccurred())
// we want to make sure digSession has exited because we
// want to compare the _full_ contents of the stdout in the case
// of negative assertions (e.g. "^$")
Eventually(digSession, 1).Should(Exit(0))
Eventually(string(digSession.Out.Contents())).Should(MatchRegexp(digResults))
Eventually(serverSession.Err).Should(Say(serverLogMessage))
},
Entry("an A record with a forbidden string on the left-hand side is redirected",
"@localhost raiffeisen.94.228.116.140.sslip.io +short",
`\A52.0.56.137\n\z`,
`TypeA raiffeisen.94.228.116.140.sslip.io. \? 52.0.56.137\n$`),
Entry("an A record with a forbidden string on the right-hand side is redirected",
"@localhost www.94-228-116-140.raiffeisen.com +short",
`\A52.0.56.137\n\z`,
`TypeA www.94-228-116-140.raiffeisen.com. \? 52.0.56.137\n$`),
Entry("an A record with a forbidden string embedded is redirected",
"@localhost international-raiffeisen-bank.94.228.116.140.sslip.io +short",
`\A52.0.56.137\n\z`,
`TypeA international-raiffeisen-bank.94.228.116.140.sslip.io. \? 52.0.56.137\n$`),
Entry("an A record with a forbidden string with a private IP is not redirected",
"@localhost raiffeisen.192.168.0.20.sslip.io +short",
`\A192.168.0.20\n\z`,
`TypeA raiffeisen.192.168.0.20.sslip.io. \? 192.168.0.20\n$`),
Entry("an AAAA record with a forbidden string is redirected",
"@localhost international-raiffeisen-bank.2600--.sslip.io aaaa +short",
`\A2600:1f18:aaf:6900::a\n\z`,
`TypeAAAA international-raiffeisen-bank.2600--.sslip.io. \? 2600:1f18:aaf:6900::a\n$`),
Entry("an AAAA record with a forbidden string with a private IP is NOT redirected",
"@localhost international-raiffeisen-bank.fc00--.sslip.io aaaa +short",
`\Afc00::\n\z`,
`TypeAAAA international-raiffeisen-bank.fc00--.sslip.io. \? fc00::\n$`),
Entry("an NS record with acme_challenge with a forbidden string is not delegated",
"@localhost _acme-challenge.raiffeisen.fe80--.sslip.io ns +short",
`\Ans-aws.sslip.io.\nns-azure.sslip.io.\nns-gce.sslip.io.\n\z`,
`TypeNS _acme-challenge.raiffeisen.fe80--.sslip.io. \? ns-aws.sslip.io., ns-azure.sslip.io., ns-gce.sslip.io.\n$`),
Entry("an A record with a forbidden CIDR is redirected",
"@localhost nf.43.134.66.67.sslip.io +short",
`\A52.0.56.137\n\z`,
`TypeA nf.43.134.66.67.sslip.io. \? 52.0.56.137\n$`),
Entry("an AAAA record with a forbidden CIDR is redirected",
"@localhost 2601-646-100-69f7-cafe-bebe-cafe-baba.sslip.io aaaa +short",
`\A2600:1f18:aaf:6900::a\n\z`,
`TypeAAAA 2601-646-100-69f7-cafe-bebe-cafe-baba.sslip.io. \? 2600:1f18:aaf:6900::a\n$`),
)
})
})
var listenPort = 1023 // lowest unprivileged port - 1 (immediately incremented)
// getFreePort should always succeed unless something awful has happened, e.g. port exhaustion
func getFreePort() int {
// we randomize the start based on the millisecond to avoid collisions in our test
// we also bind for a millisecond (in `isPortFree()` to make sure we don't collide
// with another test running in parallel
listenPort = (time.Now().Nanosecond() / 1000000) + 1024
for {
listenPort += 1
switch {
case listenPort > 65535:
listenPort = 1023 // we've reached the highest port, start over
case isPortFree(listenPort):
return listenPort
}
}
}
func isPortFree(port int) bool {
conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: port})
if err != nil {
return false
}
// we must Sleep() in order to avoid a race condition when tests
// are run in parallel (`ginkgo -p`) and the `ListenUDP()` and `Close()`
// we sleep for a millisecond because the port is randomized based on the millisecond.
time.Sleep(2 * time.Millisecond)
err = conn.Close()
if err != nil {
log.Printf("I couldn't close port %d", port)
return false
}
return true
}
|
package data
import (
"github.com/bububa/oppo-omni/enum"
"github.com/bububa/oppo-omni/model"
)
type QTodayTopRequest struct {
model.BaseRequest
Demision *enum.DataDemision `json:"demision,omitempty"`
}
type QTodayTopResponse struct {
model.BaseResponse
Data *QTodayTopResult `json:"data,omitempty"`
}
type QTodayTopResult struct {
AdID uint64 `json:"adId,omitempty"` // 广告ID
AdName string `json:"adName,omitempty"` // 广告名称
OrderData int64 `json:"orderData,omitempty"` // 排序数据值(如果是下载量排序,就是下载次数,如果是消耗,就是消耗金额)
}
|
package billyfs
import (
"os"
"time"
)
type dirFileInfo struct {
name string
mode os.FileMode
}
func (dirFileInfo) IsDir() bool {
return true
}
func (dirFileInfo) ModTime() time.Time {
return time.Now()
}
func (d dirFileInfo) Mode() os.FileMode {
return d.mode
}
func (d dirFileInfo) Name() string {
return d.name
}
func (dirFileInfo) Size() int64 {
return 0
}
func (dirFileInfo) Sys() interface{} {
return nil
}
|
// Copyright (c) 2013, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/zk
// GENERATED - DO NOT EDIT
package proto
import "fmt"
type Id struct {
Scheme string
Id string
}
func (m *Id) String() string { return fmt.Sprintf("%+v", *m) }
func (m *Id) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Scheme, err = in.ReadString(); err != nil {
return err
}
if m.Id, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *Id) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Scheme); err != nil {
return err
}
if err = o.WriteString(m.Id); err != nil {
return err
}
return nil
}
type ACL struct {
Perms int32
Id Id
}
func (m *ACL) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ACL) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Perms, err = in.ReadInt32(); err != nil {
return err
}
if err = (&(m.Id)).Decode(in); err != nil {
return err
}
return nil
}
func (m *ACL) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Perms); err != nil {
return err
}
if err = m.Id.Encode(o); err != nil {
return err
}
return nil
}
type Stat struct {
Czxid int64
Mzxid int64
Ctime int64
Mtime int64
Version int32
Cversion int32
Aversion int32
EphemeralOwner int64
DataLength int32
NumChildren int32
Pzxid int64
}
func (m *Stat) String() string { return fmt.Sprintf("%+v", *m) }
func (m *Stat) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Czxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Mzxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Ctime, err = in.ReadInt64(); err != nil {
return err
}
if m.Mtime, err = in.ReadInt64(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
if m.Cversion, err = in.ReadInt32(); err != nil {
return err
}
if m.Aversion, err = in.ReadInt32(); err != nil {
return err
}
if m.EphemeralOwner, err = in.ReadInt64(); err != nil {
return err
}
if m.DataLength, err = in.ReadInt32(); err != nil {
return err
}
if m.NumChildren, err = in.ReadInt32(); err != nil {
return err
}
if m.Pzxid, err = in.ReadInt64(); err != nil {
return err
}
return nil
}
func (m *Stat) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt64(m.Czxid); err != nil {
return err
}
if err = o.WriteInt64(m.Mzxid); err != nil {
return err
}
if err = o.WriteInt64(m.Ctime); err != nil {
return err
}
if err = o.WriteInt64(m.Mtime); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
if err = o.WriteInt32(m.Cversion); err != nil {
return err
}
if err = o.WriteInt32(m.Aversion); err != nil {
return err
}
if err = o.WriteInt64(m.EphemeralOwner); err != nil {
return err
}
if err = o.WriteInt32(m.DataLength); err != nil {
return err
}
if err = o.WriteInt32(m.NumChildren); err != nil {
return err
}
if err = o.WriteInt64(m.Pzxid); err != nil {
return err
}
return nil
}
type StatPersisted struct {
Czxid int64
Mzxid int64
Ctime int64
Mtime int64
Version int32
Cversion int32
Aversion int32
EphemeralOwner int64
Pzxid int64
}
func (m *StatPersisted) String() string { return fmt.Sprintf("%+v", *m) }
func (m *StatPersisted) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Czxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Mzxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Ctime, err = in.ReadInt64(); err != nil {
return err
}
if m.Mtime, err = in.ReadInt64(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
if m.Cversion, err = in.ReadInt32(); err != nil {
return err
}
if m.Aversion, err = in.ReadInt32(); err != nil {
return err
}
if m.EphemeralOwner, err = in.ReadInt64(); err != nil {
return err
}
if m.Pzxid, err = in.ReadInt64(); err != nil {
return err
}
return nil
}
func (m *StatPersisted) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt64(m.Czxid); err != nil {
return err
}
if err = o.WriteInt64(m.Mzxid); err != nil {
return err
}
if err = o.WriteInt64(m.Ctime); err != nil {
return err
}
if err = o.WriteInt64(m.Mtime); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
if err = o.WriteInt32(m.Cversion); err != nil {
return err
}
if err = o.WriteInt32(m.Aversion); err != nil {
return err
}
if err = o.WriteInt64(m.EphemeralOwner); err != nil {
return err
}
if err = o.WriteInt64(m.Pzxid); err != nil {
return err
}
return nil
}
type StatPersistedV1 struct {
Czxid int64
Mzxid int64
Ctime int64
Mtime int64
Version int32
Cversion int32
Aversion int32
EphemeralOwner int64
}
func (m *StatPersistedV1) String() string { return fmt.Sprintf("%+v", *m) }
func (m *StatPersistedV1) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Czxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Mzxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Ctime, err = in.ReadInt64(); err != nil {
return err
}
if m.Mtime, err = in.ReadInt64(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
if m.Cversion, err = in.ReadInt32(); err != nil {
return err
}
if m.Aversion, err = in.ReadInt32(); err != nil {
return err
}
if m.EphemeralOwner, err = in.ReadInt64(); err != nil {
return err
}
return nil
}
func (m *StatPersistedV1) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt64(m.Czxid); err != nil {
return err
}
if err = o.WriteInt64(m.Mzxid); err != nil {
return err
}
if err = o.WriteInt64(m.Ctime); err != nil {
return err
}
if err = o.WriteInt64(m.Mtime); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
if err = o.WriteInt32(m.Cversion); err != nil {
return err
}
if err = o.WriteInt32(m.Aversion); err != nil {
return err
}
if err = o.WriteInt64(m.EphemeralOwner); err != nil {
return err
}
return nil
}
type ConnectRequest struct {
ProtocolVersion int32
LastZxidSeen int64
TimeOut int32
SessionId int64
Passwd []byte
}
func (m *ConnectRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ConnectRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.ProtocolVersion, err = in.ReadInt32(); err != nil {
return err
}
if m.LastZxidSeen, err = in.ReadInt64(); err != nil {
return err
}
if m.TimeOut, err = in.ReadInt32(); err != nil {
return err
}
if m.SessionId, err = in.ReadInt64(); err != nil {
return err
}
if m.Passwd, err = in.ReadBuffer(); err != nil {
return err
}
return nil
}
func (m *ConnectRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.ProtocolVersion); err != nil {
return err
}
if err = o.WriteInt64(m.LastZxidSeen); err != nil {
return err
}
if err = o.WriteInt32(m.TimeOut); err != nil {
return err
}
if err = o.WriteInt64(m.SessionId); err != nil {
return err
}
if err = o.WriteBuffer(m.Passwd); err != nil {
return err
}
return nil
}
type ConnectResponse struct {
ProtocolVersion int32
TimeOut int32
SessionId int64
Passwd []byte
}
func (m *ConnectResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ConnectResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.ProtocolVersion, err = in.ReadInt32(); err != nil {
return err
}
if m.TimeOut, err = in.ReadInt32(); err != nil {
return err
}
if m.SessionId, err = in.ReadInt64(); err != nil {
return err
}
if m.Passwd, err = in.ReadBuffer(); err != nil {
return err
}
return nil
}
func (m *ConnectResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.ProtocolVersion); err != nil {
return err
}
if err = o.WriteInt32(m.TimeOut); err != nil {
return err
}
if err = o.WriteInt64(m.SessionId); err != nil {
return err
}
if err = o.WriteBuffer(m.Passwd); err != nil {
return err
}
return nil
}
type SetWatches struct {
RelativeZxid int64
DataWatches []string
ExistWatches []string
ChildWatches []string
}
func (m *SetWatches) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetWatches) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.RelativeZxid, err = in.ReadInt64(); err != nil {
return err
}
lenDataWatches, err := in.ReadInt32()
if err != nil {
return err
}
m.DataWatches = make([]string, lenDataWatches)
for i := 0; i < len(m.DataWatches); i++ {
if m.DataWatches[i], err = in.ReadString(); err != nil {
return err
}
}
lenExistWatches, err := in.ReadInt32()
if err != nil {
return err
}
m.ExistWatches = make([]string, lenExistWatches)
for i := 0; i < len(m.ExistWatches); i++ {
if m.ExistWatches[i], err = in.ReadString(); err != nil {
return err
}
}
lenChildWatches, err := in.ReadInt32()
if err != nil {
return err
}
m.ChildWatches = make([]string, lenChildWatches)
for i := 0; i < len(m.ChildWatches); i++ {
if m.ChildWatches[i], err = in.ReadString(); err != nil {
return err
}
}
return nil
}
func (m *SetWatches) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt64(m.RelativeZxid); err != nil {
return err
}
if err = o.WriteInt32(int32(len(m.DataWatches))); err != nil {
return err
}
for i := 0; i < len(m.DataWatches); i++ {
if err = o.WriteString(m.DataWatches[i]); err != nil {
return err
}
}
if err = o.WriteInt32(int32(len(m.ExistWatches))); err != nil {
return err
}
for i := 0; i < len(m.ExistWatches); i++ {
if err = o.WriteString(m.ExistWatches[i]); err != nil {
return err
}
}
if err = o.WriteInt32(int32(len(m.ChildWatches))); err != nil {
return err
}
for i := 0; i < len(m.ChildWatches); i++ {
if err = o.WriteString(m.ChildWatches[i]); err != nil {
return err
}
}
return nil
}
// RequestHeader prefixes every client request on the wire: the client
// transaction id (Xid) and the operation type code.
type RequestHeader struct {
	Xid int32
	Type int32
}
// String renders all fields with their names, for debugging and logging.
func (m *RequestHeader) String() string { return fmt.Sprintf("%+v", *m) }
// Decode reads Xid then Type from in. A nil receiver is a no-op.
func (m *RequestHeader) Decode(in Input) error {
	if m == nil {
		return nil
	}
	var err error
	if m.Xid, err = in.ReadInt32(); err != nil {
		return err
	}
	if m.Type, err = in.ReadInt32(); err != nil {
		return err
	}
	return nil
}
// Encode writes Xid then Type to o. A nil receiver is a no-op.
func (m *RequestHeader) Encode(o Output) error {
	if m == nil {
		return nil
	}
	var err error
	if err = o.WriteInt32(m.Xid); err != nil {
		return err
	}
	if err = o.WriteInt32(m.Type); err != nil {
		return err
	}
	return nil
}
type MultiHeader struct {
Type int32
Done bool
Err int32
}
func (m *MultiHeader) String() string { return fmt.Sprintf("%+v", *m) }
func (m *MultiHeader) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Type, err = in.ReadInt32(); err != nil {
return err
}
if m.Done, err = in.ReadBool(); err != nil {
return err
}
if m.Err, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *MultiHeader) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Type); err != nil {
return err
}
if err = o.WriteBool(m.Done); err != nil {
return err
}
if err = o.WriteInt32(m.Err); err != nil {
return err
}
return nil
}
type AuthPacket struct {
Type int32
Scheme string
Auth []byte
}
func (m *AuthPacket) String() string { return fmt.Sprintf("%+v", *m) }
func (m *AuthPacket) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Type, err = in.ReadInt32(); err != nil {
return err
}
if m.Scheme, err = in.ReadString(); err != nil {
return err
}
if m.Auth, err = in.ReadBuffer(); err != nil {
return err
}
return nil
}
func (m *AuthPacket) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Type); err != nil {
return err
}
if err = o.WriteString(m.Scheme); err != nil {
return err
}
if err = o.WriteBuffer(m.Auth); err != nil {
return err
}
return nil
}
type ReplyHeader struct {
Xid int32
Zxid int64
Err int32
}
func (m *ReplyHeader) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ReplyHeader) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Xid, err = in.ReadInt32(); err != nil {
return err
}
if m.Zxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Err, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *ReplyHeader) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Xid); err != nil {
return err
}
if err = o.WriteInt64(m.Zxid); err != nil {
return err
}
if err = o.WriteInt32(m.Err); err != nil {
return err
}
return nil
}
// GetDataRequest carries the node path to read and whether to leave a watch.
type GetDataRequest struct {
	Path string
	Watch bool
}

// String renders all fields with their names, for debugging and logging.
func (m *GetDataRequest) String() string { return fmt.Sprintf("%+v", *m) }

// Decode reads Path then Watch from in. A nil receiver is a no-op; on
// error the record may be partially filled.
func (m *GetDataRequest) Decode(in Input) error {
	if m == nil {
		return nil
	}
	var err error
	m.Path, err = in.ReadString()
	if err != nil {
		return err
	}
	m.Watch, err = in.ReadBool()
	return err
}

// Encode writes Path then Watch to o. A nil receiver is a no-op.
func (m *GetDataRequest) Encode(o Output) error {
	if m == nil {
		return nil
	}
	if err := o.WriteString(m.Path); err != nil {
		return err
	}
	return o.WriteBool(m.Watch)
}
type SetDataRequest struct {
Path string
Data []byte
Version int32
}
func (m *SetDataRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetDataRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Data, err = in.ReadBuffer(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *SetDataRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBuffer(m.Data); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
return nil
}
type SetDataResponse struct {
Stat Stat
}
func (m *SetDataResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetDataResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if err = (&(m.Stat)).Decode(in); err != nil {
return err
}
return nil
}
func (m *SetDataResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = m.Stat.Encode(o); err != nil {
return err
}
return nil
}
type GetSASLRequest struct {
Token []byte
}
func (m *GetSASLRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetSASLRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Token, err = in.ReadBuffer(); err != nil {
return err
}
return nil
}
func (m *GetSASLRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteBuffer(m.Token); err != nil {
return err
}
return nil
}
type SetSASLRequest struct {
Token []byte
}
func (m *SetSASLRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetSASLRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Token, err = in.ReadBuffer(); err != nil {
return err
}
return nil
}
func (m *SetSASLRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteBuffer(m.Token); err != nil {
return err
}
return nil
}
type SetSASLResponse struct {
Token []byte
}
func (m *SetSASLResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetSASLResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Token, err = in.ReadBuffer(); err != nil {
return err
}
return nil
}
func (m *SetSASLResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteBuffer(m.Token); err != nil {
return err
}
return nil
}
type CreateRequest struct {
Path string
Data []byte
Acl []ACL
Flags int32
}
func (m *CreateRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *CreateRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Data, err = in.ReadBuffer(); err != nil {
return err
}
lenAcl, err := in.ReadInt32()
if err != nil {
return err
}
m.Acl = make([]ACL, lenAcl)
for i := 0; i < len(m.Acl); i++ {
if err = (&(m.Acl[i])).Decode(in); err != nil {
return err
}
}
if m.Flags, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *CreateRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBuffer(m.Data); err != nil {
return err
}
if err = o.WriteInt32(int32(len(m.Acl))); err != nil {
return err
}
for i := 0; i < len(m.Acl); i++ {
if err = m.Acl[i].Encode(o); err != nil {
return err
}
}
if err = o.WriteInt32(m.Flags); err != nil {
return err
}
return nil
}
type DeleteRequest struct {
Path string
Version int32
}
func (m *DeleteRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *DeleteRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *DeleteRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
return nil
}
type GetChildrenRequest struct {
Path string
Watch bool
}
func (m *GetChildrenRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetChildrenRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Watch, err = in.ReadBool(); err != nil {
return err
}
return nil
}
func (m *GetChildrenRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBool(m.Watch); err != nil {
return err
}
return nil
}
type GetChildren2Request struct {
Path string
Watch bool
}
func (m *GetChildren2Request) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetChildren2Request) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Watch, err = in.ReadBool(); err != nil {
return err
}
return nil
}
func (m *GetChildren2Request) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBool(m.Watch); err != nil {
return err
}
return nil
}
type CheckVersionRequest struct {
Path string
Version int32
}
func (m *CheckVersionRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *CheckVersionRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *CheckVersionRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
return nil
}
type GetMaxChildrenRequest struct {
Path string
}
func (m *GetMaxChildrenRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetMaxChildrenRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *GetMaxChildrenRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
return nil
}
type GetMaxChildrenResponse struct {
Max int32
}
func (m *GetMaxChildrenResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetMaxChildrenResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Max, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *GetMaxChildrenResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Max); err != nil {
return err
}
return nil
}
type SetMaxChildrenRequest struct {
Path string
Max int32
}
func (m *SetMaxChildrenRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetMaxChildrenRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Max, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *SetMaxChildrenRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteInt32(m.Max); err != nil {
return err
}
return nil
}
type SyncRequest struct {
Path string
}
func (m *SyncRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SyncRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *SyncRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
return nil
}
type SyncResponse struct {
Path string
}
func (m *SyncResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SyncResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *SyncResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
return nil
}
type GetACLRequest struct {
Path string
}
func (m *GetACLRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetACLRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *GetACLRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
return nil
}
type SetACLRequest struct {
Path string
Acl []ACL
Version int32
}
func (m *SetACLRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetACLRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
lenAcl, err := in.ReadInt32()
if err != nil {
return err
}
m.Acl = make([]ACL, lenAcl)
for i := 0; i < len(m.Acl); i++ {
if err = (&(m.Acl[i])).Decode(in); err != nil {
return err
}
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *SetACLRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteInt32(int32(len(m.Acl))); err != nil {
return err
}
for i := 0; i < len(m.Acl); i++ {
if err = m.Acl[i].Encode(o); err != nil {
return err
}
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
return nil
}
type SetACLResponse struct {
Stat Stat
}
func (m *SetACLResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetACLResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if err = (&(m.Stat)).Decode(in); err != nil {
return err
}
return nil
}
func (m *SetACLResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = m.Stat.Encode(o); err != nil {
return err
}
return nil
}
type WatcherEvent struct {
Type int32
State int32
Path string
}
func (m *WatcherEvent) String() string { return fmt.Sprintf("%+v", *m) }
func (m *WatcherEvent) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Type, err = in.ReadInt32(); err != nil {
return err
}
if m.State, err = in.ReadInt32(); err != nil {
return err
}
if m.Path, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *WatcherEvent) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Type); err != nil {
return err
}
if err = o.WriteInt32(m.State); err != nil {
return err
}
if err = o.WriteString(m.Path); err != nil {
return err
}
return nil
}
type ErrorResponse struct {
Err int32
}
func (m *ErrorResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ErrorResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Err, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *ErrorResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Err); err != nil {
return err
}
return nil
}
type CreateResponse struct {
Path string
}
func (m *CreateResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *CreateResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *CreateResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
return nil
}
type ExistsRequest struct {
Path string
Watch bool
}
func (m *ExistsRequest) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ExistsRequest) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Watch, err = in.ReadBool(); err != nil {
return err
}
return nil
}
func (m *ExistsRequest) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBool(m.Watch); err != nil {
return err
}
return nil
}
type ExistsResponse struct {
Stat Stat
}
func (m *ExistsResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ExistsResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if err = (&(m.Stat)).Decode(in); err != nil {
return err
}
return nil
}
func (m *ExistsResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = m.Stat.Encode(o); err != nil {
return err
}
return nil
}
type GetDataResponse struct {
Data []byte
Stat Stat
}
func (m *GetDataResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetDataResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Data, err = in.ReadBuffer(); err != nil {
return err
}
if err = (&(m.Stat)).Decode(in); err != nil {
return err
}
return nil
}
func (m *GetDataResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteBuffer(m.Data); err != nil {
return err
}
if err = m.Stat.Encode(o); err != nil {
return err
}
return nil
}
type GetChildrenResponse struct {
Children []string
}
func (m *GetChildrenResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetChildrenResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
lenChildren, err := in.ReadInt32()
if err != nil {
return err
}
m.Children = make([]string, lenChildren)
for i := 0; i < len(m.Children); i++ {
if m.Children[i], err = in.ReadString(); err != nil {
return err
}
}
return nil
}
func (m *GetChildrenResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(int32(len(m.Children))); err != nil {
return err
}
for i := 0; i < len(m.Children); i++ {
if err = o.WriteString(m.Children[i]); err != nil {
return err
}
}
return nil
}
type GetChildren2Response struct {
Children []string
Stat Stat
}
func (m *GetChildren2Response) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetChildren2Response) Decode(in Input) error {
if m == nil {
return nil
}
var err error
lenChildren, err := in.ReadInt32()
if err != nil {
return err
}
m.Children = make([]string, lenChildren)
for i := 0; i < len(m.Children); i++ {
if m.Children[i], err = in.ReadString(); err != nil {
return err
}
}
if err = (&(m.Stat)).Decode(in); err != nil {
return err
}
return nil
}
func (m *GetChildren2Response) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(int32(len(m.Children))); err != nil {
return err
}
for i := 0; i < len(m.Children); i++ {
if err = o.WriteString(m.Children[i]); err != nil {
return err
}
}
if err = m.Stat.Encode(o); err != nil {
return err
}
return nil
}
type GetACLResponse struct {
Acl []ACL
Stat Stat
}
func (m *GetACLResponse) String() string { return fmt.Sprintf("%+v", *m) }
func (m *GetACLResponse) Decode(in Input) error {
if m == nil {
return nil
}
var err error
lenAcl, err := in.ReadInt32()
if err != nil {
return err
}
m.Acl = make([]ACL, lenAcl)
for i := 0; i < len(m.Acl); i++ {
if err = (&(m.Acl[i])).Decode(in); err != nil {
return err
}
}
if err = (&(m.Stat)).Decode(in); err != nil {
return err
}
return nil
}
func (m *GetACLResponse) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(int32(len(m.Acl))); err != nil {
return err
}
for i := 0; i < len(m.Acl); i++ {
if err = m.Acl[i].Encode(o); err != nil {
return err
}
}
if err = m.Stat.Encode(o); err != nil {
return err
}
return nil
}
type LearnerInfo struct {
Serverid int64
ProtocolVersion int32
}
func (m *LearnerInfo) String() string { return fmt.Sprintf("%+v", *m) }
func (m *LearnerInfo) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Serverid, err = in.ReadInt64(); err != nil {
return err
}
if m.ProtocolVersion, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *LearnerInfo) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt64(m.Serverid); err != nil {
return err
}
if err = o.WriteInt32(m.ProtocolVersion); err != nil {
return err
}
return nil
}
type QuorumPacket struct {
Type int32
Zxid int64
Data []byte
Authinfo []Id
}
func (m *QuorumPacket) String() string { return fmt.Sprintf("%+v", *m) }
func (m *QuorumPacket) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Type, err = in.ReadInt32(); err != nil {
return err
}
if m.Zxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Data, err = in.ReadBuffer(); err != nil {
return err
}
lenAuthinfo, err := in.ReadInt32()
if err != nil {
return err
}
m.Authinfo = make([]Id, lenAuthinfo)
for i := 0; i < len(m.Authinfo); i++ {
if err = (&(m.Authinfo[i])).Decode(in); err != nil {
return err
}
}
return nil
}
func (m *QuorumPacket) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Type); err != nil {
return err
}
if err = o.WriteInt64(m.Zxid); err != nil {
return err
}
if err = o.WriteBuffer(m.Data); err != nil {
return err
}
if err = o.WriteInt32(int32(len(m.Authinfo))); err != nil {
return err
}
for i := 0; i < len(m.Authinfo); i++ {
if err = m.Authinfo[i].Encode(o); err != nil {
return err
}
}
return nil
}
type FileHeader struct {
Magic int32
Version int32
Dbid int64
}
func (m *FileHeader) String() string { return fmt.Sprintf("%+v", *m) }
func (m *FileHeader) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Magic, err = in.ReadInt32(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
if m.Dbid, err = in.ReadInt64(); err != nil {
return err
}
return nil
}
func (m *FileHeader) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Magic); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
if err = o.WriteInt64(m.Dbid); err != nil {
return err
}
return nil
}
type TxnHeader struct {
ClientId int64
Cxid int32
Zxid int64
Time int64
Type int32
}
func (m *TxnHeader) String() string { return fmt.Sprintf("%+v", *m) }
func (m *TxnHeader) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.ClientId, err = in.ReadInt64(); err != nil {
return err
}
if m.Cxid, err = in.ReadInt32(); err != nil {
return err
}
if m.Zxid, err = in.ReadInt64(); err != nil {
return err
}
if m.Time, err = in.ReadInt64(); err != nil {
return err
}
if m.Type, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *TxnHeader) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt64(m.ClientId); err != nil {
return err
}
if err = o.WriteInt32(m.Cxid); err != nil {
return err
}
if err = o.WriteInt64(m.Zxid); err != nil {
return err
}
if err = o.WriteInt64(m.Time); err != nil {
return err
}
if err = o.WriteInt32(m.Type); err != nil {
return err
}
return nil
}
type CreateTxnV0 struct {
Path string
Data []byte
Acl []ACL
Ephemeral bool
}
func (m *CreateTxnV0) String() string { return fmt.Sprintf("%+v", *m) }
func (m *CreateTxnV0) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Data, err = in.ReadBuffer(); err != nil {
return err
}
lenAcl, err := in.ReadInt32()
if err != nil {
return err
}
m.Acl = make([]ACL, lenAcl)
for i := 0; i < len(m.Acl); i++ {
if err = (&(m.Acl[i])).Decode(in); err != nil {
return err
}
}
if m.Ephemeral, err = in.ReadBool(); err != nil {
return err
}
return nil
}
func (m *CreateTxnV0) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBuffer(m.Data); err != nil {
return err
}
if err = o.WriteInt32(int32(len(m.Acl))); err != nil {
return err
}
for i := 0; i < len(m.Acl); i++ {
if err = m.Acl[i].Encode(o); err != nil {
return err
}
}
if err = o.WriteBool(m.Ephemeral); err != nil {
return err
}
return nil
}
type CreateTxn struct {
Path string
Data []byte
Acl []ACL
Ephemeral bool
ParentCVersion int32
}
func (m *CreateTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *CreateTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Data, err = in.ReadBuffer(); err != nil {
return err
}
lenAcl, err := in.ReadInt32()
if err != nil {
return err
}
m.Acl = make([]ACL, lenAcl)
for i := 0; i < len(m.Acl); i++ {
if err = (&(m.Acl[i])).Decode(in); err != nil {
return err
}
}
if m.Ephemeral, err = in.ReadBool(); err != nil {
return err
}
if m.ParentCVersion, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *CreateTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBuffer(m.Data); err != nil {
return err
}
if err = o.WriteInt32(int32(len(m.Acl))); err != nil {
return err
}
for i := 0; i < len(m.Acl); i++ {
if err = m.Acl[i].Encode(o); err != nil {
return err
}
}
if err = o.WriteBool(m.Ephemeral); err != nil {
return err
}
if err = o.WriteInt32(m.ParentCVersion); err != nil {
return err
}
return nil
}
type DeleteTxn struct {
Path string
}
func (m *DeleteTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *DeleteTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
return nil
}
func (m *DeleteTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
return nil
}
type SetDataTxn struct {
Path string
Data []byte
Version int32
}
func (m *SetDataTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetDataTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Data, err = in.ReadBuffer(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *SetDataTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteBuffer(m.Data); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
return nil
}
type CheckVersionTxn struct {
Path string
Version int32
}
func (m *CheckVersionTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *CheckVersionTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *CheckVersionTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
return nil
}
type SetACLTxn struct {
Path string
Acl []ACL
Version int32
}
func (m *SetACLTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetACLTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
lenAcl, err := in.ReadInt32()
if err != nil {
return err
}
m.Acl = make([]ACL, lenAcl)
for i := 0; i < len(m.Acl); i++ {
if err = (&(m.Acl[i])).Decode(in); err != nil {
return err
}
}
if m.Version, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *SetACLTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteInt32(int32(len(m.Acl))); err != nil {
return err
}
for i := 0; i < len(m.Acl); i++ {
if err = m.Acl[i].Encode(o); err != nil {
return err
}
}
if err = o.WriteInt32(m.Version); err != nil {
return err
}
return nil
}
type SetMaxChildrenTxn struct {
Path string
Max int32
}
func (m *SetMaxChildrenTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *SetMaxChildrenTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Path, err = in.ReadString(); err != nil {
return err
}
if m.Max, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *SetMaxChildrenTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteString(m.Path); err != nil {
return err
}
if err = o.WriteInt32(m.Max); err != nil {
return err
}
return nil
}
type CreateSessionTxn struct {
TimeOut int32
}
func (m *CreateSessionTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *CreateSessionTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.TimeOut, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *CreateSessionTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.TimeOut); err != nil {
return err
}
return nil
}
type ErrorTxn struct {
Err int32
}
func (m *ErrorTxn) String() string { return fmt.Sprintf("%+v", *m) }
func (m *ErrorTxn) Decode(in Input) error {
if m == nil {
return nil
}
var err error
if m.Err, err = in.ReadInt32(); err != nil {
return err
}
return nil
}
func (m *ErrorTxn) Encode(o Output) error {
if m == nil {
return nil
}
var err error
if err = o.WriteInt32(m.Err); err != nil {
return err
}
return nil
}
// Txn is one opaque sub-transaction inside a multi-transaction: an
// operation type code plus the already-serialized payload bytes.
type Txn struct {
	Type int32
	Data []byte
}
// String renders all fields with their names, for debugging and logging.
func (m *Txn) String() string { return fmt.Sprintf("%+v", *m) }
// Decode reads Type then the Data buffer from in. A nil receiver is a no-op.
func (m *Txn) Decode(in Input) error {
	if m == nil {
		return nil
	}
	var err error
	if m.Type, err = in.ReadInt32(); err != nil {
		return err
	}
	if m.Data, err = in.ReadBuffer(); err != nil {
		return err
	}
	return nil
}
// Encode writes Type then the Data buffer to o. A nil receiver is a no-op.
func (m *Txn) Encode(o Output) error {
	if m == nil {
		return nil
	}
	var err error
	if err = o.WriteInt32(m.Type); err != nil {
		return err
	}
	if err = o.WriteBuffer(m.Data); err != nil {
		return err
	}
	return nil
}
// MultiTxn bundles several Txn records into one atomic multi-operation.
type MultiTxn struct {
	Txns []Txn
}
// String renders all fields with their names, for debugging and logging.
func (m *MultiTxn) String() string { return fmt.Sprintf("%+v", *m) }
// Decode reads a length-prefixed vector of Txn records from in.
// A nil receiver is a no-op. NOTE(review): the length is trusted and used
// directly in make(), so a corrupt/hostile stream could force a large
// allocation — confirm the transport bounds message sizes.
func (m *MultiTxn) Decode(in Input) error {
	if m == nil {
		return nil
	}
	var err error
	lenTxns, err := in.ReadInt32()
	if err != nil {
		return err
	}
	m.Txns = make([]Txn, lenTxns)
	for i := 0; i < len(m.Txns); i++ {
		if err = (&(m.Txns[i])).Decode(in); err != nil {
			return err
		}
	}
	return nil
}
// Encode writes the element count followed by each Txn to o.
// A nil receiver is a no-op.
func (m *MultiTxn) Encode(o Output) error {
	if m == nil {
		return nil
	}
	var err error
	if err = o.WriteInt32(int32(len(m.Txns))); err != nil {
		return err
	}
	for i := 0; i < len(m.Txns); i++ {
		if err = m.Txns[i].Encode(o); err != nil {
			return err
		}
	}
	return nil
}
|
package leetcode
/*Given an array of integers A sorted in non-decreasing order,
return an array of the squares of each number, also in sorted non-decreasing order.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/squares-of-a-sorted-array
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
//import "sort"
// sortedSquares returns the squares of the elements of A — which is sorted
// in non-decreasing order — also in non-decreasing order.
//
// Two pointers walk inward from both ends of A. Because A is sorted, the
// element with the largest absolute value (and therefore the largest
// square) is always at one of the two ends, so results are written from
// the back of the output. Runs in O(n) time with O(n) extra space; an
// empty input yields an empty (non-nil) slice.
func sortedSquares(A []int) []int {
	rlt := make([]int, len(A))
	i, j := 0, len(A)-1
	// index is the next free slot from the back of rlt.
	for index := len(A) - 1; i <= j; index-- {
		left, right := A[i]*A[i], A[j]*A[j]
		if left > right {
			rlt[index] = left
			i++
		} else {
			rlt[index] = right
			j--
		}
	}
	return rlt
}
|
package main
import ()
// LogInfo is the master log record shared by all game log types.
type LogInfo struct {
	ServerId uint64 //server ID — TODO: the first three fields must not be moved; application modules already rely on their position
	OpId uint32 //operator ID
	UserId uint64 //player ID
	EventId uint64 //event sequence ID
	MainType uint32 //main log type
	ChildType uint32 //log subtype
	RealServerId uint64 //real server ID (primary server ID after a server merge)
	OpgameId uint32 //mixed-server group ID
	AdId string //advertisement ID
	ChannelId string //user source, CPS channel ID
	Account string //platform account
	DeviceId string //device ID
	LogSendTime uint32 //time the log was sent
	ClientIp string //client IP
	Uuid string //player UUID
	UserName string //player name
	UserLevel uint32 //player level
	RegisterTime uint32 //player registration time
	PurchaseDiamond uint32 //number of diamonds purchased
	DonateDiamond uint32 //number of diamonds gifted to the player
	CorpId uint64 //corps (guild) ID
	RolePaid bool //whether this is a paying player
	RoleValid bool //whether this is a valid player
	VipLevel uint32 //VIP level
	RoleExp uint32 //player experience
	RoleEnergy uint32 //player energy
	OnlineTime uint32 //time of the current login
	OfflineTime uint32 //time of the last logout
	LastOnlineTime uint32 //time of the previous login
	LogParams //generic per-log parameter slots
	Nanosec uint64 //TODO: must remain the last field; application modules already rely on it
}
type LogParams struct {
Param1 uint32
Param2 uint32
Param3 uint32
Param4 uint32
Param5 uint32
Param6 uint32
Param7 uint32
Param8 uint32
Param9 uint32
Param10 uint32
Param11 uint64
Param12 uint64
Param13 uint64
Param14 uint64
Param15 uint64
Param16 string
Param17 string
Param18 string
Param19 string
Param20 string
}
//LogLogin is the login log record.
type LogLogin struct {
	Time uint32 `json:"time"` //timestamp
	Type uint32 `json:"type"` //login type — 1: by role ID, 2: by role name, 3: by platform account
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	AreaName uint32 `json:"areaName"` //login zone name
	UserLv uint32 `json:"userLev"` //user level
	VipLv uint32 `json:"vipLev"` //VIP level
	LeaderRank string `json:"leaderRank"` //rank
	Stage string `json:"stage"` //main-line stage position
	DailyStage string `json:"eliteStage"` //daily stage position
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	DeviceCarrier string `json:"deviceCarrier"` //carrier
	DeviceOsVer string `json:"deviceOsVer"` //device OS version
	IsNew uint32 `json:"isNew"` //1 if the account is new, otherwise 0
	IsAreaNew uint32 `json:"isAreaNew"` //1 if the role is new in this zone, otherwise 0 (original comment garbled — confirm semantics)
}

//LogRegister is the character-creation / registration log record.
type LogRegister struct {
	Time uint32 `json:"time"` //timestamp
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	DeviceCarrier string `json:"deviceCarrier"` //carrier
	DeviceOsVer string `json:"deviceOsVer"` //device OS version
}
//LogOutPut is the item-output (item gained) log record.
type LogOutPut struct {
	Time uint32 `json:"time"` //timestamp
	Type string `json:"type"` //output source
	ItemId string `json:"itemId"` //item ID
	ItemType uint32 `json:"itemType"` //item type
	ItemName string `json:"itemName"` //item name
	ItemNum uint32 `json:"itemNum"` //item count
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	AreaName uint32 `json:"areaName"` //login zone name
	UserLv uint32 `json:"userLev"` //user level
	VipLv uint32 `json:"vipLev"` //VIP level
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	DeviceCarrier string `json:"deviceCarrier"` //carrier
	DeviceOsVer string `json:"deviceOsVer"` //device OS version
}

//LogUserOnline is the concurrent-online-players log record.
type LogUserOnline struct {
	Time uint32 `json:"time"` //timestamp
	AreaName uint32 `json:"areaName"` //login zone name
	Count uint32 `json:"count"` //number of players online
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	DeviceCarrier string `json:"deviceCarrier"` //carrier
	DeviceOsVer string `json:"deviceOsVer"` //device OS version
}

//LogItemConsume is the item-consumption log record.
type LogItemConsume struct {
	Time uint32 `json:"time"` //timestamp
	Type string `json:"type"` //consumption source (original comment said "output source"; confirm)
	ItemId string `json:"itemId"` //item ID
	ItemType uint32 `json:"itemType"` //item type
	ItemName string `json:"itemName"` //item name
	ItemNum uint32 `json:"itemNum"` //item count
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	AreaName uint32 `json:"areaName"` //login zone name
	UserLv uint32 `json:"userLev"` //user level
	VipLv uint32 `json:"vipLev"` //VIP level
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	DeviceCarrier string `json:"deviceCarrier"` //carrier
	DeviceOsVer string `json:"deviceOsVer"` //device OS version
}
//LogChapterStage is the stage (level) event log record.
type LogChapterStage struct {
	Time uint32 `json:"time"` //timestamp
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	AreaName uint32 `json:"areaName"` //login zone name
	UserLv uint32 `json:"userLev"` //user level
	VipLv uint32 `json:"vipLev"` //VIP level
	Efficiency uint32 `json:"efficiency"` //stage ID (field name and comment disagree — confirm)
	StageType string `json:"stageType"` //stage category: daily or main-line
	Stage string `json:"stage"` //stage name
	IsSuccess uint32 `json:"isSuccess"` //0: success, 1: failure, 2: started
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	AppVersion string `json:"appVersion"` //app version number
}

//LogOnlineTime is the per-session online-duration log record.
type LogOnlineTime struct {
	Time uint32 `json:"time"` //timestamp
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	AreaName uint32 `json:"areaName"` //login zone name
	Duration uint32 `json:"duration"` //user online time in seconds
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	AppVersion string `json:"appVersion"` //app version number
}

//LogLevel is the level-up log record.
type LogLevel struct {
	Time uint32 `json:"time"` //timestamp
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	AreaName uint32 `json:"areaName"` //login zone name
	UserLv uint32 `json:"userLev"` //user level
	VipLv uint32 `json:"vipLev"` //VIP level
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
	AppVersion string `json:"appVersion"` //app version number
}

//LogSilverConsume is the premium-currency inflow/outflow log record.
type LogSilverConsume struct {
	Time uint32 `json:"time"` //timestamp
	UserName string `json:"userName"` //login user
	UserId string `json:"userId"` //account ID
	UserType string `json:"userType"` //account type
	AreaName uint32 `json:"areaName"` //login zone name
	UserLv uint32 `json:"userLev"` //user level
	VipLv uint32 `json:"vipLev"` //VIP level
	LeaderRank string `json:"leaderRank"` //rank
	Stage string `json:"stage"` //main-line stage position
	DailyStage string `json:"eliteStage"` //daily stage position
	ConsumeType string `json:"consumeType"` //consumption type (what the currency was spent on)
	Consume int `json:"consume"` //amount (positive for gains, negative for consumption)
	ItemId string `json:"itemId"` //item ID
	ItemType uint32 `json:"itemType"` //item type
	ItemName string `json:"itemName"` //item name
	ItemNum uint32 `json:"itemNum"` //item count
	DeviceId string `json:"deviceId"` //device ID
	DeviceMobile string `json:"deviceMobile"` //device model
	ClientIp string `json:"clientIp"` //client IP
	DeviceType string `json:"deviceType"` //platform (1 is Android, 2 is iOS)
	ChannelName string `json:"channelName"` //channel ID
}
|
package message
// MessageService drains messages from its manager and dispatches each
// one to its handler.
type MessageService struct {
	msgManager *messageManager // source of incoming messages
}

// Init is a lifecycle hook; currently a no-op.
func (ms *MessageService) Init() {
}

// Tick consumes and handles messages in an endless loop.
// NOTE(review): this loop never returns and has no stop signal; if
// Consume does not block while the queue is empty it will spin —
// confirm the intended lifecycle with the caller of Tick.
func (ms *MessageService) Tick() {
	for {
		msg := ms.msgManager.Consume()
		msg.Handle()
	}
}

// Destroy is a lifecycle hook; currently a no-op.
func (ms *MessageService) Destroy() {
}
|
package opusutil
import (
"errors"
"time"
)
// Header represents the opus packet's TOC plus extra information depending on config.
type Header struct {
	Config    *Config // decoded config entry selected by TOC bits 3-7
	NumFrames int     // number of frames signaled by the frame-count code
	Stereo    bool    // stereo flag from TOC bit 2
}

// FullDuration returns the full duration of the opus packet
// (frame duration times number of frames).
func (h *Header) FullDuration() time.Duration {
	return time.Duration(h.NumFrames) * h.Config.FrameDuration
}
// DecodeHeader parses the TOC byte (and, for frame-count code 3, the
// following frame-count byte) and returns the decoded Header.
// It returns a nil header and a non-nil error when the packet is
// invalid per the Opus framing spec (RFC 6716 section 3.1).
func DecodeHeader(packet []byte) (header *Header, err error) {
	if len(packet) < 1 {
		err = errors.New("Invalid opus packet, len < 1")
		return
	}
	toc := packet[0]
	framesBits := toc & 0x3          // Framecount code in bits 0-1
	stereo := (toc>>2)&1 != 0        // Stereo flag in bit 2
	ConfigIndex := (toc >> 3) & 0x1f // Config index in bits 3-7
	config := ConfigTable[ConfigIndex]

	// Read number of frames depending on framesBits
	numFrames := -1
	switch framesBits {
	case 0:
		numFrames = 1
	case 1, 2: // 1 = two equal frames, 2 = two different frames
		numFrames = 2
	case 3: // Signaled number of frames (upto max 120ms of audio)
		// This packet requires 2 bytes at min
		if len(packet) < 2 {
			err = errors.New("Invalid opus packet, len < 2 && c = 3")
			return
		}
		numFrames = int(packet[1] & 0x3f) // Count in bits 0-5
		if numFrames < 1 {
			err = errors.New("Invalid opus packet, framcount < 1")
			// BUG FIX: previously execution fell through here, so the
			// caller received a non-nil header together with the error.
			return
		}
	}
	header = &Header{
		Config:    config,
		NumFrames: numFrames,
		Stereo:    stereo,
	}
	return
}
// Codec identifies the codec mode signaled by the TOC config bits.
type Codec int

// Codec modes. These are now explicitly typed as Codec — the original
// declared untyped int constants even though Config.Codec is a Codec,
// which weakened type checking at use sites.
const (
	SILK Codec = iota
	CELT
	Hybrid
)
// Config represents the config bits in the TOC byte.
type Config struct {
	Codec         Codec         // codec mode (SILK / CELT / Hybrid)
	FrameDuration time.Duration // duration of a single frame
	Bandwidth     *Bandwidth    // audio bandwidth class
}

// Bandwidth describes an audio bandwidth class.
// NOTE(review): units are not stated in SOURCE — the values used below
// (e.g. NB = {4, 8}) look like kHz bandwidth / kHz sample rate; confirm.
type Bandwidth struct {
	Bandwidth  int
	SampleRate int
}
// Preset bandwidth classes referenced by ConfigTable.
var (
	NB  = &Bandwidth{4, 8}   // Narrow band
	MB  = &Bandwidth{6, 12}  // Medium
	WB  = &Bandwidth{8, 16}  // Wide
	SWB = &Bandwidth{12, 24} // Super-wide
	FB  = &Bandwidth{20, 48} // Full
)
// Opus config mapping table: entry i describes TOC config index i.
// NOTE(review): layout matches the config-number table in RFC 6716
// section 3.1 (SILK 0-11, Hybrid 12-15, CELT 16-31) — verify against the
// RFC before relying on individual entries.
var ConfigTable = [32]*Config{
	// Silk
	{SILK, 10000 * time.Microsecond, NB}, {SILK, 20000 * time.Microsecond, NB}, {SILK, 40000 * time.Microsecond, NB}, {SILK, 60000 * time.Microsecond, NB},
	{SILK, 10000 * time.Microsecond, MB}, {SILK, 20000 * time.Microsecond, MB}, {SILK, 40000 * time.Microsecond, MB}, {SILK, 60000 * time.Microsecond, MB},
	{SILK, 10000 * time.Microsecond, WB}, {SILK, 20000 * time.Microsecond, WB}, {SILK, 40000 * time.Microsecond, WB}, {SILK, 60000 * time.Microsecond, WB},
	// Hybrid
	{Hybrid, 10000 * time.Microsecond, SWB}, {Hybrid, 20000 * time.Microsecond, SWB},
	{Hybrid, 10000 * time.Microsecond, FB}, {Hybrid, 20000 * time.Microsecond, FB},
	// CELT
	{CELT, 2500 * time.Microsecond, NB}, {CELT, 5000 * time.Microsecond, NB}, {CELT, 10000 * time.Microsecond, NB}, {CELT, 20000 * time.Microsecond, NB},
	{CELT, 2500 * time.Microsecond, WB}, {CELT, 5000 * time.Microsecond, WB}, {CELT, 10000 * time.Microsecond, WB}, {CELT, 20000 * time.Microsecond, WB},
	{CELT, 2500 * time.Microsecond, SWB}, {CELT, 5000 * time.Microsecond, SWB}, {CELT, 10000 * time.Microsecond, SWB}, {CELT, 20000 * time.Microsecond, SWB},
	{CELT, 2500 * time.Microsecond, FB}, {CELT, 5000 * time.Microsecond, FB}, {CELT, 10000 * time.Microsecond, FB}, {CELT, 20000 * time.Microsecond, FB},
}
|
package metadata
import (
"time"
"github.com/go-gormigrate/gormigrate/v2"
"gorm.io/gorm"
)
// getMigrations returns the ordered list of schema migrations for the
// metadata backend. Each migration redeclares only the struct fields it
// needs (a gormigrate convention, so later model changes cannot alter
// the meaning of an already-applied migration), calls b.clean first,
// and then lets gorm's AutoMigrate apply the delta. An optional
// migrationFilter from the config can prune the list (used by tests).
func (b *Backend) getMigrations() []*gormigrate.Migration {
	migrations := []*gormigrate.Migration{
		{
			ID: "0001-initial",
			Migrate: func(tx *gorm.DB) error {
				// Snapshot of the full initial schema.
				type File struct {
					ID             string `json:"id"`
					UploadID       string `json:"-" gorm:"size:256;constraint:OnUpdate:RESTRICT,OnDelete:RESTRICT;"`
					Name           string `json:"fileName"`
					Status         string `json:"status"`
					Md5            string `json:"fileMd5"`
					Type           string `json:"fileType"`
					Size           int64  `json:"fileSize"`
					Reference      string `json:"reference"`
					BackendDetails string `json:"-"`
					CreatedAt      time.Time `json:"createdAt"`
				}
				type Upload struct {
					ID        string `json:"id"`
					TTL       int    `json:"ttl"`
					DownloadDomain string `json:"downloadDomain" gorm:"-"`
					RemoteIP       string `json:"uploadIp,omitempty"`
					Comments       string `json:"comments"`
					Files   []*File `json:"files"`
					UploadToken string `json:"uploadToken,omitempty"`
					User        string `json:"user,omitempty" gorm:"index:idx_upload_user"`
					Token       string `json:"token,omitempty" gorm:"index:idx_upload_user_token"`
					IsAdmin bool `json:"admin" gorm:"-"`
					Stream    bool `json:"stream"`
					OneShot   bool `json:"oneShot"`
					Removable bool `json:"removable"`
					ProtectedByPassword bool   `json:"protectedByPassword"`
					Login               string `json:"login,omitempty"`
					Password            string `json:"password,omitempty"`
					CreatedAt time.Time      `json:"createdAt"`
					DeletedAt gorm.DeletedAt `json:"-" gorm:"index:idx_upload_deleted_at"`
					ExpireAt  *time.Time     `json:"expireAt" gorm:"index:idx_upload_expire_at"`
				}
				type Token struct {
					Token   string `json:"token" gorm:"primary_key"`
					Comment string `json:"comment,omitempty"`
					UserID  string `json:"-" gorm:"size:256;constraint:OnUpdate:RESTRICT,OnDelete:RESTRICT;"`
					CreatedAt time.Time `json:"createdAt"`
				}
				type User struct {
					ID       string `json:"id,omitempty"`
					Provider string `json:"provider"`
					Login    string `json:"login,omitempty"`
					Password string `json:"-"`
					Name     string `json:"name,omitempty"`
					Email    string `json:"email,omitempty"`
					IsAdmin  bool   `json:"admin"`
					Tokens []*Token `json:"tokens,omitempty"`
					CreatedAt time.Time `json:"createdAt"`
				}
				type Setting struct {
					Key   string `gorm:"primary_key"`
					Value string
				}
				_, _, err := b.clean(tx)
				if err != nil {
					return err
				}
				b.log.Warning("Applying database migration 0001-initial")
				return b.setupTxForMigration(tx).AutoMigrate(&Upload{}, &File{}, &User{}, &Token{}, &Setting{})
			},
			Rollback: func(tx *gorm.DB) error {
				// No automatic rollback: operator intervention required.
				b.log.Criticalf("Something went wrong. Please check database status manually")
				return nil
			},
		}, {
			ID: "0002-user-limits",
			Migrate: func(tx *gorm.DB) error {
				// Adds per-user upload limits.
				type User struct {
					MaxFileSize int64 `json:"maxFileSize"`
					MaxTTL      int   `json:"maxTTL"`
				}
				_, _, err := b.clean(tx)
				if err != nil {
					return err
				}
				b.log.Warning("Applying database migration 0002-user-limits")
				return b.setupTxForMigration(tx).AutoMigrate(&User{})
			},
			Rollback: func(tx *gorm.DB) error {
				b.log.Criticalf("Something went wrong. Please check database status manually")
				return nil
			},
		}, {
			ID: "0003-extend-ttl",
			Migrate: func(tx *gorm.DB) error {
				// Adds the extend-TTL flag to uploads.
				type Upload struct {
					ExtendTTL bool `json:"extend_ttl"`
				}
				_, _, err := b.clean(tx)
				if err != nil {
					return err
				}
				b.log.Warning("Applying database migration 0003-extend-ttl")
				return b.setupTxForMigration(tx).AutoMigrate(&Upload{})
			},
			Rollback: func(tx *gorm.DB) error {
				b.log.Criticalf("Something went wrong. Please check database status manually")
				return nil
			},
		}, {
			ID: "0004-max-user-size",
			Migrate: func(tx *gorm.DB) error {
				// Adds the per-user total storage quota.
				type User struct {
					MaxUserSize int64 `json:"maxUserSize"`
				}
				_, _, err := b.clean(tx)
				if err != nil {
					return err
				}
				b.log.Warning("Applying database migration 0004-user-max-user-size")
				return b.setupTxForMigration(tx).AutoMigrate(&User{})
			},
			Rollback: func(tx *gorm.DB) error {
				b.log.Criticalf("Something went wrong. Please check database status manually")
				return nil
			},
		},
	}
	// Allow the configuration to filter/override migrations (test hook).
	if b.Config.migrationFilter != nil {
		migrations = b.Config.migrationFilter(migrations)
	}
	return migrations
}
|
package main
import (
_ "github.com/go-sql-driver/mysql" //加载mysql
"github.com/jinzhu/gorm"
)
// DB is the shared gorm handle initialised by DbInit.
var DB *gorm.DB
// err holds the last connection error (package-level; see DbInit).
var err error

// DbInit opens the MySQL connection, stores it in the package-level DB
// and also returns it. It panics if the connection cannot be opened.
// SECURITY(review): credentials and host are hard-coded in the DSN —
// move them to configuration / environment variables.
func DbInit() (db *gorm.DB) {
	DB, err = gorm.Open("mysql", "root:.aA1451418@tcp(123.207.88.76:3306)/updateflow?charset=utf8&parseTime=True&loc=Local")
	if err != nil {
		panic(err.Error())
	}
	//defer db.Close()
	// fmt.Println("运行数据库")
	return DB
}

// main just initialises the database connection.
func main() {
	DbInit()
}
|
package game_map
import (
"github.com/steelx/go-rpg-cgm/combat"
"github.com/steelx/go-rpg-cgm/utilz"
"github.com/steelx/go-rpg-cgm/world"
)
// CombatSelectorFunc bundles the named target-selection strategies so
// they can be accessed as fields (see the CombatSelector value below).
type CombatSelectorFunc struct {
	RandomAlivePlayer,
	WeakestEnemy,
	SideEnemy,
	SelectAll func(state *CombatState) []*combat.Actor
}
// CombatSelectorMap maps the world package's selector identifiers to the
// corresponding target-selection functions.
var CombatSelectorMap = map[string]func(state *CombatState) []*combat.Actor{
	world.RandomAlivePlayer: RandomAlivePlayer,
	world.WeakestEnemy:      WeakestEnemy,
	world.SideEnemy:         SideEnemy,
	world.SelectAll:         SelectAll,
	world.MostHurtEnemy: func(state *CombatState) []*combat.Actor {
		return WeakestActor(state.Actors[enemies], true)
	},
	world.MostHurtParty: func(state *CombatState) []*combat.Actor {
		return WeakestActor(state.Actors[party], true)
	},
	world.MostDrainedParty: func(state *CombatState) []*combat.Actor {
		return MostDrainedActor(state.Actors[party], true)
	},
	world.DeadParty: func(state *CombatState) []*combat.Actor {
		return DeadActors(state.Actors[party])
	},
}

// CombatSelector exposes the four basic strategies as struct fields.
var CombatSelector = CombatSelectorFunc{
	RandomAlivePlayer: RandomAlivePlayer,
	WeakestEnemy:      WeakestEnemy,
	SideEnemy:         SideEnemy,
	SelectAll:         SelectAll,
}
// RandomAlivePlayer picks one random party member whose HpNow stat is
// above zero.
// NOTE(review): assumes at least one party member is alive — with an
// empty alive list the random index lookup panics; confirm callers
// guarantee this invariant.
func RandomAlivePlayer(state *CombatState) []*combat.Actor {
	var alive []*combat.Actor
	for _, actor := range state.Actors[party] {
		if actor.Stats.Get("HpNow") > 0 {
			alive = append(alive, actor)
		}
	}
	if len(alive) == 1 {
		return []*combat.Actor{alive[0]}
	}
	return []*combat.Actor{alive[utilz.RandInt(0, len(alive)-1)]}
}
// WeakestEnemy returns the single enemy with the lowest current HP.
// NOTE(review): with an empty enemy list this returns a slice holding
// one nil actor — confirm callers never invoke it with no enemies.
func WeakestEnemy(state *CombatState) []*combat.Actor {
	var weakest *combat.Actor
	lowest := 99999.9 // sentinel above any plausible HP value
	for _, enemy := range state.Actors[enemies] {
		if hp := enemy.Stats.Get("HpNow"); hp < lowest {
			lowest = hp
			weakest = enemy
		}
	}
	return []*combat.Actor{weakest}
}
// SideEnemy returns the whole enemy side as the target list.
// Note: this returns the live slice from state, not a copy.
func SideEnemy(state *CombatState) []*combat.Actor {
	return state.Actors[enemies]
}
// SelectAll returns every combatant: enemies first, then the party.
//
// BUG FIX: the original returned append(state.Actors[enemies], party...),
// which can write the party members into the enemy slice's spare
// capacity (slice aliasing) and silently corrupt state.Actors. A fresh
// slice is allocated instead, so neither side's backing array is touched.
func SelectAll(state *CombatState) []*combat.Actor {
	all := make([]*combat.Actor, 0, len(state.Actors[enemies])+len(state.Actors[party]))
	all = append(all, state.Actors[enemies]...)
	all = append(all, state.Actors[party]...)
	return all
}
// WeakestActor returns the actor with the lowest current HP. When
// onlyCheckHurt is true, only actors whose HP is below their maximum are
// considered. When no candidate matches, the first actor is returned as
// a fallback (panics on an empty slice, as the original did).
func WeakestActor(actors []*combat.Actor, onlyCheckHurt bool) []*combat.Actor {
	var best *combat.Actor
	lowest := 99999.9 // sentinel above any plausible HP value
	for _, a := range actors {
		hp := a.Stats.Get("HpNow")
		if onlyCheckHurt && hp >= a.Stats.Get("HpMax") {
			continue // unhurt actors are ineligible in hurt-only mode
		}
		if hp < lowest {
			lowest = hp
			best = a
		}
	}
	if best == nil {
		return []*combat.Actor{actors[0]}
	}
	return []*combat.Actor{best}
}
// MostDrainedActor returns the actor with the lowest current MP. When
// onlyCheckDrained is true, only actors whose MP is below their maximum
// are considered. When no candidate matches, the first actor is returned
// as a fallback (panics on an empty slice, as the original did).
func MostDrainedActor(actors []*combat.Actor, onlyCheckDrained bool) []*combat.Actor {
	var best *combat.Actor
	lowest := 99999.9 // sentinel above any plausible MP value
	for _, a := range actors {
		mp := a.Stats.Get("MpNow")
		if onlyCheckDrained && mp >= a.Stats.Get("MpMax") {
			continue // full-MP actors are ineligible in drained-only mode
		}
		if mp < lowest {
			lowest = mp
			best = a
		}
	}
	if best == nil {
		return []*combat.Actor{actors[0]}
	}
	return []*combat.Actor{best}
}
// DeadActors returns the first actor whose HP is zero or below, or the
// first actor as a fallback when everyone is alive (panics on an empty
// slice, as the original did).
func DeadActors(actors []*combat.Actor) []*combat.Actor {
	for _, candidate := range actors {
		if candidate.Stats.Get("HpNow") <= 0 {
			return []*combat.Actor{candidate}
		}
	}
	return []*combat.Actor{actors[0]}
}
|
package mainWindow
import (
"errors"
"github.com/myProj/scaner/new/include/unarchive"
"github.com/therecipe/qt/widgets"
)
// newErrorTable builds the read-only three-column error table
// (file name / errors / file type).
// NOTE(review): a dummy row is added and immediately removed — this
// looks like a workaround to force the table/columns to initialise;
// confirm it is still needed.
func newErrorTable()*widgets.QTableWidget{
	table := widgets.NewQTableWidget(nil)
	// make the table read-only
	table.SetEditTriggers(widgets.QAbstractItemView__NoEditTriggers)
	table.SetColumnCount(3)
	table.SetHorizontalHeaderLabels([]string{"Имя файла","Ошибки","Тип файла"})
	addErrorsToTable(table,unarchive.ArchInfoError{ ArchiveName: "",OpenError: errors.New("test")},".test")
	table.RemoveRow(0)
	return table
}
// NewNonScanTable builds the read-only two-column table of files that
// were not scanned (file name / file type).
// NOTE(review): same dummy add/remove-row initialisation trick as
// newErrorTable — confirm it is still needed.
func NewNonScanTable()*widgets.QTableWidget{
	table := widgets.NewQTableWidget(nil)
	// make the table read-only
	table.SetEditTriggers(widgets.QAbstractItemView__NoEditTriggers)
	table.SetColumnCount(2)
	table.SetRowCount(0)
	table.SetHorizontalHeaderLabels([]string{"Имя файла","Тип файла"})
	addNonScanItem(table,".test",".test")
	table.RemoveRow(0)
	return table
}
func RemoveAllRows(table *widgets.QTableWidget){
for table.RowCount()-1 > 0{
table.RemoveRow(1)
}
if table.RowCount() > 0 {
table.RemoveRow(table.RowCount()-1)
}
} |
package qywxapi
import (
"github.com/thelark/request"
"fmt"
"reflect"
)
// cgiBin is the root node of the Enterprise WeChat cgi-bin API:
// it holds corp credentials and the access token obtained via GetToken.
type cgiBin struct {
	CorpID      string // enterprise corp ID
	Secret      string // application secret
	AccessToken string // cached access token (set via set/GetToken)
}
// set assigns v to the exported string field named k via reflection.
// Unknown field names are ignored silently.
func (t *cgiBin) set(k, v string) {
	val := reflect.ValueOf(t).Elem()
	if _, ok := val.Type().FieldByName(k); ok {
		val.FieldByName(k).SetString(v)
	}
}
// Child nodes --------------------------------------------------------------

// Department builds the department sub-client, inheriting this node's
// access token and applying any extra options.
func (t *cgiBin) Department(opts ...option) *cgiBinDepartment {
	self := &cgiBinDepartment{}
	self.AccessToken = t.AccessToken
	for _, opt := range opts {
		opt(self)
	}
	return self
}
// Methods ------------------------------------------------------------------

// cgiBinToken is the response payload of the gettoken endpoint.
type cgiBinToken struct {
	*ErrorReturn
	AccessToken string `json:"access_token"` // token used to authorise subsequent API calls
	ExpiresIn   int    `json:"expires_in"`   // token lifetime in seconds
}
// GetToken fetches the access_token — the first step for calling any
// Enterprise WeChat API. It acts as a login credential that all other
// business APIs depend on for authentication, so callers must make sure
// the access_token they use was issued for the right application.
// On success the token is also cached on the receiver via set.
// GET https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=ID&corpsecret=SECRET
func (t *cgiBin) GetToken() (*cgiBinToken, error) {
	rsp := new(cgiBinToken)
	if err := wxRequest.Get(
		fmt.Sprintf("%s/gettoken", getBasePath()),
		request.WithParam("corpid", t.CorpID),
		request.WithParam("corpsecret", t.Secret),
		request.WithResponse(&rsp),
	); err != nil {
		return nil, err
	}
	if err := checkError(rsp); err != nil {
		return nil, err
	}
	t.set("AccessToken", rsp.AccessToken)
	return rsp, nil
}
|
/*
There is a game in which you try not to repeat a word while your opponent tries to see if you have repeated one.
"THE RAIN IN SPAIN" has no repeats.
"IN THE RAIN AND THE SNOW" repeats THE.
"THE RAIN IN SPAIN IN THE PLAIN" repeats THE and IN.
Write a program to test a phrase.
Input
Input is a line containing words separated by single spaces, where a word consists of one or more uppercase letters.
A line contains no more than 80 characters.
Output
The output is "yes" if no word is repeated, and "no" if one or more words repeat.
*/
package main
import "strings"
// main runs the three example phrases from the problem statement as
// self-checks.
func main() {
	assert(norepeats("THE RAIN IN SPAIN") == "yes")
	assert(norepeats("IN THE RAIN AND THE SNOW") == "no")
	assert(norepeats("THE RAIN IN SPAIN IN THE PLAIN") == "no")
}
// assert panics when x is false; used as a tiny self-test helper.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// norepeats reports whether the space-separated phrase s contains no
// repeated word: it returns "yes" when every word is unique and "no"
// otherwise, matching the puzzle's required output.
//
// Cleanup: the original iterated with `for _, t := range t`, shadowing
// the slice with each word — legal but confusing; the loop variable is
// now named distinctly and the set uses the zero-width struct{} idiom.
func norepeats(s string) string {
	seen := make(map[string]struct{})
	for _, word := range strings.Split(s, " ") {
		if _, dup := seen[word]; dup {
			return "no"
		}
		seen[word] = struct{}{}
	}
	return "yes"
}
|
package query
import (
"encoding/json"
"fmt"
"github.com/juju/errgo"
"github.com/mezis/klask/index"
)
// Query is a runnable, composable query node.
type Query interface {
	// `records` is a ZSET key, containing subset of records IDs.
	// The key returned should contain a subset of `sourceKey`.
	// Pass `nil` as a context to the top-level query.
	Run(records string, context Context) (string, error)
}
// query_t is the toplevel query type; it wraps the single root query
// built by UnmarshalJSON.
type query_t struct {
	query Query
}
// New returns an empty toplevel query.
// NOTE(review): the idx parameter is ignored and the inner query is nil
// until UnmarshalJSON populates it — calling Run before unmarshalling
// will nil-panic. The commented-out code below suggests idx was meant to
// be stored on the struct; confirm intent.
func New(idx index.Index) Query {
	q := new(query_t)
	return q
}
// UnmarshalJSON parses a JSON query in two phases: a syntactic phase
// (plain json.Unmarshal into an interface tree) and a semantic phase
// (query_generic_t.parse builds the query AST), then stores the result
// as this toplevel query's root.
// NOTE(review): the two fmt.Printf calls are debug output left in
// library code and should be removed together with the fmt import (it
// has no other use in this file).
func (self *query_t) UnmarshalJSON(data []byte) error {
	var parsed interface{}
	// "syntactic" parsing: get a structure tree form JSON
	err := json.Unmarshal(data, &parsed)
	if err != nil {
		return errgo.Mask(err)
	}
	fmt.Printf("parsed JSON query:\n%+v\n\n%#v\n", parsed, parsed)
	// "semantic" parsing: build a tree of queries
	q := new(query_generic_t)
	err = q.parse(parsed)
	if err != nil {
		return errgo.Mask(err)
	}
	fmt.Printf("query AST:\n%+v\n\n%#v\n", q, q)
	self.query = q
	return nil
}
// Run delegates to the wrapped root query, masking any error with errgo
// so the failure location is recorded.
func (self *query_t) Run(records string, context Context) (string, error) {
	key, err := self.query.Run(records, context)
	if err != nil {
		return "", errgo.Mask(err)
	}
	return key, nil
}
// func (self *query_t) cleanKey(key string) error {
// conn := self.idx.Conn()
// defer conn.Close()
// _, err := conn.Do("DEL", key)
// if err != nil {
// return errgo.Mask(err)
// }
// return nil
// }
// func (self *query_t) Run(offset int, ttl int) ([]Id, error) {
// // FIXME: generate random temp keys
// // FIXME: as a second step, general hashed result keys for caching
// resultKey := "temp:1"
// defer self.cleanKey(resultKey) // ignoring errors
// sourceKey := self.idx.RecordsKey()
// for _, fi := range self.Filters {
// err := fi.run(sourceKey, resultKey)
// if err != nil {
// return nil, errgo.Mask(err)
// }
// sourceKey = resultKey
// }
// conn := self.idx.Conn()
// defer conn.Close()
// reply, err := redis.Values(conn.Do("SMEMBERS", resultKey))
// if err != nil {
// return nil, errgo.Mask(err)
// }
// ids := make([]Id, len(reply))
// _, err = redis.Scan(reply, ids)
// if err != nil {
// return nil, errgo.Mask(err)
// }
// return ids, nil
// }
|
package handler
import (
"net/http"
"testing"
"github.com/golang/protobuf/ptypes"
analysispb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/analysis/v1"
ptypesv2 "github.com/jinmukeji/proto/v3/gen/micro/idl/ptypes/v2"
"github.com/micro/go-micro/v2/metadata"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// WeeklyReportTestSuite is the unit-test suite for weekly_report.
type WeeklyReportTestSuite struct {
	suite.Suite
	analysisManagerService *AnalysisManagerService // service under test
}
// SetupSuite prepares the test environment by constructing the service
// under test once for the whole suite.
func (suite *WeeklyReportTestSuite) SetupSuite() {
	suite.analysisManagerService = newAnalysisManagerServiceForTest()
}
// TestGetWeeklyReportContent signs in, requests the weekly analyze
// result and checks the physical-therapy-index module values.
func (suite *WeeklyReportTestSuite) TestGetWeeklyReportContent() {
	t := suite.T()
	reqSignIn, ctx := getSignInReq()
	userId := 1
	respSignIn, err := suite.analysisManagerService.jinmuidSvc.UserSignInByUsernamePassword(ctx, reqSignIn)
	// BUG FIX: the sign-in error was previously asserted only after
	// respSignIn.AccessToken had been dereferenced; check it first and
	// bail out so a failed sign-in reports cleanly instead of panicking.
	if !assert.Nil(t, err) {
		return
	}
	ctx = metadata.NewContext(ctx, map[string]string{
		http.CanonicalHeaderKey(AccessTokenKey): respSignIn.AccessToken,
	})
	reqGetWeeklyReportContent := &analysispb.GetWeeklyAnalyzeResultRequest{
		UserId:   int32(userId),
		Language: ptypesv2.Language_LANGUAGE_SIMPLIFIED_CHINESE,
		Cid:      "cid",
		CInfo: &analysispb.CInfo{
			C0: 2,
			C1: 4,
			C2: 2,
			C3: 1,
			C4: 0,
			C5: 6,
			C6: 4,
			C7: 2,
		},
		PhysicalDialectics: []string{"T0017", "TZN0001", "T0017", "TZN0001", "T0017", "TZN0001", "T0017", "TZN0001", "T0017", "TZN0001", "T0017", "TZN0001", "T0017", "TZN000"},
	}
	respGetWeeklyReportContent := &analysispb.GetWeeklyAnalyzeResultResponse{}
	ctx = metadata.NewContext(ctx, map[string]string{
		http.CanonicalHeaderKey(ClientIDKey):    "jm-10005",
		http.CanonicalHeaderKey(AccessTokenKey): respSignIn.AccessToken,
	})
	err = suite.analysisManagerService.GetWeeklyAnalyzeResult(ctx, reqGetWeeklyReportContent, respGetWeeklyReportContent)
	// Guard before dereferencing the response's Report field below.
	if !assert.Nil(t, err) {
		return
	}
	physicalTherapyIndex := &analysispb.PhysicalTherapyIndexModule{}
	err = ptypes.UnmarshalAny(respGetWeeklyReportContent.Report.Modules["physical_therapy_index"], physicalTherapyIndex)
	if !assert.Nil(t, err) {
		return
	}
	assert.Equal(t, int32(25), physicalTherapyIndex.GetF0().GetValue())
	assert.Equal(t, int32(100), physicalTherapyIndex.GetF1().GetValue())
	assert.Equal(t, int32(0), physicalTherapyIndex.GetF2().GetValue())
	assert.Equal(t, int32(0), physicalTherapyIndex.GetF3().GetValue())
}
// TestWeeklyReportTestSuite runs the test suite.
func TestWeeklyReportTestSuite(t *testing.T) {
	suite.Run(t, new(WeeklyReportTestSuite))
}
|
package main
// countPrimeSetBits returns how many integers in [L, R] have a prime
// number of set bits in their binary representation (LeetCode 762).
//
// BUG FIX: the original prime table {2,3,5,7,11,13,17,19,23,31} was
// missing 29 and 37, so inputs whose popcount is 29 or 37 were wrongly
// rejected. The table is also widened to cover every prime below 64,
// the maximum possible popcount of a 64-bit int.
func countPrimeSetBits(L int, R int) int {
	isPrime := [64]bool{}
	for _, p := range []int{2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61} {
		isPrime[p] = true
	}
	ans := 0
	for i := L; i <= R; i++ {
		if isPrime[countOne(i)] {
			ans++
		}
	}
	return ans
}

// countOne returns the number of set bits in a, using Kernighan's trick:
// a & (a-1) clears the lowest set bit, so the loop runs once per 1-bit.
func countOne(a int) int {
	count := 0
	for a != 0 {
		a = a & (a - 1)
		count++
	}
	return count
}
/*
Summary
1. The key to this problem is counting how many 1-bits the number x has.
*/
|
package main
import (
"context"
"errors"
"fmt"
"os"
"strings"
"github.com/kudrykv/latex-yearly-planner/app"
)
// code is the process exit status, set by shouldExit on failure and
// consumed by the deferred os.Exit in main.
var code int
// main runs the planner app; the deferred os.Exit runs after shouldExit
// has had a chance to set a non-zero code.
func main() {
	ctx := context.Background()
	defer func() { os.Exit(code) }()
	shouldExit("", app.New().RunContext(ctx, os.Args))
}
// shouldExit reports whether err is fatal; on a non-nil error it marks
// the process exit code as 1 and prints the error chain via printErr.
func shouldExit(msg string, err error) bool {
	if err != nil {
		code = 1
		printErr(msg, err)
		return true
	}
	return false
}
// nolint:forbidigo
func printErr(msg string, err error) {
if len(msg) > 0 {
err = fmt.Errorf("%s: %w", msg, err)
}
indent := 0
a, b := errors.Unwrap(err), err
for a != nil {
index := strings.Index(b.Error(), a.Error())
fmt.Print(strings.Repeat(" ", indent))
fmt.Println(b.Error()[0:index])
indent += 2
a, b = errors.Unwrap(a), a
}
fmt.Print(strings.Repeat(" ", indent))
fmt.Println(b.Error())
}
|
package socketman
import (
"crypto/tls"
"io"
"net"
)
//Client is a socket client.
type Client struct {
	//Config is a configuration for new incoming connections
	Config
}
//Connect opens a tcp connection on server behind addr and calls handler.
//
//The connection is closed after the handler returns.
//
//The syntax of addr is "host:port", like "127.0.0.1:8080".
//See net.Dial and tls.Dial for more details about address syntax.
func (c *Client) Connect(addr string, handler Handler) error {
	var (
		raw net.Conn
		err error
	)
	if tlsCfg := c.Config.TLSConfig; tlsCfg != nil {
		// Dial with a clone so the caller's TLS config is never mutated.
		raw, err = tls.Dial("tcp", addr, cloneTLSClientConfig(tlsCfg))
	} else {
		raw, err = net.Dial("tcp", addr)
	}
	if err != nil {
		return err
	}
	conn := newconn(raw, c.Config)
	handler.ServeSocket(conn)
	return conn.Close()
}
//ConnectFunc calls Connect, adapting a plain function to the Handler
//interface via HandlerFunc.
func (c *Client) ConnectFunc(addr string, handler func(io.ReadWriter)) error {
	return c.Connect(addr, HandlerFunc(handler))
}
|
package rpc
import (
"context"
"fmt"
"github.com/benka-me/users/go-pkg/hash"
"github.com/benka-me/users/go-pkg/users"
"go.mongodb.org/mongo-driver/bson"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// insertRegisterProcess drains the RegisterChan queue forever, hashing
// each request's password and inserting the new user document if the
// username is still available at insert time.
func (app *App) insertRegisterProcess() {
	for {
		r := <-app.RegisterChan
		// Typo fix: was "revceived".
		fmt.Println("Register process received :", r.Username)
		pwd, err := hash.HashPassword(r.Password)
		if err != nil {
			// BUG FIX: the hash error was silently discarded; a failed
			// hash must not result in a user stored with a bad password.
			fmt.Println("password hash failed for", r.Username, ":", err)
			continue
		}
		entry := users.User{
			Data: users.Data{
				Username: r.Username,
				Email:    r.Email,
			},
			Password: pwd,
		}
		if app.usernameAvailable(context.TODO(), r) {
			inserted, err := app.MongoUsers.InsertOne(context.TODO(), entry)
			if err != nil {
				fmt.Println(err)
			} else {
				fmt.Println("inserted: ", inserted.InsertedID)
			}
			//TODO: Send email validation
		} else {
			fmt.Println("cant insert: ", r.Username)
			//TODO: Implement case of someone registered faster
		}
	}
}
// usernameAvailable reports whether no stored user has the requested
// username. Any FindOne error counts as "available".
// BUG FIX: the ctx parameter was ignored in favour of context.TODO();
// the caller's context (deadline/cancellation) is now honoured.
// NOTE(review): a transient database error also reports the name as
// free; consider matching mongo.ErrNoDocuments explicitly.
func (app *App) usernameAvailable(ctx context.Context, req *users.RegisterReq) bool {
	filter := bson.D{{"data.username", req.Username}}
	return nil != app.MongoUsers.FindOne(ctx, filter).Err()
}
// emailAvailable reports whether no stored user has the requested email.
// Any FindOne error counts as "available".
// BUG FIX: the ctx parameter was ignored in favour of context.TODO();
// the caller's context (deadline/cancellation) is now honoured.
// NOTE(review): a transient database error also reports the email as
// free; consider matching mongo.ErrNoDocuments explicitly.
func (app *App) emailAvailable(ctx context.Context, req *users.RegisterReq) bool {
	filter := bson.D{{"data.email", req.Email}}
	return nil != app.MongoUsers.FindOne(ctx, filter).Err()
}
// Register validates username/email availability and queues the request
// for asynchronous insertion by insertRegisterProcess.
func (app *App) Register(ctx context.Context, req *users.RegisterReq) (*users.RegisterRes, error) {
	if !app.usernameAvailable(ctx, req) {
		return &users.RegisterRes{}, status.Error(codes.AlreadyExists, "username already used")
	}
	if !app.emailAvailable(ctx, req) {
		return &users.RegisterRes{}, status.Error(codes.AlreadyExists, "email already used")
	}
	//TODO validate pwd / email / username
	//TODO add event register request
	app.RegisterChan <- req
	// Cleanup: status.Error(codes.OK, ...) is documented to return nil,
	// so the original already returned a nil error here — make that
	// explicit instead of relying on the gRPC status contract.
	return &users.RegisterRes{}, nil
}
|
// Copyright 2020, Jeff Alder
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nr_yml
import (
"github.com/newrelic/go-agent/v3/newrelic"
"github.com/stretchr/testify/assert"
"testing"
)
// TestDistributedTracingEnabled verifies that a YAML file enabling
// distributed tracing under the production section turns the flag on.
func TestDistributedTracingEnabled(t *testing.T) {
	withContents(`
production:
  distributed_tracing:
    enabled: true
`, t, func(filename string, t *testing.T) {
		cfg := new(newrelic.Config)
		cfg.DistributedTracer.Enabled = false
		ConfigFromYamlFile(filename)(cfg)
		assert.NoError(t, cfg.Error)
		assert.True(t, cfg.DistributedTracer.Enabled)
	})
}
// TestDistributedTracingNoOverwrite verifies that a YAML file which does
// not mention distributed tracing leaves an already-enabled flag intact.
func TestDistributedTracingNoOverwrite(t *testing.T) {
	withContents("production:", t, func(filename string, t *testing.T) {
		cfg := new(newrelic.Config)
		cfg.DistributedTracer.Enabled = true
		ConfigFromYamlFile(filename)(cfg)
		assert.NoError(t, cfg.Error)
		assert.True(t, cfg.DistributedTracer.Enabled)
	})
}
|
package imagekit
import (
"context"
"errors"
)
//
// REQUESTS
//
// AddTagsRequest is the payload for MediaService.AddTags.
type AddTagsRequest struct {
	// FileIDs is the list of unique ID of the uploaded files.
	FileIDs []string `json:"fileIds"`
	// Tags is an array of tags to add on these files.
	Tags []string `json:"tags"`
}
//
// METHODS
//
// AddTags to multiple files in a single request.
//
// NOTE(review): r is validated for nil but is never serialised into the
// request — s.client.request is called with a nil body, so FileIDs and
// Tags are never sent to the API. The third argument should most likely
// be r; confirm against the client.request signature and fix.
func (s *MediaService) AddTags(ctx context.Context, r *AddTagsRequest) error {
	if r == nil {
		return errors.New("request is empty")
	}
	// Prepare request
	req, err := s.client.request("POST", "v1/files/addTags", nil, requestTypeAPI)
	if err != nil {
		return err
	}
	err = s.client.do(ctx, req, nil)
	if err != nil {
		return err
	}
	return nil
}
|
package main
import "fmt"
// main builds a fresh deck and prints it. The commented-out lines show
// the deal/show flow that was being exercised previously.
func main(){
	cardsList := newDeck()
	//fmt.Println(cardsList)
	//
	//handCards, remainCards := deal(cardsList,5)
	//
	//fmt.Println("Cards in hand are : ",show(handCards))
	//fmt.Println("Cards remain are : ",show(remainCards))
	fmt.Print(cardsList.toString())
}
package core
// RunWindowsServer starts the service in a Windows environment by
// delegating to the common runServer entry point.
func RunWindowsServer() {
	runServer()
}
|
package lookup
import (
"fmt"
"github.com/docker/libcompose/config"
)
// MapEnvLookup resolves environment variables from an in-memory map,
// satisfying libcompose's environment-lookup interface.
type MapEnvLookup struct {
	Env map[string]interface{} // variable name -> value
}
// Lookup returns ["key=value"] when key exists in the map, and an empty
// (non-nil) slice otherwise. The service config is ignored.
func (m *MapEnvLookup) Lookup(key string, config *config.ServiceConfig) []string {
	v, ok := m.Env[key]
	if !ok {
		return []string{}
	}
	return []string{fmt.Sprintf("%s=%v", key, v)}
}
|
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"os"
"time"
"github.com/amenzhinsky/iothub/cmd/internal"
"github.com/amenzhinsky/iothub/eventhub"
"github.com/amenzhinsky/iothub/iotservice"
)
// Flag values shared globally by the command handlers below; each
// command's ParseFunc populates only the flags it declares before the
// handler runs. (Original author note: is global state a good idea?)
var (
	// common
	debugFlag bool
	compressFlag bool
	// send
	uidFlag string
	midFlag string
	cidFlag string
	expFlag time.Duration
	ackFlag string
	connectTimeoutFlag int
	responseTimeoutFlag int
	// create/update device
	primaryKeyFlag string
	secondaryKeyFlag string
	primaryThumbprintFlag string
	secondaryThumbprintFlag string
	caFlag bool
	// sas and connection string
	secondaryFlag bool
	// sas
	uriFlag string
	durationFlag time.Duration
	// watch events
	ehcsFlag string
	ehcgFlag string
)
// main runs the CLI and exits with status 1 on any failure. Usage
// errors print their own help, so only unexpected errors are reported.
func main() {
	err := run()
	if err == nil {
		return
	}
	if err != internal.ErrInvalidUsage {
		fmt.Fprintf(os.Stderr, "error: %s\n", err)
	}
	os.Exit(1)
}
// help is the top-level usage text shown by the CLI.
const help = `Helps with interacting and managing your iothub devices.
The $IOTHUB_SERVICE_CONNECTION_STRING environment variable is required for authentication.`
// run builds the CLI command table and executes it with os.Args.
// Commands fall into rough groups: C2D/D2C messaging (send, watch-*),
// device registry CRUD, twin access, import/export jobs, and
// credential helpers (connection-string, access-signature). Every
// handler is passed through wrap so it receives a ready
// *iotservice.Client.
func run() error {
	cli, err := internal.New(help, func(f *flag.FlagSet) {
		f.BoolVar(&debugFlag, "debug", debugFlag, "enable debug mode")
		f.BoolVar(&compressFlag, "compress", false, "compress data (remove JSON indentations)")
	}, []*internal.Command{
		{
			Name: "send",
			Alias: "s",
			Help: "DEVICE PAYLOAD [[key value]...]",
			Desc: "send a message to the named device (C2D)",
			Handler: wrap(send),
			ParseFunc: func(f *flag.FlagSet) {
				f.StringVar(&ackFlag, "ack", "", "type of ack feedback")
				f.StringVar(&uidFlag, "uid", "golang-iothub", "origin of the message")
				f.StringVar(&midFlag, "mid", "", "identifier for the message")
				f.StringVar(&cidFlag, "cid", "", "message identifier in a request-reply")
				f.DurationVar(&expFlag, "exp", 0, "message lifetime")
			},
		},
		{
			Name: "watch-events",
			Alias: "we",
			Desc: "subscribe to device messages (D2C)",
			Handler: wrap(watchEvents),
			ParseFunc: func(f *flag.FlagSet) {
				f.StringVar(&ehcsFlag, "ehcs", "", "custom eventhub connection string")
				f.StringVar(&ehcgFlag, "ehcg", "$Default", "eventhub consumer group")
			},
		},
		{
			Name: "watch-feedback",
			Alias: "wf",
			Desc: "monitor message feedback send by devices",
			Handler: wrap(watchFeedback),
		},
		{
			Name: "call",
			Alias: "c",
			Help: "DEVICE METHOD PAYLOAD",
			Desc: "call a direct method on a device",
			Handler: wrap(call),
			ParseFunc: func(f *flag.FlagSet) {
				f.IntVar(&connectTimeoutFlag, "c", 0, "connect timeout in seconds")
				f.IntVar(&responseTimeoutFlag, "r", 30, "response timeout in seconds")
			},
		},
		{
			Name: "device",
			Alias: "d",
			Help: "DEVICE",
			Desc: "get device information",
			Handler: wrap(device),
		},
		{
			Name: "devices",
			Alias: "ds",
			Desc: "list all available devices",
			Handler: wrap(devices),
		},
		{
			Name: "create-device",
			Alias: "cd",
			Help: "DEVICE",
			Desc: "creates a new device",
			Handler: wrap(createDevice),
			ParseFunc: func(f *flag.FlagSet) {
				f.StringVar(&primaryKeyFlag, "primary-key", "", "primary key (base64)")
				f.StringVar(&secondaryKeyFlag, "secondary-key", "", "secondary key (base64)")
				f.StringVar(&primaryThumbprintFlag, "primary-thumbprint", "", "x509 primary thumbprint")
				f.StringVar(&secondaryThumbprintFlag, "secondary-thumbprint", "", "x509 secondary thumbprint")
				f.BoolVar(&caFlag, "ca", false, "use certificate authority authentication")
			},
		},
		{
			Name: "update-device",
			Alias: "ud",
			Help: "DEVICE",
			Desc: "updates the named device",
			Handler: wrap(updateDevice),
			ParseFunc: func(f *flag.FlagSet) {
				f.StringVar(&primaryKeyFlag, "primary-key", "", "primary key (base64)")
				f.StringVar(&secondaryKeyFlag, "secondary-key", "", "secondary key (base64)")
				f.StringVar(&primaryThumbprintFlag, "primary-thumbprint", "", "x509 primary thumbprint")
				f.StringVar(&secondaryThumbprintFlag, "secondary-thumbprint", "", "x509 secondary thumbprint")
				f.BoolVar(&caFlag, "ca", false, "use certificate authority authentication")
			},
		},
		{
			Name: "delete-device",
			Alias: "dd",
			Help: "DEVICE",
			Desc: "delete the named device",
			Handler: wrap(deleteDevice),
		},
		{
			Name: "twin",
			Alias: "t",
			Desc: "inspect the named twin device",
			Handler: wrap(twin),
		},
		{
			Name: "update-twin",
			Alias: "ut",
			Help: "DEVICE [[key value]...]",
			Desc: "update the named twin device",
			Handler: wrap(updateTwin),
		},
		{
			Name: "stats",
			Alias: "st",
			Desc: "get statistics about the devices",
			Handler: wrap(stats),
		},
		{
			Name: "jobs",
			Alias: "js",
			Desc: "list the last import/export jobs",
			Handler: wrap(jobs),
		},
		{
			Name: "job",
			Alias: "j",
			Help: "ID",
			Desc: "get the status of a import/export job",
			Handler: wrap(job),
		},
		{
			Name: "cancel-job",
			Alias: "cj",
			Desc: "cancel a import/export job",
			Handler: wrap(cancelJob),
		},
		{
			Name: "connection-string",
			Alias: "cs",
			Help: "DEVICE",
			Desc: "get a device's connection string",
			Handler: wrap(connectionString),
			ParseFunc: func(f *flag.FlagSet) {
				f.BoolVar(&secondaryFlag, "secondary", false, "use the secondary key instead")
			},
		},
		{
			Name: "access-signature",
			Alias: "sas",
			Help: "DEVICE",
			Desc: "generate a GenerateToken token",
			Handler: wrap(sas),
			ParseFunc: func(f *flag.FlagSet) {
				f.StringVar(&uriFlag, "uri", "", "storage resource uri")
				f.DurationVar(&durationFlag, "duration", time.Hour, "token validity time")
				f.BoolVar(&secondaryFlag, "secondary", false, "use the secondary key instead")
			},
		},
	})
	if err != nil {
		return err
	}
	return cli.Run(context.Background(), os.Args...)
}
// wrap adapts a handler that needs an *iotservice.Client into an
// internal.HandlerFunc: it dials a fresh client per invocation and
// closes it when the handler returns.
func wrap(fn func(context.Context, *flag.FlagSet, *iotservice.Client) error) internal.HandlerFunc {
	return func(ctx context.Context, f *flag.FlagSet) error {
		client, err := iotservice.New()
		if err != nil {
			return err
		}
		defer client.Close()
		return fn(ctx, f, client)
	}
}
// device prints information about a single device as JSON.
func device(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	dev, err := c.GetDevice(ctx, f.Arg(0))
	if err != nil {
		return err
	}
	return internal.OutputJSON(dev, compressFlag)
}
// devices prints the full device registry as JSON.
func devices(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 0 {
		return internal.ErrInvalidUsage
	}
	list, err := c.ListDevices(ctx)
	if err != nil {
		return err
	}
	return internal.OutputJSON(list, compressFlag)
}
// createDevice registers a new device using the authentication built
// from the command-line flags, then prints the created device.
func createDevice(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	auth, err := mkAuthentication()
	if err != nil {
		return err
	}
	created, err := c.CreateDevice(ctx, &iotservice.Device{
		DeviceID:       f.Arg(0),
		Authentication: auth,
	})
	if err != nil {
		return err
	}
	return internal.OutputJSON(created, compressFlag)
}
// updateDevice replaces the named device's authentication with the one
// built from the command-line flags, then prints the updated device.
func updateDevice(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	auth, err := mkAuthentication()
	if err != nil {
		return err
	}
	updated, err := c.UpdateDevice(ctx, &iotservice.Device{
		DeviceID:       f.Arg(0),
		Authentication: auth,
	})
	if err != nil {
		return err
	}
	return internal.OutputJSON(updated, compressFlag)
}
// mkAuthentication builds device authentication settings from the
// command-line flags. Precedence: x509 thumbprints, then CA, then SAS
// symmetric keys (auto-generated when not supplied).
// TODO: validate that flags only of one type of auth can be passed
func mkAuthentication() (*iotservice.Authentication, error) {
	switch {
	case primaryThumbprintFlag != "" || secondaryThumbprintFlag != "":
		return &iotservice.Authentication{
			Type: iotservice.AuthSelfSigned,
			X509Thumbprint: &iotservice.X509Thumbprint{
				PrimaryThumbprint:   primaryThumbprintFlag,
				SecondaryThumbprint: secondaryThumbprintFlag,
			},
		}, nil
	case caFlag:
		return &iotservice.Authentication{Type: iotservice.AuthCA}, nil
	}
	// auto-generate keys when no auth type is given
	if primaryKeyFlag == "" {
		key, err := iotservice.NewSymmetricKey()
		if err != nil {
			return nil, err
		}
		primaryKeyFlag = key
	}
	if secondaryKeyFlag == "" {
		key, err := iotservice.NewSymmetricKey()
		if err != nil {
			return nil, err
		}
		secondaryKeyFlag = key
	}
	return &iotservice.Authentication{
		Type: iotservice.AuthSAS,
		SymmetricKey: &iotservice.SymmetricKey{
			PrimaryKey:   primaryKeyFlag,
			SecondaryKey: secondaryKeyFlag,
		},
	}, nil
}
// deleteDevice removes the named device from the registry.
func deleteDevice(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if n := f.NArg(); n != 1 {
		return internal.ErrInvalidUsage
	}
	return c.DeleteDevice(ctx, f.Arg(0))
}
// stats prints hub-wide device statistics as JSON.
func stats(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 0 {
		return internal.ErrInvalidUsage
	}
	res, err := c.Stats(ctx)
	if err != nil {
		return err
	}
	return internal.OutputJSON(res, compressFlag)
}
// twin prints the named device twin as JSON.
func twin(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	tw, err := c.GetTwin(ctx, f.Arg(0))
	if err != nil {
		return err
	}
	return internal.OutputJSON(tw, compressFlag)
}
// updateTwin sets desired properties on the named device twin from
// key/value argument pairs (the literal value "null" clears a
// property) and prints the updated twin.
func updateTwin(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() < 3 {
		return internal.ErrInvalidUsage
	}
	props, err := internal.ArgsToMap(f.Args()[1:])
	if err != nil {
		return err
	}
	desired := make(map[string]interface{}, len(props))
	for key, val := range props {
		if val == "null" {
			desired[key] = nil
		} else {
			desired[key] = val
		}
	}
	twin := &iotservice.Twin{
		Properties: &iotservice.Properties{Desired: desired},
	}
	updated, err := c.UpdateTwin(ctx, f.Arg(0), twin, "*")
	if err != nil {
		return err
	}
	return internal.OutputJSON(updated, compressFlag)
}
// call invokes a direct method on a device and prints the result. The
// payload argument must be a JSON object.
func call(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 3 {
		return internal.ErrInvalidUsage
	}
	var payload map[string]interface{}
	if err := json.Unmarshal([]byte(f.Arg(2)), &payload); err != nil {
		return err
	}
	res, err := c.Call(ctx, f.Arg(0), f.Arg(1), payload,
		iotservice.WithCallConnectTimeout(connectTimeoutFlag),
		iotservice.WithCallResponseTimeout(responseTimeoutFlag),
	)
	if err != nil {
		return err
	}
	return internal.OutputJSON(res, compressFlag)
}
// send delivers a cloud-to-device message. Arguments beyond DEVICE and
// PAYLOAD are parsed as key/value message properties; a non-zero -exp
// flag sets an absolute expiry time.
func send(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() < 2 {
		return internal.ErrInvalidUsage
	}
	var props map[string]string
	if f.NArg() > 2 {
		parsed, err := internal.ArgsToMap(f.Args()[2:])
		if err != nil {
			return err
		}
		props = parsed
	}
	var expiryTime time.Time
	if expFlag != 0 {
		expiryTime = time.Now().Add(expFlag)
	}
	return c.SendEvent(ctx, f.Arg(0), []byte(f.Arg(1)),
		iotservice.WithSendMessageID(midFlag),
		iotservice.WithSendAck(ackFlag),
		iotservice.WithSendProperties(props),
		iotservice.WithSendUserID(uidFlag),
		iotservice.WithSendCorrelationID(cidFlag),
		iotservice.WithSentExpiryTime(expiryTime),
	)
}
// watchEvents streams device-to-cloud messages as JSON, either through
// a custom event hub connection string (-ehcs) or via the hub client.
func watchEvents(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 0 {
		return internal.ErrInvalidUsage
	}
	if ehcsFlag != "" {
		return watchEventHubEvents(ctx, ehcsFlag, ehcgFlag)
	}
	handle := func(msg *iotservice.Event) error {
		return internal.OutputJSON(msg, compressFlag)
	}
	return c.SubscribeEvents(ctx, handle)
}
// watchEventHubEvents subscribes directly to an event hub using the
// given connection string and consumer group, printing each event as
// JSON from the current point in time onward.
func watchEventHubEvents(ctx context.Context, cs, group string) error {
	c, err := eventhub.DialConnectionString(cs)
	if err != nil {
		return err
	}
	// The original leaked the dialed connection; close it when the
	// subscription terminates.
	defer c.Close()
	return c.Subscribe(ctx, func(m *eventhub.Event) error {
		return internal.OutputJSON(iotservice.FromAMQPMessage(m.Message), compressFlag)
	},
		eventhub.WithSubscribeConsumerGroup(group),
		eventhub.WithSubscribeSince(time.Now()),
	)
}
// watchFeedback prints message-feedback records as they arrive; output
// errors from the callback are forwarded through errc and returned.
//
// NOTE(review): if SubscribeFeedback ever returns nil (e.g. a clean
// shutdown), the final receive on errc blocks forever — confirm the
// client only returns once the subscription fails or ctx is done.
func watchFeedback(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 0 {
		return internal.ErrInvalidUsage
	}
	errc := make(chan error, 1)
	if err := c.SubscribeFeedback(ctx, func(f *iotservice.Feedback) {
		if err := internal.OutputJSON(f, compressFlag); err != nil {
			errc <- err
		}
	}); err != nil {
		return err
	}
	return <-errc
}
// jobs prints the most recent import/export jobs as JSON.
func jobs(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 0 {
		return internal.ErrInvalidUsage
	}
	list, err := c.ListJobs(ctx)
	if err != nil {
		return err
	}
	return internal.OutputJSON(list, compressFlag)
}
// job prints the status of one import/export job as JSON.
func job(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	res, err := c.GetJob(ctx, f.Arg(0))
	if err != nil {
		return err
	}
	return internal.OutputJSON(res, compressFlag)
}
// cancelJob cancels an import/export job and prints the result.
func cancelJob(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	res, err := c.CancelJob(ctx, f.Arg(0))
	if err != nil {
		return err
	}
	return internal.OutputJSON(res, compressFlag)
}
// connectionString prints the named device's connection string,
// derived from the primary key (or the secondary key when -secondary).
func connectionString(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	dev, err := c.GetDevice(ctx, f.Arg(0))
	if err != nil {
		return err
	}
	cs, err := c.DeviceConnectionString(dev, secondaryFlag)
	if err != nil {
		return err
	}
	return internal.OutputLine(cs)
}
// sas prints a shared access signature for the named device, valid for
// -duration and signed with the primary or secondary key.
func sas(ctx context.Context, f *flag.FlagSet, c *iotservice.Client) error {
	if f.NArg() != 1 {
		return internal.ErrInvalidUsage
	}
	dev, err := c.GetDevice(ctx, f.Arg(0))
	if err != nil {
		return err
	}
	token, err := c.DeviceSAS(dev, durationFlag, secondaryFlag)
	if err != nil {
		return err
	}
	return internal.OutputLine(token)
}
|
package main
// Accesslog models a single access-log transaction record
// ("tipo per transaction" in the original Italian comment).
type Accesslog struct {
	Hash string
	Type string
	Time string
	TTS int
	SEIp string
	Clientip string
	Request string
	Bytes int
	Method string
	URL string
	Urlschema string
	Urlhost string
	Urlpath string
	Urlquery string
	Mime string
	Ua string
}

// Fruizioni keeps data relevant to fruitions (content consumptions),
// keyed maps from a fruition hash to its attributes.
type Fruizioni struct {
	Hashfruizione map[string]bool
	Clientip map[string]string
	Idvideoteca map[string]string
	Idaps map[string]string
	Edgeip map[string]string
	Giorno map[string]string
	Orario map[string]string
	// Details is excluded from JSON output.
	Details map[string][]float64 `json:"-"`
}
|
package content
import (
dbUtils "github.com/sundogrd/content-api/utils/db"
"sync"
)
// Process-wide singleton state guarded by sync.Once.
var _contentRepository *ContentRepository
var _contentRepositoryOnce sync.Once

// ContentRepositoryInstance lazily builds the process-wide
// ContentRepository, creating the backing table on first use.
func ContentRepositoryInstance() *ContentRepository {
	_contentRepositoryOnce.Do(func() {
		db := dbUtils.Client
		// Create the table only when it does not exist yet.
		// (was: `hasContentTable == false`, an anti-idiom)
		if !db.HasTable(&PFContent{}) {
			db.CreateTable(&PFContent{})
		}
		_contentRepository = newContentRepository(db)
	})
	return _contentRepository
}
|
package config
import (
"path/filepath"
"runtime"
"gopkg.in/ini.v1"
)
// ConfigManager loads sections of the bundled ini file into structs.
type ConfigManager interface {
	Load(section string, tpl interface{}) error
}

// configManagerImpl is the ini-backed implementation of ConfigManager.
type configManagerImpl struct {
	config *ini.File
}
// NewConfigManager reads config.ini from the directory containing this
// source file (resolved via runtime.Caller) and returns an
// api/config.ConfigManager backed by a case-insensitive ini load.
func NewConfigManager() (ConfigManager, error) {
	_, filename, _, ok := runtime.Caller(0)
	if !ok {
		panic("No caller information")
	}
	path := filepath.Join(filepath.Dir(filename), "config.ini")
	cfg, err := ini.InsensitiveLoad(path)
	if err != nil {
		return nil, err
	}
	return &configManagerImpl{config: cfg}, nil
}
// Load maps the named ini section into tpl (a struct pointer).
// Receiver renamed from "this": Go convention uses a short receiver
// name, never this/self.
func (c *configManagerImpl) Load(section string, tpl interface{}) error {
	return c.config.Section(section).MapTo(tpl)
}
|
package main
import (
"fmt"
"github.com/lxc/lxd/client"
"github.com/lxc/lxd/shared/api"
)
// connect opens the LXD unix socket and returns a client. On failure
// the error is printed and the returned client is nil.
func connect() (container lxd.ContainerServer) {
	container, err := lxd.ConnectLXDUnix("", nil)
	if err != nil {
		fmt.Println(err)
	}
	return
}

// create requests a new container from the "debian" image alias and
// waits for the operation to complete.
func create(container lxd.ContainerServer) {
	// container creation request
	req := api.ContainersPost{
		Name: "container-name",
		Source: api.ContainerSource{
			Type:  "image",
			Alias: "debian",
		},
	}
	op, err := container.CreateContainer(req)
	if err != nil {
		// op is nil on error; returning avoids the nil dereference
		// the original hit in op.Wait()
		fmt.Println(err)
		return
	}
	if err = op.Wait(); err != nil {
		fmt.Println(err)
	}
}

// start transitions the named container to the running state.
func start(name string, container lxd.ContainerServer) {
	reqState := api.ContainerStatePut{
		Action:  "start",
		Timeout: -1,
	}
	op, err := container.UpdateContainerState(name, reqState, "")
	if err != nil {
		fmt.Println(err) // op is nil here; bail out before op.Wait()
		return
	}
	if err = op.Wait(); err != nil {
		fmt.Println(err)
	}
}

// stop transitions the named container to the stopped state.
func stop(name string, container lxd.ContainerServer) {
	reqState := api.ContainerStatePut{
		Action:  "stop",
		Timeout: -1,
	}
	op, err := container.UpdateContainerState(name, reqState, "")
	if err != nil {
		fmt.Println(err) // op is nil here; bail out before op.Wait()
		return
	}
	if err = op.Wait(); err != nil {
		fmt.Println(err)
	}
}

// delete removes the named container. The container must be stopped
// first, otherwise the server rejects the request.
func delete(name string, container lxd.ContainerServer) {
	op, err := container.DeleteContainer(name)
	if err != nil {
		fmt.Println(err) // op is nil here; bail out before op.Wait()
		return
	}
	if err = op.Wait(); err != nil {
		fmt.Println(err)
	}
}

// status prints the current state of the named container.
func status(name string, container lxd.ContainerServer) {
	stat, etag, err := container.GetContainerState(name)
	if err != nil {
		// stat is nil on error; the original dereferenced it anyway
		// and panicked
		fmt.Println(err)
		fmt.Println(etag)
		return
	}
	fmt.Println(*stat)
}

func main() {
	c := connect()
	//create(c)
	//start("container-name", c)
	//stop("container-name", c)
	status("debian", c)
	//delete("container-name", c)
}
|
package metadata
import (
	"errors"
	"fmt"

	"gorm.io/gorm"

	"github.com/root-gg/plik/server/common"
)
// CreateFile persists a new file record to the database.
func (b *Backend) CreateFile(file *common.File) (err error) {
	return b.db.Create(file).Error
}
// GetFile returns a file from the database (nil file and nil error
// when the ID is unknown).
func (b *Backend) GetFile(fileID string) (file *common.File, err error) {
	file = &common.File{}
	err = b.db.Where(&common.File{ID: fileID}).Take(file).Error
	// errors.Is instead of == so wrapped "record not found" errors are
	// still recognized (recommended gorm v2 practice).
	if errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return file, nil
}
// GetFiles returns every file belonging to the given upload ID.
func (b *Backend) GetFiles(uploadID string) (files []*common.File, err error) {
	if err = b.db.Where(&common.File{UploadID: uploadID}).Find(&files).Error; err != nil {
		return nil, err
	}
	return files, nil
}
// UpdateFile updates a file row. The status argument guards against a
// concurrent status change since the file was loaded: the update only
// matches while the stored status still equals it.
func (b *Backend) UpdateFile(file *common.File, status string) error {
	query := b.db.Model(&common.File{}).Where(&common.File{ID: file.ID, Status: status})
	result := query.Updates(file)
	if result.Error != nil {
		return result.Error
	}
	if result.RowsAffected != 1 {
		return fmt.Errorf("invalid file status")
	}
	return nil
}
// UpdateFileStatus transitions a file from oldStatus to newStatus.
// The oldStatus guard ensures the status has not changed since the
// file was loaded; on success the in-memory file is updated too.
func (b *Backend) UpdateFileStatus(file *common.File, oldStatus string, newStatus string) error {
	query := b.db.Model(&common.File{}).Where(&common.File{ID: file.ID, Status: oldStatus})
	result := query.Update("status", newStatus)
	if result.Error != nil {
		return result.Error
	}
	if result.RowsAffected != 1 {
		return fmt.Errorf("%s file not found", oldStatus)
	}
	file.Status = newStatus
	return nil
}
// RemoveFile marks a file for deletion. Files never uploaded jump
// straight to deleted; files with (possibly partial) data are marked
// removed so the next cleaning cycle purges them from the data backend
// and then flips the status to deleted.
func (b *Backend) RemoveFile(file *common.File) error {
	switch file.Status {
	case common.FileMissing, "":
		// never uploaded, even partially: safe to delete directly
		return b.UpdateFileStatus(file, file.Status, common.FileDeleted)
	case common.FileUploaded, common.FileUploading:
		// data may exist in the backend: flag for the next cleanup
		return b.UpdateFileStatus(file, file.Status, common.FileRemoved)
	default:
		// already removed/deleted: nothing to do
		return nil
	}
}
// ForEachUploadFiles executes f for each file of the upload, scanning
// rows one at a time so large uploads are never fully materialized.
func (b *Backend) ForEachUploadFiles(uploadID string, f func(file *common.File) error) (err error) {
	rows, err := b.db.Model(&common.File{}).Where(&common.File{UploadID: uploadID}).Rows()
	if err != nil {
		return err
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		file := &common.File{}
		err = b.db.ScanRows(rows, file)
		if err != nil {
			return err
		}
		err = f(file)
		if err != nil {
			return err
		}
	}
	// The original returned nil unconditionally, silently truncating
	// results when iteration aborted with an error.
	return rows.Err()
}
// ForEachRemovedFile executes f for each file with the status
// "removed", scanning rows one at a time.
func (b *Backend) ForEachRemovedFile(f func(file *common.File) error) (err error) {
	rows, err := b.db.Model(&common.File{}).Where(&common.File{Status: common.FileRemoved}).Rows()
	if err != nil {
		return err
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		file := &common.File{}
		err = b.db.ScanRows(rows, file)
		if err != nil {
			return err
		}
		err = f(file)
		if err != nil {
			return err
		}
	}
	// Surface iteration errors instead of silently truncating results.
	return rows.Err()
}
// CountUploadFiles counts how many files were added to an upload.
// Returns -1 together with the error on failure.
func (b *Backend) CountUploadFiles(uploadID string) (count int, err error) {
	var total int64 // Gorm V2 requires int64 for counts
	err = b.db.Model(&common.File{}).Where(&common.File{UploadID: uploadID}).Count(&total).Error
	if err != nil {
		return -1, err
	}
	return int(total), nil
}
// ForEachFile executes f for every file in the database, scanning rows
// one at a time.
func (b *Backend) ForEachFile(f func(file *common.File) error) (err error) {
	stmt := b.db.Model(&common.File{})
	rows, err := stmt.Rows()
	if err != nil {
		return err
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		file := &common.File{}
		err = b.db.ScanRows(rows, file)
		if err != nil {
			return err
		}
		err = f(file)
		if err != nil {
			return err
		}
	}
	// Surface iteration errors instead of silently truncating results.
	return rows.Err()
}
|
package methods
import (
"net/http"
)
// contains reports whether needle occurs in haystack.
func contains(needle string, haystack []string) bool {
	for _, candidate := range haystack {
		if candidate == needle {
			return true
		}
	}
	return false
}
// makeMiddleware builds a middleware constructor from a predicate over
// (request method, configured methods). When the predicate rejects the
// request's method, 405 Method Not Allowed is returned and the wrapped
// handler is never invoked.
func makeMiddleware(predicate func(string, []string) bool) func(...string) func(http.Handler) http.Handler {
	return func(methods ...string) func(http.Handler) http.Handler {
		return func(next http.Handler) http.Handler {
			fn := func(w http.ResponseWriter, r *http.Request) {
				if !predicate(r.Method, methods) {
					status := http.StatusMethodNotAllowed
					http.Error(w, http.StatusText(status), status)
					return
				}
				next.ServeHTTP(w, r)
			}
			return http.HandlerFunc(fn)
		}
	}
}
// Allow will only pass through requests to the given handler
// if the request's method is contained in the given list of
// HTTP methods.
var Allow = makeMiddleware(contains)

// Disallow will not pass through requests to the given handler
// if the request's method is contained in the given list of
// HTTP methods (the negation of Allow).
var Disallow = makeMiddleware(func(needle string, haystack []string) bool {
	return !contains(needle, haystack)
})
|
package cmd
import (
"github.com/stretchr/testify/assert"
"testing"
)
// Test_Root runs the bare root command and expects it to succeed.
func Test_Root(t *testing.T) {
	// Arrange
	a := assert.New(t)
	appPrinter = newMockPrn()
	// Act
	err := Execute()
	// Assert
	a.Nil(err)
}
// Test_RootUnknownCommand verifies the error message produced for an
// unknown subcommand.
func Test_RootUnknownCommand(t *testing.T) {
	// Arrange
	a := assert.New(t)
	appPrinter = newMockPrn()
	// Act
	err := Execute("xxx")
	// Assert
	a.Equal("unknown command \"xxx\" for \"copyto\"", err.Error())
}
// Test_Execute is a smoke test: Execute must run to completion with
// the mock printer installed; the returned error is intentionally
// ignored.
func Test_Execute(t *testing.T) {
	// Arrange
	appPrinter = newMockPrn()
	// Act
	_ = Execute()
	// Assert
}
|
package errors
import (
"fmt"
)
// TceCloudSDKError is the structured error returned by the TceCloud
// SDK: an error code, a human-readable message and the ID of the
// failed request.
type TceCloudSDKError struct {
	Code      string
	Message   string
	RequestId string
}

// NewTceCloudSDKError builds a *TceCloudSDKError as a plain error.
func NewTceCloudSDKError(code, message, requestId string) error {
	return &TceCloudSDKError{Code: code, Message: message, RequestId: requestId}
}

// Error implements the error interface.
func (e *TceCloudSDKError) Error() string {
	return fmt.Sprintf("[TceCloudSDKError] Code=%s, Message=%s, RequestId=%s", e.Code, e.Message, e.RequestId)
}

// GetCode returns the error code.
func (e *TceCloudSDKError) GetCode() string {
	return e.Code
}

// GetMessage returns the error message.
func (e *TceCloudSDKError) GetMessage() string {
	return e.Message
}

// GetRequestId returns the ID of the failed request.
func (e *TceCloudSDKError) GetRequestId() string {
	return e.RequestId
}
|
package main
import "fmt"
// main demonstrates removeStars on a sample input.
func main() {
	result := removeStars("leet**cod*e")
	fmt.Println(result)
}
// removeStars deletes, for every '*' in s, the closest non-star
// character to its left together with the star itself, and returns the
// remaining characters in order. Implemented as a byte stack.
//
// A '*' with nothing to its left is ignored instead of panicking: the
// original sliced bs[:len(bs)-1] unconditionally, which panics on
// inputs like "*ab".
func removeStars(s string) string {
	stack := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		if s[i] == '*' {
			if len(stack) > 0 {
				stack = stack[:len(stack)-1] // pop the character left of the star
			}
		} else {
			stack = append(stack, s[i]) // push a regular character
		}
	}
	return string(stack)
}
|
package fslm
import (
"bytes"
"encoding/gob"
"errors"
"os"
"reflect"
"unsafe"
"github.com/kho/byteblock"
"github.com/kho/word"
)
// Hashed is a finite-state representation of a n-gram language model
// using hash tables. A Hashed model is usually loaded from file or
// constructed with a Builder.
type Hashed struct {
	// The vocabulary of the model. Don't modify this. If you need to
	// have a vocab based on this, make a copy using Vocab.Copy().
	vocab *word.Vocab
	// Sentence boundary symbols.
	bos, eos string
	// Cached vocabulary IDs of bos/eos, resolved in parseHeader.
	bosId, eosId word.Id
	// Buckets per state for out-going lexical transitions.
	// There are three kinds of transitions:
	//
	// (1) A lexical transition that consumes an actual word (i.e. any
	// valid word other than <s> or </s>). This leads to a valid state
	// with some weight. Note we allow transition from empty consuming
	// <s>. This transition should have WEIGHT_LOG0 anyway (e.g. those
	// built from SRILM) so keeping it doesn't cause much trouble.
	//
	// (2) A final transition that consumes </s>. This gives the final
	// weight but always leads to an invalid state.
	//
	// (3) Buckets with invalid keys (word.NIL) are all filled with
	// back-off transitions so that we know the back-off transition
	// immediately when the key cannot be found.
	transitions []xqwBuckets
}
// Start returns the start state of the model.
func (m *Hashed) Start() StateId {
	return _STATE_START
}
// NextI follows the transition consuming word i from state p. It
// returns the destination state and the total weight, including any
// back-off weights accumulated on the way. When the word is absent
// even from the empty (unigram) state, the empty state is returned
// with WEIGHT_LOG0.
func (m *Hashed) NextI(p StateId, i word.Id) (q StateId, w Weight) {
	// Try backing off until we find the n-gram or hit empty state.
	next := m.transitions[p].FindEntry(i)
	for next.Key == word.NIL && p != _STATE_EMPTY {
		// the entry under key word.NIL stores the back-off transition
		p = next.Value.State
		w += next.Value.Weight
		next = m.transitions[p].FindEntry(i)
	}
	if next.Key != word.NIL {
		q = next.Value.State
		w += next.Value.Weight
	} else {
		q = _STATE_EMPTY
		w = WEIGHT_LOG0
	}
	return
}
// NextS is NextI for a string word, looked up in the vocabulary first.
func (m *Hashed) NextS(p StateId, s string) (q StateId, w Weight) {
	return m.NextI(p, m.vocab.IdOf(s))
}
// Final returns the final weight of state p, i.e. the weight of
// consuming </s> from p.
func (m *Hashed) Final(p StateId) Weight {
	_, w := m.NextI(p, m.eosId)
	return w
}
// BackOff returns the back-off state and weight of p. The empty state
// has no back-off, so (STATE_NIL, 0) is returned for it.
func (m *Hashed) BackOff(p StateId) (StateId, Weight) {
	if p == _STATE_EMPTY {
		return STATE_NIL, 0
	}
	// the back-off transition is stored under the invalid key word.NIL
	backoff := m.transitions[p].FindEntry(word.NIL).Value
	return backoff.State, backoff.Weight
}
// Vocab exposes the model vocabulary together with the sentence
// boundary symbols and their word IDs.
func (m *Hashed) Vocab() (*word.Vocab, string, string, word.Id, word.Id) {
	return m.vocab, m.bos, m.eos, m.bosId, m.eosId
}
// NumStates returns the number of states in the model.
func (m *Hashed) NumStates() int {
	return len(m.transitions)
}
// Transitions streams every stored entry of state p over a channel.
//
// NOTE(review): the producing goroutine only exits after the channel
// is drained; a caller that abandons the channel early leaks the
// goroutine — confirm all callers consume it to the end.
func (m *Hashed) Transitions(p StateId) chan WordStateWeight {
	ch := make(chan WordStateWeight)
	go func() {
		for i := range m.transitions[p].Range() {
			ch <- WordStateWeight{i.Key, i.Value.State, i.Value.Weight}
		}
		close(ch)
	}()
	return ch
}
// header gob-encodes the model metadata — vocabulary, boundary
// symbols, and the bucket count of each state — for the binary file
// format. parseHeader must decode the fields in exactly this order.
func (m *Hashed) header() (header []byte, err error) {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	if err = enc.Encode(m.vocab); err != nil {
		return
	}
	if err = enc.Encode(m.bos); err != nil {
		return
	}
	if err = enc.Encode(m.eos); err != nil {
		return
	}
	// Per-state bucket counts let the loader re-slice the flat entry
	// block back into per-state tables.
	numBuckets := make([]int, len(m.transitions))
	for i, t := range m.transitions {
		numBuckets[i] = len(t)
	}
	if err = enc.Encode(numBuckets); err != nil {
		return
	}
	header = buf.Bytes()
	return
}
// parseHeader decodes the metadata written by header (same field
// order), resolves the sentence boundary word IDs, and returns the
// per-state bucket counts. It fails when either boundary symbol is
// missing from the decoded vocabulary.
func (m *Hashed) parseHeader(header []byte) (numBuckets []int, err error) {
	dec := gob.NewDecoder(bytes.NewReader(header))
	if err = dec.Decode(&m.vocab); err != nil {
		return
	}
	if err = dec.Decode(&m.bos); err != nil {
		return
	}
	if err = dec.Decode(&m.eos); err != nil {
		return
	}
	if m.bosId = m.vocab.IdOf(m.bos); m.bosId == word.NIL {
		err = errors.New(m.bos + " not in vocabulary")
		return
	}
	if m.eosId = m.vocab.IdOf(m.eos); m.eosId == word.NIL {
		err = errors.New(m.eos + " not in vocabulary")
		return
	}
	if err = dec.Decode(&numBuckets); err != nil {
		return
	}
	return
}
// WriteBinary serializes the model to path in the byteblock container
// format: the magic string, the gob-encoded header, then every state's
// bucket entries concatenated into one large raw block.
//
// NOTE(review): the error from the deferred w.Close() is discarded; a
// short write on close could go unnoticed — confirm callers tolerate
// this.
func (m *Hashed) WriteBinary(path string) (err error) {
	w, err := os.Create(path)
	if err != nil {
		return
	}
	defer w.Close()
	bw := byteblock.NewByteBlockWriter(w)
	if err = bw.WriteString(MAGIC_HASHED, 0); err != nil {
		return
	}
	// Header
	header, err := m.header()
	if err != nil {
		return
	}
	if err = bw.Write(header, 0); err != nil {
		return
	}
	// Raw entries.
	// Go over the transitions to see how many entries there are in total.
	numEntries := int64(0)
	for _, i := range m.transitions {
		numEntries += int64(len(i))
	}
	// Ask for a large new block and then incrementally write out the
	// data.
	align := int64(unsafe.Alignof(xqwEntry{}))
	size := int64(unsafe.Sizeof(xqwEntry{}))
	if err = bw.NewBlock(align, size*numEntries); err != nil {
		return
	}
	for _, i := range m.transitions {
		// Reinterpret the entry slice as its raw bytes without
		// copying, via matching slice headers.
		iHeader := (*reflect.SliceHeader)(unsafe.Pointer(&i))
		var bytes []byte
		bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&bytes))
		bytesHeader.Data = iHeader.Data
		bytesHeader.Len = int(int64(iHeader.Len) * size)
		bytesHeader.Cap = bytesHeader.Len
		if err = bw.Append(bytes); err != nil {
			return
		}
	}
	return nil
}
// IsHashedBinary reports whether raw begins with the magic string of
// the Hashed binary format.
func IsHashedBinary(raw []byte) bool {
	magic, err := byteblock.NewByteBlockSlicer(raw).Slice()
	if err != nil {
		return false
	}
	return string(magic) == MAGIC_HASHED
}
// UnsafeParseBinary loads a model from the raw bytes of a file written
// by WriteBinary. "Unsafe" because the transition buckets alias raw
// directly (zero-copy via slice-header rewriting): raw must stay alive
// and unmodified for the lifetime of the model.
func (m *Hashed) UnsafeParseBinary(raw []byte) error {
	bs := byteblock.NewByteBlockSlicer(raw)
	magic, err := bs.Slice()
	if err != nil {
		return err
	}
	if string(magic) != MAGIC_HASHED {
		return errors.New("not a FSLM binary file")
	}
	header, err := bs.Slice()
	if err != nil {
		return err
	}
	numBuckets, err := m.parseHeader(header)
	if err != nil {
		return err
	}
	entryBytes, err := bs.Slice()
	if err != nil {
		return err
	}
	// Reinterpret the raw byte block as a []xqwEntry without copying.
	var entrySlice []xqwEntry
	entryBytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&entryBytes))
	entrySliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&entrySlice))
	entrySliceHeader.Data = entryBytesHeader.Data
	entrySliceHeader.Len = entryBytesHeader.Len / int(unsafe.Sizeof(xqwEntry{}))
	entrySliceHeader.Cap = entrySliceHeader.Len
	// Re-slice the flat entry array into per-state bucket tables using
	// the counts recorded in the header.
	m.transitions = make([]xqwBuckets, len(numBuckets))
	low := 0
	for i, n := range numBuckets {
		m.transitions[i] = xqwBuckets(entrySlice[low : low+n])
		low += n
	}
	return nil
}
|
package main
import "fmt"
// fibonacci returns the n-th Fibonacci number, with fibonacci(0) == 0
// and fibonacci(1) == 1.
//
// The original doubly-recursive version ran in exponential time; this
// iterative form is O(n) and returns identical values (unsigned
// arithmetic wraps identically for large n).
func fibonacci(n uint) uint {
	var a, b uint = 0, 1
	for i := uint(0); i < n; i++ {
		a, b = b, a+b
	}
	return a
}
// main repeatedly prompts for a number and prints its Fibonacci value,
// caching results across iterations. Loops forever until interrupted.
func main() {
	prevRes := make(map[uint]uint)
	var n uint
	for {
		fmt.Print("Input positive number: ")
		fmt.Scanln(&n)
		res := prevRes[n]
		// a cached zero is indistinguishable from "absent", but the
		// only n with result zero is n == 0, handled explicitly
		if res == 0 && n != 0 {
			res = fibonacci(n)
			prevRes[n] = res
		}
		fmt.Println("Fibonacci number: ", res)
	}
}
|
// Copyright 2019 Leandro Akira Omiya Takagi. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package unio
import (
"reflect"
)
// Search if a value contains inside array
func (u *Util) ArrayContains(ss interface{}, e interface{}) bool {
s := reflect.ValueOf(ss)
for i := 0; i < s.Len(); i++ {
a := s.Index(i).Interface()
if reflect.DeepEqual(a, e) {
return true
}
}
return false
} |
package handlers
import (
"aws-lambda-api/pkg/ngo"
"net/http"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
)
// GetNgo fetches a single NGO identified by the ngoId query parameter.
func GetNgo(req events.APIGatewayProxyRequest, tableName string, dynaClient dynamodbiface.DynamoDBAPI) (
	*events.APIGatewayProxyResponse,
	error,
) {
	id := req.QueryStringParameters["ngoId"]
	item, err := ngo.FetchNgo(id, tableName, dynaClient)
	if err != nil {
		return apiResponse(http.StatusBadRequest, ErrorBody{aws.String(err.Error())})
	}
	return apiResponse(http.StatusOK, item)
}

// GetNgos lists NGOs filtered by the countries and categories query
// parameters.
func GetNgos(req events.APIGatewayProxyRequest, tableName string, dynaClient dynamodbiface.DynamoDBAPI) (
	*events.APIGatewayProxyResponse,
	error,
) {
	countries := req.QueryStringParameters["countries"]
	categories := req.QueryStringParameters["categories"]
	items, err := ngo.FetchNgos(countries, categories, tableName, dynaClient)
	if err != nil {
		return apiResponse(http.StatusBadRequest, ErrorBody{aws.String(err.Error())})
	}
	return apiResponse(http.StatusOK, items)
}

// CreateNgo stores a new NGO built from the request body.
func CreateNgo(req events.APIGatewayProxyRequest, tableName string, dynaClient dynamodbiface.DynamoDBAPI) (
	*events.APIGatewayProxyResponse,
	error,
) {
	created, err := ngo.CreateNgo(req, tableName, dynaClient)
	if err != nil {
		return apiResponse(http.StatusBadRequest, ErrorBody{aws.String(err.Error())})
	}
	return apiResponse(http.StatusCreated, created)
}

// UpdateNgo updates an existing NGO from the request body.
func UpdateNgo(req events.APIGatewayProxyRequest, tableName string, dynaClient dynamodbiface.DynamoDBAPI) (
	*events.APIGatewayProxyResponse,
	error,
) {
	updated, err := ngo.UpdateNgo(req, tableName, dynaClient)
	if err != nil {
		return apiResponse(http.StatusBadRequest, ErrorBody{aws.String(err.Error())})
	}
	return apiResponse(http.StatusOK, updated)
}

// DeleteNgo removes the NGO identified by the request.
func DeleteNgo(req events.APIGatewayProxyRequest, tableName string, dynaClient dynamodbiface.DynamoDBAPI) (
	*events.APIGatewayProxyResponse,
	error,
) {
	if err := ngo.DeleteNgo(req, tableName, dynaClient); err != nil {
		return apiResponse(http.StatusBadRequest, ErrorBody{aws.String(err.Error())})
	}
	return apiResponse(http.StatusOK, nil)
}
|
package rlcli
import (
"context"
"fmt"
"github.com/gin-gonic/gin"
"github.com/sanguohot/rlcli/pkg/common/log"
"github.com/ulule/limiter/v3"
mgin "github.com/ulule/limiter/v3/drivers/middleware/gin"
"github.com/ulule/limiter/v3/drivers/store/memory"
"net/http"
"os"
"os/signal"
"time"
)
type Rlcli struct {
limit string
addr string
}
// New builds a Rlcli that will listen on addr and throttle requests
// according to the limit expression.
func New(limit, addr string) *Rlcli {
	s := &Rlcli{}
	s.limit = limit
	s.addr = addr
	return s
}
// defaultHandler answers every routed request with a tiny JSON payload
// carrying "ok" and the current Unix timestamp.
func (s *Rlcli) defaultHandler(c *gin.Context) {
	type message struct {
		Message   string `json:"message"`
		Timestamp int64  `json:"timestamp"`
	}
	c.JSON(http.StatusOK, message{
		Message:   "ok",
		Timestamp: time.Now().Unix(),
	})
}
// rateLimitMiddleware builds a gin middleware enforcing s.limit with an
// in-memory limiter store. The process exits if the limit expression is
// invalid.
func (s *Rlcli) rateLimitMiddleware() gin.HandlerFunc {
	rate, err := limiter.NewRateFromFormatted(s.limit)
	if err != nil {
		log.Logger.Fatal(err.Error())
	}
	return mgin.NewMiddleware(limiter.New(memory.NewStore(), rate))
}
// startServer configures gin, installs the rate-limit middleware, serves
// HTTP until an interrupt signal arrives, then shuts down gracefully with a
// five-second drain window.
func (s *Rlcli) startServer() {
	gin.SetMode(gin.DebugMode)
	r := gin.New()
	r.Use(gin.Recovery())
	// Request logging is on by default, but it noticeably reduces
	// throughput; set GIN_LOG=off to disable it.
	if os.Getenv("GIN_LOG") != "off" {
		r.Use(gin.Logger())
	}
	r.MaxMultipartMemory = 10 << 20 // 10 MB
	// Every path goes through the limiter and then the default handler.
	r.NoRoute(s.rateLimitMiddleware(), s.defaultHandler)
	//r.Any("/", s.rateLimitMiddleware(), s.defaultHandler)
	server := &http.Server{
		Addr:           fmt.Sprintf("%s", s.addr),
		Handler:        r,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20, // 1 MB
	}
	go func() {
		// http.ErrServerClosed is the expected outcome of Shutdown below;
		// treating it as fatal would abort the graceful-shutdown path.
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Logger.Fatal(err.Error())
		}
	}()
	log.Sugar.Infof("[http] listening => %s, limit => %v", server.Addr, s.limit)
	// Block until an interrupt arrives, then drain for up to five seconds.
	// The channel is buffered: signal.Notify sends without blocking, so an
	// unbuffered channel could drop a signal delivered before we receive.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt)
	<-quit
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		log.Logger.Fatal(err.Error())
	}
	log.Sugar.Infof("stop server => %s, limit => %v", server.Addr, s.limit)
}
// Serve starts the rate-limited HTTP server and blocks until the process
// receives an interrupt and graceful shutdown completes.
func (s *Rlcli) Serve() {
	s.startServer()
}
|
package createplayerusecase
import "backend/internal/domain"
// UseCase creates a new player and reports the identifier assigned to it.
type UseCase interface {
	// Execute generates an id, builds a player with the given name and
	// persists it; the Result carries the new player's id.
	Execute(name string) (Result, error)
}
// New wires a UseCase implementation from its two collaborators: an id
// generator and a player repository.
func New(
	playerIdGenerator domain.PlayerIdGenerator,
	playerRepository domain.PlayerRepository,
) UseCase {
	uc := &createPlayerUseCase{}
	uc.playerIdGenerator = playerIdGenerator
	uc.playerRepository = playerRepository
	return uc
}
// createPlayerUseCase is the default UseCase implementation.
type createPlayerUseCase struct {
	playerRepository  domain.PlayerRepository  // persists newly created players
	playerIdGenerator domain.PlayerIdGenerator // issues fresh player ids
}
// Execute creates and stores a player with the given name, returning the
// generated id wrapped in a Result.
func (c *createPlayerUseCase) Execute(name string) (Result, error) {
	id := c.playerIdGenerator.Generate()
	player := domain.NewPlayer(id, name)
	if err := c.playerRepository.Save(player); err != nil {
		return Result{}, err
	}
	return NewResult(id), nil
}
// Result is the output of UseCase.Execute: the id of the created player.
type Result struct {
	id domain.PlayerId
}

// NewResult wraps a player id in a Result.
func NewResult(id domain.PlayerId) Result {
	return Result{id: id}
}

// Id reports the created player's identifier.
func (r Result) Id() domain.PlayerId {
	return r.id
}
|
package sshmux
import (
"errors"
"fmt"
"io"
"strconv"
)
// DefaultInteractive is the default server selection prompt for users during
// session forward.
//
// It lists session.Remotes with their indices, reads the user's reply one
// byte at a time from comm (echoing each byte back) until a carriage return,
// and returns the remote whose index was typed. Ctrl-C (0x03) aborts with an
// error; invalid or out-of-range input restarts the prompt.
func DefaultInteractive(comm io.ReadWriter, session *Session) (string, error) {
	remotes := session.Remotes
	fmt.Fprintf(comm, "Welcome to sshmux, %s\r\n", session.Conn.User())
	for i, v := range remotes {
		fmt.Fprintf(comm, " [%d] %s\r\n", i, v)
	}
	// Beware, nasty comm parsing loop
loop:
	for {
		fmt.Fprintf(comm, "Please select remote server: ")
		var buf []byte // bytes typed so far on the current line
		b := make([]byte, 1)
		var (
			n   int
			err error
		)
		for {
			// The read error is deliberately checked at the top of the next
			// iteration so a byte delivered together with an error is still
			// processed before the error terminates the prompt.
			if err != nil {
				return "", err
			}
			n, err = comm.Read(b)
			if n == 1 {
				// Echo the byte back to the user.
				fmt.Fprintf(comm, "%s", b)
				switch b[0] {
				case '\r':
					// End of line: parse the accumulated digits as an index.
					fmt.Fprintf(comm, "\r\n")
					res, err := strconv.ParseInt(string(buf), 10, 64)
					if err != nil {
						fmt.Fprintf(comm, "comm not a valid integer. Please try again\r\n")
						continue loop
					}
					if int(res) >= len(remotes) || res < 0 {
						fmt.Fprintf(comm, "No such server. Please try again\r\n")
						continue loop
					}
					return remotes[int(res)], nil
				case 0x03: // Ctrl-C: user aborted.
					fmt.Fprintf(comm, "\r\nGoodbye\r\n")
					return "", errors.New("user terminated session")
				}
				buf = append(buf, b[0])
			}
		}
	}
}
|
package main
import "fmt"
// printSlice reports the current length, capacity and contents of s.
func printSlice(s []int) {
	fmt.Printf("len=%d cap=%d %v\n", len(s), cap(s), s)
}

// main shows how re-slicing moves a slice's window over one shared backing
// array: len follows the window, cap measures from the window's start to the
// end of the array.
func main() {
	s := []int{2, 3, 5, 7, 11}

	s = s[1:4] // window over elements 1..3
	printSlice(s)

	s = s[:2] // shrink the window from the right
	printSlice(s)

	s = s[:3] // extend it again within capacity
	printSlice(s)

	s = s[1:] // drop the first element; capacity shrinks too
	printSlice(s)

	s = s[:] // re-slice over the full current window (no change)
	printSlice(s)
}
package utils
import (
"encoding/json"
"io"
"io/ioutil"
"log"
)
// ApiData wraps a response body (or any ReadCloser) and offers one-shot
// conversions to a string or parsed JSON. Each instance is single-use:
// ToString and ToJson drain and close the body.
type ApiData struct {
	body io.ReadCloser // consumed and closed by ToString/ToJson
}
// ToString drains and closes the underlying body and returns its contents as
// a string. A read failure is logged (matching ToJson's error handling) and
// yields whatever bytes were read, possibly none.
func (ap ApiData) ToString() string {
	defer ap.body.Close()
	bs, err := ioutil.ReadAll(ap.body)
	if err != nil {
		// Best-effort contract: report the failure instead of dropping it
		// silently as the original did.
		log.Println(err)
	}
	return string(bs)
}
// ToJson drains and closes the underlying body and parses it as a JSON
// object. Read or parse failures are logged and produce a JsonObject over an
// empty (or partially populated) map rather than an error.
func (ap ApiData) ToJson() JsonObject {
	defer ap.body.Close()
	jsonMap := make(map[string]interface{})
	bs, err := ioutil.ReadAll(ap.body)
	if err != nil {
		// The original dropped this error; log it so truncated bodies are
		// visible.
		log.Println(err)
	}
	if err := json.Unmarshal(bs, &jsonMap); err != nil {
		log.Println(err)
	}
	return JsonObject{jsonMap}
}
|
// Licensed to SolID under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. SolID licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package types
import (
"testing"
"github.com/google/go-cmp/cmp"
)
// TestStringArray_Contains verifies membership lookups: nil and empty
// receivers never match; the empty string matches only when present; and the
// "not same case" case expects a match for "AzErTy" against "azerty" —
// i.e. Contains appears to be case-insensitive (confirm against the
// implementation).
func TestStringArray_Contains(t *testing.T) {
	tests := []struct {
		name string
		s    StringArray
		args string
		want bool
	}{
		{
			name: "nil",
			s:    nil,
			want: false,
		},
		{
			name: "empty",
			s:    []string{},
			want: false,
		},
		{
			name: "empty / blank",
			s:    []string{},
			args: "",
			want: false,
		},
		{
			name: "not empty / blank",
			s:    []string{""},
			args: "",
			want: true,
		},
		{
			name: "not empty / same case",
			s:    []string{"azerty"},
			args: "azerty",
			want: true,
		},
		{
			name: "not empty / not same case",
			s:    []string{"azerty"},
			args: "AzErTy",
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.s.Contains(tt.args); got != tt.want {
				t.Errorf("StringArray.Contains() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestStringArray_AddIfNotContains verifies that adding an element already
// present is a no-op and that a new element is appended.
//
// NOTE(review): the comparison observes the append through tt.s, which only
// works if AddIfNotContains mutates the receiver in place (pointer
// receiver) — confirm against the implementation.
func TestStringArray_AddIfNotContains(t *testing.T) {
	tests := []struct {
		name string
		s    StringArray
		args string
		want StringArray
	}{
		{
			name: "contains",
			s:    []string{"1", "2", "3"},
			args: "3",
			want: []string{"1", "2", "3"},
		},
		{
			name: "not contains",
			s:    []string{"1", "2", "3"},
			args: "4",
			want: []string{"1", "2", "3", "4"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.s.AddIfNotContains(tt.args)
			if !cmp.Equal(tt.s, tt.want) {
				t.Errorf("AddIfNotContains() = %v, want %v", tt.s, tt.want)
			}
		})
	}
}
// TestStringArray_Remove verifies removal of an existing element and that
// removing an absent element leaves the array unchanged.
//
// NOTE(review): like AddIfNotContains above, this observes mutation through
// tt.s and assumes an in-place (pointer-receiver) implementation — confirm.
func TestStringArray_Remove(t *testing.T) {
	tests := []struct {
		name string
		s    StringArray
		args string
		want StringArray
	}{
		{
			name: "contains",
			s:    []string{"1", "2", "3"},
			args: "3",
			want: []string{"1", "2"},
		},
		{
			name: "not contains",
			s:    []string{"1", "2", "3"},
			args: "4",
			want: []string{"1", "2", "3"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.s.Remove(tt.args)
			if !cmp.Equal(tt.s, tt.want) {
				t.Errorf("Remove() = %v, want %v", tt.s, tt.want)
			}
		})
	}
}
// TestStringArray_HasOneOf checks whether any of the candidate items is
// present in the receiver, covering empty receivers, empty candidate lists,
// full and partial matches, and complete misses.
//
// Fixes over the original: the unused `type args struct` declaration is
// removed, and the "not empty / empty" case — which duplicated the data of
// "empty / not empty" and contradicted its own name — now actually uses a
// non-empty receiver with an empty candidate list.
func TestStringArray_HasOneOf(t *testing.T) {
	tests := []struct {
		name string
		s    StringArray
		args []string
		want bool
	}{
		{
			name: "empty",
			want: false,
			s:    []string{},
			args: []string{},
		},
		{
			name: "empty / not empty",
			want: false,
			s:    []string{},
			args: []string{""},
		},
		{
			// Non-empty receiver, nothing to look for: no match possible.
			name: "not empty / empty",
			want: false,
			s:    []string{""},
			args: []string{},
		},
		{
			name: "contains",
			want: true,
			s:    []string{"1", "2", "3"},
			args: []string{"1"},
		},
		{
			name: "partial contains",
			want: true,
			s:    []string{"1", "2", "3"},
			args: []string{"1", "4"},
		},
		{
			name: "not contains at all",
			want: false,
			s:    []string{"1", "2", "3"},
			args: []string{"4"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.s.HasOneOf(tt.args...); got != tt.want {
				t.Errorf("StringArray.HasOneOf() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
computepb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/compute_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute"
)
// BackendBucketServer implements the gRPC interface for BackendBucket. It is
// stateless: each RPC builds a fresh compute client from the request's
// service-account file.
type BackendBucketServer struct{}
// ProtoToComputeBackendBucketCdnPolicy converts a BackendBucketCdnPolicy
// resource from its proto representation; a nil proto maps to a nil policy.
func ProtoToComputeBackendBucketCdnPolicy(p *computepb.ComputeBackendBucketCdnPolicy) *compute.BackendBucketCdnPolicy {
	if p == nil {
		return nil
	}
	obj := &compute.BackendBucketCdnPolicy{
		SignedUrlCacheMaxAgeSec: dcl.Int64OrNil(p.SignedUrlCacheMaxAgeSec),
	}
	obj.SignedUrlKeyNames = append(obj.SignedUrlKeyNames, p.GetSignedUrlKeyNames()...)
	return obj
}
// ProtoToBackendBucket converts a BackendBucket resource from its proto
// representation, passing each scalar field through the dcl nil-aware
// helpers.
func ProtoToBackendBucket(p *computepb.ComputeBackendBucket) *compute.BackendBucket {
	return &compute.BackendBucket{
		BucketName:  dcl.StringOrNil(p.BucketName),
		CdnPolicy:   ProtoToComputeBackendBucketCdnPolicy(p.GetCdnPolicy()),
		Description: dcl.StringOrNil(p.Description),
		EnableCdn:   dcl.Bool(p.EnableCdn),
		Name:        dcl.StringOrNil(p.Name),
		Project:     dcl.StringOrNil(p.Project),
		SelfLink:    dcl.StringOrNil(p.SelfLink),
	}
}
// ComputeBackendBucketCdnPolicyToProto converts a BackendBucketCdnPolicy
// resource to its proto representation; nil in, nil out.
func ComputeBackendBucketCdnPolicyToProto(o *compute.BackendBucketCdnPolicy) *computepb.ComputeBackendBucketCdnPolicy {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeBackendBucketCdnPolicy{
		SignedUrlCacheMaxAgeSec: dcl.ValueOrEmptyInt64(o.SignedUrlCacheMaxAgeSec),
	}
	p.SignedUrlKeyNames = append(p.SignedUrlKeyNames, o.SignedUrlKeyNames...)
	return p
}
// BackendBucketToProto converts a BackendBucket resource to its proto
// representation, substituting zero values for unset scalar fields.
func BackendBucketToProto(resource *compute.BackendBucket) *computepb.ComputeBackendBucket {
	return &computepb.ComputeBackendBucket{
		BucketName:  dcl.ValueOrEmptyString(resource.BucketName),
		CdnPolicy:   ComputeBackendBucketCdnPolicyToProto(resource.CdnPolicy),
		Description: dcl.ValueOrEmptyString(resource.Description),
		EnableCdn:   dcl.ValueOrEmptyBool(resource.EnableCdn),
		Name:        dcl.ValueOrEmptyString(resource.Name),
		Project:     dcl.ValueOrEmptyString(resource.Project),
		SelfLink:    dcl.ValueOrEmptyString(resource.SelfLink),
	}
}
// applyBackendBucket converts the request resource, applies it through the
// given compute client, and converts the result back to its proto form.
func (s *BackendBucketServer) applyBackendBucket(ctx context.Context, c *compute.Client, request *computepb.ApplyComputeBackendBucketRequest) (*computepb.ComputeBackendBucket, error) {
	res, err := c.ApplyBackendBucket(ctx, ProtoToBackendBucket(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return BackendBucketToProto(res), nil
}
// ApplyComputeBackendBucket handles the gRPC request by building a client
// from the request's credentials and delegating to applyBackendBucket.
func (s *BackendBucketServer) ApplyComputeBackendBucket(ctx context.Context, request *computepb.ApplyComputeBackendBucketRequest) (*computepb.ComputeBackendBucket, error) {
	cl, err := createConfigBackendBucket(ctx, request.ServiceAccountFile)
	if err == nil {
		return s.applyBackendBucket(ctx, cl, request)
	}
	return nil, err
}
// DeleteComputeBackendBucket handles the gRPC request by passing it to the
// underlying BackendBucket Delete() method; the reply is always an empty
// proto, paired with the deletion error if any.
func (s *BackendBucketServer) DeleteComputeBackendBucket(ctx context.Context, request *computepb.DeleteComputeBackendBucketRequest) (*emptypb.Empty, error) {
	cl, err := createConfigBackendBucket(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	err = cl.DeleteBackendBucket(ctx, ProtoToBackendBucket(request.GetResource()))
	return &emptypb.Empty{}, err
}
// ListComputeBackendBucket handles the gRPC request by listing the project's
// backend buckets and converting every item to its proto representation.
func (s *BackendBucketServer) ListComputeBackendBucket(ctx context.Context, request *computepb.ListComputeBackendBucketRequest) (*computepb.ListComputeBackendBucketResponse, error) {
	cl, err := createConfigBackendBucket(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListBackendBucket(ctx, request.Project)
	if err != nil {
		return nil, err
	}
	var protos []*computepb.ComputeBackendBucket
	for _, r := range resources.Items {
		protos = append(protos, BackendBucketToProto(r))
	}
	return &computepb.ListComputeBackendBucketResponse{Items: protos}, nil
}
// createConfigBackendBucket builds a compute client authenticated with the
// given service-account credentials file.
func createConfigBackendBucket(ctx context.Context, serviceAccountFile string) (*compute.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return compute.NewClient(conf), nil
}
|
package http
import (
"bytes"
"context"
"fmt"
"math"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/drand/drand/chain"
"github.com/drand/drand/log"
"github.com/drand/drand/metrics"
"github.com/drand/drand/protobuf/drand"
"github.com/prometheus/client_golang/prometheus/promhttp"
json "github.com/nikkolasg/hexjson"
)
var (
	// reqTimeout bounds how long a single upstream call to the
	// drand.PublicClient may take before it is cancelled.
	reqTimeout = 5 * time.Second
)
// New creates an HTTP handler for the public Drand API. It exposes
// /public/latest, /public/<round> and /info, all instrumented with
// Prometheus request-count, latency and in-flight metrics.
func New(ctx context.Context, client drand.PublicClient, version string, logger log.Logger) (http.Handler, error) {
	if logger == nil {
		logger = log.DefaultLogger
	}
	h := handler{
		timeout:     reqTimeout,
		client:      client,
		chainInfo:   nil,
		log:         logger,
		pending:     nil,
		context:     ctx,
		latestRound: 0,
		version:     version,
	}
	mux := http.NewServeMux()
	//TODO: aggregated bulk round responses.
	mux.HandleFunc("/public/latest", h.LatestRand)
	mux.HandleFunc("/public/", h.PublicRand)
	mux.HandleFunc("/info", h.ChainInfo)
	return promhttp.InstrumentHandlerCounter(
		metrics.HTTPCallCounter,
		promhttp.InstrumentHandlerDuration(
			metrics.HTTPLatency,
			promhttp.InstrumentHandlerInFlight(
				metrics.HTTPInFlight,
				mux))), nil
}
// handler serves the public randomness endpoints backed by a
// drand.PublicClient.
type handler struct {
	timeout   time.Duration      // per-upstream-request timeout
	client    drand.PublicClient // upstream drand node API
	chainInfo *chain.Info        // lazily fetched and cached; see getChainInfo
	log       log.Logger
	// synchronization for blocking writes until randomness available.
	pendingLk   sync.RWMutex
	startOnce   sync.Once     // ensures the Watch goroutine starts at most once
	pending     []chan []byte // waiters for the next round's JSON bytes
	context     context.Context
	latestRound uint64 // last round seen by Watch; 0 means out of sync
	version     string // reported in the Server response header
}
// start initializes the waiter list and launches the background Watch
// goroutine. It is invoked exactly once via startOnce (see getRand).
func (h *handler) start() {
	h.pendingLk.Lock()
	defer h.pendingLk.Unlock()
	h.pending = make([]chan []byte, 0)
	go h.Watch(h.context)
}
// Watch follows the upstream randomness stream, records the latest round and
// fans each new round's JSON encoding out to all pending waiters. On stream
// errors it resets round tracking and reconnects after a short backoff.
func (h *handler) Watch(ctx context.Context) {
RESET:
	stream, err := h.client.PublicRandStream(context.Background(), &drand.PublicRandRequest{})
	if err != nil {
		h.log.Error("http_server", "creation of random stream failed", "err", err)
		return
	}
	for {
		next, err := stream.Recv()
		if err != nil {
			h.log.Warn("http_server", "random stream round failed", "err", err)
			h.pendingLk.Lock()
			h.latestRound = 0
			h.pendingLk.Unlock()
			// backoff on failures a bit to not fall into a tight loop.
			// TODO: tuning.
			time.Sleep(300 * time.Millisecond)
			goto RESET
		}
		// Renamed from `bytes` (which shadowed the imported bytes package),
		// and the marshal error — previously ignored — is now handled.
		b, err := json.Marshal(next)
		if err != nil {
			h.log.Warn("http_server", "failed to marshal round", "err", err)
			b = []byte{}
		}
		h.pendingLk.Lock()
		if h.latestRound+1 != next.Round && h.latestRound != 0 {
			// we missed a round, or similar. don't send bad data to peers.
			h.log.Warn("http_server", "unexpected round for watch", "err", fmt.Sprintf("expected %d, saw %d", h.latestRound+1, next.Round))
			b = []byte{}
		}
		h.latestRound = next.Round
		pending := h.pending
		h.pending = make([]chan []byte, 0)
		h.pendingLk.Unlock()
		for _, waiter := range pending {
			waiter <- b
		}
		select {
		case <-ctx.Done():
			return
		default:
		}
	}
}
// getChainInfo returns the chain info, fetching and caching it from the
// upstream client on first use. On failure it returns nil without caching,
// so a later call retries.
//
// NOTE(review): h.chainInfo is read and written here without holding a lock
// while HTTP handlers run concurrently — confirm whether this race is
// acceptable or should be guarded.
func (h *handler) getChainInfo(ctx context.Context) *chain.Info {
	if h.chainInfo != nil {
		return h.chainInfo
	}
	ctx, cancel := context.WithTimeout(ctx, h.timeout)
	defer cancel()
	pkt, err := h.client.ChainInfo(ctx, &drand.ChainInfoRequest{})
	if err != nil {
		h.log.Warn("msg", "chain info fetch failed", "err", err)
		return nil
	}
	if pkt == nil {
		h.log.Warn("msg", "chain info fetch didn't return group info")
		return nil
	}
	h.chainInfo, err = chain.InfoFromProto(pkt)
	if err != nil {
		h.log.Warn("msg", "chain info is invalid")
		return nil
	}
	return h.chainInfo
}
// getRand returns the JSON encoding of the randomness for round.
//
// If the requested round is exactly the next round expected from the watch
// stream, the call blocks on a per-request channel until Watch publishes it
// instead of issuing an upstream request. Otherwise — or if the watcher is
// out of sync (latestRound == 0) — it falls back to a direct PublicRand call
// bounded by h.timeout.
func (h *handler) getRand(ctx context.Context, round uint64) ([]byte, error) {
	h.startOnce.Do(h.start)
	// First see if we should get on the synchronized 'wait for next release' bandwagon.
	block := false
	h.pendingLk.RLock()
	block = (h.latestRound+1 == round) && h.latestRound != 0
	h.pendingLk.RUnlock()
	// If so, prepare, and if we're still sync'd, add ourselves to the list of waiters.
	if block {
		ch := make(chan []byte)
		h.pendingLk.Lock()
		// Re-check under the write lock: state may have advanced between the
		// read-locked probe above and here.
		block = (h.latestRound+1 == round) && h.latestRound != 0
		if block {
			h.pending = append(h.pending, ch)
		}
		h.pendingLk.Unlock()
		// If that was successful, we can now block until we're notified.
		if block {
			select {
			case r := <-ch:
				return r, nil
			case <-ctx.Done():
				// Cancelled while waiting: unregister our channel so Watch
				// doesn't send to a channel nobody reads.
				h.pendingLk.Lock()
				defer h.pendingLk.Unlock()
				for i, c := range h.pending {
					if c == ch {
						h.pending = append(h.pending[:i], h.pending[i+1:]...)
						break
					}
				}
				close(ch)
				return nil, ctx.Err()
			}
		}
	}
	// Fallback: ask the upstream node directly with a bounded timeout.
	req := drand.PublicRandRequest{Round: round}
	ctx, cancel := context.WithTimeout(ctx, h.timeout)
	defer cancel()
	resp, err := h.client.PublicRand(ctx, &req)
	if err != nil {
		return nil, err
	}
	return json.Marshal(resp)
}
// PublicRand serves GET /public/<round>: it parses the round number from the
// URL path, rejects (404) rounds due more than one period in the future, and
// writes the round's JSON with long-lived immutable cache headers.
func (h *handler) PublicRand(w http.ResponseWriter, r *http.Request) {
	// Get the round.
	round := strings.Replace(r.URL.Path, "/public/", "", 1)
	roundN, err := strconv.ParseUint(round, 10, 64)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		h.log.Warn("http_server", "failed to parse client round", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path))
		return
	}
	info := h.getChainInfo(r.Context())
	roundExpectedTime := time.Now()
	if info != nil {
		roundExpectedTime = time.Unix(chain.TimeOfRound(info.Period, info.GenesisTime, roundN), 0)
		// Reject rounds that are not due within the next period.
		if roundExpectedTime.After(time.Now().Add(info.Period)) {
			w.WriteHeader(http.StatusNotFound)
			h.log.Warn("http_server", "request in the future", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path))
			return
		}
	}
	data, err := h.getRand(r.Context(), roundN)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		h.log.Warn("http_server", "failed to get randomness", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path), "err", err)
		return
	}
	// Headers per recommendation for static assets at
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
	w.Header().Set("Server", h.version)
	w.Header().Set("Cache-Control", "public, max-age=604800, immutable")
	w.Header().Set("Expires", time.Now().Add(7*24*time.Hour).Format(http.TimeFormat))
	w.Header().Set("Content-Type", "application/json")
	http.ServeContent(w, r, "rand.json", roundExpectedTime, bytes.NewReader(data))
}
// LatestRand serves GET /public/latest: it fetches the most recent round
// from the upstream node and writes it with cache headers that expire when
// the next round is due.
func (h *handler) LatestRand(w http.ResponseWriter, r *http.Request) {
	req := drand.PublicRandRequest{Round: 0}
	ctx, cancel := context.WithTimeout(r.Context(), h.timeout)
	defer cancel()
	resp, err := h.client.PublicRand(ctx, &req)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		h.log.Warn("http_server", "failed to get randomness", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path), "err", err)
		return
	}
	data, err := json.Marshal(resp)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		h.log.Warn("http_server", "failed to marshal randomness", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path), "err", err)
		return
	}
	info := h.getChainInfo(r.Context())
	roundTime := time.Now()
	nextTime := time.Now()
	if info != nil {
		roundTime = time.Unix(chain.TimeOfRound(info.Period, info.GenesisTime, resp.Round), 0)
		nextTime = time.Unix(chain.TimeOfRound(info.Period, info.GenesisTime, resp.Round+1), 0)
		// Cache the response only until the next round is due. This block is
		// now inside the nil guard: the original read info.Period after the
		// guard, a latent nil dereference (previously avoided only by the
		// `remaining > 0` short-circuit) and a misleading "in the past" warn
		// whenever chain info could not be fetched.
		remaining := time.Until(nextTime)
		if remaining > 0 && remaining < info.Period {
			seconds := int(math.Ceil(remaining.Seconds()))
			w.Header().Set("Cache-Control", fmt.Sprintf("max-age:%d, public", seconds))
		} else {
			h.log.Warn("http_server", "latest rand in the past", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path), "remaining", remaining)
		}
	}
	w.Header().Set("Server", h.version)
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Expires", nextTime.Format(http.TimeFormat))
	w.Header().Set("Last-Modified", roundTime.Format(http.TimeFormat))
	w.Write(data)
}
// ChainInfo serves GET /info: the chain's group information as JSON, with
// immutable cache headers and the genesis time as the modification time.
func (h *handler) ChainInfo(w http.ResponseWriter, r *http.Request) {
	info := h.getChainInfo(r.Context())
	if info == nil {
		w.WriteHeader(http.StatusNoContent)
		h.log.Warn("http_server", "failed to serve group", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path))
		return
	}
	var chainBuff bytes.Buffer
	if err := info.ToJSON(&chainBuff); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		h.log.Warn("http_server", "failed to marshal group", "client", r.RemoteAddr, "req", url.PathEscape(r.URL.Path), "err", err)
		return
	}
	// Headers per recommendation for static assets at
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
	hdr := w.Header()
	hdr.Set("Server", h.version)
	hdr.Set("Cache-Control", "public, max-age=604800, immutable")
	hdr.Set("Expires", time.Now().Add(7*24*time.Hour).Format(http.TimeFormat))
	hdr.Set("Content-Type", "application/json")
	http.ServeContent(w, r, "info.json", time.Unix(info.GenesisTime, 0), bytes.NewReader(chainBuff.Bytes()))
}
|
/*
Copyright 2023 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"fmt"
goast "go/ast"
"io"
"strings"
cueast "cuelang.org/go/cue/ast"
cuetoken "cuelang.org/go/cue/token"
"golang.org/x/tools/go/packages"
"github.com/oam-dev/kubevela/references/cuegen"
)
// Fully-qualified type names used to recognize provider declarations in the
// scanned package.
const (
	typeProviderFnMap         = "map[string]github.com/kubevela/pkg/cue/cuex/runtime.ProviderFn" // type of the provider-function map
	typeProvidersParamsPrefix = "github.com/kubevela/pkg/cue/cuex/providers.Params"              // prefix of provider params types
	typeProvidersReturnsPrefix = "github.com/kubevela/pkg/cue/cuex/providers.Returns"            // prefix of provider returns types
)
// Field labels emitted into every generated cue provider struct.
const (
	doKey       = "do"       // label carrying the provider's "do" name
	providerKey = "provider" // label carrying the provider (package) name
)
// provider describes one entry of the provider-function map: the "do" name
// exposed to cue plus the Go identifiers implementing it.
type provider struct {
	name string // map key literal, double quotes included
	params string // Go struct name of the provider's params type
	returns string // Go struct name of the provider's returns type
	do string // Go function name implementing the provider
}
// Options is options of generation: the input Go file, the output writer and
// the cuegen settings forwarded to the generator.
type Options struct {
	File string // Go file path
	Writer io.Writer // target writer
	Types map[string]cuegen.Type // option cuegen.WithTypes
	Nullable bool // option cuegen.WithNullable
}
// Generate generates cue provider declarations from the Go structs in
// opts.File and writes the formatted result to opts.Writer.
func Generate(opts Options) (rerr error) {
	g, err := cuegen.NewGenerator(opts.File)
	if err != nil {
		return err
	}
	// Assemble generator options: user type mappings, optional nullability,
	// and a filter keeping only provider Params/Returns structs.
	genOpts := []cuegen.Option{cuegen.WithTypes(opts.Types)}
	if opts.Nullable {
		genOpts = append(genOpts, cuegen.WithNullable())
	}
	genOpts = append(genOpts, cuegen.WithTypeFilter(func(spec *goast.TypeSpec) bool {
		typ := g.Package().TypesInfo.TypeOf(spec.Type).String()
		return strings.HasPrefix(typ, typeProvidersParamsPrefix) ||
			strings.HasPrefix(typ, typeProvidersReturnsPrefix)
	}))
	decls, err := g.Generate(genOpts...)
	if err != nil {
		return err
	}
	providers, err := extractProviders(g.Package())
	if err != nil {
		return err
	}
	newDecls, err := modifyDecls(g.Package().Name, decls, providers)
	if err != nil {
		return err
	}
	return g.Format(opts.Writer, newDecls)
}
// extractProviders extracts the providers from map[string]cuexruntime.ProviderFn.
//
// It locates the composite literal of the provider-function map via the
// package's type info and, for each entry of the expected shape
// `"name": Fn[Params, Returns](do)`, records the name and the three Go
// identifiers. Entries of any other shape make the type assertions below
// panic; the deferred recoverAssert converts that panic into rerr.
func extractProviders(pkg *packages.Package) (providers []provider, rerr error) {
	var (
		providersMap *goast.CompositeLit
		ok bool
	)
	// extract provider def map
	for k, v := range pkg.TypesInfo.Types {
		if v.Type.String() != typeProviderFnMap {
			continue
		}
		if providersMap, ok = k.(*goast.CompositeLit); ok {
			break
		}
	}
	if providersMap == nil {
		return nil, fmt.Errorf("no provider function map found like '%s'", typeProviderFnMap)
	}
	defer recoverAssert(&rerr, "extract providers")
	for _, e := range providersMap.Elts {
		pair := e.(*goast.KeyValueExpr)
		doName := pair.Key.(*goast.BasicLit)
		value := pair.Value.(*goast.CallExpr)
		indices := value.Fun.(*goast.IndexListExpr)
		params := indices.Indices[0].(*goast.Ident) // params struct name
		returns := indices.Indices[1].(*goast.Ident) // returns struct name
		do := value.Args[0].(*goast.Ident)
		providers = append(providers, provider{
			name: doName.Value,
			params: params.Name,
			returns: returns.Name,
			do: do.Name,
		})
	}
	return providers, nil
}
// modifyDecls re-generates cue ast decls of providers.
//
// For each provider it builds a `#Do` struct containing the "do" and
// "provider" labels followed by the fields of the provider's params and
// returns structs, looked up by name in the previously generated decls.
// Missing or non-struct entries panic; the deferred recoverAssert converts
// that into rerr.
func modifyDecls(provider string, old []cuegen.Decl, providers []provider) (decls []cuegen.Decl, rerr error) {
	defer recoverAssert(&rerr, "modify decls failed")
	// map[StructName]StructLit
	mapping := make(map[string]cueast.Expr)
	for _, decl := range old {
		if t, ok := decl.(*cuegen.Struct); ok {
			mapping[t.Name] = t.Expr
		}
	}
	// The same provider (package) name field is shared by every generated
	// struct.
	providerField := &cueast.Field{
		Label: cuegen.Ident(providerKey, true),
		Value: cueast.NewString(provider),
	}
	for _, p := range providers {
		params := mapping[p.params].(*cueast.StructLit).Elts
		returns := mapping[p.returns].(*cueast.StructLit).Elts
		doField := &cueast.Field{
			Label: cuegen.Ident(doKey, true),
			Value: cueast.NewLit(cuetoken.STRING, p.name), // p.name has contained double quotes
		}
		pdecls := []cueast.Decl{doField, providerField}
		pdecls = append(pdecls, params...)
		pdecls = append(pdecls, returns...)
		decls = append(decls, &cuegen.Struct{CommonFields: cuegen.CommonFields{
			Expr: &cueast.StructLit{
				Elts: pdecls,
			},
			Name: "#" + p.do,
			Pos: cuetoken.NewSection.Pos(),
		}})
	}
	return decls, nil
}
// recoverAssert captures a panic caused by an invalid type assertion or an
// out-of-range index, so callers don't need to check each assertion and
// index; the recovered value is stored in *err prefixed with msg.
func recoverAssert(err *error, msg string) {
	if r := recover(); r != nil {
		// msg is the context prefix and r is the recovered panic value; the
		// original passed them to fmt.Errorf in the wrong order.
		*err = fmt.Errorf("%s: panic: %v", msg, r)
	}
}
|
package main
import (
"fmt"
)
// main prints a slice of string-triples twice: first each triple as a whole
// line, then grouped with the first entry as a heading and every entry
// indented beneath it.
func main() {
	ss := [][]string{
		{"miu", "milton", "encher o saco"},
		{"mimi", "martha", "pedir comida"},
		{"meus alunos queridos", "que estudam bastante", "fazer os exercícios ninja"},
	}

	// First pass: whole triples.
	for _, group := range ss {
		fmt.Println(group)
	}

	fmt.Print("\n\n")

	// Second pass: heading plus indented members.
	for _, group := range ss {
		fmt.Println(group[0])
		for _, item := range group {
			fmt.Println("\t", item)
		}
	}
}
|
package pomogo
import (
"bytes"
"log"
"os/exec"
)
// runTask invokes `task <uuid> <action>` and returns whatever the command
// wrote to stdout along with the command's error, if any.
func runTask(taskUUID, action string) ([]byte, error) {
	cmd := exec.Command("task", taskUUID, action)
	var out bytes.Buffer
	cmd.Stdout = &out
	err := cmd.Run()
	return out.Bytes(), err
}

// StopTask stops the task identified by taskUUID via the external `task`
// command, returning the command's stdout.
func StopTask(taskUUID string) ([]byte, error) {
	log.Println("StopTask")
	return runTask(taskUUID, "stop")
}

// StartTask starts the task identified by taskUUID via the external `task`
// command, returning the command's stdout.
func StartTask(taskUUID string) ([]byte, error) {
	log.Println("StartTask")
	return runTask(taskUUID, "start")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.