text
stringlengths
11
4.05M
// Package options provides convenience methods for reading node's options. package options import ( "github.com/jhump/protoreflect/desc" "github.com/jhump/protoreflect/dynamic" "github.com/richardwilkes/toolbox/errs" ) // OptionReader embeds an extension registry type OptionReader struct { init bool Registry *dynamic.ExtensionRegistry } // NewOptionReader uses a default extension registry if the passed one is nil func NewOptionReader(registry *dynamic.ExtensionRegistry) *OptionReader { if registry == nil { registry = dynamic.NewExtensionRegistryWithDefaults() } o := &OptionReader{ Registry: registry, } return o } // ReadOptionByID attempts to load the value into the "out" param if it is the correct type, // if the found type and the out type do not agree an error is returned func (o *OptionReader) ReadOptionByID(node desc.Descriptor, tagID int, out interface{}) error { val, err := o.GetOptionByID(node, tagID) if err != nil { return errs.Wrap(err) } return o.readValue(val, out) } // ReadOptionByName attempts to load the value into the "out" param if it is the correct type, // if the found type and the out type do not agree an error is returned func (o *OptionReader) ReadOptionByName(node desc.Descriptor, tagName string, out interface{}) error { val, err := o.GetOptionByName(node, tagName) if err != nil { return errs.Wrap(err) } return o.readValue(val, out) } // GetOptionByID attempts to find an option on the node by tagID func (o *OptionReader) GetOptionByID(node desc.Descriptor, tagID int) (interface{}, error) { opts, err := o.getOptions(node) if err != nil { return nil, errs.Wrap(err) } val, err := opts.TryGetFieldByNumber(tagID) if err != nil { return nil, errs.Wrap(err) } return val, nil } // GetOptionByName attempts to find an option on the node by tagName func (o *OptionReader) GetOptionByName(node desc.Descriptor, tagName string) (interface{}, error) { opts, err := o.getOptions(node) if err != nil { return nil, errs.Wrap(err) } val, err := 
opts.TryGetFieldByName(tagName) if err != nil { return nil, errs.Wrap(err) } return val, nil } // ------- // helpers func (o *OptionReader) getOptions(node desc.Descriptor) (*dynamic.Message, error) { if node == nil { return nil, nil } if !o.init { o.Registry.AddExtensionsFromFileRecursively(node.GetFile()) o.init = true } opts := node.GetOptions() if opts == nil { return nil, nil } msg, err := dynamic.AsDynamicMessageWithExtensionRegistry(opts, o.Registry) if err != nil { return nil, errs.Wrap(err) } return msg, nil } func (o *OptionReader) readValue(in interface{}, out interface{}) error { if in == nil { return nil } if out == nil { return nil } switch target := out.(type) { case *string: data, ok := in.(string) if !ok { return errs.Newf("value was %T, not %T", in, data) } *target = data case *bool: data, ok := in.(bool) if !ok { return errs.Newf("value was %T, not %T", in, data) } *target = data case *int16: data, ok := in.(int16) if !ok { return errs.Newf("value was %T, not %T", in, data) } *target = data case *int32: data, ok := in.(int32) if !ok { return errs.Newf("value was %T, not %T", in, data) } *target = data case *int64: data, ok := in.(int64) if !ok { return errs.Newf("value was %T, not %T", in, data) } *target = data default: return errs.Newf("helper could not read %T, please use `GetOption...` directly", target) } return nil }
package main

import "fmt"

// Contact exported
type Contact struct {
	greeting string
	name     string
}

// SwitchOnType exported
//
// SwitchOnType inspects the dynamic type of x and prints a greeting that
// matches it; unknown types fall through to a default message.
func SwitchOnType(x interface{}) {
	// Select the message first, print once at the end.
	var msg string
	switch x.(type) { // type assertion ("type" is the keyword form here)
	case int:
		msg = "Sup Integer"
	case string:
		msg = "Sup String"
	case Contact:
		msg = "Sup Contact"
	default:
		msg = "No matches"
	}
	fmt.Println(msg)
}

func main() {
	// Exercise each branch of the type switch.
	SwitchOnType(7)
	SwitchOnType("UFO")
	t := Contact{"Good to see you,", "Jim"}
	fmt.Println("t: ", t)
	SwitchOnType(t)
}
/* Copyright 2019 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package secretsmanager import ( "context" "net/http" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" awssecretsmanager "github.com/aws/aws-sdk-go-v2/service/secretsmanager" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" runtimev1alpha1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/crossplane/provider-aws/apis/secretsmanager/v1alpha1" awsclients "github.com/crossplane/provider-aws/pkg/clients" "github.com/crossplane/provider-aws/pkg/clients/secretsmanager" "github.com/crossplane/provider-aws/pkg/clients/secretsmanager/fake" ) const ( secretKey = "credentials" credData = "confidential" ) var ( secretName = "some-name" secretNamespace = "some-namespace" kmsKeyIDRef = "kms-key-id" randomDate = time.Now() tags = []v1alpha1.Tag{ { Key: "some-key", Value: "some-value", }, { Key: "some-other-key", Value: "some-other-value", }, } awsTags = []awssecretsmanager.Tag{ { Key: aws.String("some-key"), Value: aws.String("some-value"), }, { Key: aws.String("some-other-key"), Value: aws.String("some-other-value"), }, } 
errBoom = errors.New("boom") ) type args struct { kube client.Client secretsmanager secretsmanager.Client cr *v1alpha1.Secret } func secret(m ...secretModifier) *v1alpha1.Secret { cr := &v1alpha1.Secret{} for _, f := range m { f(cr) } return cr } type secretModifier func(*v1alpha1.Secret) func withExternalName(s string) secretModifier { return func(r *v1alpha1.Secret) { meta.SetExternalName(r, s) } } func withDeletedDate(s time.Time) secretModifier { return func(r *v1alpha1.Secret) { r.Status.AtProvider.DeletedDate = &metav1.Time{Time: s} } } func withDeletionDate(s time.Time) secretModifier { return func(r *v1alpha1.Secret) { r.Status.AtProvider.DeletionDate = &metav1.Time{Time: s} } } func withForceDeleteWithoutRecovery(b bool) secretModifier { return func(r *v1alpha1.Secret) { r.Spec.ForProvider.ForceDeleteWithoutRecovery = &b } } func withRecoveryWindow(i int64) secretModifier { return func(r *v1alpha1.Secret) { r.Spec.ForProvider.RecoveryWindowInDays = &i } } func withConditions(c ...runtimev1alpha1.Condition) secretModifier { return func(r *v1alpha1.Secret) { r.Status.ConditionedStatus.Conditions = c } } func withSecretRef(n, ns, key string) secretModifier { return func(r *v1alpha1.Secret) { r.Spec.ForProvider.SecretRef = &v1alpha1.SecretSelector{ SecretReference: &runtimev1alpha1.SecretReference{ Name: n, Namespace: ns, }, Key: key, } } } func withKmsKeyIDRef(kmsKeyIDRef string) secretModifier { return func(r *v1alpha1.Secret) { r.Spec.ForProvider.KmsKeyRef = &runtimev1alpha1.Reference{ Name: kmsKeyIDRef, } } } func withTagList(tagMaps ...map[string]string) secretModifier { var tagList []v1alpha1.Tag for _, tagMap := range tagMaps { for k, v := range tagMap { tagList = append(tagList, v1alpha1.Tag{Key: k, Value: v}) } } return func(r *v1alpha1.Secret) { r.Spec.ForProvider.Tags = tagList } } func withTags(p []v1alpha1.Tag) secretModifier { return func(r *v1alpha1.Secret) { r.Spec.ForProvider.Tags = p } } func TestInitialize(t *testing.T) { type args struct { 
cr *v1alpha1.Secret kube client.Client } type want struct { cr *v1alpha1.Secret err error } cases := map[string]struct { args want }{ "Successful": { args: args{ cr: secret(withTagList(map[string]string{"foo": "bar"})), kube: &test.MockClient{MockUpdate: test.NewMockUpdateFn(nil)}, }, want: want{ cr: secret(withTagList(resource.GetExternalTags(secret()), map[string]string{"foo": "bar"})), }, }, "UpdateFailed": { args: args{ cr: secret(), kube: &test.MockClient{MockUpdate: test.NewMockUpdateFn(errBoom)}, }, want: want{ err: errors.Wrap(errBoom, errKubeUpdateFailed), }, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { e := &tagger{kube: tc.kube} err := e.Initialize(context.Background(), tc.args.cr) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.cr, tc.args.cr, cmpopts.SortSlices(func(a, b v1alpha1.Tag) bool { return a.Key > b.Key })); err == nil && diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } }) } } func TestObserve(t *testing.T) { type want struct { cr *v1alpha1.Secret result managed.ExternalObservation err error } cases := map[string]struct { args want }{ "SuccessfulObservation": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, MockUpdate: test.NewMockUpdateFn(nil), }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{}}, } }, MockGetSecretValueRequest: func(input *awssecretsmanager.GetSecretValueInput) awssecretsmanager.GetSecretValueRequest { return 
awssecretsmanager.GetSecretValueRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.GetSecretValueOutput{ SecretString: awsclients.String(credData), }, }, } }, }, cr: secret( withExternalName(secretName), withSecretRef(secretName, secretNamespace, secretKey), withKmsKeyIDRef(kmsKeyIDRef), ), }, want: want{ cr: secret( withExternalName(secretName), withConditions(runtimev1alpha1.Available()), withSecretRef(secretName, secretNamespace, secretKey), withKmsKeyIDRef(kmsKeyIDRef), ), result: managed.ExternalObservation{ ResourceExists: true, ResourceUpToDate: true, ResourceLateInitialized: false, }, }, }, "SuccessfulObservationWithTags": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{ Tags: awsTags, }}, } }, MockGetSecretValueRequest: func(input *awssecretsmanager.GetSecretValueInput) awssecretsmanager.GetSecretValueRequest { return awssecretsmanager.GetSecretValueRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.GetSecretValueOutput{ SecretString: awsclients.String(credData), }, }, } }, }, cr: secret( withExternalName(secretName), withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), }, want: want{ cr: secret( withExternalName(secretName), withConditions(runtimev1alpha1.Available()), withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), result: 
managed.ExternalObservation{ ResourceExists: true, ResourceUpToDate: true, ResourceLateInitialized: false, }, }, }, "SuccessfulObservationWithoutExternalName": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{ Tags: awsTags, }}, } }, MockGetSecretValueRequest: func(input *awssecretsmanager.GetSecretValueInput) awssecretsmanager.GetSecretValueRequest { return awssecretsmanager.GetSecretValueRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.GetSecretValueOutput{ SecretString: awsclients.String(credData), }, }, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), result: managed.ExternalObservation{ ResourceExists: false, ResourceUpToDate: false, ResourceLateInitialized: false, }, }, }, "SecretNotUpToDate": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: 
&http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{}}, } }, MockGetSecretValueRequest: func(input *awssecretsmanager.GetSecretValueInput) awssecretsmanager.GetSecretValueRequest { return awssecretsmanager.GetSecretValueRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.GetSecretValueOutput{ SecretString: awsclients.String("some-outdated-secret-value"), }, }, } }, }, cr: secret( withExternalName(secretName), withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withExternalName(secretName), withConditions(runtimev1alpha1.Available()), withSecretRef(secretName, secretNamespace, secretKey), ), result: managed.ExternalObservation{ ResourceExists: true, ResourceUpToDate: false, ResourceLateInitialized: false, }, }, }, "FailedDescribeSecretRequest": { args: args{ secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Error: errBoom}, } }, }, cr: secret( withExternalName(secretName), ), }, want: want{ cr: secret( withExternalName(secretName), ), err: errors.Wrap(errBoom, errDescribeSecretFailed), }, }, "LateInitFailedKubeUpdate": { args: args{ kube: &test.MockClient{ MockUpdate: test.NewMockUpdateFn(errBoom), }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{ Tags: awsTags, }}, } }, MockGetSecretValueRequest: func(input *awssecretsmanager.GetSecretValueInput) awssecretsmanager.GetSecretValueRequest { return 
awssecretsmanager.GetSecretValueRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.GetSecretValueOutput{}}, } }, }, cr: secret( withExternalName(secretName), ), }, want: want{ cr: secret( withExternalName(secretName), withTags(tags)), err: errors.Wrap(errBoom, errKubeUpdateFailed), }, }, "FailedGetSecretValueRequest": { args: args{ kube: &test.MockClient{ MockUpdate: test.NewMockUpdateFn(nil), }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{}}, } }, MockGetSecretValueRequest: func(input *awssecretsmanager.GetSecretValueInput) awssecretsmanager.GetSecretValueRequest { return awssecretsmanager.GetSecretValueRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Error: errBoom}, } }, }, cr: secret( withExternalName(secretName), ), }, want: want{ cr: secret( withExternalName(secretName), ), err: errors.Wrap(errBoom, errGetSecretValueFailed), }, }, "NotFound": { args: args{ secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Error: errors.New(awssecretsmanager.ErrCodeResourceNotFoundException)}, } }, }, cr: secret(), }, want: want{ cr: secret(), result: managed.ExternalObservation{}, }, }, "DeletedDateNotNil": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{} secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, MockUpdate: test.NewMockUpdateFn(nil), }, secretsmanager: &fake.MockClient{ 
MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{ DeletedDate: &randomDate, Tags: awsTags, }}, } }, MockGetSecretValueRequest: func(input *awssecretsmanager.GetSecretValueInput) awssecretsmanager.GetSecretValueRequest { return awssecretsmanager.GetSecretValueRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.GetSecretValueOutput{}}, } }, }, cr: secret( withExternalName(secretName), withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withExternalName(secretName), withConditions(runtimev1alpha1.Deleting().WithMessage(secretMarkedForDeletion)), withSecretRef(secretName, secretNamespace, secretKey), withDeletedDate(randomDate), withTags(tags), ), result: managed.ExternalObservation{ ResourceExists: true, ResourceUpToDate: true, ResourceLateInitialized: true, }, }, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { e := &external{kube: tc.kube, client: tc.secretsmanager} o, err := e.Observe(context.Background(), tc.args.cr) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.cr, tc.args.cr, test.EquateConditions()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.result, o); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } }) } } func TestCreate(t *testing.T) { type want struct { cr *v1alpha1.Secret result managed.ExternalCreation err error } cases := map[string]struct { args want }{ "SuccessfulCreation": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } 
secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockCreateSecretRequest: func(input *awssecretsmanager.CreateSecretInput) awssecretsmanager.CreateSecretRequest { return awssecretsmanager.CreateSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.CreateSecretOutput{}}, } }, }, cr: secret(withSecretRef(secretName, secretNamespace, secretKey)), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Creating()), withSecretRef(secretName, secretNamespace, secretKey), ), result: managed.ExternalCreation{}, }, }, "FailedGetSecret": { args: args{ kube: &test.MockClient{ MockGet: test.NewMockGetFn(errBoom), }, cr: secret(withSecretRef(secretName, secretNamespace, secretKey)), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Creating()), withSecretRef(secretName, secretNamespace, secretKey), ), result: managed.ExternalCreation{}, err: errors.Wrap(errBoom, errK8sSecretNotFound), }, }, "FailedCreateSecretRequest": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockCreateSecretRequest: func(input *awssecretsmanager.CreateSecretInput) awssecretsmanager.CreateSecretRequest { return awssecretsmanager.CreateSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Error: errBoom}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Creating()), withSecretRef(secretName, secretNamespace, secretKey), ), result: managed.ExternalCreation{}, err: errors.Wrap(errBoom, errCreateFailed), }, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { e := &external{kube: 
tc.kube, client: tc.secretsmanager} o, err := e.Create(context.Background(), tc.args.cr) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.cr, tc.args.cr, test.EquateConditions()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.result, o); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } }) } } func TestUpdate(t *testing.T) { type want struct { cr *v1alpha1.Secret result managed.ExternalUpdate err error } cases := map[string]struct { args want }{ "SuccessfulUpdate": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{}}, } }, MockUpdateSecretRequest: func(input *awssecretsmanager.UpdateSecretInput) awssecretsmanager.UpdateSecretRequest { return awssecretsmanager.UpdateSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UpdateSecretOutput{}}, } }, MockTagResourceRequest: func(input *awssecretsmanager.TagResourceInput) awssecretsmanager.TagResourceRequest { return awssecretsmanager.TagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.TagResourceOutput{}}, } }, MockUntagResourceRequest: func(input *awssecretsmanager.UntagResourceInput) awssecretsmanager.UntagResourceRequest { return awssecretsmanager.UntagResourceRequest{ Request: &aws.Request{HTTPRequest: 
&http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UntagResourceOutput{}}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), }, }, "SuccessfulUpdateRemoveTags": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{ Tags: awsTags, }}, } }, MockUpdateSecretRequest: func(input *awssecretsmanager.UpdateSecretInput) awssecretsmanager.UpdateSecretRequest { return awssecretsmanager.UpdateSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UpdateSecretOutput{}}, } }, MockTagResourceRequest: func(input *awssecretsmanager.TagResourceInput) awssecretsmanager.TagResourceRequest { return awssecretsmanager.TagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.TagResourceOutput{}}, } }, MockUntagResourceRequest: func(input *awssecretsmanager.UntagResourceInput) awssecretsmanager.UntagResourceRequest { return awssecretsmanager.UntagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UntagResourceOutput{}}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, }, 
"FailedDescribeSecretRequest": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Error: errBoom}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), err: errors.Wrap(errBoom, errDescribeSecretFailed), }, }, "FailedGetSecret": { args: args{ kube: &test.MockClient{ MockGet: test.NewMockGetFn(errBoom), }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{}}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), err: errors.Wrap(errBoom, errK8sSecretNotFound), }, }, "FailedUpdateSecretRequest": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: 
&aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{}}, } }, MockUpdateSecretRequest: func(input *awssecretsmanager.UpdateSecretInput) awssecretsmanager.UpdateSecretRequest { return awssecretsmanager.UpdateSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Error: errBoom}, } }, MockTagResourceRequest: func(input *awssecretsmanager.TagResourceInput) awssecretsmanager.TagResourceRequest { return awssecretsmanager.TagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.TagResourceOutput{}}, } }, MockUntagResourceRequest: func(input *awssecretsmanager.UntagResourceInput) awssecretsmanager.UntagResourceRequest { return awssecretsmanager.UntagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UntagResourceOutput{}}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), err: errors.Wrap(errBoom, errUpdateFailed), }, }, "FailedTagResourceRequest": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{}}, } }, MockUpdateSecretRequest: func(input *awssecretsmanager.UpdateSecretInput) awssecretsmanager.UpdateSecretRequest { return awssecretsmanager.UpdateSecretRequest{ Request: 
&aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UpdateSecretOutput{}}, } }, MockTagResourceRequest: func(input *awssecretsmanager.TagResourceInput) awssecretsmanager.TagResourceRequest { return awssecretsmanager.TagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Error: errBoom}, } }, MockUntagResourceRequest: func(input *awssecretsmanager.UntagResourceInput) awssecretsmanager.UntagResourceRequest { return awssecretsmanager.UntagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UntagResourceOutput{}}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), withTags(tags), ), err: errors.Wrap(errBoom, errCreateTags), }, }, "FailedUntagResourceRequest": { args: args{ kube: &test.MockClient{ MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { secret := corev1.Secret{ Data: map[string][]byte{}, } secret.Data[secretKey] = []byte(credData) secret.DeepCopyInto(obj.(*corev1.Secret)) return nil }, }, secretsmanager: &fake.MockClient{ MockDescribeSecretRequest: func(input *awssecretsmanager.DescribeSecretInput) awssecretsmanager.DescribeSecretRequest { return awssecretsmanager.DescribeSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DescribeSecretOutput{ Tags: awsTags, }}, } }, MockUpdateSecretRequest: func(input *awssecretsmanager.UpdateSecretInput) awssecretsmanager.UpdateSecretRequest { return awssecretsmanager.UpdateSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.UpdateSecretOutput{}}, } }, MockTagResourceRequest: func(input *awssecretsmanager.TagResourceInput) awssecretsmanager.TagResourceRequest { return 
awssecretsmanager.TagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.TagResourceOutput{}}, } }, MockUntagResourceRequest: func(input *awssecretsmanager.UntagResourceInput) awssecretsmanager.UntagResourceRequest { return awssecretsmanager.UntagResourceRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Error: errBoom}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), err: errors.Wrap(errBoom, errRemoveTags), }, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { e := &external{kube: tc.kube, client: tc.secretsmanager} o, err := e.Update(context.Background(), tc.args.cr) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.cr, tc.args.cr, test.EquateConditions()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.result, o); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } }) } } func TestDelete(t *testing.T) { type want struct { cr *v1alpha1.Secret err error } cases := map[string]struct { args want }{ "SuccessfulDeletion": { args: args{ secretsmanager: &fake.MockClient{ MockDeleteSecretRequest: func(input *awssecretsmanager.DeleteSecretInput) awssecretsmanager.DeleteSecretRequest { return awssecretsmanager.DeleteSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DeleteSecretOutput{ DeletionDate: &randomDate, }}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Deleting()), withDeletionDate(randomDate), withSecretRef(secretName, secretNamespace, secretKey), ), }, }, "SuccessfulDeletionWithForceDeleteWithoutRecoveryTrue": { args: args{ secretsmanager: 
&fake.MockClient{ MockDeleteSecretRequest: func(input *awssecretsmanager.DeleteSecretInput) awssecretsmanager.DeleteSecretRequest { return awssecretsmanager.DeleteSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DeleteSecretOutput{ DeletionDate: &randomDate, }}, } }, }, cr: secret( withDeletedDate(randomDate), withSecretRef(secretName, secretNamespace, secretKey), withForceDeleteWithoutRecovery(true), ), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Deleting()), withDeletionDate(randomDate), withDeletedDate(randomDate), withSecretRef(secretName, secretNamespace, secretKey), withForceDeleteWithoutRecovery(true), ), }, }, "SuccessfulDeletionWithForceDeleteWithoutRecoveryTrueAndRecoveryWindowIsNotNil": { args: args{ secretsmanager: &fake.MockClient{ MockDeleteSecretRequest: func(input *awssecretsmanager.DeleteSecretInput) awssecretsmanager.DeleteSecretRequest { return awssecretsmanager.DeleteSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Error: errors.New(awssecretsmanager.ErrCodeInvalidParameterException)}, } }, }, cr: secret( withDeletionDate(randomDate), withDeletedDate(randomDate), withSecretRef(secretName, secretNamespace, secretKey), withForceDeleteWithoutRecovery(true), withRecoveryWindow(int64(7)), ), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Deleting()), withDeletionDate(randomDate), withDeletedDate(randomDate), withSecretRef(secretName, secretNamespace, secretKey), withForceDeleteWithoutRecovery(true), withRecoveryWindow(int64(7)), ), err: errors.Wrap(errors.New(awssecretsmanager.ErrCodeInvalidParameterException), errDeleteFailed), }, }, "ForceDeleteWithoutRecoveryIsFalseAndRecoveryWindowIsNil": { args: args{ secretsmanager: &fake.MockClient{ MockDeleteSecretRequest: func(input *awssecretsmanager.DeleteSecretInput) awssecretsmanager.DeleteSecretRequest { return awssecretsmanager.DeleteSecretRequest{ Request: &aws.Request{HTTPRequest: 
&http.Request{}, Retryer: aws.NoOpRetryer{}, Data: &awssecretsmanager.DeleteSecretOutput{ DeletionDate: secretsmanager.TimeToPtr(randomDate.AddDate(0, 0, 30)), }}, } }, }, cr: secret( withDeletionDate(time.Now().Add(5*time.Minute)), withDeletedDate(randomDate), withSecretRef(secretName, secretNamespace, secretKey), withForceDeleteWithoutRecovery(false), ), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Deleting()), withDeletionDate(randomDate.AddDate(0, 0, 30)), withDeletedDate(randomDate), withSecretRef(secretName, secretNamespace, secretKey), withForceDeleteWithoutRecovery(false), ), }, }, "FailedDeleteSecretRequest": { args: args{ secretsmanager: &fake.MockClient{ MockDeleteSecretRequest: func(input *awssecretsmanager.DeleteSecretInput) awssecretsmanager.DeleteSecretRequest { return awssecretsmanager.DeleteSecretRequest{ Request: &aws.Request{HTTPRequest: &http.Request{}, Retryer: aws.NoOpRetryer{}, Error: errBoom}, } }, }, cr: secret( withSecretRef(secretName, secretNamespace, secretKey), ), }, want: want{ cr: secret( withConditions(runtimev1alpha1.Deleting()), withSecretRef(secretName, secretNamespace, secretKey), ), err: errors.Wrap(errBoom, errDeleteFailed), }, }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { e := &external{kube: tc.kube, client: tc.secretsmanager} err := e.Delete(context.Background(), tc.args.cr) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } if diff := cmp.Diff(tc.want.cr, tc.args.cr, test.EquateConditions()); diff != "" { t.Errorf("r: -want, +got:\n%s", diff) } }) } }
package simple_factory

import (
	"fmt"
	"testing"
)

// TestCalculatorFactory verifies that OperationFactory produces working
// add ("+") and subtract ("-") operations.
//
// Fixes: renamed from TestCaculatorFactory (spelling), and the test now
// asserts the computed results instead of only logging them.
func TestCalculatorFactory(t *testing.T) {
	opFactory := new(OperationFactory)

	// Addition: 1 + 2 must yield 3.
	operation := opFactory.CreateOperation("+")
	operation.SetNumber(1, 2)
	// Compare via %v so the check is independent of the concrete numeric
	// type GetResult returns.
	if got := fmt.Sprintf("%v", operation.GetResult()); got != "3" {
		t.Errorf("add operation: 1+2 = %v, want 3", got)
	}
	t.Logf("this is add operation, 1+2=%v\n", operation.GetResult())

	// Subtraction: 2 - 1 must yield 1.
	operation = opFactory.CreateOperation("-")
	operation.SetNumber(2, 1)
	if got := fmt.Sprintf("%v", operation.GetResult()); got != "1" {
		t.Errorf("sub operation: 2-1 = %v, want 1", got)
	}
	t.Logf("this is sub operation, 2-1=%v\n", operation.GetResult())
}
package persistence import ( "github.com/joshprzybyszewski/cribbage/model" ) func ValidateLatestActionBelongs(mg model.Game) error { if mg.NumActions() == 0 { return nil } la := mg.Actions[mg.NumActions()-1] if la.GameID != mg.ID { return ErrGameActionWrongGame } found := false for _, p := range mg.Players { if p.ID == la.ID { found = true break } } if !found { return ErrGameActionWrongPlayer } return nil }
// Copyright 2021 Dataptive SAS. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tcp import ( "errors" ) var ( ErrUnknownError = errors.New("tcp: unknown error") defaultErrorCode = 0 defaultErrorMessage = ErrUnknownError errorsCodes = map[error]int{ // TODO: add tcp error codes. } errorsMessages = map[int]error{ // TODO: add tcp error messages. } ) func GetErrorCode(err error) (code int) { code, ok := errorsCodes[err] if !ok { return defaultErrorCode } return code } func GetErrorMessage(code int) (err error) { err, ok := errorsMessages[code] if !ok { return defaultErrorMessage } return err }
package odoo

import (
	"fmt"
)

// StockPicking represents stock.picking model.
//
// NOTE(review): the struct tags previously spelled the option "omptempty",
// which is not a recognized tag option and therefore had no effect; corrected
// to "omitempty" so empty fields are actually omitted during marshalling.
// Verify against the xmlrpc marshaller's tag handling before release.
type StockPicking struct {
	LastUpdate               *Time      `xmlrpc:"__last_update,omitempty"`
	ActivityDateDeadline     *Time      `xmlrpc:"activity_date_deadline,omitempty"`
	ActivityIds              *Relation  `xmlrpc:"activity_ids,omitempty"`
	ActivityState            *Selection `xmlrpc:"activity_state,omitempty"`
	ActivitySummary          *String    `xmlrpc:"activity_summary,omitempty"`
	ActivityTypeId           *Many2One  `xmlrpc:"activity_type_id,omitempty"`
	ActivityUserId           *Many2One  `xmlrpc:"activity_user_id,omitempty"`
	BackorderId              *Many2One  `xmlrpc:"backorder_id,omitempty"`
	CompanyId                *Many2One  `xmlrpc:"company_id,omitempty"`
	CreateDate               *Time      `xmlrpc:"create_date,omitempty"`
	CreateUid                *Many2One  `xmlrpc:"create_uid,omitempty"`
	Date                     *Time      `xmlrpc:"date,omitempty"`
	DateDone                 *Time      `xmlrpc:"date_done,omitempty"`
	DisplayName              *String    `xmlrpc:"display_name,omitempty"`
	EntirePackageDetailIds   *Relation  `xmlrpc:"entire_package_detail_ids,omitempty"`
	EntirePackageIds         *Relation  `xmlrpc:"entire_package_ids,omitempty"`
	GroupId                  *Many2One  `xmlrpc:"group_id,omitempty"`
	HasPackages              *Bool      `xmlrpc:"has_packages,omitempty"`
	HasScrapMove             *Bool      `xmlrpc:"has_scrap_move,omitempty"`
	HasTracking              *Bool      `xmlrpc:"has_tracking,omitempty"`
	Id                       *Int       `xmlrpc:"id,omitempty"`
	IsLocked                 *Bool      `xmlrpc:"is_locked,omitempty"`
	LocationDestId           *Many2One  `xmlrpc:"location_dest_id,omitempty"`
	LocationId               *Many2One  `xmlrpc:"location_id,omitempty"`
	MessageChannelIds        *Relation  `xmlrpc:"message_channel_ids,omitempty"`
	MessageFollowerIds       *Relation  `xmlrpc:"message_follower_ids,omitempty"`
	MessageIds               *Relation  `xmlrpc:"message_ids,omitempty"`
	MessageIsFollower        *Bool      `xmlrpc:"message_is_follower,omitempty"`
	MessageLastPost          *Time      `xmlrpc:"message_last_post,omitempty"`
	MessageNeedaction        *Bool      `xmlrpc:"message_needaction,omitempty"`
	MessageNeedactionCounter *Int       `xmlrpc:"message_needaction_counter,omitempty"`
	MessagePartnerIds        *Relation  `xmlrpc:"message_partner_ids,omitempty"`
	MessageUnread            *Bool      `xmlrpc:"message_unread,omitempty"`
	MessageUnreadCounter     *Int       `xmlrpc:"message_unread_counter,omitempty"`
	MoveLineExist            *Bool      `xmlrpc:"move_line_exist,omitempty"`
	MoveLineIds              *Relation  `xmlrpc:"move_line_ids,omitempty"`
	MoveLines                *Relation  `xmlrpc:"move_lines,omitempty"`
	MoveType                 *Selection `xmlrpc:"move_type,omitempty"`
	Name                     *String    `xmlrpc:"name,omitempty"`
	Note                     *String    `xmlrpc:"note,omitempty"`
	Origin                   *String    `xmlrpc:"origin,omitempty"`
	OwnerId                  *Many2One  `xmlrpc:"owner_id,omitempty"`
	PartnerId                *Many2One  `xmlrpc:"partner_id,omitempty"`
	PickingTypeCode          *Selection `xmlrpc:"picking_type_code,omitempty"`
	PickingTypeEntirePacks   *Bool      `xmlrpc:"picking_type_entire_packs,omitempty"`
	PickingTypeId            *Many2One  `xmlrpc:"picking_type_id,omitempty"`
	Printed                  *Bool      `xmlrpc:"printed,omitempty"`
	Priority                 *Selection `xmlrpc:"priority,omitempty"`
	ProductId                *Many2One  `xmlrpc:"product_id,omitempty"`
	PurchaseId               *Many2One  `xmlrpc:"purchase_id,omitempty"`
	SaleId                   *Many2One  `xmlrpc:"sale_id,omitempty"`
	ScheduledDate            *Time      `xmlrpc:"scheduled_date,omitempty"`
	ShowCheckAvailability    *Bool      `xmlrpc:"show_check_availability,omitempty"`
	ShowLotsText             *Bool      `xmlrpc:"show_lots_text,omitempty"`
	ShowMarkAsTodo           *Bool      `xmlrpc:"show_mark_as_todo,omitempty"`
	ShowOperations           *Bool      `xmlrpc:"show_operations,omitempty"`
	ShowValidate             *Bool      `xmlrpc:"show_validate,omitempty"`
	State                    *Selection `xmlrpc:"state,omitempty"`
	WebsiteMessageIds        *Relation  `xmlrpc:"website_message_ids,omitempty"`
	WriteDate                *Time      `xmlrpc:"write_date,omitempty"`
	WriteUid                 *Many2One  `xmlrpc:"write_uid,omitempty"`
}

// StockPickings represents array of stock.picking model.
type StockPickings []StockPicking

// StockPickingModel is the odoo model name.
const StockPickingModel = "stock.picking"

// Many2One convert StockPicking to *Many2One.
func (sp *StockPicking) Many2One() *Many2One {
	return NewMany2One(sp.Id.Get(), "")
}

// CreateStockPicking creates a new stock.picking model and returns its id.
func (c *Client) CreateStockPicking(sp *StockPicking) (int64, error) { ids, err := c.CreateStockPickings([]*StockPicking{sp}) if err != nil { return -1, err } if len(ids) == 0 { return -1, nil } return ids[0], nil } // CreateStockPicking creates a new stock.picking model and returns its id. func (c *Client) CreateStockPickings(sps []*StockPicking) ([]int64, error) { var vv []interface{} for _, v := range sps { vv = append(vv, v) } return c.Create(StockPickingModel, vv) } // UpdateStockPicking updates an existing stock.picking record. func (c *Client) UpdateStockPicking(sp *StockPicking) error { return c.UpdateStockPickings([]int64{sp.Id.Get()}, sp) } // UpdateStockPickings updates existing stock.picking records. // All records (represented by ids) will be updated by sp values. func (c *Client) UpdateStockPickings(ids []int64, sp *StockPicking) error { return c.Update(StockPickingModel, ids, sp) } // DeleteStockPicking deletes an existing stock.picking record. func (c *Client) DeleteStockPicking(id int64) error { return c.DeleteStockPickings([]int64{id}) } // DeleteStockPickings deletes existing stock.picking records. func (c *Client) DeleteStockPickings(ids []int64) error { return c.Delete(StockPickingModel, ids) } // GetStockPicking gets stock.picking existing record. func (c *Client) GetStockPicking(id int64) (*StockPicking, error) { sps, err := c.GetStockPickings([]int64{id}) if err != nil { return nil, err } if sps != nil && len(*sps) > 0 { return &((*sps)[0]), nil } return nil, fmt.Errorf("id %v of stock.picking not found", id) } // GetStockPickings gets stock.picking existing records. func (c *Client) GetStockPickings(ids []int64) (*StockPickings, error) { sps := &StockPickings{} if err := c.Read(StockPickingModel, ids, nil, sps); err != nil { return nil, err } return sps, nil } // FindStockPicking finds stock.picking record by querying it with criteria. 
func (c *Client) FindStockPicking(criteria *Criteria) (*StockPicking, error) {
	sps := &StockPickings{}
	// Limit(1): we only need the first match.
	if err := c.SearchRead(StockPickingModel, criteria, NewOptions().Limit(1), sps); err != nil {
		return nil, err
	}
	if sps != nil && len(*sps) > 0 {
		return &((*sps)[0]), nil
	}
	return nil, fmt.Errorf("stock.picking was not found with criteria %v", criteria)
}

// FindStockPickings finds stock.picking records by querying it
// and filtering it with criteria and options.
func (c *Client) FindStockPickings(criteria *Criteria, options *Options) (*StockPickings, error) {
	sps := &StockPickings{}
	if err := c.SearchRead(StockPickingModel, criteria, options, sps); err != nil {
		return nil, err
	}
	return sps, nil
}

// FindStockPickingIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindStockPickingIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(StockPickingModel, criteria, options)
	if err != nil {
		// NOTE: returns an empty (non-nil) slice on error, unlike most
		// siblings which return nil.
		return []int64{}, err
	}
	return ids, nil
}

// FindStockPickingId finds record id by querying it with criteria.
// Returns -1 with an error when no record matches.
func (c *Client) FindStockPickingId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(StockPickingModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("stock.picking was not found with criteria %v and options %v", criteria, options)
}
package packet

import "github.com/bianjieai/tibc-sdk-go/types"

// moduleName namespaces these error codes within the TIBC SDK error registry.
const moduleName = "tibc" + "-" + "packet"

// TIBC packet sentinel errors.
// Each entry registers a (module, code, description) triple with the shared
// registry. Codes here start at 2; presumably code 1 is reserved by the
// registry — verify against types.Register.
var (
	ErrSequenceSendNotFound     = types.Register(moduleName, 2, "sequence send not found")
	ErrSequenceReceiveNotFound  = types.Register(moduleName, 3, "sequence receive not found")
	ErrSequenceAckNotFound      = types.Register(moduleName, 4, "sequence acknowledgement not found")
	ErrInvalidPacket            = types.Register(moduleName, 5, "invalid packet")
	ErrInvalidAcknowledgement   = types.Register(moduleName, 6, "invalid acknowledgement")
	ErrPacketCommitmentNotFound = types.Register(moduleName, 7, "packet commitment not found")
	ErrPacketReceived           = types.Register(moduleName, 8, "packet already received")
	ErrAcknowledgementExists    = types.Register(moduleName, 9, "acknowledgement for packet already exists")
	ErrInvalidCleanPacket       = types.Register(moduleName, 10, "invalid clean packet")
)
package boltclient

import (
	"encoding/json"
	"log"

	"github.com/gokapaya/cshelper/errlist"
	"github.com/gokapaya/cshelper/match"
)

// PairBucket is the bolt bucket name under which Pair records are stored,
// keyed by the santa's username.
const PairBucket = "pairs"

// StoreMatches saves Pair data to the database.
// `pl` can be single or multiple Pair(s)
//
// The write is all-or-nothing: if any pair fails to marshal or to be put,
// the collected errors are returned before Commit is reached, so the
// deferred Rollback discards every put made in this call.
func (c *Client) StoreMatches(pl ...match.Pair) error {
	var el errlist.ErrList
	// Writable transaction; the deferred Rollback is a no-op once Commit
	// has succeeded (standard bolt semantics — verify against the driver).
	tx, err := c.db.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback()
	b, err := tx.CreateBucketIfNotExists([]byte(PairBucket))
	if err != nil {
		return err
	}

	var total int
	for _, p := range pl {
		value, err := json.Marshal(p)
		if err != nil {
			// Collect and keep going so all bad pairs are reported at once.
			el = append(el, err)
			continue
		}
		// Keyed by santa username; a second pair with the same santa
		// silently overwrites the first.
		if err := b.Put([]byte(p.Santa.Username), value); err != nil {
			el = append(el, err)
			continue
		}
		total++
	}
	if el.NotEmpty() {
		return el
	}
	log.Printf("Added %v new pairs\n", total)
	return tx.Commit()
}

// GetPair retrieves a single Pair from the database
func (c *Client) GetPair(key string) (*match.Pair, error) {
	// Read-only transaction.
	tx, err := c.db.Begin(false)
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()
	b := tx.Bucket([]byte(PairBucket))
	if b == nil {
		return nil, errlist.Err("Bucket not found")
	}
	value := b.Get([]byte(key))
	if value == nil {
		return nil, errlist.Err("Pair not found")
	}
	var p match.Pair
	if err := json.Unmarshal(value, &p); err != nil {
		return nil, err
	}
	return &p, nil
}

// GetMatches retrieves the list of Pairs from the database
//
// Undecodable records are skipped but reported: the partial list is
// returned together with the accumulated errors.
func (c *Client) GetMatches() ([]match.Pair, error) {
	var (
		el errlist.ErrList
		pl []match.Pair
	)
	tx, err := c.db.Begin(false)
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()
	b := tx.Bucket([]byte(PairBucket))
	if b == nil {
		return nil, errlist.Err("Bucket not found")
	}
	cr := b.Cursor()
	for k, v := cr.First(); k != nil; k, v = cr.Next() {
		var p match.Pair
		if err := json.Unmarshal(v, &p); err != nil {
			el = append(el, err)
			continue
		}
		pl = append(pl, p)
	}
	if el.NotEmpty() {
		return pl, el
	}
	return pl, nil
}
package main

import (
	"log"

	"shared/protobuf/pb"
)

// YggdrasilGetMain sends command 2801 and decodes the reply into a
// S2CYggdrasilGetMain message.
func (c *Client) YggdrasilGetMain(req *pb.C2SYggdrasilGetMain) (*pb.S2CYggdrasilGetMain, error) {
	rawResp, err := c.Request(2801, req)
	if err != nil {
		return nil, err
	}

	out := new(pb.S2CYggdrasilGetMain)
	if err = c.Handle(rawResp, out); err != nil {
		return nil, err
	}

	log.Printf("RESP SUCCESS: YggdrasilGetMain resp: %+v", out)
	return out, nil
}
package polyclip

import (
	//"fmt"
	g "github.com/murphy214/geobuf"
	"github.com/murphy214/geobuf/geobuf_raw"
	m "github.com/murphy214/mercantile"
	"github.com/murphy214/pbf"
	"math"
	//"github.com/paulmach/go.geojson"
)

// RoundPt rounds both coordinates of a point to 6 decimal places.
// Depends on Round, defined elsewhere in this package.
func RoundPt(pt []float64) []float64 {
	return []float64{Round(pt[0], .5, 6), Round(pt[1], .5, 6)}
}

// RoundPolygon rounds every vertex of every ring in place and forces each
// ring to close by copying the first point over the last when they differ.
// NOTE: mutates and returns its argument.
func RoundPolygon(polygon [][][]float64) [][][]float64 {
	for i := range polygon {
		cont := polygon[i]
		for j := range cont {
			cont[j] = RoundPt(cont[j])
		}
		lastpt := cont[len(cont)-1]
		if cont[0][0] != lastpt[0] || cont[0][1] != lastpt[1] {
			cont[len(cont)-1] = cont[0]
		}
		polygon[i] = cont
	}
	return polygon
}

// ClipNaiveGeobuf splits one serialized geobuf feature into per-tile
// writers at the given zoom: the geometry is clipped per tile, re-encoded,
// and appended to the feature's original id/properties prefix so each tile
// gets a complete feature.
// This function handles both the ingestion and the output to vector tiles,
// hopefully reducing allocations.
func ClipNaiveGeobuf(bytevals []byte, zoom int) map[m.TileID]*g.Writer {
	// the pbf cursor over the raw feature bytes
	pbf := pbf.PBF{Pbf: bytevals, Length: len(bytevals)}

	// Skip the id field (tag 1, varint) if present.
	key, val := pbf.ReadKey()
	if key == 1 && val == 0 {
		pbf.ReadVarint()
		key, val = pbf.ReadKey()
	}
	// Skip every properties field (tag 2, length-delimited).
	for key == 2 && val == 2 {
		size := pbf.ReadVarint()
		endpos := pbf.Pos + size
		pbf.Pos = endpos
		key, val = pbf.ReadKey()
	}
	// Geometry type enum (tag 3, varint).
	var geomtype string
	if key == 3 && val == 0 {
		switch int(pbf.Pbf[pbf.Pos]) {
		case 1:
			geomtype = "Point"
		case 2:
			geomtype = "LineString"
		case 3:
			geomtype = "Polygon"
		case 4:
			geomtype = "MultiPoint"
		case 5:
			geomtype = "MultiLineString"
		case 6:
			geomtype = "MultiPolygon"
		}
		pbf.Pos += 1
		key, val = pbf.ReadKey()
	}
	// Keep the id/properties/type prefix; per-tile geometry bytes are
	// appended to it below.
	// NOTE(review): the -1 presumably excludes the geometry field's key
	// byte just read — verify against the geobuf key encoding width.
	endpos := pbf.Pos
	bytevals = bytevals[:endpos-1]
	if key == 4 && val == 2 {
		size := pbf.ReadVarint()
		endpos := pbf.Pos + size
		switch geomtype {
		case "Point":
			// A point lands in exactly one tile.
			point := pbf.ReadPoint(endpos)
			tilemap := map[m.TileID][][]float64{m.Tile(point[0], point[1], zoom): [][]float64{point}}
			newtilemap := map[m.TileID]*g.Writer{}
			for k, v := range tilemap {
				newtilemap[k] = g.WriterBufNew()
				for _, point := range v {
					geomb := geobuf_raw.MakePoint(point)
					newtilemap[k].Write(append(bytevals, geomb...))
				}
			}
			return newtilemap
		case "LineString":
			// Clip the line against the tile grid; one feature per segment.
			tilemap := ClipLine(pbf.ReadLine(0, endpos), zoom)
			newtilemap := map[m.TileID]*g.Writer{}
			for k, v := range tilemap {
				newtilemap[k] = g.WriterBufNew()
				for _, line := range v {
					geomb, _ := geobuf_raw.MakeLine(line)
					newtilemap[k].Write(append(bytevals, geomb...))
				}
			}
			return newtilemap
		case "Polygon":
			poly := pbf.ReadPolygon(endpos)
			tilemap := PolygonClipNaive(poly, zoom)
			newtilemap := map[m.TileID]*g.Writer{}
			for k, v := range tilemap {
				newtilemap[k] = g.WriterBufNew()
				for _, polygon := range v {
					geomb, _ := geobuf_raw.MakePolygon(polygon)
					newtilemap[k].Write(append(bytevals, geomb...))
				}
			}
			return newtilemap
		case "MultiPoint":
			// Bucket each point into its tile, then emit one multipoint
			// feature per tile.
			points := pbf.ReadLine(0, endpos)
			tilemap := map[m.TileID][][]float64{}
			for _, point := range points {
				tilemap[m.Tile(point[0], point[1], zoom)] = append(tilemap[m.Tile(point[0], point[1], zoom)], point)
			}
			newtilemap := map[m.TileID]*g.Writer{}
			for k, v := range tilemap {
				newtilemap[k] = g.WriterBufNew()
				geomb, _ := geobuf_raw.MakeLine(v)
				newtilemap[k].Write(append(bytevals, geomb...))
			}
			return newtilemap
		case "MultiLineString":
			// Clip each member line, merging the per-tile pieces.
			lines := pbf.ReadPolygon(endpos)
			tilemap := map[m.TileID][][][]float64{}
			for _, line := range lines {
				templinemap := ClipLine(line, zoom)
				for k, v := range templinemap {
					tilemap[k] = append(tilemap[k], v...)
				}
			}
			newtilemap := map[m.TileID]*g.Writer{}
			for k, v := range tilemap {
				newtilemap[k] = g.WriterBufNew()
				geomb, _ := geobuf_raw.MakePolygon(v)
				newtilemap[k].Write(append(bytevals, geomb...))
			}
			return newtilemap
		case "MultiPolygon":
			// Clip each member polygon, merging the per-tile pieces.
			multipolygon := pbf.ReadMultiPolygon(endpos)
			tilemap := map[m.TileID][][][][]float64{}
			for _, polygon := range multipolygon {
				temppolygonmap := PolygonClipNaive(polygon, zoom)
				for k, v := range temppolygonmap {
					tilemap[k] = append(tilemap[k], v...)
				}
			}
			newtilemap := map[m.TileID]*g.Writer{}
			for k, v := range tilemap {
				newtilemap[k] = g.WriterBufNew()
				geomb, _ := geobuf_raw.MakeMultiPolygon(v)
				newtilemap[k].Write(append(bytevals, geomb...))
			}
			return newtilemap
		}
	}
	// Unknown or absent geometry: nothing to emit.
	return map[m.TileID]*g.Writer{}
}

// DeltaPt returns the L1 (manhattan) distance between two points.
func DeltaPt(pt []float64, testpt []float64) float64 {
	deltax := math.Abs(pt[0] - testpt[0])
	deltay := math.Abs(pt[1] - testpt[1])
	return deltax + deltay
}

// PrecisionError is the tolerance (1e-7) used for float comparisons.
var PrecisionError = math.Pow(10.0, -7.0)

/*
// adding a geobuf byte array to a given layer
// this function house's both the ingestion and output to vector tiles
// hopefully to reduce allocations
func ClipNaiveGeobufMiddle(bytevals []byte, zoom int) map[m.TileID][]*geojson.Feature {
	// the pbf representing a feauture
	pbf := pbf.PBF{Pbf: bytevals, Length: len(bytevals)}
	// creating total bytes that holds the bytes for a given layer
	// refreshing cursor
	key, val := pbf.ReadKey()
	if key == 1 && val == 0 {
		pbf.ReadVarint()
		key, val = pbf.ReadKey()
	}
	for key == 2 && val == 2 {
		// starting properties here
		size := pbf.ReadVarint()
		endpos := pbf.Pos + size
		pbf.Pos = endpos
		key, val = pbf.ReadKey()
	}
	var geomtype string
	if key == 3 && val == 0 {
		switch int(pbf.Pbf[pbf.Pos]) {
		case 1:
			geomtype = "Point"
		case 2:
			geomtype = "LineString"
		case 3:
			geomtype = "Polygon"
		case 4:
			geomtype = "MultiPoint"
		case 5:
			geomtype = 
"MultiLineString" case 6: geomtype = "MultiPolygon" } pbf.Pos += 1 key, val = pbf.ReadKey() } //endpos := pbf.Pos //bytevals2 := bytevals[:endpos] if key == 4 && val == 2 { size := pbf.ReadVarint() endpos := pbf.Pos + size switch geomtype { case "Point": point := pbf.ReadPoint(endpos) tilemap := map[m.TileID][][]float64{m.Tile(point[0], point[1], zoom): [][]float64{point}} newtilemap := map[m.TileID][]*geojson.Feature{} for k, v := range tilemap { for _, point := range v { ///geomb := geobuf_raw.MakePoint(point) newtilemap[k] = append(newtilemap[k], geojson.NewPointFeature(point)) } } return newtilemap //array9 = WritePackedUint32(layer.Cursor.Geometry) case "LineString": tilemap := ClipLine(pbf.ReadLine(0, endpos), zoom) newtilemap := map[m.TileID][]*geojson.Feature{} for k, v := range tilemap { for _, line := range v { newtilemap[k] = append(newtilemap[k], geojson.NewMultiLineStringFeature(line)) } } return newtilemap //array9 = WritePackedUint32(layer.Cursor.Geometry) case "Polygon": tilemap := PolygonClipNaive(pbf.ReadPolygon(endpos), zoom) newtilemap := map[m.TileID][]*geojson.Feature{} for k, v := range tilemap { for _, polygon := range v { if len(v) > 0 { newtilemap[k] = append(newtilemap[k], geojson.NewPolygonFeature(polygon)) } } } return newtilemap //layer.Cursor.MakePolygonFloat(pbf.ReadPolygon(endpos)) //array9 = WritePackedUint32(layer.Cursor.Geometry) case "MultiPoint": points := pbf.ReadLine(0, endpos) tilemap := map[m.TileID][][]float64{} for _, point := range points { tilemap[m.Tile(point[0], point[1], zoom)] = append(tilemap[m.Tile(point[0], point[1], zoom)], point) } newtilemap := map[m.TileID][]*geojson.Feature{} for k, v := range tilemap { newtilemap[k] = append(newtilemap[k], geojson.NewMultiPointFeature(v...)) } return newtilemap //layer.Cursor.MakeMultiPointFloat(pbf.ReadLine(0,endpos)) //array9 = WritePackedUint32(layer.Cursor.Geometry) case "MultiLineString": lines := pbf.ReadPolygon(endpos) tilemap := map[m.TileID][][][]float64{} for _, 
line := range lines { templinemap := ClipLine(line, zoom) for k, v := range templinemap { tilemap[k] = append(tilemap[k], v...) } } newtilemap := map[m.TileID][]*geojson.Feature{} for k, v := range tilemap { newtilemap[k] = append(newtilemap[k], geojson.NewMultiLineStringFeature(v...)) } return newtilemap //array9 = WritePackedUint32(layer.Cursor.Geometry) case "MultiPolygon": multipolygon := pbf.ReadMultiPolygon(endpos) tilemap := map[m.TileID][][][][]float64{} for _, polygon := range multipolygon { temppolygonmap := PolygonClipNaive(polygon, zoom) for k, v := range temppolygonmap { tilemap[k] = append(tilemap[k], v...) } } newtilemap := map[m.TileID][]*geojson.Feature{} for k, v := range tilemap { newtilemap[k] = append(newtilemap[k], geojson.NewMultiPolygonFeature(v...)) } return newtilemap //layer.Cursor.MakeMultiPolygonFloat(pbf.ReadMultiPolygon(endpos)) //array9 = WritePackedUint32(layer.Cursor.Geometry) } } return map[m.TileID][]*geojson.Feature{} } */ /*() // adding a geobuf byte array to a given layer // this function house's both the ingestion and output to vector tiles // hopefully to reduce allocations func ClipTileGeobuf(bytevals []byte,zoom m.TileID) map[m.TileID]*g.Writer { // the pbf representing a feauture pbf := geobuf_raw.PBF{Pbf:bytevals,Length:len(bytevals)} // creating total bytes that holds the bytes for a given layer // refreshing cursor key,val := pbf.ReadKey() if key == 1 && val == 0 { pbf.ReadVarint() key,val = pbf.ReadKey() } for key == 2 && val == 2 { // starting properties shit here size := pbf.ReadVarint() endpos := pbf.Pos + size pbf.Pos = endpos key,val = pbf.ReadKey() } var geomtype string if key == 3 && val == 0 { switch int(pbf.Pbf[pbf.Pos]) { case 1: geomtype = "Point" case 2: geomtype = "LineString" case 3: geomtype = "Polygon" case 4: geomtype = "MultiPoint" case 5: geomtype = "MultiLineString" case 6: geomtype = "MultiPolygon" } pbf.Pos += 1 key,val = pbf.ReadKey() } endpos := pbf.Pos bytevals = bytevals[:endpos-1] if key == 
4 && val == 2 { size := pbf.ReadVarint() endpos := pbf.Pos + size switch geomtype { case "Point": tilemap := ClipTile(geojson.NewFeature(geojson.NewPointGeometry(pbf.ReadPoint(endpos))),zoom) newtilemap := map[m.TileID]*g.Writer{} for k,v := range tilemap { newtilemap[k] = g.WriterBufNew() for _,feat := range v { geomb := geobuf_raw.MakePoint(feat.Geometry.Point) newtilemap[k].Write(append(bytevals,geomb...)) } } return newtilemap //array9 = WritePackedUint32(layer.Cursor.Geometry) case "LineString": tilemap := ClipTile(geojson.NewFeature(geojson.NewLineStringGeometry(pbf.ReadLine(0,endpos))),zoom) newtilemap := map[m.TileID]*g.Writer{} for k,v := range tilemap { eh := g.WriterBufNew() for _,feat := range v { geomb,_ := geobuf_raw.MakeLine(feat.Geometry.LineString) eh.Write(append(bytevals,geomb...)) } newtilemap[k] = eh } return newtilemap //array9 = WritePackedUint32(layer.Cursor.Geometry) case "Polygon": tilemap := ClipTile(geojson.NewFeature(geojson.NewPolygonGeometry(pbf.ReadPolygon(endpos))),zoom) newtilemap := map[m.TileID]*g.Writer{} for k,v := range tilemap { eh := g.WriterBufNew() for _,feat := range v { if len(v) > 0 { geomb,_ := geobuf_raw.MakePolygon(feat.Geometry.Polygon) eh.Write(append(bytevals,geomb...)) } } newtilemap[k] = eh } return newtilemap //layer.Cursor.MakePolygonFloat(pbf.ReadPolygon(endpos)) //array9 = WritePackedUint32(layer.Cursor.Geometry) case "MultiPoint": tilemap := ClipTile(geojson.NewFeature(geojson.NewMultiPointGeometry(pbf.ReadLine(0,endpos)...)),zoom) newtilemap := map[m.TileID]*g.Writer{} for k,v := range tilemap { newtilemap[k] = g.WriterBufNew() for _,feat := range v { geomb,_ := geobuf_raw.MakeLine(feat.Geometry.MultiPoint) newtilemap[k].Write(append(bytevals,geomb...)) } } return newtilemap //layer.Cursor.MakeMultiPointFloat(pbf.ReadLine(0,endpos)) //array9 = WritePackedUint32(layer.Cursor.Geometry) case "MultiLineString": tilemap := 
ClipTile(geojson.NewFeature(geojson.NewMultiLineStringGeometry(pbf.ReadPolygon(endpos)...)),zoom) newtilemap := map[m.TileID]*g.Writer{} for k,v := range tilemap { newtilemap[k] = g.WriterBufNew() for _,feat := range v { geomb,_ := geobuf_raw.MakePolygon(feat.Geometry.MultiLineString) newtilemap[k].Write(append(bytevals,geomb...)) } } return newtilemap //array9 = WritePackedUint32(layer.Cursor.Geometry) case "MultiPolygon": tilemap := ClipTile(geojson.NewFeature(geojson.NewMultiPolygonGeometry(pbf.ReadMultiPolygon(endpos)...)),zoom) newtilemap := map[m.TileID]*g.Writer{} for k,v := range tilemap { newtilemap[k] = g.WriterBufNew() for _,feat := range v { geomb,_ := geobuf_raw.MakeMultiPolygon(feat.Geometry.MultiPolygon) newtilemap[k].Write(append(bytevals,geomb...)) } } return newtilemap //layer.Cursor.MakeMultiPolygonFloat(pbf.ReadMultiPolygon(endpos)) //array9 = WritePackedUint32(layer.Cursor.Geometry) } } return map[m.TileID]*g.Writer{} } */
// Package tree contains the anchor tree
package tree

import (
	"runtime"
	"sync"

	"github.com/wetware/ww/internal/mem"
	memutil "github.com/wetware/ww/pkg/util/mem"
)

// Transaction groups multiple node operations into a single atomic commit.
// It is handed to the callback of Node.Txn, which holds the node's tx lock
// for the duration, so Transaction methods access fields without locking.
type Transaction Node

// Load returns the node's API value without taking the tx lock (the caller
// inside Txn already holds it).
func (t Transaction) Load() mem.Any { return Node(t).any }

// Store sets the API value only when either the new value or the current
// value is nil — i.e. it can fill an empty slot or clear an occupied one,
// but never overwrite one non-nil value with another. Returns whether the
// store took effect.
func (t Transaction) Store(any mem.Any) bool {
	if memutil.IsNil(any) || memutil.IsNil(Node(t).any) {
		Node(t).any = any
		return true
	}
	return false
}

// Node in an anchor tree.
type Node struct{ *nodeRef }

// New anchor tree
func New() Node { return Node{newRootNode()} }

// newRootNode creates the unnamed, parentless root and takes a ref on it.
func newRootNode() *nodeRef { return newNode(nil, "").ref() }

// Path from root to the present Node
func (n Node) Path() []string { return n.nodeRef.Path() }

// Walk an anchor path
func (n Node) Walk(path []string) Node { return Node{n.nodeRef.Walk(path)} }

// List the anchor's children
func (n Node) List() []Node {
	// N.B.: hard-lock because the List() operation may co-occur with a sub-anchor
	// creation/deletion.
	n.nodeRef.Hard().Lock()
	defer n.nodeRef.Hard().Unlock()
	children := make([]Node, 0, len(n.nodeRef.children))
	for _, child := range n.nodeRef.children {
		// Each listed child gets its own ref (and finalizer).
		children = append(children, Node{child.ref()})
	}
	return children
}

// Load API value
func (n Node) Load() mem.Any {
	n.tx.RLock()
	defer n.tx.RUnlock()
	return n.any
}

// Store API value. Same fill-or-clear semantics as Transaction.Store, but
// takes the tx lock itself.
func (n Node) Store(any mem.Any) bool {
	n.tx.Lock()
	defer n.tx.Unlock()
	if memutil.IsNil(any) || memutil.IsNil(n.any) {
		n.any = any
		return true
	}
	return false
}

// Txn starts a transaction: the tx lock is held for the whole callback.
func (n Node) Txn(f func(t Transaction)) {
	n.tx.Lock()
	defer n.tx.Unlock()
	f(Transaction(n))
}

// nodeRef is a proxy to a node that is responsible for implemented refcounting and gc
// logic. When anchor is GCed, the underlying node's refcount is decremented.
type nodeRef struct{ *node }

// hard lock - prevents updates to children & counter states
func (h nodeRef) Hard() *sync.Mutex { return &h.mu }

func (h nodeRef) Path() (parts []string) {
	// zero-allocation filtering of empty path components.
	raw := h.path()
	parts = raw[:0]
	for _, segment := range raw {
		if len(segment) > 0 {
			parts = append(parts, segment)
		}
	}
	return
}

// Walk descends the path, creating missing children. The current node's
// hard lock is held across the recursive descent (child Walks lock their
// own mutexes, so there is no self-deadlock).
func (h nodeRef) Walk(path []string) *nodeRef {
	if len(path) == 0 {
		return h.ref()
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	n, ok := h.children[path[0]]
	if !ok {
		n = newNode(h.node, path[0])
		h.children[path[0]] = n
		return n.ref().Walk(path[1:]) // Ensure n is garbage-collected.
	}
	// n is already tracked by garbage collector; use concrete `nodeRef`
	return nodeRef{n}.Walk(path[1:])
}

type node struct {
	mu  sync.Mutex // guards ctr and children
	ctr int        // live nodeRef count

	tx  sync.RWMutex // guards any
	any mem.Any

	Name     string
	parent   *node
	children map[string]*node
}

func newNode(parent *node, name string) *node {
	return &node{
		Name:     name,
		parent:   parent,
		children: make(map[string]*node),
	}
}

// path returns the root-to-node name chain, including empty root name.
func (n *node) path() []string {
	if n.parent == nil {
		return []string{n.Name}
	}
	return append(n.parent.path(), n.Name)
}

func (n *node) orphaned() bool {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.orphanedUnsafe()
}

// Unsafe - requires locking
func (n *node) orphanedUnsafe() bool {
	// - nobody's using it
	// - it has no children
	// - it's not holding an object
	return n.ctr == 0 && len(n.children) == 0 && memutil.IsNil(n.any)
}

// ref increments the refcount and returns a finalizer-tracked handle; when
// the handle is collected, gc decrements the count.
func (n *node) ref() *nodeRef {
	n.mu.Lock()
	defer n.mu.Unlock()
	n.ctr++
	ref := &nodeRef{n}
	runtime.SetFinalizer(ref, gc)
	return ref
}

// gc is the nodeRef finalizer: drop the ref and prune the node if orphaned.
func gc(n *nodeRef) {
	n.mu.Lock()
	defer n.mu.Unlock()
	n.ctr--
	if n.orphanedUnsafe() && n.parent != nil {
		// NOTE(review): this looks up n.Name in the node's OWN children
		// map and deletes from it — a node is never its own child, so this
		// likely never fires. Presumably it was meant to remove the
		// orphaned node from n.parent.children (under the parent's lock).
		// Verify and fix with care: the parent's mutex, not this one,
		// guards that map.
		if child, ok := n.children[n.Name]; ok && child.orphanedUnsafe() {
			delete(n.children, n.Name)
		}
	}
}
package validator

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/mts-test-task/pkg/sitesdataservice/httperror"
)

// Input validates input data.
type Input interface {
	// CheckURLs returns a non-nil error when the URL list is empty, exceeds
	// the configured maximum, or contains a syntactically invalid URL.
	CheckURLs(urls []string) (err error)
}

type input struct {
	maxURLsCount int                    // upper bound on accepted URLs per request
	errorCreator httperror.ErrorCreator // builds HTTP-aware errors (status, user message, log message)
}

// CheckURLs validates the list size and each URL's syntax, reporting the
// first problem found as an HTTP 400 error.
func (i *input) CheckURLs(urls []string) error {
	if len(urls) == 0 {
		return i.errorCreator(
			http.StatusBadRequest,
			"Ошибка ввода: нет ни одного урла",
			fmt.Sprintf("input validation error: %s", "no urls"),
		)
	}
	if len(urls) > i.maxURLsCount {
		return i.errorCreator(
			http.StatusBadRequest,
			"Ошибка ввода: слишком много урлов",
			fmt.Sprintf("input validation error: %s", "too many urls"),
		)
	}
	// Validate each URL's syntax; fail fast on the first malformed one.
	for _, u := range urls {
		if _, err := url.ParseRequestURI(u); err != nil {
			return i.errorCreator(
				http.StatusBadRequest,
				fmt.Sprintf("Ошибка ввода: неверный урл: %s", u),
				fmt.Sprintf("input validation error: %s %s", "bad url:", u),
			)
		}
	}
	return nil
}

// NewInput constructs an Input validator with the given limit and error factory.
func NewInput(maxURLsCount int, errorCreator httperror.ErrorCreator) Input {
	return &input{
		maxURLsCount: maxURLsCount,
		errorCreator: errorCreator,
	}
}
/** * Copyright (c) 2020 Ameya Lokare */ package main import ( "context" "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "io" "math/big" "net" "net/http" "sync" "time" "github.com/google/uuid" ) type Mitmer struct { dialContext func(ctx context.Context, network, addr string) (net.Conn, error) issuerCertificate *x509.Certificate issuerPrivateKey crypto.PrivateKey generatedCertKeyPair *rsa.PrivateKey doTLSHandshake func(conn net.Conn, hostname string, certAlias string) (net.Conn, error) } func NewMitmer() (*Mitmer, error) { keyPair, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { return nil, err } return &Mitmer{generatedCertKeyPair: keyPair}, nil } func (m *Mitmer) HandleHttpConnect(requestUUID uuid.UUID, w http.ResponseWriter, r *http.Request) { // TODO: think about what context deadlines to set etc outboundConn, err := m.dialContext(context.Background(), "tcp4", r.RequestURI) if err != nil { responseCode, errorCode, errorMsg := mapError(requestUUID, err) sendHTTPError(w, responseCode, errorCode, errorMsg) return } defer outboundConn.Close() hj, ok := w.(http.Hijacker) if !ok { http.Error(w, "Connection hijacking not supported", http.StatusInternalServerError) return } inboundConn, bufrw, err := hj.Hijack() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } defer inboundConn.Close() bufrw.WriteString("HTTP/1.1 200 Connection Established\r\n") bufrw.WriteString("Connection: Close\r\n") bufrw.WriteString("\r\n") bufrw.Flush() m.doMitm(inboundConn, outboundConn, r.URL.Hostname()) } func (m *Mitmer) doMitm(inboundConn net.Conn, outboundConn net.Conn, hostnameInRequest string) { var remoteHostname string config := &tls.Config{ GetCertificate: func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { sni := clientHello.ServerName if sni == "" { remoteHostname = hostnameInRequest } else { if sni != hostnameInRequest { log.Warnf("SNI name %s 
in TLS ClientHello is not the same as hostname %s indicated in HTTP CONNECT, proceeding anyway", sni, hostnameInRequest) } remoteHostname = sni } return m.generateCert(remoteHostname) }, } inboundTLSConn := tls.Server(inboundConn, config) defer inboundTLSConn.Close() err := inboundTLSConn.Handshake() if err != nil { log.Errorf("Inbound (MITM) handshake failed with error: %s\n", err) return } // NOTE: remoteHostname will only be set after the inbound handshake is done, so we can't do // inbound and outbound handshakes in parallel handshakeConn, err := m.doTLSHandshake(outboundConn, remoteHostname, "default") if err != nil { log.Errorf("TLS Handshake failed on outbound connection: %s\n", err) return } outboundTLSConn := handshakeConn.(*tls.Conn) var wg sync.WaitGroup wg.Add(2) go func() { rawProxy(inboundTLSConn, outboundTLSConn) wg.Done() }() go func() { rawProxy(outboundTLSConn, inboundTLSConn) wg.Done() }() wg.Wait() } // Heavily inspired by generate_cert.go func (m *Mitmer) generateCert(hostname string) (*tls.Certificate, error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, err } notBefore := time.Now().Add(time.Duration(-1) * time.Hour) notAfter := time.Now().Add(time.Duration(1) * time.Hour) template := x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{ Organization: []string{"WHSentry Co"}, }, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, IsCA: false, } if ip := net.ParseIP(hostname); ip != nil { template.IPAddresses = append(template.IPAddresses, ip) } else { template.DNSNames = append(template.DNSNames, hostname) } derBytes, err := x509.CreateCertificate(rand.Reader, &template, m.issuerCertificate, publicKey(m.generatedCertKeyPair), m.issuerPrivateKey) if err != nil { return nil, err } 
certPemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) // TODO: this can be done during initialization privKeyBytes, err := x509.MarshalPKCS8PrivateKey(m.generatedCertKeyPair) if err != nil { return nil, err } privKeyPemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: privKeyBytes}) cert, err := tls.X509KeyPair(certPemBytes, privKeyPemBytes) return &cert, err } func publicKey(priv interface{}) interface{} { switch k := priv.(type) { case *rsa.PrivateKey: return &k.PublicKey case *ecdsa.PrivateKey: return &k.PublicKey case ed25519.PrivateKey: return k.Public().(ed25519.PublicKey) default: return nil } } func rawProxy(inConn *tls.Conn, outConn *tls.Conn) { buf := make([]byte, 2048) for { numRead, err := inConn.Read(buf) if numRead > 0 { _, writeErr := outConn.Write(buf[:numRead]) // Write must return a non-nil error if it returns n < len(p) if writeErr != nil { log.Warnf("Error writing to outbound connection: %s\n", writeErr) inConn.Close() outConn.Close() return } } if err == io.EOF { outConn.CloseWrite() return } if err != nil { log.Warnf("Error reading from inbound connection: %s\n", err) inConn.Close() outConn.Close() return } } }
package server

import (
	"rkouj/fun-with-go/chatserver/user"
	"rkouj/fun-with-go/chatserver/util"
)

// Server fans chat messages from user write-channels out to the recipients'
// read-channels, keeping a per-recipient message history.
type Server interface {
	// ProcessRequests blocks forever, dispatching messages as they arrive.
	ProcessRequests()
	// GetMessages returns the message contents delivered to u so far.
	GetMessages(u user.User) []string
}

type server struct {
	messages       map[int][]string // delivered message contents, keyed by recipient id
	userReadChans  []chan string    // server -> user delivery channels, indexed by user id
	userWriteChans []chan string    // user -> server submission channels, indexed by user id
}

// NewServer builds a server over the given per-user channel slices.
func NewServer(userReadChans []chan string, userWriteChans []chan string) Server {
	return &server{
		messages:       make(map[int][]string),
		userReadChans:  userReadChans,
		userWriteChans: userWriteChans,
	}
}

// ProcessRequests is the server's main loop.
// NOTE(review): the select hard-codes write-channels 0 and 1, so exactly two
// users are supported; additional users would need reflect.Select or a
// fan-in goroutine per channel. Confirm intended user count with callers.
func (s *server) ProcessRequests() {
	for {
		select {
		case msg := <-s.userWriteChans[0]:
			s.processMessage(msg)
		case msg := <-s.userWriteChans[1]:
			s.processMessage(msg)
		}
	}
}

// GetMessages returns the history for u (nil slice if none).
func (s *server) GetMessages(u user.User) []string {
	return s.messages[u.GetId()]
}

// processMessage decodes msg, forwards the raw message to the recipient's
// read channel (blocking until the recipient receives it), and appends the
// content to the recipient's history.
func (s *server) processMessage(msg string) {
	_, msgRecepientId, msgContent := util.DecipherMessage(msg)
	s.userReadChans[msgRecepientId] <- msg
	s.messages[msgRecepientId] = append(s.messages[msgRecepientId], msgContent)
}
package main

import "fmt"

// uniqueLetterString returns the sum, over all substrings of s, of the count
// of characters appearing exactly once in that substring (LeetCode 828),
// modulo 1e9+7.
//
// For each byte with occurrence positions p_0..p_k, the occurrence at p_j is
// the unique one in exactly (p_j - p_{j-1}) * (p_{j+1} - p_j) substrings,
// using -1 and len(s) as sentinels for the missing neighbors.
func uniqueLetterString(s string) int {
	const mod = 1000000007
	n := len(s)

	// Record the positions of every byte, in increasing order.
	positions := make(map[byte][]int)
	for i := 0; i < n; i++ {
		positions[s[i]] = append(positions[s[i]], i)
	}

	var total int64
	for _, apps := range positions {
		prev := -1 // sentinel: one before the first index
		for j, app := range apps {
			next := n // sentinel: one past the last index
			if j+1 < len(apps) {
				next = apps[j+1]
			}
			total += int64(app-prev) * int64(next-app)
			prev = app
		}
	}
	return int(total % mod)
}

func main() {
	// Print the result instead of discarding it.
	fmt.Println(uniqueLetterString("abc"))
}
package wxapi import ( "encoding/xml" ) const ( // 消息类型 MsgTypeText = "text" // 文本消息 MsgTypeImage = "image" // 图片消息 MsgTypeVoice = "voice" // 语音消息 MsgTypeVideo = "video" // 视频消息 MsgTypeShortVideo = "shortvideo" // 小视频消息 MsgTypeLocation = "location" // 地理位置消息 MsgTypeLink = "link" // 链接消息 // 事件类型 EvtTypeSubscribe = "subscribe" // 关注事件/用户未关注时扫描带参数二维码事件 EvtTypeUnsubscribe = "unsubscribe" // 取消关注事件 EvtTypeScan = "SCAN" // 用户已关注时扫描带参数二维码事件 EvtTypeLocation = "LOCATION" // 上报地理位置事件 EvtTypeClick = "CLICK" // 自定义菜单拉取消息事件 EvtTypeView = "VIEW" // 自定义菜单跳转链接事件 ) type ReqMsg struct { XMLName xml.Name `xml:"xml"` Encrypt string // 密文 AppId string // 第三方开放平台的APPID ToUserName string // 开发者微信号 FromUserName string // 发送方OpenID CreateTime int64 // 消息创建时间 MsgType string // 消息类型 // 普通消息参数 Content string // 文本消息内容 PicURL string `xml:"PicUrl"` // 图片链接 MediaID string `xml:"MediaId"` // 图片/语音/视频消息媒体ID Format string // 语音格式 Recognition string // 语音识别结果 ThumbMediaID string `xml:"ThumbMediaId"` // 视频消息缩略图的媒体ID LocationX float64 `xml:"Location_X"` // 地理位置维度 LocationY float64 `xml:"Location_Y"` // 地理位置经度 Scale int // 地图缩放大小 Label string // 地理位置信息 Title string // 消息标题 Description string // 消息描述 URL string `xml:"Url"` // 消息链接 MsgID int64 `xml:"MsgId"` // 消息ID // 事件推送参数 Event string // 事件类型 EventKey string // 事件KEY值 Ticket string // 二维码的Ticket Latitude float64 // 地理位置纬度 Longitude float64 // 地理位置经度 Precision float64 // 地理位置精度 } type CDATA struct { Value string `xml:",cdata"` } type MsgBase struct { XMLName xml.Name `xml:"xml"` ToUserName CDATA FromUserName CDATA CreateTime CDATA MsgType CDATA } //返回微信消息加密结构体 type EncryptMsg struct { XMLName xml.Name `xml:"xml"` Encrypt CDATA MsgSignature CDATA TimeStamp CDATA Nonce CDATA } //回复文本消息结构体 type TextMsg struct { *MsgBase Content CDATA } //回复图片消息结构体 type ImageMsg struct { *MsgBase MediaID CDATA `xml:"Image>MediaId"` } //回复语音消息 type VoiceMsg struct { *MsgBase MediaID CDATA `xml:"Voice>MediaId"` } //回复视频消息 type VideoMsg struct { *MsgBase Video struct { 
MediaID CDATA `xml:"MediaId"` Title CDATA Description CDATA } } //回复音乐消息 type MusicMsg struct { *MsgBase Music struct { Title CDATA Description CDATA MusicURL CDATA `xml:"MusicUrl"` HQMusicURL CDATA `xml:"HQMusicUrl"` ThumbMediaID CDATA `xml:"ThumbMediaId"` } } //回复图文消息 type newsMsg struct { *MsgBase ArticleCount CDATA Articles []*Article `xml:">item"` } type Article struct { Title CDATA Description CDATA PicURL CDATA `xml:"PicUrl"` URL CDATA `xml:"Url"` } //转发到客服系统消息结构体 type Transfer2CustomerService struct { *MsgBase KfAccount CDATA `xml:"TransInfo>KfAccount"` }
// Copyright 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package linux import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/errors/linuxerr" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fsimpl/timerfd" "gvisor.dev/gvisor/pkg/sentry/kernel" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" ) // TimerfdCreate implements Linux syscall timerfd_create(2). func TimerfdCreate(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { clockID := args[0].Int() flags := args[1].Int() if flags&^(linux.TFD_CLOEXEC|linux.TFD_NONBLOCK) != 0 { return 0, nil, linuxerr.EINVAL } // Timerfds aren't writable per se (their implementation of Write just // returns EINVAL), but they are "opened for writing", which is necessary // to actually reach said implementation of Write. 
fileFlags := uint32(linux.O_RDWR) if flags&linux.TFD_NONBLOCK != 0 { fileFlags |= linux.O_NONBLOCK } var clock ktime.Clock switch clockID { case linux.CLOCK_REALTIME: clock = t.Kernel().RealtimeClock() case linux.CLOCK_MONOTONIC, linux.CLOCK_BOOTTIME: clock = t.Kernel().MonotonicClock() default: return 0, nil, linuxerr.EINVAL } vfsObj := t.Kernel().VFS() file, err := timerfd.New(t, vfsObj, clock, fileFlags) if err != nil { return 0, nil, err } defer file.DecRef(t) fd, err := t.NewFDFrom(0, file, kernel.FDFlags{ CloseOnExec: flags&linux.TFD_CLOEXEC != 0, }) if err != nil { return 0, nil, err } return uintptr(fd), nil, nil } // TimerfdSettime implements Linux syscall timerfd_settime(2). func TimerfdSettime(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { fd := args[0].Int() flags := args[1].Int() newValAddr := args[2].Pointer() oldValAddr := args[3].Pointer() if flags&^(linux.TFD_TIMER_ABSTIME) != 0 { return 0, nil, linuxerr.EINVAL } file := t.GetFile(fd) if file == nil { return 0, nil, linuxerr.EBADF } defer file.DecRef(t) tfd, ok := file.Impl().(*timerfd.TimerFileDescription) if !ok { return 0, nil, linuxerr.EINVAL } var newVal linux.Itimerspec if _, err := newVal.CopyIn(t, newValAddr); err != nil { return 0, nil, err } newS, err := ktime.SettingFromItimerspec(newVal, flags&linux.TFD_TIMER_ABSTIME != 0, tfd.Clock()) if err != nil { return 0, nil, err } tm, oldS := tfd.SetTime(newS) if oldValAddr != 0 { oldVal := ktime.ItimerspecFromSetting(tm, oldS) if _, err := oldVal.CopyOut(t, oldValAddr); err != nil { return 0, nil, err } } return 0, nil, nil } // TimerfdGettime implements Linux syscall timerfd_gettime(2). 
func TimerfdGettime(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := args[0].Int()
	curValAddr := args[1].Pointer()

	// Resolve the fd and hold a reference for the duration of the call.
	file := t.GetFile(fd)
	if file == nil {
		return 0, nil, linuxerr.EBADF
	}
	defer file.DecRef(t)

	// The fd must actually be a timerfd, matching Linux's EINVAL behavior.
	tfd, ok := file.Impl().(*timerfd.TimerFileDescription)
	if !ok {
		return 0, nil, linuxerr.EINVAL
	}

	// Read the timer's current setting and copy it out to user memory.
	tm, s := tfd.GetTime()
	curVal := ktime.ItimerspecFromSetting(tm, s)
	_, err := curVal.CopyOut(t, curValAddr)
	return 0, nil, err
}
package problem07

import (
	"exercises/aoc2020/common"
	"regexp"
	"strconv"
)

// Solve runs both parts of day 7 against the default input file.
func Solve() (int, int, error) {
	return SolveBoth("./problem07/input.txt")
}

// SolveBoth parses the bag rules in inputFile and returns:
//   - part A: how many bag colors can transitively contain a shiny gold bag,
//   - part B: how many individual bags a shiny gold bag must contain.
func SolveBoth(inputFile string) (int, int, error) {
	relations, err := parseFile(inputFile)
	if err != nil {
		return 0, 0, err
	}

	// containedBy maps a bag color to the colors that can directly hold it;
	// contains maps a bag color to the (quantity, color) rules it holds.
	// append on a missing key starts from nil, so no exists-check is needed.
	containedBy := map[string][]string{}
	contains := map[string][]relation{}
	for rel := range relations {
		containedBy[rel.containedBag] = append(containedBy[rel.containedBag], rel.outerBag)
		contains[rel.outerBag] = append(contains[rel.outerBag], rel)
	}

	solutionAResults := map[string]bool{}
	countContainingBags(containedBy, solutionAResults, "shiny gold")

	// Subtract one: countBagsContainedBy counts the shiny gold bag itself.
	solutionB := countBagsContainedBy(contains, "shiny gold") - 1

	return len(solutionAResults), solutionB, nil
}

// countBagsContainedBy returns the total number of bags in the subtree rooted
// at bagName, including bagName itself.
func countBagsContainedBy(contains map[string][]relation, bagName string) int {
	sum := 1
	for _, contained := range contains[bagName] {
		sum += contained.quantity * countBagsContainedBy(contains, contained.containedBag)
	}
	return sum
}

// countContainingBags records in results every bag color that can
// transitively contain bagName.
func countContainingBags(graph map[string][]string, results map[string]bool, bagName string) {
	for _, containing := range graph[bagName] {
		if !results[containing] {
			results[containing] = true
			countContainingBags(graph, results, containing)
		}
	}
}

// relation is one parsed rule: outerBag holds quantity copies of containedBag.
type relation struct {
	outerBag     string
	quantity     int
	containedBag string
}

// fileLineRe matches both the outer bag ("light red bags") and each contained
// clause ("1 bright white bag"); the quantity group is absent for the outer bag.
var fileLineRe = regexp.MustCompile(`(\d+)? ?(\S+ \S+) bags?`)

// parseFile streams one relation per contained-bag clause found in inputFile.
// The returned channel is closed once the file is exhausted.
func parseFile(inputFile string) (<-chan relation, error) {
	lines, err := common.FileLines(inputFile)
	if err != nil {
		return nil, err
	}

	out := make(chan relation)
	go func() {
		defer close(out)
		for line := range lines {
			matches := fileLineRe.FindAllStringSubmatch(line, -1)
			// The first match is the outer bag; the rest are its contents.
			outer := matches[0][2]
			for _, match := range matches[1:] {
				if match[0] == " no other bags" {
					break
				}
				quantity, err := strconv.Atoi(match[1])
				if err != nil {
					panic(err) // regex guarantees digits; input is trusted puzzle data
				}
				out <- relation{
					outerBag:     outer,
					quantity:     quantity,
					containedBag: match[2],
				}
			}
		}
	}()
	return out, nil
}
package schema

import (
	"context"
	"go-graphql-starter/db"
	"go-graphql-starter/person"
)

// ViewerResolver resolves fields available to the current viewer.
type ViewerResolver struct {
	viewer person.Person
}

// User looks up a person by username (passed as ID) and wraps the result in
// a resolver. On lookup failure it returns the zero resolver with the error,
// rather than a resolver around a failed result.
func (resolver *ViewerResolver) User(ctx context.Context, args struct{ ID string }) (person.PersonResolver, error) {
	result, err := person.GetByUsername(args.ID, db.GetConnection())
	if err != nil {
		return person.PersonResolver{}, err
	}
	return person.PersonResolver{result}, nil
}
/* Copyright 2019 The Skaffold Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package portforward import ( "context" "fmt" "io" "strings" "sync" "testing" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/constants" "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest" "github.com/GoogleContainerTools/skaffold/v2/testutil" testEvent "github.com/GoogleContainerTools/skaffold/v2/testutil/event" ) func TestStop(t *testing.T) { testEvent.InitializeState([]latest.Pipeline{{}}) pfe1 := newPortForwardEntry(0, latest.PortForwardResource{ Type: constants.Pod, Name: "resource", Namespace: "default", }, "", "", "", "", 9000, false) pfe2 := newPortForwardEntry(0, latest.PortForwardResource{ Type: constants.Pod, Name: "resource2", Namespace: "default", }, "", "", "", "", 9001, false) fakeForwarder := newTestForwarder() em := NewEntryManager(fakeForwarder) em.forwardPortForwardEntry(context.Background(), io.Discard, pfe1) em.forwardPortForwardEntry(context.Background(), io.Discard, pfe2) testutil.CheckDeepEqual(t, 2, length(&fakeForwarder.forwardedResources)) testutil.CheckDeepEqual(t, 2, fakeForwarder.forwardedPorts.Length()) em.Stop() testutil.CheckDeepEqual(t, 0, length(&fakeForwarder.forwardedResources)) testutil.CheckDeepEqual(t, 0, fakeForwarder.forwardedPorts.Length()) } // length returns the number of elements in a sync.Map func length(m *sync.Map) int { n := 0 m.Range(func(_, _ interface{}) bool { n++ return true }) return n } // print is a String() function for a 
sync.Map func print(m *sync.Map) string { var b strings.Builder b.WriteString("map[") n := 0 m.Range(func(k, v interface{}) bool { if n > 0 { b.WriteRune(' ') } b.WriteString(fmt.Sprintf("%v:%v", k, v)) n++ return true }) b.WriteRune(']') return b.String() }
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package arc import ( "context" "io/ioutil" "os" "path/filepath" "strings" "time" "chromiumos/tast/ctxutil" "chromiumos/tast/errors" "chromiumos/tast/local/arc" "chromiumos/tast/local/arc/optin" "chromiumos/tast/local/chrome" "chromiumos/tast/local/chrome/browser" "chromiumos/tast/local/chrome/browser/browserfixt" "chromiumos/tast/local/chrome/lacros/lacrosfixt" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: PlayAutoInstall, LacrosStatus: testing.LacrosVariantExists, Desc: "A functional test that verifies PlayAutoInstall(PAI) flow, It waits PAI is triggered and verifies the minimal set of apps is schedulled for installation", Contacts: []string{ "arc-core@google.com", "khmel@chromium.org", // author. }, Attr: []string{"group:mainline", "informational"}, SoftwareDeps: []string{"arc_android_data_cros_access", "chrome"}, Params: []testing.Param{{ ExtraSoftwareDeps: []string{"android_p", "chrome"}, Val: browser.TypeAsh, }, { Name: "lacros", ExtraSoftwareDeps: []string{"android_p", "lacros"}, Val: browser.TypeLacros, }, { Name: "vm", ExtraSoftwareDeps: []string{"android_vm"}, Val: browser.TypeAsh, }, { Name: "lacros_vm", ExtraSoftwareDeps: []string{"android_vm", "lacros"}, Val: browser.TypeLacros, }}, Timeout: 4 * time.Minute, VarDeps: []string{"arc.PlayAutoInstall.username", "arc.PlayAutoInstall.password"}, }) } func PlayAutoInstall(ctx context.Context, s *testing.State) { // Note, ARC produces pailist.txt only for this account. Changing this account would lead to test failures. // TODO(khmel): Switch to pool of accounts "ui.gaiaPoolDefault". username := s.RequiredVar("arc.PlayAutoInstall.username") password := s.RequiredVar("arc.PlayAutoInstall.password") const ( // Path to file to read of list of apps triggered by PlayAutoInstall flow (PAI). 
paiList = "/data/data/org.chromium.arc.gms/pailist.txt" ) cleanupCtx := ctx ctx, cancel := ctxutil.Shorten(ctx, 3*time.Second) defer cancel() opts := []chrome.Option{ chrome.GAIALogin(chrome.Creds{User: username, Pass: password}), chrome.ARCSupported(), chrome.ExtraArgs("--arc-disable-app-sync", "--arc-disable-locale-sync", "--arc-play-store-auto-update=off"), } bt := s.Param().(browser.Type) cr, err := browserfixt.NewChrome(ctx, bt, lacrosfixt.NewConfig(), opts...) if err != nil { s.Fatal("Failed to start Chrome: ", err) } defer cr.Close(cleanupCtx) s.Log("Performing optin") maxAttempts := 2 if err := optin.PerformWithRetry(ctx, cr, maxAttempts); err != nil { s.Fatal("Failed to optin: ", err) } // /data/data is not accessible from adb in RVC. Access this using chrome root. androidDataDir, err := arc.AndroidDataDir(ctx, cr.NormalizedUser()) if err != nil { s.Fatal("Failed to get android-data path: ", err) } paiListUnderHome := filepath.Join(androidDataDir, paiList) a, err := arc.New(ctx, s.OutDir()) if err != nil { s.Fatal("Failed to start ARC: ", err) } defer a.Close(cleanupCtx) s.Log("Waiting PAI triggered") if err := testing.Poll(ctx, func(ctx context.Context) error { // On ARCVM virtio-blk /data enabled devices, we mount and unmount the disk image on // every iteration of testing.Poll to ensure that the Android-side changes are // reflected on the host side. 
cleanupFunc, err := arc.MountVirtioBlkDataDiskImageReadOnlyIfUsed(ctx, a, cr.NormalizedUser()) if err != nil { s.Fatal("Failed to make Android /data directory available on host: ", err) } defer cleanupFunc(cleanupCtx) if _, err := os.Stat(paiListUnderHome); err != nil { if os.IsNotExist(err) { return errors.Errorf("paiList %q is not created yet", paiListUnderHome) } return testing.PollBreak(err) } return nil }, &testing.PollOptions{Timeout: 2 * time.Minute}); err != nil { s.Fatal("Failed to wait PAI triggered: ", err) } cleanupFunc, err := arc.MountVirtioBlkDataDiskImageReadOnlyIfUsed(ctx, a, cr.NormalizedUser()) if err != nil { s.Fatal("Failed to make Android /data directory available on host: ", err) } defer cleanupFunc(cleanupCtx) data, err := ioutil.ReadFile(paiListUnderHome) if err != nil { s.Fatal("Failed to read PAI list: ", err) } paiDocs := make(map[string]bool) for _, doc := range strings.Split(string(data), "\n") { // Mark that app was not recognized as default at this momemnt. // List of know default apps will be applied to this map, and value // for each entry would be set to true. All other apps would be // considered as non-default app. if doc != "" { paiDocs[doc] = false } } if len(paiDocs) == 0 { // Common case that usually means PAI configuration is missing at server. s.Fatal("PAI was triggered but returned no app. Server configuration might be missed") } // Define default PAI list. Some boards might have extended set, however following must // exist on any board. defaultPaiDocs := []string{ "com.google.android.deskclock", "com.google.android.apps.books", "com.google.android.play.games", "com.google.android.videos", "com.google.android.apps.youtube.music.pwa", "com.google.android.apps.photos"} // Verify that all default apps from the minimal set are scheduled for installation. 
for _, defaultDoc := range defaultPaiDocs { if _, ok := paiDocs[defaultDoc]; ok { s.Logf("Default app %q is found in the list", defaultDoc) paiDocs[defaultDoc] = true } else { s.Errorf("Default app %q was not found in the list. Server configuration might be outdated", defaultDoc) } } // Print leftover portion as board extra customization. for doc, found := range paiDocs { if !found { s.Logf("Found app %q outside of default list", doc) } } }
package changelog import "github.com/Azure/azure-sdk-for-go/tools/apidiff/report" // Changelog describes a changelog of the package during this generation type Changelog struct { PackageName string NewPackage bool RemovedPackage bool Modified *report.Package } // HasBreakingChanges returns if this report of changelog contains breaking changes func (c Changelog) HasBreakingChanges() bool { return c.RemovedPackage || (c.Modified != nil && c.Modified.HasBreakingChanges()) } func (c Changelog) String() string { return c.ToMarkdown() } // ToMarkdown returns the markdown string of this changelog func (c Changelog) ToMarkdown() string { if c.NewPackage { return "This is a new package" } if c.RemovedPackage { return "This package was removed" } r := c.Modified.ToMarkdown() if r == "" { return "No exported changes" } return r }
package main import ( "net/http" "github.com/dodosuke/authlete-go/internal/app" "github.com/dodosuke/authlete-go/pkg/authlete" "github.com/dodosuke/authlete-go/pkg/util" ) // introspection is an implementation of introspection endpoint defined by RFC 7662. // // RFC 7662, OAuth 2.0 Token Introspection // http://tools.ietf.org/html/rfc7662 // // NOTE: You can implement an introspection endpoint at this authorization server. // However, you may call Authlete's /auth/introspection API directly from your // resource server. func introspection(api *authlete.API) app.Handler { return app.Handler(func(w http.ResponseWriter, r *http.Request) error { // Check the method. if r.Method != http.MethodPost { return util.BadRequest("only POST is allowed").Write(w, nil) } // "2.1. Introspection Request" in RFC 7662 says as follows: // // To prevent token scanning attacks, the endpoint MUST also require // some form of authorization to access this endpoint, such as client // authentication as described in OAuth 2.0 [RFC6749] or a separate // OAuth 2.0 access token such as the bearer token described in OAuth // 2.0 Bearer Token Usage [RFC6750]. The methods of managing and // validating these authentication credentials are out of scope of this // specification. // // Therefore, this API must be protected in some way or other. // Basic Authentication and Bearer Token are typical means, and // both use the value of the 'Authorization' header. // // Authenticate the API. if !authenticateRequest() { return util.Unauthorized("", "").Write(w, nil) } // Extract parameters parameters := util.GetParametersFromRequest(r) // Prepare a request req := &authlete.StandardIntrospectionRequest{Parameters: parameters} // Get a response from the Authlete API Authorization Server res, err := req.Process(api) if err != nil { return err } // Build a response based on the response return res.Write(w, nil) }) } func authenticateRequest() bool { return true }
package value import ( "testing" "github.com/stretchr/testify/require" "go.starlark.net/starlark" ) func TestStringStringMap(t *testing.T) { sv := starlark.NewDict(2) err := sv.SetKey(starlark.String("a"), starlark.String("b")) require.NoError(t, err) err = sv.SetKey(starlark.String("c"), starlark.String("d")) require.NoError(t, err) v := StringStringMap{} err = v.Unpack(sv) require.NoError(t, err) expected := StringStringMap{"a": "b", "c": "d"} require.Equal(t, expected, v) } func TestStringStringMapNotDict(t *testing.T) { sv := starlark.NewList([]starlark.Value{starlark.String("a"), starlark.String("b")}) v := StringStringMap{} err := v.Unpack(sv) require.Error(t, err) require.Contains(t, err.Error(), "expected dict, got *starlark.List") } func TestStringStringMapKeyNotString(t *testing.T) { sv := starlark.NewDict(1) err := sv.SetKey(starlark.MakeInt(1), starlark.String("a")) require.NoError(t, err) v := StringStringMap{} err = v.Unpack(sv) require.Error(t, err) require.Contains(t, err.Error(), "key is not a string: starlark.Int (1)") } func TestStringStringMapValueNotString(t *testing.T) { sv := starlark.NewDict(1) err := sv.SetKey(starlark.String("a"), starlark.MakeInt(1)) require.NoError(t, err) v := StringStringMap{} err = v.Unpack(sv) require.Error(t, err) require.Contains(t, err.Error(), "value is not a string: starlark.Int (1)") } func TestStringStringMapUnpackClearsExistingData(t *testing.T) { sv := starlark.NewDict(2) err := sv.SetKey(starlark.String("a"), starlark.String("b")) require.NoError(t, err) err = sv.SetKey(starlark.String("c"), starlark.String("d")) require.NoError(t, err) v := StringStringMap{} err = v.Unpack(sv) require.NoError(t, err) sv = starlark.NewDict(0) err = v.Unpack(sv) require.NoError(t, err) require.Equal(t, 0, len(v)) }
package controller import ( "fmt" "github.com/myonlyzzy/prometheus-operator/pkg/apis/prometheus.io/v1alpha1" "github.com/myonlyzzy/prometheus-operator/pkg/client/clientset/versioned" pv1alpha1 "github.com/myonlyzzy/prometheus-operator/pkg/client/informers/externalversions/prometheus.io/v1alpha1" listers "github.com/myonlyzzy/prometheus-operator/pkg/client/listers/prometheus.io/v1alpha1" appsv1 "k8s.io/api/apps/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" apps "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog" "time" ) const ( ServiceName = "prometheus" ) type PrometheusController struct { client kubernetes.Interface clientSet versioned.Interface setLister apps.StatefulSetLister svcLister corelisters.ServiceLister prometheusLister listers.PrometheusLister recorder record.EventRecorder workqueue workqueue.RateLimitingInterface setSynced cache.InformerSynced prometheusSynced cache.InformerSynced } //NewPrometheusController return a Prometheus controller func NewPrometheusController(client kubernetes.Interface, clientset versioned.Interface, setInformer appsinformers.StatefulSetInformer, svcInformers coreinformers.ServiceInformer, prometheusInformer pv1alpha1.PrometheusInformer) *PrometheusController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: 
client.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "prometheus"}) p := &PrometheusController{ client: client, clientSet: clientset, recorder: recorder, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "prometheus"), setLister: setInformer.Lister(), svcLister: svcInformers.Lister(), prometheusLister: prometheusInformer.Lister(), prometheusSynced: prometheusInformer.Informer().HasSynced, setSynced: setInformer.Informer().HasSynced, } prometheusInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: p.addPrometheus, DeleteFunc: p.deletePromethues, }) setInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ //AddFunc: p.addStatefulSet, DeleteFunc: p.deleteStatefulSet, }) return p } // func (p *PrometheusController) addStatefulSet(obj interface{}) { //TODO } // add a prometheus obj to workqueque when statefulSet delete func (p *PrometheusController) deleteStatefulSet(obj interface{}) { set := obj.(*appsv1.StatefulSet) prometheus := p.GetPrometheusByStateful(set) p.enqueue(prometheus) } func (p *PrometheusController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer p.workqueue.ShutDown() klog.Info("Starting prometheus controller") klog.Info("Wating for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, p.setSynced, p.prometheusSynced); !ok { return } klog.Info("Starting workers") for i := 0; i < workers; i++ { go wait.Until(p.Worker, time.Second, stopCh) } klog.Info("Started workers") <-stopCh klog.Info("Shutting down workers") } func (p *PrometheusController) Worker() { for p.processNextWorkItem() { } } func (p *PrometheusController) processNextWorkItem() bool { key, quit := p.workqueue.Get() if quit { return false } defer p.workqueue.Done(key) if err := p.sync(key.(string)); err != nil { utilruntime.HandleError(fmt.Errorf("Error syncing Prometheus %v,requeue:%v", key.(string), err)) 
p.workqueue.AddRateLimited(key) } else { p.workqueue.Forget(key) } return true } //convert a prometheus resource to namespace/name then add to workqueue func (p *PrometheusController) enqueue(obj interface{}) { var key string var err error if key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return } p.workqueue.AddRateLimited(key) } func (p *PrometheusController) deletePromethues(obj interface{}) { p.enqueue(obj) } func (p *PrometheusController) addPrometheus(obj interface{}) { p.enqueue(obj) } //sync prometheus reource func (p *PrometheusController) sync(key string) error { startTime := time.Now() defer func() { klog.V(4).Infof("Finished syncing prometheus %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } klog.Infof("sync prometheus %v", name) prometheus, err := p.prometheusLister.Prometheuses(namespace).Get(name) prom := prometheus.DeepCopy() if errors.IsNotFound(err) { klog.Infof("Prometheus has been deleted %v", key) return nil } if err := p.syncService(prom); err != nil { return err } if err := p.syncStatefulSet(prom); err != nil { return err } return nil } //sync prometheus service func (p *PrometheusController) syncService(prometheus *v1alpha1.Prometheus) error { namespace := prometheus.GetNamespace() name := prometheus.GetName() klog.Infof("sync prometheus service %v", name) _, err := p.svcLister.Services(namespace).Get(name) if errors.IsNotFound(err) { err := p.CreateService(prometheus) if err != nil { return err } } return err } //sync prometheus statefulset func (p *PrometheusController) syncStatefulSet(prometheus *v1alpha1.Prometheus) error { namespace := prometheus.GetNamespace() name := prometheus.GetName() klog.Infof("sync prometheus statefulset %v", name) if prometheus.Spec.StatefulSet == nil { if err := p.client.AppsV1().StatefulSets(namespace).Delete(name, &metav1.DeleteOptions{}); err != nil { return err } } _, err := 
p.setLister.StatefulSets(namespace).Get(name) if errors.IsNotFound(err) { err := p.CreateStatefulset(prometheus) if err != nil { return err } } return err } //update prometheus statefulset func (p *PrometheusController) UpdateStatefulSet(prometheus *v1alpha1.Prometheus) { // TODO } //create prometheus service into k8s func (p *PrometheusController) CreateService(prometheus *v1alpha1.Prometheus) error { nameSpace := prometheus.GetNamespace() svc := p.NewPrometheusService(prometheus) _, err := p.client.CoreV1().Services(nameSpace).Create(svc) if apierrors.IsAlreadyExists(err) { return err } if err != nil { p.recorder.Event(prometheus, corev1.EventTypeNormal, "failed", fmt.Sprintln("Create prometheus service failed ")) } else { p.recorder.Event(prometheus, corev1.EventTypeWarning, "success", fmt.Sprintln(" Successful create prometheus service")) } return err } //create prometheus statefulset into k8s func (p *PrometheusController) CreateStatefulset(prometheus *v1alpha1.Prometheus) error { nameSpace := prometheus.GetNamespace() set := p.NewPrometheusStatefulSet(prometheus) _, err := p.client.AppsV1beta1().StatefulSets(nameSpace).Create(set) if apierrors.IsAlreadyExists(err) { return err } if err != nil { p.recorder.Event(prometheus, corev1.EventTypeNormal, "success", fmt.Sprintln("Successful create prometheus service ")) } else { p.recorder.Event(prometheus, corev1.EventTypeWarning, "failed", fmt.Sprintln(" Create prometheus service failed ")) } return err } //new prometheus service object func (p *PrometheusController) NewPrometheusService(prometheus *v1alpha1.Prometheus) *corev1.Service { labels := make(map[string]string) labels["app"] = "prometheus" svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: prometheus.Name, Namespace: prometheus.Namespace, Labels: labels, OwnerReferences: []metav1.OwnerReference{ metav1.OwnerReference{ Name: prometheus.GetName(), Kind: prometheus.Kind, APIVersion: prometheus.APIVersion, UID: prometheus.GetUID(), }, }, }, Spec: 
corev1.ServiceSpec{ ClusterIP: "None", Ports: []corev1.ServicePort{ { Name: "http", Port: 9090, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(9090), }, }, Selector: map[string]string{ "app": "prometheus", }, }, } return svc } //new prometheus statefulset object func (p *PrometheusController) NewPrometheusStatefulSet(prometheus *v1alpha1.Prometheus) *appsv1.StatefulSet { labels := map[string]string{"app": "prometheus"} initVolumeMounts := []corev1.VolumeMount{ corev1.VolumeMount{ Name: "prometheus-data", MountPath: "/data", }, } reloadVolumeMounts := []corev1.VolumeMount{ corev1.VolumeMount{ Name: "config-volume", MountPath: "/etc/config", ReadOnly: true, }, } prometheusVolumeMounts := []corev1.VolumeMount{ corev1.VolumeMount{ Name: "config-volume", MountPath: "/etc/config", }, corev1.VolumeMount{ Name: "prometheus-data", MountPath: "/data", }, } var probe = &corev1.Probe{} probe.Handler = corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/-/ready", Port: intstr.FromInt(9090), }, } probe.InitialDelaySeconds = 30 probe.TimeoutSeconds = 30 var volume, emptyDir corev1.Volume volume.ConfigMap = &corev1.ConfigMapVolumeSource{} volume.ConfigMap.Name = "prometheus-config" volume.Name = "config-volume" emptyDir.Name = "prometheus-data" emptyDir.EmptyDir = &corev1.EmptyDirVolumeSource{} setSpec := appsv1.StatefulSetSpec{ ServiceName: prometheus.Name, Replicas: prometheus.Spec.StatefulSet.Replicas, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "prometheus"}, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "prometheus"}, }, Spec: corev1.PodSpec{ InitContainers: []corev1.Container{ corev1.Container{ Name: "init-chown-data", Image: prometheus.Spec.StatefulSet.InitImage, ImagePullPolicy: prometheus.Spec.StatefulSet.ImagePullPolicy, Command: []string{"chown", "-R", "65534:65534", "/data"}, VolumeMounts: initVolumeMounts, }, }, Containers: []corev1.Container{ corev1.Container{ Name: 
"prometheus-server-configmap-reload", Image: prometheus.Spec.StatefulSet.ReloadImage, ImagePullPolicy: prometheus.Spec.StatefulSet.ImagePullPolicy, Args: []string{ "--volume-dir=/etc/config", "--webhook-url=http://localhost:9090/-/reload", }, VolumeMounts: reloadVolumeMounts, Resources: NewContainerResourceRequirements("10m", "10m", "10Mi", "10Mi"), }, corev1.Container{ Name: "prometheus-server", ImagePullPolicy: prometheus.Spec.StatefulSet.ImagePullPolicy, Image: prometheus.Spec.StatefulSet.PrometheusImage, Args: []string{ "--config.file=/etc/config/prometheus.yml", "--storage.tsdb.path=/data", "--web.console.libraries=/etc/prometheus/console_libraries", "--web.console.templates=/etc/prometheus/consoles", "--web.enable-lifecycle", }, Ports: []corev1.ContainerPort{ corev1.ContainerPort{ ContainerPort: 9090, }, }, Resources: NewContainerResourceRequirements("200m", "200m", "1000Mi", "1000Mi"), VolumeMounts: prometheusVolumeMounts, LivenessProbe: probe, ReadinessProbe: probe, }, }, Volumes: []corev1.Volume{ volume, emptyDir, }, }, }, } set := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: prometheus.Name, Namespace: prometheus.Namespace, Labels: labels, OwnerReferences: []metav1.OwnerReference{ metav1.OwnerReference{ Name: prometheus.GetName(), Kind: prometheus.Kind, APIVersion: prometheus.APIVersion, UID: prometheus.GetUID(), }, }, }, Spec: setSpec, } return set } //new resourceRequirements object func NewContainerResourceRequirements(cpuLimit, cpuRequest, memLimit, memRequest string) corev1.ResourceRequirements { r := corev1.ResourceRequirements{ Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse(cpuLimit), corev1.ResourceMemory: resource.MustParse(memLimit), }, Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse(cpuRequest), corev1.ResourceMemory: resource.MustParse(memRequest), }, } return r } func (p *PrometheusController) GetPrometheusByStateful(set *appsv1.StatefulSet) *v1alpha1.Prometheus { controllerRef := 
metav1.GetControllerOf(set) if controllerRef == nil { return nil } nameSpace := set.GetNamespace() prometheusKind := v1alpha1.SchemeGroupVersion.WithKind("prometheus") if controllerRef.Kind != prometheusKind.Kind { return nil } prometheus, err := p.prometheusLister.Prometheuses(nameSpace).Get(controllerRef.Name) if err != nil { return nil } if prometheus.UID != controllerRef.UID { return nil } return prometheus }
// Package constants centralizes application status codes, error messages,
// common-code identifiers, property names and sentinel errors for the
// loan-application service.
package constants

import "errors"

const (
	// ApplicationStatusKTPSave application status for success update Document Information
	ApplicationStatusKTPSave = "KTP_SAVED"
	// ApplicationStatusPerSave application status for success update Applicant Information
	ApplicationStatusPerSave = "PER_SAVED"
	// ApplicationStatusPaySave application status for success update Payroll Information
	ApplicationStatusPaySave = "PAY_SAVED"
	// ApplicationStatusConSave application status for success update Contact Information
	ApplicationStatusConSave = "CON_SAVED"
	// ApplicationStatusEmpSave application status for success update Employee Information
	ApplicationStatusEmpSave = "EMP_SAVED"
	// ApplicationStatusCreSub application status for Credit Score Submit success
	ApplicationStatusCreSub = "CR_SCORE_SUB"
	// ApplicationStatusCreRej application status for Credit Score Reject success
	ApplicationStatusCreRej = "CR_SCORE_REJ"
	// ApplicationStatusCreApr application status for Credit Score Approval success
	ApplicationStatusCreApr = "CR_SCORE_APR"
	// ApplicationStatusPayRej application status if the request payroll rejected
	ApplicationStatusPayRej = "PAYROLL_REJ"
	// ApplicationStatusUsrRej application status if the request rejected by user
	ApplicationStatusUsrRej = "USR_REJECT"
	// ApplicationStatusLoanCreated application status if the loan has been created
	ApplicationStatusLoanCreated = "LOAN_CREATED"
	// ApplicationStatusLoanPaid application status if the loan has been paid
	ApplicationStatusLoanPaid = "LOAN_PAID"
	// ApplicationStatusAppExp application status if the loan has expired
	ApplicationStatusAppExp = "APP_EXPIRED"
	// OCRTEXT for text detection type in OCR service
	OCRTEXT = "TEXT_DETECTION"
	// OCRVerFail to identify if OCR verification failed
	OCRVerFail = "OCR_VER_FAIL"
	// KTPReject to identify when the requested document rejected
	KTPReject = "KTP_REJECT"
	// ActLinkConf constant application status (presumably activation link
	// confirmed — TODO confirm)
	ActLinkConf = "ACT_LINK_CONF"
	// DisbAccConf constant application status disbursement confirmed
	DisbAccConf = "DISB_ACC_CONF"
	// EKYCompleteStage constant application status E-KYC Completed
	EKYCompleteStage = "EKYC_COM"
	// PrivyCompleteStage constants for privy complete stage (DIG_SIGN_COM)
	PrivyCompleteStage = "DIG_SIGN_COM"
	// DocSigned constant application status for document signing process complete
	DocSigned = "DOCUMENT_SIGNED"
	// DisbInProcess is the constants flag for starting the disbursement process
	DisbInProcess = "DISB_IN_PROCESS"
	// ISOTimeLayout ISO standard time layout without timezone (with timezone use time.RFC3339 instead)
	ISOTimeLayout = "2006-01-02T15:04:05"
	// XBRITimeLayout standard time layout with timezone
	XBRITimeLayout = "2006-01-02T15:04:05.000Z"
	// NoRecordsFound error message for no records found
	NoRecordsFound = "No records fetched"
	// UserIDIsInvalid error message for an invalid user ID
	UserIDIsInvalid = "User ID is invalid"
	// AccountIDIsInvalid error message for an invalid account ID
	AccountIDIsInvalid = "Account ID is invalid"
	// UserIDIsEmpty error message for an empty user ID
	UserIDIsEmpty = "User ID is empty"
	// NoHostFound error message for not found host
	NoHostFound = "The host does not exist."
	// LoanStatusActive flag for active loan exist in finacle
	LoanStatusActive = "Active"
	// UnableToProcessRequestErrMessage error message for unable to process request
	UnableToProcessRequestErrMessage = "We are unable to process your request. Try after sometime."
	// UnexpectedErrorDuringRetrieval error message for an unexpected failure
	// during loan list retrieval
	UnexpectedErrorDuringRetrieval = "An unexpected exception occurred during loan list retrieval"
	// UnexpectedErrorBalance balance error
	UnexpectedErrorBalance = "Error in Fetching in Balance details."
	// PinangProductName project product name
	PinangProductName = "PINANG"
	// FinacleXMLSchema XML namespace of the Finacle fixml schema
	FinacleXMLSchema = "http://www.finacle.com/fixml"
	// FinacleXMLSchemaInstance XML Schema instance namespace
	FinacleXMLSchemaInstance = "http://www.w3.org/2001/XMLSchema-instance"
	// LoanApplicationMissingOrInvalid error message for loan invalid or null
	LoanApplicationMissingOrInvalid = "Loan Application ID is NULL or Invalid"
	// PrivyRecordNotFoundErrMessage error message for invalid privy
	PrivyRecordNotFoundErrMessage = "Privy record not found"
	// InternalErrorASLIRIMessage error message for internal error ASLIRI
	InternalErrorASLIRIMessage = "Internal service error, try in 15 minutes."
	// ASTG code type constants
	ASTG = "ASTG"
	// FinacleURLPath url path for finacle services
	FinacleURLPath = "FISERVLET/fihttp"
	// CompareDone status after photo compare
	CompareDone = "COMP_DONE"
	// LoanTypeCode code type constants LOAN TYPE
	LoanTypeCode = "LNTP"
	// CommonCodePinang code type constants for cocd
	CommonCodePinang = "Pinang"
	// CommonCodeMaritalStatus code type constants for cocd marital status
	CommonCodeMaritalStatus = "MSTH"
	// CommonCodeLastEducation code type constants for cocd last education
	CommonCodeLastEducation = "EDUH"
	// CommonCodeHomeOwnership code type constants for cocd home ownership
	CommonCodeHomeOwnership = "RESH"
	// CommonCodeSegmentationType code type constants for cocd segmentation class
	CommonCodeSegmentationType = "SGMC"
	// DocSignedCodeDesc code desc constants for astg DOCUMENT_SIGNED
	DocSignedCodeDesc = "Document Signed"
	// EkycCompletedCodeDesc code desc constants for astg EKYC_COM
	EkycCompletedCodeDesc = "EKYC Completed"
	// PrivyCompleteStageCodeDesc code desc constants for astg DIG_SIGN_COM
	PrivyCompleteStageCodeDesc = "Digital Signature Created"
	// LoanCreatedCodeDesc code desc constants for astg LOAN_CREATED
	LoanCreatedCodeDesc = "Loan Created"
)

// Storing all the codes constant
const (
	// CodeLoanApplicationFetchNoRecords error code for record not found in the database
	CodeLoanApplicationFetchNoRecords = "200606"
	// CodeUserIDEmpty error code for user id empty
	CodeUserIDEmpty = "211081"
	// CodeUserIDInvalid error code for user id invalid / not found
	CodeUserIDInvalid = "211082"
	// CodeAccountIDInvalid error code for account id invalid / not found
	CodeAccountIDInvalid = "211083"
	// CodeHostNotFound error code for host invalid / not found
	CodeHostNotFound = "14084"
	// CodeUnableProcessRequest error code for a request that could not be processed
	CodeUnableProcessRequest = "100126"
	// CodeBalanceProcessFailed error code for a failed balance retrieval
	CodeBalanceProcessFailed = "211304"
	// SuccessCode is the constant code for api success
	SuccessCode = "0000"
	// CodeLoanAppIDMandatory error code for a missing loan application ID
	CodeLoanAppIDMandatory = "211095"
	// CodeASLIRI02 asliri02 error code
	CodeASLIRI02 = "211102"
	// CodeASLIRI03 asliri03 error code
	CodeASLIRI03 = "211103"
	// CodeASLIRI04 asliri04 error code
	CodeASLIRI04 = "211104"
	// CodeASLIRI05 asliri05 error code
	CodeASLIRI05 = "211105"
	// CodeASLIRI06 asliri06 error code
	CodeASLIRI06 = "211106"
)

// Storing all the cm code desc constant
const (
	// CMDescInvalidInputValue cm code for invalid input message
	CMDescInvalidInputValue = "There are invalid input value"
)

// Constants Property
const (
	// PRPMAppID constant for Property APP ID
	PRPMAppID = "BWY"
	// CreditScoreTimeIntervalProperty property name for credit score time interval
	CreditScoreTimeIntervalProperty = "CREDIT_SCR_CAL_TIME_INTRVL"
	// CooldownPeriodMissingSimpananProperty property name for cooldown period missing simpanan
	CooldownPeriodMissingSimpananProperty = "COOLDOWN_PERIOD_FOR_MISSING_SIMPANAN"
	// CreditOfferValidityPeriodProperty property name for credit offer validity
	CreditOfferValidityPeriodProperty = "CREDIT_OFFER_VALIDITY_PERIOD"
	// InstallationProductIDProperty property name for installation product id
	InstallationProductIDProperty = "INSTALLATION_PRODUCT_ID"
	// AllowedPeriodIncompleteProperty property name for Allowed period in days to complete the incomplete credit approval process
	AllowedPeriodIncompleteProperty = "ALLOWED_PERIOD_FOR_INCOMPLETE_APP"
	// CooldownPeriodPayrollRejProperty property name for Cooldown period in days for a system rejected payroll application
	CooldownPeriodPayrollRejProperty = "COOLDOWN_PERIOD_FOR_PAYROLL_REJECTION"
	// AllowedConsecutiveUserRejProperty property name for Allowed number of user rejection post which a temporary ban will be put on user for application
	AllowedConsecutiveUserRejProperty = "ALLOWED_CONSECUTIVE_USER_REJ"
	// BanPeriodForUserRejProperty property name for Cooldown period in days for a user rejected application
	BanPeriodForUserRejProperty = "BAN_PERIOD_FOR_USER_REJECTION"
	// BRILifeInsuranceCode property name for "BRI_LIFE_INSURANCE_CODE"
	BRILifeInsuranceCode = "BRI_LIFE_INSURANCE_CODE"
	// FEBASharedSystemPath property name for FEBA_SHARED_SYS_PATH
	FEBASharedSystemPath = "FEBA_SHARED_SYS_PATH"
	// IsEKYCEnabled property name for IS_EKYC_ENABLED
	IsEKYCEnabled = "IS_EKYC_ENABLED"
	// AsliriApprovalRate property name for ASLIRI_APPROVAL_RATE
	AsliriApprovalRate = "ASLIRI_APPROVAL_RATE"
	// IsAsliriEnabled property name for IS_ASLIRI_ENABLED
	IsAsliriEnabled = "IS_ASLIRI_ENABLED"
	// ASLIRIURLRequest is prpm value for retrieve the verify URL ASLIRI_SELFIE_VERIFY_URL
	ASLIRIURLRequest = "ASLIRI_SELFIE_VERIFY_URL"
	// ASLIRITokenRequest is prpm value for retrieve the token ASLIRI_HEADER_TOKEN
	ASLIRITokenRequest = "ASLIRI_HEADER_TOKEN"
	// DaysToDueDateInstallmentOne DAYS_TO_DUE_DATE_INST_ONE
	DaysToDueDateInstallmentOne = "DAYS_TO_DUE_DATE_INST_ONE"
	// CoreBODDate CORE_BOD_DATE
	CoreBODDate = "CORE_BOD_DATE"
	// DaysToDueDateInstallmentTwoOrMore DAYS_TO_DUE_DATE_INST_TWO_OR_MORE
	DaysToDueDateInstallmentTwoOrMore = "DAYS_TO_DUE_DATE_INST_TWO_OR_MORE"
	// PrivyMerchantKey PRIVY_MERCHANT_KEY
	PrivyMerchantKey = "PRIVY_MERCHANT_KEY"
	// PrivyAuthToken PRIVY_AUTHORIZATION_KEY
	PrivyAuthToken = "PRIVY_AUTHORIZATION_KEY"
	// PersonalNumberKey PERSONAL_NUMBER
	PersonalNumberKey = "PERSONAL_NUMBER"
)

var (
	// ErrNoRecords error default if there is no records in the database
	ErrNoRecords = errors.New(NoRecordsFound)
	// ErrUserIDInvalid error default if there is invalid user ID provided
	ErrUserIDInvalid = errors.New(UserIDIsInvalid)
	// ErrUnexpectedDuringRetrieval error default if there is failure in finacle
	ErrUnexpectedDuringRetrieval = errors.New(UnexpectedErrorDuringRetrieval)
	// ErrProcessLoanCreation error default if there is failure in finacle create loan
	ErrProcessLoanCreation = errors.New("error while creating loan")
	// ErrProcessCustomerCreation error default if there is failure in finacle create customer
	ErrProcessCustomerCreation = errors.New("error while creating customer")
	// ErrProcessPrivyCreation error default if there is failure create privy registration
	ErrProcessPrivyCreation = errors.New("error while creating privy registration")
	// ErrRuntime error runtime
	ErrRuntime = errors.New("Runtime error has occurred")
)
// Package breach tests: table-driven checks for the package's sleep,
// response-verification and HTTP GET helpers.
package breach

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"testing"
)

// Test_Sleep checks that Sleep fails on an unparseable seconds string and
// succeeds on a valid one.
func Test_Sleep(t *testing.T) {
	cases := []struct {
		seconds      string
		expectingErr bool
	}{
		{"10E99999", true}, // overflows/unparseable -> error expected
		{"1", false},
	}

	for i, c := range cases {
		fmt.Printf("Running case %d\n", i+1)
		err := Sleep(c.seconds)
		if c.expectingErr {
			if err == nil {
				t.Errorf("Expecting an error in case %d\n", i+1)
			} else {
				fmt.Println(err)
			}
			continue
		}
		if err != nil {
			t.Error(err)
		}
	}
}

// Test_VerifyResponse checks the status-code classification: 400 is an
// error, while 200 and 429 are not (429 is handled by the retry logic).
func Test_VerifyResponse(t *testing.T) {
	cases := []struct {
		status       int
		expectingErr bool
	}{
		{400, true},
		{200, false},
		{429, false},
	}

	for i, c := range cases {
		fmt.Printf("Running case %d\n", i+1)
		_, err := VerifyResponse(c.status)
		if c.expectingErr {
			if err == nil {
				t.Errorf("Expecting an error in case %d\n", i+1)
			} else {
				fmt.Println(err)
			}
			continue
		}
		if err != nil {
			t.Error(err)
		}
	}
}

// Test_VerifyAndRetry checks the combined verify+retry path. Note that 429
// ends in an error here (retries eventually exhausted), unlike
// VerifyResponse above.
func Test_VerifyAndRetry(t *testing.T) {
	cases := []struct {
		res          http.Response
		expectingErr bool
	}{
		{http.Response{StatusCode: 400}, true},
		{http.Response{StatusCode: 200}, false},
		{http.Response{StatusCode: 429}, true},
	}

	for i, c := range cases {
		fmt.Printf("Running case %d\n", i+1)
		_, err := VerifyAndRetry(&c.res)
		if c.expectingErr {
			if err == nil {
				t.Errorf("Expecting an error in case %d\n", i+1)
			} else {
				fmt.Println(err)
			}
			continue
		}
		if err != nil {
			t.Error(err)
		}
	}
}

// Test_Get checks Get against an invalid URL (error) and a local test
// server (success), and that the returned body is readable.
func Test_Get(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "HIBPwned servers should be happy now")
	}))
	defer ts.Close()

	cases := []struct {
		URL          string
		expectingErr bool
	}{
		{":", true}, // malformed URL
		{ts.URL, false},
	}

	for i, c := range cases {
		fmt.Printf("Running case %d\n", i+1)
		res, err := Get(c.URL)
		if c.expectingErr {
			if err == nil {
				t.Errorf("Expecting an error in case %d\n", i+1)
			} else {
				fmt.Println(err)
			}
			continue
		}
		if err != nil {
			t.Error(err)
		}
		_, err = ioutil.ReadAll(res.Body)
		res.Body.Close()
		if err != nil {
			t.Error(err)
		}
	}
}
// 1.2 Command-line arguments ("The Go Programming Language", chapter 1).
// All code below is intentionally commented out; each section is one
// variant of the echo program or one of the chapter's exercises.
package main

//func main() {
//fmt.Println("hello, world")

// First way to concatenate the arguments: index loop with a running separator.
//var s, sep string
//for i := 1; i < len(os.Args); i++ {
//	s += sep + os.Args[i]
//	sep = "/"
//}
//fmt.Println(s)

// Second way: range — the first value is the index, the second is the
// element at that index.
//s, sep := "", "/"
//for _, arg := range os.Args[1:] {
//	s += sep + arg
//}
//fmt.Println(s)

// Concatenate using the strings package's Join function.
//fmt.Println(strings.Join(os.Args[1:], " "))
//fmt.Println(os.Args[1:])

// Exercise 1.1:
// Modify echo to also print os.Args[0], the name of the command itself.
//fmt.Println(os.Args[0])

// Exercise 1.2:
// Modify echo to print the index and value of each argument, one per line.
//for index, arg := range os.Args[1:] {
//	fmt.Println(index, arg)
//}

// Exercise 1.3:
// Measure the running-time difference between the potentially inefficient
// version and the strings.Join version. (Section 1.6 covers part of the
// time package; section 11.4 shows how to write systematic benchmarks.)
//startd:=time.Now()
//s, sep := "", "/"
//for _, arg := range os.Args[1:] {
//	s += sep + arg
//}
//fmt.Println(s)
//endd:=time.Now()
//spendTime:= endd.Sub(startd)
//fmt.Println("低效方式消耗时间:",spendTime)
//
//startg:=time.Now()
//fmt.Println(strings.Join(os.Args[1:],"/"))
//endg:=time.Now()
//spendTimeg:= endg.Sub(startg)
//fmt.Println("高效方式消耗时间:",spendTimeg)

//}
// Package main demonstrates the "longest common prefix" problem.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Longest common prefix.
	//
	// Write a function that finds the longest common prefix among an array
	// of strings; return "" when there is no common prefix.
	//
	// Example 1: input ["flower","flow","flight"] -> output "fl"
	// Example 2: input ["dog","racecar","car"]    -> output ""
	arr := []string{"flower", "flow", "flight"}
	res := longestPrefix(arr)
	fmt.Println(res)
}

// longestPrefix returns the longest common prefix of all strings in arr.
// It takes the first element as the candidate and shrinks it from the right
// until every string starts with it. An empty or nil slice yields "".
func longestPrefix(arr []string) string {
	// BUG FIX: the original guard was `len(arr) < 0`, which is never true,
	// so an empty slice panicked on the arr[0] access below.
	if len(arr) == 0 {
		return ""
	}
	prefix := arr[0]
	for _, s := range arr {
		// Shrink the candidate until s starts with it (idiomatic
		// replacement for `strings.Index(s, prefix) != 0`).
		for !strings.HasPrefix(s, prefix) {
			if len(prefix) == 0 {
				return ""
			}
			prefix = prefix[:len(prefix)-1]
		}
	}
	return prefix
}
package column

import "github.com/vahid-sohrabloo/chconn/v2/internal/readerwriter"

// ArrayNullable is a column of Array(Nullable(T)) ClickHouse data type.
type ArrayNullable[T comparable] struct {
	Array[T]
	dataColumn NullableColumn[T]
	// columnData caches dataColumn.DataP() for the current block; it is
	// (re)filled lazily by getColumnData and cleared via resetHook.
	columnData []*T
}

// NewArrayNullable create a new array column of Array(Nullable(T)) ClickHouse data type
func NewArrayNullable[T comparable](dataColumn NullableColumn[T]) *ArrayNullable[T] {
	a := &ArrayNullable[T]{
		dataColumn: dataColumn,
		Array: Array[T]{
			ArrayBase: ArrayBase{
				dataColumn:   dataColumn,
				offsetColumn: New[uint64](),
			},
		},
	}
	// Drop the cached pointer data whenever the column is reset.
	a.resetHook = func() {
		a.columnData = a.columnData[:0]
	}
	return a
}

// DataP gets all the nullable data in the current block as slices of pointers.
func (c *ArrayNullable[T]) DataP() [][]*T {
	values := make([][]*T, c.offsetColumn.numRow)
	var lastOffset uint64
	columnData := c.getColumnData()
	for i := 0; i < c.offsetColumn.numRow; i++ {
		// Row i spans [lastOffset, offset(i)) in the flat data column.
		values[i] = columnData[lastOffset:c.offsetColumn.Row(i)]
		lastOffset = c.offsetColumn.Row(i)
	}
	return values
}

// ReadP reads all the nullable data in the current block as slices of
// pointers and appends them to the input.
func (c *ArrayNullable[T]) ReadP(value [][]*T) [][]*T {
	var lastOffset uint64
	columnData := c.getColumnData()
	for i := 0; i < c.offsetColumn.numRow; i++ {
		value = append(value, columnData[lastOffset:c.offsetColumn.Row(i)])
		lastOffset = c.offsetColumn.Row(i)
	}
	return value
}

// RowP return the nullable value of given row as a pointer
// NOTE: Row number start from zero
func (c *ArrayNullable[T]) RowP(row int) []*T {
	var lastOffset uint64
	if row != 0 {
		lastOffset = c.offsetColumn.Row(row - 1)
	}
	var val []*T
	// Copy into a fresh slice so the caller does not alias the cache.
	val = append(val, c.getColumnData()[lastOffset:c.offsetColumn.Row(row)]...)
	return val
}

// AppendP a nullable value for insert
func (c *ArrayNullable[T]) AppendP(v ...[]*T) {
	for _, v := range v {
		// Record the row length, then push the elements into the flat column.
		c.AppendLen(len(v))
		c.dataColumn.AppendP(v...)
	}
}

// AppendItemP Append nullable item value for insert
//
// it should use with AppendLen
//
// Example:
//
//	c.AppendLen(2) // insert 2 items
//	c.AppendItemP(val1, val2) // insert item 1
func (c *ArrayNullable[T]) AppendItemP(v ...*T) {
	c.dataColumn.AppendP(v...)
}

// ArrayOf return a Array type for this column
func (c *ArrayNullable[T]) ArrayOf() *Array2Nullable[T] {
	return NewArray2Nullable(c)
}

// ReadRaw read raw data from the reader. it runs automatically
func (c *ArrayNullable[T]) ReadRaw(num int, r *readerwriter.Reader) error {
	err := c.Array.ReadRaw(num, r)
	if err != nil {
		return err
	}
	// Eagerly refresh the pointer cache for the freshly read block.
	c.columnData = c.dataColumn.DataP()
	return nil
}

// getColumnData returns the cached per-block pointer data, filling the
// cache from the data column when it is empty.
func (c *ArrayNullable[T]) getColumnData() []*T {
	if len(c.columnData) == 0 {
		c.columnData = c.dataColumn.DataP()
	}
	return c.columnData
}

// elem descends arrayLevel nesting levels, wrapping in further array
// columns as needed, and returns the resulting column.
func (c *ArrayNullable[T]) elem(arrayLevel int) ColumnBasic {
	if arrayLevel > 0 {
		return c.ArrayOf().elem(arrayLevel - 1)
	}
	return c
}
package solutions import ( "math/rand" ) type Pair struct { value int index int } type RandomizedCollection struct { mapToIndex map[int][]int nums []Pair } func Constructor() RandomizedCollection { return RandomizedCollection { mapToIndex: make(map[int][]int), nums: []Pair{}, } } func (this *RandomizedCollection) Insert(val int) bool { result := false if _, ok := this.mapToIndex[val]; !ok { this.mapToIndex[val] = []int{} result = true } this.mapToIndex[val] = append(this.mapToIndex[val], len(this.nums)) this.nums = append(this.nums, Pair{value: val, index: len(this.mapToIndex[val]) - 1}) return result } func (this *RandomizedCollection) Remove(val int) bool { if _, ok := this.mapToIndex[val]; !ok { return false } indices := this.mapToIndex[val] replaceIndex := indices[len(indices) - 1] last := this.nums[len(this.nums) - 1] this.mapToIndex[last.value][last.index] = replaceIndex this.nums[replaceIndex] = last this.nums = this.nums[:len(this.nums) - 1] this.mapToIndex[val] = indices[:len(indices) - 1] if len(this.mapToIndex[val]) == 0 { delete(this.mapToIndex, val) } return true } func (this *RandomizedCollection) GetRandom() int { return this.nums[rand.Intn(len(this.nums))].value }
// Package main demonstrates branching on a string value; the original used
// a switch with a multi-value case, rendered here as an if/else chain.
package main

import "fmt"

func main() {
	subject := "text"

	if subject == "test" {
		fmt.Println("Test abcd")
	} else if subject == "text" || subject == "hello" {
		// equivalent of a comma-separated case list in switch form
		fmt.Println("Text abcd")
	} else if subject == "outer" {
		fmt.Println("Outer abcd")
	} else {
		fmt.Println("Bye abcd")
	}
}

// Text abcd
// Package core tests: behavioral spec for core.StartOp.
package core

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/opspec-io/opctl/util/containerprovider"
	"github.com/opspec-io/opctl/util/pubsub"
	"github.com/opspec-io/opctl/util/uniquestring"
	"github.com/opspec-io/sdk-golang/pkg/model"
	"time"
)

// Verifies that StartOp forwards the request's args and pkg ref to
// opCaller.Call, uses the generated unique string as the op id, and makes
// that op id its own root op id.
var _ = Context("core", func() {
	Context("StartOp", func() {
		It("should call opCaller.Call w/ expected args", func() {
			/* arrange */
			providedReq := model.StartOpReq{
				Args: map[string]*model.Data{
					"dummyArg1Name": {String: "dummyArg1Value"},
					"dummyArg2Name": {Dir: "dummyArg2Value"},
					"dummyArg3Name": {Dir: "dummyArg3Value"},
					"dummyArg4Name": {Dir: "dummyArg4Value"},
				},
				PkgRef: "dummyPkgRef",
			}

			expectedOpId := "dummyOpId"

			fakeOpCaller := new(fakeOpCaller)

			// Pin the op id generated inside StartOp so it can be asserted on.
			fakeUniqueStringFactory := new(uniquestring.Fake)
			fakeUniqueStringFactory.ConstructReturns(expectedOpId)

			objectUnderTest := _core{
				containerProvider:   new(containerprovider.Fake),
				pubSub:              new(pubsub.Fake),
				opCaller:            fakeOpCaller,
				dcgNodeRepo:         new(fakeDcgNodeRepo),
				uniqueStringFactory: fakeUniqueStringFactory,
			}

			/* act */
			objectUnderTest.StartOp(providedReq)

			/* assert */
			// Call happens in go routine; wait 500ms to allow it to occur.
			// NOTE(review): sleep-based synchronization is inherently racy;
			// an Eventually() poll would be more robust.
			time.Sleep(time.Millisecond * 500)
			actualInboundScope, actualOpId, actualPkgRef, actualRootOpId := fakeOpCaller.CallArgsForCall(0)
			Expect(actualInboundScope).To(Equal(providedReq.Args))
			Expect(actualOpId).To(Equal(expectedOpId))
			Expect(actualPkgRef).To(Equal(providedReq.PkgRef))
			// A directly started op is its own root op.
			Expect(actualRootOpId).To(Equal(actualOpId))
		})
	})
})
package miner import ( "bytes" "encoding/json" "errors" "fmt" "math/big" "sync" "time" "github.com/golang/protobuf/proto" "github.com/xuperchain/xupercore/bcs/ledger/xledger/state" "github.com/xuperchain/xupercore/bcs/ledger/xledger/tx" lpb "github.com/xuperchain/xupercore/bcs/ledger/xledger/xldgpb" xctx "github.com/xuperchain/xupercore/kernel/common/xcontext" "github.com/xuperchain/xupercore/kernel/engines/xuperos/common" "github.com/xuperchain/xupercore/lib/logs" "github.com/xuperchain/xupercore/lib/metrics" "github.com/xuperchain/xupercore/lib/timer" "github.com/xuperchain/xupercore/lib/utils" ) const ( tickOnCalcBlock = time.Second syncOnstatusChangeTimeout = 1 * time.Minute statusFollowing = 0 statusMining = 1 ) var ( errCalculateBlockInterrupt = errors.New("calculate block interrupted") ) // 负责生产和同步区块 type Miner struct { ctx *common.ChainCtx log logs.Logger // 当前节点状态,矿工或者同步节点 // 值得注意的是节点同一时刻只能处于一种角色,并严格执行相应的动作。 // 即:如果是矿工则只出块,并且不会向其他节点同步新区块(pow除外),如果是非矿工则定时同步区块。 status int // 标记是否退出运行 isExit bool // 用户等待退出 exitWG sync.WaitGroup } func NewMiner(ctx *common.ChainCtx) *Miner { obj := &Miner{ ctx: ctx, log: ctx.GetLog(), } return obj } // Deprecated: 使用新的同步方案,这个函数仅用来兼容 // 处理P2P网络中接收到的区块 func (t *Miner) ProcBlock(ctx xctx.XContext, block *lpb.InternalBlock) error { return nil } // 启动矿工,周期检查矿工身份 // 同一时间,矿工状态是唯一的。0:休眠中 1:同步区块中 2:打包区块中 func (t *Miner) Start() { // 用于监测退出 t.exitWG.Add(1) defer t.exitWG.Done() var err error t.status = statusFollowing ctx := &xctx.BaseCtx{ XLog: t.log, Timer: timer.NewXTimer(), } t.syncWithNeighbors(ctx) // 启动矿工循环 for !t.IsExit() { err = t.step() // 如果出错,休眠1s后重试,防止cpu被打满 if err != nil { t.log.Warn("miner run occurred error,sleep 1s try", "err", err) time.Sleep(time.Second) } } } // 停止矿工 func (t *Miner) Stop() { t.isExit = true t.exitWG.Wait() } func (t *Miner) IsExit() bool { return t.isExit } func traceMiner() func(string) { last := time.Now() return func(action string) { metrics.CallMethodHistogram.WithLabelValues("miner", 
action).Observe(time.Since(last).Seconds()) last = time.Now() } } // step 用于推动节点循环进行一次动作,可以是一次出块动作(矿工角色),也可以是一次区块同步(非矿工) // 在此期间可能会发生节点角色变更。 func (t *Miner) step() error { ledgerTipId := t.ctx.Ledger.GetMeta().TipBlockid ledgerTipHeight := t.ctx.Ledger.GetMeta().TrunkHeight stateTipId := t.ctx.State.GetLatestBlockid() log, _ := logs.NewLogger("", "miner") ctx := &xctx.BaseCtx{ XLog: log, Timer: timer.NewXTimer(), } if !bytes.Equal(ledgerTipId, stateTipId) { err := t.ctx.State.Walk(ledgerTipId, false) if err != nil { return err } } trace := traceMiner() ctx.GetLog().Trace("miner step", "ledgerTipHeight", ledgerTipHeight, "ledgerTipId", utils.F(ledgerTipId), "stateTipId", utils.F(stateTipId)) // 通过共识检查矿工身份 isMiner, isSync, err := t.ctx.Consensus.CompeteMaster(ledgerTipHeight + 1) trace("competeMaster") ctx.GetLog().Trace("compete master result", "height", ledgerTipHeight+1, "isMiner", isMiner, "isSync", isSync, "err", err) if err != nil { return err } // 如需要同步,尝试同步网络最新区块 if isMiner && isSync { err = t.syncWithValidators(ctx, syncOnstatusChangeTimeout) if err != nil { return err } } trace("syncUpValidators") // 如果是矿工,出块 if isMiner { if t.status == statusFollowing { ctx.GetLog().Info("miner change follow=>miner", "miner", t.ctx.Address.Address, "netAddr", t.ctx.EngCtx.Net.PeerInfo().Id, "height", t.ctx.Ledger.GetMeta().GetTrunkHeight(), ) // 在由非矿工向矿工切换的这次"边沿触发",主动向所有的验证集合的最长链进行一次区块同步 err = t.syncWithValidators(ctx, syncOnstatusChangeTimeout) if err != nil { return err } } t.status = statusMining err = t.mining(ctx) if err != nil { return err } trace("mining") return nil } // 非miner,向邻居同步区块 if t.status == statusMining { ctx.GetLog().Info("miner change miner=>following", "miner", t.ctx.Address.Address, "netAddr", t.ctx.EngCtx.Net.PeerInfo().Id, "height", t.ctx.Ledger.GetMeta().GetTrunkHeight(), ) } t.status = statusFollowing err = t.syncWithNeighbors(ctx) if err != nil { return err } trace("syncPeers") return nil } // 挖矿生产区块 func (t *Miner) mining(ctx xctx.XContext) 
error { ctx.GetLog().Debug("mining start.") // 1.共识挖矿前处理 height := t.ctx.Ledger.GetMeta().TrunkHeight + 1 now := time.Now() truncateTarget, extData, err := t.ctx.Consensus.ProcessBeforeMiner(now.UnixNano()) ctx.GetTimer().Mark("ProcessBeforeMiner") if err != nil { ctx.GetLog().Warn("consensus process before miner failed", "err", err) return fmt.Errorf("consensus process before miner failed") } ctx.GetLog().Debug("consensus before miner succ", "truncateTarget", truncateTarget, "extData", string(extData)) if truncateTarget != nil { // 裁剪掉账本目标区块,裁掉的交易判断冲突重新回放,裁剪完后继续出块操作 if err := t.truncateForMiner(ctx, truncateTarget); err != nil { return err } // 重置高度 height = t.ctx.Ledger.GetMeta().TrunkHeight + 1 } // 2.打包区块 beginTime := time.Now() block, err := t.packBlock(ctx, height, now, extData) ctx.GetTimer().Mark("PackBlock") metrics.CallMethodHistogram.WithLabelValues("miner", "PackBlock").Observe(time.Since(beginTime).Seconds()) if err != nil { ctx.GetLog().Warn("pack block error", "err", err) return err } ctx.GetLog().Debug("pack block succ", "height", height, "blockId", utils.F(block.GetBlockid())) // 3. 
针对一些需要patch区块的共识 origBlkId := block.Blockid blkAgent := state.NewBlockAgent(block) err = t.calculateBlock(blkAgent) ctx.GetTimer().Mark("CalculateBlock") if err == errCalculateBlockInterrupt { return nil } if err != nil { ctx.GetLog().Warn("consensus calculate block failed", "err", err, "blockId", utils.F(block.Blockid)) return fmt.Errorf("consensus calculate block failed") } ctx.GetLog().Trace("start confirm block for miner", "originalBlockId", utils.F(origBlkId), "newBlockId", utils.F(block.Blockid)) // 4.账本&状态机&共识确认新区块 err = t.confirmBlockForMiner(ctx, block) if err != nil { ctx.GetLog().Warn("confirm block for miner failed", "err", err, "blockId", utils.F(block.GetBlockid())) return err } ctx.GetLog().Info("finish new block generation", "blockId", utils.F(block.GetBlockid()), "height", height, "txCount", block.TxCount, "size", proto.Size(block), "costs", ctx.GetTimer().Print()) return nil } // 裁剪掉账本最新的区块 func (t *Miner) truncateForMiner(ctx xctx.XContext, target []byte) error { _, err := t.ctx.Ledger.QueryBlockHeader(target) if err != nil { ctx.GetLog().Warn("truncate failed because query target error", "err", err) return err } // 状态机回滚到目标状态 err = t.ctx.State.Walk(target, false) if err != nil { ctx.GetLog().Warn("truncate failed because state walk error", "ledgerTipId", utils.F(t.ctx.Ledger.GetMeta().TipBlockid), "walkTargetBlockId", utils.F(target)) return err } // 账本裁剪到这个区块 err = t.ctx.Ledger.Truncate(target) if err != nil { ctx.GetLog().Warn("truncate failed because ledger truncate error", "err", err) return err } return nil } func (t *Miner) packBlock(ctx xctx.XContext, height int64, now time.Time, consData []byte) (*lpb.InternalBlock, error) { // 区块大小限制 sizeLimit, err := t.ctx.State.MaxTxSizePerBlock() if err != nil { return nil, err } ctx.GetLog().Debug("pack block get max size succ", "sizeLimit", sizeLimit) // 1.生成timer交易 autoTx, err := t.getTimerTx(height) if err != nil { return nil, err } if len(autoTx.TxOutputsExt) > 0 { sizeLimit -= 
proto.Size(autoTx) } ctx.GetLog().Debug("pack block get timer tx succ", "auto tx", autoTx) // 2.选择本次要打包的tx generalTxList, err := t.getUnconfirmedTx(sizeLimit) if err != nil { return nil, err } ctx.GetLog().Debug("pack block get general tx succ", "txCount", len(generalTxList)) // 3.获取矿工奖励交易 awardTx, err := t.getAwardTx(height) if err != nil { return nil, err } ctx.GetLog().Debug("pack block get award tx succ", "txid", utils.F(awardTx.GetTxid())) txList := make([]*lpb.Transaction, 0, len(generalTxList)+1+1) // 先coinbase tx txList = append(txList, awardTx) // 再autotx if len(autoTx.TxOutputsExt) > 0 { txList = append(txList, autoTx) } // 最后普通tx if len(generalTxList) > 0 { txList = append(txList, generalTxList...) } // 4.打包区块 consInfo, err := t.convertConsData(consData) if err != nil { ctx.GetLog().Warn("convert consensus data failed", "err", err, "consData", string(consData)) return nil, fmt.Errorf("convert consensus data failed") } block, err := t.ctx.Ledger.FormatMinerBlock(txList, []byte(t.ctx.Address.Address), t.ctx.Address.PrivateKey, now.UnixNano(), consInfo.CurTerm, consInfo.CurBlockNum, t.ctx.State.GetLatestBlockid(), consInfo.TargetBits, t.ctx.State.GetTotal(), consInfo.Justify, nil, height) if err != nil { ctx.GetLog().Warn("format block error", "err", err) return nil, err } return block, nil } func (t *Miner) convertConsData(data []byte) (*state.ConsensusStorage, error) { var consInfo state.ConsensusStorage if len(data) < 1 { return &consInfo, nil } err := json.Unmarshal(data, &consInfo) if err != nil { return nil, err } return &consInfo, nil } func (t *Miner) getTimerTx(height int64) (*lpb.Transaction, error) { autoTx, err := t.ctx.State.GetTimerTx(height) if err != nil { t.log.Error("Get timer tx error", "error", err) return nil, common.ErrGenerateTimerTxFailed } return autoTx, nil } func (t *Miner) getUnconfirmedTx(sizeLimit int) ([]*lpb.Transaction, error) { unconfirmedTxs, err := t.ctx.State.GetUnconfirmedTx(false, sizeLimit) if err != nil { return nil, 
err } return unconfirmedTxs, nil // txList := make([]*lpb.Transaction, 0) // for _, tx := range unconfirmedTxs { // size := proto.Size(tx) // if size > sizeLimit { // break // } // sizeLimit -= size // txList = append(txList, tx) // } // return txList, nil } func (t *Miner) getAwardTx(height int64) (*lpb.Transaction, error) { amount := t.ctx.Ledger.GenesisBlock.CalcAward(height) if amount.Cmp(big.NewInt(0)) < 0 { return nil, errors.New("amount in transaction can not be negative number") } awardTx, err := tx.GenerateAwardTx(t.ctx.Address.Address, amount.String(), []byte("award")) if err != nil { return nil, err } return awardTx, nil } // pow类共识的CompleteMaster结果并不能反映当前的矿工身份,每个节点都是潜在的矿工, // 因此需要在calculateBlock这个阻塞点上进行同步区块的处理 func (t *Miner) calculateBlock(block *state.BlockAgent) error { ticker := time.NewTicker(tickOnCalcBlock) defer ticker.Stop() calcdone := make(chan error, 1) go func() { err := t.ctx.Consensus.CalculateBlock(block) calcdone <- err }() for !t.IsExit() { select { case err := <-calcdone: t.log.Info("calc block done", "error", err, "height", block.GetHeight(), "blockid", utils.F(block.GetBlockid())) return err case <-ticker.C: ctx := &xctx.BaseCtx{ XLog: t.log, Timer: timer.NewXTimer(), } err := t.syncWithNeighbors(ctx) if err != nil { t.log.Warn("syncBlockWithPeers error", "error", err) } if t.ctx.Ledger.GetMeta().TrunkHeight >= block.GetHeight() { // TODO: stop CalculateBlock t.log.Info("CalculateBlock interrupted", "trunk-height", t.ctx.Ledger.GetMeta().TrunkHeight, "block-height", block.GetHeight()) return errCalculateBlockInterrupt } } } if t.IsExit() { return errors.New("miner already exit") } return nil } func (t *Miner) confirmBlockForMiner(ctx xctx.XContext, block *lpb.InternalBlock) error { tip := t.ctx.Ledger.GetMeta().TipBlockid if !bytes.Equal(block.PreHash, tip) { ctx.GetLog().Warn("confirmBlockForMiner error", "tip", utils.F(tip), "prehash", utils.F(block.PreHash)) return errors.New("confirm block prehash mismatch") } // 账本确认区块 
confirmStatus := t.ctx.Ledger.ConfirmBlock(block, false) ctx.GetTimer().Mark("ConfirmBlock") if confirmStatus.Succ { if confirmStatus.Orphan { ctx.GetLog().Trace("the mined blocked was attached to branch,no need to play", "blockId", utils.F(block.Blockid)) return nil } ctx.GetLog().Trace("ledger confirm block success", "height", block.Height, "blockId", utils.F(block.Blockid)) } else { ctx.GetLog().Warn("ledger confirm block failed", "err", confirmStatus.Error, "blockId", utils.F(block.Blockid)) return errors.New("ledger confirm block error") } // 状态机确认区块 err := t.ctx.State.PlayForMiner(block.Blockid) ctx.GetTimer().Mark("PlayForMiner") if err != nil { ctx.GetLog().Warn("state play error ", "error", err, "blockId", utils.F(block.Blockid)) } // 共识确认区块 blkAgent := state.NewBlockAgent(block) err = t.ctx.Consensus.ProcessConfirmBlock(blkAgent) ctx.GetTimer().Mark("ProcessConfirmBlock") if err != nil { ctx.GetLog().Warn("consensus confirm block error", "err", err, "blockId", utils.F(block.Blockid)) return err } ctx.GetLog().Trace("confirm block for miner succ", "blockId", utils.F(block.Blockid)) return nil }
package built

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/eager7/elog"
	"github.com/parnurzeal/gorequest"
)

// URLTokenList is the canonical MyEtherWallet list of ERC-20 tokens.
const URLTokenList = "https://raw.githubusercontent.com/MyEtherWallet/ethereum-lists/master/dist/tokens/eth/tokens-eth.json"

var log = elog.NewLogger("constructor", elog.DebugLevel)

// Logo describes a token's image assets as published upstream.
type Logo struct {
	Src string `json:"src"`
	// Width and Height arrive sometimes as strings and sometimes as numbers
	// in the upstream JSON, so they are decoded as interface{} and
	// normalized to strings in TokenListFromGit.
	Width    interface{} `json:"width"`
	Height   interface{} `json:"height"`
	IpfsHash string      `json:"ipfs_hash"`
}

// Support holds a token project's support contact endpoints.
type Support struct {
	Email string `json:"email"`
	Url   string `json:"url"`
}

// Social holds a token project's social-media links.
type Social struct {
	Blog      string `json:"blog"`
	Chat      string `json:"chat"`
	Facebook  string `json:"facebook"`
	Forum     string `json:"forum"`
	Github    string `json:"github"`
	Gitter    string `json:"gitter"`
	Instagram string `json:"instagram"`
	Linkedin  string `json:"linkedin"`
	Reddit    string `json:"reddit"`
	Slack     string `json:"slack"`
	Telegram  string `json:"telegram"`
	Twitter   string `json:"twitter"`
	Youtube   string `json:"youtube"`
}

// TokenInfo is one entry of the upstream token list.
type TokenInfo struct {
	Symbol     string  `json:"symbol"`
	Name       string  `json:"name"`
	Type       string  `json:"type"`
	Address    string  `json:"address"`
	EnsAddress string  `json:"ens_address"`
	Decimals   int     `json:"decimals"`
	Website    string  `json:"website"`
	Logo       Logo    `json:"logo"`
	Support    Support `json:"support"`
	Social     Social  `json:"social"`
}

// Bytes converts the upstream TokenInfo into this repository's Token layout
// and returns it as indented JSON.
func (token *TokenInfo) Bytes() ([]byte, error) {
	t := Token{
		Name:     token.Name,
		Symbol:   token.Symbol,
		Contract: token.Address,
		Decimals: token.Decimals,
		Logo:     token.Logo.Src,
		Desc: struct {
			En string `json:"en"`
			Zh string `json:"zh"`
		}{
			En: token.Type,
			Zh: token.Type,
		},
		WebSite:    token.Website,
		WhitePaper: token.Support.Url,
		Invalid:    true,
		Links: struct {
			Twitter  string `json:"twitter"`
			Telegram string `json:"telegram"`
		}{
			Twitter:  token.Social.Twitter,
			Telegram: token.Social.Telegram,
		},
	}
	return json.MarshalIndent(t, "", " ")
}

// TokenListFromGit downloads and decodes the token list at url, normalizing
// numeric logo dimensions to strings.
func TokenListFromGit(url string) (tokenLists []TokenInfo, err error) {
	requester := gorequest.New().Get(url).Timeout(time.Second*5).
		Retry(5, time.Second, http.StatusRequestTimeout, http.StatusBadRequest)
	resp, body, errs := requester.EndBytes()
	if len(errs) != 0 || resp == nil || resp.StatusCode != http.StatusOK {
		if resp != nil {
			if req, reqErr := requester.MakeRequest(); reqErr == nil && req != nil {
				fmt.Printf("request status:%d, body:%+v\n", resp.StatusCode, req)
			}
		}
		var errStr string
		for _, e := range errs {
			errStr += e.Error()
		}
		// The original could return errors.New("") when the status was bad
		// but no transport error occurred; include the status instead.
		if errStr == "" && resp != nil {
			errStr = "unexpected status: " + resp.Status
		}
		return nil, errors.New(errStr)
	}
	if err := json.Unmarshal(body, &tokenLists); err != nil {
		return nil, err
	}
	// Mutate through the index: the original ranged by value, so the
	// normalized Width/Height were written to a copy and silently discarded.
	// Type assertions with ok also avoid the reflect.TypeOf(nil) panic when
	// a dimension is absent, and tolerate Width/Height of differing types.
	for i := range tokenLists {
		if w, ok := tokenLists[i].Logo.Width.(float64); ok {
			tokenLists[i].Logo.Width = fmt.Sprintf("%v", w)
		}
		if h, ok := tokenLists[i].Logo.Height.(float64); ok {
			tokenLists[i].Logo.Height = fmt.Sprintf("%v", h)
		}
	}
	return tokenLists, nil
}

// InitializeTokens writes one directory per token under dir, throttling
// between tokens to be polite to the icon host.
func InitializeTokens(dir string, tokenLists []TokenInfo, skip bool) error {
	for _, token := range tokenLists {
		time.Sleep(time.Millisecond * 500)
		if err := os.MkdirAll(fmt.Sprintf("%s/%s", dir, strings.ToLower(token.Address)), os.ModePerm); err != nil {
			return err
		}
		if err := WriteTokenInfo(dir, token, skip); err != nil {
			return err
		}
	}
	return nil
}

// WriteTokenInfo writes <dir>/<address>/token.json and best-effort fetches
// token.png. When skip is true an existing token.json is left untouched.
func WriteTokenInfo(dir string, token TokenInfo, skip bool) error {
	f := fmt.Sprintf("%s/%s/token.json", dir, strings.ToLower(token.Address))
	if _, err := os.Stat(f); err == nil || os.IsExist(err) {
		log.Debug("the contract is exist, rewrite info:", token.Address, token.Name, token.Symbol)
		if skip {
			return nil
		}
	}
	file, err := os.OpenFile(f, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer checkError(file.Close)
	data, err := token.Bytes()
	if err != nil {
		return err
	}
	if _, err := file.Write(data); err != nil {
		return err
	}
	// Fetch the icon with up to three retries (four attempts total, same as
	// the original goto loop); icon failures are logged, not fatal.
	p := fmt.Sprintf("%s/%s/token.png", dir, strings.ToLower(token.Address))
	var iconErr error
	for attempt := 0; attempt < 4; attempt++ {
		if iconErr = RequestIcon(token.Logo.Src, p); iconErr == nil {
			break
		}
	}
	if iconErr != nil {
		log.Error("request icon err:", iconErr)
	}
	log.Info("write success:", token.Address, FormatSymbol(token.Symbol))
	return nil
}

// RequestIcon downloads url into file p. A blank url is a no-op.
func RequestIcon(url, p string) error {
	if url == "" {
		return nil
	}
	client := http.Client{Timeout: time.Second * 5}
	resp, err := client.Get(url)
	if err != nil {
		log.Error("get" + url + "error:" + err.Error())
		return err
	}
	// Close unconditionally; the original leaked the body — and returned a
	// nil error, masking the failure — when the status was not 200.
	defer checkError(resp.Body.Close)
	if resp.StatusCode != http.StatusOK {
		log.Error("get" + url + "error:" + resp.Status)
		return errors.New("get " + url + " status: " + resp.Status)
	}
	pix, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// The original ignored this error and wrote a truncated/empty file.
		return errors.New("read icon body err: " + err.Error())
	}
	out, err := os.Create(p)
	if err != nil {
		return errors.New("os create png err: " + err.Error())
	}
	defer checkError(out.Close)
	if _, err = io.Copy(out, bytes.NewReader(pix)); err != nil {
		return errors.New("io copy err: " + err.Error())
	}
	return nil
}

// FormatSymbol strips characters that are unsafe in file names / shell use
// from a token symbol, and removes all spaces.
func FormatSymbol(s string) string {
	if !strings.ContainsAny(s, `;'\"&<>$ф`) {
		return strings.Replace(s, ` `, ``, -1)
	}
	s = strings.Replace(s, `;`, ``, -1)
	s = strings.Replace(s, `'`, ``, -1)
	s = strings.Replace(s, `\`, ``, -1)
	s = strings.Replace(s, `"`, ``, -1)
	s = strings.Replace(s, `&`, ``, -1)
	s = strings.Replace(s, `<`, ``, -1)
	s = strings.Replace(s, `>`, ``, -1)
	s = strings.Replace(s, `$`, ``, -1)
	s = strings.Replace(s, `ф`, ``, -1)
	return strings.Replace(s, ` `, ``, -1)
}

// checkError runs a cleanup closure (e.g. file.Close) and prints any error,
// letting deferred closes report failures without aborting.
func checkError(f func() error) {
	if err := f(); err != nil {
		fmt.Println(err)
	}
}
package martinier

import (
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
	"time"
)

// User is a MongoDB-backed account record stored in the "users" collection.
type User struct {
	ID        bson.ObjectId `bson:"_id,omitempty"`
	Email     string        `json:"email"`
	FirstName string        `json:"first_name"`
	LastName  string        `json:"last_name"`
	CreatedAt time.Time     `json:"created_at"`
	UpdatedAt time.Time     `json:"updated_at"`
}

// ensureUserIndex enforces a unique index on email, dropping duplicate
// documents (DropDups). Panics if index creation fails.
func ensureUserIndex(db *mgo.Database) {
	index := mgo.Index{
		Key:      []string{"email"},
		Unique:   true,
		DropDups: true,
	}
	indexErr := db.C("users").EnsureIndex(index)
	if indexErr != nil {
		panic(indexErr)
	}
}

// all returns every user document.
// NOTE(review): this data layer panics on any query error — callers cannot
// recover; consider returning ([]User, error) instead.
func (model *User) all(db *mgo.Database) []User {
	users := []User{}
	err := db.C("users").Find(nil).All(&users)
	if err != nil {
		panic(err)
	}
	return users
}

// single fetches one user by _id; panics if not found or on query error.
func (model *User) single(db *mgo.Database, id bson.ObjectId) *User {
	user := User{}
	err := db.C("users").Find(bson.M{"_id": id}).One(&user)
	if err != nil {
		panic(err)
	}
	return &user
}

// store inserts binded with a fresh ObjectId and local timestamps, then
// re-reads the document so the returned value reflects what was persisted.
func (model *User) store(db *mgo.Database, binded User) *User {
	binded.ID = bson.NewObjectId()
	binded.CreatedAt = time.Now().Local()
	binded.UpdatedAt = time.Now().Local()
	err := db.C("users").Insert(binded)
	if err != nil {
		panic(err)
	}
	return model.single(db, binded.ID)
}
package main

// Leetcode 605. (easy)
// canPlaceFlowers reports whether n additional flowers fit into flowerbed
// (1 = occupied, 0 = empty) without any two flowers becoming adjacent.
// Greedy left-to-right scan: whenever a plot can legally take a flower we
// plant it immediately — planting as early as possible is never worse.
func canPlaceFlowers(flowerbed []int, n int) bool {
	planted := 0
	for i := 0; i < len(flowerbed); {
		switch {
		case flowerbed[i] == 1:
			// Occupied: the adjacent plot to the right is unusable, skip it.
			i += 2
		case i == len(flowerbed)-1 || flowerbed[i+1] == 0:
			// Empty, with an empty (or absent) right neighbour. The left
			// neighbour is guaranteed empty too: a flower there would have
			// jumped the scan past this plot.
			planted++
			i += 2
		default:
			// Empty but the right neighbour is occupied; move on.
			i++
		}
	}
	return planted >= n
}
package unimatrix

// NewActivitiesOperation returns an Operation scoped to the "activities"
// resource of the given realm; it simply delegates to NewRealmOperation.
func NewActivitiesOperation(realm string) *Operation {
	return NewRealmOperation(realm, "activities")
}
// Copyright (C) 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stub import ( "context" "io" "sync" "github.com/google/gapid/core/event/task" "github.com/google/gapid/core/os/shell" ) // Response is an implementation of Target that always gives exactly the same response. type Response struct { // WaitSignal if set is waited on inside the Wait method of the process WaitSignal task.Signal // KillTask is invoked if it is non nil and the Kill method is called. KillTask task.Task // StartErr is returned by the target Start method if set. StartErr error // WaitErr is returned from the Wait method of the Process if set. WaitErr error // KillErr is returned from the Kill method of the Process if set. KillErr error // Stdout is the string to write as the standard output of the process. Stdout string // Stderr is the string to write as the standard error of the process. 
Stderr string } func (t *Response) Start(cmd shell.Cmd) (shell.Process, error) { if t.StartErr != nil { return nil, t.StartErr } return &responseProcess{cmd: cmd, response: t}, nil } type responseProcess struct { once sync.Once cmd shell.Cmd response *Response } func (p *responseProcess) Wait(ctx context.Context) error { if p.response.WaitSignal != nil { p.response.WaitSignal.Wait(ctx) } p.once.Do(func() { if p.cmd.Stdout != nil { io.WriteString(p.cmd.Stdout, p.response.Stdout) } if p.cmd.Stderr != nil { io.WriteString(p.cmd.Stdout, p.response.Stderr) } }) return p.response.WaitErr } func (p *responseProcess) Kill() error { if p.response.KillTask != nil { p.response.KillTask(context.Background()) } return p.response.KillErr }
package api

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strings"
)

// SaveAthenaData posts the S3 key of an Athena result file to the app's
// save-athena-data endpoint. The key layout is
// <S3_FILE_PATH>/<dataSource>/<year>/<month>/<day>/<s3FileName>.csv, with the
// date parts taken from queryParams. Errors are printed, not returned,
// matching the original fire-and-forget interface.
func SaveAthenaData(s3FileName string, queryParams map[string]string, dataSource string) {
	s3FilePath := os.Getenv("S3_FILE_PATH")
	appHost := os.Getenv("APP_HOST")
	s3Key := fmt.Sprint(s3FilePath, "/", dataSource, "/", queryParams["year"], "/",
		queryParams["month"], "/", queryParams["day"], "/", s3FileName, ".csv")
	postURL := fmt.Sprint(appHost, "/api/save-athena-data")

	formData := url.Values{
		"s3-key":     {s3Key},
		"dataSource": {dataSource},
	}

	client := &http.Client{}
	req, err := http.NewRequest("POST", postURL, strings.NewReader(formData.Encode()))
	if err != nil {
		// Fix: this error was previously ignored; a malformed APP_HOST
		// would have produced a nil request passed to client.Do.
		fmt.Println(err)
		return
	}
	req.Header.Add("x-badger-auth", os.Getenv("APP_AUTH"))
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")

	res, err := client.Do(req)
	if err != nil {
		// Fix: return instead of falling through — res is nil here and the
		// original's deferred res.Body.Close() panicked.
		fmt.Println(err)
		return
	}
	defer res.Body.Close()

	body, bodyErr := ioutil.ReadAll(res.Body)
	if bodyErr != nil {
		fmt.Println(bodyErr)
		return
	}
	fmt.Println(string(body))
}
package main

import (
	"fmt"
)

// Person is a minimal struct used to demonstrate pointer vs. value copies.
type Person struct {
	Name string
}

// main demonstrates the difference between copying a pointer (c and d alias
// the same Person) and dereferencing into a value (i is an independent copy).
func main() {
	c := new(Person) // c is a *Person pointing at a zero-valued Person
	c.Name = "cc"
	fmt.Println(c.Name) // "cc"

	d := c // pointer copy: d and c point at the same struct
	d.Name = "dd"
	fmt.Println(c.Name) // "dd" — mutation through d is visible via c

	i := *d // value copy: i is a detached Person
	i.Name = "ii"
	fmt.Println(c.Name) // still "dd" — i does not alias c/d
	fmt.Println(d.Name) // "dd"
	fmt.Println(i.Name) // "ii"

	// %T prints the dynamic type, %#v the Go-syntax value, %p the address
	// of the local variable itself (not the pointee).
	fmt.Printf("c type: %T\n", c)
	fmt.Printf("c : %#v\n", c)
	fmt.Printf("c ptr:%p\n", &c)
	fmt.Printf("d type: %T\n", d)
	fmt.Printf("d: %#v\n", d)
	fmt.Printf("d ptr:%p\n", &d)
	fmt.Printf("i type: %T\n", i)
	fmt.Printf("i: %#v\n", i)
	fmt.Printf("i ptr:%p\n", &i)
}
package main

import (
	"context"
	"fmt"
	"math"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

// main wires the demo/benchmark routes into gin and hands control to the
// App Engine runtime. Handlers are defined in sibling files of this package.
func main() {
	route := gin.Default()
	http.Handle("/", route)
	disableGinDebugLog()

	// Does log output stay grouped per request scope?
	route.GET("/01", handleLog)
	// Put via the AE Datastore API.
	route.GET("/02", handleAEDatastore)
	// Get via the AE Datastore API.
	route.GET("/02/get", handleAEDatastoreRead)
	// Put via the Cloud Datastore API.
	route.GET("/03", handleCloudDatastore)
	// Read via the Cloud Datastore API.
	route.GET("/03/get", handleCloudDatastoreRead)
	// Check tracing of urlfetch.
	route.GET("/04", handleUrlFetch)
	// Check tracing of a plain http get.
	route.GET("/05", handleHttpGet)
	// Check behaviour of the delay package.
	route.GET("/06", handleTQDelay)
	// Benchmarks.
	route.GET("/91/init", handleInit)
	route.GET("/91", handlePerformance)
	// Inter-continental benchmarks.
	route.GET("/92/run", handleEchoRun)
	route.GET("/92", handleEcho)

	appengine.Main() // Listen
}

// disableGinDebugLog silences gin's debug logging outside the dev server.
func disableGinDebugLog() {
	if !appengine.IsDevAppServer() {
		gin.SetMode(gin.ReleaseMode)
	}
}

// Benchmarker accumulates wall-clock timings (in nanoseconds) of Do calls.
type Benchmarker struct {
	results []int64
}

// Do runs f and records its duration.
func (b *Benchmarker) Do(c context.Context, f func()) {
	// ---- Do start ----
	start := time.Now()
	f()
	diff := time.Since(start)
	// ---- Do end ----

	// append handles a nil slice, so no explicit initialization is needed.
	b.results = append(b.results, diff.Nanoseconds())
	log.Infof(c, "time : %d msec. %d nanosec", diff/1e6, diff)
}

// Result summarizes the recorded timings in milliseconds.
func (b *Benchmarker) Result() string {
	// Fix: the original divided by len(b.results)==0 for an empty run and
	// reported avg NaN (and a garbage min).
	if len(b.results) == 0 {
		return "no results"
	}
	max := int64(0)
	min := int64(math.MaxInt64)
	total := int64(0)
	for _, v := range b.results {
		if v > max {
			max = v
		}
		if v < min {
			min = v
		}
		total += v
	}
	// Fix: average in float64 before truncating to milliseconds; the
	// original computed float64(total/1e6)/count, discarding sub-millisecond
	// precision via integer division.
	avg := float64(total) / float64(len(b.results)) / 1e6
	return fmt.Sprintf("max: %d, min: %d, avg: %f", max/1e6, min/1e6, avg)
}
/* Copyright 2019 Baidu, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package reporter import ( "encoding/json" "fmt" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" "k8s.io/klog" "github.com/baidu/ote-stack/pkg/clustermessage" ) type DeploymentReporter struct { SyncChan chan clustermessage.ClusterMessage ctx *ReporterContext } // startDeploymentReporter inits deployment reporter and starts to watch deployment resource. func startDeploymentReporter(ctx *ReporterContext) error { if !ctx.IsValid() { return fmt.Errorf("ReporterContext validation failed") } deploymentReporter := &DeploymentReporter{ ctx: ctx, SyncChan: ctx.SyncChan, } // Regists EventHandler for deployment informer listing and watching deployment resource. // Although deployment has another API version, extensions/v1beta1,the apps/v1 version is the official stable version. // Just use the apps/v1 version here. ctx.InformerFactory.Apps().V1().Deployments().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: deploymentReporter.handleDeployment, UpdateFunc: func(old, new interface{}) { newDeployment := new.(*appsv1.Deployment) olddeployment := old.(*appsv1.Deployment) if newDeployment.ResourceVersion == olddeployment.ResourceVersion { // Periodic resync will send update events for all known Deployments. // Two different versions of the same Deployment will always have different RVs. 
return } deploymentReporter.handleDeployment(new) }, DeleteFunc: deploymentReporter.deleteDeployment, }) go deploymentReporter.reportFullListDeployment(ctx) return nil } // handleDeployment is used to handle the creation and update operations of the deployment. func (dr *DeploymentReporter) handleDeployment(obj interface{}) { deployment, ok := obj.(*appsv1.Deployment) if !ok { klog.Errorf("Should be Deployment object but encounter others in handleDeployment.") return } klog.V(3).Infof("handle Deployment: %s", deployment.Name) addLabelToResource(&deployment.ObjectMeta, dr.ctx) if dr.ctx.IsLightweightReport { deployment = dr.lightWeightDeployment(deployment) } // generates unique key for deployment. key, err := cache.MetaNamespaceKeyFunc(deployment) if err != nil { klog.Errorf("Failed to get map key: %s", err) return } deploymentMap := &DeploymentResourceStatus{ UpdateMap: map[string]*appsv1.Deployment{ key: deployment, }, } go dr.sendToSyncChan(deploymentMap) } // deleteDeployment is used to handle the removal of the deployment. func (dr *DeploymentReporter) deleteDeployment(obj interface{}) { deployment, ok := obj.(*appsv1.Deployment) if !ok { klog.Errorf("Should be Deployment object but encounter others in deleteDeployment") return } klog.V(3).Infof("Deployment: %s deleted.", deployment.Name) addLabelToResource(&deployment.ObjectMeta, dr.ctx) if dr.ctx.IsLightweightReport { deployment = dr.lightWeightDeployment(deployment) } // generates unique key for deployment. key, err := cache.MetaNamespaceKeyFunc(deployment) if err != nil { klog.Errorf("Failed to get map key: %s", err) return } deploymentMap := &DeploymentResourceStatus{ DelMap: map[string]*appsv1.Deployment{ key: deployment, }, } go dr.sendToSyncChan(deploymentMap) } // sendToSyncChan sends wrapped ClusterMessage data to SyncChan. 
func (dr *DeploymentReporter) sendToSyncChan(deploymentMap *DeploymentResourceStatus) { deploymentReports, err := deploymentMap.serializeMapToReporters() if err != nil { klog.Errorf("serialize map failed: %v", err) return } msg, err := deploymentReports.ToClusterMessage(dr.ctx.ClusterName()) if err != nil { klog.Errorf("change deployment Reports to clustermessage failed: %v", err) return } dr.SyncChan <- *msg } // serializeMapToReporters serializes DeploymentResourceStatus and converts to Reports. func (ds *DeploymentResourceStatus) serializeMapToReporters() (Reports, error) { deploymentJson, err := json.Marshal(ds) if err != nil { return nil, err } data := Reports{ { ResourceType: ResourceTypeDeployment, Body: deploymentJson, }, } return data, nil } // lightWeightDeployment crops the content of the deployment func (dr *DeploymentReporter) lightWeightDeployment(deployment *appsv1.Deployment) *appsv1.Deployment { return &appsv1.Deployment{ TypeMeta: deployment.TypeMeta, ObjectMeta: metav1.ObjectMeta{ Name: deployment.Name, Namespace: deployment.Namespace, Labels: deployment.Labels, }, Spec: appsv1.DeploymentSpec{ Template: deployment.Spec.Template, Selector: deployment.Spec.Selector, Replicas: deployment.Spec.Replicas, }, Status: appsv1.DeploymentStatus{ Replicas: deployment.Status.Replicas, UpdatedReplicas: deployment.Status.UpdatedReplicas, AvailableReplicas: deployment.Status.AvailableReplicas, }, } } // reportFullListDeployment report all deployment list when starts deployment reporter. func (dr *DeploymentReporter) reportFullListDeployment(ctx *ReporterContext) { if ok := cache.WaitForCacheSync(ctx.StopChan, ctx.InformerFactory.Apps().V1().Deployments().Informer().HasSynced); !ok { klog.Errorf("failed to wait for caches to sync") return } deploymentList := ctx.InformerFactory.Apps().V1().Deployments().Informer().GetIndexer().ListKeys() deploymentMap := &DeploymentResourceStatus{ FullList: deploymentList, } go dr.sendToSyncChan(deploymentMap) }
package main import ( "github.com/gorilla/http" ) // custom header // timeout,connection/read // connection pool // follow redirect? func main() { http.Client{} }
package main

import (
	"fmt"
	"net"
)

// process handles one client connection: it reads a single chunk (up to
// 1024 bytes) and prints it, then closes the connection.
func process(conn net.Conn) {
	defer conn.Close()
	buf := make([]byte, 1024)
	n, err := conn.Read(buf)
	if err != nil {
		fmt.Println("服务器端获取客户端数据异常", err)
		return
	}
	fmt.Print(string(buf[:n]))
}

// main runs a TCP echo-to-stdout server on :8888, serving each connection
// in its own goroutine.
func main() {
	listen, err := net.Listen("tcp", ":8888")
	if err != nil {
		fmt.Println("服务器监听服务异常", err)
		return
	}
	// Fix: close only after a successful Listen. The original deferred
	// listen.Close() before the error check, so a failed Listen left listen
	// nil and the deferred Close panicked.
	defer listen.Close()
	for {
		conn, err := listen.Accept()
		if err != nil {
			fmt.Println("服务器获取连接异常", err)
			continue
		}
		go process(conn)
	}
}
package hash

import (
	"fmt"

	"github.com/mitchellh/hashstructure/v2"

	"github.com/go-task/task/v3/taskfile"
)

// HashFunc maps a task to a stable string identifier (used e.g. for
// deduplicating task runs).
type HashFunc func(*taskfile.Task) (string, error)

// Empty ignores the task entirely and returns the empty identifier.
func Empty(*taskfile.Task) (string, error) {
	return "", nil
}

// Name identifies a task by its name alone.
func Name(t *taskfile.Task) (string, error) {
	return t.Task, nil
}

// Hash identifies a task by its name plus a structural hash of all of its
// fields, so two tasks with the same name but different contents get
// distinct identifiers.
func Hash(t *taskfile.Task) (string, error) {
	h, err := hashstructure.Hash(t, hashstructure.FormatV2, nil)
	return fmt.Sprintf("%s:%d", t.Task, h), err
}
package main

import "fmt"

// getAverage returns the arithmetic mean of the first n elements of arr as a
// float32. It returns 0 when n <= 0; the original divided by zero and
// produced NaN for an empty input. Panics if n exceeds len(arr), as before.
func getAverage(arr []int, n int) float32 {
	if n <= 0 {
		return 0
	}
	sum := 0
	for _, v := range arr[:n] {
		sum += v
	}
	return float32(sum) / float32(n)
}

func main() {
	arr := []int{1, 2, 0, 4}
	fmt.Println(getAverage(arr, len(arr)))
}
package util

import (
	"golang.org/x/crypto/bcrypt"
)

// GenerateFromPassword hashes password with bcrypt at DefaultCost and
// returns the hash as a string (bcrypt hashes are ASCII-safe).
func GenerateFromPassword(password string) (string, error) {
	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	return string(hash), err
}

// CompareHashAndPassword reports whether password matches the bcrypt hash;
// it returns nil on match and a non-nil error otherwise.
func CompareHashAndPassword(hash, password string) error {
	return bcrypt.CompareHashAndPassword([]byte(hash), []byte(password))
}
// Package documention here
package main

import (
	"fmt"
	"log"
	"net/http"
	// _ "net/http/pprof"
	"regexp"

	"github.com/pkg/profile"
)

// main serves a single profiled endpoint on :8080; profile.Start writes a
// CPU profile when the process exits (via the deferred Stop).
func main() {
	defer profile.Start().Stop()
	http.HandleFunc("/regexp/", handlerRegex)
	// http.HandleFunc("/", handlerRoot)
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal(err)
	}
}

// func handlerRoot(w http.ResponseWriter, r *http.Request) {
// 	_, err := fmt.Fprintf(w, "Hello, world!")
// 	if err != nil {
// 		log.Fatalf("could not write to response %s", err)
// 	}
// }

// re matches paths ending in @golang.org and captures everything before it.
// NOTE(review): the dots are unescaped, so e.g. "@golangXorg" also matches —
// confirm whether `@golang\.org` was intended.
var re = regexp.MustCompile("^(.+)@golang.org$")

// handlerRegex greets the gopher named in the URL path when the path matches
// re, otherwise replies with a plain hello.
func handlerRegex(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("content-type", "text/plain")
	path := r.URL.Path
	match := re.FindAllStringSubmatch(path, -1)
	if match != nil {
		fmt.Fprintf(w, "Hello gopher %s", match[0][1])
		return
	}
	fmt.Fprint(w, "Hello, world!!")
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package uiauto enables automating with the ChromeOS UI through the chrome.automation API.
// The chrome.automation API is documented here: https://developer.chrome.com/extensions/automation
package uiauto

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"time"

	"chromiumos/tast/common/action"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/uiauto/checked"
	"chromiumos/tast/local/chrome/uiauto/event"
	"chromiumos/tast/local/chrome/uiauto/mouse"
	"chromiumos/tast/local/chrome/uiauto/nodewith"
	"chromiumos/tast/local/chrome/uiauto/restriction"
	"chromiumos/tast/local/chrome/uiauto/role"
	"chromiumos/tast/local/chrome/uiauto/state"
	"chromiumos/tast/local/chrome/useractions"
	"chromiumos/tast/local/coords"
	"chromiumos/tast/testing"
)

// Populates an object that matches the shape of NodeInfo.
const (
	// NodeInfoJS is a JS object-literal fragment evaluated with a `node`
	// variable in scope; its keys mirror the exported fields of NodeInfo so
	// the evaluated result can be unmarshalled directly into that struct.
	NodeInfoJS = `{
		checked: node.checked,
		className: node.className,
		description: node.description,
		htmlAttributes: node.htmlAttributes,
		location: node.location,
		name: node.name,
		restriction: node.restriction,
		role: node.role,
		selected: node.selected,
		state: node.state,
		value: node.value,
	}`
)

// Context is the context used when interacting with chrome.automation.
// Each individual UI interaction is limited by the pollOpts such that it will return an error when the pollOpts timeout.
type Context struct {
	tconn    *chrome.TestConn    // connection used to evaluate JS in the test extension
	pollOpts testing.PollOptions // interval/timeout applied to every polled UI interaction
}

// New returns an Context that uses tconn to communicate to chrome.automation.
// It sets the poll options to the default interval and timeout.
func New(tconn *chrome.TestConn) *Context {
	return &Context{
		tconn: tconn,
		pollOpts: testing.PollOptions{
			Interval: 300 * time.Millisecond,
			Timeout:  15 * time.Second,
		},
	}
}

// WithTimeout returns a new Context with the specified timeout.
func (ac *Context) WithTimeout(timeout time.Duration) *Context { return &Context{ tconn: ac.tconn, pollOpts: testing.PollOptions{ Interval: ac.pollOpts.Interval, Timeout: timeout, }, } } // WithInterval returns a new Context with the specified polling interval. func (ac *Context) WithInterval(interval time.Duration) *Context { return &Context{ tconn: ac.tconn, pollOpts: testing.PollOptions{ Interval: interval, Timeout: ac.pollOpts.Timeout, }, } } // WithPollOpts returns a new Context with the specified polling options. func (ac *Context) WithPollOpts(pollOpts testing.PollOptions) *Context { return &Context{ tconn: ac.tconn, pollOpts: pollOpts, } } // Action is a function that takes a context and returns an error. type Action = action.Action // NamedAction gives a name to an action. It logs when an action starts, // and if the action fails, tells you the name of the failing action. func NamedAction(name string, fn Action) Action { return action.Named(name, fn) } // UserAction wraps an action with context information. // For more details, refer to https://source.chromium.org/chromiumos/chromiumos/codesearch/+/main:src/platform/tast-tests/src/chromiumos/tast/local/chrome/useractions/README.md. func UserAction(name string, fn Action, uc *useractions.UserContext, cfg *useractions.UserActionCfg) Action { userAction := useractions.NewUserAction(name, fn, uc, cfg) return userAction.Run } // Combine combines a list of functions from Context to error into one function. // Combine adds the name of the operation into the error message to clarify the step. // It is recommended to start the name of operations with a verb, e.g., // // "open Downloads and right click a folder" // // Then the failure msg would be like: // // "failed to open Downloads and right click a folder on step ..." func Combine(name string, steps ...Action) Action { return action.Combine(name, steps...) 
} // NamedCombine is the same as Combine, which combines the list of functions from Context to error into one function. // If the action fails, tells you the name of the failed operation. // In addtion, it logs when actions starts. func NamedCombine(name string, steps ...Action) Action { return action.Named(name, action.Combine(name, steps...)) } // Retry returns a function that retries a given action if it returns error. // The action will be executed up to n times, including the first attempt. // The last error will be returned. Any other errors will be silently logged. func Retry(n int, fn Action) Action { return action.Retry(n, fn, 0) } // RetrySilently returns a function that retries a given action if it returns error. // The action will be executed up to n times, including the first attempt. // The last error will be returned. Any other errors will be ignored. func RetrySilently(n int, fn Action) Action { return action.RetrySilently(n, fn, 0) } // Repeat returns a function that runs the specified function repeatedly for the specific number of times. func Repeat(n int, fn Action) Action { return func(ctx context.Context) error { for i := 0; i < n; i++ { if err := fn(ctx); err != nil { return err } } return nil } } // NodeInfo is a mapping of chrome.automation API AutomationNode. // It is used to get information about a specific node from JS to Go. // NodeInfo intentionally leaves out many properties. If they become needed, add them to the Node struct. // As defined in chromium/src/extensions/common/api/automation.idl // Exported fields are sorted in alphabetical order. 
type NodeInfo struct {
	Checked        checked.Checked         `json:"checked,omitempty"`
	ClassName      string                  `json:"className,omitempty"`
	Description    string                  `json:"description,omitempty"`
	HTMLAttributes map[string]string       `json:"htmlAttributes,omitempty"`
	Location       coords.Rect             `json:"location,omitempty"`
	Name           string                  `json:"name,omitempty"`
	Restriction    restriction.Restriction `json:"restriction,omitempty"`
	Role           role.Role               `json:"role,omitempty"`
	Selected       bool                    `json:"selected,omitempty"`
	State          map[state.State]bool    `json:"state,omitempty"`
	Value          string                  `json:"value,omitempty"`
}

// Info returns the information for the node found by the input finder.
func (ac *Context) Info(ctx context.Context, finder *nodewith.Finder) (*NodeInfo, error) {
	q, err := finder.GenerateQuery()
	if err != nil {
		return nil, err
	}
	// The generated query leaves a `node` variable in scope; NodeInfoJS
	// serializes its fields so they unmarshal straight into NodeInfo.
	query := fmt.Sprintf(`
	(async () => {
		%s
		return %s;
	})()
	`, q, NodeInfoJS)
	var out NodeInfo
	// Poll because the node may not exist yet when the query first runs.
	err = testing.Poll(ctx, func(ctx context.Context) error {
		return ac.tconn.Eval(ctx, query, &out)
	}, &ac.pollOpts)
	return &out, err
}

// NodesInfo returns an array of the information for the nodes found by the input finder.
// Note that the returning array might not contain any node.
func (ac *Context) NodesInfo(ctx context.Context, finder *nodewith.Finder) ([]NodeInfo, error) {
	q, err := finder.GenerateQueryForMultipleNodes()
	if err != nil {
		return nil, err
	}
	// Here the generated query leaves a `nodes` collection in scope; each
	// element is serialized with the same NodeInfoJS shape as Info.
	query := fmt.Sprintf(`
	(async () => {
		%s
		var result = [];
		nodes.forEach(function(node) {
			result.push(%s);
		});
		return result
	})()
	`, q, NodeInfoJS)
	var out []NodeInfo
	err = testing.Poll(ctx, func(ctx context.Context) error {
		return ac.tconn.Eval(ctx, query, &out)
	}, &ac.pollOpts)
	return out, err
}

// Matches returns whether |finder| matches |actual|. Another way of
// saying this is "does |finder| map to |actual|?" or "are the properties listed
// in |finder| present in |actual|?".
func (ac *Context) Matches(ctx context.Context, finder *nodewith.Finder, actual *NodeInfo) (bool, error) {
	candidate, err := ac.Info(ctx, finder)
	if err != nil {
		return false, errors.Wrap(err, "failed to find NodeInfo")
	}
	// Deep equality of every exported NodeInfo field decides the match.
	return reflect.DeepEqual(*candidate, *actual), nil
}

// Location returns the location of the node found by the input finder.
// It will wait until the location is the same for a two iterations of polling.
func (ac *Context) Location(ctx context.Context, finder *nodewith.Finder) (*coords.Rect, error) {
	q, err := finder.GenerateQuery()
	if err != nil {
		return nil, err
	}
	query := fmt.Sprintf(`
	(async () => {
		%s
		return node.location;
	})()
	`, q)
	var lastLocation coords.Rect
	var currentLocation coords.Rect
	start := time.Now()
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if err := ac.tconn.Eval(ctx, query, &currentLocation); err != nil {
			// Reset lastLocation on error.
			lastLocation = coords.Rect{}
			return err
		}
		if currentLocation != lastLocation {
			// Location changed since the previous poll: remember it and keep
			// polling until two consecutive reads agree.
			lastLocation = currentLocation
			elapsed := time.Since(start)
			return errors.Errorf("node has not stopped changing location after %s, perhaps increase timeout or use ImmediateLocation", elapsed)
		}
		return nil
	}, &ac.pollOpts); err != nil {
		return nil, err
	}
	return &currentLocation, nil
}

// ImmediateLocation returns the location of the node found by the input finder.
// It will not wait for the location to be stable.
func (ac *Context) ImmediateLocation(ctx context.Context, finder *nodewith.Finder) (*coords.Rect, error) {
	q, err := finder.GenerateQuery()
	if err != nil {
		return nil, err
	}
	query := fmt.Sprintf(`
	(async () => {
		%s
		return node.location;
	})()
	`, q)
	var loc coords.Rect
	// Single evaluation, no polling: the first reported location wins.
	if err := ac.tconn.Eval(ctx, query, &loc); err != nil {
		return nil, err
	}
	return &loc, nil
}

// WaitForLocation returns a function that waits until the node location is
// stabilized.
func (ac *Context) WaitForLocation(finder *nodewith.Finder) Action {
	return func(ctx context.Context) error {
		// Invokes Location method to wait for the location to be stabilized.
		_, err := ac.Location(ctx, finder)
		return err
	}
}

// WaitForEvent returns a function that conducts the specified action, and
// then waits for the specified event appears on the specified node. It takes
// an action as an argument rather than it is a part of a chain of action
// because it needs to set up a watcher in prior to the action, and also
// it needs to clean up the allocated resources for the watcher afterwards.
func (ac *Context) WaitForEvent(finder *nodewith.Finder, ev event.Event, act Action) Action {
	return func(ctx context.Context) error {
		// The watcher must exist before act runs so no event is missed.
		watcher, err := ac.setupWatcher(ctx, finder, ev)
		if err != nil {
			return err
		}
		defer watcher.release(ctx)
		if err := act(ctx); err != nil {
			return errors.Wrap(err, "failed to run the main action")
		}
		// Poll the JS-side event buffer until at least one event arrives.
		return testing.Poll(ctx, func(ctx context.Context) error {
			var events []map[string]interface{}
			if err := watcher.Call(ctx, &events, `function() { return this.events; }`); err != nil {
				return testing.PollBreak(err)
			}
			if len(events) == 0 {
				return errors.New("events haven't occurred yet")
			}
			return nil
		}, &ac.pollOpts)
	}
}

// WaitUntilNoEvent returns a function that waits until a specified event has stopped
// to appear for the specified node.
func (ac *Context) WaitUntilNoEvent(finder *nodewith.Finder, ev event.Event) Action {
	return func(ctx context.Context) error {
		watcher, err := ac.setupWatcher(ctx, finder, ev)
		if err != nil {
			return err
		}
		defer watcher.release(ctx)
		// -1 guarantees the first poll never matches, so at least two polls run.
		previousEventCount := -1
		var currentEventCount int
		return testing.Poll(ctx, func(ctx context.Context) error {
			var events []map[string]interface{}
			if err := watcher.Call(ctx, &events, `function() { return this.events; }`); err != nil {
				return testing.PollBreak(err)
			}
			// When the event counts are the same between two subsequent polls, events are considered to
			// have stopped appearing.
			currentEventCount = len(events)
			if previousEventCount == currentEventCount {
				return nil
			}
			previousEventCount = currentEventCount
			return errors.New("received new events between polls")
		}, &ac.pollOpts)
	}
}

// watcher is used for interfacing with the EventListener of chrome.AutomationNode to
// listen for events.
type watcher struct {
	*chrome.JSObject
}

// setupWatcher sets up a watcher for the specified event type on the specified node.
// watcher.release(ctx) is needed to clean up the allocated resources for the watcher afterwards.
func (ac *Context) setupWatcher(ctx context.Context, finder *nodewith.Finder, ev event.Event) (*watcher, error) {
	q, err := finder.GenerateQuery()
	if err != nil {
		return nil, err
	}
	// The JS-side watcher accumulates events in `events` and exposes a
	// `release` closure that unregisters the listener.
	expr := fmt.Sprintf(`async function(eventType) {
		%s
		let watcher = {
			"events": [],
			"callback": (ev) => {
				watcher.events.push(ev);
			},
			"release": () => {
				node.removeEventListener(eventType, watcher.callback);
			}
		};
		node.addEventListener(eventType, watcher.callback);
		return watcher;
	}`, q)
	obj := &chrome.JSObject{}
	if err := ac.tconn.Call(ctx, obj, expr, ev); err != nil {
		return nil, errors.Wrap(err, "failed to execute the registration")
	}
	return &watcher{obj}, nil
}

// release cleans up the allocated resources for the watcher.
func (w *watcher) release(ctx context.Context) { w.Release(ctx) w.Call(ctx, nil, `function() { this.release(); }`) } // Select sets the document selection to include everything between the two nodes at the offsets. func (ac *Context) Select(startNodeFinder *nodewith.Finder, startOffset int, endNodeFinder *nodewith.Finder, endOffset int) Action { return func(ctx context.Context) error { qStart, err := startNodeFinder.GenerateQuery() if err != nil { return err } qEnd, err := endNodeFinder.GenerateQuery() if err != nil { return err } // Use the nodeFinder code generation to get the start and end nodes. // The statements are enclosed in block to avoid naming collision. query := fmt.Sprintf(` (async () => { let startNode; let endNode; { %s startNode = node; } { %s endNode = node; } chrome.automation.setDocumentSelection({ anchorObject: startNode, anchorOffset: %d, focusObject: endNode, focusOffset: %d }); })() `, qStart, qEnd, startOffset, endOffset) return ac.tconn.Eval(ctx, query, nil) } } // Exists returns a function that returns nil if a node exists. // If any node in the chain is not found, it will return an error. func (ac *Context) Exists(finder *nodewith.Finder) Action { return func(ctx context.Context) error { q, err := finder.GenerateQuery() if err != nil { return err } query := fmt.Sprintf(` (async () => { %s })() `, q) return ac.tconn.Eval(ctx, query, nil) } } // IsNodeFound immediately checks if any nodes found with given finder. // It returns true if found otherwise false. func (ac *Context) IsNodeFound(ctx context.Context, finder *nodewith.Finder) (bool, error) { if err := ac.Exists(finder)(ctx); err != nil { if strings.Contains(err.Error(), nodewith.ErrNotFound) { return false, nil } return false, err } return true, nil } // BoundsForRange returns the location of the text within the node specified by startIndex and endIndex, inclusively. // The bounds are clipped to ancestors. 
// Refer to https://developer.chrome.com/docs/extensions/reference/automation/#type-AutomationNode. // Note: This function only works on node with role "inlineTextBox" according to the API implementation. // http://cs/eureka_internal/chromium/src/extensions/renderer/api/automation/automation_internal_custom_bindings.cc?l=995 func (ac *Context) BoundsForRange(ctx context.Context, finder *nodewith.Finder, startIndex, endIndex int) (*coords.Rect, error) { if err := ac.WaitForLocation(finder)(ctx); err != nil { return nil, err } q, err := finder.GenerateQuery() if err != nil { return nil, err } query := fmt.Sprintf(` (async () => { %s if(node.role !== "inlineTextBox"){ throw new Error("BoundsForRange only works on node with Role inlineTextBox."); } let bounds; node.boundsForRange(%d, %d, (res) => {bounds = res;}); return bounds; })() `, q, startIndex, endIndex) var out coords.Rect if err := testing.Poll(ctx, func(ctx context.Context) error { return ac.tconn.Eval(ctx, query, &out) }, &ac.pollOpts); err != nil { return nil, err } return &out, nil } // WaitUntilExists returns a function that waits until the node found by the input finder exists. func (ac *Context) WaitUntilExists(finder *nodewith.Finder) Action { return func(ctx context.Context) error { return testing.Poll(ctx, ac.Exists(finder), &ac.pollOpts) } } // WaitUntilEnabled returns a function that waits until the node found by the // input finder is not disabled. Use it when an action should be taken after // the node is enabled. E.g. 
// uiauto.Combine("Click 'Save' button", // // ui.WaitUntilEnabled(saveButton), // ui.LeftClick(saveButton) // // ) func (ac *Context) WaitUntilEnabled(finder *nodewith.Finder) Action { return func(ctx context.Context) error { return testing.Poll(ctx, func(ctx context.Context) error { nodeInfo, err := ac.Info(ctx, finder) if err != nil { return err } if nodeInfo.Restriction == restriction.Disabled { return errors.Wrapf(err, "%v is disabled", nodeInfo.Name) } return nil }, &ac.pollOpts) } } // WaitUntilCheckedState returns a function that waits until the node reaches the // expected state. This property is typically used in following nodes: // {Switch, RadioButton, CheckBox} func (ac *Context) WaitUntilCheckedState(finder *nodewith.Finder, expectedState bool) Action { return func(ctx context.Context) error { return testing.Poll(ctx, func(ctx context.Context) error { nodeInfo, err := ac.Info(ctx, finder) if err != nil { return err } isNodeChecked := (nodeInfo.Checked == checked.True) if isNodeChecked != expectedState { return errors.Wrapf(err, "%v is disabled", nodeInfo.Name) } return nil }, &ac.pollOpts) } } // ErrNodeAppeared is returned if node is expected not to be visible var ErrNodeAppeared = errors.New("node appeared when it should not") // EnsureGoneFor returns a function that check the specified node does not // exist for the timeout period. Notice the usage of this function in your // code: // 1. If you expect an ui-node to go away and not to appear again use // WaitUntilGone succeeded with EnsureGoneFor. // 2. If you expect an ui-node not to appear at all use EnsureGoneFor. func (ac *Context) EnsureGoneFor(finder *nodewith.Finder, duration time.Duration) Action { return func(ctx context.Context) error { // Use custom timeout watchdog rather than relying on context due to // possible race condition. 
More context is here https://groups.google.com/a/google.com/g/tast-reviewers/c/sGxqggEGVAg/ start := time.Now() return testing.Poll(ctx, func(ctx context.Context) error { if err := ac.Exists(finder)(ctx); err == nil { // If node exists break the poll immediately with error. return testing.PollBreak(ErrNodeAppeared) } if time.Since(start) >= duration { // Timeout is reached and element was not found. return nil } return errors.Errorf("still waiting for the node for %.1fs", (duration - time.Since(start)).Seconds()) }, nil, ) } } // Gone returns a function that returns nil if a node does not exist. // If any node in the chain is not found, it will return nil. func (ac *Context) Gone(finder *nodewith.Finder) Action { return func(ctx context.Context) error { q, err := finder.GenerateQuery() if err != nil { return err } query := fmt.Sprintf(` (async () => { %s return !!node; })() `, q) var exists bool if err := ac.tconn.Eval(ctx, query, &exists); err != nil { // Only consider the node gone if we get a not found error. if strings.Contains(err.Error(), nodewith.ErrNotFound) { return nil } return err } if exists { return errors.New("node still exists") } return nil } } // WaitUntilGone returns a function that waits until the node found by the input finder is gone. func (ac *Context) WaitUntilGone(finder *nodewith.Finder) Action { return func(ctx context.Context) error { return testing.Poll(ctx, ac.Gone(finder), &ac.pollOpts) } } // clickType describes how user clicks mouse. type clickType int const ( leftClick clickType = iota rightClick doubleClick ) // mouseClick returns a function that clicks on the location of the node found by the input finder. // It will wait until the location is stable before clicking. // This returns a function to make it chainable in ui.Run. 
func (ac *Context) mouseClick(ct clickType, finder *nodewith.Finder) Action {
	return func(ctx context.Context) error {
		// Location waits for the node position to stabilize first.
		loc, err := ac.Location(ctx, finder)
		if err != nil {
			return err
		}
		switch ct {
		case leftClick:
			return mouse.Click(ac.tconn, loc.CenterPoint(), mouse.LeftButton)(ctx)
		case rightClick:
			return mouse.Click(ac.tconn, loc.CenterPoint(), mouse.RightButton)(ctx)
		case doubleClick:
			return mouse.DoubleClick(ac.tconn, loc.CenterPoint(), 100*time.Millisecond)(ctx)
		default:
			return errors.New("invalid click type")
		}
	}
}

// MousePress presses a mouse button and holds it on the node. The press needs to be released by caller.
func (ac *Context) MousePress(button mouse.Button, finder *nodewith.Finder) Action {
	return func(ctx context.Context) error {
		loc, err := ac.Location(ctx, finder)
		if err != nil {
			return err
		}
		// Move instantly (duration 0) to the node center, then press.
		return NamedCombine("Move mouse to node and press",
			mouse.Move(ac.tconn, loc.CenterPoint(), 0),
			mouse.Press(ac.tconn, button),
		)(ctx)
	}
}

// MouseRelease releases the certain mouse button.
func (ac *Context) MouseRelease(button mouse.Button) Action {
	return mouse.Release(ac.tconn, button)
}

// MouseClickAtLocation returns a function that clicks on the specified location.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) MouseClickAtLocation(ct clickType, loc coords.Point) Action {
	switch ct {
	case leftClick:
		return mouse.Click(ac.tconn, loc, mouse.LeftButton)
	case rightClick:
		return mouse.Click(ac.tconn, loc, mouse.RightButton)
	case doubleClick:
		return mouse.DoubleClick(ac.tconn, loc, 100*time.Millisecond)
	default:
		return func(ctx context.Context) error {
			return errors.New("invalid click type")
		}
	}
}

// immediateMouseClick returns a function that clicks on the location of the node found by the input finder.
// It will not wait until the location is stable before clicking.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) immediateMouseClick(ct clickType, finder *nodewith.Finder) Action {
	return func(ctx context.Context) error {
		// ImmediateLocation uses the first reported position without waiting.
		loc, err := ac.ImmediateLocation(ctx, finder)
		if err != nil {
			return err
		}
		switch ct {
		case leftClick:
			return mouse.Click(ac.tconn, loc.CenterPoint(), mouse.LeftButton)(ctx)
		case rightClick:
			return mouse.Click(ac.tconn, loc.CenterPoint(), mouse.RightButton)(ctx)
		case doubleClick:
			return mouse.DoubleClick(ac.tconn, loc.CenterPoint(), 100*time.Millisecond)(ctx)
		default:
			return errors.New("invalid click type")
		}
	}
}

// LeftClick returns a function that left clicks on the location of the node found by the input finder.
// It will wait until the location is stable before clicking.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) LeftClick(finder *nodewith.Finder) Action {
	return ac.mouseClick(leftClick, finder)
}

// RightClick returns a function that right clicks on the location of the node found by the input finder.
// It will wait until the location is stable before clicking.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) RightClick(finder *nodewith.Finder) Action {
	return ac.mouseClick(rightClick, finder)
}

// DoubleClick returns a function that double clicks on the location of the node found by the input finder.
// It will wait until the location is stable before clicking.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) DoubleClick(finder *nodewith.Finder) Action {
	return ac.mouseClick(doubleClick, finder)
}

// ImmediateLeftClick returns a function that left clicks on the location of the node found by the input finder.
// It will not wait until the location is stable before clicking.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) ImmediateLeftClick(finder *nodewith.Finder) Action {
	return ac.immediateMouseClick(leftClick, finder)
}

// ImmediateRightClick returns a function that right clicks on the location of the node found by the input finder.
// It will not wait until the location is stable before clicking.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) ImmediateRightClick(finder *nodewith.Finder) Action {
	return ac.immediateMouseClick(rightClick, finder)
}

// ImmediateDoubleClick returns a function that double clicks on the location of the node found by the input finder.
// It will not wait until the location is stable before clicking.
// This returns a function to make it chainable in ui.Run.
func (ac *Context) ImmediateDoubleClick(finder *nodewith.Finder) Action {
	return ac.immediateMouseClick(doubleClick, finder)
}

// LeftClickUntil returns a function that repeatedly left clicks the node until the condition returns no error.
// It will try to click the node once before it checks the condition.
// This is useful for situations where there is no indication of whether the node is ready to receive clicks.
// It uses the polling options from the Context.
func (ac *Context) LeftClickUntil(finder *nodewith.Finder, condition func(context.Context) error) Action {
	return func(ctx context.Context) error {
		// First click uses the stable-location variant.
		if err := ac.LeftClick(finder)(ctx); err != nil {
			return errors.Wrap(err, "failed to initially click the node")
		}
		// Give the UI one poll interval to react before checking.
		if err := testing.Sleep(ctx, ac.pollOpts.Interval); err != nil {
			return err
		}
		return testing.Poll(ctx, func(ctx context.Context) error {
			if err := condition(ctx); err != nil {
				// Subsequent clicks skip the stability wait to keep retries fast.
				if err := ac.ImmediateLeftClick(finder)(ctx); err != nil {
					return errors.Wrap(err, "failed to click the node")
				}
				return errors.Wrap(err, "click may not have been received yet")
			}
			return nil
		}, &ac.pollOpts)
	}
}

// RightClickUntil returns a function that repeatedly right clicks the node until the condition returns no error.
// It will try to click the node once before it checks the condition.
// This is useful for situations where there is no indication of whether the node is ready to receive clicks.
// It uses the polling options from the Context.
func (ac *Context) RightClickUntil(finder *nodewith.Finder, condition func(context.Context) error) Action {
	return func(ctx context.Context) error {
		if err := ac.RightClick(finder)(ctx); err != nil {
			return errors.Wrap(err, "failed to initially click the node")
		}
		if err := testing.Sleep(ctx, ac.pollOpts.Interval); err != nil {
			return err
		}
		return testing.Poll(ctx, func(ctx context.Context) error {
			if err := condition(ctx); err != nil {
				if err := ac.ImmediateRightClick(finder)(ctx); err != nil {
					return errors.Wrap(err, "failed to click the node")
				}
				return errors.Wrap(err, "click may not have been received yet")
			}
			return nil
		}, &ac.pollOpts)
	}
}

// RetryUntil returns a function that repeatedly does the given action until the condition returns no error.
// It will try to do action once before it checks the condition.
// It uses the polling options from the Context.
func (ac *Context) RetryUntil(action, condition Action) Action {
	return func(ctx context.Context) error {
		if err := action(ctx); err != nil {
			return errors.Wrap(err, "failed to initially do action")
		}
		if err := testing.Sleep(ctx, ac.pollOpts.Interval); err != nil {
			return err
		}
		return testing.Poll(ctx, func(ctx context.Context) error {
			if err := condition(ctx); err != nil {
				if err := action(ctx); err != nil {
					return errors.Wrap(err, "failed to do action")
				}
				return errors.Wrap(err, "action has been done but condition is not met")
			}
			return nil
		}, &ac.pollOpts)
	}
}

// DoDefaultUntil returns a function that calls doDefault() JS method until the condition returns no error.
// It will try to call doDefault() once before it checks the condition.
// This is useful for situations where there is no indication of whether the node is ready to receive clicks.
// It uses the polling options from the Context.
func (ac *Context) DoDefaultUntil(finder *nodewith.Finder, condition func(context.Context) error) Action {
	return func(ctx context.Context) error {
		if err := ac.DoDefault(finder)(ctx); err != nil {
			return errors.Wrap(err, "failed to initially click the node")
		}
		// Give the UI one poll interval to react before checking.
		if err := testing.Sleep(ctx, ac.pollOpts.Interval); err != nil {
			return err
		}
		return testing.Poll(ctx, func(ctx context.Context) error {
			if err := condition(ctx); err != nil {
				// Only retry doDefault() while the node still exists.
				if err := IfSuccessThen(ac.Exists(finder), ac.DoDefault(finder))(ctx); err != nil {
					return errors.Wrap(err, "failed to click the node")
				}
				return errors.Wrap(err, "click may not have been received yet")
			}
			return nil
		}, &ac.pollOpts)
	}
}

// FocusAndWait returns a function that calls the focus() JS method of the found node.
// This can be used to scroll to nodes which aren't currently visible, enabling them to be clicked.
// The focus event is not instant, so an EventWatcher (watcher.go) is used to check its status.
// The EventWatcher waits the duration of timeout for the event to occur.
func (ac *Context) FocusAndWait(finder *nodewith.Finder) Action {
	// Watch for a Focus event on the root while the focus() call is made.
	return ac.WaitForEvent(nodewith.Root(), event.Focus, func(ctx context.Context) error {
		q, err := finder.GenerateQuery()
		if err != nil {
			return err
		}
		query := fmt.Sprintf(`
		(async () => {
			%s
			node.focus();
		})()
		`, q)
		return testing.Poll(ctx, func(ctx context.Context) error {
			return ac.tconn.Eval(ctx, query, nil)
		}, &ac.pollOpts)
	})
}

// EnsureFocused returns a function that ensures the found node is focused.
// This can be used to focus on nodes whose state isn't certained.
// It checks the found node's state and calls FocusAndWait() only if the node is not focused.
func (ac *Context) EnsureFocused(finder *nodewith.Finder) Action {
	return func(ctx context.Context) error {
		info, err := ac.Info(ctx, finder)
		if err != nil {
			return err
		}
		// Already focused: nothing to do.
		if info.State[state.Focused] {
			return nil
		}
		if err = ac.FocusAndWait(finder)(ctx); err != nil {
			// There are chances that the focus event does not occur but the node is already focused.
			return ac.WaitUntilExists(finder.Focused())(ctx)
		}
		return nil
	}
}

// MouseMoveTo returns a function moving the mouse to hover on the center point of located node.
// When duration is 0, it moves instantly to the specified location.
// Otherwise, the cursor should move linearly during the period.
// Unlike mouse.Move which is designed to move to a fixed location,
// this function moves to the target location immediately after getting it,
// avoid the need of getting it in advance.
// It addresses the cases that the node only becomes available
// or changes location in the middle of a sequence of combined steps.
func (ac *Context) MouseMoveTo(finder *nodewith.Finder, duration time.Duration) Action {
	return func(ctx context.Context) error {
		location, err := ac.Location(ctx, finder)
		if err != nil {
			return errors.Wrapf(err, "failed to get location of %v", finder)
		}
		return mouse.Move(ac.tconn, location.CenterPoint(), duration)(ctx)
	}
}

// Sleep returns a function sleeping given time duration.
func Sleep(d time.Duration) Action {
	return func(ctx context.Context) error {
		return testing.Sleep(ctx, d)
	}
}

// MakeVisible returns a function that calls makeVisible() JS method to make found node visible.
func (ac *Context) MakeVisible(finder *nodewith.Finder) Action { return func(ctx context.Context) error { q, err := finder.GenerateQuery() if err != nil { return err } query := fmt.Sprintf(` (async () => { %s node.makeVisible(); })() `, q) if err := ac.tconn.Eval(ctx, query, nil); err != nil { return errors.Wrap(err, "failed to call makeVisible() on the node") } return nil } } // IfSuccessThen returns a function that runs action only if the first function succeeds. // The function returns an error only if the preFunc succeeds but action fails, // It returns nil in all other situations. // Example: // // dialog := nodewith.Name("Dialog").Role(role.Dialog) // button := nodewith.Name("Ok").Role(role.Button).Ancestor(dialog) // ui := uiauto.New(tconn) // if err := uiauto.IfSuccessThen(ui.WithTimeout(5*time.Second).WaitUntilExists(dialog), ui.LeftClick(button))(ctx); err != nil { // ... // } func IfSuccessThen(preFunc, fn Action) Action { return action.IfSuccessThen(preFunc, fn) } // IfSuccessThenWithLog returns a function that runs action only if the first function succeeds with a logging option. func IfSuccessThenWithLog(preFunc, fn Action) Action { return action.IfSuccessThenWithLog(preFunc, fn) } // IfFailThen returns a function that runs action only if the first function fails. // The function returns an error only if the preFunc and action both fail, // It returns nil in all other situations. func IfFailThen(preFunc, fn Action) Action { return action.IfFailThen(preFunc, fn) } // Retry returns a function that retries a given action if it returns error. // The action will be executed up to n times, including the first attempt. // The last error will be returned. Any other errors will be silently logged. // Between each run of the loop, it will sleep according the the uiauto.Context pollOpts. 
func (ac *Context) Retry(n int, fn Action) Action {
	return action.Retry(n, fn, ac.pollOpts.Interval)
}

// RetrySilently returns a function that retries a given action if it returns error.
// The action will be executed up to n times, including the first attempt.
// The last error will be returned. Any other errors will be ignored.
// Between each run of the loop, it will sleep according to the uiauto.Context pollOpts.
func (ac *Context) RetrySilently(n int, fn Action) Action {
	return action.RetrySilently(n, fn, ac.pollOpts.Interval)
}

// CheckRestriction returns a function that checks the restriction of the node found by the input finder is as expected.
// disabled/enabled is a common usecase, e.g,
//
//	CheckRestriction(installButton, restriction.Disabled)
//	CheckRestriction(installButton, restriction.None)
func (ac *Context) CheckRestriction(finder *nodewith.Finder, restriction restriction.Restriction) Action {
	return func(ctx context.Context) error {
		nodeInfo, err := ac.Info(ctx, finder)
		if err != nil {
			return err
		}
		if nodeInfo.Restriction != restriction {
			// BUG FIX: this used to be errors.Wrapf(err, ...), but err is
			// always nil here (Info succeeded above) and Wrapf returns nil
			// for a nil error, so a restriction mismatch was silently
			// reported as success. Use Errorf to build a real error.
			return errors.Errorf("failed to check restriction state: got %v, want %v", nodeInfo.Restriction, restriction)
		}
		return nil
	}
}

// DoDefault returns a function that calls doDefault() JS method to trigger the
// default action on a node regardless of its location, e.g. left click on a button.
// This function can be used when the a11y tree fails to find the accurate location
// of a node thus mouse.LeftClick() fails consequently.
func (ac *Context) DoDefault(finder *nodewith.Finder) Action {
	return func(ctx context.Context) error {
		// Build a JS snippet that resolves the node and triggers its default action.
		q, err := finder.GenerateQuery()
		if err != nil {
			return err
		}
		query := fmt.Sprintf(`
		(async () => {
			%s
			node.doDefault();
		})()
		`, q)
		// Poll because the node may not be ready for evaluation immediately.
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			return ac.tconn.Eval(ctx, query, nil)
		}, &ac.pollOpts); err != nil {
			return errors.Wrap(err, "failed to call doDefault() on the node")
		}
		return nil
	}
}

// ResetScrollOffset returns a function that calls setScrollOffset(0, 0) JS method to reset the
// scroll offset on a node to scroll it to its default scroll position.
func (ac *Context) ResetScrollOffset(finder *nodewith.Finder) Action {
	return func(ctx context.Context) error {
		q, err := finder.GenerateQuery()
		if err != nil {
			return err
		}
		query := fmt.Sprintf(`
		(async () => {
			%s
			node.setScrollOffset(0, 0);
		})()
		`, q)
		// Poll for the same reason as DoDefault: evaluation can transiently fail.
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			return ac.tconn.Eval(ctx, query, nil)
		}, &ac.pollOpts); err != nil {
			return errors.Wrap(err, "failed to call setScrollOffset() on the node")
		}
		return nil
	}
}
package main

import "fmt"

// A rune literal defaults to type rune (an alias of int32).
var runa rune = '쯼'

// Explicitly declared as byte (an alias of uint8).
var byta byte = 'a'

// With no explicit type this is inferred as rune (int32).
var bytb = 'b'

// Same underlying value as a rune would hold; prints the same as runa would
// for this code point.
var runb int32 = 52220

// A rune can be initialized either with a 'character' literal or with a
// numeric value, since it really is an int32.
var runc rune = 2344

func main() {
	for i := 124; i < 129; i++ {
		/* byte is an alias of uint8. Here we first print i, then the
		conversion (cast) of i to string, which yields a letter or other
		symbol, and then convert that string to a slice (similar to an
		array) of bytes (a string is a slice of bytes), which is the
		numeric UTF-8 representation of that character. UTF-8 encodes
		every character of every language using 1 to 4 bytes, so
		depending on the character the slice holds between 1 and 4
		bytes. */
		fmt.Println(i, "-", string(i), "-", []byte(string(i)))
	}
	/* A rune is an alias of int32, so it occupies and can use 4 bytes and
	represents one of the characters used in UTF (a code point). Being
	4 bytes -> 32 bits it allows 2^32 -> 4294967296 combinations, divided
	by 2 because it is signed (not uint32) and negative values are unused,
	so the maximum value is 2147483647 (excluding 0) — but UTF does not
	use all of those values (it uses somewhat more than 100000). A string
	is composed of a slice of bytes, not of runes; runes in turn are made
	up of between 1 and 4 bytes. */
	fmt.Println(runa, "-", string(runa), "-", []byte(string(runa))) // this character uses 3 bytes
	fmt.Println(string(runb))
	fmt.Println(string(runc))
	fmt.Printf("%T\n", runa) // in Printf, %T prints the variable's type
	fmt.Printf("%T\n", byta)
	fmt.Printf("%T\n", bytb)
}
package compute_test

import (
	"errors"

	"github.com/genevieve/leftovers/gcp/compute"
	"github.com/genevieve/leftovers/gcp/compute/fakes"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	gcpcompute "google.golang.org/api/compute/v1"
)

// Specs for compute.Disks: listing, filtering by name, and prompting the
// user before a disk is added to the deletion list.
var _ = Describe("Disks", func() {
	var (
		client *fakes.DisksClient
		logger *fakes.Logger
		zones  map[string]string

		disks compute.Disks
	)

	BeforeEach(func() {
		client = &fakes.DisksClient{}
		logger = &fakes.Logger{}
		// Maps the zone URL (as it appears on a Disk) to the short zone name.
		zones = map[string]string{"https://zone-1": "zone-1"}

		disks = compute.NewDisks(client, logger, zones)
	})

	Describe("List", func() {
		var filter string

		BeforeEach(func() {
			filter = "banana"
			logger.PromptWithDetailsCall.Returns.Proceed = true
			// Two disks: one in a known zone matching the filter, one in an
			// unknown zone that should be skipped.
			client.ListDisksCall.Returns.DiskSlice = []*gcpcompute.Disk{{
				Name: "banana-disk",
				Zone: "https://zone-1",
			}, {
				Name: "just-another-disk",
				Zone: "https://zone-2",
			}}
		})

		It("lists, filters, and prompts for disks to delete", func() {
			list, err := disks.List(filter, false)
			Expect(err).NotTo(HaveOccurred())

			Expect(client.ListDisksCall.CallCount).To(Equal(1))
			Expect(client.ListDisksCall.Receives.Zone).To(Equal("zone-1"))

			Expect(logger.PromptWithDetailsCall.CallCount).To(Equal(1))
			Expect(logger.PromptWithDetailsCall.Receives.ResourceType).To(Equal("Disk"))
			Expect(logger.PromptWithDetailsCall.Receives.ResourceName).To(Equal("banana-disk"))

			Expect(list).To(HaveLen(1))
		})

		Context("when the client fails to list disks", func() {
			BeforeEach(func() {
				client.ListDisksCall.Returns.Error = errors.New("some error")
			})

			It("returns the error", func() {
				_, err := disks.List(filter, false)
				Expect(err).To(MatchError("List Disks for zone zone-1: some error"))
			})
		})

		Context("when the disk name does not contain the filter", func() {
			It("does not add it to the list", func() {
				list, err := disks.List("grape", false)
				Expect(err).NotTo(HaveOccurred())

				Expect(client.ListDisksCall.CallCount).To(Equal(1))
				Expect(logger.PromptWithDetailsCall.CallCount).To(Equal(0))
				Expect(list).To(HaveLen(0))
			})
		})

		Context("when the user says no to the prompt", func() {
			BeforeEach(func() {
				logger.PromptWithDetailsCall.Returns.Proceed = false
			})

			It("does not add it to the list", func() {
				list, err := disks.List(filter, false)
				Expect(err).NotTo(HaveOccurred())
				Expect(list).To(HaveLen(0))
			})
		})
	})
})
/*
 * Copyright 2021 American Express
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package core

import (
	"flag"
	"os"

	cfgreader "github.com/americanexpress/earlybird/pkg/config"
	"github.com/americanexpress/earlybird/pkg/utils"
)

// Subdirectory names under the configuration directory.
const (
	rulesDir          = "rules"
	falsePositivesDir = "falsepositives"
	labelsDir         = "labels"
	solutionsDir      = "solutions"
)

// arrayFlags is a repeatable CLI flag: each occurrence appends a value.
type arrayFlags []string

// String implements flag.Value; the textual default is intentionally empty.
func (i *arrayFlags) String() string {
	return ""
}

// Set implements flag.Value by accumulating each occurrence of the flag.
func (i *arrayFlags) Set(value string) error {
	*i = append(*i, value)
	return nil
}

// Define our static CLI flags
var (
	// Error deliberately ignored: an empty home dir only affects the
	// default ignorefile path below, which the user can override.
	userHomeDir, _    = os.UserHomeDir()
	levelOptions      = utils.GetDisplayList(cfgreader.Settings.GetLevelNames())
	ptrStreamInput    = flag.Bool("stream", false, "Use stream IO as input instead of file(s)")
	enableFlags       arrayFlags
	ptrUpdateFlag     = flag.Bool("update", false, "Update module configurations")
	ptrGitStreamInput = flag.Bool("git-commit-stream", false, "Use stream IO of Git commit log as input instead of file(s) -- e.g., 'cat secrets.text > go-earlybird'")
	ptrVerbose        = flag.Bool("verbose", false, "Reports details about file reads")
	ptrSuppressSecret = flag.Bool("suppress", false, "Suppress reporting of the secret found (important if output is going to Slack or other logs)")
	ptrWorkerCount    = flag.Int("workers", 100, "Set number of workers.")
	ptrWorkLength     = flag.Int("worksize", 2500, "Set Line Wrap Length.")
	ptrMaxFileSize    = flag.Int64("max-file-size", 10240000, "Maximum file size to scan (in bytes)")
	ptrShowFullLine   = flag.Bool("show-full-line", false, "Display the full line where the pattern match was found (warning: this can be dangerous with minified script files)")
	ptrConfigDir      = flag.String("config", utils.GetConfigDir(), "Directory where configuration files are stored")
	ptrRulesOnly      = flag.Bool("show-rules-only", false, "Display rules that would be run, but do not execute a scan")
	ptrSkipComments   = flag.Bool("skip-comments", false, "Skip scanning comments in files -- applies only to the 'content' module")
	ptrIgnoreFPRules  = flag.Bool("ignore-fp-rules", false, "Ignore the false positive post-process rules")
	ptrShowSolutions  = flag.Bool("show-solutions", false, "Display recommended solution for each finding")
	ptrGitStagedFlag  = flag.Bool("git-staged", false, "Scan only git staged files")
	ptrGitTrackedFlag = flag.Bool("git-tracked", false, "Scan only git tracked files")
	ptrPath           = flag.String("path", utils.MustGetWD(), "Directory to scan (defaults to CWD) -- ABSOLUTE PATH ONLY")
	ptrOutputFormat   = flag.String("format", "console", "Output format [ console | json | csv ]")
	ptrWithConsole    = flag.Bool("with-console", false, "While using --format, this flag will help to print findings in console")
	ptrOutputFile     = flag.String("file", "", "Output file -- e.g., 'go-earlybird --file=/home/jdoe/myfile.csv'")
	ptrIgnoreFile     = flag.String("ignorefile", userHomeDir+string(os.PathSeparator)+".ge_ignore", "Patterns File (including wildcards) for files to ignore. (e.g. *.jpg)")
	ptrFailSeverityThreshold    = flag.String("fail-severity", cfgreader.Settings.TranslateLevelID(cfgreader.Settings.FailThreshold), "Lowest severity level at which to fail "+levelOptions)
	ptrDisplaySeverityThreshold = flag.String("display-severity", cfgreader.Settings.TranslateLevelID(cfgreader.Settings.DisplayThreshold), "Lowest severity level to display "+levelOptions)
	ptrDisplayConfidenceThreshold = flag.String("display-confidence", cfgreader.Settings.TranslateLevelID(cfgreader.Settings.DisplayConfidenceThreshold), "Lowest confidence level to display "+levelOptions)
	// NOTE(review): this default reuses Settings.FailThreshold, the same
	// value as --fail-severity; a dedicated fail-confidence setting seems
	// intended — confirm against the config package.
	ptrFailConfidenceThreshold = flag.String("fail-confidence", cfgreader.Settings.TranslateLevelID(cfgreader.Settings.FailThreshold), "Lowest confidence level at which to fail "+levelOptions)
	ptrModuleConfigFile        = flag.String("module-config-file", "", "Path to file with per module config settings")
	ptrDisableHttpKeepAlives   = flag.Bool("disable-keep-alives", false, "To disable keep-alives when running as http Server. By default, keep-alives are always enabled")
)
package main

import (
	"fmt"
	"runtime"
	"time"
)

// chanFlow forwards values from right to left, adding 1 to each. With a
// positive bufferLen it forwards that many values; with bufferLen <= 0 it
// forwards exactly one value.
func chanFlow(left, right chan int, bufferLen int) {
	if bufferLen <= 0 {
		left <- 1 + <-right
	} else {
		for i := 0; i < bufferLen; i++ {
			left <- 1 + <-right
		}
	}
}

// main builds a daisy chain of nruntime goroutines connected by buffered
// channels, pushes chanBuffer zeros in at one end, and reads the incremented
// results out of the other end, reporting goroutine counts and elapsed time.
func main() {
	fmt.Println("Num Chan:", runtime.NumGoroutine())
	nruntime := 100000
	chanBuffer := 1000
	result := make([]int, 0, 100)
	lastChan := make(chan int, chanBuffer)
	var left chan int = nil
	right := lastChan
	begin := time.Now()
	fmt.Println("begin at:", begin)
	// Each iteration appends one goroutine to the chain: values flow from the
	// newest channel (right) back toward lastChan.
	for i := 0; i < nruntime; i++ {
		left, right = right, make(chan int, chanBuffer)
		go chanFlow(left, right, chanBuffer)
	}
	fmt.Println("Num Chan:", runtime.NumGoroutine())
	// Feed zeros into the far end of the chain...
	for i := 0; i < chanBuffer; i++ {
		right <- 0
	}
	// ...and collect each value after it has been incremented nruntime times.
	for i := 0; i < chanBuffer; i++ {
		result = append(result, <-lastChan)
	}
	end := time.Now()
	fmt.Println("end at:", end, time.Since(begin))
	//fmt.Println(result)
	fmt.Println("Num Chan:", runtime.NumGoroutine())
}
package list

import (
	"errors"
	"fmt"
	"io"

	"github.com/operator-framework/operator-registry/alpha/action"
	"github.com/operator-framework/operator-registry/alpha/model"
	"github.com/spf13/cobra"
	kcmdutil "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/kubectl/pkg/util/templates"

	"github.com/openshift/oc-mirror/pkg/cli"
	"github.com/openshift/oc-mirror/pkg/image"
)

// OperatorsOptions holds the flag values for `oc-mirror list operators`.
type OperatorsOptions struct {
	*cli.RootOptions
	Catalog  string
	Package  string
	Channel  string
	Version  string
	Catalogs bool
}

// NewOperatorsCommand builds the `list operators` cobra command and wires its flags.
func NewOperatorsCommand(f kcmdutil.Factory, ro *cli.RootOptions) *cobra.Command {
	o := OperatorsOptions{}
	o.RootOptions = ro

	cmd := &cobra.Command{
		Use:   "operators",
		Short: "List available operator catalog content and versions",
		Example: templates.Examples(`
			# List available operator catalog release versions
			oc-mirror list operators

			# Output default operator catalogs for OpenShift release 4.8
			oc-mirror list operators --catalogs --version=4.8

			# List all operator packages in a catalog
			oc-mirror list operators --catalog=catalog-name

			# List all channels in an operator package
			oc-mirror list operators --catalog=catalog-name --package=package-name

			# List all available versions for a specified operator in a channel
			oc-mirror list operators --catalog=catalog-name --package=operator-name --channel=channel-name
		`),
		Run: func(cmd *cobra.Command, args []string) {
			kcmdutil.CheckErr(o.Complete())
			kcmdutil.CheckErr(o.Validate())
			kcmdutil.CheckErr(o.Run(cmd))
		},
	}

	fs := cmd.Flags()
	fs.BoolVar(&o.Catalogs, "catalogs", o.Catalogs, "List available catalogs for an OpenShift release version, requires --version")
	fs.StringVar(&o.Catalog, "catalog", o.Catalog, "List information for a specified catalog")
	fs.StringVar(&o.Package, "package", o.Package, "List information for a specified package")
	fs.StringVar(&o.Channel, "channel", o.Channel, "List information for a specified channel")
	fs.StringVar(&o.Version, "version", o.Version, "Specify an OpenShift release version")

	o.BindFlags(cmd.PersistentFlags())

	return cmd
}

// Complete normalizes the options: giving --version implies --catalogs.
func (o *OperatorsOptions) Complete() error {
	if len(o.Version) > 0 {
		o.Catalogs = true
	}
	return nil
}

// Validate enforces the flag dependencies (--catalogs needs --version,
// --channel needs --package and --catalog, --package needs --catalog).
func (o *OperatorsOptions) Validate() error {
	if len(o.Version) == 0 && o.Catalogs {
		return errors.New("must specify --version with --catalogs")
	}
	if len(o.Channel) > 0 && (len(o.Package) == 0 || len(o.Catalog) == 0) {
		return errors.New("must specify --catalog and --package with --channel")
	}
	if len(o.Package) > 0 && len(o.Catalog) == 0 {
		return errors.New("must specify --catalog with --package")
	}
	return nil
}

// Run dispatches on the most specific flag combination supplied and writes
// the requested listing to the command's output stream.
func (o *OperatorsOptions) Run(cmd *cobra.Command) error {
	w := o.IOStreams.Out
	ctx := cmd.Context()
	// Process cases from most specific to most broad
	switch {
	case len(o.Channel) > 0:
		// Print Version for all bundles in a channel
		var ch model.Channel
		lc := action.ListChannels{
			IndexReference: o.Catalog,
			PackageName:    o.Package,
		}
		res, err := lc.Run(ctx)
		if err != nil {
			return err
		}
		// Find target channel for searching
		// NOTE(review): if no channel matches o.Channel, ch stays zero-valued
		// and only the "VERSIONS" header is printed — consider returning an
		// error for an unknown channel name.
		for _, c := range res.Channels {
			if c.Name == o.Channel {
				ch = c
				break
			}
		}
		if _, err := fmt.Fprintln(w, "VERSIONS"); err != nil {
			return err
		}
		// List all bundle versions in channel
		for _, bndl := range ch.Bundles {
			if _, err := fmt.Fprintln(w, bndl.Version); err != nil {
				return err
			}
		}
	case len(o.Package) > 0:
		lc := action.ListChannels{
			IndexReference: o.Catalog,
			PackageName:    o.Package,
		}
		chRes, err := lc.Run(ctx)
		if err != nil {
			return err
		}
		if len(chRes.Channels) > 0 {
			// All channels share the same package; print its summary first.
			pkg := chRes.Channels[0].Package
			pkgRes := action.ListPackagesResult{
				Packages: []model.Package{*pkg},
			}
			if err := pkgRes.WriteColumns(w); err != nil {
				return err
			}
			fmt.Fprintln(w, "")
		}
		if err := chRes.WriteColumns(w); err != nil {
			return err
		}
	case len(o.Catalog) > 0:
		lp := action.ListPackages{
			IndexReference: o.Catalog,
		}
		res, err := lp.Run(ctx)
		if err != nil {
			return fmt.Errorf("failed to list operators, please check catalog name - %s : %w", o.Catalog, err)
		}
		if err := res.WriteColumns(o.IOStreams.Out); err != nil {
			return err
		}
	case o.Catalogs:
		if _, err := fmt.Fprintln(w, "Available OpenShift OperatorHub catalogs:"); err != nil {
			return err
		}
		if err := o.listCatalogs(w); err != nil {
			return err
		}
	default:
		// No flags at all: show the release versions available in the first
		// default catalog as a proxy for all of them.
		vm, err := image.GetVersionsFromImage(catalogs[0])
		if err != nil {
			return err
		}
		fmt.Fprintln(w, "Available OpenShift OperatorHub catalog versions:")
		for v := range vm {
			if _, err := fmt.Fprintf(w, "  %s\n", v); err != nil {
				return err
			}
		}
	}
	return nil
}

// catalogs are the default Red Hat OperatorHub catalog image references.
var catalogs = []string{
	"registry.redhat.io/redhat/redhat-operator-index",
	"registry.redhat.io/redhat/certified-operator-index",
	"registry.redhat.io/redhat/community-operator-index",
	"registry.redhat.io/redhat/redhat-marketplace-index",
}

// listCatalogs prints, for each default catalog, either the tagged reference
// for o.Version or a warning when that version tag does not exist.
func (o *OperatorsOptions) listCatalogs(w io.Writer) error {
	if _, err := fmt.Fprintf(w, "OpenShift %s:\n", o.Version); err != nil {
		return err
	}
	for _, catalog := range catalogs {
		versions, err := image.GetVersionsFromImage(catalog)
		if err != nil {
			// Best-effort: report and continue with the remaining catalogs.
			fmt.Fprintf(w, "Failed to get catalog version details: %s", err)
			continue
		}
		if versions["v"+o.Version] > 0 {
			fmt.Fprintf(w, "%s:v%s\n", catalog, o.Version)
		} else {
			fmt.Fprintf(w, "Invalid catalog reference, please check version: %s:v%s\n", catalog, o.Version)
		}
	}
	return nil
}
package cfstack import ( "errors" "fmt" "github.com/CleverTap/cfstack/internal/pkg/aws/cloudformation" "github.com/CleverTap/cfstack/internal/pkg/aws/s3" "github.com/CleverTap/cfstack/internal/pkg/aws/session" "github.com/CleverTap/cfstack/internal/pkg/stack" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/fatih/color" "github.com/golang/glog" "github.com/spf13/cobra" "os" "strings" "time" ) type DeployStackOpts struct { name string region string stack stack.Stack } func (opts *DeployOpts) RunStackDeploy() error { for _, region := range opts.manifest.Regions { if region.Name == opts.deployStackOpts.region { for _, s := range region.Stacks { if s.StackName == opts.deployStackOpts.name { fmt.Printf("==> %s Deploying stack %s in region %s\n", rocket, s.StackName, region.Name) sess, err := session.NewSession(&session.Opts{ Profile: opts.profile, Region: opts.deployStackOpts.region, }) if err != nil { return err } uploader := s3.New(sess) deployer := cloudformation.New(sess, opts.values) bucket, err := deployer.GetStackResourcePhysicalId("cfstack-Init", "TemplatesS3Bucket") if err != nil { return err } s.SetRegion(opts.deployStackOpts.region) s.SetUuid(opts.uid) s.SetBucket(bucket) s.TemplateRootPath = opts.templatesRoot s.Uploader = uploader s.Deployer = deployer s.RoleArn = opts.role timeout := time.After(24 * time.Hour) ticker := time.Tick(10 * time.Second) deployComplete := false for deployComplete == false { select { case <-timeout: return errors.New("too many AWS API calls. 
Try again later") case <-ticker: err = s.Deploy() if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { case "Throttling": glog.Warningf("AWS rate limit error for stack %s, retrying..", s.StackName) continue case "ValidationError": if strings.Contains(aerr.Message(), "S3 error: Access Denied") { glog.Warningf("AWS request error for stack %s, retrying..", s.StackName) continue } case "RequestError": glog.Warningf("AWS request error for stack %s - %s, retrying..", s.StackName, aerr.Message()) continue case "ChangeSetNotFound": glog.Warningf("Looks like changeset not found for stack %s, retrying..", s.StackName) continue default: glog.Errorf("unhandled AWS error for stack %s\n%s : %s", s.StackName, aerr.Code(), aerr.Message()) } } } deployComplete = true } } if err != nil { fmt.Fprintf(os.Stdout, color.RedString(" %v\n", err)) return fmt.Errorf("%s stack deployment has failed", s.StackName) } return nil } } } } return fmt.Errorf("%s stack from %s region was not found in manifest file %s", opts.deployStackOpts.name, opts.deployStackOpts.region, opts.manifestFile) } func (opts *DeployOpts) NewDeployStackCmd() *cobra.Command { opts.deployStackOpts = &DeployStackOpts{} cmd := &cobra.Command{ Use: "stack", Aliases: []string{"service"}, Short: "Deploy a single stack", Long: `Deploy specific stack using this command by passing the stack name along with manifest`, PreRunE: func(cmd *cobra.Command, args []string) error { return opts.preRun() }, Run: func(cmd *cobra.Command, args []string) { err := opts.RunStackDeploy() if err != nil { ExitWithError("Deploy stack", err) } color.New(color.Bold, color.FgGreen).Fprintf(os.Stdout, "\nDeploy stack command completed\n") }, } cmd.Flags().StringVarP(&opts.deployStackOpts.name, "name", "n", "", "Name of the stack to be deployed") cmd.Flags().StringVarP(&opts.deployStackOpts.region, "region", "r", "", "Region in which stack is to be deployed") err := cmd.MarkFlagRequired("name") if err != nil { 
ExitWithError("Deploy stack", err) } err = cmd.MarkFlagRequired("region") if err != nil { ExitWithError("Deploy stack", err) } return cmd }
// Rate limiting is an important mechanism for controlling resource
// utilization and maintaining quality of service. Go elegantly supports
// rate limiting with goroutines, channels, and tickers.
package main

import (
	"fmt"
	"time"
)

func main() {
	// First we'll look at basic rate limiting. Suppose we want to limit our
	// handling of incoming requests; we'll serve them off a channel.
	requests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		requests <- i
	}
	close(requests)

	// The limiter channel receives a value every 200ms. It is the scheduler
	// for our rate-limiting scheme.
	limiter := time.Tick(200 * time.Millisecond)

	// By blocking on a receive from limiter before serving each request, we
	// limit ourselves to one request every 200ms.
	for req := range requests {
		<-limiter
		fmt.Println("request", req, time.Now())
	}

	// Sometimes we may want to allow short bursts of requests in our rate
	// limiting scheme while preserving the overall limit. We can do this with
	// a buffered channel: burstyLimiter allows bursts of up to 3 events.
	burstyLimiter := make(chan time.Time, 3)

	// Fill up the channel to represent the allowed burst.
	for i := 0; i < 3; i++ {
		burstyLimiter <- time.Now()
	}

	// Every 200ms we try to add a new value to burstyLimiter, up to its
	// limit of 3.
	go func() {
		for t := range time.Tick(200 * time.Millisecond) {
			burstyLimiter <- t
		}
	}()

	// Now simulate 5 more incoming requests. Thanks to the burst capability
	// of burstyLimiter, the first 3 complete immediately.
	burstyRequests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		burstyRequests <- i
	}
	close(burstyRequests)
	for req := range burstyRequests {
		<-burstyLimiter
		fmt.Println("request", req, time.Now())
	}

	// Running the program, we see the first batch handled once every ~200ms
	// as expected. For the second batch, the burstable rate limiting lets us
	// serve 3 requests back to back, then the remaining 2 about 200ms apart.
}
package main

import "fmt"

func f() {
	defer fmt.Println("D")
	fmt.Println("F")
}

// A defer statement inside the called function runs just before that function
// returns, so the output order is F D M.
func main() {
	f()
	fmt.Println("M")
}
package strongly_connected_components

import (
	"reflect"
	"testing"
)

// Table of test graphs. Each entry pairs a directed Graph (vertex count,
// directed flag, edge list) with the expected component index per vertex as
// produced by StronglyConnectedComponents (defined elsewhere in this package).
var tests = []struct {
	graph      Graph
	components []int
}{
	{
		// Single vertex, no edges: one component.
		Graph{
			1,
			true,
			[]Edge{},
		},
		[]int{0},
	},
	{
		// Two isolated vertices: two components.
		Graph{
			2,
			true,
			[]Edge{},
		},
		[]int{1, 0},
	},
	{
		// One directed edge does not connect strongly.
		Graph{
			2,
			true,
			[]Edge{
				{0, 1},
			},
		},
		[]int{0, 1},
	},
	{
		// A 2-cycle collapses into a single component.
		Graph{
			2,
			true,
			[]Edge{
				{0, 1},
				{1, 0},
			},
		},
		[]int{0, 0},
	},
	{
		Graph{
			3,
			true,
			[]Edge{
				{0, 1},
				{2, 1},
			},
		},
		[]int{1, 2, 0},
	},
	{
		Graph{
			3,
			true,
			[]Edge{
				{0, 1},
				{1, 0},
				{2, 1},
			},
		},
		[]int{1, 1, 0},
	},
	{
		// Classic 8-vertex example with three components.
		Graph{
			8,
			true,
			[]Edge{
				{0, 1},
				{1, 2},
				{1, 4},
				{1, 5},
				{2, 3},
				{2, 6},
				{3, 2},
				{3, 7},
				{4, 0},
				{4, 5},
				{5, 6},
				{6, 5},
				{7, 3},
				{7, 6},
			},
		},
		[]int{0, 0, 1, 1, 0, 2, 2, 1},
	},
	{
		Graph{
			8,
			true,
			[]Edge{
				{0, 1},
				{1, 2},
				{1, 3},
				{1, 4},
				{2, 0},
				{3, 0},
				{3, 5},
				{3, 7},
				{4, 5},
				{5, 6},
				{6, 4},
				{7, 5},
			},
		},
		[]int{0, 0, 0, 0, 2, 2, 2, 1},
	},
}

// Test runs StronglyConnectedComponents over every table entry and compares
// the exact component assignment (including component numbering order).
func Test(t *testing.T) {
	for _, test := range tests {
		if c := StronglyConnectedComponents(test.graph); !reflect.DeepEqual(c, test.components) {
			t.Fatalf("For graph %+v, expected strongly connected components to be %v, was %v", test.graph, test.components, c)
		}
	}
}
package main

import "fmt"

/*
Given two strings s1, s2, find the lowest ASCII sum of deleted characters to
make the two strings equal.

Example 1:
	Input: s1 = "sea", s2 = "eat"
	Output: 231
	Deleting "s" from "sea" adds 115; deleting "t" from "eat" adds 116.
	115 + 116 = 231 is the minimum.

Example 2:
	Input: s1 = "delete", s2 = "leet"
	Output: 403
	Deleting "dee" (100+101+101) from "delete" and "e" (101) from "leet"
	leaves both equal to "let"; 403 is the minimum.

Note: 0 < s1.length, s2.length <= 1000; all characters are in [97, 122].
*/

// minimumDeleteSum computes the answer bottom-up: cost[i][j] is the minimal
// ASCII deletion sum needed to equalize the suffixes s1[i:] and s2[j:].
// Recurrence: matching characters are kept for free; otherwise delete the
// cheaper of s1[i] or s2[j].
func minimumDeleteSum(s1 string, s2 string) int {
	m, n := len(s1), len(s2)
	cost := make([][]int, m+1)
	for i := range cost {
		cost[i] = make([]int, n+1)
	}
	// Base cases: equalizing a suffix against the empty string means
	// deleting every remaining character.
	for i := m - 1; i >= 0; i-- {
		cost[i][n] = cost[i+1][n] + int(s1[i])
	}
	for j := n - 1; j >= 0; j-- {
		cost[m][j] = cost[m][j+1] + int(s2[j])
	}
	for i := m - 1; i >= 0; i-- {
		for j := n - 1; j >= 0; j-- {
			if s1[i] == s2[j] {
				cost[i][j] = cost[i+1][j+1]
				continue
			}
			dropFromS1 := cost[i+1][j] + int(s1[i])
			dropFromS2 := cost[i][j+1] + int(s2[j])
			if dropFromS1 <= dropFromS2 {
				cost[i][j] = dropFromS1
			} else {
				cost[i][j] = dropFromS2
			}
		}
	}
	return cost[0][0]
}

func main() {
	fmt.Println(minimumDeleteSum("delete", "leet"))
}
package main

import "fmt"

/*
1.3.29 Implement CircleQueue using a circular linked list. A circular linked
list is still a linked list, except that no node's link is nil: as long as the
list is non-empty, last.next refers to first. Only a single node-typed
instance variable (last) may be used.
*/
func main() {
	// NewQueue and the queue methods are defined elsewhere in this package.
	circleQueue := NewQueue()
	circleQueue.Enqueue(1)
	circleQueue.Enqueue(2)
	circleQueue.Enqueue(3)
	circleQueue.Enqueue(4)
	circleQueue.Dequeue()
	fmt.Println(circleQueue)
}
package compile

import (
	"fmt"
	"io"
	"os"
	"strconv"

	"subc/compile/arch"
	"subc/types"
)

// opcode identifies an operation in the expression tree handed to the
// code generator.
type opcode int

// node is one expression-tree node: an opcode, up to two children, and up to
// two lvalue descriptors carrying operand metadata for code generation.
type node struct {
	op          opcode
	left, right *node
	lv          [2]arch.LV
}

// Tree opcodes; 0 is reserved so an uninitialized opcode prints "unknown".
const (
	opGlue opcode = iota + 1
	opAdd
	opAddr
	opAssign
	opBinAnd
	opBinOr
	opBinXor
	opBool
	opBrFalse
	opBrTrue
	opCall
	opCalr
	opComma
	opDec
	opDiv
	opEq
	opGt
	opGeq
	opIdent
	opIfElse
	opLab
	opLdlab
	opLt
	opLit
	opLogNot
	opLsh
	opLeq
	opMod
	opMul
	opNeg
	opNot
	opNeq
	opPlus
	opPreDec
	opPreInc
	opPostDec
	opPostInc
	opRsh
	opRval
	opScale
	opScaleBy
	opSub
)

// String returns a short mnemonic for the opcode, or "unknown" for values
// outside the defined range.
func (op opcode) String() string {
	tab := [...]string{
		opGlue:    "glue",
		opAdd:     "add",
		opAddr:    "addr",
		opAssign:  "assign",
		opBinAnd:  "binand",
		opBinOr:   "binor",
		opBinXor:  "binxor",
		opBool:    "bool",
		opBrFalse: "brfalse",
		opBrTrue:  "brtrue",
		opCall:    "call",
		opCalr:    "calr",
		opComma:   "comma",
		opDec:     "dec",
		opDiv:     "div",
		opEq:      "eq",
		opGt:      "gt",
		opGeq:     "geq",
		opIdent:   "ident",
		opIfElse:  "ifelse",
		opLab:     "lab",
		opLdlab:   "ldlab",
		opLt:      "lt",
		opLit:     "lit",
		opLogNot:  "lognot",
		opLsh:     "lsh",
		opLeq:     "leq",
		opMod:     "mod",
		opMul:     "mul",
		opNeg:     "neg",
		opNot:     "not",
		opNeq:     "neq",
		opPlus:    "plus",
		opPreDec:  "predec",
		opPreInc:  "preinc",
		opPostDec: "postdec",
		opPostInc: "postinc",
		opRsh:     "rsh",
		opRval:    "rval",
		opScale:   "scale",
		opScaleBy: "scaleby",
		opSub:     "sub",
	}
	if op == 0 || int(op) >= len(tab) {
		return "unknown"
	}
	return tab[op]
}

// newNode builds a tree node, copying the optional lvalue descriptors l1/l2
// (nil pointers leave the corresponding slot zero-valued).
func newNode(op opcode, l1, l2 *arch.LV, left, right *node) *node {
	n := &node{
		op:    op,
		left:  left,
		right: right,
	}
	if l1 != nil {
		n.lv[0] = *l1
	}
	if l2 != nil {
		n.lv[1] = *l2
	}
	return n
}

// emitArgs emits call arguments. The argument list is a left-leaning chain of
// glue nodes; recursing on left after emitting right evaluates the arguments
// in reverse list order.
func (c *compiler) emitArgs(n *node) {
	if n == nil {
		return
	}
	c.tree(n.right)
	c.emitArgs(n.left)
}

// tree walks an expression tree and drives the code generator (c.cg) for
// every node. The large switch dispatches on the node's opcode; lv is the
// node's primary operand descriptor.
func (c *compiler) tree(n *node) {
	if n == nil {
		return
	}

	intSize := c.cg.Int()
	lv := n.lv[0]
	switch n.op {
	case opIdent, opGlue: // ignore
	case opAddr:
		c.cg.Addr(lv)
	case opLit:
		// Literal values were validated earlier; the Atoi error is ignored.
		value, _ := strconv.Atoi(lv.Value.String())
		c.cg.Lit(value)
	case opPreInc, opPreDec, opPostInc, opPostDec:
		c.tree(n.left)
		// Inc's flags are (increment?, pre?).
		switch n.op {
		case opPreInc:
			c.cg.Inc(lv, true, true)
		case opPreDec:
			c.cg.Inc(lv, false, true)
		case opPostInc:
			c.cg.Inc(lv, true, false)
		case opPostDec:
			c.cg.Inc(lv, false, false)
		}
	case opLogNot, opNeg, opNot, opScale:
		c.tree(n.left)
		switch n.op {
		// NOTE(review): opBool is unreachable here — it is not in the outer
		// case list and has its own case below.
		case opBool:
			c.cg.Bool()
		case opLogNot:
			c.cg.LogNot()
		case opNeg:
			c.cg.Neg()
		case opNot:
			c.cg.Not()
		case opScale:
			c.cg.Scale()
		}
	case opEq, opNeq, opLt, opGt, opLeq, opGeq:
		c.tree(n.left)
		c.tree(n.right)
		c.cg.Commit()
		// Default to unsigned comparison conditions; switch to signed ones
		// for integer types.
		x, y, z, w := arch.Below, arch.Above, arch.BelowEqual, arch.AboveEqual
		if intTypes(lv.Btype) {
			x, y, z, w = arch.Less, arch.Greater, arch.LessEqual, arch.GreaterEqual
		}
		switch n.op {
		case opEq:
			c.cg.QueueCmp(arch.Equal)
		case opNeq:
			c.cg.QueueCmp(arch.NotEqual)
		case opLt:
			c.cg.QueueCmp(x)
		case opGt:
			c.cg.QueueCmp(y)
		case opLeq:
			c.cg.QueueCmp(z)
		case opGeq:
			c.cg.QueueCmp(w)
		}
	case opMod, opLsh, opRsh, opDiv, opBinAnd, opBinOr, opBinXor, opMul, opSub, opPlus, opAdd:
		c.tree(n.left)
		c.tree(n.right)
		c.cg.Commit()
		switch n.op {
		case opLsh:
			c.cg.Shl(true)
		case opRsh:
			c.cg.Shr(true)
		case opDiv:
			c.cg.Div(true)
		case opMod:
			c.cg.Mod(true)
		case opBinAnd:
			c.cg.And()
		case opBinOr:
			c.cg.Or()
		case opBinXor:
			c.cg.Xor()
		case opMul:
			c.cg.Mul()
		case opAdd:
			c.cg.Add(types.Typ[types.Int], types.Typ[types.Int], true)
		case opPlus:
			c.cg.Add(n.lv[0].Type, n.lv[1].Type, true)
		case opSub:
			c.cg.Sub(n.lv[0].Type, n.lv[1].Type, true)
		}
	case opCall:
		// Direct call: emit arguments, call, then pop them off the stack.
		c.emitArgs(n.left)
		c.cg.Commit()
		c.cg.Spill()
		c.cg.Call(lv)
		c.cg.Stack(lv.Size * intSize)
	case opCalr:
		// Call through a function pointer.
		c.emitArgs(n.left)
		c.cg.Commit()
		c.cg.Spill()
		c.cg.Clear(false)
		c.cg.Rval(lv)
		c.cg.Calr(lv)
		c.cg.Stack(lv.Size * intSize)
	case opLab:
		c.tree(n.left)
		c.cg.Commit()
		c.cg.Lab(lv.Addr)
	case opLdlab:
		c.cg.Ldlab(lv.Addr)
	case opRval:
		c.tree(n.left)
		c.cg.Rval(lv)
	case opAssign:
		// For a plain identifier target, the value is computed first; for a
		// computed target (e.g. through a pointer), the target address is
		// computed before the value.
		if n.left != nil && n.left.op == opIdent {
			c.tree(n.right)
			c.cg.Commit()
			c.tree(n.left)
		} else {
			c.tree(n.left)
			c.cg.Commit()
			c.tree(n.right)
			c.cg.Commit()
		}
		c.cg.Store(lv)
	case opScaleBy:
		c.tree(n.left)
		c.cg.ScaleBy(lv.Size)
	case opIfElse:
		c.emitCond(n.left, lv)
		c.cg.Commit()
		c.cg.Lab(lv.Addr)
	case opBrFalse, opBrTrue:
		c.tree(n.left)
		c.cg.Commit()
		if n.op == opBrTrue {
			c.cg.BrTrue(lv.Addr)
		} else {
			c.cg.BrFalse(lv.Addr)
		}
		c.cg.Clear(false)
		c.tree(n.right)
	case opComma:
		// Left operand's value is discarded before the right is evaluated.
		c.tree(n.left)
		c.cg.Commit()
		c.cg.Clear(false)
		c.tree(n.right)
	case opBool:
		c.tree(n.left)
		c.cg.Bool()
	default:
		panic(fmt.Sprintf("internal: unhandle op %v", n.op))
	}
}

// emitCond emits a conditional (?:) expression: condition, false-branch jump,
// then-value, jump past the else-value, else label, else-value. lv.Addr is
// the join label emitted by the opIfElse case in tree.
// NOTE(review): the leading recursion on n.left.left when it is a glue node,
// followed by an unconditional c.tree(n.left.left) on the same node, looks
// intentional for chained conditions but deserves confirmation.
func (c *compiler) emitCond(n *node, lv arch.LV) {
	if n.left.left.op == opGlue {
		c.emitCond(n.left.left, lv)
	}
	c.tree(n.left.left)
	c.cg.BrFalse(n.left.lv[0].Addr)
	c.cg.Clear(false)
	c.tree(n.left.right)
	c.cg.Jump(lv.Addr)
	c.cg.Commit()
	c.cg.Lab(n.left.lv[0].Addr)
	c.cg.Clear(false)
	c.tree(n.right)
}

// printTree dumps a tree to stdout for debugging.
func (c *compiler) printTree(n *node) {
	p := &treePrinter{w: os.Stdout, indent: -1}
	p.Dump(n)
}

// emit optimizes and then generates code for one expression tree.
func (c *compiler) emit(n *node) {
	c.optimize(n)
	//c.printTree(n)
	c.tree(n)
}

// treePrinter pretty-prints expression trees with indentation that tracks
// tree depth.
type treePrinter struct {
	w      io.Writer
	indent int
}

// Dump recursively prints n and its children.
// NOTE(review): the indentation below is written with fmt.Printf (stdout)
// rather than fmt.Fprintf(p.w, ...), so a treePrinter given a non-stdout
// writer would interleave output streams — confirm whether this is intended.
func (p *treePrinter) Dump(n *node) {
	if n == nil {
		return
	}

	p.indent++
	for i := 0; i < p.indent; i++ {
		fmt.Printf(" ")
	}
	switch n.op {
	case opLit:
		fmt.Fprintf(p.w, "lit %s\n", n.lv[0].Value)
	case opCall:
		fmt.Fprintf(p.w, "call %s %v\n", n.lv[0].Name, n.lv[0].Type)
		p.Dump(n.left)
	case opCalr:
		fmt.Fprintf(p.w, "calr %s %v\n", n.lv[0].Name, n.lv[0].Type)
		p.Dump(n.left)
	case opLab:
		fmt.Fprintf(p.w, "label L%d\n", n.lv[0].Addr)
	case opLdlab:
		fmt.Fprintf(p.w, "ldlab L%d\n", n.lv[0].Addr)
	case opGlue:
		fmt.Fprintf(p.w, "glue\n")
		p.Dump(n.left)
		p.Dump(n.right)
	case opRval:
		p.dumpUnaryExpr(n, "*")
	case opLt:
		p.dumpBinExpr(n, "<")
	case opLeq:
		p.dumpBinExpr(n, "<=")
	case opAssign:
		p.dumpBinExpr(n, "=")
	case opEq:
		p.dumpBinExpr(n, "==")
	case opNeq:
		p.dumpBinExpr(n, "!=")
	case opGt:
		p.dumpBinExpr(n, ">")
	case opGeq:
		p.dumpBinExpr(n, ">=")
	case opBinAnd:
		p.dumpBinExpr(n, "&")
	case opBinOr:
		p.dumpBinExpr(n, "|")
	case opBinXor:
		p.dumpBinExpr(n, "^")
	case opLsh:
		p.dumpBinExpr(n, "<<")
	case opRsh:
		p.dumpBinExpr(n, ">>")
	case opAdd, opPlus:
		p.dumpBinExpr(n, "+")
	case opSub:
		p.dumpBinExpr(n, "-")
	case opMul:
		p.dumpBinExpr(n, "*")
	case opDiv:
		p.dumpBinExpr(n, "/")
	case opMod:
		p.dumpBinExpr(n, "%")
	case opNeg:
		p.dumpUnaryExpr(n, "-")
	case opNot:
		p.dumpUnaryExpr(n, "~")
	case opPreInc:
		p.dumpUnaryExpr(n, "++")
	case opPreDec:
		p.dumpUnaryExpr(n, "--")
	case opBool:
		p.dumpUnaryExpr(n, "bool ")
	case opPostInc:
		x, _ := nameOrValues(n.lv)
		p.dumpUnaryOp(n, "%s++\n", x)
	case opPostDec:
		x, _ := nameOrValues(n.lv)
		p.dumpUnaryOp(n, "%s--\n", x)
	case opLogNot:
		p.dumpUnaryExpr(n, "!")
	case opIdent:
		fmt.Fprintf(p.w, "id %s\n", n.lv[0].Name)
	case opAddr:
		x, _ := nameOrValues(n.lv)
		fmt.Fprintf(p.w, "addr %s\n", x)
	case opScale:
		p.dumpUnaryExpr(n, "scale ")
	case opScaleBy:
		p.dumpUnaryExpr(n, "scaleby ")
	case opIfElse:
		p.dumpUnaryExpr(n, "ifelse ")
	case opBrFalse:
		p.dumpBinExpr(n, "brfalse")
	case opBrTrue:
		p.dumpBinExpr(n, "brtrue")
	case opComma:
		p.dumpBinExpr(n, ",")
	default:
		panic(fmt.Sprintf("unknown tree printer op: %v", n.op))
	}
	p.indent--
}

// dumpUnaryOp prints a formatted line and then the single (left) child.
func (p *treePrinter) dumpUnaryOp(n *node, format string, args ...interface{}) {
	fmt.Fprintf(p.w, format, args...)
	p.indent++
	p.Dump(n.left)
	p.indent--
}

// dumpUnaryExpr prints op followed by the node's operand name/value.
func (p *treePrinter) dumpUnaryExpr(n *node, op string) {
	x, _ := nameOrValues(n.lv)
	p.dumpUnaryOp(n, op+"%s\n", x)
}

// dumpBinOp prints a formatted line and then both children.
func (p *treePrinter) dumpBinOp(n *node, format string, args ...interface{}) {
	fmt.Fprintf(p.w, format, args...)
	p.indent++
	p.Dump(n.left)
	p.Dump(n.right)
	p.indent--
}

// dumpBinExpr prints a binary expression, escaping a bare "%" operator so it
// survives the later Fprintf format pass.
func (p *treePrinter) dumpBinExpr(n *node, op string) {
	x, y := nameOrValues(n.lv)
	if op == "%" {
		op = "%%"
	}
	p.dumpBinOp(n, "%s "+op+" %s\n", x, y)
}

// nameOrValues returns a printable representation for each of the two lvalue
// slots: the identifier name, or the literal value for non-identifiers.
func nameOrValues(lv [2]arch.LV) (string, string) {
	x, y := lv[0].Name, lv[1].Name
	if !lv[0].Ident && lv[0].Value != nil {
		x = lv[0].Value.String()
	}
	if !lv[1].Ident && lv[1].Value != nil {
		y = lv[1].Value.String()
	}
	return x, y
}

// intTypes reports whether t is one of the signed integer types (int/char),
// which use signed comparison conditions.
func intTypes(t types.Type) bool {
	return t == types.Typ[types.Int] || t == types.Typ[types.Char]
}
package domain import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" type Patch struct { MetaData metav1.ObjectMeta `json:"metadata"` }
package faker import ( "regexp" "strconv" "testing" ) func TestImageURL(t *testing.T) { expectedURL := "http://lorempixel.com/600/400/food?200" imgurl := f.Image().ImageURL(600, 400, "food") regex := `http://lorempixel.com/600/400/food\?` matched, err := regexp.MatchString(regex+"[1-999]", imgurl) if err != nil { t.Errorf("Regex error while matching string\n%v", err) } if matched { return } t.Errorf("Expected image url: %s, \nGot: %s", expectedURL, imgurl) } func TestDataURL(t *testing.T) { rawPrefix := `data:image/svg+xml;charset=UTF-8,` svgString := `<svg xmlns="http://www.w3.org/2000/svg" version="1.1" baseProfile="full" width="` + strconv.Itoa(600) + `" height="` + strconv.Itoa(400) + `"> <rect width="100%" height="100%" fill="grey"/> <text x="0" y="20" font-size="20" text-anchor="start" fill="white">` + strconv.Itoa(600) + `x` + strconv.Itoa(400) + `</text> </svg>` expectedDataURL := rawPrefix + svgString dataURL := f.Image().DataURL(600, 400) if expectedDataURL == dataURL { return } t.Errorf("Non-matching data urls. \nExpected: \n%v,\nGot: \n%v", expectedDataURL, dataURL) }
package main import ( "fmt" "sort" ) type Student2 struct { Name string Age int } type StudentSet struct { Items []Student2 } func (ss StudentSet) Len() int { return len(ss.Items) } func (ss StudentSet) Swap(i, j int) { ss.Items[i], ss.Items[j] = ss.Items[j], ss.Items[i] } func (ss StudentSet) Less(i, j int) bool { return ss.Items[i].Age < ss.Items[j].Age } func main() { ss := StudentSet{} ss.Items = append(ss.Items, Student2{Name: "Tom", Age: 10}) ss.Items = append(ss.Items, Student2{Name: "Jerry", Age: 12}) ss.Items = append(ss.Items, Student2{Name: "David", Age: 11}) ss.Items = append(ss.Items, Student2{Name: "Elon", Age: 9}) ss.Items = append(ss.Items, Student2{Name: "Satoshi", Age: 13}) sort.Sort(ss) fmt.Println(ss) }
package main func waysToStep(n int) int { if n < 3 { return n } if n == 3 { return 4 } var steps = make([]int, n) steps[0] = 1 steps[1] = 2 steps[2] = 4 for i := 3; i < n; i++ { steps[i] = (steps[i-1] + steps[i-2] + steps[i-3]) % 1000000007 } return steps[n-1] }
package tokens import ( "time" "github.com/jrapoport/gothic/models/token" "github.com/jrapoport/gothic/models/types/provider" "github.com/jrapoport/gothic/store" ) // GrantAuthToken gets or creates an auth token for the provider. func GrantAuthToken(conn *store.Connection, p provider.Name, exp time.Duration) (*token.AuthToken, error) { var t token.Token err := conn.Transaction(func(tx *store.Connection) error { t = token.NewAuthToken(p, exp) return conn.Create(t).Error }) if err != nil { return nil, err } return t.(*token.AuthToken), nil } // GetAuthToken returns the auth token for the token string if found. func GetAuthToken(conn *store.Connection, tok string) (*token.AuthToken, error) { var ct token.AuthToken err := conn.First(&ct, "token = ?", tok).Error if err != nil { return nil, err } return &ct, nil }
package proto import ( // "rpc" ) type AddPlayer struct { PlayerId string AuthKey string // ChannelId rpc.GameLocation ClanName string } type AddPlayerResult struct { } type DelPlayer struct { PlayerId string } type DelPlayerResult struct { } type PlayerChatToPlayer struct { FromPlayerId string FromPlayerName string FromPlayerLevel int32 ToPlayerId string Content string } type PlayerChatToPlayerResult struct { } type PlayerWorldChat struct { FromPlayerId string FromPlayerName string FromPlayerLevel int32 Content string CName string CSymbol uint32 LastLeagueRank uint32 Viplevel uint32 UseIM bool VoiceTime string } type PlayerWorldChatResult struct { } type ChatSendMsg2Player struct { MsgName string PlayerList []string Buf []byte } type ChatSendMsg2PlayerResult struct { } type ChatSendMsg2LPlayer struct { MsgName string Buf []byte // Channel rpc.Login_Platform LevelMin uint32 LevelMax uint32 } type ChatSendMsg2LPlayerResult struct { }
package mapbson import ( "reflect" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" ) type customInt int func customIntEncoder(key reflect.Value) (string, error) { gID := key.Interface().(customInt) return strconv.Itoa(int(gID)), nil } func customIntDecoder(key string) (reflect.Value, error) { i, err := strconv.Atoi(key) if err != nil { return reflect.Value{}, err } typedKey := customInt(i) return reflect.ValueOf(typedKey), nil } type hasCustomInt struct { CustomMap map[customInt]string `bson:"customMap"` } func getCustomRegistry() *bsoncodec.Registry { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb) ciMapType := reflect.TypeOf(hasCustomInt{}.CustomMap) cmc := newCustomMapCoder( ciMapType, customIntEncoder, customIntDecoder, ) rb.RegisterEncoder(ciMapType, cmc) rb.RegisterDecoder(ciMapType, cmc) return rb.Build() } func TestCustomMapCoder(t *testing.T) { registry := getCustomRegistry() testCases := []struct { msg string input hasCustomInt expOutput hasCustomInt }{{ msg: `custom map with three entries`, input: hasCustomInt{ CustomMap: map[customInt]string{ customInt(1): `one`, customInt(2): `two`, customInt(3): `three`, }, }, expOutput: hasCustomInt{ CustomMap: map[customInt]string{ customInt(1): `one`, customInt(2): `two`, customInt(3): `three`, }, }, }, { msg: `empty map`, input: hasCustomInt{ CustomMap: map[customInt]string{}, }, expOutput: hasCustomInt{ CustomMap: map[customInt]string{}, }, }} for _, tc := range testCases { data, err := bson.MarshalWithRegistry(registry, tc.input) require.NoError(t, err, tc.msg) actOutput := hasCustomInt{} err = bson.UnmarshalWithRegistry(registry, data, &actOutput) require.NoError(t, err, tc.msg) assert.Equal(t, tc.expOutput, actOutput, tc.msg) } }
package main import ( "fmt" "sort" "strings" "time" "github.com/gorilla/feeds" "github.com/mmcdole/gofeed" ) // Merge takes a list of raw feed strings, parses and then merges them func Merge(rawFeeds []string) feeds.Feed { fp := gofeed.NewParser() var items itemList var feedList []string for _, feed := range rawFeeds { feed, err := fp.ParseString(feed) if err == nil { feedList = append(feedList, feed.Title) for _, item := range feed.Items { published := item.PublishedParsed updated := item.UpdatedParsed if published == nil && updated == nil { continue } time := published if time == nil { time = updated } items = append(items, convertItem(item, *time, feed)) } } } sort.Sort(sort.Reverse(items)) feed := feeds.Feed{ Title: fmt.Sprintf("RSSMerge Feed (%v feeds)", len(feedList)), Link: &feeds.Link{Href: "PLACEHOLDER"}, Description: strings.Join(feedList, ", "), Author: &feeds.Author{Name: "RSSMerge", Email: "rssmerge@charlieegan3.com"}, Created: time.Now(), Items: items, } return feed } func convertItem(item *gofeed.Item, created time.Time, feed *gofeed.Feed) *feeds.Item { if item.Link == "" { item.Link = feed.Link } return &feeds.Item{ Title: item.Title, Link: &feeds.Link{Href: item.Link}, Created: created, Description: feed.Title, } } type itemList []*feeds.Item func (slice itemList) Len() int { return len(slice) } func (slice itemList) Less(i, j int) bool { return slice[i].Created.Before(slice[j].Created) } func (slice itemList) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] }
package main import ( "encoding/json" "errors" "fmt" "log" "strconv" "strings" "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-lambda-go/lambda" ) func handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { // OrgStatus from request var reqBody OrgStatus log.Printf("Received request %+v\n", request) reqType := request.Headers["Content-Type"] log.Printf("request Content-Type: %s\n", reqType) if reqType == "" { // API gateway renames header name Content-Type to lower case reqType = request.Headers["content-type"] log.Printf("request content-type: %s\n", reqType) } if reqType == "text/plain" { // assume csv data 'OrgID,Status,EffectiveDate' tokens := strings.Split(request.Body, ",") if len(tokens) != 3 { return events.APIGatewayProxyResponse{ StatusCode: 400, Headers: map[string]string{ "Content-Type": "text/plain", }, Body: "{}", }, fmt.Errorf("Invalid request %s. it should be of format 'orgID,statu,effectiveDate'", request.Body) } reqBody = OrgStatus{ OrgID: tokens[0], Status: tokens[1], EffectiveDate: tokens[2], } } else { // default content-type is application/json, so parse json request body reqType = "application/json" if err := json.Unmarshal([]byte(request.Body), &reqBody); err != nil { // return client request error return events.APIGatewayProxyResponse{ StatusCode: 400, Headers: map[string]string{ "Content-Type": "application/json", }, Body: "{}", }, err } } if reqBody.OrgID == "" { // return client request error return events.APIGatewayProxyResponse{ StatusCode: 400, Headers: map[string]string{ "Content-Type": reqType, }, Body: "{}", }, errors.New("Bad request: missing org-ID") } // invoke orgstatus rules invokeRules(&reqBody) // return the result result := strconv.FormatBool(reqBody.Inforce) if reqType == "application/json" { result = fmt.Sprintf("{\"inforce\": %t}", reqBody.Inforce) } log.Printf("Return Inforce: %s", result) return events.APIGatewayProxyResponse{ StatusCode: 200, Headers: map[string]string{ 
"Content-Type": reqType, }, Body: result, }, nil } func main() { lambda.Start(handler) }
package mypkg import "fmt" type Person struct { Name string } func (p *Person) Introduce() { fmt.Println("Hi, My name is", p.Name) } type Student struct { Person // promotion School string } func (s *Student) Introduce() { s.Person.Introduce() s.StudiesAt() } func (s *Student) StudiesAt() { fmt.Println("I study at ", s.School) }
package entity

// Store is a warehouse ("Склад").
type Store struct {
	Meta         *Meta        `json:"meta,omitempty"`         // store metadata
	Id           string       `json:"id,omitempty"`           // store ID (read-only)
	AccountId    string       `json:"accountId,omitempty"`    // account ID (read-only)
	Owner        *Employee    `json:"owner,omitempty"`        // owner (employee)
	Shared       bool         `json:"shared,omitempty"`       // shared access
	Group        *Group       `json:"group,omitempty"`        // employee's department
	Updated      string       `json:"updated,omitempty"`      // moment of last update (read-only)
	Name         string       `json:"name,omitempty"`         // store name
	Description  string       `json:"description,omitempty"`  // comment on the store
	Code         string       `json:"code,omitempty"`         // store code
	ExternalCode string       `json:"externalCode,omitempty"` // external code (read-only)
	Archived     bool         `json:"archived,omitempty"`     // whether the store is archived
	Address      string       `json:"address,omitempty"`      // store address
	AddressFull  *AddressFull `json:"addressFull,omitempty"`  // address broken down into individual fields
	Parent       *Store       `json:"parent,omitempty"`       // parent store (group) metadata
	PathName     string       `json:"pathName,omitempty"`     // store's group path
	Attributes   []Attribute  `json:"attributes,omitempty"`   // additional custom-field metadata
}

// RetailStore is a point of sale ("Точка продаж").
type RetailStore struct {
	Meta                 *Meta         `json:"meta,omitempty"`                 // retail store metadata
	Id                   string        `json:"id,omitempty"`                   // retail store ID (read-only)
	AccountId            string        `json:"accountId,omitempty"`            // account ID (read-only)
	Owner                *Employee     `json:"owner,omitempty"`                // owner (employee)
	Shared               bool          `json:"shared,omitempty"`               // shared access
	Group                *Group        `json:"group,omitempty"`                // employee's department
	Updated              string        `json:"updated,omitempty"`              // moment of last update (read-only)
	Name                 string        `json:"name,omitempty"`                 // retail store name
	Description          string        `json:"description,omitempty"`          // comment on the retail store
	ExternalCode         string        `json:"externalCode,omitempty"`         // external code (read-only)
	Archived             bool          `json:"archived,omitempty"`             // whether the retail store is archived
	Address              string        `json:"address,omitempty"`              // retail store address
	AddressFull          *AddressFull  `json:"addressFull,omitempty"`          // address broken down into individual fields
	ControlShippingStock bool          `json:"controlShippingStock,omitempty"` // stock control; cannot be true if AllowCreateProducts is true
	OnlyInStock          bool          `json:"onlyInStock,omitempty"`          // export only in-stock goods; only with stock control on; affects POS API stock export only
	Active               bool          `json:"active,omitempty"`               // point-of-sale state (enabled/disabled)
	ControlCashierChoice bool          `json:"controlCashierChoice,omitempty"` // salesperson selection
	DiscountEnable       bool          `json:"discountEnable,omitempty"`       // allow discounts
	DiscountMaxPercent   int           `json:"discountMaxPercent,omitempty"`   // maximum discount (percent)
	PriceType            *PriceType    `json:"priceType,omitempty"`            // price type used for retail sales
	Cashiers             []Cashier     `json:"cashiers,omitempty"`             // cashier metadata
	Organization         *Organization `json:"organization,omitempty"`         // legal-entity metadata
	Store                *Store        `json:"store,omitempty"`                // warehouse metadata
	Acquire              *Counterparty `json:"acquire,omitempty"`              // acquiring bank metadata
	BankPercent          int           `json:"bankPercent,omitempty"`          // acquiring bank commission (percent)
	IssueOrders          bool          `json:"issueOrders,omitempty"`          // order pickup
	SellReserves         bool          `json:"sellReserves,omitempty"`         // reserve accounting
	LastOperationNames   []LastOperation `json:"lastOperationNames,omitempty"` // most recent operations (read-only)
	OfdEnabled           bool          `json:"ofdEnabled,omitempty"`           // send electronic receipts via the fiscal data operator (OFD) (read-only)
	PriorityOfdSend      string        `json:"priorityOfdSend,omitempty"`      // electronic-receipt delivery priority; only active when OFD sending is enabled
	// --
	// priorityOfdSend values:
	//   phone  prefer sending to a phone number
	//   email  prefer sending to an e-mail address
	//   none   do not send a receipt
	// --
	AllowCustomPrice    bool            `json:"allowCustomPrice,omitempty"`    // allow sales at a free-form price (read-only)
	AuthTokenAttached   bool            `json:"authTokenAttached,omitempty"`   // whether a token was created for this point of sale (read-only)
	OrderToState        *State          `json:"orderToState,omitempty"`        // status assigned to an order after a sale based on it completes (if set)
	CustomerOrderStates []State         `json:"customerOrderStates,omitempty"` // statuses whose orders are exported to the point of sale (if set)
	Environment         *Environment    `json:"environment,omitempty"`         // environment information (read-only)
	State               *stateRetailStore `json:"state,omitempty"`             // point-of-sale state information (read-only)
	DefaultTaxSystem    string          `json:"defaultTaxSystem,omitempty"`    // default tax-system code
	// --
	// defaultTaxSystem values:
	//   GENERAL_TAX_SYSTEM                   general taxation (OSN)
	//   SIMPLIFIED_TAX_SYSTEM_INCOME         simplified, income (USN)
	//   SIMPLIFIED_TAX_SYSTEM_INCOME_OUTCOME simplified, income minus expenses
	//   UNIFIED_AGRICULTURAL_TAX             unified agricultural tax (ESHN)
	//   PRESUMPTIVE_TAX_SYSTEM               presumptive tax (ENVD)
	//   PATENT_BASED                         patent
	// --
	OrderTaxSystem string `json:"orderTaxSystem,omitempty"` // tax-system code for orders
	// --
	// orderTaxSystem values (same set as defaultTaxSystem):
	//   GENERAL_TAX_SYSTEM                   general taxation (OSN)
	//   SIMPLIFIED_TAX_SYSTEM_INCOME         simplified, income (USN)
	//   SIMPLIFIED_TAX_SYSTEM_INCOME_OUTCOME simplified, income minus expenses
	//   UNIFIED_AGRICULTURAL_TAX             unified agricultural tax (ESHN)
	//   PRESUMPTIVE_TAX_SYSTEM               presumptive tax (ENVD)
	//   PATENT_BASED                         patent
	// --
	DemandPrefix               string `json:"demandPrefix,omitempty"`               // sales number prefix
	AllowSellTobaccoWithoutMRC bool   `json:"allowSellTobaccoWithoutMRC,omitempty"` // allow selling tobacco below/above the minimum retail price (MRC)
	TobaccoMrcControlType      string `json:"tobaccoMrcControlType"`                // MRC control mode for tobacco products
	// --
	// tobaccoMrcControlType values:
	//   USER_PRICE  do not control MRC
	//   MRC_PRICE   sell at the MRC printed on the pack
	//   SAME_PRICE  forbid the sale if the sale price differs from the MRC
	// --
	MarkingSellingMode string `json:"markingSellingMode"` // selling mode for marked (traceable) goods
	// --
	// markingSellingMode values:
	//   CORRECT_MARKS_ONLY  only items with valid marking codes
	//   WITHOUT_ERRORS      valid codes plus those that could not be verified
	//   ALL                 everything, regardless of code verification
	// --
	// NOTE(review): the original comment here duplicated ControlShippingStock's
	// ("stock control"); presumably this means "allow creating products" —
	// confirm against the vendor API docs.
	AllowCreateProducts bool            `json:"allowCreateProducts,omitempty"` // cannot be true if controlShippingStock is true
	ProductFolders      []ProductFolder `json:"productFolders,omitempty"`      // product-group metadata from which goods may be exported
	// createAgentsTags Array(Meta) — customer groups (as strings) that new customers are added to; null values are ignored
	// filterAgentsTags Array(Meta) — customer groups (as strings) from which customers are exported; null values are ignored
	PrintAlways                         bool              `json:"printAlways,omitempty"`                         // always print cash receipts
	ReceiptTemplate                     *EmbeddedTemplate `json:"receiptTemplate,omitempty"`                     // cash-receipt print template metadata
	CreatePaymentInOnRetailShiftClosing bool              `json:"createPaymentInOnRetailShiftClosing,omitempty"` // create an incoming payment when the shift closes
	CreateCashInOnRetailShiftClosing    bool              `json:"createCashInOnRetailShiftClosing,omitempty"`    // create a cash-in order when the shift closes
	ReturnFromClosedShiftEnabled        bool              `json:"returnFromClosedShiftEnabled,omitempty"`        // allow returns in closed shifts
	EnableReturnsWithNoReason           bool              `json:"enableReturnsWithNoReason,omitempty"`           // allow returns without a reference document
	CreateOrderWithState                *State            `json:"createOrderWithState,omitempty"`                // status assigned to orders created here
	ReservePrepaidGoods                 bool              `json:"reservePrepaidGoods,omitempty"`                 // reserve goods that have been prepaid
	FiscalType                          string            `json:"fiscalType,omitempty"`                          // receipt generation type
	// --
	// fiscalType values:
	//   STANDARD  standard
	//   MASTER    standard plus handling of cloud operations
	//   CLOUD     cloud
	// --
	MinionToMasterType string `json:"minionToMasterType,omitempty"` // strategy for picking the cash desk that fiscalizes cloud receipts
	// --
	// minionToMasterType values:
	//   ANY         any master cash desk
	//   SAME_GROUP  only cash desks from the same department
	//   CHOSEN      the cash desks listed in masterRetailStores
	// --
	MasterRetailStores []*RetailStore `json:"masterRetailStores,omitempty"` // points of sale that may fiscalize this one's operations when minionToMaster = CHOSEN
	QrAcquire          *Meta          `json:"qrAcquire"`                    // acquiring bank metadata for QR-code payments
	QrBankPercent      float64        `json:"qrBankPercent"`                // acquiring bank commission for QR-code payments
	QrPayEnabled       bool           `json:"qrPayEnabled"`                 // whether QR-code sales are enabled at this point of sale
	IdQR               string         `json:"idQR"`                         // QR device identifier for the QR payment application
	QrTerminalId       string         `json:"qrTerminalId"`                 // terminal identifier for the QR payment application
}

// LastOperation describes one of the most recent operations.
type LastOperation struct {
	Entity string `json:"entity,omitempty"` // keyword denoting the type of the last operation (read-only)
	Name   string `json:"name,omitempty"`   // name (number) of the last operation (read-only)
}

// Environment describes the point of sale's runtime environment.
type Environment struct {
	Device          string    `json:"device,omitempty"`          // device information
	OS              string    `json:"os,omitempty"`              // operating-system information
	Software        *software `json:"software,omitempty"`        // software information
	ChequePrinter   *kkt      `json:"chequePrinter,omitempty"`   // fiscal register (KKT) information
	PaymentTerminal string    `json:"paymentTerminal,omitempty"` // payment-terminal information
}

// stateRetailStore is the point of sale's state block.
type stateRetailStore struct {
	Sync            *sync            `json:"sync,omitempty"`            // synchronization state
	LastCheckMoment string           `json:"lastCheckMoment,omitempty"` // date/time of the last synchronization
	FiscalMemory    *fiscalMemory    `json:"fiscalMemory,omitempty"`    // fiscal storage information
	PaymentTerminal *paymentTerminal `json:"paymentTerminal,omitempty"` // payment-terminal information
}

// paymentTerminal describes the attached payment terminal.
type paymentTerminal struct {
	AcquiringType string `json:"acquiringType,omitempty"` // acquirer type (e.g. inpas/payme)
}

// sync describes synchronization status.
type sync struct {
	Message          string `json:"message,omitempty"`          // synchronization state message
	LastAttempMoment string `json:"lastAttempMoment,omitempty"` // date of the last sync attempt (not necessarily successful)
}

// fiscalMemory describes the fiscal storage unit.
type fiscalMemory struct {
	Error          errorF `json:"error,omitempty"`          // fiscal storage error information
	NotSendDocCount int   `json:"notSendDocCount,omitempty"` // number of documents not yet sent to the OFD
}

// errorF is a fiscal storage error.
type errorF struct {
	// NOTE(review): the json tag "сode" starts with a CYRILLIC "с" — likely a
	// typo for "code". It cannot be changed here without verifying the
	// upstream API actually emits the Cyrillic key.
	Code    int    `json:"сode,omitempty"`    // fiscal storage error code
	Message string `json:"message,omitempty"` // error description
}

// software describes the installed software.
type software struct {
	Name    string `json:"name,omitempty"`    // software name
	Vendor  string `json:"vendor,omitempty"`  // vendor
	Version string `json:"version,omitempty"` // software version
}

// kkt describes the fiscal register hardware.
type kkt struct {
	Vendor            string        `json:"vendor,omitempty"`            // manufacturer
	Name              string        `json:"name,omitempty"`              // name
	Serial            string        `json:"serial,omitempty"`            // serial number
	FiscalDataVersion string        `json:"fiscalDataVersion,omitempty"` // fiscal data format
	Driver            *driver       `json:"driver,omitempty"`            // driver in use
	FiscalMemory      *fiscalDevice `json:"fiscalMemory,omitempty"`      // fiscal storage information
	FirmwareVersion   string        `json:"firmwareVersion,omitempty"`   // KKT firmware version
}

// driver describes the KKT driver.
type driver struct {
	Name    string `json:"name,omitempty"`    // driver name
	Version string `json:"version,omitempty"` // driver version
}

// fiscalDevice describes the fiscal storage device.
type fiscalDevice struct {
	FiscalDataVersion string `json:"fiscalDataVersion,omitempty"` // fiscal memory version
	// NOTE(review): original comment duplicated "fiscal memory version";
	// presumably this is the fiscal validity (expiry) date — confirm.
	FiscalValidityDate string `json:"fiscalValidityDate,omitempty"`
}
//go:generate moq -out ../../test/testhelpers/workflowMock.go -pkg testhelpers . Workflow:WorkflowMock package workflow import ( "context" "errors" "fmt" "io" "net/http" "strings" argoWorkflowAPIClient "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" argoWorkflowAPISpec "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" ) const mainContainer = "main" // Workflow interface is used for interacting with workflow services. type Workflow interface { ListStatus(ctx context.Context) ([]Status, error) Logs(ctx context.Context, workflowName string) (*Logs, error) LogStream(ctx context.Context, workflowName string, data http.ResponseWriter) error Status(ctx context.Context, workflowName string) (*Status, error) Submit(ctx context.Context, from string, parameters map[string]string, labels map[string]string) (string, error) } // NewArgoWorkflow creates an Argo workflow. func NewArgoWorkflow(cl argoWorkflowAPIClient.WorkflowServiceClient, n string) Workflow { return &ArgoWorkflow{ namespace: n, svc: cl, } } // ArgoWorkflow represents an Argo Workflow. type ArgoWorkflow struct { namespace string svc argoWorkflowAPIClient.WorkflowServiceClient } // Logs represents workflow logs. type Logs struct { Logs []string `json:"logs"` } // List returns a list of workflow statuses. 
func (a ArgoWorkflow) ListStatus(ctx context.Context) ([]Status, error) { workflowListResult, err := a.svc.ListWorkflows(ctx, &argoWorkflowAPIClient.WorkflowListRequest{ Namespace: a.namespace, }) if err != nil { return []Status{}, err } workflows := make([]Status, len(workflowListResult.Items)) for k, wf := range workflowListResult.Items { wfStatus := Status{ Name: wf.ObjectMeta.Name, Status: strings.ToLower(string(wf.Status.Phase)), Created: fmt.Sprint(wf.ObjectMeta.CreationTimestamp.Unix()), } if wf.Status.Phase != argoWorkflowAPISpec.WorkflowRunning { wfStatus.Finished = fmt.Sprint(wf.Status.FinishedAt.Unix()) } workflows[k] = wfStatus } return workflows, nil } // Status represents a workflow status. type Status struct { Name string `json:"name"` Status string `json:"status"` Created string `json:"created"` Finished string `json:"finished,omitempty"` } // Status returns a workflow status. func (a ArgoWorkflow) Status(ctx context.Context, workflowName string) (*Status, error) { workflow, err := a.svc.GetWorkflow(ctx, &argoWorkflowAPIClient.WorkflowGetRequest{ Name: workflowName, Namespace: a.namespace, }) if err != nil { return nil, err } workflowData := Status{ Name: workflowName, Status: strings.ToLower(string(workflow.Status.Phase)), Created: fmt.Sprint(workflow.CreationTimestamp.Unix()), Finished: fmt.Sprint(workflow.Status.FinishedAt.Unix()), } return &workflowData, nil } // Logs returns logs for a workflow. 
func (a ArgoWorkflow) Logs(ctx context.Context, workflowName string) (*Logs, error) { stream, err := a.svc.WorkflowLogs(ctx, &argoWorkflowAPIClient.WorkflowLogRequest{ Name: workflowName, Namespace: a.namespace, LogOptions: &v1.PodLogOptions{ Container: mainContainer, }, }) if err != nil { return nil, err } var argoWorkflowLogs Logs for { event, err := stream.Recv() if errors.Is(err, io.EOF) { break } if err != nil { return nil, err } argoWorkflowLogs.Logs = append(argoWorkflowLogs.Logs, fmt.Sprintf("%s: %s", event.PodName, event.Content)) } return &argoWorkflowLogs, nil } // LogStream returns a log stream for a workflow. func (a ArgoWorkflow) LogStream(argoCtx context.Context, workflowName string, w http.ResponseWriter) error { stream, err := a.svc.WorkflowLogs(argoCtx, &argoWorkflowAPIClient.WorkflowLogRequest{ Name: workflowName, Namespace: a.namespace, LogOptions: &v1.PodLogOptions{ Container: mainContainer, Follow: true, }, }) if err != nil { return err } for { event, err := stream.Recv() if errors.Is(err, io.EOF) { return nil } if err != nil { return err } fmt.Fprintf(w, "%s: %s\n", event.PodName, event.Content) w.(http.Flusher).Flush() } } // Submit submits a workflow execution. func (a ArgoWorkflow) Submit(ctx context.Context, from string, parameters map[string]string, workflowLabels map[string]string) (string, error) { parts := strings.SplitN(from, "/", 2) for _, part := range parts { if part == "" { return "", fmt.Errorf("resource identifier '%s' is malformed. Should be `kind/name`, e.g. 
cronwf/hello-world-cwf", from) } } kind := parts[0] name := parts[1] var parameterStrings []string for k, v := range parameters { parameterStrings = append(parameterStrings, fmt.Sprintf("%s=%s", k, v)) } generateNamePrefix := fmt.Sprintf("%s-%s-", parameters["project_name"], parameters["target_name"]) created, err := a.svc.SubmitWorkflow(ctx, &argoWorkflowAPIClient.WorkflowSubmitRequest{ Namespace: a.namespace, ResourceKind: kind, ResourceName: name, SubmitOptions: &argoWorkflowAPISpec.SubmitOpts{ GenerateName: generateNamePrefix, Parameters: parameterStrings, Labels: labels.FormatLabels(workflowLabels), }, }) if err != nil { return "", fmt.Errorf("failed to submit workflow: %w", err) } return strings.ToLower(created.Name), nil } // NewParameters creates workflow parameters. func NewParameters(environmentVariablesString, executeCommand, executeContainerImageURI, targetName, projectName string, cliParameters map[string]string, credentialsToken string) map[string]string { parameters := map[string]string{ "environment_variables_string": environmentVariablesString, "execute_command": executeCommand, "execute_container_image_uri": executeContainerImageURI, "project_name": projectName, "target_name": targetName, "credentials_token": credentialsToken, } // this include override parameters // don't want to necessarily allow overriding everything // for now, constrainting to execute image uri // TODO find a dynamic way to combine two json objects // Either do it here or after it is generated and passed to argoWorkflow submit for k, v := range cliParameters { if k == "execute_container_image_uri" { parameters["execute_container_image_uri"] = v } if k == "pre_container_image_uri" { parameters["pre_container_image_uri"] = v } } return parameters } // CreateWorkflowResponse creates a workflow response. type CreateWorkflowResponse struct { WorkflowName string `json:"workflow_name"` }
/*
Copyright 2020 The Tilt Dev Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1: this file defines the ExtensionRepo resource plus the
// interface plumbing (resource.Object, status subresource, validation)
// that the Tilt apiserver builder requires.
package v1alpha1

import (
	"context"
	"path/filepath"
	strings "strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/validation/field"

	"github.com/tilt-dev/tilt-apiserver/pkg/server/builder/resource"
	"github.com/tilt-dev/tilt-apiserver/pkg/server/builder/resource/resourcerest"
	"github.com/tilt-dev/tilt-apiserver/pkg/server/builder/resource/resourcestrategy"
)

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ExtensionRepo specifies a repo or folder where a set of extensions live.
// +k8s:openapi-gen=true
// +tilt:starlark-gen=true
type ExtensionRepo struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	Spec   ExtensionRepoSpec   `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	Status ExtensionRepoStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// ExtensionRepoList is the list type for ExtensionRepo.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ExtensionRepoList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	Items []ExtensionRepo `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// ExtensionRepoSpec defines how to access the repo.
type ExtensionRepoSpec struct {
	// The URL of the repo.
	//
	// Allowed:
	// https: URLs that point to a public git repo
	// file: URLs that point to a location on disk.
	URL string `json:"url" protobuf:"bytes,1,opt,name=url"`

	// A reference to sync the repo to. If empty, Tilt will always update
	// the repo to the latest version.
	// +optional
	Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"`
}

// Compile-time checks that ExtensionRepo satisfies the apiserver builder
// interfaces.
var _ resource.Object = &ExtensionRepo{}
var _ resourcerest.SingularNameProvider = &ExtensionRepo{}
var _ resourcestrategy.Validater = &ExtensionRepo{}

// GetSingularName returns the singular form of the resource name.
func (in *ExtensionRepo) GetSingularName() string {
	return "extensionrepo"
}

// GetSpec returns the spec sub-object.
func (in *ExtensionRepo) GetSpec() interface{} {
	return in.Spec
}

// GetObjectMeta returns a pointer to the embedded object metadata.
func (in *ExtensionRepo) GetObjectMeta() *metav1.ObjectMeta {
	return &in.ObjectMeta
}

// NamespaceScoped reports false: ExtensionRepo is cluster-scoped.
func (in *ExtensionRepo) NamespaceScoped() bool {
	return false
}

// ShortNames lists the kubectl short names for this resource.
func (in *ExtensionRepo) ShortNames() []string {
	return []string{"repo", "extrepo"}
}

// New returns an empty ExtensionRepo object.
func (in *ExtensionRepo) New() runtime.Object {
	return &ExtensionRepo{}
}

// NewList returns an empty ExtensionRepoList object.
func (in *ExtensionRepo) NewList() runtime.Object {
	return &ExtensionRepoList{}
}

// GetGroupVersionResource identifies this resource within the API group.
func (in *ExtensionRepo) GetGroupVersionResource() schema.GroupVersionResource {
	return schema.GroupVersionResource{
		Group:    "tilt.dev",
		Version:  "v1alpha1",
		Resource: "extensionrepos",
	}
}

// IsStorageVersion reports that v1alpha1 is the version persisted in storage.
func (in *ExtensionRepo) IsStorageVersion() bool {
	return true
}

// Validate rejects specs whose URL is neither http(s):// nor an absolute
// file:// path.
func (in *ExtensionRepo) Validate(ctx context.Context) field.ErrorList {
	var fieldErrors field.ErrorList

	url := in.Spec.URL
	isWeb := strings.HasPrefix(url, "https://") || strings.HasPrefix(url, "http://")
	isFile := strings.HasPrefix(url, "file://")
	if !isWeb && !isFile {
		fieldErrors = append(fieldErrors, field.Invalid(
			field.NewPath("spec.url"),
			url,
			"URLs must start with http(s):// or file://"))
	} else if isFile && !filepath.IsAbs(strings.TrimPrefix(url, "file://")) {
		fieldErrors = append(fieldErrors, field.Invalid(
			field.NewPath("spec.url"),
			url,
			"file:// URLs must be absolute (e.g., file:///home/user/repo)"))
	}

	return fieldErrors
}

var _ resource.ObjectList = &ExtensionRepoList{}

// GetListMeta returns a pointer to the embedded list metadata.
func (in *ExtensionRepoList) GetListMeta() *metav1.ListMeta {
	return &in.ListMeta
}

// ExtensionRepoStatus defines the observed state of ExtensionRepo
type ExtensionRepoStatus struct {
	// Contains information about any problems loading the repo.
	Error string `json:"error,omitempty" protobuf:"bytes,1,opt,name=error"`

	// The last time the repo was fetched and checked for validity.
	LastFetchedAt metav1.Time `json:"lastFetchedAt,omitempty" protobuf:"bytes,2,opt,name=lastFetchedAt"`

	// The path to the repo on local disk.
	Path string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`

	// The reference that we currently have checked out.
	// On git, this is the commit hash.
	// On file repos, this is empty.
	CheckoutRef string `json:"checkoutRef,omitempty" protobuf:"bytes,4,opt,name=checkoutRef"`

	// If StaleReason is non-empty, that indicates the repo failed to fetch, but
	// we were successfully able to use an on-disk copy.
	StaleReason string `json:"staleReason,omitempty" protobuf:"bytes,5,opt,name=staleReason"`
}

// ExtensionRepo implements ObjectWithStatusSubResource interface.
var _ resource.ObjectWithStatusSubResource = &ExtensionRepo{}

// GetStatus returns the status subresource (by value).
func (in *ExtensionRepo) GetStatus() resource.StatusSubResource {
	return in.Status
}

// ExtensionRepoStatus{} implements StatusSubResource interface.
var _ resource.StatusSubResource = &ExtensionRepoStatus{}

// CopyTo writes this status into the parent ExtensionRepo; the apiserver
// calls it when the status subresource is updated.
func (in ExtensionRepoStatus) CopyTo(parent resource.ObjectWithStatusSubResource) {
	parent.(*ExtensionRepo).Status = in
}
// Package evnet holds study notes on the Kubernetes event pipeline.
//
// NOTE(review): this file is reference notes, not buildable code — it
// copies declarations from k8s.io without imports (metav1, v1, runtime,
// watch, lru, clock and several referenced types are undeclared here),
// and "evnet" looks like a typo for "event" — confirm intent.
package evnet

//k8s.io/api/core/v1/types.go

// EventSource identifies the component and host that emitted an event.
type EventSource struct {
	Component string
	Host      string
}

// Event mirrors k8s.io/api/core/v1.Event.
type Event struct {
	metav1.TypeMeta
	metav1.ObjectMeta
	InvolvedObject      ObjectReference
	Reason              string
	Message             string
	Source              EventSource
	FirstTimestamp      metav1.Time
	LastTimestamp       metav1.Time
	Count               int32
	Type                string
	EventTime           metav1.MicroTime
	Series              *EventSeries
	Action              string
	Related             *ObjectReference
	ReportingController string
	ReportingInstance   string
}

//event.go

// EventSink knows how to store events (client.Client implements it.)
type EventSink interface {
	Create(event *v1.Event) (*v1.Event, error)
	Update(event *v1.Event) (*v1.Event, error)
	Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error)
}

// EventRecorder knows how to record events on behalf of an EventSource.
type EventRecorder interface {
	Event(object runtime.Object, eventtype, reason, message string)
	Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
	PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{})
	AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{})
}

// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
type EventBroadcaster interface {
	StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface
	StartRecordingToSink(sink EventSink) watch.Interface
	StartLogging(logf func(format string, args ...interface{})) watch.Interface

	// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
	// with the event source set to the given event source.
	NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
}

// EventFilterFunc decides whether an event should be dropped.
type EventFilterFunc func(event *v1.Event) bool

// EventCorrelator filters, aggregates and de-duplicates events before
// they reach the sink.
type EventCorrelator struct {
	filterFunc EventFilterFunc
	aggregator *EventAggregator
	logger     *eventLogger
}

// EventCorrelateResult is the correlator's decision for one event:
// the (possibly rewritten) event, an optional patch for an existing
// event, or an instruction to skip it entirely.
type EventCorrelateResult struct {
	Event *v1.Event
	Patch []byte
	Skip  bool
}

// EventAggregator buckets similar events so repeats are counted rather
// than stored individually; guarded by the embedded RWMutex.
type EventAggregator struct {
	sync.RWMutex
	cache                *lru.Cache
	keyFunc              EventAggregatorKeyFunc
	messageFunc          EventAggregatorMessageFunc
	maxEvents            uint
	maxIntervalInSeconds uint
	clock                clock.Clock
}

// EventAggregatorKeyFunc is responsible for grouping events for aggregation
// It returns a tuple of the following:
// aggregateKey - key the identifies the aggregate group to bucket this event
// localKey - key that makes this event in the local group
type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string)

// EventAggregatorMessageFunc is responsible for producing an aggregation message
type EventAggregatorMessageFunc func(event *v1.Event) string

/*
generate event module
use EventRecorder to create event
use EventBroadcaster to broadcast event to api server, log file and other event sinker
before send event to api server, EventCorrelator will aggregate event, based on the result,
make the decision whether to skip event or to update event(has related events) or post new events to etcd
*/
// Package wyre implements a minimal HTTP client for the Wyre REST API,
// signing each request with an HMAC-SHA256 of the full request URI plus
// the raw body.
package wyre

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// Client is an authenticated Wyre API client. key/secret are the API
// credentials; baseURL selects the production or sandbox endpoint.
type Client struct {
	http *http.Client

	key, secret, baseURL string
}

// NewClient returns a Client pointed at the production API, or at the
// test API when sandbox is true.
func NewClient(key, secret string, sandbox bool) *Client {
	// get the base URL, based on sandbox flag
	baseURL := "https://api.sendwyre.com"
	if sandbox {
		baseURL = "https://api.testwyre.com"
	}
	baseURL += "/v2"

	return &Client{
		http:    &http.Client{},
		key:     key,
		secret:  secret,
		baseURL: baseURL,
	}
}

// makeURL joins the base URL, normalized request path and encoded query
// parameters.
func (c *Client) makeURL(path string, params url.Values) string {
	return fmt.Sprintf("%s%s?%s", c.baseURL, normalizePath(path), params.Encode())
}

// calculateRequestSignature returns the hex-encoded HMAC-SHA256 of the
// request URI concatenated with the raw body, keyed by the API secret.
func (c *Client) calculateRequestSignature(uri string, body []byte) (string, error) {
	data := fmt.Sprintf("%s%s", uri, body)
	h := hmac.New(sha256.New, []byte(c.secret))
	_, err := h.Write([]byte(data))
	if err != nil {
		return "", err
	}
	sigBytes := h.Sum(nil)
	return hex.EncodeToString(sigBytes), nil
}

// doRequest performs a signed request and JSON-decodes the response into
// result (which may be nil when the caller does not need the payload).
// Non-2xx responses are decoded into an *APIError and returned as the error.
func (c *Client) doRequest(path, method string, params url.Values, body []byte, result interface{}) error {
	if params == nil {
		params = make(url.Values)
	}
	params.Add("timestamp", getTimestampString())

	uri := c.makeURL(path, params)
	req, err := http.NewRequest(method, uri, bytes.NewBuffer(body))
	if err != nil {
		return err
	}

	// The signature covers the exact URI (including the query string)
	// plus the raw body, so it must be computed after makeURL.
	sig, err := c.calculateRequestSignature(uri, body)
	if err != nil {
		return err
	}
	req.Header.Add("X-Api-Key", c.key)
	req.Header.Add("X-Api-Signature", sig)

	resp, err := c.http.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	// Accept the whole 2xx range: the previous `!= 200` check treated
	// other success codes (e.g. 201 Created) as errors.
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		apiErr := &APIError{}
		if err := dec.Decode(apiErr); err != nil {
			return err
		}
		return apiErr
	}

	// Callers that do not care about the response payload may pass a nil
	// result; decoding into nil would otherwise fail.
	if result == nil {
		return nil
	}
	return dec.Decode(result)
}
// Package h2md converts an HTML document into Markdown text.
package h2md

import (
	"bufio"
	"bytes"
	"golang.org/x/net/html"
	"io"
	"strconv"
	"strings"
)

// H2MD walks a parsed HTML tree and renders it as Markdown. The fields
// hold rendering state that Text() mutates while it recurses.
type H2MD struct {
	*html.Node
	ulN          int                 // current ul/ol nesting depth (-1 = not inside a list)
	blockquoteN  int                 // current blockquote nesting depth
	tdN          int                 // cells counted in the current table header row
	tableSpliced bool                // whether the "---- |" separator row has been emitted
	skipNewline  bool                // when true, text nodes are TrimSpace'd before output
	replacers    map[string]Replacer // per-attribute rewrite hooks, see Replace
}

// Replacer rewrites an attribute value (e.g. an href or src) before it is
// written into the Markdown output.
type Replacer func(val string, n *html.Node) string

// NewH2MD create H2MD with html text
func NewH2MD(htmlText string) (*H2MD, error) {
	node, err := html.Parse(strings.NewReader(htmlText))
	if err == nil {
		return &H2MD{
			Node:         node,
			ulN:          -1,
			blockquoteN:  0,
			tdN:          0,
			tableSpliced: false,
			skipNewline:  true,
			replacers:    make(map[string]Replacer),
		}, nil
	}
	return nil, err
}

//NewH2MDFromNode create H2MD with html node
func NewH2MDFromNode(node *html.Node) (*H2MD, error) {
	return &H2MD{
		Node:         node,
		ulN:          -1,
		blockquoteN:  0,
		tdN:          0,
		tableSpliced: false,
		skipNewline:  true,
		replacers:    make(map[string]Replacer),
	}, nil
}

// Replace Replace element attribute value: registers a Replacer hook for
// the named attribute.
func (h *H2MD) Replace(attr string, r Replacer) {
	h.replacers[attr] = r
}

// Attr Return the element attribute value of n, passed through a
// registered Replacer if one exists; "" when the attribute is absent.
func (h *H2MD) Attr(name string, n *html.Node) string {
	for _, attr := range n.Attr {
		if name == attr.Key {
			if r, ok := h.replacers[name]; ok {
				return r(attr.Val, n)
			}
			return attr.Val
		}
	}
	return ""
}

// Text return the markdown content
func (h *H2MD) Text() string {
	var buf bytes.Buffer
	var f func(*html.Node)
	f = func(n *html.Node) {
		// parse renders n's children wrapped in tag (on both sides unless
		// single is true), then advances n past the rendered children so
		// the trailing recursion below does not visit them a second time.
		var parse = func(tag string, single bool) {
			buf.WriteString(tag)
			for c := n.FirstChild; c != nil; c = c.NextSibling {
				f(c)
			}
			if n.LastChild != nil {
				n = n.LastChild.NextSibling
			}
			if !single {
				buf.WriteString(tag)
			}
		}

		if n.Type == html.TextNode {
			if h.skipNewline {
				// NOTE(review): this mutates the parsed tree in place.
				n.Data = strings.TrimSpace(n.Data)
			}
			buf.WriteString(n.Data)
		}

		if n.Type == html.ElementNode {
			switch n.Data {
			case "hr":
				buf.WriteString("\n---\n")
			case "a":
				// Only the first child's text is used as the link label.
				if c := n.FirstChild; c != nil {
					buf.WriteString("[" + c.Data + "](" + h.Attr("href", n) + ")")
					n = c
				}
			case "img":
				// Images directly inside a paragraph get their own line.
				if n.Parent != nil && n.Parent.Data == "p" {
					buf.WriteString("\n")
				}
				buf.WriteString("![" + h.Attr("alt", n) + "](" + h.Attr("src", n) + ")")
				if n.Parent != nil && n.Parent.Data == "p" {
					buf.WriteString("\n")
				}
			case "del":
				parse("~~", false)
			case "i":
				parse("*", false)
			case "strong", "b":
				parse("**", false)
			case "h1", "h2", "h3", "h4", "h5", "h6":
				buf.WriteString("\n")
				// Heading level is the digit in the tag name.
				j, _ := strconv.Atoi(n.Data[1:])
				h.skipNewline = true
				parse(strings.Repeat("#", j)+" ", true)
				buf.WriteString("\n")
			case "code":
				h.skipNewline = false
				lang := h.Attr("class", n)
				var newline = ""
				// <pre><code> becomes a fenced block; bare <code> stays inline.
				if n.Parent != nil && n.Parent.Data == "pre" {
					buf.WriteString("\n")
					if lang == "" {
						lang = h.Attr("class", n.Parent)
					}
					newline = "\n"
				}
				// Strip common syntax-highlighter class names to recover
				// the plain language name for the fence info string.
				lang = strings.ReplaceAll(lang, "hljs", "")
				lang = strings.ReplaceAll(lang, "prism", "")
				lang = strings.ReplaceAll(lang, "highlight", "")
				lang = strings.ReplaceAll(lang, "highlight-source-", "")
				lang = strings.ReplaceAll(lang, "language-", "")
				lang = strings.TrimSpace(lang)
				if lang != "" {
					lang = strings.Split(lang, " ")[0]
					newline = "\n"
				}
				buf.WriteString(newline)
				buf.WriteString("```")
				buf.WriteString(lang)
				buf.WriteString(newline)
				parse("", true)
				buf.WriteString(newline)
				buf.WriteString("```")
			case "ul", "ol":
				h.ulN++
				parse("", true)
				h.ulN--
			case "li":
				h.skipNewline = true
				buf.WriteString("\n")
				if h.ulN > 0 {
					// Indent nested list items one space per level.
					buf.WriteString(strings.Repeat(" ", h.ulN))
				}
				parse("- ", true)
			case "blockquote":
				h.skipNewline = true
				h.blockquoteN++
				// Render the quote body into the (reset) shared buffer,
				// then prefix every produced line with "> " while
				// re-appending to the saved output.
				var prevBuf bytes.Buffer
				prevBuf.Write(buf.Bytes())
				buf.Reset()
				parse("", true)
				h.blockquoteN--
				br := bufio.NewReader(&buf)
				for {
					a, _, c := br.ReadLine()
					if c == io.EOF {
						break
					}
					prevBuf.WriteString("\n> ")
					prevBuf.Write(a)
				}
				buf.Reset()
				buf.Write(prevBuf.Bytes())
				h.skipNewline = false
			case "tr":
				h.skipNewline = true
				// After the header row has counted cells, emit the
				// "---- |" separator row exactly once per table.
				if h.tdN > 0 && !h.tableSpliced {
					buf.WriteString("\n| ")
					buf.WriteString(strings.Repeat("---- | ", h.tdN))
					h.tdN = 0
					h.tableSpliced = true
				}
				h.skipNewline = true
				buf.WriteString("\n| ")
			case "td", "th":
				h.skipNewline = false
				parse("", true)
				buf.WriteString(" | ")
				h.skipNewline = true
				h.tdN++
			case "pre":
				h.skipNewline = false
				// A <pre> without a <code> child still becomes a fence.
				if n.FirstChild != nil && n.FirstChild.Data != "code" {
					parse("\n```\n", false)
				}
				h.skipNewline = true
			case "p":
				if !h.skipNewline {
					buf.WriteString("\n")
				}
			case "br":
				buf.WriteString("\n")
			}
		}

		// Recurse into any children that parse above did not consume
		// (parse advances n past rendered children).
		if n != nil && n.FirstChild != nil {
			for c := n.FirstChild; c != nil; c = c.NextSibling {
				f(c)
			}
		}
	}
	f(h.Node)
	return buf.String()
}
/*
 * Copyright (c) 2020 InterDigital Communications, Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * AdvantEDGE Radio Network Information Service REST API
 *
 * Radio Network Information Service is AdvantEDGE's implementation of [ETSI MEC ISG MEC012 RNI API](http://www.etsi.org/deliver/etsi_gs/MEC/001_099/012/01.01.01_60/gs_MEC012v010101p.pdf) <p>[Copyright (c) ETSI 2017](https://forge.etsi.org/etsi-forge-copyright-notice.txt) <p>**Micro-service**<br>[meep-rnis](https://github.com/InterDigitalInc/AdvantEDGE/tree/master/go-apps/meep-rnis) <p>**Type & Usage**<br>Edge Service used by edge applications that want to get information about radio conditions in the network <p>**Details**<br>API details available at _your-AdvantEDGE-ip-address/api_
 *
 * API version: 1.1.1
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

// Swagger-generated routing shims: each exported handler either forwards
// to the lower-case implementation defined elsewhere in this package, or
// answers with notImplemented for endpoints this service does not support.
package server

import (
	"net/http"
)

// Cell-change reconfiguration subscriptions — not implemented.
func CaReConfSubscriptionSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func CaReConfSubscriptionSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func CaReConfSubscriptionSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func CaReConfSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

// Cell-change subscriptions — implemented.
func CellChangeSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	cellChangeSubscriptionsGET(w, r)
}

func CellChangeSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	cellChangeSubscriptionsPOST(w, r)
}

func CellChangeSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	cellChangeSubscriptionsPUT(w, r)
}

func CellChangeSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	cellChangeSubscriptionsDELETE(w, r)
}

// UE measurement-report subscriptions — implemented.
func MeasRepUeReportSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	measRepUeReportSubscriptionsPUT(w, r)
}

func MeasRepUeSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	measRepUeReportSubscriptionsGET(w, r)
}

func MeasRepUeSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	measRepUeReportSubscriptionsPOST(w, r)
}

func MeasRepUeSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	measRepUeReportSubscriptionsDELETE(w, r)
}

// Timing-advance measurement subscriptions — not implemented.
func MeasTaSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func MeasTaSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func MeasTaSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func MeasTaSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

// PLMN information — implemented.
func PlmnInfoGET(w http.ResponseWriter, r *http.Request) {
	plmnInfoGET(w, r)
}

// RAB establishment subscriptions — not implemented.
func RabEstSubscriptionSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabEstSubscriptionSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabEstSubscriptionSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabEstSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabInfoGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

// RAB modification subscriptions — not implemented.
func RabModSubscriptionSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabModSubscriptionSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabModSubscriptionSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabModSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

// RAB release subscriptions — not implemented.
func RabRelSubscriptionSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabRelSubscriptionSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabRelSubscriptionSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func RabRelSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

// S1 bearer information and subscriptions — not implemented.
func S1BearerInfoGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func S1BearerSubscriptionSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func S1BearerSubscriptionSubscriptionsPOST(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func S1BearerSubscriptionSubscriptionsPUT(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func S1BearerSubscriptionsSubscrIdDELETE(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

// Subscription link lists — only the cell-change (Cc), all-subscriptions
// and measurement-report (Mr) variants are implemented.
func SubscriptionLinkListSubscriptionsCcGET(w http.ResponseWriter, r *http.Request) {
	subscriptionLinkListSubscriptionsCcGET(w, r)
}

func SubscriptionLinkListSubscriptionsCrGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func SubscriptionLinkListSubscriptionsGET(w http.ResponseWriter, r *http.Request) {
	subscriptionLinkListSubscriptionsGET(w, r)
}

func SubscriptionLinkListSubscriptionsMrGET(w http.ResponseWriter, r *http.Request) {
	subscriptionLinkListSubscriptionsMrGET(w, r)
}

func SubscriptionLinkListSubscriptionsReGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func SubscriptionLinkListSubscriptionsRmGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func SubscriptionLinkListSubscriptionsRrGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func SubscriptionLinkListSubscriptionsS1GET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}

func SubscriptionLinkListSubscriptionsTaGET(w http.ResponseWriter, r *http.Request) {
	notImplemented(w, r)
}
// Package lmq_test: Ginkgo spec covering one topic consumed through a
// single consume-point — everything published must be consumed.
package lmq_test

import (
	"reflect"
	"time"

	. "github.com/zwb-ict/lmq"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("LmqSingleTopicWithSingleCp", func() {
	var (
		aproducer AsyncProducer // producer under test
		aperr     error         // its construction error

		consumer Consumer // consumer under test
		cerr     error    // its construction error

		topicName   string
		opt         *Options
		topicOption TopicOption

		msgsTotalCount int    // number of messages published per run
		msg            string // payload published for every message
	)

	BeforeEach(func() {
		opt = &Options{
			DataPath:       "./queue_data_single_topic_with_single_cp",
			MaxTopicCount:  1,
			Topics:         make(map[string]TopicOption),
			BackendStorage: "Lmdb",
		}
		topicName = "single"
		topicOption = TopicOption{
			Name:                topicName,
			MaxBytesPerFile:     2 * 1024 * 1024,
			MaxDataFiles:        50,
			BufferSize:          1,
			BufferFlushInterval: 2 * time.Millisecond,
			FetchSize:           100,
		}
		opt.Topics[topicName] = topicOption
		msgsTotalCount = 50000
		msg = "hello lmq with single topic with single cp"
	})

	JustBeforeEach(func() {
		aproducer, aperr = NewAsyncProducer(opt)
		consumer, cerr = NewConsumer("single", opt)
	})

	Context("when the aproducer, consumer create succesfully", func() {
		It("aproducer should be an AsyncProducer object", func() {
			Expect(reflect.TypeOf(aproducer).String()).To(Equal("*lmq.asyncProducer"))
		})
		It("aperr should be nil", func() {
			Expect(aperr).NotTo(HaveOccurred())
		})
		It("consumer should be an Consumer object", func() {
			Expect(reflect.TypeOf(consumer).String()).To(Equal("*lmq.consumer"))
		})
		It("cerr should be nil", func() {
			Expect(cerr).NotTo(HaveOccurred())
		})
		Context("and publish and consume msgs", func() {
			It("consumed count should be equal with produced count", func() {
				// Publish all messages through the async input channel.
				for i := 0; i < msgsTotalCount; i++ {
					aproducer.Input() <- &ProducerMessage{
						Topic:     topicName,
						Body:      []byte(msg),
						Timestamp: time.Now(),
					}
				}
				// Give the producer time to flush to storage before consuming.
				time.Sleep(5 * time.Second)
				tc, err := consumer.ConsumeTopic(topicName, 0)
				Expect(err).NotTo(HaveOccurred())
				msgs := tc.Messages()
				consumedCount := 0
				// Drain until no message arrives within the timeout window;
				// each received message extends the deadline.
				timeout := time.NewTimer(100 * time.Millisecond)
				for {
					select {
					case <-msgs:
						consumedCount++
						timeout.Reset(2 * time.Second)
					case <-timeout.C:
						goto result
					}
				}
			result:
				Expect(consumedCount).To(Equal(msgsTotalCount))
			})
		})
	})
})
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/gorilla/schema"
)

// ProjectList struct
type ProjectList []*Project

// formatJSON prepares every project in the list for JSON output.
func (pl *ProjectList) formatJSON() *ProjectList {
	for _, p := range *pl {
		p.formatJSON()
	}
	return pl
}

// Project struct. Tags is the JSON-facing form of the tag list; Tagstr
// ("a,b,c") is the DB-facing form — formatJSON/formatSQL convert between
// the two. AddedOn/EditedOn are Unix-second timestamps.
type Project struct {
	ID          int64    `db:"id" json:"id"`
	Name        string   `db:"name" json:"name"`
	Slug        string   `db:"slug" json:"slug"`
	Description string   `db:"description" json:"description"`
	Tags        []string `db:"-" json:"tags"`
	Tagstr      string   `db:"tags" json:"-"`
	Image       string   `db:"image" json:"image,omitempty"`
	Repo        string   `db:"repo" json:"repo,omitempty"`
	Demo        string   `db:"demo" json:"demo,omitempty"`
	IsHidden    bool     `db:"is_hidden" json:"is_hidden" schema:"is_hidden"`
	AddedOn     int64    `db:"added_on" json:"added_on"`
	EditedOn    int64    `db:"edited_on" json:"edited_on,omitempty"`
}

// NewProject initializes Project struct with current time as AddedOn value
func NewProject() *Project {
	p := Project{}
	p.setAddedOn()
	return &p
}

// NewProjectFromMap takes a map as input and returns a Project struct
// TODO
func NewProjectFromMap(m map[string]interface{}) *Project {
	return NewProject()
}

// NewProjectFromPostForm takes a formdata as input and returns a Project struct
// TODO
func NewProjectFromPostForm(pf url.Values) (*Project, error) {
	p := NewProject()
	decoder := schema.NewDecoder()
	if err := decoder.Decode(p, pf); err != nil {
		fmt.Println(err)
		return nil, errors.New("Invalid project input. Form-data expected")
	}
	if !p.IsValid() {
		return nil, errors.New("Incomplete project data")
	}
	return p, nil
}

// // Assign assigns the input *Project fields values to the current *Project
// // except for ID, AddedOn, EditedOn. EditedOn receives a timestamp
// func (p *Project) Assign(p1 *Project) {
// 	if p.Tagstr == "" {
// 		p.setTagstr()
// 	}
// 	if p1.Tagstr == "" {
// 		p1.setTagstr()
// 	}
// 	p.Name = p1.Name
// 	p.Slug = p1.Slug
// 	p.Description = p1.Description
// 	p.Tagstr = p1.Tagstr
// 	p.Image = p1.Image
// 	p.Repo = p1.Repo
// 	p.Demo = p1.Demo
// 	p.IsHidden = p1.IsHidden
// 	p.Update()
// }

// Init adds current timestamp to AddedOn field if not created
// with NewProject() method
func (p *Project) Init() *Project {
	return p.setAddedOn()
}

// Update updates the value of EditedOn field with the current time
func (p *Project) Update() *Project {
	return p.setEditedOn()
}

// setTagstr sets Tagstr field from Tags value
func (p *Project) setTagstr() *Project {
	p.Tagstr = strings.Join(p.Tags, ",")
	return p
}

// setTags sets Tags field from Tagstr value (string with comma-separated values)
func (p *Project) setTags() *Project {
	if p.Tagstr != "" {
		p.Tags = strings.Split(p.Tagstr, ",")
	} else {
		p.Tags = []string{}
	}
	return p
}

// IsValid checks whether a project contains the required fields
func (p *Project) IsValid() bool {
	return p.Name != "" && p.Slug != "" && p.Description != "" && p.AddedOn != 0
}

// formatJSON expands the DB tag string into the Tags slice for output.
func (p *Project) formatJSON() *Project {
	return p.setTags()
}

// formatSQL backfills AddedOn and flattens Tags into Tagstr for storage.
func (p *Project) formatSQL() *Project {
	if p.AddedOn == 0 {
		p.setAddedOn()
	}
	return p.setTagstr()
}

// String renders the project as indented JSON plus a trailing newline;
// a marshal failure is only printed, yielding an empty JSON body.
func (p Project) String() string {
	j, err := json.MarshalIndent(p, "", " ")
	if err != nil {
		fmt.Println(err)
	}
	return string(j) + "\n"
}

// setAddedOn stamps AddedOn with the current Unix time.
func (p *Project) setAddedOn() *Project {
	p.AddedOn = timestamp()
	return p
}

// setEditedOn stamps EditedOn with the current Unix time.
func (p *Project) setEditedOn() *Project {
	p.EditedOn = timestamp()
	return p
}

// timestamp returns the current Unix time in seconds.
func timestamp() int64 {
	return time.Now().Unix()
}
// Copyright The containerd Authors.
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux
// +build linux

package shim

import (
	"context"
	"fmt"

	cgroupsv2 "github.com/containerd/cgroups/v2"
	"github.com/containerd/containerd/runtime"
	"github.com/containerd/containerd/runtime/v2/shim"
	"github.com/sirupsen/logrus"
)

// newOOMv2Poller returns an implementation that listens to OOM events
// from a container's cgroups v2. This is copied from containerd to avoid
// having to upgrade containerd package just to get it.
func newOOMv2Poller(publisher shim.Publisher) (oomPoller, error) {
	return &watcherV2{
		itemCh:    make(chan itemV2),
		publisher: publisher,
	}, nil
}

// watcher implementation for handling OOM events from a container's cgroup.
// Per-cgroup goroutines started by add() feed itemCh; run() consumes it.
type watcherV2 struct {
	itemCh    chan itemV2
	publisher shim.Publisher
}

// itemV2 is one message on itemCh: either a cgroup event or the error
// that terminated a container's event stream.
type itemV2 struct {
	id  string
	ev  cgroupsv2.Event
	err error
}

// Close closes the watcher (no resources to release here; per-cgroup
// goroutines end when their event stream errors out — see add).
func (w *watcherV2) Close() error {
	return nil
}

// Run the loop: consume itemCh until ctx is cancelled, publishing a
// TaskOOM event whenever a container's OOM counter advances.
func (w *watcherV2) run(ctx context.Context) {
	lastOOMMap := make(map[string]uint64) // key: id, value: ev.OOM
	for {
		select {
		case <-ctx.Done():
			w.Close()
			return
		case i := <-w.itemCh:
			if i.err != nil {
				logrus.WithError(i.err).Debugf("Error listening for OOM, id: %q", i.id)
				// The stream for this container is dead; drop its counter.
				delete(lastOOMMap, i.id)
				continue
			}
			logrus.Debugf("Received OOM event, id: %q, event: %+v", i.id, i.ev)
			// Publish only when the kernel's cumulative OOM counter has
			// advanced past what we last saw for this container.
			lastOOM := lastOOMMap[i.id]
			if i.ev.OOM > lastOOM {
				if err := w.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &TaskOOM{
					ContainerID: i.id,
				}); err != nil {
					logrus.WithError(err).Error("Publish OOM event")
				}
			}
			if i.ev.OOM > 0 {
				lastOOMMap[i.id] = i.ev.OOM
			}
		}
	}
}

// Add cgroups.Cgroup to the epoll monitor. cgx must be a *cgroupsv2.Manager;
// a goroutine is started that forwards its events/errors onto itemCh.
func (w *watcherV2) add(id string, cgx any) error {
	cg, ok := cgx.(*cgroupsv2.Manager)
	if !ok {
		return fmt.Errorf("expected *cgroupsv2.Manager, got: %T", cgx)
	}
	// NOTE: containerd/cgroups/v2 does not support closing eventCh routine
	// currently. The routine shuts down when an error happens, mostly when the
	// cgroup is deleted.
	eventCh, errCh := cg.EventChan()
	go func() {
		for {
			i := itemV2{id: id}
			select {
			case ev := <-eventCh:
				i.ev = ev
				w.itemCh <- i
			case err := <-errCh:
				i.err = err
				w.itemCh <- i
				// we no longer get any event/err when we got an err
				logrus.WithError(err).Warn("error from eventChan")
				return
			}
		}
	}()
	return nil
}
// Demo service: a gin HTTP router wrapped in a go-micro web service and
// registered with a local Consul agent for service discovery.
package main

import (
	"github.com/gin-gonic/gin"
	"github.com/micro/go-plugins/registry/consul/v2"
	"github.com/micro/go-micro/v2/registry"
	"github.com/micro/go-micro/v2/web"
	"log"
	//"time"
)

// main wires the gin router into a go-micro web service, registers it in
// Consul under the name "hello-service2", and serves on :8080.
func main() {
	consulReg := consul.NewRegistry(func(op *registry.Options) {
		op.Addrs = []string{
			"127.0.0.1:8500", // Consul address; more than one may be listed
			// "192.168.1.1:8500",
		}
	})
	// Initialize the gin router.
	ginRouter := gin.Default()
	ginRouter.GET("/hello", func(ctx *gin.Context) {
		ctx.String(200, "hello consul world!")
	})
	// Register the service.
	service := web.NewService(
		web.Name("hello-service2"), // service name registered in Consul
		web.Address(":8080"),       // HTTP listen port
		web.Handler(ginRouter),     // gin router as the HTTP handler
		web.Registry(consulReg),    // Consul registry to register with
		web.Metadata(map[string]string{"data": "the first service test"}), // extra metadata stored in Consul
		//web.RegisterTTL(time.Second * 30), // TTL after which the registration expires
		//web.RegisterInterval(time.Second * 20), // how often to re-register the service
	)
	// Run the service.
	if err := service.Run(); err != nil {
		log.Println(err.Error())
	}
}
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"math/rand"
	"net/http"
	"os"
	"sort"
	"strconv"
	"time"

	uuid "github.com/satori/go.uuid"

	"github.com/gin-gonic/gin"
	"github.com/gorilla/websocket"
	_ "github.com/heroku/x/hmetrics/onload"
)

var positions [nColumns * nRows]Pos // convenience for getting Pos of board index

var wsupgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}

// randSelect gets n random values from slice (mutates input slice).
// (Shuffles whole slice, so not ideal for large slice.)
func randSelect(n int, candidates []int) []int {
	rand.Shuffle(len(candidates), func(i, j int) {
		candidates[i], candidates[j] = candidates[j], candidates[i]
	})
	if n > len(candidates) {
		return candidates
	}
	return candidates[:n]
}

// processMessage handles one websocket message of the form "<event> <payload>"
// sent by player (black or white) in match, then pushes the updated state to
// the affected connection(s).
func processMessage(msg []byte, match *Match, player string) {
	currentRound := match.Round
	var event string
	// Split on the FIRST space only: everything before it is the event name,
	// everything after it is the payload. (BUG FIX: the old loop kept
	// scanning after re-slicing msg, so a payload containing spaces could be
	// mis-split and the wrong event name extracted.)
	for idx := 0; idx < len(msg); idx++ {
		if msg[idx] == ' ' {
			event = string(msg[:idx])
			msg = msg[idx+1:]
			break
		}
	}
	if event == "ping" {
		// used for keep alive (heroku timesout connections with no activity for 55 seconds)
		// Needn't send response to keep connection alive as long as one side of connection is active
		return
	}
	match.Mutex.Lock()
	notifyOpponent, newTurn := match.processEvent(event, player, msg)
	// processConnection serializes the current match state (plus one player's
	// private state) and writes it to that player's connection, if open.
	processConnection := func(conn *websocket.Conn, color string, private *PrivateState, newTurn bool) {
		turnElapsed := time.Now().UnixNano() - match.LastMoveTime
		remainingTurnTime := (match.TurnTimer - turnElapsed) / 1000000
		if conn != nil {
			response := gin.H{
				"turnRemainingMilliseconds": remainingTurnTime,
				"color":          color,
				"board":          match.Board.Pieces,
				"boardStatus":    match.SquareStatuses,
				"private":        private,
				"turn":           match.Turn,
				"newTurn":        newTurn,
				"winner":         match.Winner,
				"round":          match.Round,
				"newRound":       match.Round > currentRound,
				"lastMoveTime":   match.LastMoveTime,
				"blackPublic":    match.BlackPublic,
				"whitePublic":    match.WhitePublic,
				"phase":          match.Phase,
				"firstTurnColor": match.FirstTurnColor,
				"log":            match.Log,
			}
			bytes, err := json.Marshal(response)
			if err != nil {
				fmt.Printf("Error JSON encoding state: %+v", err)
			}
			err = conn.WriteMessage(websocket.TextMessage, bytes)
			if err != nil {
				if !websocket.IsCloseError(err) {
					fmt.Printf("Error writing message to %+v connection: %+v", color, err)
				}
			}
		}
	}
	// Always update the acting player; update the opponent only when the
	// event changed state they can observe.
	if player == black {
		processConnection(match.BlackConn, black, &match.BlackPrivate, newTurn)
		if notifyOpponent {
			processConnection(match.WhiteConn, white, &match.WhitePrivate, newTurn)
		}
	} else {
		processConnection(match.WhiteConn, white, &match.WhitePrivate, newTurn)
		if notifyOpponent {
			processConnection(match.BlackConn, black, &match.BlackPrivate, newTurn)
		}
	}
	match.Mutex.Unlock()
}

// fmtDuration renders a duration as "H hrs M min", omitting hours when zero.
func fmtDuration(d time.Duration) string {
	d = d.Round(time.Minute)
	h := d / time.Hour
	d -= h * time.Hour
	m := d / time.Minute
	s := ""
	if h > 0 {
		s = strconv.Itoa(int(h)) + " hrs "
	}
	s += strconv.Itoa(int(m)) + " min"
	return s
}

// NewMatchMap returns an empty, ready-to-use MatchMap.
func NewMatchMap() *MatchMap {
	return &MatchMap{
		internal: make(map[string]*Match),
	}
}

// Load returns the match stored under key and whether it exists.
func (mm *MatchMap) Load(key string) (*Match, bool) {
	mm.RLock()
	result, ok := mm.internal[key]
	mm.RUnlock()
	return result, ok
}

// Delete removes the match stored under key.
func (mm *MatchMap) Delete(key string) {
	mm.Lock()
	delete(mm.internal, key)
	mm.Unlock()
}

// Store saves the match under its name.
func (mm *MatchMap) Store(match *Match) {
	mm.Lock()
	mm.internal[match.Name] = match
	mm.Unlock()
}

// NewUserMap returns an empty, ready-to-use UserMap.
func NewUserMap() *UserMap {
	return &UserMap{
		internal: make(map[string]bool),
	}
}

// Exists reports whether key is a known user ID.
func (um *UserMap) Exists(key string) bool {
	um.RLock()
	_, ok := um.internal[key]
	um.RUnlock()
	return ok
}

// Delete removes the user ID.
func (um *UserMap) Delete(key string) {
	um.Lock()
	delete(um.internal, key)
	um.Unlock()
}

// Store records the user ID as known.
func (um *UserMap) Store(userID string) {
	um.Lock()
	um.internal[userID] = true
	um.Unlock()
}

var userNumber = 1 // used for default user names

// returns userID (which may be new if argument does not exist); when a new
// user is created, identifying cookies are set on the response.
func validateUser(c *gin.Context, userID string, userName string, users *UserMap) (string, string, error) {
	users.Lock()
	if !users.internal[userID] {
		u2, err := uuid.NewV4()
		if err != nil {
			users.Unlock()
			return "", "", err
		}
		userID = u2.String()
		const tenYears = 10 * 365 * 24 * 60 * 60
		c.SetCookie("user_id", userID, tenYears, "/", "", false, false)
		userName = strconv.Itoa(userNumber)
		c.SetCookie("user_name", strconv.Itoa(userNumber), tenYears, "/", "", false, false)
		userNumber++
	}
	users.internal[userID] = true
	users.Unlock()
	return userID, userName, nil
}

// createMatch creates and registers a new match for the requesting user and
// returns its generated name. It also garbage-collects finished/timed-out
// matches and enforces the concurrent-match limit.
func createMatch(c *gin.Context, liveMatches *MatchMap, users *UserMap) (string, error) {
	userID, err := c.Cookie("user_id")
	userName, _ := c.Cookie("user_name")
	userID, userName, err = validateUser(c, userID, userName, users)
	if err != nil {
		return "", err
	}
	name := adjectives[rand.Intn(len(adjectives))] + "-" + animals[rand.Intn(len(animals))]
	liveMatches.Lock()
	// if name collision with existing match, randomly generate new names until finding one that's not in use
	// (not ideal, but this is partly why we limit number of active matches)
	// BUG FIX: the old loop never re-tested the freshly generated name, so a
	// single collision spun forever.
	for {
		if _, taken := liveMatches.internal[name]; !taken {
			break
		}
		name = adjectives[rand.Intn(len(adjectives))] + "-" + animals[rand.Intn(len(animals))]
	}
	match := &Match{
		Name:          name,
		WhitePlayerID: userID,
		BlackPlayerID: userID, // NOTE(review): black is also assigned to the creator here — confirm intended, since IsBlackOpen() gates the public match list
		CreatorName:   userName,
		TurnTimer:     turnTimer,
		Phase:         readyUpPhase,
		Round:         0, // when incrementing from 0, will sound new round fanfare
	}
	if c.Query("dev") == "true" {
		match.TurnTimer = turnTimerDev
		match.DevMode = true
		match.Phase = kingPlacementPhase
		match.Round = 1
		match.LastMoveTime = time.Now().UnixNano()
	}
	if c.Query("ai") == "true" {
		match.BlackAI = true
		match.Phase = kingPlacementPhase
		match.Round = 1
		match.LastMoveTime = time.Now().UnixNano()
	}
	// clean up any dead or timedout matches
	for staleName, staleMatch := range liveMatches.internal {
		exceededTimeout := time.Now().UnixNano() > staleMatch.LastMoveTime+matchTimeout
		if staleMatch.Phase == gameoverPhase || exceededTimeout {
			staleMatch.Mutex.Lock()
			delete(liveMatches.internal, staleName)
			// BUG FIX: the match mutex was previously never released here,
			// permanently blocking any goroutine still using the match.
			staleMatch.Mutex.Unlock()
		}
	}
	nMatches := len(liveMatches.internal)
	liveMatches.Unlock()
	if nMatches >= maxConcurrentMatches {
		c.String(http.StatusInternalServerError, "Cannot create match. Server currently at max number of matches.")
		return "", errors.New("At max matches. Cannot create an additional match.")
	}
	initMatch(match)
	liveMatches.Store(match)
	return match.Name, nil
}

// timeTrack logs the time elapsed since start, labelled with name.
func timeTrack(start time.Time, name string) {
	elapsed := time.Since(start)
	log.Printf("%s took %s\n", name, elapsed)
}

func main() {
	rand.Seed(time.Now().UnixNano())

	port := os.Getenv("PORT")
	if port == "" {
		log.Fatal("$PORT must be set")
	}

	sort.Slice(allCards, func(i, j int) bool { return allCards[i].Rank < allCards[j].Rank })
	if len(allCards) == 0 {
		panic("allCards should not be empty")
	}
	// Build cardRankCount: for each rank, the index of the first card with a
	// Rank >= that rank (allCards is sorted by Rank above); the final append
	// records len(allCards) as the closing boundary.
	rank := 0
	i := 0
	for i < len(allCards) {
		card := allCards[i]
		if card.Rank > rank {
			cardRankCount = append(cardRankCount, i)
			rank++
		} else {
			i++
		}
	}
	cardRankCount = append(cardRankCount, i)
	fmt.Println("cardRankCount", cardRankCount)

	liveMatches := NewMatchMap()

	// Precompute the Pos of every board index, row-major.
	{
		x := 0
		y := 0
		for i := range positions {
			positions[i] = Pos{x, y}
			x++
			if x == nColumns {
				x = 0
				y++
			}
		}
	}

	users := NewUserMap()

	router := gin.New()
	router.Use(gin.Logger())
	router.LoadHTMLGlob("templates/*.tmpl")
	router.Static("/static", "static")

	// Home page: open matches plus the requesting user's own matches.
	router.GET("/", func(c *gin.Context) {
		userID, err := c.Cookie("user_id")
		userName, _ := c.Cookie("user_name")
		userID, userName, err = validateUser(c, userID, userName, users)
		if err != nil {
			fmt.Printf("Error generating UUIDv4: %s", err)
			return
		}
		fmt.Printf("User id: %s User name: %s \n", userID, userName)
		now := time.Now()
		type match struct {
			Name        string
			CreatorName string
			StartTime   int64
			Elapsed     string
			Color       string
		}
		liveMatches.Lock()
		matches := []match{}
		playerMatches := []match{}
		for _, m := range liveMatches.internal {
			elapsed := fmtDuration(now.Sub(time.Unix(0, m.StartTime)))
			if m.IsBlackOpen() && m.DevMode == false {
				matches = append(matches, match{m.Name, m.CreatorName, m.StartTime, elapsed, none})
			}
			if m.BlackPlayerID == userID {
				playerMatches = append(playerMatches, match{m.Name, m.CreatorName, m.StartTime, elapsed, black})
			} else if m.WhitePlayerID == userID {
				playerMatches = append(playerMatches, match{m.Name, m.CreatorName, m.StartTime, elapsed, white})
			}
		}
		sort.Slice(matches, func(i, j int) bool { return matches[i].StartTime > matches[j].StartTime })
		liveMatches.Unlock()
		c.HTML(http.StatusOK, "home.tmpl", struct {
			ID            string
			Name          string
			Matches       []match
			PlayerMatches []match
		}{userID, userName, matches, playerMatches})
	})

	router.GET("/guide", func(c *gin.Context) {
		c.HTML(http.StatusOK, "guide.tmpl", nil)
	})

	router.GET("/createMatch", func(c *gin.Context) {
		name, err := createMatch(c, liveMatches, users)
		if err != nil {
			// BUG FIX: err.Error() was previously passed as a Printf format
			// string (go vet: non-constant format string).
			fmt.Println(err.Error())
			return
		}
		c.Redirect(http.StatusSeeOther, "/match/"+name+"/white")
	})

	router.GET("/dev", func(c *gin.Context) {
		name, err := createMatch(c, liveMatches, users)
		if err != nil {
			// BUG FIX: err.Error() was previously passed as a Printf format
			// string (go vet: non-constant format string).
			fmt.Println(err.Error())
			return
		}
		c.Redirect(http.StatusSeeOther, "/dev/"+name)
	})

	router.GET("/dev/:name", func(c *gin.Context) {
		name := c.Param("name")
		c.HTML(http.StatusOK, "dev.tmpl", name)
	})

	// Join a match page as a given color.
	router.GET("/match/:name/:color", func(c *gin.Context) {
		userID, err := c.Cookie("user_id")
		userName, _ := c.Cookie("user_name")
		userID, userName, err = validateUser(c, userID, userName, users)
		if err != nil {
			fmt.Printf("Error generating UUIDv4: %s", err)
			return
		}
		name := c.Param("name")
		color := c.Param("color")
		if color != "black" && color != "white" {
			c.String(http.StatusNotFound, "Must specify black or white. Invalid match color: '%s'.", color)
			return
		}
		log.Printf("joining match: %v\n", name)
		match, ok := liveMatches.Load(name)
		// BUG FIX: the existence check must come before locking; the old code
		// locked match.Mutex first and panicked on a nil *Match when the
		// name did not exist.
		if !ok {
			c.String(http.StatusNotFound, "No match with id '%s' exists.", name)
			return
		}
		match.Mutex.Lock()
		if color == "black" {
			if match.BlackPlayerID == "" {
				match.BlackPlayerID = userID
			} else if match.BlackPlayerID != userID {
				c.String(http.StatusBadRequest,
					"Cannot join match '%s' as black. Another player is already playing that color.", name,
				)
				match.Mutex.Unlock()
				return
			}
		} else {
			if match.WhitePlayerID == "" {
				match.WhitePlayerID = userID
			} else if match.WhitePlayerID != userID {
				c.String(http.StatusBadRequest,
					"Cannot join match '%s' as white. Another player is already playing that color.", name,
				)
				match.Mutex.Unlock()
				return
			}
		}
		match.Mutex.Unlock()
		c.HTML(http.StatusOK, "index.tmpl", nil)
	})

	// Websocket endpoint for an already-joined player.
	router.GET("/ws/:name/:color", func(c *gin.Context) {
		userID, err := c.Cookie("user_id")
		userName, _ := c.Cookie("user_name")
		userID, userName, err = validateUser(c, userID, userName, users)
		if err != nil {
			fmt.Printf("Error generating UUIDv4: %s", err)
			return
		}
		name := c.Param("name")
		color := c.Param("color")
		if color != "black" && color != "white" {
			c.String(http.StatusNotFound, "Must specify black or white. Invalid match color: '%s'.", color)
			return
		}
		log.Printf("joining match: %v\n", name)
		match, ok := liveMatches.Load(name)
		if !ok {
			c.String(http.StatusNotFound, "No match with id '%s' exists.", name)
			return
		}
		match.Mutex.Lock()
		if color == "black" {
			if match.BlackPlayerID != userID {
				c.String(http.StatusBadRequest,
					"Cannot join match '%s' as black. Another player is already playing that color.", name,
				)
				match.Mutex.Unlock()
				return
			}
		} else {
			if match.WhitePlayerID != userID {
				c.String(http.StatusBadRequest,
					"Cannot join match '%s' as white. Another player is already playing that color.", name,
				)
				match.Mutex.Unlock()
				return
			}
		}
		conn, err := wsupgrader.Upgrade(c.Writer, c.Request, nil)
		if err != nil {
			fmt.Printf("Failed to set websocket upgrade: %+v", err)
			match.Mutex.Unlock()
			return
		}
		// if client is valid, we kill previous websocket to start new one
		if color == black {
			if match.BlackConn != nil {
				match.BlackConn.Close()
				fmt.Printf("Closed black connection in match '%s' ", match.Name)
			}
			match.BlackConn = conn
		} else {
			if match.WhiteConn != nil {
				match.WhiteConn.Close()
				fmt.Printf("Closed white connection in match '%s' ", match.Name)
			}
			match.WhiteConn = conn
		}
		match.Mutex.Unlock()

		// Pump messages until the connection dies.
		for {
			_, msg, err := conn.ReadMessage()
			if err != nil {
				break
			}
			processMessage(msg, match, color)
		}

		match.Mutex.Lock()
		conn.Close()
		if color == black {
			// a subsequent request may have replaced this conn, so we check
			if match.BlackConn == conn {
				match.BlackConn = nil
			}
		} else if color == white {
			if match.WhiteConn == conn {
				match.WhiteConn = nil
			}
		}
		fmt.Printf("Closed connection '%s' in match %s ", color, match.Name)
		match.Mutex.Unlock()
	})

	router.Run(":" + port)
}
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2020 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package block import ( "github.com/bitmark-inc/bitmarkd/messagebus" "github.com/bitmark-inc/logger" ) type blockstore struct { log *logger.L } // initialise the broadcaster func (blk *blockstore) initialise() error { log := logger.New("blockstore") blk.log = log log.Info("initialising…") return nil } // wait for new blocks func (blk *blockstore) Run(args interface{}, shutdown <-chan struct{}) { log := blk.log log.Info("starting…") queue := messagebus.Bus.Blockstore.Chan() loop: for { log.Debug("waiting…") select { case <-shutdown: break loop case item := <-queue: log.Infof("received: %s data: %x", item.Command, item.Parameters) blk.process(&item) } } messagebus.Bus.Blockstore.Release() } // process the received block func (blk *blockstore) process(item *messagebus.Message) { log := blk.log if 1 == len(item.Parameters) { packedBlock := item.Parameters[0] err := StoreIncoming(packedBlock, nil, RescanVerified) if nil == err { // broadcast this packedBlock to peers if the block was valid messagebus.Bus.Broadcast.Send("block", packedBlock) } else { log.Debugf("store block: %x error: %s", packedBlock, err) } } }
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package platform

import (
	"context"
	"time"

	upstartcommon "chromiumos/tast/common/upstart"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/upstart"
	"chromiumos/tast/testing"
)

func init() {
	testing.AddTest(&testing.Test{
		Func:         MLServiceBootstrap,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks that Chrome can establish a Mojo connection to ML Service",
		Contacts:     []string{"amoylan@chromium.org"},
		Attr:         []string{"group:mainline"},
		SoftwareDeps: []string{"chrome", "ml_service"},
		Pre:          chrome.LoggedIn(),
	})
}

// MLServiceBootstrap stops the ml-service upstart job, asks Chrome (via the
// autotestPrivate test API) to bootstrap a Mojo connection to ML Service,
// and then verifies the job was started by that call.
func MLServiceBootstrap(ctx context.Context, s *testing.State) {
	// The ml-service job is instanced; this instance serves Mojo requests.
	const (
		instanceParameter = "TASK"
		instance          = "mojo_service"
	)
	cr := s.PreValue().(*chrome.Chrome)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Creating test API connection failed: ", err)
	}
	const job = "ml-service"
	s.Log("Stopping ML Service daemon if it is running")
	if err = upstart.StopJob(ctx, job, upstart.WithArg(instanceParameter, instance)); err != nil {
		s.Fatalf("Failed to stop %s: %v", job, err)
	}
	s.Log("Waiting for ML Service daemon to fully stop")
	// Wait until the job reaches stop/waiting so the later start is
	// attributable to the bootstrap call below.
	if err := upstart.WaitForJobStatus(ctx, job, upstartcommon.StopGoal, upstartcommon.WaitingState,
		upstart.RejectWrongGoal, 15*time.Second, upstart.WithArg(instanceParameter, instance)); err != nil {
		s.Fatalf("Failed waiting for %v to stop: %v", job, err)
	}
	s.Log("Waiting for Chrome to complete a basic call to ML Service")
	if err = tconn.Call(ctx, nil, `tast.promisify(chrome.autotestPrivate.bootstrapMachineLearningService)`); err != nil {
		s.Fatal("Running autotestPrivate.bootstrapMachineLearningService failed: ", err)
	}
	s.Log("Checking ML Service is running")
	if err := upstart.WaitForJobStatus(ctx, job, upstartcommon.StartGoal, upstartcommon.RunningState,
		upstart.RejectWrongGoal, 15*time.Second, upstart.WithArg(instanceParameter, instance)); err != nil {
		s.Fatalf("Failed waiting for %v to start: %v", job, err)
	}
}
// If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. // Find the sum of all the multiples of 3 or 5 below 1000. package main import "fmt" func main() { fmt.Println("Searching :") const LIMIT = 1000 var sum uint = 0 var totalFound uint = 0 for number := 0; number <= LIMIT; number++ { if number % 3 == 0 || number % 5 == 0 { fmt.Println(number) sum += uint(number) totalFound += uint(1) } } fmt.Println("Found", totalFound, "with a sum of", sum) }
package main

import "fmt"

// main prints a short greeting to standard output.
func main() {
	fmt.Println("Hello, This is Deepika")
	fmt.Println("Nice to See")
	// BUG FIX: was fmt.println, which does not compile — Go identifiers are
	// case-sensitive and only the exported Println exists in package fmt.
	fmt.Println("Have a good day")
}
package e4

import "testing"

// WrapFunc wraps an error to form a chain.
//
// Instances must follow these rules:
// if argument is nil, return value must be nil
type WrapFunc func(err error) error

// Wrap forms an error chain by calling wrap functions in order
func Wrap(err error, fns ...WrapFunc) error {
	for _, wrap := range fns {
		wrapped := wrap(err)
		// a nil result from any wrap function drops the chain entirely
		if wrapped == nil {
			return nil
		}
		if _, isChained := wrapped.(Error); isChained {
			err = wrapped
		} else {
			// not already a chained Error: link it to the previous error
			err = MakeErr(wrapped, err)
		}
	}
	return err
}

// DefaultWrap wraps error with stacktrace
func DefaultWrap(err error, fns ...WrapFunc) error {
	wrapped := Wrap(err, fns...)
	if wrapped == nil || stacktraceIncluded(wrapped) {
		return wrapped
	}
	// attach a stacktrace exactly once
	return NewStacktrace()(wrapped)
}

// TestWrapFunc tests a WrapFunc instance
func TestWrapFunc(t *testing.T, fn WrapFunc) {
	if got := fn(nil); got != nil {
		t.Fatal("should return nil")
	}
}
package auth import ( "github.com/baetyl/baetyl-cloud/v2/common" "github.com/baetyl/baetyl-cloud/v2/plugin" ) type defaultAuth struct { cfg CloudConfig } func init() { plugin.RegisterFactory("defaultauth", New) } // New New func New() (plugin.Plugin, error) { var cfg CloudConfig if err := common.LoadConfig(&cfg); err != nil { return nil, err } return &defaultAuth{ cfg: cfg, }, nil } func (d *defaultAuth) Authenticate(c *common.Context) error { c.SetNamespace(d.cfg.DefaultAuth.Namespace) return nil } func (d *defaultAuth) AuthAndVerify(c *common.Context, pr *plugin.PermissionRequest) error { return nil } func (d *defaultAuth) Verify(c *common.Context, pr *plugin.PermissionRequest) error { return nil } // Close Close func (d *defaultAuth) Close() error { return nil }
package rss

import (
	"encoding/xml"
	"testing"
)

// tests holds well-formed feeds and the parse result expected from NewFeed.
// NOTE(review): only Channel.Title is asserted in TestNewFeed below; the
// remaining want fields document expectations but are not yet compared.
var tests = []struct {
	in   string
	want *Feed
}{
	{`<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:atom="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:feedburner="http://rssnamespace.org/feedburner/ext/1.0" version="2.0" xml:base="https://www.zscaler.com/">
<channel>
 <title>Research Blog</title>
 <link>https://www.zscaler.com/</link>
 <language>en</language>
 <pubDate>Wed, 04 Oct 2017 03:54:41 -0700</pubDate>
 <lastBuildDate>Fri, 06 Oct 2017 01:00:11 -0700</lastBuildDate>
 <atom10:link xmlns:atom10="http://www.w3.org/2005/Atom" rel="self" type="application/rss+xml" href="http://feeds.feedburner.com/zscaler/research" />
 <feedburner:info uri="zscaler/research" />
 <atom10:link xmlns:atom10="http://www.w3.org/2005/Atom" rel="hub" href="http://pubsubhubbub.appspot.com/" />
 <item>
  <title>Infostealer spreading through a compromised website</title>
  <link>...</link>
  <description>The Zscaler ThreatLabZ ...</description>
  <author>tdewan@zscaler.com</author>
  <pubDate>October 04, 2017</pubDate>
  <source url="https://www.zscaler.com/...">Research Blog</source>
  <feedburner:origLink>http://www.zscaler.com/...</feedburner:origLink>
 </item>
</channel>
</rss>`,
		&Feed{
			XMLName: xml.Name{Space: "", Local: "rss"},
			Channel: &Channel{
				Title: "Research Blog",
				// BUG FIX: was "https://www.zscalaer.com/" — a typo that went
				// unnoticed because Link is not (yet) compared by the test.
				Link:          "https://www.zscaler.com/",
				Links:         []string{"https://www.zscaler.com/", "", ""},
				Description:   "",
				Language:      "en",
				PubDate:       "Wed, 04 Oct 2017 03:54:41 -0700",
				LastBuildDate: "Fri, 06 Oct 2017 01:00:11 -0700",
				Items:         []Item(nil),
			},
		},
	},
}

// testsBad holds inputs that must fail to parse.
var testsBad = []struct {
	in   string
	want *Feed
}{
	{`<html></html>`, &Feed{}},
}

// TestNewFeed checks that NewFeed parses a valid feed and rejects non-RSS input.
func TestNewFeed(t *testing.T) {
	for _, test := range tests {
		buf := []byte(test.in)
		actual, err := NewFeed(buf)
		if err != nil {
			t.Fatalf("In %v: unexpected error: %s", test.in, err)
		}
		if actual.Channel.Title != test.want.Channel.Title {
			t.Errorf("Have %#v, want %#v", actual, test.want)
		}
	}
	for _, test := range testsBad {
		buf := []byte(test.in)
		_, err := NewFeed(buf)
		// BUG FIX: previously a nil error slipped through silently, so the
		// test could pass even when bad input was accepted.
		if err == nil {
			t.Errorf("In %v: expected a parse error, got nil", test.in)
			continue
		}
		if err.Error() != "expected element type <rss> but have <html>" {
			t.Errorf("%s", err)
		}
	}
}
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package bench

import "testing"

// BenchmarkGrantRole measures the round trips needed to execute GRANT <role>
// statements; each case runs its setup, benchmarks stmt, then runs reset.
func BenchmarkGrantRole(b *testing.B) {
	tests := []RoundTripBenchTestCase{
		{
			name:  "grant 1 role",
			setup: `CREATE ROLE a; CREATE ROLE b;`,
			stmt:  "GRANT a TO b",
			reset: "DROP ROLE a,b",
		},
		{
			name:  "grant 2 roles",
			setup: `CREATE ROLE a; CREATE ROLE b; CREATE ROLE c;`,
			stmt:  "GRANT a,b TO c",
			reset: "DROP ROLE a,b,c",
		},
	}

	RunRoundTripBenchmark(b, tests)
}

// BenchmarkRevokeRole measures the round trips needed to execute
// REVOKE <role> statements; setup pre-grants the roles being revoked.
func BenchmarkRevokeRole(b *testing.B) {
	tests := []RoundTripBenchTestCase{
		{
			name:  "revoke 1 role",
			setup: `CREATE ROLE a; CREATE ROLE b; GRANT a TO b`,
			stmt:  "REVOKE a FROM b",
			reset: "DROP ROLE a,b",
		},
		{
			name:  "revoke 2 roles",
			setup: `CREATE ROLE a; CREATE ROLE b; CREATE ROLE c; GRANT a,b TO c;`,
			stmt:  "REVOKE a,b FROM c",
			reset: "DROP ROLE a,b,c",
		},
	}

	RunRoundTripBenchmark(b, tests)
}
package backend

import (
	"github.com/leachim2k/go-shorten/pkg/dataservice/interfaces"
	"math/rand"
	"sync"
	"time"
)

// backend is an in-memory implementation of interfaces.Backend. Entities are
// keyed by short code; stats are keyed by shortener ID. All access is guarded
// by mutex, so the backend is safe for concurrent use.
type backend struct {
	mutex       sync.RWMutex
	entityCache map[string]interfaces.Entity
	statCache   map[int][]*interfaces.StatEntity
}

// All is not implemented for the in-memory backend.
func (m *backend) All(owner string) (*[]*interfaces.Entity, error) {
	// TODO: implement me
	panic("implement me")
}

// NewInmemoryBackend creates an empty in-memory backend.
func NewInmemoryBackend() interfaces.Backend {
	rand.Seed(time.Now().UnixNano())
	return &backend{
		entityCache: map[string]interfaces.Entity{},
		statCache:   map[int][]*interfaces.StatEntity{},
	}
}

// Create stores a new entity under request.Code with a random ID.
// NOTE(review): request.Owner and request.Link are dereferenced without nil
// checks — confirm the caller guarantees they are set.
func (m *backend) Create(request interfaces.CreateRequest) (*interfaces.Entity, error) {
	entity := interfaces.Entity{
		ID:         rand.Int(),
		Owner:      *request.Owner,
		Link:       *request.Link,
		Code:       request.Code,
		Count:      0,
		MaxCount:   request.MaxCount,
		CreatedAt:  time.Now(),
		UpdatedAt:  time.Now(),
		StartTime:  request.StartTime,
		ExpiresAt:  request.ExpiresAt,
		Attributes: request.Attributes,
	}
	m.mutex.Lock()
	m.entityCache[request.Code] = entity
	m.mutex.Unlock()
	return &entity, nil
}

// CreateStat appends one access record for the given shortener ID.
func (m *backend) CreateStat(shortenerId int, clientIp string, userAgent string, referer string) (*interfaces.StatEntity, error) {
	entity := interfaces.StatEntity{
		ShortenerID: shortenerId,
		ClientIP:    clientIp,
		UserAgent:   userAgent,
		Referer:     referer,
		CreatedAt:   time.Now(),
	}
	m.mutex.Lock()
	stats, ok := m.statCache[shortenerId]
	if !ok {
		stats = make([]*interfaces.StatEntity, 0)
	}
	m.statCache[shortenerId] = append(stats, &entity)
	m.mutex.Unlock()
	return &entity, nil
}

// AllStats returns all access records for the entity stored under code, or
// (nil, nil) when either the entity or its stats are unknown.
func (m *backend) AllStats(code string) (*[]*interfaces.StatEntity, error) {
	entity, err := m.Read(code)
	if err != nil {
		return nil, err
	}
	// BUG FIX: Read returns (nil, nil) for an unknown code; the old code
	// dereferenced entity.ID unconditionally and panicked in that case.
	if entity == nil {
		return nil, nil
	}
	m.mutex.RLock()
	stats, ok := m.statCache[entity.ID]
	m.mutex.RUnlock()
	if !ok {
		return nil, nil
	}
	return &stats, nil
}

// Read returns the entity stored under code, or (nil, nil) when not found.
func (m *backend) Read(code string) (*interfaces.Entity, error) {
	m.mutex.RLock()
	entity, ok := m.entityCache[code]
	m.mutex.RUnlock()
	if !ok {
		return nil, nil
	}
	return &entity, nil
}

// Update overwrites the stored entity keyed by entity.Code.
func (m *backend) Update(entity *interfaces.Entity) (*interfaces.Entity, error) {
	m.mutex.Lock()
	m.entityCache[entity.Code] = *entity
	m.mutex.Unlock()
	return entity, nil
}

// Delete removes the entity stored under code (owner is unused here).
func (m *backend) Delete(owner string, code string) error {
	m.mutex.Lock()
	delete(m.entityCache, code)
	m.mutex.Unlock()
	return nil
}
package provider type combinedProvider struct { status func() (string, error) } func init() { registry.Add("combined", NewCombinedFromConfig) registry.Add("openwb", NewCombinedFromConfig) } // NewCombinedFromConfig creates combined provider func NewCombinedFromConfig(other map[string]interface{}) (Provider, error) { status, err := NewOpenWBStatusProviderFromConfig(other) if err != nil { return nil, err } o := &combinedProvider{status: status} return o, nil } func (o *combinedProvider) StringGetter() func() (string, error) { return func() (string, error) { return o.status() } }
package classfile

import "fmt"

// Supported class-file major versions: 45 (JDK 1.1) through 52 (Java 8).
const leftBoundOfMajorVersion = 45
const rightBoundOfMajorVersion = 52

// Every class file starts with the magic number 0xCAFEBABE.
const classFileMagic = 0xCAFEBABE

// ClassFile is the parsed in-memory form of a JVM .class file; field order
// mirrors the on-disk layout defined by the JVM specification.
type ClassFile struct {
	magic        uint32
	minorVersion uint16
	majorVersion uint16
	constantPool ConstantPool
	accessFlags  uint16
	thisClass    uint16 // constant-pool index of this class
	superClass   uint16 // constant-pool index of the superclass (0 for Object)
	interfaces   []uint16
	fields       []*MemberInfo
	methods      []*MemberInfo
	attributes   []AttributeInfo
}

// Parse reads a ClassFile from raw class bytes. Internal readers signal
// malformed input via panic; the deferred recover converts that into err.
func Parse(classData []byte) (cf *ClassFile, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			err, ok = r.(error)
			if !ok {
				// non-error panic values are converted to an error
				err = fmt.Errorf("%v", r)
			}
		}
	}()
	cr := &ClassReader{classData}
	cf = &ClassFile{}
	cf.read(cr)
	return
}

// read consumes the class-file sections in their mandatory on-disk order.
func (cf *ClassFile) read(reader *ClassReader) {
	cf.readAndCheckMagic(reader)
	cf.readAndCheckVersion(reader)
	cf.constantPool = readConstantPool(reader)
	cf.accessFlags = reader.readUint16()
	cf.thisClass = reader.readUint16()
	cf.superClass = reader.readUint16()
	cf.interfaces = reader.readUint16s()
	cf.fields = readMembers(reader, cf.constantPool)
	cf.methods = readMembers(reader, cf.constantPool)
	cf.attributes = readAttributes(reader, cf.constantPool)
}

// readAndCheckMagic reads the leading 4 bytes and panics unless they are
// 0xCAFEBABE.
func (cf *ClassFile) readAndCheckMagic(reader *ClassReader) {
	magic := reader.readUint32()
	if magic != classFileMagic {
		// todo: change this to a thrown exception later
		panic("java.lang.ClassFormatError: magic! ")
	}
	cf.magic = magic
}

// readAndCheckVersion reads the minor/major version pair and panics when the
// version is outside the supported range (45, or 46..52 with minor == 0).
func (cf *ClassFile) readAndCheckVersion(reader *ClassReader) {
	cf.minorVersion = reader.readUint16()
	cf.majorVersion = reader.readUint16()
	if cf.majorVersion == leftBoundOfMajorVersion {
		return
	}
	if leftBoundOfMajorVersion < cf.majorVersion && cf.majorVersion <= rightBoundOfMajorVersion {
		if cf.minorVersion == 0 {
			return
		}
	}
	// todo: change this to a thrown exception later
	panic("java.lang.UnsupportClassVersionError! ")
}

// MajorVersion returns the class file's major version.
func (cf *ClassFile) MajorVersion() uint16 {
	return cf.majorVersion
}

// MinorVersion returns the class file's minor version.
func (cf *ClassFile) MinorVersion() uint16 {
	return cf.minorVersion
}

// ConstantPool returns the parsed constant pool.
func (cf *ClassFile) ConstantPool() ConstantPool {
	return cf.constantPool
}

// AccessFlags returns the class access flags bit mask.
func (cf *ClassFile) AccessFlags() uint16 {
	return cf.accessFlags
}

// ClassName resolves this class's name from the constant pool.
func (cf *ClassFile) ClassName() string {
	return cf.constantPool.getClassName(cf.thisClass)
}

// SuperClassName resolves the superclass name, or "" when there is none.
func (cf *ClassFile) SuperClassName() string {
	if cf.superClass > 0 {
		return cf.constantPool.getClassName(cf.superClass)
	}
	// only java.lang.Object has no superclass
	return ""
}

// InterfaceNames resolves the names of all directly implemented interfaces.
func (cf *ClassFile) InterfaceNames() []string {
	interfaceNames := make([]string, len(cf.interfaces))
	for i, cpIndex := range cf.interfaces {
		interfaceNames[i] = cf.constantPool.getClassName(cpIndex)
	}
	return interfaceNames
}

// Fields returns the class's declared fields.
func (cf *ClassFile) Fields() []*MemberInfo {
	return cf.fields
}

// Methods returns the class's declared methods.
func (cf *ClassFile) Methods() []*MemberInfo {
	return cf.methods
}

// SourceFileAttribute returns the SourceFile attribute, or nil if absent.
func (cf *ClassFile) SourceFileAttribute() *SourceFileAttribute {
	for _, attrInfo := range cf.attributes {
		switch attrType := attrInfo.(type) {
		case *SourceFileAttribute:
			return attrType
		}
	}
	return nil
}
package testutils

import (
	"testing"

	plugin_v1 "github.com/cyberark/secretless-broker/internal/plugin/v1"
	"github.com/stretchr/testify/assert"
)

// CanProvideTestCase captures a test case where a provider is expected to return a value
// and no error
type CanProvideTestCase struct {
	Description   string
	ID            string
	ExpectedValue string
}

// CanProvide calls GetValues on the provider and ensures that the provider response for
// the given id has the expected value and no error
func CanProvide(provider plugin_v1.Provider, id string, expectedValue string) func(t *testing.T) {
	return func(t *testing.T) {
		values, err := provider.GetValues(id)

		assert.NoError(t, values[id].Error)
		assert.NoError(t, err)

		value := values[id]
		assertGoodProviderResponse(value, expectedValue, t)
	}
}

// CanProvideMultiple calls GetValues on the provider and ensures that the provider's
// responses for the each id match the expected value and there are no errors. It also
// duplicates some ids to ensure GetValues can handle multiple instances of the same id
func CanProvideMultiple(
	provider plugin_v1.Provider,
	expectedStringValueByID map[string]string,
) func(t *testing.T) {
	return func(t *testing.T) {
		ids := make([]string, 0, len(expectedStringValueByID)*2)
		// BUG FIX: a fresh empty map previously shadowed the
		// expectedStringValueByID parameter here, so this loop never ran,
		// ids stayed empty and the whole test passed vacuously.
		for id := range expectedStringValueByID {
			// each id is added twice to exercise duplicate handling
			ids = append(ids, id)
			ids = append(ids, id)
		}

		responses, err := provider.GetValues(ids...)

		// Ensure no global error
		assert.NoError(t, err)
		// Ensure there is one response per unique id (responses is keyed by
		// id, so duplicates in ids collapse to a single entry).
		assert.Len(t, responses, len(expectedStringValueByID))

		// Ensure each id has the expected response
		for _, id := range ids {
			assertGoodProviderResponse(
				responses[id],
				expectedStringValueByID[id],
				t,
			)
		}
	}
}

// assertGoodProviderResponse asserts that a provider response has the expected string
// value and no error
func assertGoodProviderResponse(
	response plugin_v1.ProviderResponse,
	expectedValueAsStr string,
	t *testing.T,
) {
	assert.NotNil(t, response)
	assert.NoError(t, response.Error)
	assert.NotNil(t, response.Value)
	assert.Equal(t, expectedValueAsStr, string(response.Value))
}

// ReportsTestCase captures a test case where a provider is expected to return an error
type ReportsTestCase struct {
	Description       string
	ID                string
	ExpectedErrString string
}

// Reports calls GetValues on the provider and ensures that the provider response for the
// given id has the expected error and no value
func Reports(provider plugin_v1.Provider, id string, expectedErrString string) func(t *testing.T) {
	return func(t *testing.T) {
		values, err := provider.GetValues(id)

		assert.NoError(t, err)
		assert.Contains(t, values, id)
		assert.Nil(t, values[id].Value)
		assert.Error(t, values[id].Error)
		assert.EqualError(t, values[id].Error, expectedErrString)
	}
}
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. package orchestratorexplorer import ( "testing" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" ) func TestMapAPIGroupsResources(t *testing.T) { for _, tt := range []struct { name string customResources []string expected map[string][]string }{ { name: "empty crs", customResources: []string{}, expected: map[string][]string{}, }, { name: "two crs, same group", customResources: []string{"datadoghq.com/v1alpha1/datadogmetrics", "datadoghq.com/v1alpha1/watermarkpodautoscalers"}, expected: map[string][]string{ "datadoghq.com": {"datadogmetrics", "watermarkpodautoscalers"}, }, }, { name: "three crs, different groups", customResources: []string{"datadoghq.com/v1alpha1/datadogmetrics", "datadoghq.com/v1alpha1/watermarkpodautoscalers", "cilium.io/v1/ciliumendpoints"}, expected: map[string][]string{ "datadoghq.com": {"datadogmetrics", "watermarkpodautoscalers"}, "cilium.io": {"ciliumendpoints"}, }, }, } { actualGroupsResources := mapAPIGroupsResources(logr.Logger{}, tt.customResources) assert.Equal(t, tt.expected, actualGroupsResources) } }
package models

// Student is a student record as exchanged via JSON and CSV; struct tags
// define the field names used by both encodings.
type Student struct {
	Username  string `json:"username" csv:"username"`
	Name      string `json:"name" csv:"name"`
	FacultyID string `json:"tpb" csv:"tpb"`     // faculty code (serialized as "tpb")
	MajorID   string `json:"major" csv:"major"` // major code (serialized as "major")
	Email     string `json:"email" csv:"email"`
}
package syscalls import ( "github.com/lunixbochs/struc" "../models" ) func trunc(s string, length int) string { if length+1 < len(s) { return s[:length-1] + "\x00" } return s + "\x00" } func Uname(u models.Usercorn, addr uint64, un *models.Uname) uint64 { struc.Pack(u.Mem().StreamAt(addr), un) /* var utsname syscall.Utsname if err := syscall.Uname(utsname); err != nil { return 1 } */ return 0 }