text stringlengths 11 4.05M |
|---|
/*
Copyright 2018 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"testing"
"time"
"cloud.google.com/go/storage"
"github.com/go-test/deep"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1alpha1 "github.com/crossplaneio/crossplane/pkg/apis/core/v1alpha1"
"github.com/crossplaneio/crossplane/pkg/apis/gcp"
"github.com/crossplaneio/crossplane/pkg/apis/gcp/storage/v1alpha1"
gcpv1alpha1 "github.com/crossplaneio/crossplane/pkg/apis/gcp/v1alpha1"
gcpstorage "github.com/crossplaneio/crossplane/pkg/clients/gcp/storage"
gcpstoragefake "github.com/crossplaneio/crossplane/pkg/clients/gcp/storage/fake"
"github.com/crossplaneio/crossplane/pkg/test"
)
// init registers the GCP API types with the global Kubernetes scheme so the
// fake clients used throughout these tests can decode/encode Bucket objects.
// The error is deliberately ignored: registration cannot fail for these types.
func init() {
	_ = gcp.AddToScheme(scheme.Scheme)
}
// MockBucketCreateUpdater is a test double for the createupdater interface;
// each method delegates to a caller-supplied function field.
type MockBucketCreateUpdater struct {
	MockCreate func(context.Context) (reconcile.Result, error)
	MockUpdate func(context.Context, *storage.BucketAttrs) (reconcile.Result, error)
}

// newMockBucketCreateUpdater returns a mock whose create and update both
// succeed with the controller's standard requeue-on-success result.
func newMockBucketCreateUpdater() *MockBucketCreateUpdater {
	return &MockBucketCreateUpdater{
		MockUpdate: func(i context.Context, attrs *storage.BucketAttrs) (result reconcile.Result, e error) {
			return requeueOnSuccess, nil
		},
		MockCreate: func(i context.Context) (result reconcile.Result, e error) {
			return requeueOnSuccess, nil
		},
	}
}

// create delegates to MockCreate.
func (m *MockBucketCreateUpdater) create(ctx context.Context) (reconcile.Result, error) {
	return m.MockCreate(ctx)
}

// update delegates to MockUpdate.
func (m *MockBucketCreateUpdater) update(ctx context.Context, a *storage.BucketAttrs) (reconcile.Result, error) {
	return m.MockUpdate(ctx, a)
}

// Compile-time check that the mock satisfies createupdater.
var _ createupdater = &MockBucketCreateUpdater{}
// MockBucketSyncDeleter is a test double for the syncdeleter interface;
// each method delegates to a caller-supplied function field.
type MockBucketSyncDeleter struct {
	MockDelete func(context.Context) (reconcile.Result, error)
	MockSync   func(context.Context) (reconcile.Result, error)
}

// newMockBucketSyncDeleter returns a mock whose sync requeues on success and
// whose delete returns the zero (done) result.
func newMockBucketSyncDeleter() *MockBucketSyncDeleter {
	return &MockBucketSyncDeleter{
		MockSync: func(i context.Context) (result reconcile.Result, e error) {
			return requeueOnSuccess, nil
		},
		MockDelete: func(i context.Context) (result reconcile.Result, e error) {
			return result, nil
		},
	}
}

// delete delegates to MockDelete.
func (m *MockBucketSyncDeleter) delete(ctx context.Context) (reconcile.Result, error) {
	return m.MockDelete(ctx)
}

// sync delegates to MockSync.
func (m *MockBucketSyncDeleter) sync(ctx context.Context) (reconcile.Result, error) {
	return m.MockSync(ctx)
}

// Compile-time check that the mock satisfies syncdeleter.
var _ syncdeleter = &MockBucketSyncDeleter{}
// MockBucketFactory is a test double for the factory interface that produces
// syncdeleter handlers for Bucket resources.
type MockBucketFactory struct {
	MockNew func(context.Context, *v1alpha1.Bucket) (syncdeleter, error)
}

// newMockBucketFactory returns a factory whose newHandler always yields the
// given handler and error, regardless of input.
func newMockBucketFactory(rh syncdeleter, err error) *MockBucketFactory {
	return &MockBucketFactory{
		MockNew: func(i context.Context, bucket *v1alpha1.Bucket) (handler syncdeleter, e error) {
			return rh, err
		},
	}
}

// newHandler delegates to MockNew.
func (m *MockBucketFactory) newHandler(ctx context.Context, b *v1alpha1.Bucket) (syncdeleter, error) {
	return m.MockNew(ctx, b)
}
// bucket is a fluent test-fixture builder around *v1alpha1.Bucket.
// Each withX method mutates the embedded Bucket and returns the builder so
// calls can be chained; access .Bucket at the end for the built object.
type bucket struct {
	*v1alpha1.Bucket
}

// newBucket returns a builder for a Bucket in the given namespace/name with
// an empty (non-nil) finalizer list.
func newBucket(ns, name string) *bucket {
	return &bucket{Bucket: &v1alpha1.Bucket{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  ns,
			Name:       name,
			Finalizers: []string{},
		},
	}}
}

// withUID sets the object UID.
func (b *bucket) withUID(uid string) *bucket {
	b.ObjectMeta.UID = types.UID(uid)
	return b
}

// withCondition adds/updates a status condition.
func (b *bucket) withCondition(c corev1alpha1.Condition) *bucket {
	b.Status.ConditionedStatus.SetCondition(c)
	return b
}

// withFailedCondition marks the status as failed with the given reason/message.
func (b *bucket) withFailedCondition(reason, msg string) *bucket {
	b.Status.SetFailed(reason, msg)
	return b
}

// withDeleteTimestamp sets the deletion timestamp, making the object appear
// as pending deletion to the reconciler.
func (b *bucket) withDeleteTimestamp(t metav1.Time) *bucket {
	b.Bucket.ObjectMeta.DeletionTimestamp = &t
	return b
}

// withFinalizer appends a finalizer.
func (b *bucket) withFinalizer(f string) *bucket {
	b.Bucket.ObjectMeta.Finalizers = append(b.Bucket.ObjectMeta.Finalizers, f)
	return b
}

// withProvider sets the provider reference by name.
func (b *bucket) withProvider(name string) *bucket {
	b.Spec.ProviderRef = corev1.LocalObjectReference{Name: name}
	return b
}

// withReclaimPolicy sets the spec reclaim policy.
func (b *bucket) withReclaimPolicy(policy corev1alpha1.ReclaimPolicy) *bucket {
	b.Spec.ReclaimPolicy = policy
	return b
}

// withSpecRequesterPays sets the GCS requester-pays flag on the spec.
func (b *bucket) withSpecRequesterPays(rp bool) *bucket {
	b.Spec.BucketSpecAttrs.RequesterPays = rp
	return b
}
// provider is a fluent test-fixture builder around *gcpv1alpha1.Provider,
// mirroring the bucket builder above.
type provider struct {
	*gcpv1alpha1.Provider
}

// newProvider returns a builder for a Provider in the given namespace/name.
func newProvider(ns, name string) *provider {
	return &provider{Provider: &gcpv1alpha1.Provider{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
		},
	}}
}

// withCondition adds/updates a status condition (e.g. Ready or Failed).
func (p *provider) withCondition(c corev1alpha1.Condition) *provider {
	p.Status.ConditionedStatus.SetCondition(c)
	return p
}

// withSecret points the provider at a credentials secret key.
func (p *provider) withSecret(name, key string) *provider {
	p.Spec.Secret = corev1.SecretKeySelector{
		LocalObjectReference: corev1.LocalObjectReference{
			Name: name,
		},
		Key: key,
	}
	return p
}
// secret is a fluent test-fixture builder around *corev1.Secret.
type secret struct {
	*corev1.Secret
}

// newSecret returns a builder for a Secret in the given namespace/name.
func newSecret(ns, name string) *secret {
	return &secret{
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: ns,
				Name:      name,
			},
		},
	}
}

// withKeyData sets one data entry, lazily allocating the map.
func (s *secret) withKeyData(key, data string) *secret {
	if s.Data == nil {
		s.Data = make(map[string][]byte)
	}
	s.Data[key] = []byte(data)
	return s
}

// Shared namespace/name fixtures used by the tests below.
const (
	testNamespace  = "default"
	testBucketName = "testBucket"
)
// TestReconciler_Reconcile drives Reconciler.Reconcile through its top-level
// branches: object not found, client get error, handler-factory failure,
// delete path (deletion timestamp set), and the normal sync path.
func TestReconciler_Reconcile(t *testing.T) {
	ns := testNamespace
	name := testBucketName
	key := types.NamespacedName{Namespace: ns, Name: name}
	req := reconcile.Request{NamespacedName: key}
	ctx := context.TODO()
	rsDone := reconcile.Result{}
	type fields struct {
		client  client.Client
		factory factory
	}
	tests := []struct {
		name    string
		fields  fields
		wantRs  reconcile.Result
		wantErr error
		// wantObj, when non-nil, is compared against the Bucket re-fetched
		// from the fake client after Reconcile returns.
		wantObj *v1alpha1.Bucket
	}{
		{
			// Missing object: reconcile finishes without error or requeue.
			name:    "get err-not-found",
			fields:  fields{fake.NewFakeClient(), nil},
			wantRs:  rsDone,
			wantErr: nil,
		},
		{
			// Any non-NotFound get error is returned to the caller.
			name: "get error other",
			fields: fields{
				client: &test.MockClient{
					MockGet: func(context.Context, client.ObjectKey, runtime.Object) error {
						return errors.New("test-get-error")
					},
				},
				factory: nil},
			wantRs:  rsDone,
			wantErr: errors.New("test-get-error"),
		},
		{
			// Factory failure sets a Failed condition and requeues.
			name: "bucket handler error",
			fields: fields{
				client:  fake.NewFakeClient(newBucket(ns, name).withFinalizer("foo.bar").Bucket),
				factory: newMockBucketFactory(nil, errors.New("handler-factory-error")),
			},
			wantRs:  resultRequeue,
			wantErr: nil,
			wantObj: newBucket(ns, name).
				withFailedCondition(failedToGetHandler, "handler-factory-error").
				withFinalizer("foo.bar").Bucket,
		},
		{
			// A deletion timestamp routes to the handler's delete path.
			name: "reconcile delete",
			fields: fields{
				client: fake.NewFakeClient(newBucket(ns, name).
					withDeleteTimestamp(metav1.NewTime(time.Now())).Bucket),
				factory: newMockBucketFactory(newMockBucketSyncDeleter(), nil),
			},
			wantRs:  rsDone,
			wantErr: nil,
		},
		{
			// Happy path: handler sync requeues on success.
			name: "reconcile sync",
			fields: fields{
				client:  fake.NewFakeClient(newBucket(ns, name).Bucket),
				factory: newMockBucketFactory(newMockBucketSyncDeleter(), nil),
			},
			wantRs:  requeueOnSuccess,
			wantErr: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := &Reconciler{
				Client:  tt.fields.client,
				factory: tt.fields.factory,
			}
			got, err := r.Reconcile(req)
			if diff := deep.Equal(err, tt.wantErr); diff != nil {
				t.Errorf("Reconciler.Reconcile() error = %v, wantErr %v\n%s", err, tt.wantErr, diff)
				return
			}
			if diff := deep.Equal(got, tt.wantRs); diff != nil {
				t.Errorf("Reconciler.Reconcile() result = %v, wantRs %v\n%s", got, tt.wantRs, diff)
			}
			if tt.wantObj != nil {
				b := &v1alpha1.Bucket{}
				if err := r.Get(ctx, key, b); err != nil {
					t.Errorf("Reconciler.Reconcile() bucket error: %s", err)
				}
				if diff := deep.Equal(b, tt.wantObj); diff != nil {
					t.Errorf("Reconciler.Reconcile() bucket = \n%+v, wantObj \n%+v\n%s", b, tt.wantObj, diff)
				}
			}
		})
	}
}
// Test_bucketFactory_newHandler covers bucketFactory.newHandler: missing
// provider, provider not ready, missing credentials secret, malformed
// credentials JSON, and successful handler construction.
func Test_bucketFactory_newHandler(t *testing.T) {
	ctx := context.TODO()
	ns := testNamespace
	bucketName := testBucketName
	providerName := "test-provider"
	secretName := "test-secret"
	secretKey := "creds"
	// Skeleton of a GCP service-account key; the %s verbs are never filled in,
	// but the JSON shape is sufficient for google.CredentialsFromJSON.
	secretData := `{
"type": "service_account",
"project_id": "%s",
"private_key_id": "%s",
"private_key": "-----BEGIN PRIVATE KEY-----\n%s\n-----END PRIVATE KEY-----\n",
"client_email": "%s",
"client_id": "%s",
"auth_uri": "https://accounts.google.com/bucket/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "%s"}`
	type want struct {
		err error
		sd  syncdeleter
	}
	tests := []struct {
		name   string
		Client client.Client
		bucket *v1alpha1.Bucket
		want   want
	}{
		{
			name:   "err provider is not found",
			Client: fake.NewFakeClient(),
			bucket: newBucket(ns, bucketName).withProvider(providerName).Bucket,
			want: want{
				err: kerrors.NewNotFound(schema.GroupResource{
					Group:    gcpv1alpha1.Group,
					Resource: "providers"}, "test-provider"),
			},
		},
		{
			name: "provider is not ready",
			Client: fake.NewFakeClient(newProvider(ns, providerName).
				withCondition(corev1alpha1.NewCondition(corev1alpha1.Failed, "", "")).Provider),
			bucket: newBucket(ns, bucketName).withProvider("test-provider").Bucket,
			want: want{
				err: errors.Errorf("provider: %s is not ready", ns+"/test-provider"),
			},
		},
		{
			name: "provider secret is not found",
			Client: fake.NewFakeClient(newProvider(ns, providerName).
				withCondition(corev1alpha1.NewCondition(corev1alpha1.Ready, "", "")).
				withSecret(secretName, secretKey).Provider),
			bucket: newBucket(ns, bucketName).withProvider("test-provider").Bucket,
			want: want{
				err: errors.WithStack(
					errors.Errorf("cannot get provider's secret %s/%s: secrets \"%s\" not found", ns, secretName, secretName)),
			},
		},
		{
			// Secret exists but has no data under the creds key.
			name: "invalid credentials",
			Client: fake.NewFakeClient(newProvider(ns, providerName).
				withCondition(corev1alpha1.NewCondition(corev1alpha1.Ready, "", "")).
				withSecret(secretName, secretKey).Provider,
				newSecret(ns, secretName).Secret),
			bucket: newBucket(ns, bucketName).withProvider("test-provider").Bucket,
			want: want{
				err: errors.WithStack(
					errors.Errorf("cannot retrieve creds from json: unexpected end of JSON input")),
			},
		},
		{
			name: "successful",
			Client: fake.NewFakeClient(newProvider(ns, providerName).
				withCondition(corev1alpha1.NewCondition(corev1alpha1.Ready, "", "")).
				withSecret(secretName, secretKey).Provider,
				newSecret(ns, secretName).withKeyData(secretKey, secretData).Secret),
			bucket: newBucket(ns, bucketName).withUID("test-uid").withProvider("test-provider").Bucket,
			want: want{
				sd: newBucketSyncDeleter(&gcpstorage.BucketClient{BucketHandle: &storage.BucketHandle{}}, nil, nil, ""),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &bucketFactory{
				Client: tt.Client,
			}
			got, err := m.newHandler(ctx, tt.bucket)
			if diff := deep.Equal(err, tt.want.err); diff != nil {
				t.Errorf("bucketFactory.newHandler() error = \n%v, wantErr: \n%v\n%s", err, tt.want.err, diff)
				return
			}
			if diff := deep.Equal(got, tt.want.sd); diff != nil {
				t.Errorf("bucketFactory.newHandler() = \n%+v, want \n%+v\n%s", got, tt.want.sd, diff)
			}
		})
	}
}
// Test_bucketHandler_delete covers bucketSyncDeleter.delete: the retain
// policy (finalizer removed, bucket kept), successful delete, a delete
// failure (Failed condition + requeue), and deleting an already-absent
// bucket (treated as success).
func Test_bucketHandler_delete(t *testing.T) {
	ctx := context.TODO()
	ns := "default"
	bucketName := "test-bucket"
	type fields struct {
		sc  gcpstorage.Client
		cc  client.Client
		obj *v1alpha1.Bucket
	}
	type want struct {
		err error
		res reconcile.Result
		// obj is the expected post-delete state of fields.obj (mutated in place).
		obj *v1alpha1.Bucket
	}
	tests := []struct {
		name   string
		fields fields
		want   want
	}{
		{
			// Retain: only our finalizer is removed; foreign finalizers stay.
			name: "retain policy",
			fields: fields{
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimRetain).
					withFinalizer(finalizer).withFinalizer("test").Bucket,
				cc: &test.MockClient{
					MockUpdate: func(ctx context.Context, obj runtime.Object) error {
						return nil
					},
				},
			},
			want: want{
				err: nil,
				res: reconcile.Result{},
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimRetain).
					withFinalizer("test").Bucket,
			},
		},
		{
			name: "delete successful",
			fields: fields{
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimDelete).
					withFinalizer(finalizer).Bucket,
				cc: &test.MockClient{
					MockUpdate: func(ctx context.Context, obj runtime.Object) error {
						return nil
					},
				},
				sc: gcpstoragefake.NewMockBucketClient(),
			},
			want: want{
				err: nil,
				res: reconcile.Result{},
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimDelete).Bucket,
			},
		},
		{
			// GCS delete failure: finalizer kept, Failed condition set, requeue.
			name: "delete failed",
			fields: fields{
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimDelete).
					withFinalizer(finalizer).Bucket,
				cc: &test.MockClient{
					MockStatusUpdate: func(ctx context.Context, obj runtime.Object) error {
						return nil
					},
				},
				sc: &gcpstoragefake.MockBucketClient{
					MockDelete: func(ctx context.Context) error {
						return errors.New("test-delete-error")
					},
				},
			},
			want: want{
				err: nil,
				res: resultRequeue,
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimDelete).
					withFinalizer(finalizer).
					withFailedCondition(failedToDelete, "test-delete-error").
					Bucket,
			},
		},
		{
			// ErrBucketNotExist is swallowed: treated like a successful delete.
			name: "delete non-existent",
			fields: fields{
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimDelete).
					withFinalizer(finalizer).Bucket,
				cc: &test.MockClient{
					MockUpdate: func(ctx context.Context, obj runtime.Object) error { return nil },
				},
				sc: &gcpstoragefake.MockBucketClient{
					MockDelete: func(ctx context.Context) error {
						return storage.ErrBucketNotExist
					},
				},
			},
			want: want{
				err: nil,
				res: reconcile.Result{},
				obj: newBucket(ns, bucketName).withReclaimPolicy(corev1alpha1.ReclaimDelete).Bucket,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			bh := newBucketSyncDeleter(tt.fields.sc, tt.fields.cc, tt.fields.obj, "")
			got, err := bh.delete(ctx)
			if diff := deep.Equal(err, tt.want.err); diff != nil {
				t.Errorf("bucketSyncDeleter.delete() error = %v, wantErr %v\n%s", err, tt.want.err, diff)
				return
			}
			if diff := deep.Equal(got, tt.want.res); diff != nil {
				t.Errorf("bucketSyncDeleter.delete() result = %v, wantRes %v\n%s", got, tt.want.res, diff)
				return
			}
			if diff := deep.Equal(tt.fields.obj, tt.want.obj); diff != nil {
				t.Errorf("bucketSyncDeleter.delete() bucket = \n%+v, wantObj \n%+v\n%s", tt.fields.obj, tt.want.obj, diff)
				return
			}
		})
	}
}
// Test_bucketHandler_sync covers bucketSyncDeleter.sync: failure to save the
// connection secret, failure to read bucket attrs, the not-found path (which
// dispatches to createupdater.create), and the found path (dispatches to
// createupdater.update). The create/update legs themselves are mocked.
//
// Fix: the failure messages previously said "bucketSyncDeleter.delete()"
// (copy-pasted from the delete test); they now correctly name sync().
func Test_bucketHandler_sync(t *testing.T) {
	ctx := context.TODO()
	ns := testNamespace
	name := testBucketName
	type fields struct {
		sc  gcpstorage.Client
		cc  client.Client
		obj *v1alpha1.Bucket
	}
	type want struct {
		err error
		res reconcile.Result
		obj *v1alpha1.Bucket
	}
	tests := []struct {
		name   string
		fields fields
		want   want
	}{
		{
			// Saving the connection secret fails: Failed condition + requeue.
			name: "secret error",
			fields: fields{
				cc: &test.MockClient{
					MockCreate: func(ctx context.Context, obj runtime.Object) error {
						return errors.New("test-error-saving-secret")
					},
					MockStatusUpdate: func(ctx context.Context, obj runtime.Object) error { return nil },
				},
				obj: newBucket(ns, name).Bucket,
			},
			want: want{
				err: nil,
				res: resultRequeue,
				obj: newBucket(ns, name).withFailedCondition(failedToSaveSecret, "test-error-saving-secret").Bucket,
			},
		},
		{
			// Attrs retrieval fails with a generic error: Failed condition + requeue.
			name: "attrs error",
			fields: fields{
				sc: &gcpstoragefake.MockBucketClient{
					MockAttrs: func(i context.Context) (attrs *storage.BucketAttrs, e error) {
						return nil, errors.WithStack(errors.New("test-attrs-error"))
					},
				},
				cc: &test.MockClient{
					MockCreate: func(ctx context.Context, obj runtime.Object) error { return nil },
					MockStatusUpdate: func(ctx context.Context, obj runtime.Object) error {
						return nil
					},
				},
				obj: newBucket(ns, name).withUID("test-uid").Bucket,
			},
			want: want{
				err: nil,
				res: resultRequeue,
				obj: newBucket(ns, name).withUID("test-uid").withFailedCondition(failedToRetrieve, "test-attrs-error").Bucket,
			},
		},
		{
			// ErrBucketNotExist routes to the (mocked) create leg.
			name: "attrs not found (create)",
			fields: fields{
				cc: &test.MockClient{
					MockCreate: func(ctx context.Context, obj runtime.Object) error { return nil },
				},
				sc: &gcpstoragefake.MockBucketClient{
					MockAttrs: func(i context.Context) (attrs *storage.BucketAttrs, e error) {
						return nil, storage.ErrBucketNotExist
					},
				},
				obj: newBucket(ns, name).withUID("test-uid").Bucket,
			},
			want: want{
				err: nil,
				res: requeueOnSuccess,
				obj: newBucket(ns, name).withUID("test-uid").Bucket,
			},
		},
		{
			// Existing attrs route to the (mocked) update leg.
			name: "update",
			fields: fields{
				cc: &test.MockClient{
					MockCreate: func(ctx context.Context, obj runtime.Object) error { return nil },
				},
				sc: &gcpstoragefake.MockBucketClient{
					MockAttrs: func(i context.Context) (attrs *storage.BucketAttrs, e error) {
						return &storage.BucketAttrs{}, nil
					},
				},
				obj: newBucket(ns, name).withUID("test-uid").Bucket,
			},
			want: want{
				err: nil,
				res: requeueOnSuccess,
				obj: newBucket(ns, name).withUID("test-uid").Bucket,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			bh := &bucketSyncDeleter{
				createupdater: newMockBucketCreateUpdater(),
				Client:        tt.fields.sc,
				kube:          tt.fields.cc,
				object:        tt.fields.obj,
			}
			got, err := bh.sync(ctx)
			if diff := deep.Equal(err, tt.want.err); diff != nil {
				t.Errorf("bucketSyncDeleter.sync() error = %v, wantErr %v\n%s", err, tt.want.err, diff)
				return
			}
			if diff := deep.Equal(got, tt.want.res); diff != nil {
				t.Errorf("bucketSyncDeleter.sync() result = %v, wantRes %v\n%s", got, tt.want.res, diff)
				return
			}
			if diff := deep.Equal(tt.fields.obj, tt.want.obj); diff != nil {
				t.Errorf("bucketSyncDeleter.sync() bucket = \n%+v, wantObj \n%+v\n%s", tt.fields.obj, tt.want.obj, diff)
				return
			}
		})
	}
}
// Test_bucketCreateUpdater_create covers bucketCreateUpdater.create: a GCS
// create failure, a post-create attrs failure, a failure persisting the spec
// update, and the fully successful path. In all cases the finalizer is added
// before the GCS call.
func Test_bucketCreateUpdater_create(t *testing.T) {
	ctx := context.TODO()
	ns := testNamespace
	name := testBucketName
	type fields struct {
		sc        gcpstorage.Client
		kube      client.Client
		bucket    *v1alpha1.Bucket
		projectID string
	}
	type want struct {
		err    error
		res    reconcile.Result
		bucket *v1alpha1.Bucket
	}
	tests := []struct {
		name   string
		fields fields
		want   want
	}{
		{
			name: "create error",
			fields: fields{
				sc: &gcpstoragefake.MockBucketClient{
					MockCreate: func(ctx context.Context, pid string, attrs *storage.BucketAttrs) error {
						return errors.New("test-create-error")
					},
				},
				kube:   test.NewMockClient(),
				bucket: newBucket(ns, name).Bucket,
			},
			want: want{
				err: nil,
				res: resultRequeue,
				bucket: newBucket(ns, name).
					withFailedCondition(failedToCreate, "test-create-error").
					withFinalizer(finalizer).
					Bucket,
			},
		},
		{
			// Create succeeds (Ready set) but the follow-up attrs read fails.
			name: "create success, attrs error",
			fields: fields{
				sc: &gcpstoragefake.MockBucketClient{
					MockCreate: func(ctx context.Context, pid string, attrs *storage.BucketAttrs) error {
						return nil
					},
					MockAttrs: func(i context.Context) (attrs *storage.BucketAttrs, e error) {
						return nil, errors.New("test-attrs-error")
					},
				},
				kube:   test.NewMockClient(),
				bucket: newBucket(ns, name).Bucket,
			},
			want: want{
				err: nil,
				res: resultRequeue,
				bucket: newBucket(ns, name).
					withCondition(corev1alpha1.NewCondition(corev1alpha1.Ready, "", "")).
					withFailedCondition(failedToRetrieve, "test-attrs-error").
					withFinalizer(finalizer).
					Bucket,
			},
		},
		{
			// Persisting the updated spec fails: the error propagates to caller.
			name: "create success, update error",
			fields: fields{
				sc: gcpstoragefake.NewMockBucketClient(),
				kube: &test.MockClient{
					MockUpdate: func(ctx context.Context, obj runtime.Object) error {
						return errors.New("test-update-error")
					},
				},
				bucket: newBucket(ns, name).Bucket,
			},
			want: want{
				err: errors.New("test-update-error"),
				res: resultRequeue,
				bucket: newBucket(ns, name).
					withCondition(corev1alpha1.NewCondition(corev1alpha1.Ready, "", "")).
					withFinalizer(finalizer).
					Bucket,
			},
		},
		{
			name: "create success",
			fields: fields{
				sc: gcpstoragefake.NewMockBucketClient(),
				kube: &test.MockClient{
					MockUpdate:       func(ctx context.Context, obj runtime.Object) error { return nil },
					MockStatusUpdate: func(ctx context.Context, obj runtime.Object) error { return nil },
				},
				bucket: newBucket(ns, name).Bucket,
			},
			want: want{
				err: nil,
				res: requeueOnSuccess,
				bucket: newBucket(ns, name).
					withCondition(corev1alpha1.NewCondition(corev1alpha1.Ready, "", "")).
					withFinalizer(finalizer).
					Bucket,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			bh := &bucketCreateUpdater{
				Client:    tt.fields.sc,
				kube:      tt.fields.kube,
				bucket:    tt.fields.bucket,
				projectID: tt.fields.projectID,
			}
			got, err := bh.create(ctx)
			if diff := deep.Equal(err, tt.want.err); diff != nil {
				t.Errorf("bucketCreateUpdater.create() error = %v, wantErr %v\n%s", err, tt.want.err, diff)
				return
			}
			if diff := deep.Equal(got, tt.want.res); diff != nil {
				t.Errorf("bucketCreateUpdater.create() result = %v, wantRes %v\n%s", got, tt.want.res, diff)
				return
			}
			if tt.want.bucket != nil {
				if diff := deep.Equal(tt.fields.bucket, tt.want.bucket); diff != nil {
					t.Errorf("bucketCreateUpdater.create() bucket = \n%+v, wantObj \n%+v\n%s", tt.fields.bucket, tt.want.bucket, diff)
					return
				}
			}
		})
	}
}
// Test_bucketCreateUpdater_update covers bucketCreateUpdater.update: a no-op
// when spec matches the live attrs, a GCS update failure, a failure writing
// the reconciled spec back to the API server, and the successful path.
//
// Fix: the bucket-comparison failure message previously said
// "bucketSyncDeleter.delete()" (copy-pasted); it now names update().
func Test_bucketCreateUpdater_update(t *testing.T) {
	ctx := context.TODO()
	ns := testNamespace
	name := testBucketName
	type fields struct {
		sc gcpstorage.Client
		cc client.Client
		o  *v1alpha1.Bucket
	}
	type want struct {
		res reconcile.Result
		err error
		obj *v1alpha1.Bucket
	}
	tests := []struct {
		name   string
		fields fields
		attrs  *storage.BucketAttrs
		want   want
	}{
		{
			// Spec already matches live attrs: nothing to push to GCS.
			name:  "no changes",
			attrs: &storage.BucketAttrs{},
			fields: fields{
				o: newBucket(ns, name).Bucket,
			},
			want: want{
				err: nil,
				res: requeueOnSuccess,
				obj: newBucket(ns, name).Bucket,
			},
		},
		{
			// GCS update fails: Failed condition + requeue.
			name:  "update failed",
			attrs: &storage.BucketAttrs{},
			fields: fields{
				o: newBucket(ns, name).withSpecRequesterPays(true).Bucket,
				sc: &gcpstoragefake.MockBucketClient{
					MockUpdate: func(ctx context.Context, update storage.BucketAttrsToUpdate) (attrs *storage.BucketAttrs, e error) {
						return nil, errors.New("test-bucket-update-error")
					},
				},
				cc: &test.MockClient{
					MockStatusUpdate: func(ctx context.Context, obj runtime.Object) error { return nil },
				},
			},
			want: want{
				err: nil,
				res: resultRequeue,
				obj: newBucket(ns, name).withSpecRequesterPays(true).
					withFailedCondition(failedToUpdate, "test-bucket-update-error").Bucket,
			},
		},
		{
			// GCS update succeeds but writing the spec back fails: error returned.
			name:  "update back failed",
			attrs: &storage.BucketAttrs{},
			fields: fields{
				o:  newBucket(ns, name).withSpecRequesterPays(true).Bucket,
				sc: gcpstoragefake.NewMockBucketClient(),
				cc: &test.MockClient{
					MockUpdate: func(ctx context.Context, obj runtime.Object) error {
						return errors.New("test-spec-update-error")
					},
				},
			},
			want: want{
				err: errors.New("test-spec-update-error"),
				res: resultRequeue,
				obj: newBucket(ns, name).withSpecRequesterPays(false).Bucket,
			},
		},
		{
			name:  "update success",
			attrs: &storage.BucketAttrs{},
			fields: fields{
				o:  newBucket(ns, name).withSpecRequesterPays(true).Bucket,
				sc: gcpstoragefake.NewMockBucketClient(),
				cc: test.NewMockClient(),
			},
			want: want{
				err: nil,
				res: requeueOnSuccess,
				obj: newBucket(ns, name).withSpecRequesterPays(false).Bucket,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			bh := &bucketCreateUpdater{
				Client: tt.fields.sc,
				kube:   tt.fields.cc,
				bucket: tt.fields.o,
			}
			got, err := bh.update(ctx, tt.attrs)
			if diff := deep.Equal(err, tt.want.err); diff != nil {
				t.Errorf("bucketCreateUpdater.update() error = %v, wantErr %v\n%s", err, tt.want.err, diff)
				return
			}
			if diff := deep.Equal(got, tt.want.res); diff != nil {
				t.Errorf("bucketCreateUpdater.update() result = %v, wantRes %v\n%s", got, tt.want.res, diff)
				return
			}
			if diff := deep.Equal(tt.fields.o, tt.want.obj); diff != nil {
				t.Errorf("bucketCreateUpdater.update() bucket = \n%+v, wantObj \n%+v\n%s", tt.fields.o, tt.want.obj, diff)
				return
			}
		})
	}
}
|
// server.go
//
// REST APIs with Go and MySql.
//
// Usage:
//
// # run go server in the background
// $ go run server.go
package main
import (
"net/http"
_ "github.com/go-sql-driver/mysql"
"github.com/elgs/gosqljson"
"github.com/gorilla/mux"
"database/sql"
"log"
"time"
)
// Global sql.DB to access the database by all handlers
var db *sql.DB
var err error
var theCase string
func IndexHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("TeamWork!!!!\n"))
}
// getEmployeeLowEarners returns, as JSON, each employee who earns less than
// the next employee hired after them (ordered by hire date).
//
// Fix: the query error was previously discarded, so database failures were
// silently served as an empty body; they now produce a 500.
func getEmployeeLowEarners(w http.ResponseWriter, r *http.Request) {
	data, err := gosqljson.QueryDbToMapJSON(db, theCase,
		"select employe.emp_id as employeeId, "+
			"employe.emp_name as employeeName, "+
			"DATE_FORMAT(employe.hire_date, '%d-%b-%Y') as hireDate, "+
			"employe.salary as salary "+
			"from employees employe "+
			"where salary < (select salary from employees employee where employee.hire_date > employe.hire_date order by hire_date limit 1) "+
			"order by hire_date ")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Write([]byte(data))
}
// getBonusDepartment returns, as JSON, the total salary and total bonuses for
// the department identified by the {deptNo} route variable.
//
// Fixes: deptNo was previously concatenated directly into the SQL text —
// injection-prone even though the route pattern restricts it to digits — and
// the query error was discarded. The value is now passed as a bound parameter
// and failures produce a 500.
func getBonusDepartment(w http.ResponseWriter, r *http.Request) {
	deptNo := mux.Vars(r)["deptNo"]
	data, err := gosqljson.QueryDbToMapJSON(db, theCase,
		"select employe.dept_no as deptId, "+
			"sum(employe.salary) as totalSalary,"+
			"sum((employe.salary * (select sum(bonus.type) * 10 as bonus from bonuses bonus where bonus.emp_id = employe.emp_id)/100)) as totalBonuses "+
			"from employees employe "+
			"where employe.dept_no = ? "+
			"group by dept_no", deptNo)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Write([]byte(data))
}
// getEmployees returns all rows of the employees table as JSON.
//
// Fix: the query error was previously discarded; it now produces a 500.
func getEmployees(w http.ResponseWriter, r *http.Request) {
	data, err := gosqljson.QueryDbToMapJSON(db, theCase, "SELECT emp_id as employeeId, emp_name as employeeName, dept_no as deptId, salary, DATE_FORMAT(hire_date, '%d-%b-%Y') as hireDate FROM employees")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Write([]byte(data))
}
// main opens the MySQL connection shared by all handlers, registers the REST
// routes, and serves HTTP on localhost:8080 with read/write timeouts.
func main() {
	// Assigns the package-level db/err (no :=) so handlers see the connection.
	db, err = sql.Open("mysql", "root:root@/teamwork")
	theCase = "lower" // "lower", "upper", "camel" or the orignal case if this is anything other than these three
	if err != nil {
		panic(err.Error())
	}
	defer db.Close()
	// sql.Open is lazy; Ping verifies the connection actually works.
	err = db.Ping()
	if err != nil {
		panic(err.Error())
	}
	r := mux.NewRouter()
	r.HandleFunc("/", IndexHandler).Methods("GET")
	r.HandleFunc("/employees", getEmployees).Methods("GET")
	r.HandleFunc("/depts/{deptNo:[0-9]+}/bonuses", getBonusDepartment).Methods("GET")
	r.HandleFunc("/employees/lowearners", getEmployeeLowEarners).Methods("GET")
	srv := &http.Server{
		Handler: r,
		Addr:    "localhost:8080",
		// Good practice: enforce timeouts for servers you create!
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	log.Fatal(srv.ListenAndServe())
}
|
package pprof
import (
"net/http"
_ "net/http/pprof"
)
// StartPP exposes the net/http/pprof debug endpoints on localhost:6060 from a
// background goroutine. It panics (from that goroutine) if the listener
// cannot be started, e.g. when the port is already in use.
func StartPP() {
	go func() {
		if err := http.ListenAndServe("localhost:6060", nil); err != nil {
			panic(err)
		}
	}()
}
|
package main
// Product model: a catalog item serialized to/from JSON.
// NOTE(review): Inventory and Price are int16, capping both at 32767 —
// confirm that range is intentional (Price in particular has no cents field).
type Product struct {
	ID        int    `json:"id"`
	Name      string `json:"name"`
	Category  string `json:"category"`
	Inventory int16  `json:"inventory"`
	Price     int16  `json:"price"`
}

// Products array/slice composed of Product's
type Products []Product
|
package main
import "github.com/Chris-SG/BauxeBot_Go/Discord"
// main starts the Discord bot with "!" as its command prefix and blocks for
// the bot's lifetime (behavior of StartBotDiscord is defined in the
// bauxebotdiscord package).
func main() {
	bauxebotdiscord.StartBotDiscord("!")
}
|
package main
import (
"testing"
"os"
"flag"
"net/http"
"net/http/httptest"
"log"
)
// init wires the package-level loggers used by the handlers under test to
// stdout/stderr with date, time, and file:line prefixes.
//
// Fix: the log prefix was misspelled "rss-macine" → "rss-machine".
func init() {
	logger = log.New(os.Stdout, "rss-machine ", log.Ldate|log.Ltime|log.Lshortfile)
	errorsLogger = log.New(os.Stderr, "rss-machine ", log.Ldate|log.Ltime|log.Lshortfile)
}
// TestMain parses test flags before running the suite and propagates the
// suite's exit code to the process.
func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}
// TestSuccessfulParse expects parseHandler to return 200 for a valid feed URL.
// NOTE(review): this hits rss.cnn.com over the live network and will fail
// offline; consider serving a fixture feed via httptest.NewServer instead.
func TestSuccessfulParse(t *testing.T) {
	url := "http://rss.cnn.com/rss/edition.rss"
	req, _ := http.NewRequest("GET", "/parse?url="+url, nil)
	w := httptest.NewRecorder()
	parseHandler(w, req)
	if w.Code != http.StatusOK {
		// Fix: include the observed status code in the failure message.
		t.Errorf("Could not parse: got status %d, want %d", w.Code, http.StatusOK)
	}
}
// TestEmptyParse expects parseHandler to reject a missing url parameter
// with 400 Bad Request.
func TestEmptyParse(t *testing.T) {
	req, _ := http.NewRequest("GET", "/parse?url=", nil)
	w := httptest.NewRecorder()
	parseHandler(w, req)
	if w.Code != http.StatusBadRequest {
		// Fix: include the observed status code in the failure message.
		t.Errorf("Should be StatusBadRequest, got %d", w.Code)
	}
}
// TestFailedParse expects parseHandler to answer 500 for a URL that is not a
// parseable feed.
// NOTE(review): depends on the live network like TestSuccessfulParse.
func TestFailedParse(t *testing.T) {
	url := "http://rss.cnn.com/rss/editionx.rss"
	req, _ := http.NewRequest("GET", "/parse?url="+url, nil)
	w := httptest.NewRecorder()
	parseHandler(w, req)
	if w.Code != http.StatusInternalServerError {
		// Fix: include the observed status code in the failure message.
		t.Errorf("Should return StatusInternalServerError, got %d", w.Code)
	}
}
|
package api
import (
"context"
"net/http"
"google.golang.org/grpc"
"github.com/caos/zitadel/internal/api/authz"
grpc_util "github.com/caos/zitadel/internal/api/grpc"
"github.com/caos/zitadel/internal/api/grpc/server"
"github.com/caos/zitadel/internal/api/oidc"
authz_es "github.com/caos/zitadel/internal/authz/repository/eventsourcing"
"github.com/caos/zitadel/internal/config/systemdefaults"
)
// Config aggregates the configuration for the combined gRPC/gateway API.
type Config struct {
	GRPC grpc_util.Config
	OIDC oidc.OPHandlerConfig
}

// API bundles the gRPC server, its HTTP gateway, the token verifier, and the
// port they serve on.
type API struct {
	grpcServer     *grpc.Server
	gatewayHandler *server.GatewayHandler
	verifier       *authz.TokenVerifier
	serverPort     string
}
// Create builds an API from the given configuration: it starts the token
// verifier against the authz repository, then constructs the gRPC server
// (using that verifier) and the HTTP gateway handler for it.
func Create(config Config, authZ authz.Config, authZRepo *authz_es.EsRepository, sd systemdefaults.SystemDefaults) *API {
	verifier := authz.Start(authZRepo)
	return &API{
		serverPort:     config.GRPC.ServerPort,
		verifier:       verifier,
		grpcServer:     server.CreateServer(verifier, authZ, sd.DefaultLanguage),
		gatewayHandler: server.CreateGatewayHandler(config.GRPC),
	}
}
// RegisterServer registers a service on the gRPC server, exposes it through
// the HTTP gateway, and registers its auth methods with the token verifier.
func (a *API) RegisterServer(ctx context.Context, server server.Server) {
	server.RegisterServer(a.grpcServer)
	a.gatewayHandler.RegisterGateway(ctx, server)
	a.verifier.RegisterServer(server.AppName(), server.MethodPrefix(), server.AuthMethods())
}

// RegisterHandler mounts a plain HTTP handler on the gateway under prefix.
func (a *API) RegisterHandler(prefix string, handler http.Handler) {
	a.gatewayHandler.RegisterHandler(prefix, handler)
}

// Start begins serving the gRPC server and the gateway; lifetime is bound to
// ctx (shutdown semantics are defined by server.Serve / gatewayHandler.Serve).
func (a *API) Start(ctx context.Context) {
	server.Serve(ctx, a.grpcServer, a.serverPort)
	a.gatewayHandler.Serve(ctx)
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package cloudflare
import (
"context"
cf "github.com/cloudflare/cloudflare-go"
)
// MockCloudflare mocks the Cloudflarer interface
type MockCloudflare struct {
mockGetZoneID func(zoneName string) (zoneID string, err error)
mockCreateDNSRecord func(ctx context.Context, rc *cf.ResourceContainer, params cf.CreateDNSRecordParams) (cf.DNSRecord, error)
mockDeleteDNSRecord func(ctx context.Context, rc *cf.ResourceContainer, recordID string) error
mockUpdateDNSRecord func(ctx context.Context, rc *cf.ResourceContainer, params cf.UpdateDNSRecordParams) (cf.DNSRecord, error)
mockListDNSRecords func(ctx context.Context, rc *cf.ResourceContainer, params cf.ListDNSRecordsParams) ([]cf.DNSRecord, *cf.ResultInfo, error)
}
// MockAWSClient mocks the AWS client interface
type MockAWSClient struct {
	mockGetPublicHostedZoneNames func() []string
}

// GetPublicHostedZoneNames mocks AWS client method
func (a *MockAWSClient) GetPublicHostedZoneNames() []string {
	return a.mockGetPublicHostedZoneNames()
}

// ZoneIDByName mocks the getZoneID
func (c *MockCloudflare) ZoneIDByName(zoneName string) (string, error) {
	return c.mockGetZoneID(zoneName)
}

// ListDNSRecords mocks cloudflare package same method
func (c *MockCloudflare) ListDNSRecords(ctx context.Context, rc *cf.ResourceContainer, params cf.ListDNSRecordsParams) ([]cf.DNSRecord, *cf.ResultInfo, error) {
	return c.mockListDNSRecords(ctx, rc, params)
}

// CreateDNSRecord mocks cloudflare package same method
func (c *MockCloudflare) CreateDNSRecord(ctx context.Context, rc *cf.ResourceContainer, params cf.CreateDNSRecordParams) (cf.DNSRecord, error) {
	return c.mockCreateDNSRecord(ctx, rc, params)
}

// UpdateDNSRecord mocks cloudflare package same method
func (c *MockCloudflare) UpdateDNSRecord(ctx context.Context, rc *cf.ResourceContainer, params cf.UpdateDNSRecordParams) (cf.DNSRecord, error) {
	return c.mockUpdateDNSRecord(ctx, rc, params)
}

// DeleteDNSRecord mocks cloudflare package same method
func (c *MockCloudflare) DeleteDNSRecord(ctx context.Context, rc *cf.ResourceContainer, recordID string) error {
	return c.mockDeleteDNSRecord(ctx, rc, recordID)
}
|
package main
import (
"auth0-backup-tool/pkg"
"flag"
"fmt"
"gopkg.in/auth0.v3/management"
"os"
"strings"
)
// Flags holds the parsed command-line options for the backup tool.
// NOTE(review): ConfigFile is never bound to a flag in parseFlags — confirm
// whether it is dead or meant to back a missing -config-file option.
type Flags struct {
	ConfigFile     string
	ClientId       string
	ClientSecret   string
	Domain         string
	UsersFile      string
	UserAttributes string
	Connection     string
	Action         string
}

// RequiredFlags are the flag names that must be explicitly provided on the
// command line; checkNeededFlags aborts if any is missing.
var RequiredFlags = []string{
	"client-id",
	"client-secret",
	"domain",
	"action",
}
// parseFlags defines and parses the CLI flags, aborting the process (via
// checkNeededFlags) when a required flag was not explicitly set.
func parseFlags() Flags {
	flags := Flags{}
	flag.StringVar(&flags.ClientId, "client-id", "", "Client ID of an application with user management rights")
	flag.StringVar(&flags.ClientSecret, "client-secret", "", "Client secret of an application with user management rights")
	flag.StringVar(&flags.Domain, "domain", "", "Auth0 domain")
	flag.StringVar(&flags.Connection, "connection", "", "Auth0 connection ID. If it's empty a backup to all connections will be performed")
	flag.StringVar(&flags.UsersFile, "users-file", "users-export.json", "File path where to store the exported users or where to read the users to import. When export all connections this flag contains the folder name where to store all exported files")
	flag.StringVar(&flags.UserAttributes, "user-attributes", "", "List of user attributes to export. Format: attr1,attr2,attr3")
	flag.StringVar(&flags.Action, "action", "", "Action to perform. Can be 'import' or 'export'")
	flag.Parse()
	checkNeededFlags(flags)
	return flags
}
// abortWithCommandHelp prints the usage text to stderr and terminates the
// process. It never returns.
func abortWithCommandHelp() {
	fmt.Fprintf(os.Stderr, "\n\nUsage of %s:\n", os.Args[0])
	flag.PrintDefaults()
	os.Exit(2) // the same exit code flag.Parse uses
}
// checkNeededFlags verifies that every flag named in RequiredFlags was set on
// the command line, printing one message per missing flag and aborting with
// usage help if any are absent.
func checkNeededFlags(flags Flags) {
	provided := map[string]bool{}
	flag.Visit(func(f *flag.Flag) { provided[f.Name] = true })
	anyMissing := false
	for _, name := range RequiredFlags {
		if provided[name] {
			continue
		}
		fmt.Fprintf(os.Stderr, "missing required -%s argument/flag\n", name)
		anyMissing = true
	}
	if anyMissing {
		abortWithCommandHelp()
	}
}
// main wires the flags to the requested import/export action.
func main() {
	flags := parseFlags()
	// The error was previously discarded with _: a bad domain or bad
	// credentials would surface later as a confusing nil-client failure.
	manager, err := management.New(flags.Domain, flags.ClientId, flags.ClientSecret)
	if err != nil {
		fmt.Fprintf(os.Stderr, "could not create Auth0 management client: %v\n", err)
		os.Exit(1)
	}
	switch flags.Action {
	case "export":
		if flags.Connection == "" {
			// No connection selected: export every connection; UsersFile is
			// treated as a folder in this mode.
			pkg.ExportFromAllConnections(manager, flags.UsersFile)
		} else {
			pkg.ExportUsers(manager.Job, flags.Connection, strings.Split(flags.UserAttributes, ","), flags.UsersFile)
		}
	case "import":
		pkg.ImportUsers(manager.Job, flags.Connection, flags.UsersFile, false)
	default:
		fmt.Fprintf(os.Stderr, "Wrong action '%s'", flags.Action)
		abortWithCommandHelp()
	}
}
|
package integrations
import "gorm.io/gorm"
// OIDCIntegrationClient is the name of an OIDC auth mechanism client
type OIDCIntegrationClient string
// The supported OIDC auth mechanism clients
const (
	// OIDCKube identifies the Kubernetes apiserver acting as the OIDC client.
	OIDCKube OIDCIntegrationClient = "kube"
)
// OIDCIntegration is an auth mechanism that uses oidc. Spec:
// https://openid.net/specs/openid-connect-core-1_0.html
//
// The credential fields below are []byte because they are encrypted before
// being persisted by gorm; use Externalize to expose only the safe fields.
type OIDCIntegration struct {
	gorm.Model
	// The name of the auth mechanism
	Client OIDCIntegrationClient `json:"client"`
	// The id of the user that linked this auth mechanism
	UserID uint `json:"user_id"`
	// The project that this integration belongs to
	ProjectID uint `json:"project_id"`
	// ------------------------------------------------------------------
	// All fields encrypted before storage.
	// ------------------------------------------------------------------
	// The "Issuer Identifier" of the OIDC spec (16.15)
	IssuerURL []byte `json:"idp-issuer-url"`
	// The ID issued to the Relying Party
	ClientID []byte `json:"client-id"`
	// The secret issued to the Relying Party
	//
	// This is present because it used to be a required field in a kubeconfig.
	// However, because the kube apiserver acts as a Relying Party, the client
	// secret is not necessary.
	ClientSecret []byte `json:"client-secret"`
	// The CA data -- certificate check must be performed (16.17)
	CertificateAuthorityData []byte `json:"idp-certificate-authority-data"`
	// The user's JWT id token
	IDToken []byte `json:"id-token"`
	// The user's refresh token
	RefreshToken []byte `json:"refresh-token"`
}
// OIDCIntegrationExternal is a OIDCIntegration to be shared over REST.
// It deliberately omits all encrypted credential fields.
type OIDCIntegrationExternal struct {
	ID uint `json:"id"`
	// The name of the auth mechanism
	Client OIDCIntegrationClient `json:"client"`
	// The id of the user that linked this auth mechanism
	UserID uint `json:"user_id"`
	// The project that this integration belongs to
	ProjectID uint `json:"project_id"`
}
// Externalize generates an external OIDCIntegrationExternal to be shared over
// REST, copying only the non-sensitive identifying fields.
func (o *OIDCIntegration) Externalize() *OIDCIntegrationExternal {
	return &OIDCIntegrationExternal{
		ID:        o.ID,
		Client:    o.Client,
		UserID:    o.UserID,
		ProjectID: o.ProjectID,
	}
}
// ToProjectIntegration converts an OIDC integration to a project integration
// with the given category and service, tagging it with the "oidc" mechanism.
func (o *OIDCIntegration) ToProjectIntegration(
	category string,
	service IntegrationService,
) *ProjectIntegration {
	return &ProjectIntegration{
		ID:            o.ID,
		ProjectID:     o.ProjectID,
		AuthMechanism: "oidc",
		Category:      category,
		Service:       service,
	}
}
|
// Copyright 2020 apirator.io
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apimock
import (
"context"
apirator "github.com/apirator/apirator/pkg/apis/apirator/v1alpha1"
)
// markAsSuccessful transitions the APIMock to the PROVISIONED phase,
// logging the attempt and its outcome.
func (r *ReconcileAPIMock) markAsSuccessful(obj *apirator.APIMock) error {
	log.Info("Updating APIMock with status Provisioned...", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
	err := r.updateStatus(obj, apirator.PROVISIONED)
	if err != nil {
		log.Error(err, "Failed to update APIMock with Provisioned status", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
		return err
	}
	log.Info("Status Provisioned update successfully", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
	return nil
}
// markAsFailure transitions the APIMock to the ERROR phase, logging the
// attempt and its outcome.
func (r *ReconcileAPIMock) markAsFailure(obj *apirator.APIMock) error {
	log.Info("Updating APIMock with status Error...", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
	err := r.updateStatus(obj, apirator.ERROR)
	if err != nil {
		// Fixed copy/paste: this path sets the Error status, not Provisioned.
		log.Error(err, "Failed to update APIMock with Error status", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
		return err
	}
	log.Info("Status Error update successfully", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
	return nil
}
// markAsInvalidOAS transitions the APIMock to the INVALID_OAS phase (the
// OpenAPI document failed validation), logging the attempt and its outcome.
func (r *ReconcileAPIMock) markAsInvalidOAS(obj *apirator.APIMock) error {
	log.Info("Updating APIMock with status OAS invalid...", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
	err := r.updateStatus(obj, apirator.INVALID_OAS)
	if err != nil {
		log.Error(err, "Failed to update APIMock with Invalid OAS", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
		return err
	}
	log.Info("Status Invalid OAS update successfully", "APIMock.Namespace", obj.Namespace, "APIMock.Name", obj.Name)
	return nil
}
// updateStatus writes the given phase into the APIMock's status, skipping the
// API call when the phase is already current to avoid needless updates.
//
// NOTE(review): this uses client.Update on the whole object rather than the
// status subresource — confirm the CRD does not have the status subresource
// enabled, otherwise phase changes here would be silently dropped.
func (r *ReconcileAPIMock) updateStatus(obj *apirator.APIMock, status string) error {
	if obj.Status.Phase != status {
		obj.Status.Phase = status
		err := r.client.Update(context.TODO(), obj)
		if err != nil {
			return err
		}
	}
	return nil
}
|
package commands
import (
"github.com/codegangsta/cli"
//"github.com/brooklyncentral/brooklyn-cli/api/entity_policies"
"github.com/brooklyncentral/brooklyn-cli/command_metadata"
"github.com/brooklyncentral/brooklyn-cli/net"
"github.com/brooklyncentral/brooklyn-cli/scope"
)
// AddPolicy is the "add-policy" CLI command; it talks to Brooklyn through the
// injected network client.
type AddPolicy struct {
	network *net.Network
}
// NewAddPolicy constructs an AddPolicy command bound to the given network client.
func NewAddPolicy(network *net.Network) (cmd *AddPolicy) {
	return &AddPolicy{network: network}
}
// Metadata describes the command (name, usage and flags) to the CLI framework.
func (cmd *AddPolicy) Metadata() command_metadata.CommandMetadata {
	return command_metadata.CommandMetadata{
		Name:        "add-policy",
		Description: "Add a new policy",
		Usage:       "BROOKLYN_NAME [ SCOPE ] add-policy APPLICATION ENTITY POLICY_TYPE",
		Flags:       []cli.Flag{},
	}
}
// Run executes the command. Not implemented yet — intentionally a no-op.
func (cmd *AddPolicy) Run(scope scope.Scope, c *cli.Context) {
	// Todo
}
|
package maximum_flow
// Graph describes a capacitated graph as a plain edge list.
type Graph struct {
	VertexCount int
	Directed    bool
	Edges       []Edge
}
// Edge is a single capacitated edge from vertex X to vertex Y.
type Edge struct {
	X        int
	Y        int
	Capacity int
}
// AdjacencyMatrix stores edge capacities indexed as [from][to]; a zero entry
// means "no edge".
type AdjacencyMatrix [][]int
// NewAdjacencyMatrix converts g's edge list into a capacity matrix.
func NewAdjacencyMatrix(g Graph) AdjacencyMatrix {
	matrix := newEmptyAdjacencyMatrix(g.VertexCount)
	for _, e := range g.Edges {
		matrix.insertEdge(e, g.Directed)
	}
	return matrix
}
// newEmptyAdjacencyMatrix allocates an n-by-n matrix of zero capacities.
func newEmptyAdjacencyMatrix(n int) AdjacencyMatrix {
	matrix := make(AdjacencyMatrix, n)
	for i := range matrix {
		// The zero capacity is left as-is: for network-flow purposes an edge
		// with capacity 0 might as well not exist at all.
		matrix[i] = make([]int, n)
	}
	return matrix
}
// insertEdge records edge's capacity, mirroring it when the graph is
// undirected.
func (a AdjacencyMatrix) insertEdge(edge Edge, directed bool) {
	a[edge.X][edge.Y] = edge.Capacity
	if directed {
		return
	}
	a.insertEdge(Edge{X: edge.Y, Y: edge.X, Capacity: edge.Capacity}, true)
}
// BreadthFirstSearch walks the graph outward from start, invoking callback on
// every tree edge it discovers (neighbors are scanned in ascending vertex
// order). Returning true from callback aborts the traversal.
func (a AdjacencyMatrix) BreadthFirstSearch(start int, callback func(Edge) bool) {
	seen := make([]bool, len(a))
	seen[start] = true
	frontier := []int{start}
	for len(frontier) > 0 {
		from := frontier[0]
		frontier = frontier[1:]
		for to := range a {
			if seen[to] || a[from][to] == 0 {
				continue
			}
			seen[to] = true
			frontier = append(frontier, to)
			if callback(Edge{from, to, a[from][to]}) {
				return
			}
		}
	}
}
|
package middleware
import (
"log"
"net/http"
"spectra/interfaces"
"spectra/providers"
"strings"
"github.com/gin-gonic/gin"
)
// Authentication is a gin middleware that requires a well-formed
// "Authorization: Bearer <token>" header carrying a valid LOGIN_USER token.
// On success it stores the token owner's email under the "user_owner_email"
// context key; otherwise it aborts the request with 401.
func Authentication() gin.HandlerFunc {
	return func(c *gin.Context) {
		log.Println("Authentication middleware")
		header := c.GetHeader("Authorization")
		if header == "" {
			c.AbortWithStatusJSON(http.StatusUnauthorized, interfaces.ErrorResponse{
				Data:   "Token not provided",
				Status: http.StatusUnauthorized,
			})
			return
		}
		// Expect exactly two whitespace-separated fields: scheme and token.
		parts := strings.Fields(header)
		if len(parts) != 2 {
			c.AbortWithStatusJSON(http.StatusUnauthorized, interfaces.ErrorResponse{
				Data:   "Token bad formated",
				Status: http.StatusUnauthorized,
			})
			return
		}
		control := providers.AuthControl{}
		resp, validationErr := control.ValidToken(providers.ValidTokenInput{
			Token:     parts[1],
			TokenKind: "LOGIN_USER",
		})
		if validationErr.Message != "" {
			c.AbortWithStatusJSON(http.StatusUnauthorized, interfaces.ErrorResponse{
				Data:   validationErr.Message,
				Status: validationErr.StatusCode(),
			})
			return
		}
		c.Set("user_owner_email", resp.Data.Email)
	}
}
|
package http
import (
bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
"github.com/go-kratos/kratos/pkg/net/http/blademaster/binding"
)
// getNextCronJobList writes the upcoming cron job list from the package-level
// service as the JSON response.
func getNextCronJobList(ctx *bm.Context) {
	ctx.JSON(svc.GetNextCronJobList(ctx))
}
// getJobCount responds with the number of jobs owned by the creator given in
// the query string; binding failures have already written an error response.
func getJobCount(ctx *bm.Context) {
	var req struct{
		Creator string `json:"creator" form:"creator"`
	}
	if err := ctx.BindWith(&req, binding.Query); err != nil {
		// BindWith has already rendered the error to the client.
		return
	}
	ctx.JSON(svc.GetJobCount(ctx, req.Creator))
}
|
package pb
import (
"github.com/Workiva/go-datastructures/set"
)
// dt_set holds the registered inner data-type definition names.
var dt_set *set.Set
func init() {
	dt_set = set.New()
}
// addInnerDt registers an inner data-type definition name.
func addInnerDt(dt string) {
	dt_set.Add(dt)
}
// isExistInnerDt reports whether the named inner data-type definition has
// been registered.
func isExistInnerDt(dt string) bool {
	return dt_set.Exists(dt)
}
|
// Copyright 2020 The Tekton Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-github/v32/github"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
duck "knative.dev/pkg/apis/duck/v1beta1"
)
// TestUpsertCheckRun covers both halves of UpsertCheckRun — the initial
// creation and the follow-up update once a CheckRun ID annotation exists —
// and verifies the CheckRun name can be overridden via annotation.
func TestUpsertCheckRun(t *testing.T) {
	ctx := context.Background()
	output := &github.CheckRunOutput{
		Summary: github.String("foo"),
	}
	for _, tc := range []struct {
		nameAnnotation string
		wantName       string
	}{
		{
			// Empty annotation: the name falls back to the namespaced TaskRun name.
			nameAnnotation: "",
			wantName:       "default/echo-6b4fn-echo-xrxq4",
		},
		{
			nameAnnotation: "tacocat",
			wantName:       "tacocat",
		},
	} {
		t.Run(tc.nameAnnotation, func(t *testing.T) {
			mux := http.NewServeMux()
			srv := httptest.NewServer(mux)
			// Close the fake GitHub API with the subtest; previously the
			// server (its listener and client connections) leaked per case.
			defer srv.Close()
			client := github.NewClient(srv.Client())
			client.BaseURL = mustParseURL(srv.URL + "/")
			tr := taskrun("testdata/taskrun.yaml")
			tr.Annotations[key("name")] = tc.nameAnnotation
			cr := &github.CheckRun{
				Name:        github.String(tc.wantName),
				HeadSHA:     github.String("db165c3a71dc45d096aebd0f49f07ec565ad1e08"),
				ExternalID:  github.String("/apis/tekton.dev/v1beta1/namespaces/default/taskruns/echo-6b4fn-echo-xrxq4"),
				DetailsURL:  github.String("https://dashboard.dogfooding.tekton.dev/#/namespaces/default/taskruns/echo-6b4fn-echo-xrxq4"),
				Status:      github.String("completed"),
				Conclusion:  github.String("success"),
				StartedAt:   &github.Timestamp{Time: time.Date(2020, 8, 27, 15, 21, 37, 0, time.FixedZone("Z", 0))},
				CompletedAt: &github.Timestamp{Time: time.Date(2020, 8, 27, 15, 21, 46, 0, time.FixedZone("Z", 0))},
				Output:      output,
			}
			t.Run("Create", func(t *testing.T) {
				mux.HandleFunc("/repos/tektoncd/test/check-runs", validateCheckRun(t, cr))
				if _, err := UpsertCheckRun(ctx, client, tr, output); err != nil {
					t.Fatalf("UpsertCheckRun: %v", err)
				}
			})
			t.Run("Update", func(t *testing.T) {
				tr.Annotations[key("checkrun")] = "1234"
				// StartedAt isn't set on update.
				cr.StartedAt = nil
				mux.HandleFunc("/repos/tektoncd/test/check-runs/1234", validateCheckRun(t, cr))
				if _, err := UpsertCheckRun(ctx, client, tr, output); err != nil {
					t.Fatalf("UpsertCheckRun: %v", err)
				}
			})
		})
	}
}
// mustParseURL parses s, panicking on failure; use only for URLs known to be
// well-formed (e.g. httptest server addresses).
func mustParseURL(s string) *url.URL {
	parsed, err := url.Parse(s)
	if err == nil {
		return parsed
	}
	panic(fmt.Errorf("error parsing URL %s: %v", s, err))
}
// validateCheckRun returns an HTTP handler that decodes the request body as a
// github.CheckRun, diffs it against want, and echoes the decoded value back
// so the client can parse a response.
//
// The handler runs on the HTTP server's goroutine, not the test goroutine, so
// it must not call t.Fatal*/FailNow (the testing package requires FailNow to
// run from the goroutine running the test). Failures are reported with
// t.Errorf and the handler returns early instead.
func validateCheckRun(t *testing.T, want *github.CheckRun) func(rw http.ResponseWriter, r *http.Request) {
	t.Helper()
	return func(rw http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("error reading HTTP body: %v", err)
			rw.WriteHeader(http.StatusInternalServerError)
			return
		}
		got := new(github.CheckRun)
		if err := json.Unmarshal(body, got); err != nil {
			t.Errorf("error unmarshalling HTTP body: %v", err)
			rw.WriteHeader(http.StatusBadRequest)
			return
		}
		if diff := cmp.Diff(want, got); diff != "" {
			t.Errorf("-want,+got: %s", diff)
		}
		enc := json.NewEncoder(rw)
		if err := enc.Encode(got); err != nil {
			rw.WriteHeader(http.StatusInternalServerError)
			rw.Write([]byte(err.Error()))
		}
	}
}
// TestGitHubStatus maps TaskRun condition (status, reason) pairs to the
// CheckRun (status, conclusion) pair reported to GitHub.
func TestGitHubStatus(t *testing.T) {
	// Test cases pulled from https://github.com/tektoncd/pipeline/blob/master/docs/taskruns.md#monitoring-execution-status
	for _, tc := range []struct {
		condStatus corev1.ConditionStatus
		reason     string
		// completionTime marks the failure as permanent: a completed
		// (timestamped) failed TaskRun will not be retried.
		completionTime     bool
		status, conclusion string
	}{
		{
			condStatus: corev1.ConditionUnknown,
			reason:     v1beta1.TaskRunReasonStarted.String(),
			status:     CheckRunStatusInProgress,
		},
		{
			condStatus: corev1.ConditionUnknown,
			// Exists in documentation, but not Tekton const.
			reason: "Pending",
			status: CheckRunStatusQueued,
		},
		{
			condStatus: corev1.ConditionUnknown,
			reason:     v1beta1.TaskRunReasonRunning.String(),
			status:     CheckRunStatusInProgress,
		},
		{
			condStatus: corev1.ConditionUnknown,
			reason:     v1beta1.TaskRunReasonCancelled.String(),
			status:     CheckRunStatusCompleted,
			conclusion: CheckRunConclusionCancelled,
		},
		{
			condStatus: corev1.ConditionFalse,
			reason:     v1beta1.TaskRunReasonCancelled.String(),
			status:     CheckRunStatusCompleted,
			conclusion: CheckRunConclusionCancelled,
		},
		{
			condStatus: corev1.ConditionTrue,
			reason:     v1beta1.TaskRunReasonSuccessful.String(),
			status:     CheckRunStatusCompleted,
			conclusion: CheckRunConclusionSuccess,
		},
		{
			condStatus: corev1.ConditionTrue,
			reason:     v1beta1.TaskRunReasonFailed.String(),
			status:     CheckRunStatusCompleted,
			conclusion: CheckRunConclusionFailure,
		},
		{
			condStatus: corev1.ConditionFalse,
			reason:     "non-permanent error",
			status:     CheckRunStatusInProgress,
		},
		{
			condStatus: corev1.ConditionFalse,
			reason:     "permanent error",
			completionTime: true,
			status:     CheckRunStatusCompleted,
			conclusion: CheckRunConclusionActionRequired,
		},
		{
			condStatus: corev1.ConditionFalse,
			reason:     v1beta1.TaskRunReasonTimedOut.String(),
			status:     CheckRunStatusCompleted,
			conclusion: CheckRunConclusionTimeout,
		},
	} {
		t.Run(fmt.Sprintf("%s_%s", tc.condStatus, tc.reason), func(t *testing.T) {
			s := v1beta1.TaskRunStatus{
				Status: duck.Status{
					Conditions: []apis.Condition{{
						Type:   apis.ConditionSucceeded,
						Reason: tc.reason,
						Status: tc.condStatus,
					}},
				},
			}
			if tc.completionTime {
				s.TaskRunStatusFields = v1beta1.TaskRunStatusFields{
					CompletionTime: &metav1.Time{Time: time.Now()},
				}
			}
			// NOTE(review): the local `status` shadows the status() helper
			// from this statement on; harmless here, but rename if the helper
			// is ever needed again below.
			status, conclusion := status(s)
			if tc.status != status {
				t.Errorf("status: want %s, got %s", tc.status, status)
			}
			if tc.conclusion != conclusion {
				t.Errorf("conclusion: want %s, got %s", tc.conclusion, conclusion)
			}
		})
	}
}
|
package main
import (
"fmt"
"strings"
"time"
//generate a manifest and compile via rsrc -manifest test.manifest -o rsrc.syso
//then compile with go build -ldflags="-H windowsgui"
"github.com/lxn/walk"
. "github.com/lxn/walk/declarative"
//has no ability to list open com ports
//but has a better serial communicatio library
"github.com/tarm/serial"
//we only use this for the com port list function
//it can't set a timeout on serial connections so its garbage
search "go.bug.st/serial"
)
// teensy is the currently open serial connection; nil until connect succeeds.
var teensy *serial.Port
// errPort records the most recent serial open/write error.
var errPort error
// comNum appears unused in this file — TODO confirm before removing.
var comNum string
// windowMessage is what the read-only text box of the UI displays.
var windowMessage string = "Hello"
// main connects to the teensy (best effort) and runs the walk GUI: an input
// box, a read-only status box, and an UPDATE button that sends the input text
// over serial.
func main() {
	var inTE, outTE *walk.TextEdit
	var nitra string
	// Probe all COM ports for the device before showing the window.
	err := sync()
	if err != nil {
		windowMessage = "You Broke It"
	}
	MainWindow{
		Title:  "B.O.R.I.S.",
		Size:   Size{300, 200},
		Layout: VBox{},
		Children: []Widget{
			HSplitter{
				Children: []Widget{
					TextEdit{AssignTo: &inTE},
					TextEdit{AssignTo: &outTE, ReadOnly: true, Text: windowMessage},
				},
			},
			PushButton{
				Text: "UPDATE",
				OnClicked: func() {
					// NOTE(review): writes directly to the global teensy —
					// this panics if no device connected (teensy == nil);
					// writeNitra below guards that case but is not used here.
					nitra = inTE.Text()
					teensy.Write([]byte(nitra + "\n"))
					outTE.SetText(windowMessage)
				},
			},
		},
	}.Run()
}
// sync enumerates every detected COM port and attempts the PING/ACK handshake
// on each, stopping at the first port that answers. The outcome is reported
// through the windowMessage global.
func sync() (err error) {
	ports, err := search.GetPortsList()
	if err != nil {
		return err
	}
	if len(ports) == 0 {
		windowMessage = "0 COM ports detected"
		return err
	}
	for _, port := range ports {
		ok := connect(port)
		fmt.Println(port)
		if ok {
			break
		}
	}
	return err
}
// takes a com port ie COM9 and attempts to PING/ACK.
func connect(comPort string) (success bool) {
//generate the serial port configuration
c := &serial.Config{Name: comPort, Baud: 9600, ReadTimeout: time.Millisecond * 500}
teensy, errPort = serial.OpenPort(c)
if errPort != nil {
teensy = nil
return false
}
//attempt to start the handshake process with the teensy
//all communications must end with newline so the teensy knows we've stopped transmitting
c1 := make(chan error, 1)
go func() {
_, writeErr := teensy.Write([]byte("PING\n"))
c1 <- writeErr
}()
select {
case errPort = <-c1:
buff := make([]byte, 32)
incoming := ""
for {
n, err := teensy.Read(buff)
if err != nil {
windowMessage = "There was an error reading from teensy"
return false
} else if n == 0 {
//end of file
break
}
//append the characters in the buffer to the message we're trying to recieve
incoming += string(buff[:n])
//dont let another device spam us
if len(incoming) > 5 {
break
}
// If we receive a newline stop reading, this is a good thing
if strings.Contains(string(buff[:n]), "\n") {
break
}
}
incoming = strings.TrimSpace(incoming)
if incoming == "ACK" {
windowMessage = "Connected on port: " + comPort
return true
}
case <-time.After(100 * time.Millisecond):
windowMessage = "No ports responded, connection failure"
return false
}
return false
}
// writeNitra sends nitra (newline-terminated) to the connected teensy and
// reports the outcome through windowMessage.
//
// Fixes the inverted messaging in the original: the nil-device branch used to
// claim "update sent" even though nothing was written, and a successful write
// was never reported at all.
func writeNitra(nitra string) {
	if teensy == nil {
		windowMessage = "No device connected"
		return
	}
	_, err := teensy.Write([]byte(nitra + "\n"))
	if err != nil {
		windowMessage = "There was an error: " + err.Error()
		return
	}
	windowMessage = "update sent"
}
|
package utils
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestRegexMustMatch(t *testing.T) {
if !RegexMustMatch("Camera [0-9]+ Detection status (ACTIVE|PAUSE)", "Camera 0 Detection status ACTIVE") {
t.Error("not match")
}
}
// TestRegexFirstSubmatchString verifies the first capture group is returned
// for both status variants.
//
// require.Equal's signature is (t, expected, actual); the arguments were
// previously swapped, which produced misleading failure messages.
func TestRegexFirstSubmatchString(t *testing.T) {
	require.Equal(t, "ACTIVE", RegexFirstSubmatchString("Camera [0-9]+ Detection status (ACTIVE|PAUSE)", "Camera 0 Detection status ACTIVE"))
	require.Equal(t, "PAUSE", RegexFirstSubmatchString("Camera [0-9]+ Detection status (ACTIVE|PAUSE)", "Camera 0 Detection status PAUSE"))
}
// TestRegexConfigList parses a config-style blob into a key/value map,
// checking that lines starting with '#' or ';' are excluded by the pattern's
// [^;#] guard and that "(null)" values survive as literal strings.
func TestRegexConfigList(t *testing.T) {
	testString := "#comment here\n;comment here\nhello 12\nword 11\nnullparam (null)"
	testMap := RegexSubmatchTypedMap("(?m)^([^;#][a-zA-Z0-9_]+) ([a-zA-Z0-9_()]+)$", testString, nil)
	require.Equal(t, 3, len(testMap))
	require.Equal(t, "12", testMap["hello"])
	require.Equal(t, "11", testMap["word"])
	require.Equal(t, "(null)", testMap["nullparam"])
}
|
package redis
import (
"github.com/go-redis/redis"
"github.com/spf13/viper"
)
var Redis = &redis.Client{}
func InitRedis() error {
options := &redis.Options{Network:viper.GetString("network"),Addr:viper.GetString("addr")}
Redis = redis.NewClient(options)
return Redis.Ping().Err()
} |
package utils
// A sortable view over a map[uint32]string.

// Map is a single key/value pair pulled out of a map so it can be ordered.
type Map struct {
	Key   uint32
	Value string
}

// MapSort is a slice of pairs implementing sort.Interface, ordered by Key.
type MapSort []Map

// NewMapSort flattens m into a MapSort (in map-iteration order; run sort.Sort
// on the result to order it by key).
func NewMapSort(m map[uint32]string) MapSort {
	// Allocate with zero length and full capacity: the previous
	// make(MapSort, len(m)) combined with append produced len(m) zero-value
	// entries in front of the real pairs, doubling the slice with garbage.
	ms := make(MapSort, 0, len(m))
	for k, v := range m {
		ms = append(ms, Map{Key: k, Value: v})
	}
	return ms
}

// Len implements sort.Interface.
func (ms MapSort) Len() int {
	return len(ms)
}

// Less implements sort.Interface, ordering by ascending Key.
func (ms MapSort) Less(i, j int) bool {
	return ms[i].Key < ms[j].Key
}

// Swap implements sort.Interface.
func (ms MapSort) Swap(i, j int) {
	ms[i], ms[j] = ms[j], ms[i]
}
|
package plugins
import (
"frank/src/go/config"
"frank/src/go/helpers/log"
"frank/src/go/models"
"gobot.io/x/gobot/drivers/gpio"
"gobot.io/x/gobot/platforms/firmata"
)
// PluginFirmata executes actions against firmata-compatible boards.
// It is stateless; all context arrives with each action.
type PluginFirmata struct {
}
// NewPluginFirmata returns a ready-to-use firmata plugin.
func NewPluginFirmata() PluginFirmata {
	return PluginFirmata{}
}
// ExecAction resolves the action's device from configuration and drives it on
// a separate goroutine so the caller is never blocked on serial/TCP I/O.
// Note: the goroutine is fire-and-forget; errors are only logged.
func (ctx *PluginFirmata) ExecAction(action models.Action, extraText map[string]string) {
	device, err := config.GetDevice(action.DeviceName)
	if err != nil {
		log.Log.Error(err.Error())
		return
	}
	log.Log.Debugf("Interacting with device %s", device.Name)
	go FirmataHandler(action, device, extraText)
}
// FirmataHandler connects to the device (TCP or serial), drives every
// interface that matches the action, then disconnects. It is designed to run
// on its own goroutine (see ExecAction).
func FirmataHandler(action models.Action, device models.Device, extraText map[string]string) {
	var firmataA *firmata.Adaptor
	if device.Connection.Type == "tcp" {
		firmataAdaptor := firmata.NewTCPAdaptor(device.Connection.Address)
		firmataA = firmataAdaptor.Adaptor
	} else {
		firmataAdaptor := firmata.NewAdaptor(device.Connection.Address)
		firmataA = firmataAdaptor
	}
	err := firmataA.Connect()
	if err != nil {
		log.Log.Error("Could not connect to", device.Name, "at", device.Connection.Address)
		return
	}
	for _, di := range device.Interfaces {
		if len(action.MatchingInterface) > 0 {
			// Guarded assertion: a missing or mistyped "colore" entry used to
			// panic the whole goroutine; now the interface is just skipped.
			colore, ok := action.MatchingInterface["colore"].(map[string]interface{})
			if !ok {
				continue
			}
			if val, ok := colore[extraText["colore"]].(string); ok {
				if val == di.Name {
					switch di.Driver {
					case "led":
						FirmataLedInterface(firmataA, action, di)
					}
				}
			}
		} else if di.Name == action.InterfaceName {
			switch di.Driver {
			case "led":
				FirmataLedInterface(firmataA, action, di)
			}
		}
	}
	err = firmataA.Disconnect()
	if err != nil {
		log.Log.Notice("Could not disconnect from", device.Name)
		return
	}
}
// FirmataLedInterface drives a single LED pin: "on" and "off" set the state
// explicitly; any other action value toggles the current state.
func FirmataLedInterface(firmataA *firmata.Adaptor, action models.Action, deviceInterface models.DeviceInterface) {
	log.Log.Debug("Interacting with led", deviceInterface.Pin)
	led := gpio.NewLedDriver(firmataA, deviceInterface.Pin)
	switch action.Action["action"].(string) {
	case "on":
		led.On()
	case "off":
		led.Off()
	default:
		led.Toggle()
	}
}
|
package models
import (
"regexp"
"time"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
//User data model persisted in the DSDev "user" collection.
type User struct {
	ID        string    `bson:"_id,omitempty" json:"_id"`
	Email     string    `bson:"email" json:"email"`
	Password  string    `bson:"password" json:"password"` // NOTE(review): stored/serialized as-is; confirm it is hashed upstream
	CreatedAt time.Time `bson:"createdAt" json:"createdAt"`
	UpdatedAt time.Time `bson:"updatedAt" json:"updatedAt"`
}
//Save user to mongo: validates the email, password length and uniqueness,
//then inserts with fresh timestamps. Returns the list of validation/storage
//problems, or nil on success.
func (u *User) Save(mango *mgo.Session) []error {
	users := mango.DB("DSDev").C("user")
	errors := make([]error, 0)
	// Escaped the dot before the TLD: the unescaped '.' matched any
	// character, accepting addresses like "user@host!com".
	validEmail, _ := regexp.MatchString(`[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,3}`, u.Email)
	if !validEmail {
		errors = append(errors, &SaveErr{"Invalid Email Entered!"})
	}
	if len(u.Password) < 8 {
		errors = append(errors, &SaveErr{"Invalid Password Entered! Must be at least eight characters"})
	}
	// Uniqueness check: any hit on the email means the account already exists.
	var existingUser User
	err := users.Find(bson.M{"email": u.Email}).One(&existingUser)
	if err == nil {
		errors = append(errors, &SaveErr{"Email already exists"})
	}
	if len(errors) > 0 {
		return errors
	}
	u.CreatedAt = time.Now()
	u.UpdatedAt = time.Now()
	if err2 := users.Insert(u); err2 != nil {
		// Surface storage failures to the caller instead of panicking —
		// model code should not crash the process on a transient DB error.
		return []error{err2}
	}
	return nil
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// go_generics reads a Go source file and writes a new version of that file with
// a few transformations applied to each. Namely:
//
// 1. Global types can be explicitly renamed with the -t option. For example,
// if -t=A=B is passed in, all references to A will be replaced with
// references to B; a function declaration like:
//
// func f(arg *A)
//
// would be renamed to:
//
// func f(arg *B)
//
// 2. Global type definitions and their method sets will be removed when they're
// being renamed with -t. For example, if -t=A=B is passed in, the following
// definition and methods that existed in the input file wouldn't exist at
// all in the output file:
//
// type A struct{}
//
// func (*A) f() {}
//
// 3. All global types, variables, constants and functions (not methods) are
// prefixed and suffixed based on the option -prefix and -suffix arguments.
// For example, if -suffix=A is passed in, the following globals:
//
// func f()
// type t struct{}
//
// would be renamed to:
//
// func fA()
// type tA struct{}
//
// Some special tags are also modified. For example:
//
// "state:.(t)"
//
// would become:
//
// "state:.(tA)"
//
// 4. The package is renamed to the value via the -p argument.
// 5. Value of constants can be modified with -c argument.
//
// Note that not just the top-level declarations are renamed, all references to
// them are also properly renamed as well, taking into account visibility rules
// and shadowing. For example, if -suffix=A is passed in, the following:
//
// var b = 100
//
// func f() {
// g(b)
// b := 0
// g(b)
// }
//
// Would be replaced with:
//
// var bA = 100
//
// func f() {
// g(bA)
// b := 0
// g(b)
// }
//
// Note that the second call to g() kept "b" as an argument because it refers to
// the local variable "b".
//
// Note that go_generics can handle anonymous fields with renamed types if
// -anon is passed in, however it does not perform strict checking on parameter
// types that share the same name as the global type and therefore will rename
// them as well.
//
// You can see an example in the tools/go_generics/generics_tests/interface test.
package main
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"os"
"regexp"
"strings"
"gvisor.dev/gvisor/tools/go_generics/globals"
)
// Command-line configuration. The mapValue flags registered in main (-t, -c,
// -import, -in-substr, -out-substr) may each be passed multiple times.
var (
	input        = flag.String("i", "", "input `file`")
	output       = flag.String("o", "", "output `file`")
	suffix       = flag.String("suffix", "", "`suffix` to add to each global symbol")
	prefix       = flag.String("prefix", "", "`prefix` to add to each global symbol")
	packageName  = flag.String("p", "main", "output package `name`")
	printAST     = flag.Bool("ast", false, "prints the AST")
	processAnon  = flag.Bool("anon", false, "process anonymous fields")
	types        = make(mapValue)
	consts       = make(mapValue)
	imports      = make(mapValue)
	inputSubstr  = make(mapValue)
	outputSubstr = make(mapValue)
)
// mapValue implements flag.Value for flags that may be repeated — e.g.
// several "-t A=B" arguments — with each occurrence adding one key/value
// mapping.
type mapValue map[string]string
// String renders the map as comma-separated k=v pairs (order unspecified,
// since map iteration order is random).
func (m mapValue) String() string {
	var sb strings.Builder
	sep := ""
	for k, v := range m {
		sb.WriteString(sep)
		sb.WriteString(k)
		sb.WriteByte('=')
		sb.WriteString(v)
		sep = ","
	}
	return sb.String()
}
// Set parses "A=B" (splitting at the first '=') and stores the mapping,
// rejecting input that contains no '=' at all.
func (m mapValue) Set(s string) error {
	i := strings.Index(s, "=")
	if i < 0 {
		return fmt.Errorf("missing '=' from '%s'", s)
	}
	m[s[:i]] = s[i+1:]
	return nil
}
// stateTagRegexp matches against the 'typed' state tags, e.g. `state:".(T)"`,
// capturing (1) the text before the tag, (2) the type expression inside the
// parentheses, and (3) the rest of the line.
var stateTagRegexp = regexp.MustCompile(`^(.*[^a-z0-9_])state:"\.\(([^\)]*)\)"(.*)$`)
// identifierRegexp splits a type expression into (prefix, identifier, suffix)
// so that only the identifier part of the state tag gets renamed.
var identifierRegexp = regexp.MustCompile(`^(.*[^a-zA-Z_])([a-zA-Z_][a-zA-Z0-9_]*)(.*)$`)
// main drives the generics rewrite described in the file header: parse flags,
// load and parse the input file, apply input substitutions, reassign
// constants, rename global symbols (including state tags), strip remapped
// type definitions, then format and write the result with output
// substitutions applied.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
		flag.PrintDefaults()
	}
	// Repeatable map-valued flags; see mapValue.
	flag.Var(types, "t", "rename type A to B when `A=B` is passed in. Multiple such mappings are allowed.")
	flag.Var(consts, "c", "reassign constant A to value B when `A=B` is passed in. Multiple such mappings are allowed.")
	flag.Var(imports, "import", "specifies the import libraries to use when types are not local. `name=path` specifies that 'name', used in types as name.type, refers to the package living in 'path'.")
	flag.Var(inputSubstr, "in-substr", "replace input sub-string A with B when `A=B` is passed in. Multiple such mappings are allowed.")
	flag.Var(outputSubstr, "out-substr", "replace output sub-string A with B when `A=B` is passed in. Multiple such mappings are allowed.")
	flag.Parse()
	if *input == "" || *output == "" {
		flag.Usage()
		os.Exit(1)
	}
	// Parse the input file.
	fset := token.NewFileSet()
	inputBytes, err := os.ReadFile(*input)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	// Raw textual substitutions happen before parsing, so they may affect
	// any token, not just identifiers.
	for old, new := range inputSubstr {
		inputBytes = bytes.ReplaceAll(inputBytes, []byte(old), []byte(new))
	}
	f, err := parser.ParseFile(fset, *input, inputBytes, parser.ParseComments|parser.DeclarationErrors|parser.SpuriousErrors)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	// Print the AST if requested.
	if *printAST {
		ast.Print(fset, f)
	}
	// Capture comment associations now so comments belonging to removed
	// declarations can be filtered out at the end.
	cmap := ast.NewCommentMap(fset, f, f.Comments)
	// Update imports based on what's used in types and consts.
	maps := []mapValue{types, consts}
	importDecl, err := updateImports(maps, imports)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	types = maps[0]
	consts = maps[1]
	// Reassign all specified constants.
	for _, decl := range f.Decls {
		d, ok := decl.(*ast.GenDecl)
		if !ok || d.Tok != token.CONST {
			continue
		}
		for _, gs := range d.Specs {
			s := gs.(*ast.ValueSpec)
			for i, id := range s.Names {
				if n, ok := consts[id.Name]; ok {
					s.Values[i] = &ast.BasicLit{Value: n}
				}
			}
		}
	}
	// Go through all globals and their uses in the AST and rename the types
	// with explicitly provided names, and rename all types, variables,
	// consts and functions with the provided prefix and suffix.
	globals.Visit(fset, f, func(ident *ast.Ident, kind globals.SymKind) {
		if n, ok := types[ident.Name]; ok && kind == globals.KindType {
			ident.Name = n
		} else {
			switch kind {
			case globals.KindType, globals.KindVar, globals.KindConst, globals.KindFunction:
				// The blank identifier and init functions must keep their
				// special names.
				if ident.Name != "_" && !(ident.Name == "init" && kind == globals.KindFunction) {
					ident.Name = *prefix + ident.Name + *suffix
				}
			case globals.KindTag:
				// Modify the state tag appropriately.
				if m := stateTagRegexp.FindStringSubmatch(ident.Name); m != nil {
					if t := identifierRegexp.FindStringSubmatch(m[2]); t != nil {
						typeName := *prefix + t[2] + *suffix
						if n, ok := types[t[2]]; ok {
							typeName = n
						}
						ident.Name = m[1] + `state:".(` + t[1] + typeName + t[3] + `)"` + m[3]
					}
				}
			}
		}
	}, *processAnon)
	// Remove the definition of all types that are being remapped.
	set := make(typeSet)
	for _, v := range types {
		set[v] = struct{}{}
	}
	removeTypes(set, f)
	// Add the new imports, if any, to the top.
	if importDecl != nil {
		newDecls := make([]ast.Decl, 0, len(f.Decls)+1)
		newDecls = append(newDecls, importDecl)
		newDecls = append(newDecls, f.Decls...)
		f.Decls = newDecls
	}
	// Update comments to remove the ones potentially associated with the
	// type T that we removed.
	f.Comments = cmap.Filter(f).Comments()
	// If there are file (package) comments, delete them.
	if f.Doc != nil {
		for i, cg := range f.Comments {
			if cg == f.Doc {
				f.Comments = append(f.Comments[:i], f.Comments[i+1:]...)
				break
			}
		}
	}
	// Write the output file.
	f.Name.Name = *packageName
	var buf bytes.Buffer
	if err := format.Node(&buf, fset, f); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	// Output substitutions run on the formatted text, mirroring -in-substr.
	byteBuf := buf.Bytes()
	for old, new := range outputSubstr {
		byteBuf = bytes.ReplaceAll(byteBuf, []byte(old), []byte(new))
	}
	if err := ioutil.WriteFile(*output, byteBuf, 0644); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
|
package model
// Onboard is a single onboarding request: the new hire's details plus the
// three task lists (before joining, after joining, systems access) that are
// created for the request and stored alongside it.
type Onboard struct {
	ManagerEmail string `json:"managerEmail"`
	Name string `json:"name"`
	Email string `json:"email"`
	Role string `json:"role"`
	// StartDate is carried as a string; the expected format is not
	// established here — confirm with the producer of this payload.
	StartDate string `json:"startDate"`
	BeforeJoining []*Task `json:"beforeJoining"`
	AfterJoining []*Task `json:"afterJoining"`
	Systems []*Task `json:"systems"`
}
// NewOnboard returns an Onboard whose three task lists are initialized to
// empty, non-nil slices (these encode as [] rather than null in JSON).
func NewOnboard() *Onboard {
	o := &Onboard{}
	o.BeforeJoining = make([]*Task, 0)
	o.AfterJoining = make([]*Task, 0)
	o.Systems = make([]*Task, 0)
	return o
}
// Task is a single onboarding to-do item, assigned to the person identified
// by AssigneeEmail.
type Task struct {
	Name string `json:"name"`
	AssigneeEmail string `json:"assigneeEmail"`
}
|
package common
// Application-facing status and error messages.
//
// NOTE(review): SCREAMING_SNAKE_CASE is not idiomatic Go (MixedCaps is
// preferred), but renaming these exported constants would break callers, so
// the names — including the misspelled MSG_UNATHORIZED_ACCESS — are kept.
// Fixed here: "Sucess" typo, MSG_UPDATE_ERROR wrongly reading "save",
// "atleast", and a doubled "!! !!".
const ( // app msgs
	MSG_SAVE_SUCCESS string = "Data Saved Successfully !!"
	MSG_SAVE_ERROR string = "Could not save data !!"
	MSG_UPDATE_SUCCESS string = "Data Updated Successfully !!"
	MSG_UPDATE_ERROR string = "Could not update data !!"
	MSG_DELETE_SUCCESS string = "Data removed Successfully !!"
	MSG_DELETE_ERROR string = "Could not remove data !!"
	MSG_REQUEST_FAILED string = "Could not process request. Try later !!"
	MSG_INVALID_ID string = "Invalid Identifier"
	MSG_INVALID_GROUP string = "Invalid Group Code"
	MSG_INVALID_MOBILE string = "Invalid Mobile Number"
	MSG_BAD_INPUT string = "Bad request data"
	MSG_INVALID_STATE string = "Invalid State !!"
	MSG_NO_ROLE string = "No such role Exists !!"
	MSG_NO_STATUS string = "No such status Exists !!"
	MSG_NO_RECORD string = "No record found !!"
	// Callers append the actual count to this prefix.
	MSG_INSUFFICIENT_USER_COUNT string = "Expected 2 users, but found "
	MSG_REMOVE_USER_SUCCESS string = "User Removed Successfully !!"
	MSG_REMOVE_USER_ERROR string = "Could not remove user !!"
	MSG_REMOVE_TEMPLATE_SUCCESS string = "Template Removed Successfully !!"
	MSG_REMOVE_TEMPLATE_ERROR string = "Could not remove template !!"
	MSG_REMOVE_TEST_SUCCESS string = "Test Paper Removed Successfully !!"
	MSG_REMOVE_TEST_ERROR string = "Could not remove test paper !!"
	MSG_LOGIN_SUCCESS string = "Login Success !!"
	MSG_INVALID_CREDENTIALS_MOBILE string = "Mobile number not registered with us !!"
	MSG_INVALID_CREDENTIALS_PWD string = "Invalid Password!!"
	MSG_QUES_SUBMIT_SUCCESS string = "Successfully submitted for review !!"
	MSG_UNATHORIZED_ACCESS string = "Unauthorized access to DB !!"
	MSG_QUES_STATUS_SUCCESS string = "Successfully updated the status !!"
	MSG_QUES_STATUS_ERROR string = "Status could not be updated !!"
	MSG_QUES_REMOVE_SUCCESS string = "Successfully removed question !!"
	MSG_QUES_REMOVE_ERROR string = "Question could not be removed !!"
	MSG_CORRUPT_DATA string = "Corrupt Criteria Data"
	MSG_DUPLICATE_RECORD string = "Duplicate Record"
	MSG_FULL_NAME_ERROR string = "First & Last name required !!"
	MSG_MOBILE_MIN_LENGTH_ERROR string = "Mobile should be 10 digits"
	MSG_PWD_MIN_LENGTH_ERROR string = "Password should be at least 8 characters"
	MSG_PWD_RESET_SUCCESS string = "Password changed successfully !!"
	MSG_MOBILE_UPDATE_SUCCESS string = "Mobile number changed successfully !!"
	MSG_SCHEDULE_UPDATE_SUCCESS string = "Schedule updated successfully !!"
	MSG_USER_AUTH_ERROR string = "User Authentication Error !!"
	MSG_USER_SESSION_ERROR string = "Session Not Found !!"
	MSG_USER_FORGERY_ERROR string = "Detected user forgery !!"
)
// Numeric error codes surfaced by the storage layer.
const ( // codes
	// 11000 is presumably MongoDB's duplicate-key error code — confirm
	// against the driver in use.
	ERR_CODE_DUPLICATE int = 11000
)
// Application configuration parameters: lookup keys, formats, validation
// bounds, cache/session timeouts, and deployment paths.
const ( // config params
	PARAM_KEY_ID = "_id"
	PARAM_KEY_CODE = "code"
	PARAM_KEY_MOBILE = "mobile"
	PARAM_KEY_USERNAME = "userName"
	// Go reference-time layouts (see the time package docs).
	DATE_TIME_FORMAT = "02 Jan,2006 03:04:05 PM"
	DATE_FORMAT = "02 Jan,2006" // 01 = Month, 02 = Date
	TEMP_PWD_PREFIX = "TP_"
	USERNAME_PREFIX = "user@"
	// Inclusive bounds used by validators.
	MIN_VALID_ROLE = 0
	MAX_VALID_ROLE = 4
	MIN_VALID_STD = 1
	MAX_VALID_STD = 12
	MOBILE_LENGTH = 10
	FULLNAME_MIN_LENGTH = 2
	PWD_MIN_LENGTH = 8
	// Defaults for paginated listing endpoints.
	DEF_REQUESTS_PAGE_SIZE = 10
	DEF_REQUESTS_PAGE = 0
	QUES_BATCH_SIZE = 10000
	// One-time-password shape: 6 digits drawn from the bounds below.
	OTP_LENGTH = 6
	OTP_UPPER_BOUND = 989899
	OTP_LOWER_BOUND = 101010
	GROUP_CODE_PREFIX = "GP_"
	CACHE_STORE_KEY = "CACHE_STORE"
	REQUEST_OTP_SESSION_ID_KEY = "rqst_otp_sess_id"
	RESPONSE_OTP_SESSION_ID_KEY = "RESP_OTP_SESS_ID"
	COOKIE_SESSION_KEY = "c_user"
	COOKIE_XSRF_KEY = "XSRF-TOKEN"
	HEADER_XSRF_KEY = "X-Xsrf-Token"
	CORS_REQUEST_METHOD = "OPTIONS"
	USER_SESSION_TIMEOUT = 360 // minutes
	COOKIE_SESSION_TIMEOUT = 360
	// Cache TTLs — presumably minutes, matching the session timeouts above;
	// confirm at the call sites.
	CACHE_OTP_TIMEOUT = 10
	CACHE_OBJ_LONG_TIMEOUT = 60
	CACHE_OBJ_SHORT_TIMEOUT = 30
	APP_NAME = "IntelliQ"
	APP_PORT = ":8080"
	CACHE_PORT = ":6379"
	CACHE_DOMAIN = "localhost"
	//CACHE_DOMAIN = "redis"
	// Local-development paths kept for reference:
	// PRIVATE_KEY_FILEPATH = "/Users/lionheart/.ssh/appKey.priv"
	// SSL_CERT_FILEPATH = "/Users/lionheart/.ssh/ssl.crt"
	// SSL_KEY_FILEPATH = "/Users/lionheart/.ssh/sslKey.key"
	PRIVATE_KEY_FILEPATH = "/var/lib/intelliq/.ssh/appKey.priv"
	SSL_CERT_FILEPATH = "/var/lib/intelliq/.ssh/ssl.crt"
	SSL_KEY_FILEPATH = "/var/lib/intelliq/.ssh/sslKey.key"
	LOG_FILE = "/var/log/intelliq/intelliq.log"
	//LOG_FILE = "/Users/lionheart/log/intelliq/intelliq.log"
	LOG_MAX_BYTES = 1000000
	LOG_BACKUP_COUNT = 20
)
|
package main
import (
"fmt"
"log"
"os"
_ "./search"
)
// init routes the standard logger's output to stdout (the default is stderr).
func init(){
	log.SetOutput(os.Stdout)
}
// main prints a greeting; the blank side-effect import of ./search (see the
// import block) runs that package's initializers.
func main() {
	fmt.Print("Hello World!\n")
}
|
package mouvement
import (
"fmt"
"math/rand"
"sync"
"time"
"github.com/yanndr/rpi/bdngobot/process"
"github.com/yanndr/rpi/bdngobot/situation"
"github.com/yanndr/rpi/controller"
)
// MouvmentCommand is a control message accepted on the process channel.
// (The misspelling of "Movement" is kept: the type is exported.)
type MouvmentCommand string
const (
	// Stop halts the motors and pauses obstacle handling.
	Stop MouvmentCommand = "StopMoving"
	// Start (re)enables obstacle handling.
	Start MouvmentCommand = "StartMoving"
)
// Started gates obstacle handling in eventChannelListener.
// NOTE(review): it is read and written from the listener goroutine without
// synchronization; if other goroutines touch it this is a data race — verify.
var Started = false
// Speed presets (unit defined by the motors controller).
const (
	cruiseSpeed = 1.0
	escapeSpeed = 1.0
	backSpeed = 0.5
)
// MouvementProcess reacts to obstacle events by driving the motors; the
// mutex serializes the obstacle handlers.
type MouvementProcess struct {
	process.BaseProcess
	motorsController controller.MotorsController
	mutex sync.Mutex
}
// NewMouvementProcess builds a MouvementProcess around the given motors
// controller, with a freshly allocated event channel.
func NewMouvementProcess(motorsController controller.MotorsController) *MouvementProcess {
	mp := &MouvementProcess{motorsController: motorsController}
	mp.BaseProcess = process.BaseProcess{Channel: make(chan interface{})}
	return mp
}
// Start launches the event-listening goroutine and announces startup.
// (The "Mouvment" typo is in a runtime string and is left untouched.)
func (mp *MouvementProcess) Start() {
	go mp.eventChannelListener()
	fmt.Println("Mouvment process started.")
}
// Stop halts the motors immediately. It does not stop the listener
// goroutine; that keeps draining the channel.
func (mp *MouvementProcess) Stop() {
	mp.motorsController.Stop()
	// fmt.Println("Mouvment process stoped.")
}
// farHandler handles a far-away obstacle: keep cruising straight ahead.
func (mp *MouvementProcess) farHandler() {
	mp.mutex.Lock()
	defer mp.mutex.Unlock()
	// fmt.Println("Mouvement Far handler")
	mp.moveStraight(cruiseSpeed)
}
// mediumHandler handles a medium-distance obstacle by turning randomly left
// or right at slightly reduced speed.
func (mp *MouvementProcess) mediumHandler() {
	mp.mutex.Lock()
	defer mp.mutex.Unlock()
	// fmt.Println("Mouvement medium handler")
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// BUG FIX: rand.Intn(1) always returns 0, so the robot always turned
	// left. Intn(2) gives a fair 0/1 coin flip.
	if r.Intn(2) == 0 {
		mp.turnLeft(cruiseSpeed - 0.2)
		fmt.Println("left")
	} else {
		mp.turnRight(cruiseSpeed - 0.2)
		fmt.Println("right")
	}
}
// closeHandler handles a close obstacle with a fixed escape maneuver:
// stop, back up for one second, rotate right for one second, stop.
// The statement order is deliberate and must be preserved.
func (mp *MouvementProcess) closeHandler() {
	mp.mutex.Lock()
	defer mp.mutex.Unlock()
	// fmt.Println("Mouvement Close handler")
	mp.motorsController.Stop()
	// Reverse at half cruise speed (negative speed drives backward).
	mp.moveStraight(-cruiseSpeed / 2)
	time.Sleep(time.Second * 1)
	mp.motorsController.Stop()
	mp.motorsController.RotateRight()
	time.Sleep(time.Second * 1)
	mp.motorsController.Stop()
}
// moveStraight drives straight at the given speed (negative = backward),
// starting the motors first if they are idle; balance 1/1 keeps both sides
// equal.
func (mp *MouvementProcess) moveStraight(speed float64) {
	if !mp.motorsController.IsMoving() {
		mp.motorsController.Start()
	}
	mp.motorsController.SetSpeed(speed)
	mp.motorsController.SetBalance(1, 1)
}
// turnLeft turns by unbalancing the motors (0.6 vs 1) at the given speed.
func (mp *MouvementProcess) turnLeft(speed float64) {
	mp.motorsController.SetSpeed(speed)
	mp.motorsController.SetBalance(0.6, 1)
}
// turnRight mirrors turnLeft: the opposite side is slowed (1 vs 0.6).
func (mp *MouvementProcess) turnRight(speed float64) {
	mp.motorsController.SetSpeed(speed)
	mp.motorsController.SetBalance(1, 0.6)
}
// func (mp *MouvementProcess) rotate(speed float64) {
// mp.motorsController.SetSpeed(speed)
// mp.motorsController.RotateRight()
// time.Sleep(time.Second * 1)
// }
// eventChannelListener consumes events from the process channel until the
// channel is closed. Obstacle and Stop events are acted on only while
// Started is true; a Start command (re)enables handling and is checked
// unconditionally, exactly as in the original if/else chain.
func (mp *MouvementProcess) eventChannelListener() {
	for event := range mp.Channel {
		if Started {
			switch event {
			case situation.ObstacleFar:
				mp.farHandler()
			case situation.ObstacleMedium:
				mp.mediumHandler()
			case situation.ObstacleClose:
				mp.closeHandler()
			case Stop:
				mp.motorsController.Stop()
				Started = false
			}
		}
		if event == Start {
			Started = true
		}
	}
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tagreplication
import (
"errors"
"fmt"
"time"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"github.com/uber/kraken/lib/persistedretry"
)
// Store persists tag-replication tasks (table replicate_tag_task) so they
// can be retried asynchronously across restarts.
type Store struct {
	db *sqlx.DB
}
// NewStore creates a Store backed by db, first pruning any persisted tasks
// whose destinations rv no longer considers valid.
func NewStore(db *sqlx.DB, rv RemoteValidator) (*Store, error) {
	store := &Store{db: db}
	if err := store.deleteInvalidTasks(rv); err != nil {
		return nil, fmt.Errorf("delete invalid tasks: %s", err)
	}
	return store, nil
}
// GetPending returns every task currently in the "pending" status.
func (s *Store) GetPending() ([]persistedretry.Task, error) {
	return s.selectStatus("pending")
}
// GetFailed returns every task currently in the "failed" status.
func (s *Store) GetFailed() ([]persistedretry.Task, error) {
	return s.selectStatus("failed")
}
// AddPending inserts r with status "pending".
func (s *Store) AddPending(r persistedretry.Task) error {
	return s.addWithStatus(r, "pending")
}
// AddFailed inserts r with status "failed".
func (s *Store) AddFailed(r persistedretry.Task) error {
	return s.addWithStatus(r, "failed")
}
// MarkPending flips r's row back to "pending". It returns
// persistedretry.ErrTaskNotFound when no row matches r's tag/destination,
// and panics if the driver cannot report affected rows (the sqlite3 driver
// can, so that indicates a programming error).
func (s *Store) MarkPending(r persistedretry.Task) error {
	res, err := s.db.NamedExec(`
		UPDATE replicate_tag_task
		SET status = "pending"
		WHERE tag=:tag AND destination=:destination
	`, r.(*Task))
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		panic("driver does not support RowsAffected")
	}
	if n == 0 {
		return persistedretry.ErrTaskNotFound
	}
	return nil
}
// MarkFailed records a failed attempt for r: the database row gets
// status "failed", an incremented failure count, and a fresh timestamp,
// and the in-memory task is updated to match. Returns
// persistedretry.ErrTaskNotFound when no row matches.
func (s *Store) MarkFailed(r persistedretry.Task) error {
	t := r.(*Task)
	res, err := s.db.NamedExec(`
		UPDATE replicate_tag_task
		SET last_attempt = CURRENT_TIMESTAMP,
			failures = failures + 1,
			status = "failed"
		WHERE tag=:tag AND destination=:destination
	`, t)
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		panic("driver does not support RowsAffected")
	}
	if n == 0 {
		return persistedretry.ErrTaskNotFound
	}
	// Mirror the database-side changes on the task object.
	t.Failures++
	t.LastAttempt = time.Now()
	return nil
}
// Remove deletes r's row from the task table.
func (s *Store) Remove(r persistedretry.Task) error {
	return s.delete(r)
}
// Find is not supported by this store and always returns an error.
func (s *Store) Find(query interface{}) ([]persistedretry.Task, error) {
	return nil, errors.New("not supported")
}
// addWithStatus inserts r into replicate_tag_task with the given status,
// translating a primary-key conflict into persistedretry.ErrTaskExists.
// The status is injected with %q since named parameters cannot appear in
// that position.
func (s *Store) addWithStatus(r persistedretry.Task, status string) error {
	query := fmt.Sprintf(`
		INSERT INTO replicate_tag_task (
			tag,
			digest,
			dependencies,
			destination,
			last_attempt,
			failures,
			delay,
			status
		) VALUES (
			:tag,
			:digest,
			:dependencies,
			:destination,
			:last_attempt,
			:failures,
			:delay,
			%q
		)
	`, status)
	_, err := s.db.NamedExec(query, r.(*Task))
	if se, ok := err.(sqlite3.Error); ok && se.ExtendedCode == sqlite3.ErrConstraintPrimaryKey {
		return persistedretry.ErrTaskExists
	}
	return err
}
// selectStatus loads every task in the given status and returns them as
// persistedretry.Task values. A nil slice is returned when nothing matches,
// preserving the original nil-vs-empty semantics.
func (s *Store) selectStatus(status string) ([]persistedretry.Task, error) {
	var rows []*Task
	err := s.db.Select(&rows, `
		SELECT tag, digest, dependencies, destination, created_at, last_attempt, failures, delay
		FROM replicate_tag_task
		WHERE status=?`, status)
	if err != nil {
		return nil, err
	}
	var result []persistedretry.Task
	for i := range rows {
		result = append(result, rows[i])
	}
	return result, nil
}
// deleteInvalidTasks removes every persisted task whose destination is no
// longer a valid remote according to rv.
func (s *Store) deleteInvalidTasks(rv RemoteValidator) error {
	var tasks []*Task
	if err := s.db.Select(&tasks, `SELECT tag, destination FROM replicate_tag_task`); err != nil {
		return fmt.Errorf("select all tasks: %s", err)
	}
	for _, t := range tasks {
		if !rv.Valid(t.Tag, t.Destination) {
			if err := s.delete(t); err != nil {
				return fmt.Errorf("delete: %s", err)
			}
		}
	}
	return nil
}
// delete removes r's row, matching on the tag/destination primary key.
func (s *Store) delete(r persistedretry.Task) error {
	_, err := s.db.NamedExec(`
		DELETE FROM replicate_tag_task
		WHERE tag=:tag AND destination=:destination`, r.(*Task))
	return err
}
|
package timeparser
import (
"time"
"github.com/bborbe/backup/constants"
)
// TimeParser derives a timestamp from a backup name.
type TimeParser interface {
	// TimeByName parses name into a time.Time.
	TimeByName(name string) (time.Time, error)
}
// timeParser is the stateless default TimeParser implementation.
type timeParser struct{}

// New returns a ready-to-use timeParser.
func New() *timeParser {
	return &timeParser{}
}
// TimeByName implements TimeParser by delegating to the package-level helper.
func (t *timeParser) TimeByName(name string) (time.Time, error) {
	return timeByName(name)
}
// timeByName parses name using the project-wide constants.DATEFORMAT layout.
func timeByName(name string) (time.Time, error) {
	return time.Parse(constants.DATEFORMAT, name)
}
|
package main
import (
	"fmt"
	"io"
	"net"
	"os"
)
func recvFile(conn net.Conn, fileName string) {
// 按照文件名创建新文件
f, err := os.Create(fileName)
if err != nil {
fmt.Println("os.Create err:", err)
return
}
defer f.Close()
// 从 网络中读数据,写入本地文件
buf := make([]byte, 4096)
for {
n,_ := conn.Read(buf)
if n == 0 {
fmt.Println("接收文件完成。")
return
}
// 写入本地文件,读多少,写多少。
f.Write(buf[:n])
}
}
// main accepts a single TCP connection on 127.0.0.1:8008, reads the file
// name from the first message, acknowledges with "ok", and then receives
// the file contents via recvFile.
func main() {
	// Create the listening socket.
	listener, err := net.Listen("tcp", "127.0.0.1:8008")
	if err != nil {
		fmt.Println(" net.Listen err:", err)
		return
	}
	defer listener.Close()
	// Block until a client connects; only one connection is served.
	conn, err := listener.Accept()
	if err != nil {
		fmt.Println(" listener.Accept() err:", err)
		return
	}
	defer conn.Close()
	// Read the file name. NOTE(review): this assumes the sender's first
	// write contains exactly the file name and nothing else — confirm the
	// sender-side protocol.
	buf := make([]byte, 4096)
	n, err := conn.Read(buf)
	if err != nil {
		fmt.Println(" conn.Read err:", err)
		return
	}
	fileName := string(buf[:n])
	// Acknowledge with "ok" so the sender starts streaming the contents.
	conn.Write([]byte("ok"))
	// Receive the file contents.
	recvFile(conn, fileName)
}
package e7_3_test
import (
"gopl/e7_3"
"testing"
)
// TestSort inserts values out of order and checks the rendered String form.
func TestSort(t *testing.T) {
	root := e7_3.Add(nil, 2)
	root = e7_3.Add(root, 3)
	root = e7_3.Add(root, 1)
	got := root.String()
	want := "[2, 1, 3, ]"
	if got != want {
		t.Errorf("String() returned %s. expected: %s", got, want)
	}
}
|
package adminApp
import (
"hd-mall-ed/packages/common/pkg/app"
)
// ApiFunction embeds the shared app.ApiFunction, exposing its behavior to
// the adminApp package; no fields or methods are added here.
type ApiFunction struct {
	app.ApiFunction
}
|
// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package cloudwatch implements cloudwatch plugin and its configuration
package cloudwatch
import (
"bufio"
"io"
"os"
"github.com/aws/amazon-ssm-agent/agent/fileutil"
"github.com/aws/amazon-ssm-agent/agent/log"
)
// fileUtil abstracts the file-system operations this package needs from the
// fileutil package.
type fileUtil interface {
	// Exists reports whether filePath exists.
	Exists(filePath string) bool
	// MakeDirs creates destinationDir and any missing parents.
	MakeDirs(destinationDir string) error
	// WriteIntoFileWithPermissions writes content to absolutePath with perm.
	WriteIntoFileWithPermissions(absolutePath, content string, perm os.FileMode) (bool, error)
}
// fileUtilImpl is the production fileUtil, delegating to the fileutil package.
type fileUtilImpl struct{}
// Exists returns true if the given file exists, false otherwise, ignoring any underlying error
func (f fileUtilImpl) Exists(filePath string) bool {
	return fileutil.Exists(filePath)
}
// MakeDirs create the directories along the path if missing.
func (f fileUtilImpl) MakeDirs(destinationDir string) error {
	return fileutil.MakeDirs(destinationDir)
}
// WriteIntoFileWithPermissions writes into file with given file mode permissions
func (f fileUtilImpl) WriteIntoFileWithPermissions(absolutePath, content string, perm os.FileMode) (bool, error) {
	return fileutil.WriteIntoFileWithPermissions(absolutePath, content, perm)
}
// fileUtilWrapper is the package-wide fileUtil instance; being an
// interface-typed var, it can be swapped out (NOTE(review): presumably for
// tests — confirm).
var fileUtilWrapper fileUtil = fileUtilImpl{}
// readLastLine returns the last substantive line of the file: the final
// line if it is longer than one byte, otherwise the line before it (the
// original's heuristic for skipping a trailing empty/one-byte line). It
// panics if the file cannot be opened, preserving the original contract;
// the log parameter is unused and kept for interface compatibility.
//
// The original computed a byte offset from line lengths and re-read the
// tail with ReadAt; that arithmetic ignored the os.Stat error, was only
// correct for CRLF line endings (off by one on LF files), and could produce
// a negative offset for very small files. A single forward pass that keeps
// the last two lines avoids all of that, and also terminates on non-EOF
// read errors (the original looped forever on them).
func readLastLine(log log.T, filename string) string {
	file, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	var last, prev []byte
	reader := bufio.NewReader(file)
	for {
		line, _, rerr := reader.ReadLine()
		if rerr != nil {
			// io.EOF ends the scan; any other error also stops it.
			break
		}
		prev = last
		// ReadLine may reuse its buffer, so copy the bytes we keep.
		last = append([]byte(nil), line...)
	}
	if len(last) > 1 {
		return string(last)
	}
	return string(prev)
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package linux provides syscall tables for amd64 and arm64 Linux.
package linux
import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/syscalls"
)
// Identification strings returned to applications via uname(2).
const (
	// LinuxSysname is the OS name advertised by gVisor.
	LinuxSysname = "Linux"
	// LinuxRelease is the Linux release version number advertised by gVisor.
	LinuxRelease = "4.4.0"
	// LinuxVersion is the version info advertised by gVisor.
	LinuxVersion = "#1 SMP Sun Jan 10 15:06:54 PST 2016"
)
// AMD64 is a table of Linux amd64 syscall API with the corresponding syscall
// numbers from Linux 4.4.
var AMD64 = &kernel.SyscallTable{
OS: abi.Linux,
Arch: arch.AMD64,
Version: kernel.Version{
// Version 4.4 is chosen as a stable, longterm version of Linux, which
// guides the interface provided by this syscall table. The build
// version is that for a clean build with default kernel config, at 5
// minutes after v4.4 was tagged.
Sysname: LinuxSysname,
Release: LinuxRelease,
Version: LinuxVersion,
},
AuditNumber: linux.AUDIT_ARCH_X86_64,
Table: map[uintptr]kernel.Syscall{
0: syscalls.SupportedPoint("read", Read, PointRead),
1: syscalls.SupportedPoint("write", Write, PointWrite),
2: syscalls.SupportedPoint("open", Open, PointOpen),
3: syscalls.SupportedPoint("close", Close, PointClose),
4: syscalls.Supported("stat", Stat),
5: syscalls.Supported("fstat", Fstat),
6: syscalls.Supported("lstat", Lstat),
7: syscalls.Supported("poll", Poll),
8: syscalls.Supported("lseek", Lseek),
9: syscalls.Supported("mmap", Mmap),
10: syscalls.Supported("mprotect", Mprotect),
11: syscalls.Supported("munmap", Munmap),
12: syscalls.Supported("brk", Brk),
13: syscalls.Supported("rt_sigaction", RtSigaction),
14: syscalls.Supported("rt_sigprocmask", RtSigprocmask),
15: syscalls.Supported("rt_sigreturn", RtSigreturn),
16: syscalls.Supported("ioctl", Ioctl),
17: syscalls.SupportedPoint("pread64", Pread64, PointPread64),
18: syscalls.SupportedPoint("pwrite64", Pwrite64, PointPwrite64),
19: syscalls.SupportedPoint("readv", Readv, PointReadv),
20: syscalls.SupportedPoint("writev", Writev, PointWritev),
21: syscalls.Supported("access", Access),
22: syscalls.SupportedPoint("pipe", Pipe, PointPipe),
23: syscalls.Supported("select", Select),
24: syscalls.Supported("sched_yield", SchedYield),
25: syscalls.Supported("mremap", Mremap),
26: syscalls.PartiallySupported("msync", Msync, "Full data flush is not guaranteed at this time.", nil),
27: syscalls.PartiallySupported("mincore", Mincore, "Stub implementation. The sandbox does not have access to this information. Reports all mapped pages are resident.", nil),
28: syscalls.PartiallySupported("madvise", Madvise, "Options MADV_DONTNEED, MADV_DONTFORK are supported. Other advice is ignored.", nil),
29: syscalls.PartiallySupported("shmget", Shmget, "Option SHM_HUGETLB is not supported.", nil),
30: syscalls.PartiallySupported("shmat", Shmat, "Option SHM_RND is not supported.", nil),
31: syscalls.PartiallySupported("shmctl", Shmctl, "Options SHM_LOCK, SHM_UNLOCK are not supported.", nil),
32: syscalls.SupportedPoint("dup", Dup, PointDup),
33: syscalls.SupportedPoint("dup2", Dup2, PointDup2),
34: syscalls.Supported("pause", Pause),
35: syscalls.Supported("nanosleep", Nanosleep),
36: syscalls.Supported("getitimer", Getitimer),
37: syscalls.Supported("alarm", Alarm),
38: syscalls.Supported("setitimer", Setitimer),
39: syscalls.Supported("getpid", Getpid),
40: syscalls.Supported("sendfile", Sendfile),
41: syscalls.SupportedPoint("socket", Socket, PointSocket),
42: syscalls.SupportedPoint("connect", Connect, PointConnect),
43: syscalls.SupportedPoint("accept", Accept, PointAccept),
44: syscalls.Supported("sendto", SendTo),
45: syscalls.Supported("recvfrom", RecvFrom),
46: syscalls.Supported("sendmsg", SendMsg),
47: syscalls.Supported("recvmsg", RecvMsg),
48: syscalls.Supported("shutdown", Shutdown),
49: syscalls.SupportedPoint("bind", Bind, PointBind),
50: syscalls.Supported("listen", Listen),
51: syscalls.Supported("getsockname", GetSockName),
52: syscalls.Supported("getpeername", GetPeerName),
53: syscalls.SupportedPoint("socketpair", SocketPair, PointSocketpair),
54: syscalls.Supported("setsockopt", SetSockOpt),
55: syscalls.Supported("getsockopt", GetSockOpt),
56: syscalls.PartiallySupportedPoint("clone", Clone, PointClone, "Options CLONE_PIDFD, CLONE_NEWCGROUP, CLONE_PARENT, CLONE_NEWTIME, CLONE_CLEAR_SIGHAND, and CLONE_SYSVSEM not supported.", nil),
57: syscalls.SupportedPoint("fork", Fork, PointFork),
58: syscalls.SupportedPoint("vfork", Vfork, PointVfork),
59: syscalls.SupportedPoint("execve", Execve, PointExecve),
60: syscalls.Supported("exit", Exit),
61: syscalls.Supported("wait4", Wait4),
62: syscalls.Supported("kill", Kill),
63: syscalls.Supported("uname", Uname),
64: syscalls.Supported("semget", Semget),
65: syscalls.PartiallySupported("semop", Semop, "Option SEM_UNDO not supported.", nil),
66: syscalls.Supported("semctl", Semctl),
67: syscalls.Supported("shmdt", Shmdt),
68: syscalls.Supported("msgget", Msgget),
69: syscalls.Supported("msgsnd", Msgsnd),
70: syscalls.Supported("msgrcv", Msgrcv),
71: syscalls.Supported("msgctl", Msgctl),
72: syscalls.SupportedPoint("fcntl", Fcntl, PointFcntl),
73: syscalls.Supported("flock", Flock),
74: syscalls.Supported("fsync", Fsync),
75: syscalls.Supported("fdatasync", Fdatasync),
76: syscalls.Supported("truncate", Truncate),
77: syscalls.Supported("ftruncate", Ftruncate),
78: syscalls.Supported("getdents", Getdents),
79: syscalls.Supported("getcwd", Getcwd),
80: syscalls.SupportedPoint("chdir", Chdir, PointChdir),
81: syscalls.SupportedPoint("fchdir", Fchdir, PointFchdir),
82: syscalls.Supported("rename", Rename),
83: syscalls.Supported("mkdir", Mkdir),
84: syscalls.Supported("rmdir", Rmdir),
85: syscalls.SupportedPoint("creat", Creat, PointCreat),
86: syscalls.Supported("link", Link),
87: syscalls.Supported("unlink", Unlink),
88: syscalls.Supported("symlink", Symlink),
89: syscalls.Supported("readlink", Readlink),
90: syscalls.Supported("chmod", Chmod),
91: syscalls.Supported("fchmod", Fchmod),
92: syscalls.Supported("chown", Chown),
93: syscalls.Supported("fchown", Fchown),
94: syscalls.Supported("lchown", Lchown),
95: syscalls.Supported("umask", Umask),
96: syscalls.Supported("gettimeofday", Gettimeofday),
97: syscalls.Supported("getrlimit", Getrlimit),
98: syscalls.PartiallySupported("getrusage", Getrusage, "Fields ru_maxrss, ru_minflt, ru_majflt, ru_inblock, ru_oublock are not supported. Fields ru_utime and ru_stime have low precision.", nil),
99: syscalls.PartiallySupported("sysinfo", Sysinfo, "Fields loads, sharedram, bufferram, totalswap, freeswap, totalhigh, freehigh not supported.", nil),
100: syscalls.Supported("times", Times),
101: syscalls.PartiallySupported("ptrace", Ptrace, "Options PTRACE_PEEKSIGINFO, PTRACE_SECCOMP_GET_FILTER not supported.", nil),
102: syscalls.Supported("getuid", Getuid),
103: syscalls.PartiallySupported("syslog", Syslog, "Outputs a dummy message for security reasons.", nil),
104: syscalls.Supported("getgid", Getgid),
105: syscalls.SupportedPoint("setuid", Setuid, PointSetuid),
106: syscalls.SupportedPoint("setgid", Setgid, PointSetgid),
107: syscalls.Supported("geteuid", Geteuid),
108: syscalls.Supported("getegid", Getegid),
109: syscalls.Supported("setpgid", Setpgid),
110: syscalls.Supported("getppid", Getppid),
111: syscalls.Supported("getpgrp", Getpgrp),
112: syscalls.SupportedPoint("setsid", Setsid, PointSetsid),
113: syscalls.Supported("setreuid", Setreuid),
114: syscalls.Supported("setregid", Setregid),
115: syscalls.Supported("getgroups", Getgroups),
116: syscalls.Supported("setgroups", Setgroups),
117: syscalls.SupportedPoint("setresuid", Setresuid, PointSetresuid),
118: syscalls.Supported("getresuid", Getresuid),
119: syscalls.SupportedPoint("setresgid", Setresgid, PointSetresgid),
120: syscalls.Supported("getresgid", Getresgid),
121: syscalls.Supported("getpgid", Getpgid),
122: syscalls.ErrorWithEvent("setfsuid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
123: syscalls.ErrorWithEvent("setfsgid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
124: syscalls.Supported("getsid", Getsid),
125: syscalls.Supported("capget", Capget),
126: syscalls.Supported("capset", Capset),
127: syscalls.Supported("rt_sigpending", RtSigpending),
128: syscalls.Supported("rt_sigtimedwait", RtSigtimedwait),
129: syscalls.Supported("rt_sigqueueinfo", RtSigqueueinfo),
130: syscalls.Supported("rt_sigsuspend", RtSigsuspend),
131: syscalls.Supported("sigaltstack", Sigaltstack),
132: syscalls.Supported("utime", Utime),
133: syscalls.Supported("mknod", Mknod),
134: syscalls.Error("uselib", linuxerr.ENOSYS, "Obsolete", nil),
135: syscalls.ErrorWithEvent("personality", linuxerr.EINVAL, "Unable to change personality.", nil),
136: syscalls.ErrorWithEvent("ustat", linuxerr.ENOSYS, "Needs filesystem support.", nil),
137: syscalls.Supported("statfs", Statfs),
138: syscalls.Supported("fstatfs", Fstatfs),
139: syscalls.ErrorWithEvent("sysfs", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/165"}),
140: syscalls.PartiallySupported("getpriority", Getpriority, "Stub implementation.", nil),
141: syscalls.PartiallySupported("setpriority", Setpriority, "Stub implementation.", nil),
142: syscalls.CapError("sched_setparam", linux.CAP_SYS_NICE, "", nil),
143: syscalls.PartiallySupported("sched_getparam", SchedGetparam, "Stub implementation.", nil),
144: syscalls.PartiallySupported("sched_setscheduler", SchedSetscheduler, "Stub implementation.", nil),
145: syscalls.PartiallySupported("sched_getscheduler", SchedGetscheduler, "Stub implementation.", nil),
146: syscalls.PartiallySupported("sched_get_priority_max", SchedGetPriorityMax, "Stub implementation.", nil),
147: syscalls.PartiallySupported("sched_get_priority_min", SchedGetPriorityMin, "Stub implementation.", nil),
148: syscalls.ErrorWithEvent("sched_rr_get_interval", linuxerr.EPERM, "", nil),
149: syscalls.PartiallySupported("mlock", Mlock, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
150: syscalls.PartiallySupported("munlock", Munlock, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
151: syscalls.PartiallySupported("mlockall", Mlockall, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
152: syscalls.PartiallySupported("munlockall", Munlockall, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
153: syscalls.CapError("vhangup", linux.CAP_SYS_TTY_CONFIG, "", nil),
154: syscalls.Error("modify_ldt", linuxerr.EPERM, "", nil),
155: syscalls.Supported("pivot_root", PivotRoot),
156: syscalls.Error("sysctl", linuxerr.EPERM, "Deprecated. Use /proc/sys instead.", nil),
157: syscalls.PartiallySupported("prctl", Prctl, "Not all options are supported.", nil),
158: syscalls.PartiallySupported("arch_prctl", ArchPrctl, "Options ARCH_GET_GS, ARCH_SET_GS not supported.", nil),
159: syscalls.CapError("adjtimex", linux.CAP_SYS_TIME, "", nil),
160: syscalls.PartiallySupported("setrlimit", Setrlimit, "Not all rlimits are enforced.", nil),
161: syscalls.SupportedPoint("chroot", Chroot, PointChroot),
162: syscalls.Supported("sync", Sync),
163: syscalls.CapError("acct", linux.CAP_SYS_PACCT, "", nil),
164: syscalls.CapError("settimeofday", linux.CAP_SYS_TIME, "", nil),
165: syscalls.Supported("mount", Mount),
166: syscalls.Supported("umount2", Umount2),
167: syscalls.CapError("swapon", linux.CAP_SYS_ADMIN, "", nil),
168: syscalls.CapError("swapoff", linux.CAP_SYS_ADMIN, "", nil),
169: syscalls.CapError("reboot", linux.CAP_SYS_BOOT, "", nil),
170: syscalls.Supported("sethostname", Sethostname),
171: syscalls.Supported("setdomainname", Setdomainname),
172: syscalls.CapError("iopl", linux.CAP_SYS_RAWIO, "", nil),
173: syscalls.CapError("ioperm", linux.CAP_SYS_RAWIO, "", nil),
174: syscalls.CapError("create_module", linux.CAP_SYS_MODULE, "", nil),
175: syscalls.CapError("init_module", linux.CAP_SYS_MODULE, "", nil),
176: syscalls.CapError("delete_module", linux.CAP_SYS_MODULE, "", nil),
177: syscalls.Error("get_kernel_syms", linuxerr.ENOSYS, "Not supported in Linux > 2.6.", nil),
178: syscalls.Error("query_module", linuxerr.ENOSYS, "Not supported in Linux > 2.6.", nil),
179: syscalls.CapError("quotactl", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_admin for most operations
180: syscalls.Error("nfsservctl", linuxerr.ENOSYS, "Removed after Linux 3.1.", nil),
181: syscalls.Error("getpmsg", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
182: syscalls.Error("putpmsg", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
183: syscalls.PartiallySupported("afs_syscall", AFSSyscall, "Test implementation.", nil),
184: syscalls.Error("tuxcall", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
185: syscalls.Error("security", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
186: syscalls.Supported("gettid", Gettid),
187: syscalls.Supported("readahead", Readahead),
188: syscalls.Supported("setxattr", SetXattr),
189: syscalls.Supported("lsetxattr", Lsetxattr),
190: syscalls.Supported("fsetxattr", Fsetxattr),
191: syscalls.Supported("getxattr", GetXattr),
192: syscalls.Supported("lgetxattr", Lgetxattr),
193: syscalls.Supported("fgetxattr", Fgetxattr),
194: syscalls.Supported("listxattr", ListXattr),
195: syscalls.Supported("llistxattr", Llistxattr),
196: syscalls.Supported("flistxattr", Flistxattr),
197: syscalls.Supported("removexattr", RemoveXattr),
198: syscalls.Supported("lremovexattr", Lremovexattr),
199: syscalls.Supported("fremovexattr", Fremovexattr),
200: syscalls.Supported("tkill", Tkill),
201: syscalls.Supported("time", Time),
202: syscalls.PartiallySupported("futex", Futex, "Robust futexes not supported.", nil),
203: syscalls.PartiallySupported("sched_setaffinity", SchedSetaffinity, "Stub implementation.", nil),
204: syscalls.PartiallySupported("sched_getaffinity", SchedGetaffinity, "Stub implementation.", nil),
205: syscalls.Error("set_thread_area", linuxerr.ENOSYS, "Expected to return ENOSYS on 64-bit", nil),
206: syscalls.PartiallySupported("io_setup", IoSetup, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
207: syscalls.PartiallySupported("io_destroy", IoDestroy, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
208: syscalls.PartiallySupported("io_getevents", IoGetevents, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
209: syscalls.PartiallySupported("io_submit", IoSubmit, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
210: syscalls.PartiallySupported("io_cancel", IoCancel, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
211: syscalls.Error("get_thread_area", linuxerr.ENOSYS, "Expected to return ENOSYS on 64-bit", nil),
212: syscalls.CapError("lookup_dcookie", linux.CAP_SYS_ADMIN, "", nil),
213: syscalls.Supported("epoll_create", EpollCreate),
214: syscalls.ErrorWithEvent("epoll_ctl_old", linuxerr.ENOSYS, "Deprecated.", nil),
215: syscalls.ErrorWithEvent("epoll_wait_old", linuxerr.ENOSYS, "Deprecated.", nil),
216: syscalls.ErrorWithEvent("remap_file_pages", linuxerr.ENOSYS, "Deprecated since Linux 3.16.", nil),
217: syscalls.Supported("getdents64", Getdents64),
218: syscalls.Supported("set_tid_address", SetTidAddress),
219: syscalls.Supported("restart_syscall", RestartSyscall),
220: syscalls.Supported("semtimedop", Semtimedop),
221: syscalls.PartiallySupported("fadvise64", Fadvise64, "The syscall is 'supported', but ignores all provided advice.", nil),
222: syscalls.Supported("timer_create", TimerCreate),
223: syscalls.Supported("timer_settime", TimerSettime),
224: syscalls.Supported("timer_gettime", TimerGettime),
225: syscalls.Supported("timer_getoverrun", TimerGetoverrun),
226: syscalls.Supported("timer_delete", TimerDelete),
227: syscalls.Supported("clock_settime", ClockSettime),
228: syscalls.Supported("clock_gettime", ClockGettime),
229: syscalls.Supported("clock_getres", ClockGetres),
230: syscalls.Supported("clock_nanosleep", ClockNanosleep),
231: syscalls.Supported("exit_group", ExitGroup),
232: syscalls.Supported("epoll_wait", EpollWait),
233: syscalls.Supported("epoll_ctl", EpollCtl),
234: syscalls.Supported("tgkill", Tgkill),
235: syscalls.Supported("utimes", Utimes),
236: syscalls.Error("vserver", linuxerr.ENOSYS, "Not implemented by Linux", nil),
237: syscalls.PartiallySupported("mbind", Mbind, "Stub implementation. Only a single NUMA node is advertised, and mempolicy is ignored accordingly, but mbind() will succeed and has effects reflected by get_mempolicy.", []string{"gvisor.dev/issue/262"}),
238: syscalls.PartiallySupported("set_mempolicy", SetMempolicy, "Stub implementation.", nil),
239: syscalls.PartiallySupported("get_mempolicy", GetMempolicy, "Stub implementation.", nil),
240: syscalls.Supported("mq_open", MqOpen),
241: syscalls.Supported("mq_unlink", MqUnlink),
242: syscalls.ErrorWithEvent("mq_timedsend", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
243: syscalls.ErrorWithEvent("mq_timedreceive", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
244: syscalls.ErrorWithEvent("mq_notify", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
245: syscalls.ErrorWithEvent("mq_getsetattr", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
246: syscalls.CapError("kexec_load", linux.CAP_SYS_BOOT, "", nil),
247: syscalls.Supported("waitid", Waitid),
248: syscalls.Error("add_key", linuxerr.EACCES, "Not available to user.", nil),
249: syscalls.Error("request_key", linuxerr.EACCES, "Not available to user.", nil),
250: syscalls.Error("keyctl", linuxerr.EACCES, "Not available to user.", nil),
251: syscalls.CapError("ioprio_set", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_nice or cap_sys_admin (depending)
252: syscalls.CapError("ioprio_get", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_nice or cap_sys_admin (depending)
253: syscalls.PartiallySupportedPoint("inotify_init", InotifyInit, PointInotifyInit, "inotify events are only available inside the sandbox.", nil),
254: syscalls.PartiallySupportedPoint("inotify_add_watch", InotifyAddWatch, PointInotifyAddWatch, "inotify events are only available inside the sandbox.", nil),
255: syscalls.PartiallySupportedPoint("inotify_rm_watch", InotifyRmWatch, PointInotifyRmWatch, "inotify events are only available inside the sandbox.", nil),
256: syscalls.CapError("migrate_pages", linux.CAP_SYS_NICE, "", nil),
257: syscalls.SupportedPoint("openat", Openat, PointOpenat),
258: syscalls.Supported("mkdirat", Mkdirat),
259: syscalls.Supported("mknodat", Mknodat),
260: syscalls.Supported("fchownat", Fchownat),
261: syscalls.Supported("futimesat", Futimesat),
262: syscalls.Supported("newfstatat", Newfstatat),
263: syscalls.Supported("unlinkat", Unlinkat),
264: syscalls.Supported("renameat", Renameat),
265: syscalls.Supported("linkat", Linkat),
266: syscalls.Supported("symlinkat", Symlinkat),
267: syscalls.Supported("readlinkat", Readlinkat),
268: syscalls.Supported("fchmodat", Fchmodat),
269: syscalls.Supported("faccessat", Faccessat),
270: syscalls.Supported("pselect6", Pselect6),
271: syscalls.Supported("ppoll", Ppoll),
272: syscalls.PartiallySupported("unshare", Unshare, "Mount, cgroup namespaces not supported. Network namespaces supported but must be empty.", nil),
273: syscalls.Supported("set_robust_list", SetRobustList),
274: syscalls.Supported("get_robust_list", GetRobustList),
275: syscalls.Supported("splice", Splice),
276: syscalls.Supported("tee", Tee),
277: syscalls.Supported("sync_file_range", SyncFileRange),
278: syscalls.ErrorWithEvent("vmsplice", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/138"}), // TODO(b/29354098)
279: syscalls.CapError("move_pages", linux.CAP_SYS_NICE, "", nil), // requires cap_sys_nice (mostly)
280: syscalls.Supported("utimensat", Utimensat),
281: syscalls.Supported("epoll_pwait", EpollPwait),
282: syscalls.SupportedPoint("signalfd", Signalfd, PointSignalfd),
283: syscalls.SupportedPoint("timerfd_create", TimerfdCreate, PointTimerfdCreate),
284: syscalls.SupportedPoint("eventfd", Eventfd, PointEventfd),
285: syscalls.PartiallySupported("fallocate", Fallocate, "Not all options are supported.", nil),
286: syscalls.SupportedPoint("timerfd_settime", TimerfdSettime, PointTimerfdSettime),
287: syscalls.SupportedPoint("timerfd_gettime", TimerfdGettime, PointTimerfdGettime),
288: syscalls.SupportedPoint("accept4", Accept4, PointAccept4),
289: syscalls.SupportedPoint("signalfd4", Signalfd4, PointSignalfd4),
290: syscalls.SupportedPoint("eventfd2", Eventfd2, PointEventfd2),
291: syscalls.Supported("epoll_create1", EpollCreate1),
292: syscalls.SupportedPoint("dup3", Dup3, PointDup3),
293: syscalls.SupportedPoint("pipe2", Pipe2, PointPipe2),
294: syscalls.PartiallySupportedPoint("inotify_init1", InotifyInit1, PointInotifyInit1, "inotify events are only available inside the sandbox.", nil),
295: syscalls.SupportedPoint("preadv", Preadv, PointPreadv),
296: syscalls.SupportedPoint("pwritev", Pwritev, PointPwritev),
297: syscalls.Supported("rt_tgsigqueueinfo", RtTgsigqueueinfo),
298: syscalls.ErrorWithEvent("perf_event_open", linuxerr.ENODEV, "No support for perf counters", nil),
299: syscalls.Supported("recvmmsg", RecvMMsg),
300: syscalls.ErrorWithEvent("fanotify_init", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
301: syscalls.ErrorWithEvent("fanotify_mark", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
302: syscalls.SupportedPoint("prlimit64", Prlimit64, PointPrlimit64),
303: syscalls.Error("name_to_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
304: syscalls.Error("open_by_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
305: syscalls.CapError("clock_adjtime", linux.CAP_SYS_TIME, "", nil),
306: syscalls.Supported("syncfs", Syncfs),
307: syscalls.Supported("sendmmsg", SendMMsg),
308: syscalls.Supported("setns", Setns),
309: syscalls.Supported("getcpu", Getcpu),
310: syscalls.ErrorWithEvent("process_vm_readv", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}), // TODO(b/260724654)
311: syscalls.ErrorWithEvent("process_vm_writev", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}), // TODO(b/260724654)
312: syscalls.CapError("kcmp", linux.CAP_SYS_PTRACE, "", nil),
313: syscalls.CapError("finit_module", linux.CAP_SYS_MODULE, "", nil),
314: syscalls.ErrorWithEvent("sched_setattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
315: syscalls.ErrorWithEvent("sched_getattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
316: syscalls.Supported("renameat2", Renameat2),
317: syscalls.Supported("seccomp", Seccomp),
318: syscalls.Supported("getrandom", GetRandom),
319: syscalls.Supported("memfd_create", MemfdCreate),
320: syscalls.CapError("kexec_file_load", linux.CAP_SYS_BOOT, "", nil),
321: syscalls.CapError("bpf", linux.CAP_SYS_ADMIN, "", nil),
322: syscalls.SupportedPoint("execveat", Execveat, PointExecveat),
323: syscalls.ErrorWithEvent("userfaultfd", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/266"}), // TODO(b/118906345)
324: syscalls.PartiallySupported("membarrier", Membarrier, "Not supported on all platforms.", nil),
325: syscalls.PartiallySupported("mlock2", Mlock2, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
// Syscalls implemented after 325 are "backports" from versions
// of Linux after 4.4.
326: syscalls.ErrorWithEvent("copy_file_range", linuxerr.ENOSYS, "", nil),
327: syscalls.SupportedPoint("preadv2", Preadv2, PointPreadv2),
328: syscalls.SupportedPoint("pwritev2", Pwritev2, PointPwritev2),
329: syscalls.ErrorWithEvent("pkey_mprotect", linuxerr.ENOSYS, "", nil),
330: syscalls.ErrorWithEvent("pkey_alloc", linuxerr.ENOSYS, "", nil),
331: syscalls.ErrorWithEvent("pkey_free", linuxerr.ENOSYS, "", nil),
332: syscalls.Supported("statx", Statx),
333: syscalls.ErrorWithEvent("io_pgetevents", linuxerr.ENOSYS, "", nil),
334: syscalls.PartiallySupported("rseq", RSeq, "Not supported on all platforms.", nil),
// Linux skips ahead to syscall 424 to sync numbers between arches.
424: syscalls.ErrorWithEvent("pidfd_send_signal", linuxerr.ENOSYS, "", nil),
425: syscalls.PartiallySupported("io_uring_setup", IOUringSetup, "Not all flags and functionality supported.", nil),
426: syscalls.PartiallySupported("io_uring_enter", IOUringEnter, "Not all flags and functionality supported.", nil),
427: syscalls.ErrorWithEvent("io_uring_register", linuxerr.ENOSYS, "", nil),
428: syscalls.ErrorWithEvent("open_tree", linuxerr.ENOSYS, "", nil),
429: syscalls.ErrorWithEvent("move_mount", linuxerr.ENOSYS, "", nil),
430: syscalls.ErrorWithEvent("fsopen", linuxerr.ENOSYS, "", nil),
431: syscalls.ErrorWithEvent("fsconfig", linuxerr.ENOSYS, "", nil),
432: syscalls.ErrorWithEvent("fsmount", linuxerr.ENOSYS, "", nil),
433: syscalls.ErrorWithEvent("fspick", linuxerr.ENOSYS, "", nil),
434: syscalls.ErrorWithEvent("pidfd_open", linuxerr.ENOSYS, "", nil),
435: syscalls.PartiallySupported("clone3", Clone3, "Options CLONE_PIDFD, CLONE_NEWCGROUP, CLONE_INTO_CGROUP, CLONE_NEWTIME, CLONE_CLEAR_SIGHAND, CLONE_PARENT, CLONE_SYSVSEM and, SetTid are not supported.", nil),
436: syscalls.Supported("close_range", CloseRange),
439: syscalls.Supported("faccessat2", Faccessat2),
441: syscalls.Supported("epoll_pwait2", EpollPwait2),
},
Emulate: map[hostarch.Addr]uintptr{
0xffffffffff600000: 96, // vsyscall gettimeofday(2)
0xffffffffff600400: 201, // vsyscall time(2)
0xffffffffff600800: 309, // vsyscall getcpu(2)
},
Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
t.Kernel().EmitUnimplementedEvent(t, sysno)
return 0, linuxerr.ENOSYS
},
}
// ARM64 is a table of Linux arm64 syscall API with the corresponding syscall
// numbers from Linux 4.4.
var ARM64 = &kernel.SyscallTable{
	OS:   abi.Linux,
	Arch: arch.ARM64,
	Version: kernel.Version{
		Sysname: LinuxSysname,
		Release: LinuxRelease,
		Version: LinuxVersion,
	},
	AuditNumber: linux.AUDIT_ARCH_AARCH64,
	// Table maps arm64 syscall numbers to their implementations (or to an
	// explicit error/capability stub). Numbers absent from this map fall
	// through to the Missing handler below.
	Table: map[uintptr]kernel.Syscall{
		0:   syscalls.PartiallySupported("io_setup", IoSetup, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
		1:   syscalls.PartiallySupported("io_destroy", IoDestroy, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
		2:   syscalls.PartiallySupported("io_submit", IoSubmit, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
		3:   syscalls.PartiallySupported("io_cancel", IoCancel, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
		4:   syscalls.PartiallySupported("io_getevents", IoGetevents, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
		5:   syscalls.Supported("setxattr", SetXattr),
		6:   syscalls.Supported("lsetxattr", Lsetxattr),
		7:   syscalls.Supported("fsetxattr", Fsetxattr),
		8:   syscalls.Supported("getxattr", GetXattr),
		9:   syscalls.Supported("lgetxattr", Lgetxattr),
		10:  syscalls.Supported("fgetxattr", Fgetxattr),
		11:  syscalls.Supported("listxattr", ListXattr),
		12:  syscalls.Supported("llistxattr", Llistxattr),
		13:  syscalls.Supported("flistxattr", Flistxattr),
		14:  syscalls.Supported("removexattr", RemoveXattr),
		15:  syscalls.Supported("lremovexattr", Lremovexattr),
		16:  syscalls.Supported("fremovexattr", Fremovexattr),
		17:  syscalls.Supported("getcwd", Getcwd),
		18:  syscalls.CapError("lookup_dcookie", linux.CAP_SYS_ADMIN, "", nil),
		19:  syscalls.SupportedPoint("eventfd2", Eventfd2, PointEventfd2),
		20:  syscalls.Supported("epoll_create1", EpollCreate1),
		21:  syscalls.Supported("epoll_ctl", EpollCtl),
		22:  syscalls.Supported("epoll_pwait", EpollPwait),
		23:  syscalls.SupportedPoint("dup", Dup, PointDup),
		24:  syscalls.SupportedPoint("dup3", Dup3, PointDup3),
		25:  syscalls.SupportedPoint("fcntl", Fcntl, PointFcntl),
		26:  syscalls.PartiallySupportedPoint("inotify_init1", InotifyInit1, PointInotifyInit1, "inotify events are only available inside the sandbox.", nil),
		27:  syscalls.PartiallySupportedPoint("inotify_add_watch", InotifyAddWatch, PointInotifyAddWatch, "inotify events are only available inside the sandbox.", nil),
		28:  syscalls.PartiallySupportedPoint("inotify_rm_watch", InotifyRmWatch, PointInotifyRmWatch, "inotify events are only available inside the sandbox.", nil),
		29:  syscalls.Supported("ioctl", Ioctl),
		30:  syscalls.CapError("ioprio_set", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_nice or cap_sys_admin (depending)
		31:  syscalls.CapError("ioprio_get", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_nice or cap_sys_admin (depending)
		32:  syscalls.Supported("flock", Flock),
		33:  syscalls.Supported("mknodat", Mknodat),
		34:  syscalls.Supported("mkdirat", Mkdirat),
		35:  syscalls.Supported("unlinkat", Unlinkat),
		36:  syscalls.Supported("symlinkat", Symlinkat),
		37:  syscalls.Supported("linkat", Linkat),
		38:  syscalls.Supported("renameat", Renameat),
		39:  syscalls.Supported("umount2", Umount2),
		40:  syscalls.Supported("mount", Mount),
		41:  syscalls.Supported("pivot_root", PivotRoot),
		42:  syscalls.Error("nfsservctl", linuxerr.ENOSYS, "Removed after Linux 3.1.", nil),
		43:  syscalls.Supported("statfs", Statfs),
		44:  syscalls.Supported("fstatfs", Fstatfs),
		45:  syscalls.Supported("truncate", Truncate),
		46:  syscalls.Supported("ftruncate", Ftruncate),
		47:  syscalls.PartiallySupported("fallocate", Fallocate, "Not all options are supported.", nil),
		48:  syscalls.Supported("faccessat", Faccessat),
		49:  syscalls.SupportedPoint("chdir", Chdir, PointChdir),
		50:  syscalls.SupportedPoint("fchdir", Fchdir, PointFchdir),
		51:  syscalls.SupportedPoint("chroot", Chroot, PointChroot),
		52:  syscalls.Supported("fchmod", Fchmod),
		53:  syscalls.Supported("fchmodat", Fchmodat),
		54:  syscalls.Supported("fchownat", Fchownat),
		55:  syscalls.Supported("fchown", Fchown),
		56:  syscalls.SupportedPoint("openat", Openat, PointOpenat),
		57:  syscalls.SupportedPoint("close", Close, PointClose),
		58:  syscalls.CapError("vhangup", linux.CAP_SYS_TTY_CONFIG, "", nil),
		59:  syscalls.SupportedPoint("pipe2", Pipe2, PointPipe2),
		60:  syscalls.CapError("quotactl", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_admin for most operations
		61:  syscalls.Supported("getdents64", Getdents64),
		62:  syscalls.Supported("lseek", Lseek),
		63:  syscalls.SupportedPoint("read", Read, PointRead),
		64:  syscalls.SupportedPoint("write", Write, PointWrite),
		65:  syscalls.SupportedPoint("readv", Readv, PointReadv),
		66:  syscalls.SupportedPoint("writev", Writev, PointWritev),
		67:  syscalls.SupportedPoint("pread64", Pread64, PointPread64),
		68:  syscalls.SupportedPoint("pwrite64", Pwrite64, PointPwrite64),
		69:  syscalls.SupportedPoint("preadv", Preadv, PointPreadv),
		70:  syscalls.SupportedPoint("pwritev", Pwritev, PointPwritev),
		71:  syscalls.Supported("sendfile", Sendfile),
		72:  syscalls.Supported("pselect6", Pselect6),
		73:  syscalls.Supported("ppoll", Ppoll),
		74:  syscalls.SupportedPoint("signalfd4", Signalfd4, PointSignalfd4),
		75:  syscalls.ErrorWithEvent("vmsplice", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/138"}), // TODO(b/29354098)
		76:  syscalls.Supported("splice", Splice),
		77:  syscalls.Supported("tee", Tee),
		78:  syscalls.Supported("readlinkat", Readlinkat),
		79:  syscalls.Supported("newfstatat", Newfstatat),
		80:  syscalls.Supported("fstat", Fstat),
		81:  syscalls.Supported("sync", Sync),
		82:  syscalls.Supported("fsync", Fsync),
		83:  syscalls.Supported("fdatasync", Fdatasync),
		84:  syscalls.Supported("sync_file_range", SyncFileRange),
		85:  syscalls.SupportedPoint("timerfd_create", TimerfdCreate, PointTimerfdCreate),
		86:  syscalls.SupportedPoint("timerfd_settime", TimerfdSettime, PointTimerfdSettime),
		87:  syscalls.SupportedPoint("timerfd_gettime", TimerfdGettime, PointTimerfdGettime),
		88:  syscalls.Supported("utimensat", Utimensat),
		89:  syscalls.CapError("acct", linux.CAP_SYS_PACCT, "", nil),
		90:  syscalls.Supported("capget", Capget),
		91:  syscalls.Supported("capset", Capset),
		92:  syscalls.ErrorWithEvent("personality", linuxerr.EINVAL, "Unable to change personality.", nil),
		93:  syscalls.Supported("exit", Exit),
		94:  syscalls.Supported("exit_group", ExitGroup),
		95:  syscalls.Supported("waitid", Waitid),
		96:  syscalls.Supported("set_tid_address", SetTidAddress),
		97:  syscalls.PartiallySupported("unshare", Unshare, "Mount, cgroup namespaces not supported. Network namespaces supported but must be empty.", nil),
		98:  syscalls.PartiallySupported("futex", Futex, "Robust futexes not supported.", nil),
		99:  syscalls.Supported("set_robust_list", SetRobustList),
		100: syscalls.Supported("get_robust_list", GetRobustList),
		101: syscalls.Supported("nanosleep", Nanosleep),
		102: syscalls.Supported("getitimer", Getitimer),
		103: syscalls.Supported("setitimer", Setitimer),
		104: syscalls.CapError("kexec_load", linux.CAP_SYS_BOOT, "", nil),
		105: syscalls.CapError("init_module", linux.CAP_SYS_MODULE, "", nil),
		106: syscalls.CapError("delete_module", linux.CAP_SYS_MODULE, "", nil),
		107: syscalls.Supported("timer_create", TimerCreate),
		108: syscalls.Supported("timer_gettime", TimerGettime),
		109: syscalls.Supported("timer_getoverrun", TimerGetoverrun),
		110: syscalls.Supported("timer_settime", TimerSettime),
		111: syscalls.Supported("timer_delete", TimerDelete),
		112: syscalls.Supported("clock_settime", ClockSettime),
		113: syscalls.Supported("clock_gettime", ClockGettime),
		114: syscalls.Supported("clock_getres", ClockGetres),
		115: syscalls.Supported("clock_nanosleep", ClockNanosleep),
		116: syscalls.PartiallySupported("syslog", Syslog, "Outputs a dummy message for security reasons.", nil),
		117: syscalls.PartiallySupported("ptrace", Ptrace, "Options PTRACE_PEEKSIGINFO, PTRACE_SECCOMP_GET_FILTER not supported.", nil),
		118: syscalls.CapError("sched_setparam", linux.CAP_SYS_NICE, "", nil),
		119: syscalls.PartiallySupported("sched_setscheduler", SchedSetscheduler, "Stub implementation.", nil),
		120: syscalls.PartiallySupported("sched_getscheduler", SchedGetscheduler, "Stub implementation.", nil),
		121: syscalls.PartiallySupported("sched_getparam", SchedGetparam, "Stub implementation.", nil),
		122: syscalls.PartiallySupported("sched_setaffinity", SchedSetaffinity, "Stub implementation.", nil),
		123: syscalls.PartiallySupported("sched_getaffinity", SchedGetaffinity, "Stub implementation.", nil),
		124: syscalls.Supported("sched_yield", SchedYield),
		125: syscalls.PartiallySupported("sched_get_priority_max", SchedGetPriorityMax, "Stub implementation.", nil),
		126: syscalls.PartiallySupported("sched_get_priority_min", SchedGetPriorityMin, "Stub implementation.", nil),
		127: syscalls.ErrorWithEvent("sched_rr_get_interval", linuxerr.EPERM, "", nil),
		128: syscalls.Supported("restart_syscall", RestartSyscall),
		129: syscalls.Supported("kill", Kill),
		130: syscalls.Supported("tkill", Tkill),
		131: syscalls.Supported("tgkill", Tgkill),
		132: syscalls.Supported("sigaltstack", Sigaltstack),
		133: syscalls.Supported("rt_sigsuspend", RtSigsuspend),
		134: syscalls.Supported("rt_sigaction", RtSigaction),
		135: syscalls.Supported("rt_sigprocmask", RtSigprocmask),
		136: syscalls.Supported("rt_sigpending", RtSigpending),
		137: syscalls.Supported("rt_sigtimedwait", RtSigtimedwait),
		138: syscalls.Supported("rt_sigqueueinfo", RtSigqueueinfo),
		139: syscalls.Supported("rt_sigreturn", RtSigreturn),
		140: syscalls.PartiallySupported("setpriority", Setpriority, "Stub implementation.", nil),
		141: syscalls.PartiallySupported("getpriority", Getpriority, "Stub implementation.", nil),
		142: syscalls.CapError("reboot", linux.CAP_SYS_BOOT, "", nil),
		143: syscalls.Supported("setregid", Setregid),
		144: syscalls.SupportedPoint("setgid", Setgid, PointSetgid),
		145: syscalls.Supported("setreuid", Setreuid),
		146: syscalls.SupportedPoint("setuid", Setuid, PointSetuid),
		147: syscalls.SupportedPoint("setresuid", Setresuid, PointSetresuid),
		148: syscalls.Supported("getresuid", Getresuid),
		149: syscalls.SupportedPoint("setresgid", Setresgid, PointSetresgid),
		150: syscalls.Supported("getresgid", Getresgid),
		151: syscalls.ErrorWithEvent("setfsuid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
		152: syscalls.ErrorWithEvent("setfsgid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
		153: syscalls.Supported("times", Times),
		154: syscalls.Supported("setpgid", Setpgid),
		155: syscalls.Supported("getpgid", Getpgid),
		156: syscalls.Supported("getsid", Getsid),
		157: syscalls.SupportedPoint("setsid", Setsid, PointSetsid),
		158: syscalls.Supported("getgroups", Getgroups),
		159: syscalls.Supported("setgroups", Setgroups),
		160: syscalls.Supported("uname", Uname),
		161: syscalls.Supported("sethostname", Sethostname),
		162: syscalls.Supported("setdomainname", Setdomainname),
		163: syscalls.Supported("getrlimit", Getrlimit),
		164: syscalls.PartiallySupported("setrlimit", Setrlimit, "Not all rlimits are enforced.", nil),
		165: syscalls.PartiallySupported("getrusage", Getrusage, "Fields ru_maxrss, ru_minflt, ru_majflt, ru_inblock, ru_oublock are not supported. Fields ru_utime and ru_stime have low precision.", nil),
		166: syscalls.Supported("umask", Umask),
		167: syscalls.PartiallySupported("prctl", Prctl, "Not all options are supported.", nil),
		168: syscalls.Supported("getcpu", Getcpu),
		169: syscalls.Supported("gettimeofday", Gettimeofday),
		170: syscalls.CapError("settimeofday", linux.CAP_SYS_TIME, "", nil),
		171: syscalls.CapError("adjtimex", linux.CAP_SYS_TIME, "", nil),
		172: syscalls.Supported("getpid", Getpid),
		173: syscalls.Supported("getppid", Getppid),
		174: syscalls.Supported("getuid", Getuid),
		175: syscalls.Supported("geteuid", Geteuid),
		176: syscalls.Supported("getgid", Getgid),
		177: syscalls.Supported("getegid", Getegid),
		178: syscalls.Supported("gettid", Gettid),
		179: syscalls.PartiallySupported("sysinfo", Sysinfo, "Fields loads, sharedram, bufferram, totalswap, freeswap, totalhigh, freehigh not supported.", nil),
		180: syscalls.Supported("mq_open", MqOpen),
		181: syscalls.Supported("mq_unlink", MqUnlink),
		182: syscalls.ErrorWithEvent("mq_timedsend", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}),    // TODO(b/29354921)
		183: syscalls.ErrorWithEvent("mq_timedreceive", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
		184: syscalls.ErrorWithEvent("mq_notify", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}),       // TODO(b/29354921)
		185: syscalls.ErrorWithEvent("mq_getsetattr", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}),   // TODO(b/29354921)
		186: syscalls.Supported("msgget", Msgget),
		187: syscalls.Supported("msgctl", Msgctl),
		188: syscalls.Supported("msgrcv", Msgrcv),
		189: syscalls.Supported("msgsnd", Msgsnd),
		190: syscalls.Supported("semget", Semget),
		191: syscalls.Supported("semctl", Semctl),
		192: syscalls.Supported("semtimedop", Semtimedop),
		193: syscalls.PartiallySupported("semop", Semop, "Option SEM_UNDO not supported.", nil),
		194: syscalls.PartiallySupported("shmget", Shmget, "Option SHM_HUGETLB is not supported.", nil),
		195: syscalls.PartiallySupported("shmctl", Shmctl, "Options SHM_LOCK, SHM_UNLOCK are not supported.", nil),
		196: syscalls.PartiallySupported("shmat", Shmat, "Option SHM_RND is not supported.", nil),
		197: syscalls.Supported("shmdt", Shmdt),
		198: syscalls.SupportedPoint("socket", Socket, PointSocket),
		199: syscalls.SupportedPoint("socketpair", SocketPair, PointSocketpair),
		200: syscalls.SupportedPoint("bind", Bind, PointBind),
		201: syscalls.Supported("listen", Listen),
		202: syscalls.SupportedPoint("accept", Accept, PointAccept),
		203: syscalls.SupportedPoint("connect", Connect, PointConnect),
		204: syscalls.Supported("getsockname", GetSockName),
		205: syscalls.Supported("getpeername", GetPeerName),
		206: syscalls.Supported("sendto", SendTo),
		207: syscalls.Supported("recvfrom", RecvFrom),
		208: syscalls.Supported("setsockopt", SetSockOpt),
		209: syscalls.Supported("getsockopt", GetSockOpt),
		210: syscalls.Supported("shutdown", Shutdown),
		211: syscalls.Supported("sendmsg", SendMsg),
		212: syscalls.Supported("recvmsg", RecvMsg),
		213: syscalls.Supported("readahead", Readahead),
		214: syscalls.Supported("brk", Brk),
		215: syscalls.Supported("munmap", Munmap),
		216: syscalls.Supported("mremap", Mremap),
		217: syscalls.Error("add_key", linuxerr.EACCES, "Not available to user.", nil),
		218: syscalls.Error("request_key", linuxerr.EACCES, "Not available to user.", nil),
		219: syscalls.Error("keyctl", linuxerr.EACCES, "Not available to user.", nil),
		220: syscalls.PartiallySupportedPoint("clone", Clone, PointClone, "Options CLONE_PIDFD, CLONE_NEWCGROUP, CLONE_PARENT, CLONE_NEWTIME, CLONE_CLEAR_SIGHAND, and CLONE_SYSVSEM not supported.", nil),
		221: syscalls.SupportedPoint("execve", Execve, PointExecve),
		222: syscalls.Supported("mmap", Mmap),
		223: syscalls.PartiallySupported("fadvise64", Fadvise64, "Not all options are supported.", nil),
		224: syscalls.CapError("swapon", linux.CAP_SYS_ADMIN, "", nil),
		225: syscalls.CapError("swapoff", linux.CAP_SYS_ADMIN, "", nil),
		226: syscalls.Supported("mprotect", Mprotect),
		227: syscalls.PartiallySupported("msync", Msync, "Full data flush is not guaranteed at this time.", nil),
		228: syscalls.PartiallySupported("mlock", Mlock, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
		229: syscalls.PartiallySupported("munlock", Munlock, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
		230: syscalls.PartiallySupported("mlockall", Mlockall, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
		231: syscalls.PartiallySupported("munlockall", Munlockall, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
		232: syscalls.PartiallySupported("mincore", Mincore, "Stub implementation. The sandbox does not have access to this information. Reports all mapped pages are resident.", nil),
		233: syscalls.PartiallySupported("madvise", Madvise, "Options MADV_DONTNEED, MADV_DONTFORK are supported. Other advice is ignored.", nil),
		234: syscalls.ErrorWithEvent("remap_file_pages", linuxerr.ENOSYS, "Deprecated since Linux 3.16.", nil),
		235: syscalls.PartiallySupported("mbind", Mbind, "Stub implementation. Only a single NUMA node is advertised, and mempolicy is ignored accordingly, but mbind() will succeed and has effects reflected by get_mempolicy.", []string{"gvisor.dev/issue/262"}),
		236: syscalls.PartiallySupported("get_mempolicy", GetMempolicy, "Stub implementation.", nil),
		237: syscalls.PartiallySupported("set_mempolicy", SetMempolicy, "Stub implementation.", nil),
		238: syscalls.CapError("migrate_pages", linux.CAP_SYS_NICE, "", nil),
		239: syscalls.CapError("move_pages", linux.CAP_SYS_NICE, "", nil), // requires cap_sys_nice (mostly)
		240: syscalls.Supported("rt_tgsigqueueinfo", RtTgsigqueueinfo),
		241: syscalls.ErrorWithEvent("perf_event_open", linuxerr.ENODEV, "No support for perf counters", nil),
		242: syscalls.SupportedPoint("accept4", Accept4, PointAccept4),
		243: syscalls.Supported("recvmmsg", RecvMMsg),
		// Numbers 244-259 are absent from this table and therefore fall
		// through to the Missing handler below.
		260: syscalls.Supported("wait4", Wait4),
		261: syscalls.SupportedPoint("prlimit64", Prlimit64, PointPrlimit64),
		262: syscalls.ErrorWithEvent("fanotify_init", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
		263: syscalls.ErrorWithEvent("fanotify_mark", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
		264: syscalls.Error("name_to_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
		265: syscalls.Error("open_by_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
		266: syscalls.CapError("clock_adjtime", linux.CAP_SYS_TIME, "", nil),
		267: syscalls.Supported("syncfs", Syncfs),
		268: syscalls.Supported("setns", Setns),
		269: syscalls.Supported("sendmmsg", SendMMsg),
		270: syscalls.ErrorWithEvent("process_vm_readv", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}),  // TODO(b/260724654)
		271: syscalls.ErrorWithEvent("process_vm_writev", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}), // TODO(b/260724654)
		272: syscalls.CapError("kcmp", linux.CAP_SYS_PTRACE, "", nil),
		273: syscalls.CapError("finit_module", linux.CAP_SYS_MODULE, "", nil),
		274: syscalls.ErrorWithEvent("sched_setattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
		275: syscalls.ErrorWithEvent("sched_getattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
		276: syscalls.Supported("renameat2", Renameat2),
		277: syscalls.Supported("seccomp", Seccomp),
		278: syscalls.Supported("getrandom", GetRandom),
		279: syscalls.Supported("memfd_create", MemfdCreate),
		280: syscalls.CapError("bpf", linux.CAP_SYS_ADMIN, "", nil),
		281: syscalls.SupportedPoint("execveat", Execveat, PointExecveat),
		282: syscalls.ErrorWithEvent("userfaultfd", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/266"}), // TODO(b/118906345)
		283: syscalls.PartiallySupported("membarrier", Membarrier, "Not supported on all platforms.", nil),
		284: syscalls.PartiallySupported("mlock2", Mlock2, "Stub implementation. The sandbox lacks appropriate permissions.", nil),

		// Syscalls after 284 are "backports" from versions of Linux after 4.4.
		285: syscalls.ErrorWithEvent("copy_file_range", linuxerr.ENOSYS, "", nil),
		286: syscalls.SupportedPoint("preadv2", Preadv2, PointPreadv2),
		287: syscalls.SupportedPoint("pwritev2", Pwritev2, PointPwritev2),
		288: syscalls.ErrorWithEvent("pkey_mprotect", linuxerr.ENOSYS, "", nil),
		289: syscalls.ErrorWithEvent("pkey_alloc", linuxerr.ENOSYS, "", nil),
		290: syscalls.ErrorWithEvent("pkey_free", linuxerr.ENOSYS, "", nil),
		291: syscalls.Supported("statx", Statx),
		292: syscalls.ErrorWithEvent("io_pgetevents", linuxerr.ENOSYS, "", nil),
		293: syscalls.PartiallySupported("rseq", RSeq, "Not supported on all platforms.", nil),

		// Linux skips ahead to syscall 424 to sync numbers between arches.
		424: syscalls.ErrorWithEvent("pidfd_send_signal", linuxerr.ENOSYS, "", nil),
		425: syscalls.PartiallySupported("io_uring_setup", IOUringSetup, "Not all flags and functionality supported.", nil),
		426: syscalls.PartiallySupported("io_uring_enter", IOUringEnter, "Not all flags and functionality supported.", nil),
		427: syscalls.ErrorWithEvent("io_uring_register", linuxerr.ENOSYS, "", nil),
		428: syscalls.ErrorWithEvent("open_tree", linuxerr.ENOSYS, "", nil),
		429: syscalls.ErrorWithEvent("move_mount", linuxerr.ENOSYS, "", nil),
		430: syscalls.ErrorWithEvent("fsopen", linuxerr.ENOSYS, "", nil),
		431: syscalls.ErrorWithEvent("fsconfig", linuxerr.ENOSYS, "", nil),
		432: syscalls.ErrorWithEvent("fsmount", linuxerr.ENOSYS, "", nil),
		433: syscalls.ErrorWithEvent("fspick", linuxerr.ENOSYS, "", nil),
		434: syscalls.ErrorWithEvent("pidfd_open", linuxerr.ENOSYS, "", nil),
		435: syscalls.PartiallySupported("clone3", Clone3, "Options CLONE_PIDFD, CLONE_NEWCGROUP, CLONE_INTO_CGROUP, CLONE_NEWTIME, CLONE_CLEAR_SIGHAND, CLONE_PARENT, CLONE_SYSVSEM and clone_args.set_tid are not supported.", nil),
		436: syscalls.Supported("close_range", CloseRange),
		439: syscalls.Supported("faccessat2", Faccessat2),
		441: syscalls.Supported("epoll_pwait2", EpollPwait2),
	},
	// Emulate is empty: unlike the AMD64 table above, there are no
	// vsyscall-style emulated addresses on arm64.
	Emulate: map[hostarch.Addr]uintptr{},
	// Missing handles syscall numbers not present in Table: it emits an
	// unimplemented-syscall event for the task and fails with ENOSYS.
	Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
		t.Kernel().EmitUnimplementedEvent(t, sysno)
		return 0, linuxerr.ENOSYS
	},
}
// init registers the AMD64 and ARM64 syscall tables with the kernel at
// package load time, so the sentry can dispatch syscalls for whichever
// architecture it was built for.
func init() {
	kernel.RegisterSyscallTable(AMD64)
	kernel.RegisterSyscallTable(ARM64)
}
|
package hw05_parallel_execution //nolint:golint,stylecheck
import (
"errors"
"sync"
)
// ErrErrorsLimitExceeded is returned by Run when more than m tasks fail.
var ErrErrorsLimitExceeded = errors.New("errors limit exceeded")

// Task is a unit of work; a non-nil result counts toward the error limit.
type Task func() error

// errorsCounter is a mutex-guarded tally of failed tasks shared by workers.
type errorsCounter struct {
	value int
	mu    sync.Mutex
}

// Run starts tasks in at most n concurrent goroutines and stops scheduling
// new tasks once more than m of them have returned an error. It returns
// ErrErrorsLimitExceeded in that case, nil otherwise.
func Run(tasks []Task, n int, m int) error {
	var wg sync.WaitGroup
	queueCh := make(chan int, n) // counting semaphore bounding concurrency at n
	quitCh := make(chan int)     // closed exactly once when the limit is crossed
	errorsCnt := errorsCounter{}
scheduling:
	for _, task := range tasks {
		select {
		case <-quitCh:
			// Error limit reached: stop launching new tasks.
			// BUG FIX: the original used a plain `break`, which only exits
			// the select — not the for loop — so remaining tasks could
			// still be scheduled after the limit was hit.
			break scheduling
		case queueCh <- 1:
			wg.Add(1)
			go func(task Task) {
				defer wg.Done()
				releaseQueue := true
				if task() != nil {
					errorsCnt.mu.Lock()
					// The goroutine that pushes the tally past m closes
					// quitCh exactly once and keeps its semaphore slot so
					// no new task can immediately take it.
					errorsCnt.value++
					if m == errorsCnt.value-1 {
						releaseQueue = false
						close(quitCh)
					}
					errorsCnt.mu.Unlock()
				}
				if releaseQueue {
					<-queueCh
				}
			}(task)
		}
	}
	// Wait for every launched goroutine before reading the tally; after
	// Wait returns, no goroutine writes errorsCnt.value, so the unlocked
	// read below is safe.
	wg.Wait()
	if errorsCnt.value > m {
		return ErrErrorsLimitExceeded
	}
	return nil
}
|
//
// Heavily influenced by https://github.com/prometheus/client_golang
// https://github.com/prometheus/client_golang/blob/8184d76b3b0bd3b01ed903690431ccb6826bf3e0/prometheus/promhttp/instrument_client.go
//
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Custom version of the promhttp.InstrumentRoundTripper's with labelling for the minecraft API Client "source"
package minecraft_trace
import (
"context"
"crypto/tls"
"net/http"
"net/http/httptrace"
"time"
"github.com/minotar/imgd/pkg/minecraft"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// ctxLabels builds the Prometheus label set for a request, using the
// minecraft API client "source" recorded on the context and falling back
// to "Unknown" when none is present.
func ctxLabels(ctx context.Context) prometheus.Labels {
	if source := minecraft.CtxGetSource(ctx); source != "" {
		return prometheus.Labels{"source": source}
	}
	return prometheus.Labels{"source": "Unknown"}
}
// InstrumentRoundTripperInFlight counts requests currently in flight,
// partitioned by the minecraft API client "source" taken from the request
// context. The prometheus.GaugeVec *MUST* have a "source" label.
func InstrumentRoundTripperInFlight(gaugeVec *prometheus.GaugeVec, next http.RoundTripper) promhttp.RoundTripperFunc {
	return promhttp.RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
		labels := ctxLabels(r.Context())
		gaugeVec.With(labels).Inc()
		defer gaugeVec.With(labels).Dec()
		return next.RoundTrip(r)
	})
}
// InstrumentRoundTripperDuration observes request duration with the
// "source" label curried in from the request context. The
// prometheus.ObserverVec *MUST* have a "source" label; "code" and
// "method" are optional (method will always be GET for this client).
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) promhttp.RoundTripperFunc {
	return promhttp.RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
		curried := obs.MustCurryWith(ctxLabels(r.Context()))
		instrumented := promhttp.InstrumentRoundTripperDuration(curried, next)
		return instrumented.RoundTrip(r)
	})
}
// InstrumentTrace wires httptrace client events to Prometheus observers.
// Any field left nil is simply not recorded. Each prometheus.ObserverVec
// *MUST* have a "source" label.
type InstrumentTrace struct {
	GotFirstResponseByte prometheus.ObserverVec // first response byte received
	DNSStart             prometheus.ObserverVec // DNS lookup began
	DNSDone              prometheus.ObserverVec // DNS lookup finished
	ConnectStart         prometheus.ObserverVec // TCP dial began
	ConnectDone          prometheus.ObserverVec // TCP dial finished (successful dials only)
	TLSHandshakeStart    prometheus.ObserverVec // TLS handshake began
	TLSHandshakeDone     prometheus.ObserverVec // TLS handshake finished (successful handshakes only)
}
// InstrumentRoundTripperTrace attaches an httptrace.ClientTrace to each
// request and records, on every configured observer in it, the seconds
// elapsed since the request started, labelled by the context "source".
// Failed connects and failed TLS handshakes are not observed.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) promhttp.RoundTripperFunc {
	return promhttp.RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
		labels := ctxLabels(r.Context())
		start := time.Now()
		// observe records elapsed-since-start on o, skipping observers
		// that were not configured.
		observe := func(o prometheus.ObserverVec) {
			if o != nil {
				o.With(labels).Observe(time.Since(start).Seconds())
			}
		}
		trace := &httptrace.ClientTrace{
			DNSStart:     func(_ httptrace.DNSStartInfo) { observe(it.DNSStart) },
			DNSDone:      func(_ httptrace.DNSDoneInfo) { observe(it.DNSDone) },
			ConnectStart: func(_, _ string) { observe(it.ConnectStart) },
			ConnectDone: func(_, _ string, err error) {
				if err == nil {
					observe(it.ConnectDone)
				}
			},
			GotFirstResponseByte: func() { observe(it.GotFirstResponseByte) },
			TLSHandshakeStart:    func() { observe(it.TLSHandshakeStart) },
			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
				if err == nil {
					observe(it.TLSHandshakeDone)
				}
			},
		}
		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
		return next.RoundTrip(r)
	})
}
|
package xmppversion
import (
"encoding/xml"
"testing"
"github.com/stretchr/testify/assert"
)
// TestMarshalEmptyQueryResult checks that the zero-value result still
// serializes with the mandatory (empty) name and version child elements.
func TestMarshalEmptyQueryResult(t *testing.T) {
	var result IQQueryResult
	xmlBuf, err := xml.Marshal(&result)
	assert.Nil(t, err)
	expected := `<query xmlns="jabber:iq:version"><name></name><version></version></query>`
	assert.Equal(t, expected, string(xmlBuf))
}
// TestMarshalBasicQueryResult checks serialization of a populated result.
func TestMarshalBasicQueryResult(t *testing.T) {
	result := IQQueryResult{Name: "XMPP Go", Version: "1.23.456"}
	xmlBuf, err := xml.Marshal(&result)
	assert.Nil(t, err)
	expected := `<query xmlns="jabber:iq:version"><name>XMPP Go</name><version>1.23.456</version></query>`
	assert.Equal(t, expected, string(xmlBuf))
}
|
package app
import (
"time"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
repository "github.com/shandysiswandi/echo-service/internal/adapter/mongorepo"
"github.com/shandysiswandi/echo-service/internal/config"
"github.com/shandysiswandi/echo-service/internal/domain/usecase"
"github.com/shandysiswandi/echo-service/internal/infrastructure/app/handler"
"github.com/shandysiswandi/echo-service/internal/infrastructure/app/middle"
"github.com/shandysiswandi/echo-service/internal/infrastructure/mongodb"
"github.com/shandysiswandi/echo-service/pkg/clock"
"github.com/shandysiswandi/echo-service/pkg/gen"
"github.com/shandysiswandi/echo-service/pkg/validation"
)
// New assembles the fully configured Echo instance: server timeouts, the
// middleware chain (registration order is significant), and all HTTP
// routes wired to their handlers. cfg supplies runtime settings such as
// the JWT secret; dbm provides the Mongo connection the repository uses.
func New(cfg *config.Config, dbm *mongodb.MongoDB) *echo.Echo {
	e := echo.New()
	v := validation.New()
	/* ***** ***** ***** ***** ***** */
	/* setup server
	/* ***** ***** ***** ***** ***** */
	e.HideBanner = true
	e.Server.ReadTimeout = 30 * time.Second
	e.Server.WriteTimeout = 30 * time.Second
	e.HTTPErrorHandler = middle.HTTPCustomError
	e.Validator = v
	/* ***** ***** ***** ***** ***** */
	/* setup middleware
	/* ***** ***** ***** ***** ***** */
	e.Pre(middleware.RemoveTrailingSlash())
	e.Use(middle.Logger())
	e.Use(middleware.Recover())
	e.Use(middleware.Secure())
	e.Use(middleware.BodyLimit("1M"))
	e.Use(middleware.Decompress())
	e.Use(middleware.GzipWithConfig(middleware.GzipConfig{Level: 9}))
	e.Use(middle.CORS())
	// Paths reachable without a JWT.
	whiteList := []string{"/", "/xxx", "/health", "/jwt", "/graceful"}
	e.Use(middle.JWT(cfg.JWTSecret, whiteList...))
	/* ***** ***** ***** ***** ***** */
	/* setup router
	/* ***** ***** ***** ***** ***** */
	// register all library
	generator := gen.New()
	clk := clock.New()
	// register all repository
	tdr := repository.NewTodoMongo(dbm.GetDB())
	// register all usecase
	tdu := usecase.NewTodoUsecase(tdr)
	// register handler
	h := handler.New(handler.HandlerConfig{
		Validator:   v,
		Generator:   generator,
		Clock:       clk,
		TodoUsecase: tdu,
	})
	e.GET("/", h.Home)
	e.GET("/graceful", h.Graceful)
	e.GET("/health", h.Health)
	e.GET("/jwt", h.JWT)
	e.GET("/todos", h.FetchTodos)
	e.GET("/todos/:id", h.GetTodoById)
	e.POST("/todos", h.CreateTodo)
	e.PATCH("/todos/:id", h.UpdateTodoById)
	e.PUT("/todos/:id", h.ReplaceTodoById)
	e.DELETE("/todos/:id", h.DeleteTodoById)
	return e
}
|
package plugins
import (
"github.com/stretchr/testify/assert"
"testing"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
)
// Test_customHealthProbe verifies that customHealthProbe strips the
// liveness and readiness probes from every container of a workload when
// DisableAllLivenessProbe / DisableAllReadinessProbe are set.
func Test_customHealthProbe(t *testing.T) {
	tests := []struct {
		unstructuredObj       *unstructured.Unstructured // workload under test, in unstructured form
		customHealthProbeArgs *CustomHealthProbeArgs     // which probe kinds to strip
		err                   error                      // expected error type (nil means success expected)
		result                *unstructured.Unstructured // expected object after the call
	}{
		{
			// StatefulSet with one container carrying both probe kinds;
			// disabling both should leave the container probe-free.
			unstructuredObj: convertObjToUnstructured(&appsv1.StatefulSet{
				Spec: appsv1.StatefulSetSpec{
					Template: v1.PodTemplateSpec{
						Spec: v1.PodSpec{
							Containers: []v1.Container{
								{
									Name:           "test-container",
									LivenessProbe:  &v1.Probe{},
									ReadinessProbe: &v1.Probe{},
								},
							},
						},
					},
				},
			}),
			customHealthProbeArgs: &CustomHealthProbeArgs{
				DisableAllReadinessProbe: true,
				DisableAllLivenessProbe:  true,
			},
			result: convertObjToUnstructured(&appsv1.StatefulSet{
				Spec: appsv1.StatefulSetSpec{
					Template: v1.PodTemplateSpec{
						Spec: v1.PodSpec{
							Containers: []v1.Container{
								{
									Name: "test-container",
								},
							},
						},
					},
				},
			}),
		},
	}
	// For each case: mutate in place, then compare error type and the
	// full mutated object against expectations.
	for _, test := range tests {
		err := customHealthProbe(test.unstructuredObj, test.customHealthProbeArgs)
		assert.IsType(t, test.err, err)
		assert.Equal(t, test.result, test.unstructuredObj)
	}
}
|
package server
import (
pb "github.com/1851616111/xchain/pkg/protos"
cm "github.com/1851616111/xchain/pkg/server/connection_manager"
"log"
"os"
"time"
)
// logger prefixes all controller log output with "[controller]" so it
// can be told apart from other subsystems sharing stderr.
var logger = log.New(os.Stderr, "[controller]", log.LstdFlags)
// RunController is the node's main event loop. It multiplexes three
// sources: inbound connection handshakes (recvConnectCh), requests to
// dial out as a client (lounchClientCh), and a periodic keepalive ping.
// It never returns.
func (n *Node) RunController() {
	// successFunc finalizes an outbound handshake: it registers the
	// connection on the node and starts a reader goroutine for it.
	successFunc := func(target pb.EndPoint, con cm.Connection) error {
		if err := n.Connect(target, con); err != nil {
			return err
		}
		done := make(chan struct{}, 1)
		n.runningClientDoneCH[target.Id] = done
		go clientConnectionHandler(target, con, done)
		return nil
	}
	// pingFunc sends one keepalive; on failure the peer is dropped.
	pingFunc := func(id string, con cm.Connection) error {
		if err := con.Send(makePingReqMsg()); err != nil {
			logger.Printf("broadcast ping id:%s err %v\n", id, err)
			n.RemoteClient(id) // NOTE(review): likely a typo for "RemoveClient" — confirm
			return err
		}
		return nil
	}
	// FIX: create the ping ticker once. The original called time.Tick
	// inside the select, which allocated a brand-new ticker on every loop
	// iteration, leaking tickers and (almost) never actually firing.
	ticker := time.NewTicker(n.pingDuration)
	defer ticker.Stop()
	for {
		select {
		case rc := <-n.recvConnectCh:
			logger.Printf("node controller: recv connection %v\n", rc)
			if err := n.Accept(rc.client, rc.con); err != nil {
				rc.errCh <- err
			} else {
				// FIX: the original sent the undefined identifier `done`
				// here (a compile error); signal completion with an empty
				// struct the same way the client path does.
				rc.doneCh <- struct{}{}
			}
		case task := <-n.lounchClientCh:
			logger.Printf("connect to entry point %s\n", task.targetAddress)
			// Acting as a client: run the handshake; on success,
			// successFunc registers the connection on the node, and the
			// outcome is reported back to the requester.
			if err := n.startAndJoin(task.targetAddress, successFunc); err != nil {
				logger.Printf("node controller: lounch connection err:%v\n", err)
				task.errCh <- err
				// FIX: previously doneCh was signalled even on error,
				// after errCh, and the success log below was unreachable.
				continue
			}
			task.doneCh <- struct{}{}
			logger.Printf("node controller: success launch connection for %s\n", task.targetAddress)
		case <-ticker.C:
			if err := n.netManager.BroadcastFunc(true, pingFunc); err != nil {
				logger.Printf("broadcast ping err %v\n", err)
			}
		}
	}
}
|
// Copyright © SAS Institute Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package closeonce
import (
"sync"
"sync/atomic"
)
// Closed implements idempotent, error-remembering close semantics: the
// first Close call runs the supplied function and caches its error;
// later calls return that cached error without re-running anything.
// The zero value is ready to use.
type Closed struct {
	done uintptr    // set to 1 (atomically) once Close has run; read lock-free by Closed
	mu   sync.Mutex // serializes Close callers
	err  error      // result of the first Close call
}

// Closed reports whether Close has already completed, without taking
// the mutex.
func (o *Closed) Closed() bool {
	return atomic.LoadUintptr(&o.done) != 0
}

// Close runs f at most once and returns its error; every subsequent
// call returns the same error. Concurrent callers block until the first
// call finishes.
func (o *Closed) Close(f func() error) error {
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.Closed() {
		return o.err
	}
	o.err = f()
	// FIX: publish completion with the atomic store only. The original
	// followed it with a plain `o.done = 1`, a non-atomic write that
	// races with the atomic load in Closed().
	atomic.StoreUintptr(&o.done, 1)
	return o.err
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"qiniupkg.com/api.v7/conf"
"qiniupkg.com/api.v7/kodo"
"qiniupkg.com/api.v7/kodocli"
"strings"
"time"
)
const defCfgFile = "gobak.cfg"
type UplInfo struct {
BakFile string
AK string
SK string
Bucket string
Key string
}
type PutRet struct {
Hash string `json:"hash"`
Key string `json:"key"`
}
func getCurDir() string {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return ""
}
return dir + "/"
}
func do7zBak(cfgFile string, ui *UplInfo) bool {
bytes, _ := ioutil.ReadFile(cfgFile)
if bytes != nil {
lines := strings.Split(string(bytes), "\n")
max := len(lines)
/*
AK
SK
Bucket
file1
file2
..
*/
from := 3
if max > from {
ui.AK = lines[0]
ui.SK = lines[1]
ui.Bucket = lines[2]
//bak 3 days' files
ui.Key = fmt.Sprintf("%d", time.Now().Weekday()%3) + ".7z"
ui.BakFile = getCurDir() + ui.Key
os.Remove(ui.BakFile)
for i := from; i < max; i++ {
if lines[i] == "" || lines[i] == "\n" {
continue
}
cmd := exec.Command("7z", "a", "-t7z", "-mhe=on", ui.BakFile, lines[i])
if _, err := cmd.Output(); err != nil {
fmt.Println("7z fail:", lines[i], err)
}
}
return true
}
}
return false
}
// QNUpl uploads ui.BakFile to the Qiniu bucket named in ui using the
// account keys from ui. It returns true on success and false (after
// printing the failure) otherwise.
func QNUpl(ui *UplInfo) bool {
	// Credentials are passed via the SDK's package-level config.
	conf.ACCESS_KEY = ui.AK
	conf.SECRET_KEY = ui.SK
	c := kodo.New(0, nil)
	// Token scoped to exactly this bucket:key pair, valid one hour.
	policy := &kodo.PutPolicy{
		Scope:   ui.Bucket + ":" + ui.Key,
		Expires: 3600,
	}
	token := c.MakeUptoken(policy)
	zone := 0
	uploader := kodocli.NewUploader(zone, nil)
	var ret PutRet
	// NOTE(review): this local shadows the path/filepath package; rename
	// it when next touching this function.
	filepath := ui.BakFile
	res := uploader.PutFile(nil, &ret, token, ui.Key, filepath, nil)
	if res != nil {
		fmt.Println("upl fail:", res)
		return false
	}
	return true
}
// main drives the backup: build the archive from the config next to the
// executable, upload it to Qiniu, then delete the local archive.
func main() {
	var ui UplInfo
	fmt.Println("7z..")
	if !do7zBak(getCurDir()+defCfgFile, &ui) {
		return
	}
	fmt.Println("upl..")
	if QNUpl(&ui) {
		fmt.Println("baked.")
	}
	os.Remove(ui.BakFile)
}
|
package main
import (
"fmt"
"net/http"
)
func sayHello(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w,"<script>alert(\"hello GO\")</script>")
}
// main registers the single route and serves HTTP on :9999; a failure
// to bind is reported and the process exits.
func main() {
	// route registration
	http.HandleFunc("/", sayHello)
	if err := http.ListenAndServe(":9999", nil); err != nil {
		fmt.Println("建立http服务器失败", err)
		return
	}
}
|
package conf
import (
"context"
"sync"
kit "shylinux.com/x/toolkits"
)
// Any is a convenience alias for an arbitrary value.
type Any = interface{}

// Conf is a hierarchical configuration node.
type Conf struct {
	data   Any                // backing value tree, navigated with kit.Value
	cancel context.CancelFunc // cancels ctx (and contexts derived from it by Sub)
	ctx    context.Context    // lifetime of this node; children derive from it
	wg     sync.WaitGroup     // NOTE(review): not used by any method visible here — confirm purpose
	sup    *Conf              // parent configuration; nil for a root created by New
}
// GetBool resolves key to a boolean. A stored value counts as true only
// when it formats to "true", "on" or "1". When the key is absent, the
// first true default wins; otherwise false.
func (conf *Conf) GetBool(key string, def ...bool) bool {
	if val := kit.Value(conf.data, key); val != nil {
		switch kit.Format(val) {
		case "true", "on", "1":
			return true
		}
		return false
	}
	for _, fallback := range def {
		if fallback {
			return fallback
		}
	}
	return false
}
// GetInt resolves key to an int via kit.Int; when the key is absent,
// the first non-zero default wins, else 0.
func (conf *Conf) GetInt(key string, def ...int) int {
	val := kit.Value(conf.data, key)
	if val != nil {
		return kit.Int(val)
	}
	for _, fallback := range def {
		if fallback != 0 {
			return fallback
		}
	}
	return 0
}
// Get resolves key to a string via kit.Format; when the key is absent,
// the first non-empty default wins, else "".
func (conf *Conf) Get(key string, def ...string) string {
	val := kit.Value(conf.data, key)
	if val != nil {
		return kit.Format(val)
	}
	for _, fallback := range def {
		if fallback != "" {
			return fallback
		}
	}
	return ""
}
// GetList resolves key to a string slice via kit.Simple; when the key
// is absent, the first non-empty default slice wins, else nil.
func (conf *Conf) GetList(key string, def ...[]string) []string {
	val := kit.Value(conf.data, key)
	if val != nil {
		return kit.Simple(val)
	}
	for _, fallback := range def {
		if len(fallback) > 0 {
			return fallback
		}
	}
	return nil
}
// GetDict resolves key to a string-to-string map. A stored
// map[string]string is returned directly; a kit.Map has each value
// formatted to a string. When the key is absent, the first non-empty
// default map wins; the final fallback is an empty (non-nil) map.
func (conf *Conf) GetDict(key string, def ...map[string]string) map[string]string {
	res := map[string]string{}
	if val := kit.Value(conf.data, key); val != nil {
		switch typed := val.(type) {
		case map[string]string:
			return typed
		case kit.Map:
			for k, v := range typed {
				res[k] = kit.Format(v)
			}
			return res
		}
	}
	for _, fallback := range def {
		if len(fallback) > 0 {
			return fallback
		}
	}
	return res
}
// GetVal resolves key to the raw stored value; when absent, the first
// non-nil default wins, else nil.
func (conf *Conf) GetVal(key string, def ...Any) Any {
	val := kit.Value(conf.data, key)
	if val != nil {
		return val
	}
	for _, fallback := range def {
		if fallback != nil {
			return fallback
		}
	}
	return nil
}
// Sub derives a child Conf rooted at key: its data is the subtree at
// key, its context is derived (and independently cancellable) from this
// node's, and sup links back to the parent.
func (conf *Conf) Sub(key string) *Conf {
	ctx, cancel := context.WithCancel(conf.ctx)
	return &Conf{data: kit.Value(conf.data, key), cancel: cancel, ctx: ctx, sup: conf}
}
// New wraps data in a root Conf with a fresh cancellable context and no
// parent.
func New(data Any) *Conf {
	ctx, cancel := context.WithCancel(context.TODO())
	return &Conf{data: data, cancel: cancel, ctx: ctx}
}
|
package Utils
import (
"AlgorithmPractice/src/common/Intergration/DB"
"errors"
"fmt"
"reflect"
"strings"
)
// ClazzTools
// @author: liujun
// @date: 2022/6/14 20:16
// @author—Email: ljfirst@mail.ustc.edu.cn
// @description:
// 获取反射对象的所有方法 {@link ClassReflectTools#GetExecMethod}
// 执行方法 {@link ClassReflectTools#ExecMethod}
// 转换方法的输出 {@link ClassReflectTools#GetMethodOutput}
// 转换方法的输入 {@link ClassReflectTools#GetMethodInput}
// 反射方法并执行 {@link ClassReflectTools#ExecAssert}
// 判断结构是否相等 {@link ClassReflectTools#judgeMethodResultEquals}
// 打印参数 {@link ClassReflectTools#printParams}
// @blogURL:
// GetClassName returns the bare type name of clazz, dereferencing a
// pointer type first so *T and T both report "T".
func GetClassName(clazz interface{}) string {
	clazzType := reflect.TypeOf(clazz)
	// A pointer type's own Name() is empty; Elem() reaches the real type.
	if clazzType.Kind() == reflect.Ptr {
		clazzType = clazzType.Elem()
	}
	// Name() already excludes the package path, so the split below only
	// matters if the name ever contained a dot (it normally does not).
	typeName := clazzType.Name()
	parts := strings.Split(typeName, ".")
	return parts[len(parts)-1]
}
// GetMethodAndName collects the reflect.Method descriptors (and their
// names, in matching order) of clazz whose names contain the substring
// "method" or "solution".
//
// FIX: the original used strings.ContainsAny, which matches any single
// character from the given set, so virtually every method name matched;
// strings.Contains performs the intended substring match.
func GetMethodAndName(clazz interface{}) ([]reflect.Method, []string) {
	methodName := make([]string, 0)
	methods := make([]reflect.Method, 0)
	data_t := reflect.TypeOf(clazz)
	for i := 0; i < data_t.NumMethod(); i++ {
		method := data_t.Method(i)
		// Only methods following the "method"/"solution" naming
		// convention take part in reflective test execution.
		if strings.Contains(method.Name, "method") || strings.Contains(method.Name, "solution") {
			methodName = append(methodName, method.Name)
			methods = append(methods, method)
		}
	}
	return methods, methodName
}
// GetReflectResult1 experimentally invokes methodName on clazz via
// reflection, printing the type's method set along the way.
// NOTE(review): the args slice below is built from reflect.ValueOf(nil),
// which yields zero Values — Call will panic for any method that takes
// arguments — and inputParams is accepted but never used. This looks
// like debug scaffolding; confirm before relying on it.
func GetReflectResult1(clazz interface{}, methodName string, inputParams ...interface{}) ([]reflect.Value, error) {
	//data := doublePoint.ThreeSumLikely{}
	// Obtain the struct's type and value descriptors.
	data_t := reflect.TypeOf(clazz)
	data_f := reflect.ValueOf(clazz)
	fmt.Println("数据结构体的类型为:", data_t.Name())
	fmt.Println("数据结构体的类型为:", data_f.MethodByName("method"))
	// Dump the full method set for debugging.
	for i := 0; i < data_t.NumMethod(); i++ {
		method := data_t.Method(i)
		fmt.Printf("方法名:%s, 方法类型:%v\n", method.Name, method.Type)
	}
	//fmt.Println("修改前的UserName为:", data.Name)
	// Invoke the method through reflection.
	method := data_f.MethodByName(methodName)
	if method.Kind() == reflect.Func {
		args := []reflect.Value{
			reflect.ValueOf(nil),
			reflect.ValueOf(nil),
		}
		return method.Call(args), nil
		//fmt.Println("修改后的UserName为:", data.Name)
	}
	return nil, nil
}
// GetReflectResult attempts to invoke methodName on method via
// reflection, converting inputParams to reflect.Values first.
// NOTE(review): data_f is built from &method, a *interface{} — a
// pointer-to-interface has no methods, so MethodByName below always
// yields an invalid (non-Func) Value and the df.Call branch never runs.
// The fallthrough then treats the whole `method` argument as a function
// value but only checks its arity before returning (nil, nil). This
// reads like work-in-progress code; confirm the intent before use.
func GetReflectResult(method interface{}, methodName string, inputParams ...interface{}) ([]reflect.Value, error) {
	/*clazzType := reflect.TypeOf(name)
	if clazzType.Kind() == reflect.Ptr { // a pointer type needs Elem() to reach the real type
		clazzType = clazzType.Elem()
	}*/
	//v := reflect.ValueOf(method)
	data_t := reflect.TypeOf(method)
	data_f := reflect.ValueOf(&method)
	// Dump the method set for debugging.
	for i := 0; i < data_t.NumMethod(); i++ {
		method1 := data_t.Method(i)
		fmt.Printf("方法名:%s, 方法类型:%v\n", method1.Name, method1.Type)
	}
	// Wrap every input in a reflect.Value for a potential Call.
	input := make([]reflect.Value, len(inputParams))
	for k, v := range inputParams {
		input[k] = reflect.ValueOf(v)
	}
	df := data_f.MethodByName(methodName)
	if df.Kind() == reflect.Func {
		return df.Call(input), nil
	}
	//methodOrigin := v.MethodByName(methodName)
	execMethod := reflect.ValueOf(method)
	tt := execMethod.Type()
	fmt.Println(tt)
	if len(inputParams) != tt.NumIn() {
		return nil, errors.New("the number of input params not match!")
	}
	return nil, nil
}
// judgeMethodResultEquals reports whether the first elements of two
// reflect.Value result slices hold equal values. Supported dynamic
// types are int, string (case-insensitive), bool and []int; anything
// else yields false, as do empty or length-mismatched slices.
//
// FIX: the []int branch previously called IntArrayEquals, discarded its
// result, and returned true unconditionally; it now returns the actual
// comparison result.
func judgeMethodResultEquals(ExpectOutput, ActualOutput []reflect.Value) bool {
	// Reject empty or mismatched-length result lists up front.
	// NOTE(review): reflect.ValueOf applied to a reflect.Value always has
	// Kind Struct, so the Kind comparison below can never fail; comparing
	// ExpectOutput[0].Kind() with ActualOutput[0].Kind() was probably
	// intended — confirm before changing.
	if len(ExpectOutput) < 1 || len(ActualOutput) < 1 || len(ExpectOutput) != len(ActualOutput) ||
		reflect.ValueOf(ExpectOutput[0]).Kind() != reflect.ValueOf(ActualOutput[0]).Kind() {
		return false
	}
	switch ExpectOutput[0].Interface().(type) {
	case int:
		return ExpectOutput[0].Int() == ActualOutput[0].Int()
	case string:
		return strings.EqualFold(ExpectOutput[0].String(), ActualOutput[0].String())
	case bool:
		return ExpectOutput[0].Bool() == ActualOutput[0].Bool()
	case []int:
		return IntArrayEquals(ExpectOutput[0].Interface().([]int), ActualOutput[0].Interface().([]int))
	}
	return false
}
// printParams prints a single value whose dynamic type is one of the
// supported kinds (int, string, bool, float64, []int, or the project's
// *DB.SQLTestDataEntity); any other type is silently ignored.
func printParams(params interface{}) {
	fmt.Println("ActualOutput:")
	//obj reflect.Value
	obj := reflect.ValueOf(params)
	/*if obj == nil {
		return
	}*/
	//objType := reflect.TypeOf(obj) // reflect grammar: obtains obj's type
	/*aa := reflect.Indirect(obj) // go grammar: follows the pointer held by v; Indirect returns the zero Value for a nil pointer, and v itself when v is not a pointer
	fmt.Print(aa.Kind())
	fmt.Print(aa)*/
	// Dispatch on the dynamic type and print with the matching form.
	switch obj.Interface().(type) {
	case int:
		fmt.Println(obj.Interface().(int))
	case string:
		fmt.Println(obj.Interface().(string))
	case bool:
		fmt.Println(obj.Interface().(bool))
	case float64:
		fmt.Println(obj.Interface().(float64))
	case []int:
		intArray := obj.Interface().([]int)
		IntArrayPrint(intArray)
	case *DB.SQLTestDataEntity:
		entity := obj.Interface().(*DB.SQLTestDataEntity)
		entity.Print()
	}
}
|
package cmd
import (
"context"
"fmt"
"github.com/loft-sh/devspace/pkg/devspace/config"
"github.com/loft-sh/devspace/pkg/devspace/config/loader"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/devspace/dependency"
"github.com/loft-sh/devspace/pkg/devspace/dependency/types"
"github.com/loft-sh/devspace/pkg/devspace/hook"
"github.com/loft-sh/devspace/pkg/devspace/plugin"
"github.com/sirupsen/logrus"
"io"
"os"
"sort"
"github.com/loft-sh/devspace/cmd/flags"
"github.com/loft-sh/devspace/pkg/util/factory"
logger "github.com/loft-sh/devspace/pkg/util/log"
"github.com/loft-sh/devspace/pkg/util/message"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v3"
"github.com/spf13/cobra"
)
// PrintCmd holds the flags and output sink for the "devspace print"
// command.
type PrintCmd struct {
	*flags.GlobalFlags

	// Out receives the rendered YAML; defaults to os.Stdout.
	Out io.Writer
	// StripNames blanks auto-generated Name fields before printing.
	StripNames bool

	// SkipInfo suppresses the vars/dependency summary around the config.
	SkipInfo bool
	// Dependency selects a (possibly nested, dot-separated) dependency
	// whose config should be printed instead of the root's.
	Dependency string
}
// NewPrintCmd creates the cobra command for "devspace print", which
// renders the effective configuration (after profiles, patches and
// variable substitution). Auto-generated names are stripped by default
// and output goes to stdout.
func NewPrintCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command {
	cmd := &PrintCmd{
		GlobalFlags: globalFlags,
		StripNames:  true,
		Out:         os.Stdout,
	}
	printCmd := &cobra.Command{
		Use:   "print",
		Short: "Prints displays the configuration",
		Long: `
#######################################################
################## devspace print #####################
#######################################################
Prints the configuration for the current or given
profile after all patching and variable substitution
#######################################################`,
		RunE: func(cobraCmd *cobra.Command, args []string) error {
			// Record the invoking command/args for plugins before running.
			plugin.SetPluginCommand(cobraCmd, args)
			return cmd.Run(f)
		},
	}
	printCmd.Flags().BoolVar(&cmd.SkipInfo, "skip-info", false, "When enabled, only prints the configuration without additional information")
	printCmd.Flags().StringVar(&cmd.Dependency, "dependency", "", "The dependency to print the config from. Use dot to access nested dependencies (e.g. dep1.dep2)")
	return printCmd
}
// Run executes the "print" command: it loads the effective config with
// an eager parser, resolves dependencies, runs "print" plugin hooks,
// optionally switches to a named dependency's config, and writes the
// resulting YAML to cmd.Out (or the log when Out is nil).
func (cmd *PrintCmd) Run(f factory.Factory) error {
	// Set config root
	log := f.GetLog()
	configOptions := cmd.ToConfigOptions()
	configLoader, err := f.NewConfigLoader(cmd.ConfigPath)
	if err != nil {
		return err
	}
	configExists, err := configLoader.SetDevSpaceRoot(log)
	if err != nil {
		return err
	} else if !configExists {
		return errors.New(message.ConfigNotFound)
	}
	// create kubectl client; a failure is only a warning because printing
	// the config does not strictly require cluster access
	client, err := f.NewKubeClientFromContext(cmd.KubeContext, cmd.Namespace)
	if err != nil {
		log.Warnf("Unable to create new kubectl client: %v", err)
	}
	parser := loader.NewEagerParser()
	// load config
	config, err := configLoader.LoadWithParser(context.Background(), nil, client, parser, configOptions, log)
	if err != nil {
		return err
	}
	// create devspace context
	ctx := devspacecontext.NewContext(context.Background(), config.Variables(), log).
		WithConfig(config).
		WithKubeClient(client)
	// resolve dependencies; failures are non-fatal for printing
	dependencies, err := dependency.NewManagerWithParser(ctx, configOptions, parser).ResolveAll(ctx, dependency.ResolveOptions{})
	if err != nil {
		log.Warnf("Error resolving dependencies: %v", err)
	}
	ctx = ctx.WithDependencies(dependencies)
	// Execute plugin hook
	err = hook.ExecuteHooks(ctx, nil, "print")
	if err != nil {
		return err
	}
	// When --dependency is set, print that dependency's config instead.
	if cmd.Dependency != "" {
		dep := dependency.GetDependencyByPath(dependencies, cmd.Dependency)
		if dep == nil {
			return fmt.Errorf("couldn't find dependency %s: make sure it gets loaded correctly", cmd.Dependency)
		}
		ctx = ctx.AsDependency(dep)
	}
	bsConfig, err := marshalConfig(ctx.Config().Config(), cmd.StripNames)
	if err != nil {
		return err
	}
	if !cmd.SkipInfo {
		err = printExtraInfo(ctx.Config(), dependencies, log)
		if err != nil {
			return err
		}
	}
	// Write to the configured writer when present, otherwise to the log.
	if cmd.Out != nil {
		_, err := cmd.Out.Write(bsConfig)
		if err != nil {
			return err
		}
	} else {
		log.WriteString(logrus.InfoLevel, string(bsConfig))
	}
	return nil
}
// marshalConfig serializes config to YAML. When stripNames is true, the
// auto-generated Name fields (which merely echo their map keys) are
// blanked first so they do not clutter the printed output.
// NOTE(review): the stripping mutates config in place; callers must not
// rely on those names afterwards.
func marshalConfig(config *latest.Config, stripNames bool) ([]byte, error) {
	// remove the auto generated names
	if stripNames {
		for k := range config.Images {
			config.Images[k].Name = ""
		}
		for k := range config.Deployments {
			config.Deployments[k].Name = ""
		}
		for k := range config.Dependencies {
			config.Dependencies[k].Name = ""
		}
		for k := range config.Pipelines {
			config.Pipelines[k].Name = ""
		}
		for k := range config.Dev {
			config.Dev[k].Name = ""
			// dev containers also carry an auto-filled container name
			for c := range config.Dev[k].Containers {
				config.Dev[k].Containers[c].Container = ""
			}
		}
		for k := range config.Vars {
			config.Vars[k].Name = ""
		}
		for k := range config.PullSecrets {
			config.PullSecrets[k].Name = ""
		}
		for k := range config.Commands {
			config.Commands[k].Name = ""
		}
	}
	return yaml.Marshal(config)
}
// printExtraInfo prints the human-oriented context around the config: a
// sorted Name/Value table of resolved variables, the loaded config
// path, and — when dependencies exist — the dependency tree up to five
// levels deep.
func printExtraInfo(config config.Config, dependencies []types.Dependency, log logger.Logger) error {
	log.WriteString(logrus.InfoLevel, "\n-------------------\n\nVars:\n")
	headerColumnNames := []string{"Name", "Value"}
	values := [][]string{}
	resolvedVars := config.Variables()
	for varName, varValue := range resolvedVars {
		values = append(values, []string{
			varName,
			fmt.Sprintf("%v", varValue),
		})
	}
	// Map iteration order is random; sort by name for stable output.
	sort.Slice(values, func(i, j int) bool {
		return values[i][0] < values[j][0]
	})
	if len(values) > 0 {
		logger.PrintTable(log, headerColumnNames, values)
	} else {
		log.Info("No vars found")
	}
	log.WriteString(logrus.InfoLevel, "\n-------------------\n\nLoaded path: "+config.Path()+"\n\n-------------------\n\n")
	if len(dependencies) > 0 {
		log.WriteString(logrus.InfoLevel, "Dependency Tree:\n\n> Root\n")
		for _, dep := range dependencies {
			printDependencyRecursive("--", dep, 5, log)
		}
		log.WriteString(logrus.InfoLevel, "\n-------------------\n\n")
	}
	return nil
}
// printDependencyRecursive writes dep's name at the current indentation
// prefix, then recurses into its children with the prefix grown by
// "--" per level. maxDepth bounds the recursion.
// FIX: the guard was previously `maxDepth == 0`, so a negative initial
// value would recurse without bound; `<= 0` makes any non-positive
// depth print nothing.
func printDependencyRecursive(prefix string, dep types.Dependency, maxDepth int, log logger.Logger) {
	if maxDepth <= 0 {
		return
	}
	log.WriteString(logrus.InfoLevel, prefix+"> "+dep.Name()+"\n")
	for _, child := range dep.Children() {
		printDependencyRecursive(prefix+"--", child, maxDepth-1, log)
	}
}
|
package server
import (
"testing"
"math/big"
"time"
"log"
)
// TestCalcEquilibrium exercises calculateEquilibrium on three AUD price
// points. Per the inline comments, the weighted sum is 2*1 + 1.5*2 +
// 1*3 = 8 over a weight total of 6, so the expected equilibrium is
// 8/6 ≈ 1.3333 — assumption taken from those comments; confirm against
// calculateEquilibrium's definition.
// NOTE(review): the test only logs the result and asserts nothing, so
// it can never fail; consider comparing against big.NewFloat(4.0/3.0).
func TestCalcEquilibrium(t *testing.T) {
	pp1 := PricePoint{"AUD", big.NewFloat(2.00), time.Now()} // 2 * 1 = 2
	pp2 := PricePoint{"AUD", big.NewFloat(1.50), time.Now()} // 1.50 * 2 = 3
	pp3 := PricePoint{"AUD", big.NewFloat(1.00), time.Now()} // 1 * 3 = 3
	// should be 8/6 = 1.333333
	ph := createValidPriceHistory(pp1, pp2, pp3)
	log.Println(ph.calculateEquilibrium())
}
|
package main
import (
"errors"
"fmt"
)
// Stack is a fixed-capacity LIFO of ints backed by a small array.
type Stack struct {
	// MaxTop is the number of usable slots.
	MaxTop int
	// Top indexes the current top element; -1 means the stack is empty.
	Top int
	// arr stores the elements; only arr[0..Top] are live.
	arr [5]int
}

// Push places val on top of the stack, failing when the stack is full.
func (s *Stack) Push(val int) error {
	if s.Top == s.MaxTop-1 {
		fmt.Println("stack full")
		return errors.New("stack full")
	}
	s.Top++
	s.arr[s.Top] = val
	return nil
}

// List prints the stack contents from top to bottom.
func (s *Stack) List() {
	if s.Top == -1 {
		fmt.Println("stack empty")
		return
	}
	for i := s.Top; i >= 0; i-- {
		fmt.Printf("arr[%d]=%d\n", i, s.arr[i])
	}
}

// Pop removes and returns the top element, failing when the stack is
// empty.
func (s *Stack) Pop() (val int, err error) {
	if s.Top == -1 {
		fmt.Println("stack empty")
		err = errors.New("stack empty")
		return
	}
	val, s.Top = s.arr[s.Top], s.Top-1
	return val, nil
}
// main demonstrates the Stack: fill it to capacity, dump it, pop one
// element, then dump it again.
func main() {
	s := &Stack{
		MaxTop: 5,
		Top:    -1,
	}
	for _, v := range []int{1, 2, 3, 4, 5} {
		s.Push(v)
	}
	s.List()
	val, err := s.Pop()
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(val)
	s.List()
}
|
package tempodb
import (
"fmt"
"testing"
"time"
"github.com/google/uuid"
"github.com/grafana/tempo/tempodb/backend"
"github.com/stretchr/testify/assert"
)
// TestTimeWindowBlockSelectorBlocksToCompact drives newTimeWindowBlockSelector
// through a table of blocklists and asserts, for the first two successive
// calls to BlocksToCompact, both the chosen blocks and the compaction
// hash that groups them.
func TestTimeWindowBlockSelectorBlocksToCompact(t *testing.T) {
	now := time.Now()
	timeWindow := 12 * time.Hour
	tenantID := ""
	tests := []struct {
		name           string
		blocklist      []*backend.BlockMeta
		minInputBlocks int    // optional, defaults to global const
		maxInputBlocks int    // optional, defaults to global const
		maxBlockBytes  uint64 // optional, defaults to 1MB (set in the runner below)
		expected       []*backend.BlockMeta
		expectedHash   string
		expectedSecond []*backend.BlockMeta
		expectedHash2  string
	}{
		{
			name:      "nil - nil",
			blocklist: nil,
			expected:  nil,
		},
		{
			name:      "empty - nil",
			blocklist: []*backend.BlockMeta{},
			expected:  nil,
		},
		{
			name: "only two",
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
			},
			expected: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
		},
		{
			// With maxInputBlocks=2 the two smallest (by TotalObjects) win.
			name: "choose smallest two",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					TotalObjects: 0,
					EndTime:      now,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					TotalObjects: 1,
					EndTime:      now,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					TotalObjects: 0,
					EndTime:      now,
				},
			},
			maxInputBlocks: 2,
			expected: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					TotalObjects: 0,
					EndTime:      now,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					TotalObjects: 0,
					EndTime:      now,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
		},
		{
			// Blocks in the same window are compacted together; the second
			// call returns the pair from the older window.
			name: "different windows",
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now.Add(-timeWindow),
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime: now.Add(-timeWindow),
				},
			},
			expected: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now.Add(-timeWindow),
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime: now.Add(-timeWindow),
				},
			},
			expectedHash2: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Add(-timeWindow).Unix()),
		},
		{
			// Smallest blocks are paired first; the larger pair comes second.
			name: "different sizes",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:      now,
					TotalObjects: 15,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:      now,
					TotalObjects: 1,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime:      now,
					TotalObjects: 3,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:      now,
					TotalObjects: 12,
				},
			},
			maxInputBlocks: 2,
			expected: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:      now,
					TotalObjects: 1,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime:      now,
					TotalObjects: 3,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:      now,
					TotalObjects: 12,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:      now,
					TotalObjects: 15,
				},
			},
			expectedHash2: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
		},
		{
			// Level-0 blocks are chosen before level-1 blocks; note the
			// second expected hash carries level 1.
			name: "different compaction lvls",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
			},
			expected: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: []*backend.BlockMeta{
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:         now,
					CompactionLevel: 1,
				},
			},
			expectedHash2: fmt.Sprintf("%v-%v-%v", tenantID, 1, now.Unix()),
		},
		{
			// Per the expected hash below, blocks outside the active window
			// are grouped under a hash with no compaction-level component.
			name: "active time window vs not",
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:         now.Add(-activeWindowDuration - time.Minute),
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:         now.Add(-activeWindowDuration - time.Minute),
					CompactionLevel: 0,
				},
			},
			expected: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: []*backend.BlockMeta{
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:         now.Add(-activeWindowDuration - time.Minute),
					CompactionLevel: 0,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:         now.Add(-activeWindowDuration - time.Minute),
					CompactionLevel: 1,
				},
			},
			expectedHash2: fmt.Sprintf("%v-%v", tenantID, now.Add(-activeWindowDuration-time.Minute).Unix()),
		},
		{
			// NOTE(review): three entries share BlockID ...004 — confirm the
			// duplicate IDs are intentional test data.
			name: "choose lowest compaction level",
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime: now.Add(-timeWindow),
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000005"),
					EndTime: now.Add(-timeWindow),
				},
			},
			expected: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime: now,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime: now.Add(-timeWindow),
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000005"),
					EndTime: now.Add(-timeWindow),
				},
			},
			expectedHash2: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Add(-timeWindow).Unix()),
		},
		{
			name: "doesn't choose across time windows",
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now.Add(-timeWindow),
				},
			},
			expected:       nil,
			expectedHash:   "",
			expectedSecond: nil,
			expectedHash2:  "",
		},
		{
			// The runner passes 100 as the object limit; 99+2 exceeds it.
			name: "doesn't exceed max compaction objects",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					TotalObjects: 99,
					EndTime:      now,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					TotalObjects: 2,
					EndTime:      now,
				},
			},
			expected:       nil,
			expectedHash:   "",
			expectedSecond: nil,
			expectedHash2:  "",
		},
		{
			name: "doesn't exceed max block size",
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					Size:    50,
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					Size:    51,
					EndTime: now,
				},
			},
			maxBlockBytes:  100,
			expected:       nil,
			expectedHash:   "",
			expectedSecond: nil,
			expectedHash2:  "",
		},
		{
			name: "Returns as many blocks as possible without exceeding max compaction objects",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					TotalObjects: 50,
					EndTime:      now,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					TotalObjects: 50,
					EndTime:      now,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					TotalObjects: 50,
					EndTime:      now,
				}},
			expected: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					TotalObjects: 50,
					EndTime:      now,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					TotalObjects: 50,
					EndTime:      now,
				},
			},
			expectedHash:   fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: nil,
			expectedHash2:  "",
		},
		{
			name:          "Returns as many blocks as possible without exceeding max block size",
			maxBlockBytes: 100,
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					Size:    50,
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					Size:    50,
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					Size:    1,
					EndTime: now,
				}},
			expected: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					Size:    50,
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					Size:    50,
					EndTime: now,
				},
			},
			expectedHash:   fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: nil,
			expectedHash2:  "",
		},
		{
			// First compaction gets 3 blocks, second compaction gets 2 more
			name:           "choose more than 2 blocks",
			maxInputBlocks: 3,
			blocklist: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:      now,
					TotalObjects: 1,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:      now,
					TotalObjects: 2,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:      now,
					TotalObjects: 3,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:      now,
					TotalObjects: 4,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000005"),
					EndTime:      now,
					TotalObjects: 5,
				},
			},
			expected: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:      now,
					TotalObjects: 1,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:      now,
					TotalObjects: 2,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:      now,
					TotalObjects: 3,
				},
			},
			expectedHash: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
			expectedSecond: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:      now,
					TotalObjects: 4,
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000005"),
					EndTime:      now,
					TotalObjects: 5,
				},
			},
			expectedHash2: fmt.Sprintf("%v-%v-%v", tenantID, 0, now.Unix()),
		},
		{
			name: "honors minimum block count",
			blocklist: []*backend.BlockMeta{
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime: now,
				},
				{
					BlockID: uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime: now,
				},
			},
			minInputBlocks: 3,
			maxInputBlocks: 3,
			expected:       nil,
			expectedHash:   "",
			expectedSecond: nil,
			expectedHash2:  "",
		},
		{
			// Only one level-0 block exists, so the level-1 trio is chosen.
			name: "can choose blocks not at the lowest compaction level",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:         now,
					CompactionLevel: 0,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:         now,
					CompactionLevel: 1,
				},
			},
			minInputBlocks: 3,
			maxInputBlocks: 3,
			expected: []*backend.BlockMeta{
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000003"),
					EndTime:         now,
					CompactionLevel: 1,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000004"),
					EndTime:         now,
					CompactionLevel: 1,
				},
			},
			expectedHash:   fmt.Sprintf("%v-%v-%v", tenantID, 1, now.Unix()),
			expectedSecond: nil,
			expectedHash2:  "",
		},
		{
			name: "doesn't select blocks in last active window",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:         now.Add(-activeWindowDuration),
					CompactionLevel: 0,
				},
				{
					BlockID:         uuid.MustParse("00000000-0000-0000-0000-000000000002"),
					EndTime:         now.Add(-activeWindowDuration),
					CompactionLevel: 0,
				},
			},
		},
		{
			name: "don't compact across dataEncodings",
			blocklist: []*backend.BlockMeta{
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000000"),
					EndTime:      now,
					DataEncoding: "bar",
				},
				{
					BlockID:      uuid.MustParse("00000000-0000-0000-0000-000000000001"),
					EndTime:      now,
					DataEncoding: "foo",
				},
			},
			expected: nil,
		},
	}
	// Each case builds a fresh selector and asserts the result of two
	// successive BlocksToCompact calls.
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			min := defaultMinInputBlocks
			if tt.minInputBlocks > 0 {
				min = tt.minInputBlocks
			}
			max := defaultMaxInputBlocks
			if tt.maxInputBlocks > 0 {
				max = tt.maxInputBlocks
			}
			maxSize := uint64(1024 * 1024)
			if tt.maxBlockBytes > 0 {
				maxSize = tt.maxBlockBytes
			}
			selector := newTimeWindowBlockSelector(tt.blocklist, time.Second, 100, maxSize, min, max)
			actual, hash := selector.BlocksToCompact()
			assert.Equal(t, tt.expected, actual)
			assert.Equal(t, tt.expectedHash, hash)
			actual, hash = selector.BlocksToCompact()
			assert.Equal(t, tt.expectedSecond, actual)
			assert.Equal(t, tt.expectedHash2, hash)
		})
	}
}
|
package kata
// FindUniq returns the single value in arr that differs from all the
// otherwise identical remaining values. Arrays shorter than three
// elements yield 0; if no differing pair is found, the last element is
// returned.
func FindUniq(arr []float32) float32 {
	if len(arr) < 3 {
		return 0
	}
	for i, current := range arr[:len(arr)-2] {
		next, afterNext := arr[i+1], arr[i+2]
		if current == next {
			continue
		}
		if current == afterNext {
			return next
		}
		if next == afterNext {
			return current
		}
	}
	return arr[len(arr)-1]
}
package storage
import (
"context"
"errors"
"testing"
"time"
"github.com/arschles/assert"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
// objPath is the object key used by every test in this file.
const (
	objPath = "myobj"
)
// TestObjectExistsSuccess verifies ObjectExists reports true, and records
// exactly one stat call with the right key, when the statter succeeds.
func TestObjectExistsSuccess(t *testing.T) {
	info := storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{Path: objPath, Size: 1234}}
	fake := &FakeObjectStatter{
		Fn: func(context.Context, string) (storagedriver.FileInfo, error) {
			return info, nil
		},
	}
	exists, err := ObjectExists(fake, objPath)
	assert.NoErr(t, err)
	assert.True(t, exists, "object not found when it should be present")
	assert.Equal(t, len(fake.Calls), 1, "number of StatObject calls")
	assert.Equal(t, fake.Calls[0].Path, objPath, "object key")
}
// TestObjectExistsNoObject verifies ObjectExists reports false, with no
// error, when the statter signals path-not-found.
func TestObjectExistsNoObject(t *testing.T) {
	fake := &FakeObjectStatter{
		Fn: func(context.Context, string) (storagedriver.FileInfo, error) {
			return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{}}, storagedriver.PathNotFoundError{Path: objPath}
		},
	}
	exists, err := ObjectExists(fake, objPath)
	assert.NoErr(t, err)
	assert.False(t, exists, "object found when it should be missing")
	assert.Equal(t, len(fake.Calls), 1, "number of StatObject calls")
}
// TestObjectExistsOtherErr verifies that a non-path-not-found statter
// error is propagated and the object is reported as absent.
func TestObjectExistsOtherErr(t *testing.T) {
	wantErr := errors.New("other error")
	fake := &FakeObjectStatter{
		Fn: func(context.Context, string) (storagedriver.FileInfo, error) {
			return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{}}, wantErr
		},
	}
	exists, err := ObjectExists(fake, objPath)
	assert.Err(t, err, wantErr)
	assert.False(t, exists, "object found when the statter errored")
}
// TestWaitForObjectMissing verifies WaitForObject gives up with an error
// when the object never appears, and that the statter was polled.
func TestWaitForObjectMissing(t *testing.T) {
	missing := &FakeObjectStatter{
		Fn: func(context.Context, string) (storagedriver.FileInfo, error) {
			return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{}}, storagedriver.PathNotFoundError{Path: objPath}
		},
	}
	err := WaitForObject(missing, objPath, 1*time.Millisecond, 2*time.Millisecond)
	assert.True(t, err != nil, "no error received when there should have been")
	// One stat fires immediately, then polls follow at ~1ms and ~2ms.
	assert.True(
		t,
		len(missing.Calls) >= 1,
		"the statter was not called, but should have been called at least once",
	)
}
// TestWaitForObjectExists verifies WaitForObject succeeds immediately,
// after a single stat call, when the object is already present.
func TestWaitForObjectExists(t *testing.T) {
	present := &FakeObjectStatter{
		Fn: func(context.Context, string) (storagedriver.FileInfo, error) {
			return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{}}, nil
		},
	}
	assert.NoErr(t, WaitForObject(present, objPath, 1*time.Millisecond, 2*time.Millisecond))
	// The first stat succeeds, so no polling should occur.
	assert.Equal(t, len(present.Calls), 1, "number of calls to the statter")
}
|
package main
import "fmt"
// main demonstrates wrap-around behavior of Go's fixed-width integers:
// uint8 wraps from 255 to 0 and int8 wraps from 127 to -128.
func main() {
	var red uint8 = 255
	red++ // wraps around to 0
	fmt.Println(red)

	var number int8 = 127
	number++ // wraps around to -128
	fmt.Println(number)

	// showIncrement prints a value in binary before and after adding one.
	showIncrement := func(v uint8) {
		fmt.Printf("%08b\n", v)
		v++
		fmt.Printf("%08b\n", v)
	}
	showIncrement(3)   // green: 00000011 -> 00000100
	showIncrement(255) // blue: 11111111 wraps to 00000000
}
|
package main
import (
"fmt"
"github.com/hectorhammett/graphs/digraph"
"github.com/hectorhammett/graphs/graph"
"github.com/hectorhammett/graphs/node"
"github.com/hectorhammett/graphs/ugraph"
)
// main builds the same four-node topology as both an undirected and a
// directed graph, then prints each graph's adjacency relationships.
func main() {
	nodeA := node.NewNode("a", nil)
	nodeB := node.NewNode("b", nil)
	nodeC := node.NewNode("c", nil)
	nodeD := node.NewNode("d", nil)

	undirected := ugraph.NewUGraph()
	undirected.AddVertice(nodeA, nodeC)
	undirected.AddVertice(nodeB, nodeC)
	undirected.AddVertice(nodeD, nodeC)

	directed := digraph.NewDigraph()
	directed.AddVertice(nodeA, nodeC)
	directed.AddVertice(nodeB, nodeC)
	directed.AddVertice(nodeD, nodeC)

	fmt.Println("UGraph:")
	traverseGraph(undirected)
	fmt.Println("Digraph:")
	traverseGraph(directed)
}
// traverseGraph prints one line per edge, walking every node in g and
// each of that node's adjacent nodes.
func traverseGraph(g graph.Graph) {
	for _, current := range g.GetNodesList() {
		for _, neighbor := range g.GetNodeAdjacencyList(current) {
			fmt.Printf("%s is connected to %s\n", current.GetName(), neighbor.GetName())
		}
	}
}
|
package tezos_test
import (
"fmt"
"testing"
"github.com/ecadlabs/signatory/pkg/tezos"
)
// TestValidate runs KeyPair.Validate against a table of valid and
// deliberately corrupted key pairs, expecting an error exactly when the
// pair is corrupted.
func TestValidate(t *testing.T) {
	type testCase struct {
		Name        string
		KeyPair     *tezos.KeyPair
		ExpectError bool
	}
	cases := []testCase{
		{
			Name:        "Valid case",
			KeyPair:     tezos.NewKeyPair("p2pk67PsiUBJZq9twKoFAWt8fSSVn53BR31dxKnTeLirLxHqB8gSnCq", "p2sk3LiJ6fU9Lvh8tdwar6tJ2Xg9bg3kQ9p4Sjmn83m29qJQdQPA5r"),
			ExpectError: false,
		},
		{
			Name:        "Invalid secret key prefix",
			KeyPair:     tezos.NewKeyPair("p2p67PsiUBJZq9twKoFAWt8fSSVn53BR31dxKnTeLirLxHqB8gSnCq", "p2sk3LiJ6fU9Lvh8tdwar6tJ2Xg9bg3kQ9p4Sjmn83m29qJQdQPA5r"),
			ExpectError: true,
		},
		{
			Name:        "Invalid public key prefix",
			KeyPair:     tezos.NewKeyPair("p2pk67PsiUBJZq9twKoFAWt8fSSVn53BR31dxKnTeLirLxHqB8gSnCq", "p2s3LiJ6fU9Lvh8tdwar6tJ2Xg9bg3kQ9p4Sjmn83m29qJQdQPA5r"),
			ExpectError: true,
		},
		{
			Name:        "Invalid public key (checksum)",
			KeyPair:     tezos.NewKeyPair("p2pk67PsiUBJZq9twKFAWt8fSSVn53BR31dxKnTeLirLxHqB8gSnCq", "p2sk3LiJ6fU9Lvh8tdwar6tJ2Xg9bg3kQ9p4Sjmn83m29qJQdQPA5r"),
			ExpectError: true,
		},
		{
			Name:        "Invalid private key (checksum)",
			KeyPair:     tezos.NewKeyPair("p2pk67PsiUBJZq9twKoFAWt8fSSVn53BR31dxKnTeLirLxHqB8gSnCq", "p2sk3Li6fU9Lvh8tdwar6tJ2Xg9bg3kQ9p4Sjmn83m29qJQdQPA5r"),
			ExpectError: true,
		},
		{
			Name:        "Unsupported key",
			KeyPair:     tezos.NewKeyPair("edpkvVPtveGg45XnB8a13kgXm9uLcPD3bqSCcaTdDfnpGUDw986oZy", "edsk4TjJWEszkHKono7XMnepVqwi37FrpbVt1KCsifJeAGimxheShG"),
			ExpectError: true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			err := tc.KeyPair.Validate()
			switch {
			case err != nil && !tc.ExpectError:
				fmt.Printf("Unexpected error was thrown: %s\n", err.Error())
				t.Fail()
			case err == nil && tc.ExpectError:
				fmt.Printf("Expected error but none was thrown\n")
				t.Fail()
			}
		})
	}
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package store
import (
"testing"
"github.com/mattermost/mattermost-cloud/internal/testlib"
"github.com/mattermost/mattermost-cloud/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test_GroupDTO covers GetGroupDTO and GetGroupDTOs: groups are created
// with varying annotation sets, then the returned DTOs are checked for
// annotation contents, installation counts, and paging/annotation
// filtering.
func Test_GroupDTO(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	// Fixture layout: group1 has two annotations, group2 one, group3
	// none, and group4 shares the first of group1's annotations.
	group1 := &model.Group{
		Name: "group1",
	}
	annotations1 := []*model.Annotation{
		{Name: "ann-group1"},
		{Name: "ann-group2"},
	}
	createAnnotations(t, sqlStore, annotations1)
	err := sqlStore.CreateGroup(group1, annotations1)
	require.NoError(t, err)
	group2 := &model.Group{
		Name: "group2",
	}
	annotations2 := []*model.Annotation{
		{Name: "ann-group3"},
	}
	createAnnotations(t, sqlStore, annotations2)
	err = sqlStore.CreateGroup(group2, annotations2)
	require.NoError(t, err)
	group3 := &model.Group{
		Name: "group3",
	}
	err = sqlStore.CreateGroup(group3, nil)
	require.NoError(t, err)
	group4 := &model.Group{
		Name: "group4",
	}
	err = sqlStore.CreateGroup(group4, annotations1[:1])
	require.NoError(t, err)
	t.Run("get group1 DTO", func(t *testing.T) {
		group1DTO, err := sqlStore.GetGroupDTO(group1.ID)
		require.NoError(t, err)
		assert.Equal(t, group1, group1DTO.Group)
		assert.Equal(t, 2, len(group1DTO.Annotations))
		// Sort before comparing: annotation order is not guaranteed.
		model.SortAnnotations(group1DTO.Annotations)
		assert.Equal(t, group1.ToDTO(annotations1), group1DTO)
	})
	t.Run("get group3 DTO", func(t *testing.T) {
		group3DTO, err := sqlStore.GetGroupDTO(group3.ID)
		require.NoError(t, err)
		assert.Equal(t, group3, group3DTO.Group)
		assert.Equal(t, 0, len(group3DTO.Annotations))
		assert.Equal(t, group3.ToDTO(nil), group3DTO)
	})
	t.Run("get group DTOs with installation count", func(t *testing.T) {
		// Attach one installation to group1 so its count is non-zero.
		err := sqlStore.CreateInstallation(&model.Installation{
			Name:    "Dummy installation",
			GroupID: &group1.ID,
		}, []*model.Annotation{}, []*model.InstallationDNS{})
		assert.NoError(t, err)
		groups, err := sqlStore.GetGroupDTOs(&model.GroupFilter{
			Paging:                model.AllPagesNotDeleted(),
			WithInstallationCount: true,
		})
		assert.NoError(t, err)
		assert.NotZero(t, len(groups))
		for _, g := range groups {
			if g.ID == group1.ID {
				assert.NotZero(t, g.InstallationCount)
			}
		}
	})
	t.Run("get group DTOs", func(t *testing.T) {
		testCases := []struct {
			Description string
			Filter      *model.GroupFilter
			Expected    []*model.GroupDTO
		}{
			{
				"page 0, perPage 0",
				&model.GroupFilter{
					Paging: model.Paging{
						Page:           0,
						PerPage:        0,
						IncludeDeleted: false,
					},
				},
				[]*model.GroupDTO{},
			},
			{
				"page 0, perPage 1",
				&model.GroupFilter{
					Paging: model.Paging{
						Page:           0,
						PerPage:        1,
						IncludeDeleted: false,
					},
				},
				[]*model.GroupDTO{group1.ToDTO(annotations1)},
			},
			{
				// MatchAllIDs with both annotations matches group1 only.
				"with multiple annotations",
				&model.GroupFilter{
					Paging: model.AllPagesNotDeleted(),
					Annotations: &model.AnnotationsFilter{
						MatchAllIDs: []string{annotations1[0].ID, annotations1[1].ID},
					},
				},
				[]*model.GroupDTO{group1.ToDTO(annotations1)},
			},
			{
				// The shared first annotation matches group1 and group4.
				"with single annotation",
				&model.GroupFilter{
					Paging: model.AllPagesNotDeleted(),
					Annotations: &model.AnnotationsFilter{
						MatchAllIDs: []string{annotations1[0].ID},
					},
				},
				[]*model.GroupDTO{group1.ToDTO(annotations1), group4.ToDTO(annotations1[:1])},
			},
		}
		for _, testCase := range testCases {
			t.Run(testCase.Description, func(t *testing.T) {
				actual, err := sqlStore.GetGroupDTOs(testCase.Filter)
				require.NoError(t, err)
				for _, g := range actual {
					model.SortAnnotations(g.Annotations)
				}
				assert.ElementsMatch(t, testCase.Expected, actual)
			})
		}
	})
}
// createAnnotations stores every annotation in anns via sqlStore,
// failing the test on the first error.
func createAnnotations(t *testing.T, sqlStore *SQLStore, anns []*model.Annotation) {
	for _, annotation := range anns {
		require.NoError(t, sqlStore.CreateAnnotation(annotation))
	}
}
|
package JwtHelper
import (
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
JwtConfig "golangdemo/rps-game/configs/jwt-conf"
LogConf "golangdemo/rps-game/configs/log-conf"
"golangdemo/rps-game/configs/structs"
SystemCode "golangdemo/rps-game/configs/system-code"
SystemPath "golangdemo/rps-game/configs/system-path"
"golangdemo/rps-game/helpers/logging"
"io/ioutil"
"os"
"strings"
"time"
)
// jwtKey holds the HMAC signing secret, loaded once at package init.
var jwtKey = readJwtKeyFromFile()
// readJwtKeyFromFile loads the JWT signing key from the configured key
// file, terminating the process if the file cannot be opened or read.
func readJwtKeyFromFile() []byte {
	file, err := os.Open(SystemPath.JWTKeyFilePath)
	if err != nil {
		logging.SysLog.Fatal(logging.FormatResult(LogConf.Fatal, SystemCode.ErrLoadFileFail, err.Error()))
	}
	defer file.Close()
	// The read error was previously discarded; a truncated or unreadable
	// key file must be fatal too, otherwise tokens get signed with an
	// empty key.
	content, err := ioutil.ReadAll(file)
	if err != nil {
		logging.SysLog.Fatal(logging.FormatResult(LogConf.Fatal, SystemCode.ErrLoadFileFail, err.Error()))
	}
	return content
}
// getTokenFromAuth extracts the bare token from an Authorization-style
// value of the form "<scheme> <token>" (e.g. "Bearer eyJ..."). It returns
// "" when the input is empty or not in two-part form; previously a value
// without a space caused an index-out-of-range panic.
func getTokenFromAuth(tok string) string {
	if len(tok) <= 0 {
		return ""
	}
	s := strings.Split(tok, " ")
	if len(s) < 2 {
		return ""
	}
	return s[1]
}
// GenerateJwt builds and signs an HS256 JWT whose payload carries the
// given username, returning the "Bearer "-prefixed token together with
// its issue and expiry timestamps.
func GenerateJwt(username string) (*structs.JwtGenResult, error) {
	issuedAt := time.Now().Unix()
	expTime := JwtConfig.JwtExpirationTime
	claims := structs.CustomJwtClaims{
		Value: structs.JwtPayload{Username: username},
		StandardClaims: jwt.StandardClaims{
			// NOTE(review): assumes JwtExpirationTime is an absolute unix
			// timestamp suitable for the exp claim — confirm.
			ExpiresAt: expTime, // exp
			IssuedAt:  issuedAt, // iat
		},
	}
	signed, tErr := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(jwtKey)
	if tErr != nil {
		logging.SysLog.Println(logging.FormatResult(LogConf.Error, SystemCode.ErrGenerateJwtFail, tErr.Error()))
		return nil, SystemCode.ErrGenerateJwtFail
	}
	return &structs.JwtGenResult{
		Token:       "Bearer " + signed,
		IssuedTime:  issuedAt,
		ExpiredTime: expTime,
	}, nil
}
// GetJwtClaims parses the token carried in an Authorization-style value
// and returns its custom claims. On any parse or validation failure it
// returns nil plus the corresponding system code; the second return
// value is "" on success.
func GetJwtClaims(tok string) (*structs.CustomJwtClaims, string) {
	raw := getTokenFromAuth(tok)
	parsed, err := jwt.ParseWithClaims(raw, &structs.CustomJwtClaims{}, func(token *jwt.Token) (interface{}, error) {
		return jwtKey, nil
	})
	if err != nil {
		return nil, SystemCode.InappropriateJwtToken
	}
	claims, ok := parsed.Claims.(*structs.CustomJwtClaims)
	if !ok || !parsed.Valid {
		return nil, SystemCode.InappropriateJwtToken
	}
	return claims, ""
}
func VerifyJwt(tok string) (bool, string) {
userToken := getTokenFromAuth(tok)
token, err := jwt.Parse(userToken, func(token *jwt.Token) (interface{}, error) {
return jwtKey, nil
})
if token == nil {
return false, SystemCode.InappropriateJwtToken
}
if token.Valid {
return true, ""
} else if ve, ok := err.(*jwt.ValidationError); ok {
if ve.Errors&jwt.ValidationErrorMalformed != 0 {
return false, SystemCode.InappropriateJwtToken
} else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 {
return false, SystemCode.ExpiredJwtToken
} else {
return false, SystemCode.InappropriateJwtToken
}
} else {
return false, SystemCode.InappropriateJwtToken
}
}
func GetPayloadFromJwtWithGin(c *gin.Context) (*structs.JwtPayload, string) {
// Get token from Cookie => Check Jwt & Get data field
tokenCookie, tcErr := c.Cookie(JwtConfig.JwtCookieName)
if tcErr != nil {
return nil, SystemCode.JwtTokenNotFound
}
claims, cErr := GetJwtClaims(tokenCookie)
if len(cErr) > 0 {
return nil, SystemCode.InappropriateJwtToken
}
return &claims.Value, ""
} |
package devices
import (
"bytes"
"log"
"net/url"
"strconv"
"time"
)
//yh-500
// Weighbridge (truck scale) data reader.
/*
	RS-232 communication at 9600 or 19200 bps.
	Data is transmitted as ASCII.
	STX marks the start of data: XON 0x2
	CR marks the end of data (newline): 0x
	Mode 1 is used: 9600 baud, RS-485 with A/B lines cross-wired, RS-232
	continuous transmission, ASCII encoding, digits arrive in reverse order.
	Data format: .0600000=.0700000=.0700000=.0700000=.0700000=
*/
// znDiBangStart drives one weighbridge device connection: a reader
// goroutine delivers '='-terminated ASCII frames, and only after the
// same reading has been observed more than five consecutive times is the
// weight converted and uploaded. Returns (tearing the connection down)
// when the reader goroutine signals failure.
func znDiBangStart(id uint) {
	conn := getConn(id)
	if conn == nil {
		return
	}
	defer func() {
		conn.Close() // close the connection
		log.Printf("移动地磅监测处理发生错误\n")
		unBindConn(id)
		// update the device status here
	}()
	rCh := make(chan []byte, 10)
	var strPre []byte // previous reading, for repeat detection
	strConuter := 0
	stataCh := make(chan bool, 1)
	timeout := time.NewTimer(5 * time.Second)
	go readOneData(conn, rCh, []byte{'='}, 8, stataCh)
	for {
		var dat []byte
		var state bool
		select {
		case dat = <-rCh:
			timeout.Reset(5 * time.Second)
			devList[id].dataState = 1
			break
		case state = <-stataCh:
			// Reader goroutine reports its health; false means it died.
			if false == state {
				return
			}
		case <-timeout.C:
			// NOTE(review): after a timeout dat is nil yet control still
			// falls through to the comparison below — confirm intended.
			log.Printf("移动地磅没有数据\n")
			devList[id].cmdIsOk = 0
			devList[id].dataState = 0
			break
		}
		log.Printf("移动地磅数据:%s\n", dat)
		if bytes.Equal(strPre, dat) {
			strConuter++
			if strConuter > 5 {
				//log.Printf("地磅数据:%s\n", dat) //=.0036100 -92233720
				// Bytes 1..7 of the frame are converted; presumably byte 0
				// is a frame marker — confirm against readOneData.
				w := znDibangDataTrans([]byte(dat)[1:8])
				urlData := url.Values{"weight": {w}}
				sendData("智能地磅", id, urlData)
				devList[id].data = w
				devList[id].lastTime = time.Now().Format("2006-01-02 15:04:05")
				// if w != "0" {
				// 	urlData := url.Values{"weight": {w}}
				// 	//fmt.Printf("地磅发送数据:%v\n", urlData)
				// 	sendData("智能地磅", id, urlData)
				// }
				strConuter = 0 // restart the repeat count
			}
		} else {
			strConuter = 0 // restart the repeat count
		}
		strPre = dat // keep last value; only report after repeated matches
	}
}
// znDibangDataTrans converts a reversed ASCII weight reading into a
// scaled integer string. The raw digits arrive least-significant first,
// so they are reversed before parsing; a parse failure is logged and
// yields the zero reading.
func znDibangDataTrans(dat []byte) string {
	reversed := invert(dat)
	kg, err := strconv.ParseFloat(string(reversed), 64)
	if err != nil {
		log.Printf("智能地磅转换数据失败:%s %s\n", string(reversed), err.Error())
	}
	// Scale by a large factor before the integer divide to avoid float
	// artifacts such as trailing ...9999 decimals.
	return strconv.FormatInt(int64(kg*1000000000000)/100000000, 10)
}
|
package grifts
import (
"github.com/google/uuid"
"github.com/icrowley/fake"
"github.com/markbates/grift/grift"
"github.com/daylightdata/shortbread/postgres"
)
// Registers the "db:seed" grift task, which fills resident_information
// with 100 rows of fake data for local development.
var _ = grift.Namespace("db", func() {
	grift.Desc("seed", "Seeds a database")
	grift.Add("seed", func(c *grift.Context) error {
		db, err := postgres.Connect()
		if err != nil {
			return err
		}
		defer db.Close()
		// Fake-data generators produce English-language values.
		err = fake.SetLang("en")
		if err != nil {
			return err
		}
		ins := `insert into resident_information (id, first_name, last_name, phone_number, room_number)
values ($1,$2,$3,$4,$5)`
		// NOTE(review): fake.Zip() populates room_number — confirm a zip
		// code is the intended stand-in for a room number.
		for i := 0; i < 100; i++ {
			_, err := db.Exec(ins, uuid.New(), fake.FirstName(), fake.LastName(), fake.Phone(), fake.Zip())
			if err != nil {
				return err
			}
		}
		return nil
	})
})
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package encryption
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestDeriveRootKey checks that root key derivation succeeds for both an
// empty path and a non-empty path.
func TestDeriveRootKey(t *testing.T) {
	for _, path := range []string{"", "any/path"} {
		_, err := DeriveRootKey([]byte("password"), []byte("salt"), path, 8)
		assert.NoError(t, err)
	}
}
|
package proteus
import "testing"
// TestEmbeddedNoSql verifies that Build wires up query functions both on
// the outer dao struct and on its embedded inner dao.
func TestEmbeddedNoSql(t *testing.T) {
	type InnerEmbeddedProductDao struct {
		Insert func(e Executor, p Product) (int64, error) `proq:"insert into Product(name) values(:p.Name:)" prop:"p"`
	}
	type OuterEmbeddedProductDao struct {
		InnerEmbeddedProductDao
		FindById func(e Querier, id int64) (Product, error) `proq:"select * from Product where id = :id:" prop:"id"`
	}
	dao := OuterEmbeddedProductDao{}
	if err := Build(&dao, Postgres); err != nil {
		t.Fatal(err)
	}
	if dao.FindById == nil {
		t.Fatal("should have populated findById")
	}
	if dao.Insert == nil {
		t.Fatal("should have populated insert")
	}
}
|
package capture
import (
"bufio"
"bytes"
"crypto/rand"
"encoding/hex"
"image"
"image/draw"
"image/png"
"os"
"path"
"path/filepath"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"gitlab.com/opennota/screengen"
"golang.org/x/net/context"
)
// ExtractRequest describes a frame-extraction job: the raw video bytes,
// the original file name (used only for its extension), the timestamp of
// the frame to capture, and the desired output dimensions.
type ExtractRequest struct {
	Video []byte
	Name string
	Time int64
	Width int32
	Height int32
}

// OverlayImageRequest describes an image-composition job: an original
// image, an overlay image, and the position at which to draw the overlay.
type OverlayImageRequest struct {
	Original []byte
	Overlay []byte
	X, Y int32
}

// ExtractResponse carries the resulting encoded image bytes.
type ExtractResponse struct {
	Data []byte
}

// Service is the capture API: extract a frame from a video and compose
// an overlay onto an image.
type Service interface {
	Extract(context.Context, ExtractRequest) (ExtractResponse, error)
	AddOverlay(request OverlayImageRequest) (ExtractResponse, error)
}
// NewService assembles the capture Service, wrapping the core
// implementation with logging and instrumentation middleware.
func NewService(logger log.Logger, extracts, overlays metrics.Counter) Service {
	var core Service = VideoCaptureService{}
	withLogging := LoggingMiddleware(logger)(core)
	return InstrumentingMiddleware(extracts, overlays)(withLogging)
}
// VideoCaptureService exposes frame-extraction and overlay operations
// for video and image data.
type VideoCaptureService struct {
}

// New returns a ready-to-use VideoCaptureService.
func New() VideoCaptureService {
	return VideoCaptureService{}
}
// tempFileName returns a path in the OS temp directory whose base name
// is 16 random bytes hex-encoded, with the given suffix (e.g. ".mp4")
// appended.
func tempFileName(suffix string) string {
	randBytes := make([]byte, 16)
	if _, err := rand.Read(randBytes); err != nil {
		// crypto/rand failing means the platform entropy source is
		// broken; previously the error was silently ignored, which could
		// yield colliding all-zero file names.
		panic(err)
	}
	return filepath.Join(os.TempDir(), hex.EncodeToString(randBytes)+suffix)
}
// Extract an image from a video. The video bytes are first written to a temp
// file (screengen reads from a path), a frame is grabbed at request.Time
// scaled to Width x Height, and the frame is returned PNG-encoded.
// ctx is currently unused.
func (s VideoCaptureService) Extract(ctx context.Context, request ExtractRequest) (ExtractResponse, error) {
	// Keep the original extension so screengen can detect the container format.
	name, err := saveFile(tempFileName(path.Ext(request.Name)), request.Video)
	if err != nil {
		return ExtractResponse{}, err
	}
	defer os.Remove(name)
	g, err := screengen.NewGenerator(name)
	if err != nil {
		return ExtractResponse{}, err
	}
	// NOTE(review): g appears to never be closed/freed here — check whether
	// screengen.Generator requires an explicit Close to release codec state.
	img, err := g.ImageWxH(request.Time, int(request.Width), int(request.Height))
	if err != nil {
		return ExtractResponse{}, err
	}
	result, err := saveToPng(img)
	if err != nil {
		return ExtractResponse{}, err
	}
	return ExtractResponse{Data: result}, nil
}
// AddOverlay decodes the original and overlay images, composites the overlay
// onto the original at offset (X, Y), and returns the result PNG-encoded.
//
// Fix: the overlay was previously drawn with draw.Src over the full canvas,
// with (X, Y) used as the SOURCE offset — which replaced the entire original
// image with (mostly transparent) overlay pixels. Drawing with draw.Over into
// the overlay's own rectangle translated to (X, Y) blends it on top instead.
func (s VideoCaptureService) AddOverlay(request OverlayImageRequest) (ExtractResponse, error) {
	img, _, err := image.Decode(bytes.NewReader(request.Original))
	if err != nil {
		return ExtractResponse{}, err
	}
	logo, _, err := image.Decode(bytes.NewReader(request.Overlay))
	if err != nil {
		return ExtractResponse{}, err
	}
	m := image.NewRGBA(image.Rect(0, 0, img.Bounds().Max.X, img.Bounds().Max.Y))
	draw.Draw(m, m.Bounds(), img, image.Point{0, 0}, draw.Src)
	target := logo.Bounds().Add(image.Pt(int(request.X), int(request.Y)))
	draw.Draw(m, target, logo, logo.Bounds().Min, draw.Over)
	result, err := saveToPng(m)
	if err != nil {
		return ExtractResponse{}, err
	}
	return ExtractResponse{Data: result}, nil
}
func saveFile(name string, data []byte) (string, error) {
fo, err := os.Create(name)
if err != nil {
return "", err
}
w := bufio.NewWriter(fo)
if _, err := w.Write(data); err != nil {
return "", err
}
if err := w.Flush(); err != nil {
return "", err
}
return name, nil
}
func saveToPng(img image.Image) ([]byte, error) {
buf := new(bytes.Buffer)
if err := png.Encode(buf, img); err != nil {
return nil, err
}
// log.Println("Wrote ", len(buf.Bytes()))
return buf.Bytes(), nil
}
|
package template
import (
"os"
"testing"
)
// TestRender drives Render with one representative Function per
// ReturnValueType (Exec, QueryRow, QueryPlain, QueryStruct) and fails on any
// render error. The rendered source currently goes to stdout and is not
// asserted.
func TestRender(t *testing.T) {
	tt := DotType{
		PackageName: "goDao",
		Packages:    []string{"github.com/jackc/pgtype"},
		Functions: []Function{{
			Name: "Add",
			// language=PostgreSQL
			SQL: `
-- https://en.wikipedia.org/wiki/List_of_most_popular_websites
create table if not exists "popular_websites" (
"site" text,
"domain" text,
"alexa_top" int2,
"similar_web_top" int2,
"description" text,
"territory" text
);`,
			ReturnValueType: Exec,
		}, {
			Name: "Sub",
			// language=PostgreSQL
			SQL: `
select ($1::float8 - $2::float8)::float8;`,
			ReturnValueType: QueryRow,
			InputArguments: []Variable{{
				Name: "a", Type: "float64",
			}, {
				Name: "b", Type: "float64",
			}},
			OutputArguments: []Variable{{
				Name: "sum",
				Type: "float64",
			}},
		}, {
			Name: "GetPopularIn",
			// language=PostgreSQL
			SQL: `
select "domain" from "popular_websites" order by $1::name limit 5;`,
			InputArguments:     []Variable{{Name: "sortColumn", Type: "pgtype.Name"}},
			ReturnValueType:    QueryPlain,
			OutputArguments:    []Variable{{Name: "domains", Type: "[]string"}},
			UnderlyingTypeName: "string",
		}, {
			Name: "FullGetPopularIn",
			// language=PostgreSQL
			SQL: `
select "site", "domain", "alexa_top", "similar_web_top", "description", "territory"
from "popular_websites"
order by $1::name
limit 5;`,
			InputArguments:     []Variable{{Name: "sortColumn", Type: "pgtype.Name"}},
			OutputArguments:    []Variable{{Name: "popularWebsites", Type: "[]PopularWebsite"}},
			ReturnValueType:    QueryStruct,
			UnderlyingTypeName: "PopularWebsite",
			RowFieldsNames:     []string{"Site", "Domain", "Alexa_top", "SimilarWebTop", "Description", "Territory"},
		}},
	}
	err := Render(tt, os.Stdout) // TODO: add test for output
	if err != nil {
		t.Fatal(err)
	}
}
|
package main
import (
"fmt"
)
// z is package-level state; kept untouched in case other files of this
// package reference it.
var z = 88

// main prints 40 and then 50.
func main() {
	for _, value := range []int{40, 50} {
		fmt.Println(value)
	}
}
|
package main
import (
"fmt"
"sync"
)
// main starts two printing goroutines and blocks until both have signaled
// completion on the shared WaitGroup. (Registering with Add once per
// goroutine is equivalent to a single Add(2) up front.)
func main() {
	var wg sync.WaitGroup
	for _, msg := range []string{"Salam", "Hallo"} {
		wg.Add(1) // register before starting the goroutine
		go printText(msg, &wg)
	}
	wg.Wait()
}
func printText(text string, waitGroup *sync.WaitGroup) {
for i:=0; i<5; i++ {
fmt.Println(text)
}
waitGroup.Done()
} |
package util
// LogNotice is a no-op stub for notice-level logging keyed by a trace ID.
// TODO(review): presumably a real sink is planned (or provided elsewhere) —
// confirm before relying on any output from this function.
func LogNotice(traceId string, data string) {
}
|
package generate
import (
"bytes"
"fmt"
"github.com/lvxin0315/gapi/core/generate/services"
"io/ioutil"
"reflect"
"strings"
)
/**
 * @Author lvxin0315@163.com
 * @Description generates service source files corresponding to MySQL models
 * @Date 11:29 AM 2020/12/11
 **/

// GenService writes generated *_service.go files into ServiceDir.
type GenService struct {
	ServiceDir string // output directory for generated service files
}
// AutoService generates one service file per model; the module name is the
// model's type name with every "Model" substring removed.
func (gen *GenService) AutoService(models ...interface{}) {
	for _, model := range models {
		typeName := reflect.TypeOf(model).Name()
		gen.writeServiceFile(strings.ReplaceAll(typeName, "Model", ""))
	}
}
// writeServiceFile renders the demo service template for moduleName and
// writes it to <ServiceDir>/<modulename>_service.go, panicking on I/O failure.
func (gen *GenService) writeServiceFile(moduleName string) {
	modelName := fmt.Sprintf("%sModel", moduleName)
	commonServiceBytes := []byte(services.DemoServiceTpl)
	// Replace the model name first: doing the bare "Demo" replacement before
	// this one would clobber the "DemoModel" occurrences.
	commonServiceBytes = bytes.ReplaceAll(commonServiceBytes, []byte("DemoModel"), []byte(modelName))
	// Then replace the module name.
	commonServiceBytes = bytes.ReplaceAll(commonServiceBytes, []byte("Demo"), []byte(moduleName))
	// Write the generated source file.
	err := ioutil.WriteFile(fmt.Sprintf("%s/%s_service.go", gen.ServiceDir, strings.ToLower(moduleName)), commonServiceBytes, 0755)
	if err != nil {
		panic(err)
	}
}
|
package helper
import (
"strconv"
"strings"
)
// HasValue reports whether s contains anything besides ASCII space
// characters. Only spaces are trimmed (not tabs or newlines), matching the
// original contract.
func HasValue(s string) bool {
	return strings.Trim(s, " ") != ""
}
// Atoi parses v (optionally wrapped in double quotes) as an int, returning
// the default d when v is empty or not a valid integer.
//
// Cleanup: the previous version manually stripped leading zeros and special-
// cased an all-zero string before parsing. strconv.Atoi already accepts
// leading zeros ("007" -> 7, "000" -> 0), so that branch was redundant;
// behavior is unchanged.
func Atoi(v string, d int) int {
	s := strings.Trim(v, "\"")
	if s == "" {
		return d
	}
	i, err := strconv.Atoi(s)
	if err != nil {
		return d
	}
	return i
}
// Atoi64 parses v (optionally wrapped in double quotes) as a base-10 int64,
// returning the default d on any parse error.
func Atoi64(v string, d int64) int64 {
	parsed, err := strconv.ParseInt(strings.Trim(v, "\""), 10, 64)
	if err != nil {
		return d
	}
	return parsed
}
// PadLeft left-pads the decimal form of num with char until the result is at
// least length characters long. If char is longer than one character the
// result may overshoot length, because char is prepended whole each round.
//
// Fix: renamed the misspelled parameter "lenght" to "length" (Go parameter
// names are not part of the call contract, so callers are unaffected).
func PadLeft(num int, length int, char string) string {
	s := strconv.Itoa(num)
	for len(s) < length {
		s = char + s
	}
	return s
}
// Split breaks s around sep; when removeEmptyItem is true, pieces that are
// empty or contain only spaces are dropped from the result.
func Split(s, sep string, removeEmptyItem bool) []string {
	pieces := strings.Split(s, sep)
	result := make([]string, 0, len(pieces))
	for _, piece := range pieces {
		if !removeEmptyItem || strings.Trim(piece, " ") != "" {
			result = append(result, piece)
		}
	}
	return result
}
// Distinct returns source with duplicates removed, preserving first-seen
// order.
//
// Perf: the previous version probed the accumulated slice with Contains for
// every element (O(n^2)); a set lookup makes this O(n) with identical output.
func Distinct(source []string) []string {
	seen := make(map[string]struct{}, len(source))
	result := make([]string, 0, len(source))
	for _, item := range source {
		if _, dup := seen[item]; dup {
			continue
		}
		seen[item] = struct{}{}
		result = append(result, item)
	}
	return result
}
// Substring returns the suffix of source starting at byte index start, or ""
// when start is past the end. Note: this indexes bytes, not runes — a
// multi-byte UTF-8 character may be cut in the middle.
func Substring(source string, start int) string {
	if start > len(source) {
		return ""
	}
	return source[start:]
}
// Contains reports whether item appears in list (linear scan, exact match).
func Contains(list []string, item string) bool {
	for _, candidate := range list {
		if candidate == item {
			return true
		}
	}
	return false
}
|
package Constants
import "fmt"
// DOB prints the birth date assembled from the package-level Day, Month and
// Year values (declared elsewhere in this package).
func DOB() {
	fmt.Print("I was born on ", Day, "-", Month, "-", Year)
}

// Myname prints the package-level Name value (declared elsewhere in this
// package).
func Myname() {
	fmt.Print("I am ", Name, ". ")
}
|
package main
import(
"os"
"fmt"
"net"
"time"
"strconv"
)
// main pings 127.0.0.1:1200 over UDP from fixed local port 20076: it sends
// "PING i" up to ten times, reads one reply per send, and prints the measured
// round-trip time. A one-second timer aborts the loop when it fires.
// (User-facing messages are intentionally left in Portuguese.)
func main() {
	ad := "127.0.0.1:1200"
	addr, er := net.ResolveUDPAddr("udp", ad)
	if er != nil {
		fmt.Println("Erro ao converter endereço! ", os.Args[0])
		os.Exit(1)
	}
	LocalAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:20076")
	if err != nil {
		fmt.Println("Erro ao converter endereço local!")
		os.Exit(1)
	}
	sock, err := net.DialUDP("udp", LocalAddr, addr)
	if err != nil {
		fmt.Println("Erro ao estabelecer conexão com o servidor!")
		os.Exit(1)
	}
	defer sock.Close()
	// Overall deadline for the whole ping loop.
	t := time.After(time.Second * 1)
	// Reply buffer. NOTE(review): only 7 bytes — replies longer than "PING n"
	// would be truncated; confirm the server's reply format.
	message := make([]byte, 7)
L:
	for i := 0; i < 10; i++ {
		select {
		case <-t:
			// Timer fired: abort the labeled loop.
			fmt.Println("Estouro do timeout!")
			break L
		default:
			// Ping up to 10 times.
			beg := time.Now()
			mens := "PING " + strconv.Itoa(i)
			_, erro := sock.Write([]byte(mens))
			if erro != nil {
				fmt.Println("Erro ao enviar mensagem ao servidor!", erro.Error())
			} else {
				fmt.Println("Mensagem enviada")
			}
			// Blocking read of one reply datagram (no per-read deadline).
			_, _, err := sock.ReadFromUDP(message)
			if err != nil {
				fmt.Println("Erro ao ler mensagem vinda do servidor!")
			} else {
				fmt.Println("Mensagem chegou")
			}
			end := time.Now()
			rtt := end.Sub(beg) // rtt = end-beg
			fmt.Println("Message Received ", string(message))
			fmt.Println("Round Trip Time ", rtt)
		}
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package health
import (
"context"
"time"
"github.com/shirou/gopsutil/v3/process"
"chromiumos/tast/errors"
"chromiumos/tast/local/croshealthd"
"chromiumos/tast/local/jsontypes"
"chromiumos/tast/testing"
)
// processInfo mirrors the per-process JSON payload emitted by cros_healthd's
// telemetry probe; the field tags must match healthd's output keys exactly.
type processInfo struct {
	BytesRead             jsontypes.Uint64 `json:"bytes_read"`
	BytesWritten          jsontypes.Uint64 `json:"bytes_written"`
	CancelledBytesWritten jsontypes.Uint64 `json:"cancelled_bytes_written"`
	Command               string           `json:"command"`
	FreeMemoryKiB         jsontypes.Uint32 `json:"free_memory_kib"`
	Name                  string           `json:"name"`
	Nice                  int8             `json:"nice"`
	ParentProcessID       jsontypes.Uint32 `json:"parent_process_id"`
	ProcessGroupID        jsontypes.Uint32 `json:"process_group_id"`
	ProcessID             jsontypes.Uint32 `json:"process_id"`
	PhysicalBytesRead     jsontypes.Uint64 `json:"physical_bytes_read"`
	PhysicalBytesWritten  jsontypes.Uint64 `json:"physical_bytes_written"`
	Priority              int8             `json:"priority"`
	ReadSystemCalls       jsontypes.Uint64 `json:"read_system_calls"`
	ResidentMemoryKiB     jsontypes.Uint32 `json:"resident_memory_kib"`
	State                 string           `json:"state"`
	Threads               jsontypes.Uint32 `json:"threads"`
	TotalMemoryKiB        jsontypes.Uint32 `json:"total_memory_kib"`
	UptimeTicks           jsontypes.Uint64 `json:"uptime_ticks"`
	UserID                jsontypes.Uint32 `json:"user_id"`
	WriteSystemCalls      jsontypes.Uint64 `json:"write_system_calls"`
}
// init registers the test with the tast framework; the fixture guarantees
// cros_healthd is already running when the test body executes.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ProbeProcessInfo,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Check that we can probe cros_healthd for single process info",
		Contacts:     []string{"cros-tdm-tpe-eng@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "diagnostics"},
		Fixture:      "crosHealthdRunning",
		Timeout:      1 * time.Minute,
	})
}
// validateProcessInfo validates name, parent_process_id, command values by comparing with results from gopsutil.
func validateProcessInfo(info *processInfo, p *process.Process) error {
if name, err := p.Name(); err != nil {
return errors.Errorf("can't get process name with gopsutil: %s", err)
} else if info.Name != name {
return errors.Errorf("unexpected name; got %s, want %s", info.Name, name)
}
if ppid, err := p.Ppid(); err != nil {
return errors.Errorf("can't get parent_process_id with gopsutil: %s", err)
} else if int(info.ParentProcessID) != int(ppid) {
return errors.Errorf("unexpected parent_process_id; got %d, want %d", int(info.ParentProcessID), int(ppid))
}
if command, err := p.Cmdline(); err != nil {
return errors.Errorf("can't get process command with gopsutil: %s", err)
} else if info.Command != command {
return errors.Errorf("unexpected process command; got %s, want %s", info.Command, command)
}
return nil
}
// validateSingleProcessInfo tests that process info with pid=1 (init) can be successfully and correctly fetched.
func validateSingleProcessInfo(ctx context.Context, outDir string) error {
params := croshealthd.TelemParams{PIDs: []int{1}}
var info processInfo
if err := croshealthd.RunAndParseJSONTelem(ctx, params, outDir, &info); err != nil {
return errors.Wrap(err, "failed to get process telemetry info")
}
p, err := process.NewProcess(1)
if err != nil {
return errors.Wrap(err, "process with pid=1 does not exist")
}
if err := validateProcessInfo(&info, p); err != nil {
return errors.Wrap(err, "failed to validate process info data")
}
return nil
}
// ProbeProcessInfo tests that different processes can be successfully and correctly fetched.
func ProbeProcessInfo(ctx context.Context, s *testing.State) {
if err := validateSingleProcessInfo(ctx, s.OutDir()); err != nil {
s.Fatal("Failed to validate single process data: ", err)
}
}
|
package basic
//go:generate futil -type func -basics
//go:generate futil -type result -basics
//go:generate futil -type option -basics
//go:generate futil -type array -basics
|
package analyzer
import "spider/utils/pool"
// Pool wraps the shared worker-pool implementation from spider/utils/pool
// for use by the analyzer package.
type Pool struct {
	pool *pool.Pool
}
// NewPool constructs an analyzer Pool.
//
// Fix: the original body was empty, which does not even compile (a function
// with a result type must return). TODO(review): wire up the inner
// *pool.Pool here once the intended constructor and sizing arguments from
// spider/utils/pool are known; until then the inner pool is nil.
func NewPool() *Pool {
	return &Pool{}
}
|
package main
import "fmt"
/**
* <p>
* 语言切片:参考:https://www.runoob.com/go/go-slice.html
* Go 语言切片是对数组的抽象。 ---> Go中的数组是定长数组,语言切片是非定长数组,跟Redis的自定义的结构体一致
* Go 数组的长度不可改变,在特定场景中这样的集合就不太适用,Go中提供了一种灵活,功能强悍的内置类型切片("动态数组"),
* 与数组相比切片的长度是不固定的,可以追加元素,在追加时可能使切片的容量增大。
* </p>
* @author: zhu.chen
* @date: 2020/8/5
* @version: v1.0.0
*/
/**
语法
var slice1 []type = make([]type, len) ==== slice1 := make([]type, len)
make([]T, length, capacity) : 切片的容量和长度
*/
// main demonstrates slice basics: make with len/cap, nil vs non-nil slices,
// slicing expressions, and re-slicing within capacity.
func main() {
	// len() and cap() of a slice created with make([]int, 3, 5).
	var numbers = make([]int, 3, 5)
	printSlice(numbers)
	if numbers != nil {
		fmt.Printf("切片是非空的")
	}
	fmt.Println("------------")
	// A declared-but-unallocated slice is nil.
	var numbers1 []int
	printSlice(numbers1)
	if numbers1 == nil {
		fmt.Printf("切片是空的")
	}
	fmt.Println("------------")
	// Slicing expressions.
	/* create a slice */
	numbers3 := []int{0, 1, 2, 3, 4, 5, 6, 7, 8}
	printSlice(numbers3)
	/* print the original slice */
	fmt.Println("numbers3 ==", numbers3)
	/* sub-slice from index 1 (inclusive) to 4 (exclusive) */
	fmt.Println("numbers3[1:4] ==", numbers3[1:4])
	/* the lower bound defaults to 0 */
	fmt.Println("numbers3[:3] ==", numbers3[:3])
	/* the upper bound defaults to len(s) */
	fmt.Println("numbers3[4:] ==", numbers3[4:])
	numbers4 := make([]int, 0, 5)
	printSlice(numbers4)
	/* sub-slice from index 0 (inclusive) to 2 (exclusive) */
	number5 := numbers[:2]
	printSlice(number5)
	/* sub-slice from index 2 (inclusive) to 5 (exclusive) — valid because it
	   stays within numbers' capacity (5) even though its length is only 3 */
	number6 := numbers[2:5]
	printSlice(number6)
	fmt.Println("---------------")
	slice()
}
// printSlice prints a slice's length, capacity, and contents on one line.
func printSlice(x []int) {
	fmt.Printf("len=%d cap=%d slice=%v\n", len(x), cap(x), x)
}
// slice shows append growing a nil slice and copy duplicating into a slice
// with double the capacity.
func slice() {
	var numbers []int
	printSlice(numbers)
	/* appending to a nil slice is allowed */
	numbers = append(numbers, 0)
	printSlice(numbers)
	/* append one element */
	numbers = append(numbers, 1)
	printSlice(numbers)
	/* append several elements at once */
	numbers = append(numbers, 2, 3, 4)
	printSlice(numbers)
	/* create numbers1 with twice the capacity of the previous slice */
	numbers1 := make([]int, len(numbers), (cap(numbers))*2)
	/* copy the contents of numbers into numbers1 */
	copy(numbers1, numbers)
	printSlice(numbers1)
}
|
package odoo
import (
"fmt"
)
// IrQwebFieldQweb represents ir.qweb.field.qweb model.
type IrQwebFieldQweb struct {
	// NOTE(review): the tag option is spelled "omptempty" (not "omitempty")
	// throughout this generated package. "Fixing" it only here could change
	// marshaling behavior and diverge from the generator — left as-is.
	LastUpdate  *Time   `xmlrpc:"__last_update,omptempty"`
	DisplayName *String `xmlrpc:"display_name,omptempty"`
	Id          *Int    `xmlrpc:"id,omptempty"`
}
// IrQwebFieldQwebs represents array of ir.qweb.field.qweb model.
type IrQwebFieldQwebs []IrQwebFieldQweb

// IrQwebFieldQwebModel is the odoo model name, as used in RPC calls.
const IrQwebFieldQwebModel = "ir.qweb.field.qweb"
// Many2One convert IrQwebFieldQweb to *Many2One.
// The display-name half of the pair is intentionally left empty.
func (iqfq *IrQwebFieldQweb) Many2One() *Many2One {
	return NewMany2One(iqfq.Id.Get(), "")
}
// CreateIrQwebFieldQweb creates a new ir.qweb.field.qweb model and returns its id.
// Note: when the server returns no ids this reports (-1, nil) — no error.
func (c *Client) CreateIrQwebFieldQweb(iqfq *IrQwebFieldQweb) (int64, error) {
	ids, err := c.CreateIrQwebFieldQwebs([]*IrQwebFieldQweb{iqfq})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}
// CreateIrQwebFieldQwebs creates new ir.qweb.field.qweb models and returns
// their ids. (Comment previously named the singular function by mistake.)
func (c *Client) CreateIrQwebFieldQwebs(iqfqs []*IrQwebFieldQweb) ([]int64, error) {
	// Re-box into []interface{} as required by the generic Create call.
	var vv []interface{}
	for _, v := range iqfqs {
		vv = append(vv, v)
	}
	return c.Create(IrQwebFieldQwebModel, vv)
}
// UpdateIrQwebFieldQweb updates an existing ir.qweb.field.qweb record.
// Convenience wrapper around the batch variant for a single record.
func (c *Client) UpdateIrQwebFieldQweb(iqfq *IrQwebFieldQweb) error {
	return c.UpdateIrQwebFieldQwebs([]int64{iqfq.Id.Get()}, iqfq)
}

// UpdateIrQwebFieldQwebs updates existing ir.qweb.field.qweb records.
// All records (represented by ids) will be updated by iqfq values.
func (c *Client) UpdateIrQwebFieldQwebs(ids []int64, iqfq *IrQwebFieldQweb) error {
	return c.Update(IrQwebFieldQwebModel, ids, iqfq)
}
// DeleteIrQwebFieldQweb deletes an existing ir.qweb.field.qweb record.
// Convenience wrapper around the batch variant for a single id.
func (c *Client) DeleteIrQwebFieldQweb(id int64) error {
	return c.DeleteIrQwebFieldQwebs([]int64{id})
}

// DeleteIrQwebFieldQwebs deletes existing ir.qweb.field.qweb records.
func (c *Client) DeleteIrQwebFieldQwebs(ids []int64) error {
	return c.Delete(IrQwebFieldQwebModel, ids)
}
// GetIrQwebFieldQweb gets ir.qweb.field.qweb existing record.
// Returns a "not found" error when the id does not exist.
func (c *Client) GetIrQwebFieldQweb(id int64) (*IrQwebFieldQweb, error) {
	iqfqs, err := c.GetIrQwebFieldQwebs([]int64{id})
	if err != nil {
		return nil, err
	}
	if iqfqs != nil && len(*iqfqs) > 0 {
		return &((*iqfqs)[0]), nil
	}
	return nil, fmt.Errorf("id %v of ir.qweb.field.qweb not found", id)
}

// GetIrQwebFieldQwebs gets ir.qweb.field.qweb existing records.
func (c *Client) GetIrQwebFieldQwebs(ids []int64) (*IrQwebFieldQwebs, error) {
	iqfqs := &IrQwebFieldQwebs{}
	if err := c.Read(IrQwebFieldQwebModel, ids, nil, iqfqs); err != nil {
		return nil, err
	}
	return iqfqs, nil
}
// FindIrQwebFieldQweb finds ir.qweb.field.qweb record by querying it with criteria.
// Only the first match is returned (Limit(1)); an error is returned when
// nothing matches.
func (c *Client) FindIrQwebFieldQweb(criteria *Criteria) (*IrQwebFieldQweb, error) {
	iqfqs := &IrQwebFieldQwebs{}
	if err := c.SearchRead(IrQwebFieldQwebModel, criteria, NewOptions().Limit(1), iqfqs); err != nil {
		return nil, err
	}
	if iqfqs != nil && len(*iqfqs) > 0 {
		return &((*iqfqs)[0]), nil
	}
	return nil, fmt.Errorf("ir.qweb.field.qweb was not found with criteria %v", criteria)
}

// FindIrQwebFieldQwebs finds ir.qweb.field.qweb records by querying it
// and filtering it with criteria and options.
func (c *Client) FindIrQwebFieldQwebs(criteria *Criteria, options *Options) (*IrQwebFieldQwebs, error) {
	iqfqs := &IrQwebFieldQwebs{}
	if err := c.SearchRead(IrQwebFieldQwebModel, criteria, options, iqfqs); err != nil {
		return nil, err
	}
	return iqfqs, nil
}
// FindIrQwebFieldQwebIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindIrQwebFieldQwebIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(IrQwebFieldQwebModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}

// FindIrQwebFieldQwebId finds record id by querying it with criteria.
// Returns the first matching id, or (-1, error) when nothing matches.
func (c *Client) FindIrQwebFieldQwebId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(IrQwebFieldQwebModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("ir.qweb.field.qweb was not found with criteria %v and options %v", criteria, options)
}
|
// Worker package is responsible for fetching external websites,
// and scheduling goroutines
package worker
import (
"context"
"sync"
"time"
"github.com/SQLek/wp-interview/model"
)
// Config holds worker tuning, populated from the environment via the config tags.
type Config struct {
	MinInterval  uint          `config:"MIN_INTERVAL"`  // minimum task interval; same unit as model.Task.Interval (presumably seconds — TODO confirm)
	FetchTimeout time.Duration `config:"FETCH_TIMEOUT"` // per-fetch timeout handed to each worker
}
// populateConfig fills in defaults for any zero-valued Config field and
// returns the completed copy.
func populateConfig(c Config) Config {
	const (
		defaultMinInterval  = 5
		defaultFetchTimeout = 5 * time.Second
	)
	out := c
	if out.MinInterval == 0 {
		out.MinInterval = defaultMinInterval
	}
	if out.FetchTimeout == 0 {
		out.FetchTimeout = defaultFetchTimeout
	}
	return out
}
// workerFunc is the signature of a worker goroutine body; kept as a type so
// tests can substitute a fake (could be useful with testing).
type workerFunc func(context.Context, model.Model, model.Task, time.Duration)

// Scheduler is responsible for spawning and managing goroutines
// that fetch data from external websites
type Scheduler interface {
	// Spawns worker and adds task to model
	SpawnWorker(context.Context, model.Task) (uint, error)
	// Updates worker and data entry in model.
	// Currently timings can be a little off: this method kills the
	// goroutine and starts a new one, so some form of time calculation
	// could be done to alleviate this.
	UpdateWorker(context.Context, model.Task) error
	// Kills worker and informs model to remove task and entries
	KillWorker(context.Context, uint) error
	// Kills all workers
	KillAll()
}
// scheduler is the production Scheduler implementation.
type scheduler struct {
	config Config
	model  model.Model
	// mutex guards cancelers: a synchronized map with the cancel functions of every worker
	mutex     *sync.Mutex
	cancelers map[uint]context.CancelFunc
	// worker is swappable, to have an option with testing
	worker workerFunc
}

// MakeSheduler builds a Scheduler backed by WorkOnRequests workers.
// NOTE(review): the name misspells "Scheduler", but it is exported API —
// renaming would break callers, so it is kept.
func MakeSheduler(c Config, m model.Model) Scheduler {
	return &scheduler{
		config:    populateConfig(c),
		model:     m,
		mutex:     &sync.Mutex{},
		worker:    WorkOnRequests,
		cancelers: make(map[uint]context.CancelFunc),
	}
}
// SpawnWorker persists the task in the model, then starts a goroutine for it
// and records the goroutine's cancel function under the new task id.
func (sched *scheduler) SpawnWorker(ctx context.Context, task model.Task) (uint, error) {
	sched.mutex.Lock()
	defer sched.mutex.Unlock()
	// Clamp the requested interval to the configured minimum.
	if task.Interval < sched.config.MinInterval {
		task.Interval = sched.config.MinInterval
	}
	id, err := sched.model.PutTask(task)
	if err != nil {
		return 0, err
	}
	// task is delivered by value
	// id isn't propagated here
	task.ID = id
	// ctx is from http server and will timeout worker quickly
	workerCtx, cancel := context.WithCancel(context.Background())
	sched.cancelers[id] = cancel
	go sched.worker(workerCtx, sched.model, task, sched.config.FetchTimeout)
	return id, nil
}
// KillWorker cancels the goroutine serving id and asks the model to remove
// the task; returns model.ErrNoRowsAfected when no such worker exists.
func (sched *scheduler) KillWorker(ctx context.Context, id uint) error {
	sched.mutex.Lock()
	defer sched.mutex.Unlock()
	cancel, ok := sched.cancelers[id]
	if !ok {
		// TODO maybe refactor this error type here?
		return model.ErrNoRowsAfected
	}
	cancel()
	return sched.model.DeleteTask(id)
}
// UpdateWorker persists the new task definition, then restarts its worker:
// the old goroutine is cancelled only after the model update succeeds, and a
// fresh goroutine is registered under the same id.
func (sched *scheduler) UpdateWorker(ctx context.Context, task model.Task) error {
	sched.mutex.Lock()
	defer sched.mutex.Unlock()
	canceler, exists := sched.cancelers[task.ID]
	if !exists {
		// TODO maybe refactor this error type here?
		return model.ErrNoRowsAfected
	}
	// TODO we could check last time fetch was executed
	err := sched.model.UpdateTask(task)
	if err != nil {
		return err
	}
	canceler()
	workerCtx, cancel := context.WithCancel(context.Background())
	sched.cancelers[task.ID] = cancel
	go sched.worker(workerCtx, sched.model, task, sched.config.FetchTimeout)
	return nil
}
// KillAll cancels every running worker and resets the canceler registry.
func (sched *scheduler) KillAll() {
	sched.mutex.Lock()
	defer sched.mutex.Unlock()
	for id := range sched.cancelers {
		sched.cancelers[id]()
	}
	sched.cancelers = map[uint]context.CancelFunc{}
}
|
package api
import (
"errors"
"fmt"
"net/http"
"net/url"
"reflect"
"strings"
"time"
"github.com/lucassabreu/clockify-cli/api/dto"
"github.com/lucassabreu/clockify-cli/strhlp"
stackedErrors "github.com/pkg/errors"
)
// Client will help to access Clockify API
type Client struct {
	baseURL *url.URL
	// Embedded http.Client; NewClient installs a transport that attaches the
	// X-Api-Key header to every request.
	http.Client
	debugLogger Logger
}

// baseURL is the Clockify API base URL
const baseURL = "https://api.clockify.me/api"

// ErrorMissingAPIKey returned if X-Api-Key is missing
var ErrorMissingAPIKey = errors.New("api Key must be informed")
// NewClient create a new Client, based on: https://clockify.github.io/clockify_api_docs/
func NewClient(apiKey string) (*Client, error) {
if apiKey == "" {
return nil, stackedErrors.WithStack(ErrorMissingAPIKey)
}
u, err := url.Parse(baseURL)
if err != nil {
return nil, stackedErrors.WithStack(err)
}
c := &Client{
baseURL: u,
Client: http.Client{
Transport: transport{
apiKey: apiKey,
next: http.DefaultTransport,
},
},
}
return c, nil
}
// GetWorkspaces will be used to filter the workspaces.
type GetWorkspaces struct {
	Name string
}

// GetWorkspaces lists all the user's workspaces, optionally narrowed to those
// whose normalized name contains f.Name (normalized, substring match).
func (c *Client) GetWorkspaces(f GetWorkspaces) ([]dto.Workspace, error) {
	var all []dto.Workspace
	r, err := c.NewRequest("GET", "v1/workspaces", nil)
	if err != nil {
		return all, err
	}
	if _, err = c.Do(r, &all, "GetWorkspaces"); err != nil {
		return all, err
	}
	if f.Name == "" {
		return all, nil
	}
	needle := strhlp.Normalize(strings.TrimSpace(f.Name))
	matched := []dto.Workspace{}
	for _, candidate := range all {
		if strings.Contains(strhlp.Normalize(candidate.Name), needle) {
			matched = append(matched, candidate)
		}
	}
	return matched, nil
}
// field names a request parameter for validation error messages.
type field string

const (
	workspaceField   = field("workspace")
	userIDField      = field("user id")
	projectField     = field("project id")
	timeEntryIDField = field("time entry id")
)

// required returns an error naming a blank field in values, formatted as
// "<field> is required to <action>", or nil when all values are set. Map
// iteration order is random, so which missing field is reported first is
// unspecified when several are blank.
func required(action string, values map[field]string) error {
	for name, value := range values {
		if value == "" {
			return fmt.Errorf("%s is required to %s", name, action)
		}
	}
	return nil
}
// GetWorkspace identifies a single workspace to fetch.
type GetWorkspace struct {
	ID string
}

// GetWorkspace fetches all workspaces and returns the one whose ID matches
// p.ID, or a 404 dto.Error when none does.
func (c *Client) GetWorkspace(p GetWorkspace) (dto.Workspace, error) {
	if err := required("get workspace", map[field]string{
		workspaceField: p.ID,
	}); err != nil {
		return dto.Workspace{}, err
	}
	list, err := c.GetWorkspaces(GetWorkspaces{})
	if err != nil {
		return dto.Workspace{}, err
	}
	for _, candidate := range list {
		if candidate.ID == p.ID {
			return candidate, nil
		}
	}
	return dto.Workspace{}, dto.Error{Message: "not found", Code: 404}
}
// WorkspaceUsersParam params to query workspace users.
type WorkspaceUsersParam struct {
	Workspace string
	Email     string
}

// WorkspaceUsers lists every user in a workspace, optionally narrowed to
// those whose e-mail contains Email (case-insensitive substring match).
func (c *Client) WorkspaceUsers(p WorkspaceUsersParam) ([]dto.User, error) {
	var users []dto.User
	if err := required("get workspace users", map[field]string{
		workspaceField: p.Workspace,
	}); err != nil {
		return users, err
	}
	r, err := c.NewRequest(
		"GET",
		fmt.Sprintf("v1/workspaces/%s/users", p.Workspace),
		nil,
	)
	if err != nil {
		return users, err
	}
	if _, err = c.Do(r, &users, "WorkspaceUsers"); err != nil {
		return users, err
	}
	if p.Email == "" {
		return users, nil
	}
	needle := strings.ToLower(p.Email)
	filtered := []dto.User{}
	for _, u := range users {
		if strings.Contains(strings.ToLower(u.Email), needle) {
			filtered = append(filtered, u)
		}
	}
	return filtered, nil
}
// PaginationParam parameters about pagination
type PaginationParam struct {
	AllPages bool // when true, pages are fetched from 1 until a short page arrives
	Page     int
	PageSize int // defaults to 50 when zero (see paginate)
}

// LogParam params to query entries
type LogParam struct {
	Workspace string
	UserID    string
	Date      time.Time // the calendar day whose entries are listed
	PaginationParam
}
// Log list time entries from a date: the whole calendar day containing
// p.Date, in p.Date's location.
//
// Fix: the day start was previously computed as p.Date.Round(time.Hour)
// followed by subtracting Hour() hours. Round goes to the NEAREST hour, so
// any time at 23:30 or later rounded up to midnight of the NEXT day (whose
// Hour() is 0), and the wrong day was queried. Building the midnight instant
// explicitly avoids the rounding entirely.
func (c *Client) Log(p LogParam) ([]dto.TimeEntry, error) {
	c.debugf("Log - Date Param: %s", p.Date)
	d := time.Date(p.Date.Year(), p.Date.Month(), p.Date.Day(), 0, 0, 0, 0, p.Date.Location())
	return c.LogRange(LogRangeParam{
		Workspace:       p.Workspace,
		UserID:          p.UserID,
		FirstDate:       d,
		LastDate:        d.Add(time.Hour * 24),
		PaginationParam: p.PaginationParam,
	})
}
// LogRangeParam params to query entries
type LogRangeParam struct {
	Workspace string
	UserID    string
	FirstDate time.Time // inclusive lower bound
	LastDate  time.Time // upper bound
	PaginationParam
}
// LogRange list time entries by date range.
//
// Fix: LogRangeParam embeds PaginationParam, but it was silently dropped when
// delegating to GetUsersHydratedTimeEntries — callers asking for AllPages or
// a specific page always got the default first page. It is now forwarded.
func (c *Client) LogRange(p LogRangeParam) ([]dto.TimeEntry, error) {
	c.debugf("LogRange - First Date Param: %s | Last Date Param: %s", p.FirstDate, p.LastDate)
	return c.GetUsersHydratedTimeEntries(GetUserTimeEntriesParam{
		Workspace:       p.Workspace,
		UserID:          p.UserID,
		Start:           &p.FirstDate,
		End:             &p.LastDate,
		PaginationParam: p.PaginationParam,
	})
}
// GetUserTimeEntriesParam filters a user's time entries.
type GetUserTimeEntriesParam struct {
	Workspace      string
	UserID         string
	OnlyInProgress *bool      // nil = no filter; true/false filter by running state
	Start          *time.Time // optional inclusive lower bound
	End            *time.Time // optional upper bound
	PaginationParam
}
// GetUserTimeEntries will list the time entries of a user on a workspace, can be paginated
func (c *Client) GetUserTimeEntries(p GetUserTimeEntriesParam) ([]dto.TimeEntryImpl, error) {
	var timeEntries []dto.TimeEntryImpl
	// tes serves as the page template passed to the paginator.
	var tes []dto.TimeEntryImpl
	err := c.getUserTimeEntries(p, false, &tes, func(res interface{}) (int, error) {
		if res == nil {
			return 0, nil
		}
		// Deliberate shadowing: this tes is the freshly decoded page, not the
		// template declared above.
		tes := res.(*[]dto.TimeEntryImpl)
		timeEntries = append(timeEntries, *tes...)
		return len(*tes), nil
	})
	return timeEntries, err
}
// GetUsersHydratedTimeEntries will list hydrated time entries of a user on a workspace, can be paginated
func (c *Client) GetUsersHydratedTimeEntries(p GetUserTimeEntriesParam) ([]dto.TimeEntry, error) {
	var timeEntries []dto.TimeEntry
	// tes serves as the page template passed to the paginator.
	var tes []dto.TimeEntry
	err := c.getUserTimeEntries(p, true, &tes, func(res interface{}) (int, error) {
		if res == nil {
			return 0, nil
		}
		// Deliberate shadowing: this tes is the freshly decoded page.
		tes := res.(*[]dto.TimeEntry)
		timeEntries = append(timeEntries, *tes...)
		return len(*tes), nil
	})
	if err != nil {
		return timeEntries, err
	}
	// The API does not embed the user in each entry; fetch once and attach it
	// to every entry.
	user, err := c.GetUser(GetUser{p.Workspace, p.UserID})
	if err != nil {
		return timeEntries, err
	}
	for i := range timeEntries {
		timeEntries[i].User = &user
	}
	return timeEntries, err
}
// getUserTimeEntries is the shared paginated fetch behind the hydrated and
// non-hydrated listings. tmpl is a pointer to an empty slice of the page's
// element type (used by paginate to allocate decode targets); reducer
// receives each decoded page and must return how many entries it held.
func (c *Client) getUserTimeEntries(
	p GetUserTimeEntriesParam,
	hydrated bool,
	tmpl interface{},
	reducer func(interface{}) (int, error),
) error {
	err := required("get time entries", map[field]string{
		workspaceField: p.Workspace,
		userIDField:    p.UserID,
	})
	if err != nil {
		return err
	}
	// Render the tri-state OnlyInProgress filter for the debug log.
	inProgressFilter := "nil"
	if p.OnlyInProgress != nil {
		if *p.OnlyInProgress {
			inProgressFilter = "true"
		} else {
			inProgressFilter = "false"
		}
	}
	c.debugf("GetUserTimeEntries - Workspace: %s | User: %s | In Progress: %s",
		p.Workspace,
		p.UserID,
		inProgressFilter,
	)
	r := dto.UserTimeEntriesRequest{
		OnlyInProgress: p.OnlyInProgress,
		Hydrated:       &hydrated,
	}
	if p.Start != nil {
		r.Start = &dto.DateTime{Time: *p.Start}
	}
	if p.End != nil {
		r.End = &dto.DateTime{Time: *p.End}
	}
	return c.paginate(
		"GET",
		fmt.Sprintf(
			"v1/workspaces/%s/user/%s/time-entries",
			p.Workspace,
			p.UserID,
		),
		p.PaginationParam,
		r,
		tmpl,
		reducer,
		"GetUserTimeEntries",
	)
}
// paginate repeatedly issues the request with increasing page numbers,
// decoding each page into a fresh instance of bodyTempl's element type and
// handing it to reducer. It stops after one page when AllPages is false, and
// otherwise when a page comes back shorter than PageSize.
func (c *Client) paginate(method, uri string, p PaginationParam, request dto.PaginatedRequest, bodyTempl interface{}, reducer func(interface{}) (int, error), name string) error {
	page := p.Page
	if p.AllPages {
		page = 1
	}
	if p.PageSize == 0 {
		p.PageSize = 50
	}
	stop := false
	for !stop {
		r, err := c.NewRequest(
			method,
			uri,
			request.WithPagination(page, p.PageSize),
		)
		if err != nil {
			return err
		}
		// Allocate a new value of bodyTempl's pointed-to type so pages never
		// share a decode target.
		response := reflect.New(reflect.TypeOf(bodyTempl).Elem()).Interface()
		_, err = c.Do(r, &response, name)
		if err != nil {
			return err
		}
		count, err := reducer(response)
		if err != nil {
			return err
		}
		// A short page means the collection is drained.
		stop = count < p.PageSize || !p.AllPages
		page++
	}
	return nil
}
// GetTimeEntryInProgressParam params to query entries
type GetTimeEntryInProgressParam struct {
	Workspace string
	UserID    string
}
// GetTimeEntryInProgress show time entry in progress (if any); both results
// are nil when nothing is currently running.
//
// Cleanup: the previous body re-checked err == nil after having already
// returned on err != nil (the check was always true) and used naked returns;
// straight-line explicit returns are clearer. Behavior is unchanged.
func (c *Client) GetTimeEntryInProgress(p GetTimeEntryInProgressParam) (timeEntryImpl *dto.TimeEntryImpl, err error) {
	onlyInProgress := true
	ts, err := c.GetUserTimeEntries(GetUserTimeEntriesParam{
		Workspace:      p.Workspace,
		UserID:         p.UserID,
		OnlyInProgress: &onlyInProgress,
	})
	if err != nil {
		return nil, err
	}
	if len(ts) == 0 {
		return nil, nil
	}
	return &ts[0], nil
}
// GetHydratedTimeEntryInProgress show hydrated time entry in progress (if
// any); the entry is nil when nothing is currently running.
func (c *Client) GetHydratedTimeEntryInProgress(p GetTimeEntryInProgressParam) (timeEntry *dto.TimeEntry, err error) {
	inProgress := true
	entries, err := c.GetUsersHydratedTimeEntries(GetUserTimeEntriesParam{
		Workspace:      p.Workspace,
		UserID:         p.UserID,
		OnlyInProgress: &inProgress,
	})
	if err == nil && len(entries) > 0 {
		timeEntry = &entries[0]
	}
	return timeEntry, err
}
// GetTimeEntryParam params to get a Time Entry
type GetTimeEntryParam struct {
	Workspace              string
	TimeEntryID            string
	ConsiderDurationFormat bool // forwarded to the API's duration-format handling
}

// GetTimeEntry will retrieve a Time Entry using its Workspace and ID
func (c *Client) GetTimeEntry(p GetTimeEntryParam) (timeEntry *dto.TimeEntryImpl, err error) {
	err = required("get time entry", map[field]string{
		workspaceField:   p.Workspace,
		timeEntryIDField: p.TimeEntryID,
	})
	if err != nil {
		return nil, err
	}
	r, err := c.NewRequest(
		"GET",
		fmt.Sprintf(
			"v1/workspaces/%s/time-entries/%s",
			p.Workspace,
			p.TimeEntryID,
		),
		dto.GetTimeEntryRequest{
			ConsiderDurationFormat: &p.ConsiderDurationFormat,
		},
	)
	if err != nil {
		return timeEntry, err
	}
	_, err = c.Do(r, &timeEntry, "GetTimeEntry")
	return timeEntry, err
}

// GetHydratedTimeEntry retrieves a Time Entry with its related entities
// hydrated (same endpoint as GetTimeEntry, with the Hydrated flag set).
func (c *Client) GetHydratedTimeEntry(p GetTimeEntryParam) (timeEntry *dto.TimeEntry, err error) {
	err = required("get time entry (hydrated)", map[field]string{
		workspaceField:   p.Workspace,
		timeEntryIDField: p.TimeEntryID,
	})
	if err != nil {
		return nil, err
	}
	b := true
	r, err := c.NewRequest(
		"GET",
		fmt.Sprintf(
			"v1/workspaces/%s/time-entries/%s",
			p.Workspace,
			p.TimeEntryID,
		),
		dto.GetTimeEntryRequest{
			ConsiderDurationFormat: &p.ConsiderDurationFormat,
			Hydrated:               &b,
		},
	)
	if err != nil {
		return timeEntry, err
	}
	_, err = c.Do(r, &timeEntry, "GetHydratedTimeEntry")
	return timeEntry, err
}
// GetTagParam params to find a tag
type GetTagParam struct {
	Workspace string
	TagID     string
}

// GetTag get a single tag, if it exists.
// Implemented client-side: fetches all tags of the workspace and scans for
// the id.
func (c *Client) GetTag(p GetTagParam) (*dto.Tag, error) {
	tags, err := c.GetTags(GetTagsParam{
		Workspace: p.Workspace,
	})
	if err != nil {
		return nil, err
	}
	for _, t := range tags {
		if t.ID == p.TagID {
			return &t, nil
		}
	}
	return nil, stackedErrors.Errorf("tag %s not found on workspace %s", p.TagID, p.Workspace)
}
// GetProjectParam params to get a Project
type GetProjectParam struct {
	Workspace string
	ProjectID string
}

// GetProject get a single Project, if exists
func (c *Client) GetProject(p GetProjectParam) (*dto.Project, error) {
	var project *dto.Project
	err := required("get project", map[field]string{
		workspaceField: p.Workspace,
		projectField:   p.ProjectID,
	})
	if err != nil {
		return project, err
	}
	r, err := c.NewRequest(
		"GET",
		fmt.Sprintf(
			"v1/workspaces/%s/projects/%s",
			p.Workspace,
			p.ProjectID,
		),
		nil,
	)
	if err != nil {
		return project, err
	}
	// project is decoded in place; it stays nil if the response carries no body.
	_, err = c.Do(r, &project, "GetProject")
	return project, err
}
// GetUser params to get a user
type GetUser struct {
	Workspace string
	UserID    string
}

// GetUser filters the wanted user from the workspace users.
// Implemented client-side: lists all workspace users and scans for the id;
// returns a 404 dto.Error when absent.
func (c *Client) GetUser(p GetUser) (dto.User, error) {
	err := required("get user", map[field]string{
		workspaceField: p.Workspace,
		userIDField:    p.UserID,
	})
	if err != nil {
		return dto.User{}, err
	}
	us, err := c.WorkspaceUsers(WorkspaceUsersParam{
		Workspace: p.Workspace,
	})
	if err != nil {
		return dto.User{}, err
	}
	for _, u := range us {
		if u.ID == p.UserID {
			return u, nil
		}
	}
	return dto.User{}, dto.Error{Message: "not found", Code: 404}
}
// GetMe get details about the user who created the token.
func (c *Client) GetMe() (dto.User, error) {
	var me dto.User
	req, err := c.NewRequest("GET", "v1/user", nil)
	if err != nil {
		return me, err
	}
	_, err = c.Do(req, &me, "GetMe")
	return me, err
}
// GetTasksParam param to find tasks of a project.
type GetTasksParam struct {
	Workspace string // workspace ID that owns the project
	ProjectID string // project whose tasks are listed
	Active bool // filter on active/inactive tasks
	Name string // optional name filter
	PaginationParam
}
// GetTasks get tasks of a project, following pagination until exhausted.
func (c *Client) GetTasks(p GetTasksParam) ([]dto.Task, error) {
	var all []dto.Task
	if err := required("get tasks", map[field]string{
		workspaceField: p.Workspace,
		projectField:   p.ProjectID,
	}); err != nil {
		return all, err
	}
	var page []dto.Task
	// collect accumulates each page into `all` and reports the page size
	// so paginate knows when to stop.
	collect := func(res interface{}) (int, error) {
		if res == nil {
			return 0, nil
		}
		got := *res.(*[]dto.Task)
		all = append(all, got...)
		return len(got), nil
	}
	err := c.paginate(
		"GET",
		fmt.Sprintf("v1/workspaces/%s/projects/%s/tasks", p.Workspace, p.ProjectID),
		p.PaginationParam,
		dto.GetTasksRequest{Name: p.Name, Active: p.Active},
		&page,
		collect,
		"GetTasks",
	)
	return all, err
}
// CreateTimeEntryParam params to create a new time entry.
type CreateTimeEntryParam struct {
	Workspace string // workspace to create the entry in (required)
	Start time.Time // entry start instant
	End *time.Time // nil means the entry is still running
	Billable bool
	Description string
	ProjectID string
	TaskID string
	TagIDs []string
}
// CreateTimeEntry create a new time entry.
// A nil End leaves the entry open (still running).
func (c *Client) CreateTimeEntry(p CreateTimeEntryParam) (dto.TimeEntryImpl, error) {
	var entry dto.TimeEntryImpl
	if err := required("create time entry", map[field]string{
		workspaceField: p.Workspace,
	}); err != nil {
		return entry, err
	}
	payload := dto.CreateTimeEntryRequest{
		Start:       dto.DateTime{Time: p.Start},
		Billable:    p.Billable,
		Description: p.Description,
		ProjectID:   p.ProjectID,
		TaskID:      p.TaskID,
		TagIDs:      p.TagIDs,
	}
	if p.End != nil {
		payload.End = &dto.DateTime{Time: *p.End}
	}
	req, err := c.NewRequest(
		"POST",
		fmt.Sprintf("v1/workspaces/%s/time-entries", p.Workspace),
		payload,
	)
	if err != nil {
		return entry, err
	}
	_, err = c.Do(req, &entry, "CreateTimeEntry")
	return entry, err
}
// GetTagsParam params to get all tags of a workspace.
type GetTagsParam struct {
	Workspace string // workspace to list tags from (required)
	Name string // optional name filter
	Archived bool // filter on archived state
	PaginationParam
}
// GetTags get all tags of a workspace, following pagination until exhausted.
func (c *Client) GetTags(p GetTagsParam) ([]dto.Tag, error) {
	var all []dto.Tag
	if err := required("get tags", map[field]string{
		workspaceField: p.Workspace,
	}); err != nil {
		return all, err
	}
	var page []dto.Tag
	// collect accumulates each page and reports its size for pagination.
	collect := func(res interface{}) (int, error) {
		if res == nil {
			return 0, nil
		}
		got := *res.(*[]dto.Tag)
		all = append(all, got...)
		return len(got), nil
	}
	err := c.paginate(
		"GET",
		fmt.Sprintf("v1/workspaces/%s/tags", p.Workspace),
		p.PaginationParam,
		dto.GetTagsRequest{Name: p.Name, Archived: p.Archived},
		&page,
		collect,
		"GetTags",
	)
	return all, err
}
// GetProjectsParam params to get all project of a workspace.
type GetProjectsParam struct {
	Workspace string // workspace to list projects from (required)
	Name string // optional name filter
	Archived bool // filter on archived state
	PaginationParam
}
// GetProjects get all project of a workspace, following pagination until
// exhausted.
func (c *Client) GetProjects(p GetProjectsParam) ([]dto.Project, error) {
	var all []dto.Project
	if err := required("get projects", map[field]string{
		workspaceField: p.Workspace,
	}); err != nil {
		return all, err
	}
	var page []dto.Project
	// collect accumulates each page and reports its size for pagination.
	collect := func(res interface{}) (int, error) {
		if res == nil {
			return 0, nil
		}
		got := *res.(*[]dto.Project)
		all = append(all, got...)
		return len(got), nil
	}
	err := c.paginate(
		"GET",
		fmt.Sprintf("v1/workspaces/%s/projects", p.Workspace),
		p.PaginationParam,
		dto.GetProjectRequest{Name: p.Name, Archived: p.Archived},
		&page,
		collect,
		"GetProjects",
	)
	return all, err
}
// OutParam params to end the current time entry.
type OutParam struct {
	Workspace string // workspace the running entry belongs to
	UserID string // user whose running entry is ended
	End time.Time // instant at which the entry stops
}
// Out ends the user's currently running time entry at the given instant.
// (The previous comment claimed it creates an entry; it PATCHes the
// running one closed.)
func (c *Client) Out(p OutParam) error {
	if err := required("end running time entry", map[field]string{
		workspaceField: p.Workspace,
		userIDField:    p.UserID,
	}); err != nil {
		return err
	}
	req, err := c.NewRequest(
		"PATCH",
		fmt.Sprintf("v1/workspaces/%s/user/%s/time-entries", p.Workspace, p.UserID),
		dto.OutTimeEntryRequest{End: dto.DateTime{Time: p.End}},
	)
	if err != nil {
		return err
	}
	_, err = c.Do(req, nil, "Out")
	return err
}
// UpdateTimeEntryParam params to update an existing time entry.
type UpdateTimeEntryParam struct {
	Workspace string // workspace the entry belongs to (required)
	TimeEntryID string // entry to update (required)
	Start time.Time
	End *time.Time // nil keeps the entry running
	Billable bool
	Description string
	ProjectID string
	TaskID string
	TagIDs []string
}
// UpdateTimeEntry update a time entry.
// A nil End leaves the entry open (still running).
func (c *Client) UpdateTimeEntry(p UpdateTimeEntryParam) (dto.TimeEntryImpl, error) {
	var entry dto.TimeEntryImpl
	if err := required("update time entry", map[field]string{
		workspaceField:   p.Workspace,
		timeEntryIDField: p.TimeEntryID,
	}); err != nil {
		return entry, err
	}
	payload := dto.UpdateTimeEntryRequest{
		Start:       dto.DateTime{Time: p.Start},
		Billable:    p.Billable,
		Description: p.Description,
		ProjectID:   p.ProjectID,
		TaskID:      p.TaskID,
		TagIDs:      p.TagIDs,
	}
	if p.End != nil {
		payload.End = &dto.DateTime{Time: *p.End}
	}
	req, err := c.NewRequest(
		"PUT",
		fmt.Sprintf("v1/workspaces/%s/time-entries/%s", p.Workspace, p.TimeEntryID),
		payload,
	)
	if err != nil {
		return entry, err
	}
	_, err = c.Do(req, &entry, "UpdateTimeEntry")
	return entry, err
}
// DeleteTimeEntryParam params to delete a time entry.
type DeleteTimeEntryParam struct {
	Workspace string // workspace the entry belongs to (required)
	TimeEntryID string // entry to delete (required)
}
// DeleteTimeEntry deletes a time entry.
func (c *Client) DeleteTimeEntry(p DeleteTimeEntryParam) error {
	if err := required("delete time entry", map[field]string{
		workspaceField:   p.Workspace,
		timeEntryIDField: p.TimeEntryID,
	}); err != nil {
		return err
	}
	req, err := c.NewRequest(
		"DELETE",
		fmt.Sprintf("v1/workspaces/%s/time-entries/%s", p.Workspace, p.TimeEntryID),
		nil,
	)
	if err != nil {
		return err
	}
	_, err = c.Do(req, nil, "DeleteTimeEntry")
	return err
}
// ChangeInvoicedParam params to flag time entries as invoiced (or not).
type ChangeInvoicedParam struct {
	Workspace string // workspace the entries belong to
	TimeEntryIDs []string // entries whose invoiced flag is changed
	Invoiced bool // desired invoiced state
}
// ChangeInvoiced changes time entries to invoiced or not.
func (c *Client) ChangeInvoiced(p ChangeInvoicedParam) error {
	// Validate the workspace up front, consistent with every other
	// workspace-scoped call in this client (previously missing here).
	err := required("change invoiced", map[field]string{
		workspaceField: p.Workspace,
	})
	if err != nil {
		return err
	}
	r, err := c.NewRequest(
		"PATCH",
		fmt.Sprintf(
			"v1/workspaces/%s/time-entries/invoiced",
			p.Workspace,
		),
		dto.ChangeTimeEntriesInvoicedRequest{
			TimeEntryIDs: p.TimeEntryIDs,
			Invoiced:     p.Invoiced,
		},
	)
	if err != nil {
		return err
	}
	_, err = c.Do(r, nil, "ChangeInvoiced")
	return err
}
|
package JsMobile
import (
"JsGo/JsConfig"
"JsGo/JsHttp"
. "JsGo/JsLogger"
"JsGo/JsNet"
"fmt"
"log"
"math/rand"
"net/http"
"time"
"github.com/coocood/freecache"
)
// g_smscache caches pending verification codes keyed by mobile number.
var g_smscache *freecache.Cache
// g_rand_chan streams pseudo-random ints produced by randCoolie.
var g_rand_chan chan int
// g_sms_cfg holds the "MobileVerify" section of the app config.
var g_sms_cfg map[string]string
// AlidayuInit wires the SMS verification endpoint for the legacy JsNet
// interface (kept for backward compatibility with old clients).
func AlidayuInit() {
	g_smscache = freecache.NewCache(32 * 1024 * 1024) // 32MB
	g_rand_chan = make(chan int)
	cfg, err := JsConfig.GetConfigMap([]string{"MobileVerify"})
	if err != nil {
		log.Fatalln(err)
	}
	g_sms_cfg = cfg
	go randCoolie()
	JsNet.WhiteHttp("/alidayu", alidayu_old)
}
// alidayu_old handles the legacy JsNet request: parses the SMS parameters
// and triggers a verification message.
func alidayu_old(session *JsNet.StSession) {
	type Para struct {
		SignName string
		Mobile   string
		SmsCode  string
		Expire   int
	}
	var para Para
	if err := session.GetPara(&para); err != nil {
		session.Forward("1", err.Error(), nil)
		return
	}
	ComJsMobileVerify(para.SignName, para.Mobile, para.SmsCode, "a", para.Expire, nil)
	session.Forward("0", "success", "")
}
// NewAlidayuInit wires the SMS verification endpoint for the newer JsHttp
// interface; same setup as AlidayuInit but registers the JsHttp handler.
func NewAlidayuInit() {
	g_smscache = freecache.NewCache(32 * 1024 * 1024) // 32MB
	g_rand_chan = make(chan int)
	cfg, err := JsConfig.GetConfigMap([]string{"MobileVerify"})
	if err != nil {
		log.Fatalln(err)
	}
	g_sms_cfg = cfg
	go randCoolie()
	JsHttp.WhiteHttps("/alidayu", alidayu)
}
// alidayu handles the JsHttp request: parses the SMS parameters and
// triggers a verification message.
func alidayu(session *JsHttp.Session) {
	// Para mirrors the expected request fields.
	type Para struct {
		SignName string
		Mobile string
		SmsCode string
		Expire int
	}
	para := &Para{}
	e := session.GetPara(para)
	if e != nil {
		session.Forward("1", e.Error(), nil)
		return
	}
	// NOTE(review): logs every request at Error level — looks like a debug
	// leftover; consider Info or removal. Confirm before changing.
	Error("alidayu=%v", para)
	ComJsMobileVerify(para.SignName, para.Mobile, para.SmsCode, "a", para.Expire, nil)
	session.Forward("0", "success", "")
}
// randCoolie runs forever, feeding pseudo-random ints into g_rand_chan.
// It is started once per *Init call and never stops.
func randCoolie() {
	src := rand.NewSource(int64(time.Now().Nanosecond()))
	gen := rand.New(src)
	for {
		g_rand_chan <- gen.Int()
	}
}
// getCode draws one random value from g_rand_chan and formats it as a
// zero-padded 6-digit verification code.
func getCode() string {
	v := <-g_rand_chan
	return fmt.Sprintf("%06d", v%1000000)
}
// ComJsMobileVerify sends an SMS verification request to the configured
// gateway (g_sms_cfg["VUrl"]). When par carries no explicit "code", a random
// 6-digit code is generated and cached under the mobile number for `expire`
// seconds so VerifySmsCode can check it later.
// NOTE(review): query parameters are concatenated without URL-escaping;
// assumes values are URL-safe (digits/IDs) — confirm.
func ComJsMobileVerify(signName, mobile, smscode, t string, expire int, par map[string]string) {
	code := ""
	if par != nil {
		code = par["code"]
	}
	if code == "" {
		code = getCode()
		g_smscache.Set([]byte(mobile), []byte(code), expire)
	}
	if signName == "" {
		signName = g_sms_cfg["SignName"]
	}
	para := "?appkey="
	para += g_sms_cfg["AppKey"]
	para += "&secretkey="
	para += g_sms_cfg["SecretKey"]
	para += "&signname="
	para += signName
	para += "&mobile="
	para += mobile
	para += "&smscode="
	para += smscode
	para += "&type="
	para += t
	for k, v := range par {
		para += "&"
		para += k
		para += "="
		para += v
	}
	if par == nil || par["code"] == "" {
		para += "&code="
		para += code
	}
	Info(g_sms_cfg["VUrl"] + para)
	response, e := http.Get(g_sms_cfg["VUrl"] + para)
	if e != nil {
		Error(e.Error())
		return
	}
	defer response.Body.Close()
	// Read the body once. The previous code called Read twice (discarding the
	// first 2KB) and then re-checked an error that was already known to be
	// nil, so the failure log below could never fire.
	b := make([]byte, 2048)
	n, _ := response.Body.Read(b)
	if response.StatusCode != http.StatusOK {
		Error("verify %s error, rsp:%s\n", mobile, string(b[:n]))
	}
}
// VerifySmsCode reports whether code matches the cached verification code
// for mobile. A cache miss (expired or never sent) counts as a mismatch.
func VerifySmsCode(mobile, code string) bool {
	cached, err := g_smscache.Get([]byte(mobile))
	return err == nil && string(cached) == code
}
////////////////////////////////////////////////////////////////////////////////
//
//新接口
// //
////////////////////////////////////////////////////////////////////////////////
// const v5_url = "http://www.api.zthysms.com/sendSms.do"
// const v5_username = "shxyhy"
// const v5_password = "9BApAi"
// func verify_ex(code, product, mobile string) {
// tkey := time.Now().Format("20060102150405")
// md5Ctx := md5.New()
// md5Ctx.Write([]byte(v5_password))
// cipherStr := md5Ctx.Sum(nil)
// md5Ctx = md5.New()
// md5Ctx.Write([]byte(hex.EncodeToString(cipherStr) + tkey))
// pwd := hex.EncodeToString(md5Ctx.Sum(nil))
// para := "?username=" + v5_username
// para += "&tkey=" + tkey
// para += "&password=" + pwd
// para += "&mobile=" + mobile
// para += "&content=hello"
// response, e := http.Get(v5_url + para)
// b := make([]byte, 2048)
// response.Body.Read(b)
// defer response.Body.Close()
// if e != nil {
// b := make([]byte, 2048)
// n, _ := response.Body.Read(b)
// g_log.Error("verify %s error, rsp:%s\n", mobile, string(b[:n]))
// }
// }
// func RegisterAuth_ex(mobile, product string, expire int) {
// code := getCode()
// g_smscache.Set([]byte(mobile), []byte(code), expire)
// g_log.Info("---------------------------------------------------------Code=%s\n", code)
// verify_ex(code, product, mobile)
// }
|
package _279_Perfect_Squares
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestPerfectSquares(t *testing.T){
ast := assert.New(t)
ast.Equal(0,numSquares(0))
ast.Equal(1,numSquares(1))
ast.Equal(2,numSquares(2))
ast.Equal(3,numSquares(3))
ast.Equal(1,numSquares(4))
ast.Equal(2,numSquares(5))
ast.Equal(2,numSquares(8))
ast.Equal(1,numSquares(9))
ast.Equal(2,numSquares(10))
ast.Equal(3,numSquares(12))
ast.Equal(2,numSquares(13))
ast.Equal(3,numSquares(14))
ast.Equal(1,numSquares(16))
ast.Equal(3,numSquares(19))
ast.Equal(3,numSquares(48))
ast.Equal(3,numSquares(235))
} |
package web
import (
"fmt"
"net/http"
"github.com/davidnorminton/tvshowCalendar/calendar"
)
// UpdateHandler refreshes the TV-show calendar data and reports the outcome
// as a plain-text body ("updated" or "error").
func UpdateHandler(w http.ResponseWriter, r *http.Request) {
	if err := calendar.UpdateCalendar(); err != nil {
		// Signal failure via a 5xx status instead of the implicit 200 OK the
		// previous version sent alongside the "error" body.
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "error")
	} else {
		fmt.Fprintf(w, "updated")
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package ui
import (
"context"
"time"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc"
"google.golang.org/protobuf/testing/protocmp"
"google.golang.org/protobuf/types/known/structpb"
"chromiumos/tast/remote/crosserverutil"
pb "chromiumos/tast/services/cros/ui"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the TconnServiceGRPC remote test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: TconnServiceGRPC,
		Desc: "Check basic functionalities of UI TconnService",
		Contacts: []string{"msta@google.com", "chromeos-engprod-syd@google.com"},
		Attr: []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		HardwareDeps: hwdep.D(hwdep.Model("amd64-generic")),
		LacrosStatus: testing.LacrosVariantUnneeded,
		Timeout: time.Minute * 5,
	})
}
// TconnServiceGRPC tests basic functionalities of UI TconnService.
// It starts Chrome on the DUT over gRPC, then exercises TconnService.Eval
// and TconnService.Call with a table of JS expressions/functions, comparing
// each result (a structpb.Value) against the expected proto value.
func TconnServiceGRPC(ctx context.Context, s *testing.State) {
	cl, err := crosserverutil.GetGRPCClient(ctx, s.DUT())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	defer cl.Close(ctx)
	// Start Chrome on the DUT.
	cs := pb.NewChromeServiceClient(cl.Conn)
	loginReq := &pb.NewRequest{}
	if _, err := cs.New(ctx, loginReq, grpc.WaitForReady(true)); err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	defer cs.Close(ctx, &empty.Empty{})
	svc := pb.NewTconnServiceClient(cl.Conn)
	// value converts a Go value into a structpb.Value, failing the test on
	// unrepresentable input.
	value := func(val interface{}) *structpb.Value {
		result, err := structpb.NewValue(val)
		if err != nil {
			s.Fatal("Invalid value: ", err)
		}
		return result
	}
	// A zero structpb.Value (no Kind set) represents JS `undefined`, which
	// must stay distinct from JS `null` (NullValue kind).
	undefined := &structpb.Value{}
	if cmp.Equal(value(nil), undefined, protocmp.Transform()) {
		s.Fatal("Nil and undefined are indistinguishable")
	}
	structValue := value(map[string]interface{}{"a": 1, "b": 2})
	// Eval cases: expression in, expected proto value out.
	for _, tc := range []struct {
		expr string
		want *structpb.Value
	}{
		{
			expr: "null",
			want: value(nil),
		},
		{
			expr: "undefined",
			want: undefined,
		},
		{
			expr: "1 + 1",
			want: value(2),
		},
		{
			expr: "tast.promisify(chrome.i18n.getAcceptLanguages)()",
			want: value([]interface{}{"en-US", "en"}),
		},
		{
			expr: "(() => { return {a: 1, b: 2} })()",
			want: structValue,
		},
	} {
		got, err := svc.Eval(ctx, &pb.EvalRequest{Expr: tc.expr})
		if err != nil {
			s.Fatalf("Eval(%s) failed: %v", tc.expr, err)
		}
		if diff := cmp.Diff(got, tc.want, protocmp.Transform()); diff != "" {
			s.Fatalf("Eval(%s) mismatch (-got +want):%s", tc.expr, diff)
		}
	}
	// Call cases: JS function + args in, expected proto value out.
	for _, tc := range []struct {
		fn string
		args []*structpb.Value
		want *structpb.Value
	}{
		{
			fn: "() => { return null; }",
			args: []*structpb.Value{},
			want: value(nil),
		},
		{
			fn: "() => {}",
			args: []*structpb.Value{},
			want: undefined,
		},
		{
			fn: "(x, y) => { return x + y; }",
			args: []*structpb.Value{value(1), value(2)},
			want: value(3),
		},
		{
			fn: "(x) => { return x; }",
			args: []*structpb.Value{structValue},
			want: structValue,
		},
	} {
		got, err := svc.Call(ctx, &pb.CallRequest{Fn: tc.fn, Args: tc.args})
		if err != nil {
			s.Fatalf("Call %s with args %v failed: %v", tc.fn, tc.args, err)
		}
		if diff := cmp.Diff(got, tc.want, protocmp.Transform()); diff != "" {
			s.Fatalf("Call %s with args %v mismatch (-got +want):%s", tc.fn, tc.args, diff)
		}
	}
}
|
// Copyright 2022 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package native
// #cgo CFLAGS: -Wall -xobjective-c -fblocks -fobjc-arc -mmacosx-version-min=10.13
// #cgo LDFLAGS: -framework CoreFoundation -framework Foundation -framework IOKit -framework Security
// #include <stdint.h>
// #include <stdlib.h>
// #include "device_darwin.h"
import "C"
import (
"crypto/sha256"
"crypto/x509"
"unsafe"
"github.com/google/uuid"
"github.com/gravitational/trace"
"google.golang.org/protobuf/types/known/timestamppb"
devicepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/devicetrust/v1"
"github.com/gravitational/teleport/lib/darwin"
)
// enrollDeviceInit assembles the EnrollDeviceInit payload for macOS device
// trust enrollment: it ensures a device key exists in the Secure
// Enclave/keychain and gathers the device's collected data.
func enrollDeviceInit() (*devicepb.EnrollDeviceInit, error) {
	cred, err := deviceKeyGetOrCreate()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	cd, err := collectDeviceData()
	if err != nil {
		return nil, trace.Wrap(err, "collecting device data")
	}
	return &devicepb.EnrollDeviceInit{
		CredentialId: cred.Id,
		DeviceData: cd,
		Macos: &devicepb.MacOSEnrollPayload{
			PublicKeyDer: cred.PublicKeyDer,
		},
	}, nil
}
// deviceKeyGetOrCreate fetches the existing device key via the native
// DeviceKeyGetOrCreate C call, creating one under a fresh UUID if absent,
// and converts the result into a DeviceCredential.
func deviceKeyGetOrCreate() (*devicepb.DeviceCredential, error) {
	newID := uuid.NewString()
	newIDC := C.CString(newID)
	defer C.free(unsafe.Pointer(newIDC))
	// pubKeyC's fields are allocated by the C side; free them on all paths.
	var pubKeyC C.PublicKey
	defer func() {
		C.free(unsafe.Pointer(pubKeyC.id))
		C.free(unsafe.Pointer(pubKeyC.pub_key))
	}()
	if res := C.DeviceKeyGetOrCreate(newIDC, &pubKeyC); res != 0 {
		return nil, trace.Wrap(statusErrorFromC(res))
	}
	id := C.GoString(pubKeyC.id)
	pubKeyRaw := C.GoBytes(unsafe.Pointer(pubKeyC.pub_key), C.int(pubKeyC.pub_key_len))
	return pubKeyToCredential(id, pubKeyRaw)
}
// pubKeyToCredential converts a raw ECDSA public key (as returned by the C
// layer) into a DeviceCredential with the key re-encoded as PKIX/DER.
func pubKeyToCredential(id string, pubKeyRaw []byte) (*devicepb.DeviceCredential, error) {
	ecPubKey, err := darwin.ECDSAPublicKeyFromRaw(pubKeyRaw)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	pubKeyDER, err := x509.MarshalPKIXPublicKey(ecPubKey)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &devicepb.DeviceCredential{
		Id: id,
		PublicKeyDer: pubKeyDER,
	}, nil
}
// collectDeviceData queries the native layer for device information
// (currently the serial number) and stamps it with the collection time.
func collectDeviceData() (*devicepb.DeviceCollectedData, error) {
	var dd C.DeviceData
	// serial_number is allocated by the C side; free it on all paths.
	defer func() { C.free(unsafe.Pointer(dd.serial_number)) }()
	if res := C.DeviceCollectData(&dd); res != 0 {
		return nil, trace.Wrap(statusErrorFromC(res))
	}
	return &devicepb.DeviceCollectedData{
		CollectTime: timestamppb.Now(),
		OsType: devicepb.OSType_OS_TYPE_MACOS,
		SerialNumber: C.GoString(dd.serial_number),
	}, nil
}
// signChallenge hashes chal with SHA-256 and asks the native device key to
// sign the digest, returning the raw signature bytes.
func signChallenge(chal []byte) (sig []byte, err error) {
	h := sha256.Sum256(chal)
	digC := C.Digest{
		data: (*C.uint8_t)(C.CBytes(h[:])),
		data_len: (C.size_t)(len(h)),
	}
	defer func() { C.free(unsafe.Pointer(digC.data)) }()
	// sigC.data is allocated by the C side; free it on all paths.
	var sigC C.Signature
	defer func() { C.free(unsafe.Pointer(sigC.data)) }()
	if res := C.DeviceKeySign(digC, &sigC); res != 0 {
		return nil, trace.Wrap(statusErrorFromC(res))
	}
	sig = C.GoBytes(unsafe.Pointer(sigC.data), C.int(sigC.data_len))
	// err is always nil here; it is only named to match the signature.
	return sig, err
}
// getDeviceCredential fetches the existing device key (without creating
// one) and converts it into a DeviceCredential.
func getDeviceCredential() (*devicepb.DeviceCredential, error) {
	// pubKeyC's fields are allocated by the C side; free them on all paths.
	var pubKeyC C.PublicKey
	defer func() {
		C.free(unsafe.Pointer(pubKeyC.id))
		C.free(unsafe.Pointer(pubKeyC.pub_key))
	}()
	if res := C.DeviceKeyGet(&pubKeyC); res != 0 {
		return nil, trace.Wrap(statusErrorFromC(res))
	}
	id := C.GoString(pubKeyC.id)
	pubKeyRaw := C.GoBytes(unsafe.Pointer(pubKeyC.pub_key), C.int(pubKeyC.pub_key_len))
	return pubKeyToCredential(id, pubKeyRaw)
}
// statusErrorFromC wraps a non-zero status code returned by the C layer
// into a Go error.
func statusErrorFromC(res C.int32_t) error {
	return &statusError{status: int32(res)}
}
|
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nvproxy
import (
"unsafe"
"golang.org/x/sys/unix"
)
// uvmIoctlInvoke issues ioctl(ui.cmd) on the host UVM device FD, passing
// ioctlParams straight through to the host kernel. It returns the raw
// syscall result; a non-zero errno is surfaced as the error.
// The params struct must stay reachable for the duration of the call since
// only its address crosses the syscall boundary.
func uvmIoctlInvoke[Params any](ui *uvmIoctlState, ioctlParams *Params) (uintptr, error) {
	n, _, errno := unix.RawSyscall(unix.SYS_IOCTL, uintptr(ui.fd.hostFD), uintptr(ui.cmd), uintptr(unsafe.Pointer(ioctlParams)))
	if errno != 0 {
		return n, errno
	}
	return n, nil
}
|
package alipass
import (
"testing"
)
// Test fixtures for the Alipay pass integration tests.
// NOTE(review): a real-looking RSA private key and app ID are committed
// here; confirm these are sandbox-only credentials and consider loading
// them from the environment instead of source control.
const (
	AppId = "2015040200041603"
	PrivateKey = "MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAKK0PXoLKnBkgtOl0kvyc9X2tUUdh/lRZr9RE1frjr2ZtAulZ+Moz9VJZFew1UZIzeK0478obY/DjHmD3GMfqJoTguVqJ2MEg+mJ8hJKWelvKLgfFBNliAw+/9O6Jah9Q3mRzCD8pABDEHY7BM54W7aLcuGpIIOa/qShO8dbXn+FAgMBAAECgYA8+nQ380taiDEIBZPFZv7G6AmT97doV3u8pDQttVjv8lUqMDm5RyhtdW4n91xXVR3ko4rfr9UwFkflmufUNp9HU9bHIVQS+HWLsPv9GypdTSNNp+nDn4JExUtAakJxZmGhCu/WjHIUzCoBCn6viernVC2L37NL1N4zrR73lSCk2QJBAPb/UOmtSx+PnA/mimqnFMMP3SX6cQmnynz9+63JlLjXD8rowRD2Z03U41Qfy+RED3yANZXCrE1V6vghYVmASYsCQQCoomZpeNxAKuUJZp+VaWi4WQeMW1KCK3aljaKLMZ57yb5Bsu+P3odyBk1AvYIPvdajAJiiikRdIDmi58dqfN0vAkEAjFX8LwjbCg+aaB5gvsA3t6ynxhBJcWb4UZQtD0zdRzhKLMuaBn05rKssjnuSaRuSgPaHe5OkOjx6yIiOuz98iQJAXIDpSMYhm5lsFiITPDScWzOLLnUR55HL/biaB1zqoODj2so7G2JoTiYiznamF9h9GuFC2TablbINq80U2NcxxQJBAMhw06Ha/U7qTjtAmr2qAuWSWvHU4ANu2h0RxYlKTpmWgO0f47jCOQhdC3T/RK7f38c7q8uPyi35eZ7S1e/PznY="
	OpenApiUrl = "https://openapi.alipay.com/gateway.do"
	UserId = "+0YuKZBkIc1cEDklW4gMk5qKo7ILToCxhO4skMe5bRK-S4-HyunMvYTqiRYEUD+U01"
	Template = "2015051210573746242893580"
)
var (
	// openId identifies the test buyer; serialNumber doubles as card
	// serial, QR payload and verify code across these tests.
	openId, serialNumber = "20881011315239463742107232815092", "20150521140000"
)
// TestAddByTemplate issues an "add pass by template" call against the live
// Alipay gateway and logs the response.
func TestAddByTemplate(t *testing.T) {
	// Card (template) parameters.
	tplParams := map[string]string{
		"qrcode":        serialNumber,
		"serialNumber":  serialNumber,
		"channelID":     AppId,
		"webServiceUrl": " ",
	}
	// User identification parameters.
	userParams := map[string]string{"open_id": openId}
	addReq := &AddTplRequest{}
	addReq.AlipayApiUrl = OpenApiUrl
	addReq.AppId = AppId
	addReq.PrivateKeyData = PrivateKey
	addReq.TemplateParamValuePair = tplParams
	addReq.UserTypeParams = userParams
	addReq.UserType = OPENID
	addReq.TemplateId = Template
	svc := &AlipassTransferService{}
	resp, err := svc.AddByTemplate(addReq)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	t.Logf("%+v", resp)
}
// TestUpdateAlipass marks the test pass as used via the live Alipay gateway
// and logs the response.
func TestUpdateAlipass(t *testing.T) {
	req := &UpdAlipssRequest{}
	req.AppId = AppId
	req.SerialNumber = serialNumber
	req.ChannelId = AppId
	req.Status = PASS_STATUS_USED
	req.AlipayApiUrl = OpenApiUrl
	req.PrivateKeyData = PrivateKey
	req.VerifyCode = serialNumber
	req.VerifyType = "qrcode"
	svc := &AlipassTransferService{}
	resp, err := svc.UpdateAlipass(req)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	t.Logf("%+v", resp)
}
|
package main
import (
"encoding/json"
"fmt"
"io"
"log"
)
type Player struct {
Name string `json:"name"`
Color string `json:"color"`
Points int `json:"points"`
}
type PlayerList []Player
var players PlayerList
func (players PlayerList) writeJSON(w io.WriteCloser) {
fmt.Fprintf(w, "\"players\": [")
separator := ""
for i := range players {
p := players[i]
pJson, err := json.Marshal(p)
if err != nil {
log.Println(err)
return
}
fmt.Fprintf(w, "%v%v", separator, string(pJson))
separator = ",\n"
}
fmt.Fprintf(w, "]")
}
|
package main
import (
"fmt"
"strconv"
"strings"
"github.com/storm84/AOC/AOC22/utils"
)
// sectionRange is an inclusive range of section IDs (AoC 2022 day 4).
type sectionRange struct {
	low int // first section in the assignment
	high int // last section in the assignment
}
// main solves AoC 2022 day 4: count assignment pairs that fully contain
// each other (part A) and that overlap at all (part B).
func main() {
	lines, err := utils.ReadLines("input")
	utils.Check(err)
	var full, partial int
	for _, line := range lines {
		if line == "" {
			continue
		}
		pair := strings.Split(line, ",")
		a := toSectionRange(pair[0])
		b := toSectionRange(pair[1])
		if isFullyOverlapping(a, b) {
			full++
		}
		if isOverlapping(a, b) {
			partial++
		}
	}
	fmt.Printf("The answer to A is: %d\n", full)
	fmt.Printf("The answer to B is: %d\n", partial)
}
// isFullyOverlapping reports whether one range completely contains the other.
func isFullyOverlapping(a sectionRange, b sectionRange) bool {
	aInsideB := a.low >= b.low && a.high <= b.high
	bInsideA := b.low >= a.low && b.high <= a.high
	return aInsideB || bInsideA
}
// isOverlapping reports whether any endpoint of one range lies inside the
// other range (i.e. the two assignments share at least one section).
func isOverlapping(a sectionRange, b sectionRange) bool {
	within := func(x, lo, hi int) bool { return x >= lo && x <= hi }
	return within(a.low, b.low, b.high) ||
		within(a.high, b.low, b.high) ||
		within(b.low, a.low, a.high) ||
		within(b.high, a.low, a.high)
}
// toSectionRange parses "low-high" into a sectionRange, aborting on bad input.
func toSectionRange(s string) sectionRange {
	parts := strings.Split(s, "-")
	lo, err := strconv.Atoi(parts[0])
	utils.Check(err)
	hi, err := strconv.Atoi(parts[1])
	utils.Check(err)
	return sectionRange{low: lo, high: hi}
}
|
package builder
import (
"fmt"
)
// sshAuthorizedKeyEntry is the authorized_keys line template that forces
// every SSH login to run gitreceive; %s receives the user's public key.
var sshAuthorizedKeyEntry = `command="/home/git/gitreceive run",no-agent-forwarding,no-pty,no-user-rc,no-X11-forwarding,no-port-forwarding %s`

// gitReceiverScript is the gitreceive shell script installed on the git
// host: it creates bare repos on demand, installs a pre-receive hook, and
// forwards pushes to a receiver script. (gofmt: added the missing space
// after `=`; the script body is unchanged.)
var gitReceiverScript = `#!/bin/bash
readonly GITUSER="${GITUSER:-git}"
readonly GITHOME="/home/${GITUSER}"
absolute_path() {
pushd "$(dirname $1)" > /dev/null
local abspath="$(pwd -P)"
popd > /dev/null
echo "$abspath/$(basename $1)"
}
strip_root_slash() {
local str="$(cat)"
if [ "${str:0:1}" == "/" ]; then
echo "$str" | cut -c 2-
else
echo "$str"
fi
}
parse_repo_from_ssh_command() {
awk '{print $2}' | sed 's/\\'\''/'\''/g' | sed "s/'//g" | strip_root_slash
}
ensure_bare_repo() {
declare repo_path="$1"
if [ ! -d "$repo_path" ]; then
mkdir -p "$repo_path"
cd "$repo_path"
git init --bare > /dev/null
cd - > /dev/null
fi
}
ensure_prereceive_hook() {
declare repo_path="$1" home_dir="$2" self="$3"
local hook_path="$repo_path/hooks/pre-receive"
cd "$home_dir"
cat > "$hook_path" <<EOF
#!/bin/bash
cat | $self hook
EOF
chmod +x "$hook_path"
cd - > /dev/null
}
setup_receiver_script() {
declare home_dir="$1"
local receiver_path="$home_dir/receiver"
cat > "$receiver_path" <<EOF
#!/bin/bash
#URL=http://requestb.in/rlh4znrl
#echo "----> Posting to \$URL ..."
#curl \\
# -X 'POST' \\
# -F "repository=\$1" \\
# -F "revision=\$2" \\
# -F "username=\$3" \\
# -F "fingerprint=\$4" \\
# -F contents=@- \\
# --silent \$URL
EOF
chmod +x "$receiver_path"
chown git:git "$receiver_path"
}
trigger_receiver() {
declare repo="$1" home_dir="$2"
local receiver_path="$home_dir/receiver"
# check if receiver is already exists
if [ ! -f "$receiver_path" ]; then
setup_receiver_script "$home_dir"
fi
echo "trigger_receiver:$repo" >> /home/git/push.log
# "$home_dir/receiver" "$repo" "$newrev" "$refname"
}
strip_remote_prefix() {
sed -u "s/^/"$'\e[1G'"/"
}
main() {
# Be unforgiving about errors
set -euo pipefail
readonly SELF="$(absolute_path $0)"
case "$1" in
run)
export RECEIVE_REPO="$(echo "$SSH_ORIGINAL_COMMAND" | parse_repo_from_ssh_command)"
local repo_path="$GITHOME/$RECEIVE_REPO"
echo "$repo_path" >> /home/git/push.log
ensure_bare_repo "$repo_path"
ensure_prereceive_hook "$repo_path" "$GITHOME" "$SELF"
cd "$GITHOME"
git-shell -c "$(echo "$SSH_ORIGINAL_COMMAND" | awk '{print $1}') '$RECEIVE_REPO'"
;;
# Called by the pre-receive hook
hook)
echo "hook called: $RECEIVE_REPO" >> /home/git/push.log
trigger_receiver "$RECEIVE_REPO" "$GITHOME" | strip_remote_prefix
;;
*)
echo "Usage: gitreceive <command> [options]"
;;
esac
}
[[ "$0" == "$BASH_SOURCE" ]] && main $@
`
// sshAuthorizedKey renders an authorized_keys entry that forces the given
// public key through the gitreceive command. (gofmt: removed the stray
// space before the parameter list.)
func sshAuthorizedKey(sshPublicKey string) string {
	return fmt.Sprintf(sshAuthorizedKeyEntry, sshPublicKey)
}
|
package commands
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// init attaches the node-related commands to the root command and records
// them in the "node" sub-command group.
func init() {
	nodeCmds := []*cobra.Command{
		CheckAddressCmd,
	}
	RootCmd.AddCommand(nodeCmds...)
	RootSubCmdGroups["node"] = nodeCmds
}
// CheckAddressCmd validates an address via the node RPC "checkAddress"
// call. Args: <address> [network]; network defaults to "testnet".
var CheckAddressCmd = &cobra.Command{
	Use: "checkAddress",
	Short: "check Address",
	Aliases: []string{"checkaddress"},
	Example: `
	checkAddress TmhoUdBKgRUhL7d4sGVQYi3zBMjhxeEvvmF
	`,
	Args: cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		var err error
		var info string
		address := args[0]
		// Second positional argument optionally overrides the network.
		network := "testnet"
		if len(args) > 1 {
			network = args[1]
		}
		params := []interface{}{address, network}
		info, err = getResString("checkAddress", params)
		if err != nil {
			log.Error(cmd.Use+" err: ", err)
		} else {
			output(info)
		}
	},
}
|
package awesome_mapper2
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/Knetic/govaluate"
"github.com/superchalupa/sailfish/src/ocp/model"
)
// functionsInit guards the one-time allocation of the registry below.
var functionsInit sync.Once
// functions is the shared registry of expression functions exposed to
// govaluate-based mapper rules.
var functions map[string]govaluate.ExpressionFunction
// InitFunctions lazily allocates the shared function registry exactly once
// and returns it.
// NOTE(review): subsequent map reads/writes are not lock-protected; this
// assumes all AddFunction calls happen during startup — confirm.
func InitFunctions() map[string]govaluate.ExpressionFunction {
	functionsInit.Do(func() { functions = map[string]govaluate.ExpressionFunction{} })
	return functions
}
// AddFunction registers fn under name in the shared registry, ensuring the
// registry exists first. Re-registering a name overwrites it.
func AddFunction(name string, fn func(args ...interface{}) (interface{}, error)) {
	InitFunctions()
	functions[name] = fn
}
// init registers the built-in expression functions available to mapper
// rules. Each closure receives loosely-typed args from govaluate and must
// validate them itself; numeric values from govaluate arrive as float64.
func init() {
	InitFunctions()
	// debugging function
	AddFunction("echo", func(args ...interface{}) (interface{}, error) {
		fmt.Println(args...)
		return true, nil
	})
	// array collects all arguments into a slice.
	AddFunction("array",
		func(args ...interface{}) (interface{}, error) {
			a := []interface{}{}
			for _, i := range args {
				a = append(a, i)
			}
			return a, nil
		})
	// set_hash_value sets hash[key] = value via reflection, returning value.
	AddFunction("set_hash_value",
		func(args ...interface{}) (interface{}, error) {
			// BUG FIX: args[2] is accessed below, so three arguments are
			// required; the old `len(args) < 2` check allowed a panic.
			if len(args) < 3 {
				return nil, errors.New("set_hash_value failed, not enough arguments")
			}
			hash := reflect.ValueOf(args[0])
			key := reflect.ValueOf(args[1])
			value := reflect.ValueOf(args[2])
			hash.SetMapIndex(key, value)
			return args[2], nil
		})
	// int coerces ints, floats and numeric strings to float64.
	AddFunction("int", func(args ...interface{}) (interface{}, error) {
		switch t := args[0].(type) {
		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
			return float64(reflect.ValueOf(t).Int()), nil
		case float32, float64:
			return float64(reflect.ValueOf(t).Float()), nil
		case string:
			float, err := strconv.ParseFloat(t, 64)
			return float, err
		default:
			return nil, errors.New("cant parse non-string")
		}
	})
	// removefromset removes the first occurrence of a string from a model
	// property holding a []string and returns the resulting slice.
	AddFunction("removefromset", func(args ...interface{}) (interface{}, error) {
		model, ok := args[0].(*model.Model)
		if !ok {
			return nil, errors.New("need model as first arg")
		}
		property, ok := args[1].(string)
		if !ok {
			return nil, errors.New("need property name as second arg")
		}
		str, ok := args[2].(string)
		if !ok {
			return nil, errors.New("need new value as third arg")
		}
		v, ok := model.GetPropertyOk(property)
		if !ok || v == nil {
			v = []string{}
		}
		vStr, ok := v.([]string)
		if !ok {
			v = []string{}
			vStr = v.([]string)
		}
		ret := []string{}
		for i := range vStr {
			if vStr[i] == str {
				// Splice out element i, keeping order.
				ret = vStr[:i]
				if i+1 < len(vStr) {
					ret = append(ret, vStr[i+1:]...)
				}
				break
			}
		}
		return ret, nil
	})
	// addtoset appends a string to a model property holding a []string,
	// unless it is already present.
	AddFunction("addtoset", func(args ...interface{}) (interface{}, error) {
		model, ok := args[0].(*model.Model)
		if !ok {
			return nil, errors.New("need model as first arg")
		}
		property, ok := args[1].(string)
		if !ok {
			return nil, errors.New("need property name as second arg")
		}
		str, ok := args[2].(string)
		if !ok {
			return nil, errors.New("need new value as third arg")
		}
		v, ok := model.GetPropertyOk(property)
		if !ok || v == nil {
			v = []string{}
		}
		vStr, ok := v.([]string)
		if !ok {
			v = []string{}
			vStr = v.([]string)
		}
		found := false
		for i := range vStr {
			if vStr[i] == str {
				found = true
			}
		}
		if !found {
			vStr = append(vStr, str)
		}
		return vStr, nil
	})
	// nohash returns true when the string contains no '#'.
	AddFunction("nohash", func(args ...interface{}) (interface{}, error) {
		str, ok := args[0].(string)
		if !ok {
			return nil, errors.New("expected a string argument")
		}
		if i := strings.Index(str, "#"); i > -1 {
			return false, nil
		}
		return true, nil
	})
	// baseuri strips the last path segment off a URI.
	AddFunction("baseuri", func(args ...interface{}) (interface{}, error) {
		str, ok := args[0].(string)
		if !ok {
			return nil, errors.New("expected a string argument")
		}
		return path.Dir(str), nil
	})
	// strlen returns the byte length of a string as float64.
	AddFunction("strlen", func(args ...interface{}) (interface{}, error) {
		length := len(args[0].(string))
		return (float64)(length), nil
	})
	// epoch_to_date converts a Unix-seconds float into a time.Time.
	AddFunction("epoch_to_date", func(args ...interface{}) (interface{}, error) {
		return time.Unix(int64(args[0].(float64)), 0), nil
	})
	// traverse_struct walks nested struct fields by name via reflection.
	AddFunction("traverse_struct", func(args ...interface{}) (interface{}, error) {
		s := args[0]
		for _, name := range args[1:] {
			n := name.(string)
			r := reflect.ValueOf(s)
			s = reflect.Indirect(r).FieldByName(n).Interface()
		}
		// have to return float64 for all numeric types
		switch t := s.(type) {
		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
			return float64(reflect.ValueOf(t).Int()), nil
		case float32, float64:
			return float64(reflect.ValueOf(t).Float()), nil
		default:
			return s, nil
		}
	})
	// map_health_value maps a numeric status to a Redfish health string.
	AddFunction("map_health_value", func(args ...interface{}) (interface{}, error) { //todo: turn into hash
		switch t := args[0].(float64); t {
		case 0, 1: //other, unknown
			return nil, nil
		case 2: //ok
			return "OK", nil
		case 3: //non-critical
			return "Warning", nil
		case 4, 5: //critical, non-recoverable
			return "Critical", nil
		default:
			return nil, errors.New("Invalid object status")
		}
	})
	// map_led_value maps firmware LED state names to Redfish LED values.
	AddFunction("map_led_value", func(args ...interface{}) (interface{}, error) { //todo: turn into hash
		switch t := args[0].(string); t {
		case "Blink-Off", "BLINK-OFF":
			return "Lit", nil
		case "Blink-1", "Blink-2", "BLINK-ON":
			return "Blinking", nil
		default:
			return nil, nil
		}
	})
	// string renders ints, uints, floats and strings as a string.
	AddFunction("string", func(args ...interface{}) (interface{}, error) {
		switch t := args[0].(type) {
		case int, int8, int16, int32, int64:
			str := strconv.FormatInt(reflect.ValueOf(t).Int(), 10)
			return str, nil
		case uint, uint8, uint16, uint32, uint64:
			str := strconv.FormatUint(reflect.ValueOf(t).Uint(), 10)
			return str, nil
		case float32, float64:
			str := strconv.FormatFloat(reflect.ValueOf(t).Float(), 'G', -1, 64)
			return str, nil
		case string:
			return t, nil
		default:
			return nil, errors.New("Not an int, float, or string")
		}
	})
	AddFunction("zero_to_null", func(args ...interface{}) (interface{}, error) {
		// NOTE(review): this equality only matches an untyped int 0; govaluate
		// numerics are float64, so it may never trigger — confirm intent.
		if args[0] == 0 {
			return nil, nil
		}
		return args[0], nil
	})
	// subsystem_health extracts the subsystem name from the FQDD and pairs
	// it with the reported health, skipping absent subsystems.
	AddFunction("subsystem_health", func(args ...interface{}) (interface{}, error) {
		fqdd := strings.Split(args[0].(map[string]string)["FQDD"], "#")
		subsys := fqdd[len(fqdd)-1]
		health := args[0].(map[string]string)["Health"]
		if health == "Absent" {
			return nil, nil
		}
		// BUG FIX: key and value were swapped ({health: "health"}); return
		// the health string under the "health" key, mirroring "subsys".
		return map[string]string{"subsys": subsys, "health": health}, nil
	})
	// read_file returns the contents of the given file and then deletes it;
	// the sentinel path "NONE" yields nil.
	AddFunction("read_file", func(args ...interface{}) (interface{}, error) {
		lines := ""
		file_path := args[0].(string)
		if file_path == "NONE" {
			return nil, nil
		}
		bytes, err := ioutil.ReadFile(file_path)
		if err != nil {
			return nil, err
		} else {
			lines = string(bytes)
		}
		err = os.Remove(file_path)
		if err != nil {
			return lines, err
		}
		return lines, nil
	})
	// encryptn_ability decodes the drive's encryption-capability bit.
	AddFunction("encryptn_ability", func(args ...interface{}) (interface{}, error) {
		var attributes int64 = int64(args[0].(float64))
		if attributes&0x04 == 0x04 {
			return "SelfEncryptingDrive", nil
		} else {
			return "None", nil
		}
	})
	// encryptn_status decodes the drive's security-state bits.
	AddFunction("encryptn_status", func(args ...interface{}) (interface{}, error) {
		var security int64 = int64(args[0].(float64))
		if security&0x01 == 0x01 {
			return "Unlocked", nil
		} else if security&0x02 == 0x02 {
			return "Locked", nil
		} else if security&0x04 == 0x04 {
			return "Foreign", nil
		} else {
			return "Unencrypted", nil
		}
	})
	// fail_predicted is true when both attribute words carry the
	// failure-predicted bit.
	AddFunction("fail_predicted", func(args ...interface{}) (interface{}, error) {
		var attributes int64 = int64(args[0].(float64))
		var objattributes int64 = int64(args[1].(float64))
		if attributes&0x01 == 0x01 && objattributes&01 == 0x01 {
			return true, nil
		} else {
			return false, nil
		}
	})
	// hotspare decodes the hot-spare role bits.
	AddFunction("hotspare", func(args ...interface{}) (interface{}, error) {
		var hotspare int8 = int8(args[0].(float64))
		if hotspare&0x01 == 0x01 {
			return "Dedicated", nil
		} else if hotspare&0x02 == 0x02 {
			return "Global", nil
		} else {
			return "None", nil
		}
	})
}
|
package main
import (
"os"
"12306.com/12306/common"
"12306.com/12306/stations"
"12306.com/12306/trains"
"12306.com/12306/users"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
"github.com/spf13/viper"
)
// main wires configuration, database migrations and the HTTP router
// together, then blocks serving requests on the configured port (or the
// framework default when no port is configured).
func main() {
	InitConfig()

	db := common.InitDB()
	db.AutoMigrate(&users.User{})
	db.AutoMigrate(&stations.Station{})
	db.AutoMigrate(&trains.Order{})
	db.AutoMigrate(&users.Passanger{})
	defer db.Close()

	router := CollectRoute(gin.Default()) // register routes

	if port := viper.GetString("server.port"); port != "" {
		panic(router.Run(":" + port))
	}
	panic(router.Run())
}
// InitConfig loads config/application.yml (relative to the working
// directory) into viper, panicking if the file cannot be read.
func InitConfig() {
	workDir, _ := os.Getwd()
	viper.SetConfigName("application")
	viper.SetConfigType("yml")
	viper.AddConfigPath(workDir + "/config")
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httputil
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net/http"
"testing"
"time"
"github.com/go-chi/chi"
"github.com/stretchr/testify/require"
"github.com/uber/kraken/utils/randutil"
"github.com/uber/kraken/utils/testutil"
)
// genKeyPair generates a 4096-bit RSA key pair and an x509 certificate for it,
// returning the certificate PEM, the passphrase-encrypted private-key PEM, and
// the passphrase bytes. When caPEM/caKeyPEM/caSecret are nil the certificate
// is self-signed and marked as a root CA; otherwise it is signed by the given
// CA. (Fixes the misspelled parameter name "caSercret".)
func genKeyPair(t *testing.T, caPEM, caKeyPEM, caSecret []byte) (certPEM, keyPEM, secretBytes []byte) {
	require := require.New(t)

	secret := randutil.Text(12)

	priv, err := rsa.GenerateKey(rand.Reader, 4096)
	require.NoError(err)

	pub := priv.Public()
	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			Organization: []string{"kraken"},
			CommonName:   "kraken",
		},
		NotBefore:             time.Now().Add(-5 * time.Minute),
		NotAfter:              time.Now().Add(time.Hour * 24 * 180),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
		// Need for identifying root CA.
		IsCA: caPEM == nil,
	}
	parent := &template
	parentPriv := priv
	// If caPEM is provided, certificate generated should have ca cert as parent.
	if caPEM != nil {
		block, _ := pem.Decode(caPEM)
		require.NotNil(block)
		caCert, err := x509.ParseCertificate(block.Bytes)
		require.NoError(err)
		block, _ = pem.Decode(caKeyPEM)
		require.NotNil(block)
		// NOTE(review): DecryptPEMBlock/EncryptPEMBlock are deprecated in
		// modern Go; kept here since this is test-only code.
		decoded, err := x509.DecryptPEMBlock(block, caSecret)
		require.NoError(err)
		caKey, err := x509.ParsePKCS1PrivateKey(decoded)
		require.NoError(err)
		parent = caCert
		parentPriv = caKey
	}
	// Certificate should be signed with parent certificate, parent private key and child public key.
	// If the certificate is self-signed, parent is an empty template, and parent private key is the private key of the public key.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, parent, pub, parentPriv)
	require.NoError(err)
	// Encode cert and key to PEM format.
	cert := &bytes.Buffer{}
	require.NoError(pem.Encode(cert, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}))
	encrypted, err := x509.EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY", x509.MarshalPKCS1PrivateKey(priv), secret, x509.PEMCipherAES256)
	require.NoError(err)
	return cert.Bytes(), pem.EncodeToMemory(encrypted), secret
}
// genCerts generates a self-signed server CA plus a client certificate signed
// by that CA, writes cert/key/passphrase material to temp files, and returns
// a TLSConfig pointing at those files together with a cleanup function that
// removes them. On panic, the deferred Recover runs the accumulated cleanups.
func genCerts(t *testing.T) (config *TLSConfig, cleanupfunc func()) {
	var cleanup testutil.Cleanup
	defer cleanup.Recover()
	// Server cert, which is also the root CA.
	sCertPEM, sKeyPEM, sSecretBytes := genKeyPair(t, nil, nil, nil)
	sCert, c := testutil.TempFile(sCertPEM)
	cleanup.Add(c)
	// Client cert, signed with root CA.
	cCertPEM, cKeyPEM, cSecretBytes := genKeyPair(t, sCertPEM, sKeyPEM, sSecretBytes)
	cSecret, c := testutil.TempFile(cSecretBytes)
	cleanup.Add(c)
	cCert, c := testutil.TempFile(cCertPEM)
	cleanup.Add(c)
	cKey, c := testutil.TempFile(cKeyPEM)
	cleanup.Add(c)
	config = &TLSConfig{}
	config.Name = "kraken"
	// NOTE(review): the same CA file is listed twice — presumably to exercise
	// duplicate-CA handling in createCertPool; confirm before deduplicating.
	config.CAs = []Secret{{sCert}, {sCert}}
	config.Client.Cert.Path = cCert
	config.Client.Key.Path = cKey
	config.Client.Passphrase.Path = cSecret
	return config, cleanup.Run
}
// startTLSServer starts a TLS HTTP server using a freshly generated
// self-signed certificate, requiring and verifying client certificates
// against clientCAs. It returns the listen address, the server certificate
// as a Secret (for clients to trust), and a cleanup function that closes the
// listener and removes the temp files.
func startTLSServer(t *testing.T, clientCAs []Secret) (addr string, serverCA Secret, cleanupFunc func()) {
	var cleanup testutil.Cleanup
	defer cleanup.Recover()
	// Generate the server's own self-signed cert/key/passphrase.
	certPEM, keyPEM, passphrase := genKeyPair(t, nil, nil, nil)
	certPath, c := testutil.TempFile(certPEM)
	cleanup.Add(c)
	passphrasePath, c := testutil.TempFile(passphrase)
	cleanup.Add(c)
	keyPath, c := testutil.TempFile(keyPEM)
	cleanup.Add(c)
	require := require.New(t)
	var err error
	// Decrypt the key from disk so tls.X509KeyPair can parse it.
	keyPEM, err = parseKey(keyPath, passphrasePath)
	require.NoError(err)
	x509cert, err := tls.X509KeyPair(certPEM, keyPEM)
	require.NoError(err)
	caPool, err := createCertPool(clientCAs)
	require.NoError(err)
	config := &tls.Config{
		Certificates: []tls.Certificate{x509cert},
		ServerName:   "kraken",
		// A list of trusted CAs to verify certificates from clients.
		// In this test, server is using the root CA as both cert and trusted client CA.
		ClientCAs: caPool,
		// Enforce tls on client.
		ClientAuth: tls.RequireAndVerifyClientCert,
		// NOTE(review): several CBC suites here are considered weak;
		// presumably mirroring production config — confirm.
		CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256},
	}
	// Port 0 lets the OS pick a free port.
	l, err := tls.Listen("tcp", ":0", config)
	require.NoError(err)
	r := chi.NewRouter()
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, "OK")
	})
	go http.Serve(l, r)
	cleanup.Add(func() { l.Close() })
	return l.Addr().String(), Secret{certPath}, cleanup.Run
}
// TestTLSClientDisabled verifies that BuildClient returns a nil TLS config
// when the client side is explicitly disabled.
func TestTLSClientDisabled(t *testing.T) {
	require := require.New(t)

	var cfg TLSConfig
	cfg.Client.Disabled = true

	clientTLS, err := cfg.BuildClient()
	require.NoError(err)
	require.Nil(clientTLS)
}
// TestTLSClientSuccess verifies that a client built from genCerts can reach
// two TLS servers whose CAs it trusts.
func TestTLSClientSuccess(t *testing.T) {
	t.Skip("TODO https://github.com/uber/kraken/issues/230")
	require := require.New(t)

	c, cleanup := genCerts(t)
	defer cleanup()

	addr1, serverCA1, stop := startTLSServer(t, c.CAs)
	defer stop()
	addr2, serverCA2, stop := startTLSServer(t, c.CAs)
	defer stop()
	c.CAs = append(c.CAs, serverCA1, serverCA2)

	clientTLS, err := c.BuildClient()
	require.NoError(err)

	for _, addr := range []string{addr1, addr2} {
		resp, err := Get("https://"+addr+"/", SendTLS(clientTLS))
		require.NoError(err)
		require.Equal(http.StatusOK, resp.StatusCode)
	}
}
// TestTLSClientBadAuth verifies that a client with no certificates fails to
// reach a server that requires client auth, surfacing a network error.
func TestTLSClientBadAuth(t *testing.T) {
	t.Skip("TODO https://github.com/uber/kraken/issues/230")
	require := require.New(t)

	goodConfig, cleanup := genCerts(t)
	defer cleanup()

	addr, _, stop := startTLSServer(t, goodConfig.CAs)
	defer stop()

	badTLS, err := (&TLSConfig{}).BuildClient()
	require.NoError(err)

	_, err = Get("https://"+addr+"/", SendTLS(badTLS), DisableHTTPFallback())
	require.True(IsNetworkError(err))
}
// TestTLSClientFallback verifies that an https request against a plain HTTP
// server succeeds via the HTTP fallback path.
func TestTLSClientFallback(t *testing.T) {
	t.Skip("TODO https://github.com/uber/kraken/issues/230")
	require := require.New(t)

	clientTLS, err := (&TLSConfig{}).BuildClient()
	require.NoError(err)

	mux := chi.NewRouter()
	mux.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, "OK")
	})
	addr, stop := testutil.StartServer(mux)
	defer stop()

	resp, err := Get("https://"+addr+"/", SendTLS(clientTLS))
	require.NoError(err)
	require.Equal(http.StatusOK, resp.StatusCode)
}
// TestTLSClientFallbackError verifies that the fallback path still reports an
// error for an unreachable host.
func TestTLSClientFallbackError(t *testing.T) {
	t.Skip("TODO https://github.com/uber/kraken/issues/230")
	require := require.New(t)

	clientTLS, err := (&TLSConfig{}).BuildClient()
	require.NoError(err)

	_, err = Get("https://some-non-existent-addr/", SendTLS(clientTLS))
	require.Error(err)
}
|
package api
// ComparisonOperator compares two operands.
type ComparisonOperator string

// Supported comparison operators; the string values are the serialized form.
const (
	// Equal operator.
	Equal ComparisonOperator = "eq"
	// NotEqual operator.
	NotEqual ComparisonOperator = "ne"
	// Less operator.
	Less ComparisonOperator = "lt"
	// LessOrEqual operator.
	LessOrEqual ComparisonOperator = "lte"
	// Greater operator.
	Greater ComparisonOperator = "gt"
	// GreaterOrEqual operator.
	GreaterOrEqual ComparisonOperator = "gte"
)
// Comparison is a comparison of a key, which needs to be resolved with a storer,
// and a query value with a comparison operator. It is the leaf of an Expression.
type Comparison struct {
	// Key uniquely identifies a storer field.
	Key string
	// Value is compared to the resolved value of key.
	Value QueryValue
	// Op is the comparison operator used.
	Op ComparisonOperator
}
// LogicalOperator evaluates the boolean result of two operands.
type LogicalOperator string

// Supported logical operators.
const (
	// And operator.
	And LogicalOperator = "and"
	// Or operator.
	Or LogicalOperator = "or"
)
// Expression applies a logical operator to multiple comparisons and expressions.
type Expression struct {
	// Comp holds the leaf comparisons joined by Op.
	Comp []Comparison
	// Exp holds nested sub-expressions joined by Op.
	Exp []Expression
	// Op is the logical operator (And/Or) combining Comp and Exp.
	Op LogicalOperator
}
|
/**
* Copyright (c) 2018-present, MultiVAC Foundation.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package chain
import (
"fmt"
"github.com/multivactech/MultiVAC/base/db"
"github.com/multivactech/MultiVAC/base/rlp"
"github.com/multivactech/MultiVAC/base/util"
"github.com/multivactech/MultiVAC/configs/config"
"github.com/multivactech/MultiVAC/logger"
"github.com/multivactech/MultiVAC/logger/btclog"
"github.com/multivactech/MultiVAC/model/chaincfg/chainhash"
"github.com/multivactech/MultiVAC/model/chaincfg/genesis"
"github.com/multivactech/MultiVAC/model/chaincfg/multivacaddress"
"github.com/multivactech/MultiVAC/model/shard"
"github.com/multivactech/MultiVAC/model/wire"
"github.com/multivactech/MultiVAC/processor/shared/message"
)
// Key templates for the disk store; each template is filled with fmt.Sprintf
// to produce a database key. dbNameSpace names the chain-data database.
const (
	headerKeyTemplate                 = "header_%s"
	blockKeyTemplate                  = "block_%s"
	indexAndHeightKeyTemplate         = "%d_%d"
	smartContractKeyTemplate          = "smartContract_%s"
	slimBlockKeyTemplate              = "toShard_%d_shard_%d_height_%d"
	smartContractShardInitOutTemplate = "smartContractInitOut_%s_%d"
	smartContractCodeOutTemplate      = "smartContractCodeOut_%s_%d"
	dbNameSpace                       = "chainData"
)
// diskBlockChain is a disk-backed blockchain store: headers, blocks, slim
// blocks and smart-contract data are persisted in chainDB.
type diskBlockChain struct {
	// The current max continuous height of the block chain, per shard.
	curShardsHeight map[shard.Index]wire.BlockHeight
	// Database for disk store
	chainDB db.DB
	// used to trigger synchronization
	syncTriggers map[shard.Index]SyncTrigger
	// logger for this component
	logger btclog.Logger
}
// headerNode pairs a block header with its precomputed hash.
type headerNode struct {
	headerHash chainhash.Hash
	header     *wire.BlockHeader
}
// newDiskBlockChain returns the disk-backed blockChain instance. It opens the
// chain database, configures logging, restores each shard's current height
// from the database, then seeds any shard without a recorded height with its
// genesis block via init(). Panics if the database cannot be opened.
func newDiskBlockChain() *diskBlockChain {
	chain := diskBlockChain{}
	chain.curShardsHeight = make(map[shard.Index]wire.BlockHeight)
	chain.syncTriggers = make(map[shard.Index]SyncTrigger)
	// TODO: Temporary solution for disk store.
	// Miner node doesn't need to store all data.
	db, err := db.OpenDB(config.GlobalConfig().DataDir, dbNameSpace)
	if err != nil {
		panic(err)
	}
	chain.chainDB = db
	// log config (logBackend is declared elsewhere in this package)
	chain.logger = logBackend.Logger("BlockChain-Disk")
	chain.logger.SetLevel(logger.ChainLogLevel)
	// read curHeight from db; a miss is expected on first start
	for _, shardIndex := range shard.ShardList {
		rawData, err := chain.chainDB.Get(util.GetShardHeightKey(shardIndex.GetID()))
		if err != nil {
			chain.logger.Debugf("there's no curHeight in db : %v. If it's first start, can ingore this error info.", err)
		} else {
			height := util.BytesToInt64(rawData)
			chain.logger.Debugf("shard:%d height %d from db", shardIndex, height)
			chain.curShardsHeight[shardIndex] = wire.BlockHeight(height)
		}
	}
	chain.init()
	return &chain
}
// init seeds every shard that has no recorded height with its genesis block,
// panicking if a genesis block cannot be stored.
func (chain *diskBlockChain) init() {
	for _, idx := range shard.ShardList {
		if _, present := chain.curShardsHeight[idx]; present {
			continue
		}
		if !chain.ReceiveBlock(genesis.GenesisBlocks[idx]) {
			panic("can't load genesis block!")
		}
		chain.curShardsHeight[idx] = 1
	}
}
// SetSyncTrigger registers the synchronization trigger used for the given
// shard. (The parameter is renamed from "shard" so it no longer shadows the
// imported shard package.)
func (chain *diskBlockChain) SetSyncTrigger(shardIdx shard.Index, trigger SyncTrigger) {
	chain.syncTriggers[shardIdx] = trigger
}
// ReceiveHeader stores a header received from any shard; it reports whether
// the header was accepted and persisted.
func (chain *diskBlockChain) ReceiveHeader(header *wire.BlockHeader) bool {
	chain.logger.Debugf("receiving block header %v", header)
	node := chain.receiveAllHeader(header)
	return node != nil
}
// ReceiveBlock persists a block received from any shard: its header (keyed by
// hash and by shard/height), the RLP-encoded block body, any smart contracts
// it carries, and any smart-contract outs. Returns false if the header or the
// block cannot be stored; failures saving outs are only logged.
func (chain *diskBlockChain) ReceiveBlock(block *wire.MsgBlock) bool {
	chain.logger.Debugf("receiving block %v", block)
	h := chain.receiveAllHeader(&block.Header)
	if h == nil {
		return false
	}
	// encoding block
	dataToStore, err := rlp.EncodeToBytes(block)
	if err != nil {
		return false
	}
	// save block to db
	key := getBlockKey(block.Header.BlockHeaderHash())
	err = chain.chainDB.Put(key, dataToStore)
	if err != nil {
		chain.logger.Error(err)
		return false
	}
	// save smart contract to db
	// NOTE(review): the bool result of receiveSmartContracts is ignored here
	// but checked in ReceiveSlimBlock — confirm the asymmetry is intended.
	if scs := block.Body.SmartContracts; len(scs) > 0 {
		chain.receiveSmartContracts(scs)
	}
	// save smart contract outs to db
	for _, out := range block.Body.Outs {
		if out.IsSmartContractCode() {
			err = chain.saveSmartContractCodeOut(out.ContractAddress, out.Shard, out)
			if err != nil {
				chain.logger.Errorf("fail to saveSmartContractCodeOut,contractAddr: %v,"+
					"shard: %v,out: %v", out.ContractAddress, out.Shard, out)
			}
		} else if out.IsSmartContractShardInitOut() {
			err = chain.saveSmartContractShardInitOut(out.ContractAddress, out.Shard, out)
			if err != nil {
				chain.logger.Errorf("fail to saveSmartContractShardInitOut,contractAddr: %v,"+
					"shard: %v,out: %v", out.ContractAddress, out.Shard, out)
			}
		}
	}
	return true
}
// receiveAllHeader persists a block header keyed both by hash and by
// (shard index, height), advances the shard's max continuous height, and
// returns the stored header node. It returns the node unchanged when the
// header is already known, and nil when encoding or any database write fails.
func (chain *diskBlockChain) receiveAllHeader(h *wire.BlockHeader) *headerNode {
	header := &headerNode{headerHash: h.BlockHeaderHash(), header: h}
	shardIndex := header.header.ShardIndex
	hgt := wire.BlockHeight(header.header.Height)
	if chain.containsShardsBlock(shardIndex, hgt) {
		// We have received this block already, ignore it.
		chain.logger.Debugf("Inside receiveHeader, rejected because already exists: %v", h)
		return header
	}
	shardIndexAndHeight := shard.IndexAndHeight{Index: shardIndex, Height: int64(hgt)}
	// Temporary solution for disk store.
	dataToSave, err := rlp.EncodeToBytes(header.header)
	if err != nil {
		chain.logger.Error(err)
		return nil
	}
	// get keys
	headerKey := getHeaderKey(header.headerHash)
	indexAndHeightKey := getIndexAndHeightKey(shardIndexAndHeight)
	// save to db
	// BUG FIX: both Debugf calls below previously passed err without a
	// format verb ("...error:", err), so the error was never interpolated.
	err = chain.chainDB.Put(headerKey, dataToSave)
	if err != nil {
		chain.logger.Debugf("DBSave key: headerKey error: %v", err)
		return nil
	}
	err = chain.chainDB.Put(indexAndHeightKey, dataToSave)
	if err != nil {
		chain.logger.Debugf("DBSave key: indexAndHeightKey error: %v", err)
		return nil
	}
	// try to update shards height
	for nextHgt := chain.curShardsHeight[shardIndex] + 1; chain.containsShardsBlock(shardIndex, nextHgt); nextHgt++ {
		chain.curShardsHeight[shardIndex] = nextHgt
	}
	if err = chain.chainDB.Put(util.GetShardHeightKey(shardIndex.GetID()),
		util.Int64ToBytes(int64(chain.curShardsHeight[shardIndex]))); err != nil {
		chain.logger.Debugf("DBSave GetShardHeightKey to curShardsHeight failed, detail: %v", err)
		return nil
	}
	// start sync when receive a higher header
	// condition: currentHeight+1 < receiveHeight
	if chain.curShardsHeight[shardIndex]+1 < hgt {
		chain.logger.Debugf("Recevie a higher header(%d), current header hight is %d, start to sync", hgt, chain.curShardsHeight[shardIndex])
		// if trigger, ok := chain.syncTriggers[shardIndex]; ok {
		// 	trigger.MaybeSync()
		// }
	}
	return header
}
// GetShardsHeight returns the current max continuous height recorded for the
// given shard (the zero height if the shard is unknown).
func (chain *diskBlockChain) GetShardsHeight(shardIndex shard.Index) wire.BlockHeight {
	height := chain.curShardsHeight[shardIndex]
	return height
}
// containsShardsBlock reports whether a header exists for the given shard and
// height, based on the (index, height)-keyed entry in the database.
func (chain *diskBlockChain) containsShardsBlock(shardIndex shard.Index, hgt wire.BlockHeight) bool {
	key := getIndexAndHeightKey(shard.IndexAndHeight{Index: shardIndex, Height: int64(hgt)})
	header, err := chain.getHeaderByIndexAndHeight(key)
	return err == nil && header != nil
}
// GetShardsHeaderHashes returns the header hashes of the given shard for
// heights fromHgt..toHgt (inclusive). toHgt is clamped to the shard's current
// height; wire.ReqSyncToLatest requests everything up to the latest height.
// Heights whose header cannot be loaded are skipped. Returns nil when the
// shard has no blocks or the range is empty.
func (chain *diskBlockChain) GetShardsHeaderHashes(shardIndex shard.Index, fromHgt wire.BlockHeight, toHgt wire.BlockHeight) []chainhash.Hash {
	if chain.curShardsHeight[shardIndex] == 0 {
		return nil
	}
	if toHgt == wire.ReqSyncToLatest || toHgt > chain.GetShardsHeight(shardIndex) {
		toHgt = chain.GetShardsHeight(shardIndex)
	}
	chain.logger.Debugf("==== toHgt: %v, fromHgt: %v", toHgt, fromHgt)
	if toHgt < fromHgt {
		return nil
	}
	// BUG FIX: the range is inclusive, so it yields toHgt-fromHgt+1 entries;
	// the previous capacity of toHgt-fromHgt forced a reallocation on append.
	hashes := make([]chainhash.Hash, 0, toHgt-fromHgt+1)
	for i := fromHgt; i <= toHgt; i++ {
		shardIndexAndHeight := shard.IndexAndHeight{Index: shardIndex, Height: int64(i)}
		key := getIndexAndHeightKey(shardIndexAndHeight)
		header, err := chain.getHeaderByIndexAndHeight(key)
		if err != nil || header == nil {
			continue
		}
		hashes = append(hashes, header.BlockHeaderHash())
	}
	return hashes
}
// GetShardsBlockByHeight returns the block stored for the given shard and
// height, or nil when the header or block cannot be found.
func (chain *diskBlockChain) GetShardsBlockByHeight(shardIndex shard.Index, hgt wire.BlockHeight) *wire.MsgBlock {
	idxAndHgt := shard.IndexAndHeight{Index: shardIndex, Height: int64(hgt)}
	header, err := chain.getHeaderByIndexAndHeight(getIndexAndHeightKey(idxAndHgt))
	if err != nil || header == nil {
		return nil
	}
	// Resolve the full block through the hash-keyed entry.
	block, err := chain.getBlock(getBlockKey(header.BlockHeaderHash()))
	if err != nil {
		return nil
	}
	return block
}
// GetSlimBlock returns the stored SlimBlock addressed by destination shard,
// source shard and height, or nil if it cannot be loaded.
func (chain *diskBlockChain) GetSlimBlock(toshard shard.Index, shardIndex shard.Index, hgt wire.BlockHeight) *wire.SlimBlock {
	slimBlock, err := chain.getSlimBlock(getSlimBlockKey(int(toshard), int(shardIndex), int(hgt)))
	if err != nil {
		return nil
	}
	return slimBlock
}
// getSlimBlock loads and RLP-decodes a slim block stored under key. A missing
// key yields (nil, nil).
func (chain *diskBlockChain) getSlimBlock(key []byte) (*wire.SlimBlock, error) {
	rawData, err := chain.chainDB.Get(key)
	if err != nil {
		return nil, err
	}
	if len(rawData) == 0 {
		return nil, nil
	}
	var slimBlock wire.SlimBlock
	if err := rlp.DecodeBytes(rawData, &slimBlock); err != nil {
		return nil, err
	}
	return &slimBlock, nil
}
// ReceiveSlimBlock persists a slim block: its header, the slim-block message
// itself, any carried smart contracts, and the smart-contract outs in its
// clip-tree data. Unlike ReceiveBlock, any save failure makes it return false.
func (chain *diskBlockChain) ReceiveSlimBlock(msg *wire.SlimBlock) bool {
	var err error
	chain.logger.Debugf("receiving slim block shardIndex : %v, height : %v", msg.Header.ShardIndex, msg.Header.Height)
	h := chain.receiveAllHeader(&msg.Header)
	if h == nil {
		chain.logger.Errorf("fail to receiveallheader in receive slimblock")
		return false
	}
	// save slimBlock message to db
	err = chain.saveSlimBlock(msg)
	if err != nil {
		chain.logger.Errorf("fail to saveSlimBlock, shard : %v, height : %v, err: %v", msg.ToShard, msg.Header.Height, err)
		return false
	}
	// save smart contract to db
	if len(msg.SmartContracts) > 0 {
		flag := chain.receiveSmartContracts(msg.SmartContracts)
		if !flag {
			chain.logger.Errorf("fail to receiveSmartContracts, smartContracts: %v", msg.SmartContracts)
			return false
		}
	}
	// save smart contract outs to db; code outs and shard-init outs are
	// stored under different key templates.
	for _, out := range msg.ClipTreeData.Outs {
		if out.IsSmartContractCode() {
			err = chain.saveSmartContractCodeOut(out.ContractAddress, out.Shard, out)
			if err != nil {
				chain.logger.Errorf("fail to saveSmartContractCodeOut,contractAddr: %v,"+
					"shard: %v,out: %v, err: %v", out.ContractAddress, out.Shard, out, err)
				return false
			}
		} else if out.IsSmartContractShardInitOut() {
			err = chain.saveSmartContractShardInitOut(out.ContractAddress, out.Shard, out)
			if err != nil {
				chain.logger.Errorf("fail to saveSmartContractShardInitOut,contractAddr: %v,"+
					"shard: %v,out: %v, err: %v", out.ContractAddress, out.Shard, out, err)
				return false
			}
		}
	}
	return true
}
// saveSlimBlock RLP-encodes the slim block and writes it under its
// (toShard, sourceShard, height) key.
func (chain *diskBlockChain) saveSlimBlock(slimBlock *wire.SlimBlock) error {
	encoded, err := rlp.EncodeToBytes(slimBlock)
	if err != nil {
		return err
	}
	key := getSlimBlockKey(int(slimBlock.ToShard), int(slimBlock.Header.ShardIndex), int(slimBlock.Header.Height))
	return chain.chainDB.Put(key, encoded)
}
// GetShardsBlockByHash returns the block stored under the given header hash,
// or nil on a load error.
func (chain *diskBlockChain) GetShardsBlockByHash(headerHash chainhash.Hash) *wire.MsgBlock {
	block, err := chain.getBlock(getBlockKey(headerHash))
	if err != nil {
		return nil
	}
	return block
}
// GetShardsHeaderByHeight returns the header stored for the given shard and
// height, or nil when it is absent or cannot be read.
func (chain *diskBlockChain) GetShardsHeaderByHeight(shardIndex shard.Index, hgt wire.BlockHeight) *wire.BlockHeader {
	key := getIndexAndHeightKey(shard.IndexAndHeight{Index: shardIndex, Height: int64(hgt)})
	header, err := chain.getHeaderByIndexAndHeight(key)
	if err != nil || header == nil {
		return nil
	}
	return header
}
// GetShardsHeaderByHash returns the header stored under the given hash, or
// nil on a load error.
func (chain *diskBlockChain) GetShardsHeaderByHash(headerHash chainhash.Hash) *wire.BlockHeader {
	header, err := chain.getHeader(getHeaderKey(headerHash))
	if err != nil {
		return nil
	}
	return header
}
// getHeaderKey builds the disk-store key for a header hash.
func getHeaderKey(hash chainhash.Hash) []byte {
	return []byte(fmt.Sprintf(headerKeyTemplate, hash))
}
// getBlockKey builds the disk-store key for a block hash.
func getBlockKey(hash chainhash.Hash) []byte {
	return []byte(fmt.Sprintf(blockKeyTemplate, hash))
}
// getSlimBlockKey builds the disk-store key for a slim block addressed by
// destination shard, source shard and height.
func getSlimBlockKey(toShard, srcShard, height int) []byte {
	return []byte(fmt.Sprintf(slimBlockKeyTemplate, toShard, srcShard, height))
}
// getIndexAndHeightKey builds the disk-store key for a header addressed by
// shard index and height.
func getIndexAndHeightKey(shardIndexAndHeight shard.IndexAndHeight) []byte {
	return []byte(fmt.Sprintf(indexAndHeightKeyTemplate, shardIndexAndHeight.Index.GetID(), shardIndexAndHeight.Height))
}
// getHeader loads and RLP-decodes a block header stored under key. A missing
// key yields (nil, nil).
func (chain *diskBlockChain) getHeader(key []byte) (*wire.BlockHeader, error) {
	rawData, err := chain.chainDB.Get(key)
	if err != nil {
		return nil, err
	}
	if len(rawData) == 0 {
		return nil, nil
	}
	var header wire.BlockHeader
	if err := rlp.DecodeBytes(rawData, &header); err != nil {
		chain.logger.Error("RLP decoding error:", err)
		return nil, err
	}
	return &header, nil
}
// getHeaderByIndexAndHeight loads a header stored under a (shard index,
// height) key. The value stored under that key has the same encoding as the
// hash-keyed entry, so this delegates to getHeader instead of duplicating
// its body (the previous implementation was a byte-for-byte copy).
func (chain *diskBlockChain) getHeaderByIndexAndHeight(key []byte) (*wire.BlockHeader, error) {
	return chain.getHeader(key)
}
// getBlock loads and RLP-decodes a full block stored under key. A missing key
// yields (nil, nil).
func (chain *diskBlockChain) getBlock(key []byte) (*wire.MsgBlock, error) {
	rawData, err := chain.chainDB.Get(key)
	if err != nil {
		return nil, err
	}
	if len(rawData) == 0 {
		return nil, nil
	}
	var block wire.MsgBlock
	if err := rlp.DecodeBytes(rawData, &block); err != nil {
		chain.logger.Error("RLP decoding error:", err)
		return nil, err
	}
	return &block, nil
}
// getSmartContractKey builds the disk-store key for a smart contract address.
func getSmartContractKey(addr multivacaddress.Address) []byte {
	return []byte(fmt.Sprintf(smartContractKeyTemplate, addr))
}
// saveSmartContract persists the smart contract to the database, keyed by its
// contract address.
func (chain *diskBlockChain) saveSmartContract(contractAddr multivacaddress.Address, smartContract *wire.SmartContract) error {
	encoded, err := rlp.EncodeToBytes(smartContract)
	if err != nil {
		return err
	}
	return chain.chainDB.Put(getSmartContractKey(contractAddr), encoded)
}
// GetSmartContract returns the smart contract stored for the given contract
// address, or nil if it cannot be loaded.
func (chain *diskBlockChain) GetSmartContract(contractAddr multivacaddress.Address) *wire.SmartContract {
	sc, err := chain.getSmartContract(getSmartContractKey(contractAddr))
	if err != nil {
		chain.logger.Errorf("failed to GetSmartContract, err: %v", err)
		return nil
	}
	return sc
}
// getSmartContract loads and RLP-decodes the smart contract stored under key.
// Unlike the header/block getters, a missing key is reported as an error.
func (chain *diskBlockChain) getSmartContract(key []byte) (*wire.SmartContract, error) {
	rawData, err := chain.chainDB.Get(key)
	if err != nil {
		return nil, err
	}
	if len(rawData) == 0 {
		return nil, fmt.Errorf("no corresponding value was found based on the keyword: %s", key)
	}
	var sc wire.SmartContract
	if err := rlp.DecodeBytes(rawData, &sc); err != nil {
		return nil, err
	}
	return &sc, nil
}
// receiveSmartContracts persists every received smart contract; it stops and
// returns false on the first failure.
func (chain *diskBlockChain) receiveSmartContracts(scs []*wire.SmartContract) bool {
	for _, contract := range scs {
		if err := chain.saveSmartContract(contract.ContractAddr, contract); err != nil {
			chain.logger.Errorf("failed to receiveSmartContracts, err: %v", err)
			return false
		}
	}
	return true
}
// getSmartContractCodeOutKey builds the disk-store key for a smart contract
// code out; the format arguments are the contract address and shard index.
func getSmartContractCodeOutKey(contractAddr multivacaddress.Address, shardIdx shard.Index) []byte {
	return []byte(fmt.Sprintf(smartContractCodeOutTemplate, contractAddr, shardIdx))
}
// saveSmartContractCodeOut persists the code out produced by deploying a
// contract, keyed by contract address and shard.
func (chain *diskBlockChain) saveSmartContractCodeOut(contractAddr multivacaddress.Address, shardIdx shard.Index, out *wire.OutState) error {
	encoded, err := rlp.EncodeToBytes(out)
	if err != nil {
		return err
	}
	return chain.chainDB.Put(getSmartContractCodeOutKey(contractAddr, shardIdx), encoded)
}
// getSmartContractCodeOut returns the code out generated when the contract
// was deployed on the given shard, or nil if it cannot be loaded.
func (chain *diskBlockChain) getSmartContractCodeOut(contractAddr multivacaddress.Address, shardIdx shard.Index) *wire.OutState {
	out, err := chain.getSmartContractOut(getSmartContractCodeOutKey(contractAddr, shardIdx))
	if err != nil {
		chain.logger.Errorf("failed to getSmartContractCodeOut, err: %v", err)
		return nil
	}
	return out
}
// getSmartContractShardInitOutKey returns the key of smart contract shard init data out
func getSmartContractShardInitOutKey(contractAddr multivacaddress.Address, shardIdx shard.Index) []byte {
	// The two format arguments are the contract address and the shard index.
	// (The original comment mentioned a third OutPoint-index argument, but
	// the template only takes two values.)
	s := fmt.Sprintf(smartContractShardInitOutTemplate, contractAddr, shardIdx)
	return []byte(s)
}
// saveSmartContractShardInitOut persists a contract's shard-initialization
// out, keyed by contract address and shard.
func (chain *diskBlockChain) saveSmartContractShardInitOut(contractAddr multivacaddress.Address, shardIdx shard.Index, out *wire.OutState) error {
	encoded, err := rlp.EncodeToBytes(out)
	if err != nil {
		return err
	}
	return chain.chainDB.Put(getSmartContractShardInitOutKey(contractAddr, shardIdx), encoded)
}
// getSmartContractShardInitOut returns the contract's shard-initialization
// out for the given shard, or nil if it cannot be loaded.
func (chain *diskBlockChain) getSmartContractShardInitOut(contractAddr multivacaddress.Address, shardIdx shard.Index) *wire.OutState {
	out, err := chain.getSmartContractOut(getSmartContractShardInitOutKey(contractAddr, shardIdx))
	if err != nil {
		chain.logger.Errorf("fail to getSmartContractShardInitOut, err: %v", err)
		return nil
	}
	return out
}
// getSmartContractOut loads and RLP-decodes an OutState stored under key. A
// missing key is reported as an error.
func (chain *diskBlockChain) getSmartContractOut(key []byte) (*wire.OutState, error) {
	rawData, err := chain.chainDB.Get(key)
	if err != nil {
		return nil, err
	}
	if len(rawData) == 0 {
		return nil, fmt.Errorf("no corresponding value was found based on the keyword: %s", key)
	}
	out := &wire.OutState{}
	if err := rlp.DecodeBytes(rawData, out); err != nil {
		return nil, err
	}
	return out, nil
}
// Act dispatches a message.Event to the matching blockchain operation and,
// where the topic produces a result, passes it to callback. evtSetTrigger
// invokes no callback. The type assertions on e.Extra will panic on a
// mismatched payload — presumably guaranteed by the event producers; confirm.
func (chain *diskBlockChain) Act(e *message.Event, callback func(m interface{})) {
	switch e.Topic {
	case evtReceiveBlock:
		callback(chain.ReceiveBlock(e.Extra.(*wire.MsgBlock)))
	case evtReceiveHeader:
		callback(chain.ReceiveHeader(e.Extra.(*wire.BlockHeader)))
	case evtShardHeight:
		callback(chain.GetShardsHeight(e.Extra.(shard.Index)))
	case evtBlockByShardAndHeight:
		shardAndHgt := e.Extra.(shard.IndexAndHeight)
		callback(chain.GetShardsBlockByHeight(shardAndHgt.Index, wire.BlockHeight(shardAndHgt.Height)))
	case evtHeaderByShardAndHeight:
		shardAndHgt := e.Extra.(shard.IndexAndHeight)
		callback(chain.GetShardsHeaderByHeight(shardAndHgt.Index, wire.BlockHeight(shardAndHgt.Height)))
	case evtBlockByHash:
		callback(chain.GetShardsBlockByHash(e.Extra.(chainhash.Hash)))
	case evtHeaderByHash:
		callback(chain.GetShardsHeaderByHash(e.Extra.(chainhash.Hash)))
	case evtSetTrigger:
		params := e.Extra.(*triggerRequest)
		chain.SetSyncTrigger(params.shard, params.trigger)
	case evtShardHeaderHashes:
		locator := e.Extra.(*wire.BlockLocator)
		callback(chain.GetShardsHeaderHashes(
			locator.ShardIdx, wire.BlockHeight(locator.FromHeight), wire.BlockHeight(locator.ToHeight)))
	case evtSlimBlockMsgByShardAndHeight:
		saveSlimBlockParam := e.Extra.(saveSlimBlockRequest)
		callback(chain.GetSlimBlock(saveSlimBlockParam.toshard, saveSlimBlockParam.shard, saveSlimBlockParam.height))
	case evtSmartContractByAddress:
		callback(chain.GetSmartContract(e.Extra.(multivacaddress.Address)))
	case evtReceiveSlimBlock:
		callback(chain.ReceiveSlimBlock(e.Extra.(*wire.SlimBlock)))
	case evtSmartContractCodeOut:
		// Payload is [contract address, shard index].
		msg := e.Extra.([]interface{})
		contractAddr := msg[0].(multivacaddress.Address)
		shardIdx := msg[1].(shard.Index)
		callback(chain.getSmartContractCodeOut(contractAddr, shardIdx))
	case evtSmartContractShardInitOut:
		// Payload is [contract address, shard index].
		msg := e.Extra.([]interface{})
		contractAddr := msg[0].(multivacaddress.Address)
		shardIdx := msg[1].(shard.Index)
		callback(chain.getSmartContractShardInitOut(contractAddr, shardIdx))
	case evtReceiveSmartContractShardInitOut:
		msg := e.Extra.(*wire.OutState)
		contractAddr := msg.ContractAddress
		shardIdx := msg.Shard
		// callback receives the save error (nil on success).
		callback(chain.saveSmartContractShardInitOut(contractAddr, shardIdx, msg))
	}
}
|
package quicksort
import (
"testing"
)
// BenchmarkQuicksortIterative measures iterative quicksort over b.N
// pseudo-random ints. The timer is reset after slice setup so allocation and
// filling are excluded from the measurement; tiny b.N values are skipped as
// in the original.
func BenchmarkQuicksortIterative(b *testing.B) {
	if b.N > 10 {
		numbers := make([]int, b.N)
		fillWithRandomNumbers(numbers, 1000)
		b.ResetTimer() // exclude setup cost from the benchmark timing
		QuicksortIterative(numbers, 0, len(numbers)-1)
	}
}
// BenchmarkQuicksortRecursive measures recursive quicksort over b.N
// pseudo-random ints. The timer is reset after slice setup so allocation and
// filling are excluded from the measurement; tiny b.N values are skipped as
// in the original.
func BenchmarkQuicksortRecursive(b *testing.B) {
	if b.N > 10 {
		numbers := make([]int, b.N)
		fillWithRandomNumbers(numbers, 1000)
		b.ResetTimer() // exclude setup cost from the benchmark timing
		QuicksortRecursive(numbers, 0, len(numbers)-1)
	}
}
|
// customReader project doc.go
/*
Package documentation for the customReader project.
*/
package main
|
package m2go
// Relative REST endpoint path segments for configurable products —
// presumably the Magento 2 API routes (package m2go); confirm against the
// client code that joins them.
const (
	configurableProducts                   = "/configurable-products"
	configurableProductsOptionsRelative    = "options"
	configurableProductsOptionsAllRelative = "options/all"
	configurableProductsChildRelative      = "child"
)
|
package main
import "fmt"
// This pattern is rarely what you want, but it demonstrates fan-out: two
// goroutines draining one channel. Each value is delivered to exactly one
// reader, so the combined output contains no duplicates, though the
// interleaving between the two readers is nondeterministic.
func main() {
	// create channels
	values := make(chan int)
	done := make(chan bool)

	// Producer: push 0..99999, then close so the readers' range loops end.
	go func() {
		for i := 0; i < 100000; i++ {
			values <- i
		}
		close(values)
	}()

	// Two identical consumers competing for values from the same channel.
	// A channel receive removes the value, so each number is printed once —
	// whichever goroutine happens to receive it first.
	for reader := 0; reader < 2; reader++ {
		go func() {
			for n := range values {
				fmt.Println(n)
			}
			done <- true
		}()
	}

	// Wait for both consumers to signal completion before exiting.
	<-done
	<-done
}
|
package ravendb
import (
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
)
// StreamOperation represents a streaming operation against the document
// store: either a raw document stream (createRequest) or a streamed index
// query (createRequestForIndexQuery).
type StreamOperation struct {
	// session is the owning session, used for request counting and
	// conventions when building commands.
	session *InMemoryDocumentSessionOperations
	// statistics, when non-nil, receives the stats header parsed from a
	// query stream; setResult allocates one on demand if nil.
	statistics *StreamQueryStatistics
	// isQueryStream is set by createRequestForIndexQuery and makes
	// setResult expect a statistics header before the "Results" array.
	isQueryStream bool
}
// NewStreamOperation returns a new StreamOperation bound to the given
// session. statistics may be nil; for query streams, setResult allocates
// one on demand.
func NewStreamOperation(session *InMemoryDocumentSessionOperations, statistics *StreamQueryStatistics) *StreamOperation {
	op := &StreamOperation{}
	op.session = session
	op.statistics = statistics
	return op
}
// createRequestForIndexQuery builds the command for streaming the results
// of an index query. It marks this operation as a query stream and bumps
// the session's request counter.
func (o *StreamOperation) createRequestForIndexQuery(query *IndexQuery) (*QueryStreamCommand, error) {
	o.isQueryStream = true

	// Streaming never waits for indexing, so this flag cannot be honored.
	if query.waitForNonStaleResults {
		return nil, newUnsupportedOperationError("Since stream() does not wait for indexing (by design), streaming query with setWaitForNonStaleResults is not supported")
	}

	err := o.session.incrementRequestCount()
	if err != nil {
		return nil, err
	}

	return NewQueryStreamCommand(o.session.Conventions, query), nil
}
// createRequest builds the StreamCommand for streaming raw documents.
// Empty string arguments and zero numeric arguments are omitted from the
// query string; pageSize uses 0 as "unset" instead of MaxInt.
func (o *StreamOperation) createRequest(startsWith string, matches string, start int, pageSize int, exclude string, startAfter string) *StreamCommand {
	var params []string
	addParam := func(key, value string) {
		if value != "" {
			params = append(params, key+"="+urlUtilsEscapeDataString(value))
		}
	}
	addParam("startsWith", startsWith)
	addParam("matches", matches)
	addParam("exclude", exclude)
	addParam("startAfter", startAfter)
	if start != 0 {
		params = append(params, "start="+strconv.Itoa(start))
	}
	if pageSize != 0 {
		params = append(params, "pageSize="+strconv.Itoa(pageSize))
	}
	return NewStreamCommand("streams/docs?" + strings.Join(params, "&"))
}
func isDelimToken(tok json.Token, delim string) bool {
delimTok, ok := tok.(json.Delim)
return ok && delimTok.String() == delim
}
/* The response looks like:
{
  "Results": [
    {
      "foo": bar,
    }
  ]
}
For query streams (isQueryStream), the statistics fields read by
handleStreamQueryStats precede the "Results" key inside the same object.
*/
// setResult validates the start of the streamed response and positions the
// decoder just inside the "Results" array, returning an iterator over its
// elements. It errors if response is nil or the stream does not start with
// the expected structure.
func (o *StreamOperation) setResult(response *StreamResultResponse) (*yieldStreamResults, error) {
	if response == nil {
		return nil, newIllegalStateError("The index does not exists, failed to stream results")
	}
	dec := json.NewDecoder(response.Stream)
	tok, err := dec.Token()
	if err != nil {
		return nil, err
	}
	// we expect start of json object
	if !isDelimToken(tok, "{") {
		return nil, newIllegalStateError("Expected start object '{', got %T %s", tok, tok)
	}
	if o.isQueryStream {
		// Query streams carry a statistics header before the results;
		// make sure there is somewhere to store it.
		if o.statistics == nil {
			o.statistics = &StreamQueryStatistics{}
		}
		err = handleStreamQueryStats(dec, o.statistics)
		if err != nil {
			return nil, err
		}
	}
	// expecting object with a single field "Results" that is array of values
	tok, err = getTokenAfterObjectKey(dec, "Results")
	if err != nil {
		return nil, err
	}
	if !isDelimToken(tok, "[") {
		return nil, newIllegalStateError("Expected start array '[', got %T %s", tok, tok)
	}
	return newYieldStreamResults(response, dec), nil
}
func getNextDelimToken(dec *json.Decoder, delimStr string) error {
tok, err := dec.Token()
if err != nil {
return err
}
if delim, ok := tok.(json.Delim); ok || delim.String() == delimStr {
return nil
}
return fmt.Errorf("Expected delim token '%s', got %T %s", delimStr, tok, tok)
}
func getNextStringToken(dec *json.Decoder) (string, error) {
tok, err := dec.Token()
if err != nil {
return "", err
}
if s, ok := tok.(string); ok {
return s, nil
}
return "", fmt.Errorf("Expected string token, got %T %s", tok, tok)
}
func getTokenAfterObjectKey(dec *json.Decoder, name string) (json.Token, error) {
s, err := getNextStringToken(dec)
if err == nil {
if s != name {
return nil, fmt.Errorf("Expected string token named '%s', got '%s'", name, s)
}
}
return dec.Token()
}
func getNextObjectStringValue(dec *json.Decoder, name string) (string, error) {
tok, err := getTokenAfterObjectKey(dec, name)
if err != nil {
return "", err
}
s, ok := tok.(string)
if !ok {
return "", fmt.Errorf("Expected string token, got %T %s", tok, tok)
}
return s, nil
}
func getNextObjectBoolValue(dec *json.Decoder, name string) (bool, error) {
tok, err := getTokenAfterObjectKey(dec, name)
if err != nil {
return false, err
}
v, ok := tok.(bool)
if !ok {
return false, fmt.Errorf("Expected bool token, got %T %s", tok, tok)
}
return v, nil
}
func getNextObjectInt64Value(dec *json.Decoder, name string) (int64, error) {
tok, err := getTokenAfterObjectKey(dec, name)
if err != nil {
return 0, err
}
if v, ok := tok.(float64); ok {
return int64(v), nil
}
if v, ok := tok.(json.Number); ok {
return v.Int64()
}
return 0, fmt.Errorf("Expected number token, got %T %s", tok, tok)
}
// handleStreamQueryStats reads the fixed statistics header that precedes
// the "Results" array of a query stream and stores it into stats. The
// fields must appear in exactly this order: ResultEtag, IsStale,
// IndexName, TotalResults, IndexTimestamp.
func handleStreamQueryStats(dec *json.Decoder, stats *StreamQueryStatistics) error {
	var err error
	if stats.ResultEtag, err = getNextObjectInt64Value(dec, "ResultEtag"); err != nil {
		return err
	}
	if stats.IsStale, err = getNextObjectBoolValue(dec, "IsStale"); err != nil {
		return err
	}
	if stats.IndexName, err = getNextObjectStringValue(dec, "IndexName"); err != nil {
		return err
	}
	// Mirror the original: TotalResults is assigned (possibly zero) even
	// when the read fails.
	total, err := getNextObjectInt64Value(dec, "TotalResults")
	stats.TotalResults = int(total)
	if err != nil {
		return err
	}
	var ts string
	if ts, err = getNextObjectStringValue(dec, "IndexTimestamp"); err != nil {
		return err
	}
	stats.IndexTimestamp, err = ParseTime(ts)
	return err
}
// yieldStreamResults iterates over the values of the "Results" array that
// dec has been positioned inside (see StreamOperation.setResult).
type yieldStreamResults struct {
	// response is retained only so close() can release the underlying
	// stream.
	response *StreamResultResponse
	dec      *json.Decoder
	// err latches the first error encountered (including io.EOF at end of
	// stream) so every subsequent next() call fails fast with it.
	err error
}
// newYieldStreamResults returns an iterator over the "Results" array that
// dec has been positioned inside (see StreamOperation.setResult).
func newYieldStreamResults(response *StreamResultResponse, dec *json.Decoder) *yieldStreamResults {
	return &yieldStreamResults{
		response: response,
		dec:      dec,
	}
}
// next decodes next value from stream into v (callers pass a pointer —
// see nextJSONObject).
// returns io.EOF when reaching end of stream. Other errors indicate a
// parsing error. The first error is latched in r.err, so every subsequent
// call returns the same error.
func (r *yieldStreamResults) next(v interface{}) error {
	if r.err != nil {
		return r.err
	}
	// More() returns false if there is an error or ']' token
	if r.dec.More() {
		r.err = r.dec.Decode(&v)
		if r.err != nil {
			return r.err
		}
		return nil
	}
	// expect end of Results array
	r.err = getNextDelimToken(r.dec, "]")
	if r.err != nil {
		return r.err
	}
	// expect end of top-level json object
	r.err = getNextDelimToken(r.dec, "}")
	if r.err != nil {
		return r.err
	}
	// should now return nil, io.EOF to indicate end of stream
	_, r.err = r.dec.Token()
	return r.err
}
// nextJSONObject decodes the next value from the stream as a generic JSON
// object. It returns io.EOF when reaching the end of the stream; other
// errors indicate a parsing error.
func (r *yieldStreamResults) nextJSONObject() (map[string]interface{}, error) {
	var obj map[string]interface{}
	if err := r.next(&obj); err != nil {
		return nil, err
	}
	return obj, nil
}
// close releases the underlying response stream, if it can be closed.
// a bit of a hack: the Stream field is evidently declared as an interface
// without Close, so we only close when the concrete value also implements
// io.ReadCloser — NOTE(review): confirm the field's declared type.
func (r *yieldStreamResults) close() error {
	if rc, ok := r.response.Stream.(io.ReadCloser); ok {
		return rc.Close()
	}
	return nil
}
|
package lc
// Time: O(n log n) on average for n inserts (O(n) per insert on a
// degenerate, list-shaped tree).
// Benchmark: 480ms 169.4mb | 88% 15%

// TreeNode is a binary search tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// insert places val into the subtree rooted at node. Values smaller than
// node.Val descend left; values greater than or equal descend right
// (duplicates go right, matching the original's else branch).
//
// Fixes: the original's `if node == nil { node = &TreeNode{...} }` branch
// assigned only the local pointer and then fell through, silently losing
// the value; callers always pass a non-nil node, so it is now an explicit
// defensive no-op.
func insert(node *TreeNode, val int) {
	if node == nil {
		return
	}
	if val < node.Val {
		if node.Left != nil {
			insert(node.Left, val)
		} else {
			node.Left = &TreeNode{Val: val}
		}
		return
	}
	if node.Right != nil {
		insert(node.Right, val)
	} else {
		node.Right = &TreeNode{Val: val}
	}
}

// insertIntoBST inserts val into the BST rooted at root and returns the
// root (a fresh node when root is nil).
func insertIntoBST(root *TreeNode, val int) *TreeNode {
	if root == nil {
		return &TreeNode{Val: val}
	}
	insert(root, val)
	return root
}
|
package main
import (
"fmt"
)
// reverse reverses s in place by swapping elements from both ends toward
// the middle. Empty and single-element slices are left untouched.
func reverse(s []int) {
	for lo, hi := 0, len(s)-1; lo < hi; lo, hi = lo+1, hi-1 {
		s[lo], s[hi] = s[hi], s[lo]
	}
}
// printSlice writes the elements of s to stdout, each followed by a single
// space, then a newline (so the line carries a trailing space, exactly as
// the original did).
//
// Fixes: stray semicolons and non-gofmt spacing; the index loop is now an
// idiomatic range loop. Output is byte-identical to the original.
func printSlice(s []int) {
	for _, v := range s {
		fmt.Printf("%d ", v)
	}
	fmt.Printf("\n")
}
// main demonstrates in-place reversal on an even-length, an odd-length,
// and an empty slice, printing each before and after.
//
// Fixes: snake_case locals (slice_1, slice_2, slice_3) violated Go's
// MixedCaps convention; the three copy-pasted stanzas are folded into one
// table-driven loop with byte-identical output.
func main() {
	inputs := [][]int{
		{1, 2, 3, 4, 5, 6, 7, 8, 9},
		{1, 2, 3, 4, 5, 6, 7, 8},
		{},
	}
	for i, s := range inputs {
		fmt.Printf("Slice_%d:\n", i+1)
		printSlice(s)
		reverse(s)
		printSlice(s)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.