text
stringlengths
11
4.05M
package shutil import ( "io/ioutil" "os" "path" "testing" ) func TestMoveCrossDomain(t *testing.T) { var ( err error ) homeDir := os.Getenv("OTHER_DRIVE") if homeDir == "" { t.Skipf("OTHER_DRIVE env not set") } tmpDir, err := ioutil.TempDir("", "shutil-test") if err != nil { t.Fatalf("could not create temporary directory. what kind of shitty environment are you running?") } defer os.RemoveAll(tmpDir) tmpDir2, err := ioutil.TempDir(homeDir, "shutil-test") if err != nil { t.Fatalf("could not create secondary temporary directory.") } defer os.RemoveAll(tmpDir2) tmpPath := path.Join(tmpDir, "herp") finalPath := path.Join(tmpDir2, "derp") f, err := os.Create(tmpPath) if err != nil { t.Fatalf("couldnt create file: %v", err) } f.WriteString("derp") f.Close() t.Logf("moving %s to %s", tmpPath, finalPath) if err := MoveFile(tmpPath, finalPath); err != nil { t.Errorf("could not move file: %v", err) } }
/*
Farmer Feb has three fields with potatoes planted in them. He harvested x
potatoes from the first field, y potatoes from the second field and is yet to
harvest potatoes from the third field. Feb is very superstitious and believes
that if the sum of potatoes he harvests from the three fields is a prime
number (http://en.wikipedia.org/wiki/Prime_number), he'll make a huge profit.
Please help him by calculating for him the minimum number of potatoes that if
harvested from the third field will make the sum of potatoes prime. At least
one potato should be harvested from the third field.

Input
The first line of the input contains an integer T denoting the number of test
cases. Each of the next T lines contain 2 integers separated by single space:
x and y.

Output
For each test case, output a single line containing the answer.

Constraints
1 ≤ T ≤ 1000
1 ≤ x ≤ 1000
1 ≤ y ≤ 1000
*/
package main

import (
	"math"
	"math/big"
)

func main() {
	assert(potatoes(1, 3) == 1)
	assert(potatoes(4, 3) == 4)
}

// assert panics when the given condition does not hold.
func assert(ok bool) {
	if !ok {
		panic("assertion failed")
	}
}

// potatoes returns the smallest z >= 1 such that x+y+z is prime, or -1
// if no candidate below math.MaxInt64 is found.
func potatoes(x, y int64) int64 {
	harvested := x + y
	for candidate := harvested + 1; candidate < math.MaxInt64; candidate++ {
		if isprime(candidate) {
			return candidate - harvested
		}
	}
	return -1
}

// isprime reports whether n is prime. ProbablyPrime is documented to be
// 100% accurate for inputs below 2^64, so this is exact for int64.
func isprime(n int64) bool {
	return big.NewInt(n).ProbablyPrime(2)
}
package kata import ( "fmt" "testing" ) // digPow(89, 1) should return 1 since 8¹ + 9² = 89 = 89 * 1 // digPow(92, 1) should return -1 since there is no k such as 9¹ + 2² equals 92 * k // digPow(695, 2) should return 2 since 6² + 9³ + 5⁴= 1390 = 695 * 2 // digPow(46288, 3) should return 51 since 4³ + 6⁴+ 2⁵ + 8⁶ + 8⁷ = 2360688 = 46288 * 51 func Test(t *testing.T) { t.Skip() testCases := []struct { n uint p uint k int }{ {89, 1, 1}, {92, 1, -1}, {695, 2, 2}, {46288, 3, 51}, } for _, tC := range testCases { t.Run(fmt.Sprintf("n: %v, p: %v, k: %v", tC.n, tC.p, tC.k), func(t *testing.T) { if result := DigPow(tC.n, tC.p); result != tC.k { t.Fatalf("DigPow(%v, %v) == '%v', wanted '%v'", tC.n, tC.p, result, tC.k) } }) } }
package subscription_test

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/imrenagi/go-payment/invoice"

	"github.com/imrenagi/go-payment"
	. "github.com/imrenagi/go-payment/subscription"
	sm "github.com/imrenagi/go-payment/subscription/mocks"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// TestSubscription_Start verifies that starting a subscription records the
// gateway, recurring ID, status and invoice URL returned by the controller,
// and advances the schedule (or clears it for a one-shot subscription).
func TestSubscription_Start(t *testing.T) {
	// Shared mock controller: Create always succeeds with a fixed response.
	c := &sm.Controller{}
	c.On("Create", mock.Anything, mock.Anything).
		Return(&CreateResponse{
			ID:                    "12345",
			Status:                StatusActive,
			LastCreatedInvoiceURL: "http://example.com",
		}, nil)
	c.On("Gateway").Return(payment.GatewayXendit)

	// Subtest name typo ("crete") is preserved: it is a runtime string.
	t.Run("crete subscription with multiple recurrence", func(t *testing.T) {
		s := New()
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		err := s.Start(context.TODO(), c)
		assert.Nil(t, err)
		assert.Equal(t, payment.GatewayXendit.String(), s.Gateway)
		assert.Equal(t, "12345", s.GatewayRecurringID)
		assert.Equal(t, StatusActive, s.Status)
		assert.Equal(t, "http://example.com", s.LastCreatedInvoice)
		assert.Equal(t, startAt, *s.Schedule.PreviousExecutionAt)
		// Recurring subscription: next execution is one interval (1 day) later.
		assert.Equal(t, startAt.AddDate(0, 0, 1), *s.Schedule.NextExecutionAt)
	})

	t.Run("crete subscription with one recurrence", func(t *testing.T) {
		s := New()
		// Field name typo ("Reccurence") preserved: it is the struct's API.
		s.TotalReccurence = 1
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		err := s.Start(context.TODO(), c)
		assert.Nil(t, err)
		assert.Equal(t, payment.GatewayXendit.String(), s.Gateway)
		assert.Equal(t, "12345", s.GatewayRecurringID)
		assert.Equal(t, StatusActive, s.Status)
		assert.Equal(t, "http://example.com", s.LastCreatedInvoice)
		assert.Equal(t, startAt, *s.Schedule.PreviousExecutionAt)
		// One-shot subscription: there must be no next execution.
		assert.Nil(t, s.Schedule.NextExecutionAt)
	})
}

// TestSubscription_Pause verifies pausing is only possible from the active
// state and transitions the subscription to StatusPaused.
func TestSubscription_Pause(t *testing.T) {
	t.Run("successfully pause", func(t *testing.T) {
		c := &sm.Controller{}
		c.On("Pause", mock.Anything, mock.Anything).Return(nil)
		s := New()
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		s.Status = StatusActive
		err := s.Pause(context.TODO(), c)
		assert.Nil(t, err)
		assert.Equal(t, StatusPaused, s.Status)
	})

	t.Run("cant pause if it is nnot in active", func(t *testing.T) {
		c := &sm.Controller{}
		s := New()
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		s.Status = StatusPaused
		err := s.Pause(context.TODO(), c)
		assert.NotNil(t, err)
		assert.True(t, errors.Is(err, payment.ErrCantProceed))
		// Status must stay unchanged on a rejected transition.
		assert.Equal(t, StatusPaused, s.Status)
	})
}

// TestSubscription_Resume verifies resuming is only allowed from the paused
// state, and that a stale next-execution time is moved forward by one interval.
func TestSubscription_Resume(t *testing.T) {
	t.Run("can't resume if it is not paused", func(t *testing.T) {
		c := &sm.Controller{}
		c.On("Resume", mock.Anything, mock.Anything).Return(nil)
		s := New()
		s.Status = StatusStop
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		err := s.Resume(context.TODO(), c)
		assert.NotNil(t, err)
		assert.True(t, errors.Is(err, payment.ErrCantProceed))
		assert.Equal(t, StatusStop, s.Status)
	})

	t.Run("successfully resume", func(t *testing.T) {
		c := &sm.Controller{}
		c.On("Resume", mock.Anything, mock.Anything).Return(nil)
		s := New()
		s.Status = StatusPaused
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		// Next execution lies in the past, so resuming must reschedule it.
		next := time.Now().Add(-1 * time.Hour)
		s.Schedule.NextExecutionAt = &next
		err := s.Resume(context.TODO(), c)
		assert.Nil(t, err)
		expected := next.AddDate(0, 0, 1)
		assert.Equal(t, StatusActive, s.Status)
		// Second-granularity comparison avoids flakiness from wall-clock drift.
		assert.Equal(t, expected.Second(), s.Schedule.NextExecutionAt.Second())
	})
}

// TestSubscription_Stop verifies stopping an active subscription clears the
// next execution time and sets StatusStop.
func TestSubscription_Stop(t *testing.T) {
	t.Run("successfully stop", func(t *testing.T) {
		c := &sm.Controller{}
		c.On("Stop", mock.Anything, mock.Anything).Return(nil)
		s := New()
		s.Status = StatusActive
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		next := time.Now().Add(-1 * time.Hour)
		s.Schedule.NextExecutionAt = &next
		// Preconditions before stopping.
		assert.Equal(t, StatusActive, s.Status)
		assert.NotNil(t, s.Schedule.NextExecutionAt)
		err := s.Stop(context.TODO(), c)
		assert.Nil(t, err)
		assert.Equal(t, StatusStop, s.Status)
		assert.Nil(t, s.Schedule.NextExecutionAt)
	})
}

// TestSubscription_Save verifies invoice bookkeeping: a one-shot subscription
// accepts exactly one invoice, a recurring one accepts many, and saving
// advances the schedule.
func TestSubscription_Save(t *testing.T) {
	t.Run("one time subscription, should save invoice if no invoice is paid/expired", func(t *testing.T) {
		s := New()
		s.TotalReccurence = 1
		startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
		s.Schedule = *NewSchedule(1, IntervalUnitDay, &startAt)
		s.Schedule.PreviousExecutionAt = &startAt
		next := time.Now().Add(-1 * time.Hour)
		s.Schedule.NextExecutionAt = &next
		assert.Len(t, s.Invoices, 0)
		now := time.Now()
		inv := invoice.New(now, now.Add(1*time.Hour))
		err := s.Save(inv)
		assert.Nil(t, err)
		assert.Len(t, s.Invoices, 1)
		// Saving pushes the next execution one interval forward.
		assert.Equal(t, next.AddDate(0, 0, 1).Second(), s.Schedule.NextExecutionAt.Second())
	})

	t.Run("one time subscription, should not save invoice if paid before", func(t *testing.T) {
		s := New()
		s.TotalReccurence = 1
		assert.Len(t, s.Invoices, 0)
		now := time.Now()
		inv := invoice.New(now, now.Add(1*time.Hour))
		err := s.Save(inv)
		assert.Nil(t, err)
		// A second save on a one-shot subscription must be rejected.
		err = s.Save(inv)
		assert.NotNil(t, err)
		assert.True(t, errors.Is(err, payment.ErrCantProceed))
		assert.Len(t, s.Invoices, 1)
	})

	t.Run("save multiple invoice for recurring payment", func(t *testing.T) {
		s := New()
		assert.Len(t, s.Invoices, 0)
		now := time.Now()
		inv := invoice.New(now, now.Add(1*time.Hour))
		err := s.Save(inv)
		assert.Nil(t, err)
		assert.Len(t, s.Invoices, 1)
		err = s.Save(inv)
		assert.Nil(t, err)
		assert.Len(t, s.Invoices, 2)
	})
}

// TestSchedule_NextAfterPause verifies how a schedule recomputes the next
// execution time when a paused subscription is resumed.
func TestSchedule_NextAfterPause(t *testing.T) {
	startAt := time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)

	t.Run("one time schedule should has no next execution date", func(t *testing.T) {
		s := NewSchedule(1, IntervalUnitDay, &startAt)
		assert.Nil(t, s.NextAfterPause())
	})

	t.Run("next execution date is after now, return next execution date", func(t *testing.T) {
		// A future NextExecutionAt is kept as-is.
		nextAt := time.Now().AddDate(0, 0, 1)
		s := NewSchedule(1, IntervalUnitDay, &startAt)
		s.NextExecutionAt = &nextAt
		assert.Equal(t, nextAt, *s.NextAfterPause())
	})

	t.Run("next execution date is passed once, find the next execution date after now", func(t *testing.T) {
		now := time.Now()
		s := NewSchedule(1, IntervalUnitMonth, &now)
		start := now.AddDate(0, -2, 0)
		// Missed by one month and five hours: expect one interval added.
		next := now.AddDate(0, -1, 0).Add(-5 * time.Hour)
		s.StartAt = &start
		s.NextExecutionAt = &next
		expected := now.Add(-5 * time.Hour)
		assert.Equal(t, expected.Second(), s.NextAfterPause().Second())
	})

	t.Run("next execution date is passed multiple times, find the next execution date after now", func(t *testing.T) {
		now := time.Now()
		s := NewSchedule(1, IntervalUnitMonth, &now)
		start := now.AddDate(0, -5, 0)
		next := now.AddDate(0, -4, 0)
		s.StartAt = &start
		s.NextExecutionAt = &next
		// Several intervals were missed; the result only needs to be in the future.
		result := s.NextAfterPause()
		assert.True(t, result.After(now))
	})
}
// Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package signal

import (
	"github.com/stretchr/testify/assert"
	"testing"
	"time"
)

// TestNewPacketEvent checks that a programmatically constructed PacketEvent
// exposes its entity ID and yields the expected default (fault) actions,
// both of which must survive a GOB round trip.
func TestNewPacketEvent(t *testing.T) {
	event, err := NewPacketEvent("foo", "bar", "baz", map[string]interface{}{})
	assert.NoError(t, err)
	assert.Equal(t, "foo", event.EntityID())
	// Helpers (defined elsewhere in this package) derive the default actions
	// for a deferred event.
	action := testDeferredEventDefaultAction(t, event)
	faultAction := testDeferredEventDefaultFaultAction(t, event)
	assert.IsType(t, &EventAcceptanceAction{}, action)
	assert.IsType(t, &PacketFaultAction{}, faultAction)
	testGOBAction(t, action, event)
	testGOBAction(t, faultAction, event)
}

// TestNewPacketEventFromJSONString checks that a PacketEvent deserialized
// from its JSON wire format round-trips entity ID, UUID and option payload.
func TestNewPacketEventFromJSONString(t *testing.T) {
	// Raw JSON as sent by the inspector (content deliberately unmodified).
	s := ` { "type": "event", "class": "PacketEvent", "entity": "_earthquake_ether_inspector", "uuid": "1f13eaa6-4b92-45f0-a4de-1236081dc649", "deferred": true, "option": { "src": "zksrv1", "dst": "zksrv3", "message": { "ZkQuorumPacket": { "AckQP": { "zxid_low": 0, "zxid_high": 1 } } } } }`
	signal, err := NewSignalFromJSONString(s, time.Now())
	assert.NoError(t, err)
	event := signal.(Event)
	t.Logf("event: %#v", event)
	packetEvent, ok := event.(*PacketEvent)
	if !ok {
		t.Fatal("Cannot convert to PacketEvent")
	}
	assert.Equal(t, "_earthquake_ether_inspector", packetEvent.EntityID())
	assert.Equal(t, "1f13eaa6-4b92-45f0-a4de-1236081dc649", packetEvent.ID())
	// Compare the parsed "option" map against the expected literal.
	opt1 := packetEvent.JSONMap()["option"].(map[string]interface{})
	opt2 := map[string]interface{}{
		"src": "zksrv1",
		"dst": "zksrv3",
		"message": map[string]interface{}{
			"ZkQuorumPacket": map[string]interface{}{
				"AckQP": map[string]interface{}{
					"zxid_low":  0,
					"zxid_high": 1,
				},
			},
		},
	}
	// Only "src" is compared for now.
	assert.Equal(t, opt2["src"], opt1["src"])
	// TODO: compare much more
	action := testDeferredEventDefaultAction(t, event)
	faultAction := testDeferredEventDefaultFaultAction(t, event)
	assert.IsType(t, &EventAcceptanceAction{}, action)
	assert.IsType(t, &PacketFaultAction{}, faultAction)
	testGOBAction(t, action, event)
	testGOBAction(t, faultAction, event)
}
/*
Copyright AppsCode Inc. and Contributors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	releasesv1alpha1 "x-helm.dev/apimachinery/apis/releases/v1alpha1"
)

// Kind/resource names used to register Plan with the API machinery.
const (
	ResourceKindPlan = "Plan"
	ResourcePlan     = "plan"
	ResourcePlans    = "plans"
)

// Plan is a cluster-scoped custom resource describing a billing plan.
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster
type Plan struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              PlanSpec   `json:"spec,omitempty"`
	Status            PlanStatus `json:"status,omitempty"`
}

// PlanSpec mirrors a Stripe plan plus x-helm specific metadata. Pointer
// fields are optional and omitted from JSON when nil.
type PlanSpec struct {
	// StripeID is the plan's identifier in Stripe.
	StripeID    string `json:"id"`
	NickName    string `json:"name"`
	DisplayName string `json:"displayName"`
	Description string `json:"description"`
	ProductID   string `json:"productID"`
	Phase       Phase  `json:"phase"`
	// Plans are sorted by weight before displaying to users
	Weight int32 `json:"weight"`
	// Bundle references the chart deployed for this plan.
	Bundle *releasesv1alpha1.ChartRef `json:"bundle,omitempty"`
	//+optional
	IncludedPlans []string `json:"includedPlans,omitempty"`
	//+optional
	PricingPattern map[ResourceGroup]PricingPattern `json:"pricingPattern,omitempty"`

	// The following fields correspond to Stripe plan attributes.
	AggregateUsage  *string             `json:"aggregateUsage,omitempty"`
	Amount          *int64              `json:"amount,omitempty"`
	AmountDecimal   *float64            `json:"amountDecimal,string,omitempty"`
	BillingScheme   *string             `json:"billingScheme,omitempty"`
	Currency        *string             `json:"currency,omitempty"`
	Interval        *string             `json:"interval,omitempty"`
	IntervalCount   *int64              `json:"intervalCount,omitempty"`
	Tiers           []*PlanTier         `json:"tiers,omitempty"`
	TiersMode       *string             `json:"tiersMode,omitempty"`
	TransformUsage  *PlanTransformUsage `json:"transformUsage,omitempty"`
	TrialPeriodDays *int64              `json:"trialPeriodDays,omitempty"`
	UsageType       *string             `json:"usageType,omitempty"`
}

// ResourceGroup keys a PricingPattern entry in PlanSpec.PricingPattern.
type ResourceGroup string

// PricingPattern describes how a resource group is priced, either via an
// expression or via a list of fixed size/price pairs.
type PricingPattern struct {
	//+optional
	Expression Expression `json:"expression,omitempty"`
	//+optional
	SizedPrices []SizedPrice `json:"sizedPrices,omitempty"`
}

// Expression is a pricing formula string.
type Expression string

// SizedPrice is a fixed price for a given cpu/memory size.
type SizedPrice struct {
	CPU    string  `json:"cpu"`
	Memory string  `json:"memory"`
	Price  float64 `json:"price"`
}

// PlanTier configures tiered pricing
type PlanTier struct {
	FlatAmount        *int64   `json:"flatAmount,omitempty"`
	FlatAmountDecimal *float64 `json:"flatAmountDecimal,string,omitempty"`
	UnitAmount        *int64   `json:"unitAmount,omitempty"`
	UnitAmountDecimal *float64 `json:"unitAmountDecimal,string,omitempty"`
	UpTo              *int64   `json:"upTo,omitempty"`
}

// PlanTransformUsage represents the bucket billing configuration.
type PlanTransformUsage struct {
	DivideBy *int64  `json:"divideBy,omitempty"`
	Round    *string `json:"round,omitempty"`
}

// BundledPlans returns the sorted, de-duplicated set of plan IDs covered by
// this plan: its own Stripe ID plus any included plans.
func (p Plan) BundledPlans() []string {
	plans := sets.NewString(p.Spec.StripeID)
	plans.Insert(p.Spec.IncludedPlans...)
	return plans.List()
}

// PlanList is a list of Plan resources.
// +kubebuilder:object:root=true
type PlanList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Plan `json:"items,omitempty"`
}

func init() {
	// Register Plan and PlanList with the scheme builder.
	SchemeBuilder.Register(&Plan{}, &PlanList{})
}

// PlanStatus holds the observed state of a Plan.
type PlanStatus struct {
	// ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
	// resource's generation, which is updated on mutation by the API Server.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}

// FeatureTable is a plan-comparison table: one column per plan, one Row per
// feature.
type FeatureTable struct {
	Plans []Plan `json:"plans"`
	Rows  []*Row `json:"rows"`
}

// Row is a single feature row; Values are aligned with FeatureTable.Plans.
type Row struct {
	Trait  string   `json:"trait"`
	Values []string `json:"values"`
}
package carbon2

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"

	"github.com/influxdata/telegraf"
)

// format selects how the metric name and field name are combined in the
// carbon2 output line.
type format string

const (
	Carbon2FormatFieldSeparate       string = "field_separate"
	Carbon2FormatMetricIncludesField string = "metric_includes_field"

	formatFieldSeparate       = format(Carbon2FormatFieldSeparate)
	formatMetricIncludesField = format(Carbon2FormatMetricIncludesField)
)

var formats = map[string]format{
	// Field separate is the default when no format specified.
	"":                               formatFieldSeparate,
	Carbon2FormatFieldSeparate:       formatFieldSeparate,
	Carbon2FormatMetricIncludesField: formatMetricIncludesField,
}

// Serializer converts telegraf metrics into carbon2 wire format.
type Serializer struct {
	metricsFormat format
}

// NewSerializer returns a Serializer for the named format; an unknown
// format name is an error (empty string selects field_separate).
func NewSerializer(f string) (*Serializer, error) {
	var (
		ok            bool
		metricsFormat format
	)
	if metricsFormat, ok = formats[f]; !ok {
		return nil, fmt.Errorf("unknown carbon2 format: %s", f)
	}
	return &Serializer{
		metricsFormat: metricsFormat,
	}, nil
}

// Serialize renders a single metric; it never returns an error.
func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) {
	return s.createObject(metric), nil
}

// SerializeBatch concatenates the serialization of each metric.
func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) {
	var batch bytes.Buffer
	for _, metric := range metrics {
		batch.Write(s.createObject(metric))
	}
	return batch.Bytes(), nil
}

// createObject emits one carbon2 line per numeric field:
// "<metric/field part><tags>  <value> <unix-ts>\n". Spaces in names, tag
// keys and tag values are replaced with underscores; empty tag values
// become "null".
//
// NOTE(review): field iteration uses a Go map, so line order across fields
// is not deterministic. isNumeric only excludes strings, so bool fields are
// serialized too ("true"/"false") — confirm that is intended.
func (s *Serializer) createObject(metric telegraf.Metric) []byte {
	var m bytes.Buffer
	for fieldName, fieldValue := range metric.Fields() {
		if !isNumeric(fieldValue) {
			continue
		}
		// Prefix: either "metric=... field=... " or "metric=..._field ".
		switch s.metricsFormat {
		case formatFieldSeparate:
			m.WriteString(serializeMetricFieldSeparate(
				metric.Name(), fieldName,
			))
		case formatMetricIncludesField:
			m.WriteString(serializeMetricIncludeField(
				metric.Name(), fieldName,
			))
		}
		for _, tag := range metric.TagList() {
			m.WriteString(strings.Replace(tag.Key, " ", "_", -1))
			m.WriteString("=")
			value := tag.Value
			if len(value) == 0 {
				value = "null"
			}
			m.WriteString(strings.Replace(value, " ", "_", -1))
			m.WriteString(" ")
		}
		// The extra space yields the double space separating intrinsic
		// tags from the value, as the original serializer produced.
		m.WriteString(" ")
		m.WriteString(fmt.Sprintf("%v", fieldValue))
		m.WriteString(" ")
		m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10))
		m.WriteString("\n")
	}
	return m.Bytes()
}

// serializeMetricFieldSeparate renders the "metric=<name> field=<field> "
// prefix used by the field_separate format.
func serializeMetricFieldSeparate(name, fieldName string) string {
	return fmt.Sprintf("metric=%s field=%s ",
		strings.Replace(name, " ", "_", -1),
		strings.Replace(fieldName, " ", "_", -1),
	)
}

// serializeMetricIncludeField renders the "metric=<name>_<field> " prefix
// used by the metric_includes_field format.
func serializeMetricIncludeField(name, fieldName string) string {
	return fmt.Sprintf("metric=%s_%s ",
		strings.Replace(name, " ", "_", -1),
		strings.Replace(fieldName, " ", "_", -1),
	)
}

// isNumeric reports whether the field value should be serialized; only
// strings are rejected (all other types, including bool, pass).
func isNumeric(v interface{}) bool {
	switch v.(type) {
	case string:
		return false
	default:
		return true
	}
}
package bot

import (
	"os"

	"github.com/sad0vnikov/wundergram/bot/dialog"
	"github.com/sad0vnikov/wundergram/logger"
	"gopkg.in/telegram-bot-api.v4"
)

// Shared dialog processor used by the update loop.
// NOTE(review): mutable package-level state, set in Init — assumes a single
// Bot instance per process.
var dialogTreeProcessor dialog.Processor

//Bot is a struct representing Bot state
type Bot struct {
	// API is the underlying Telegram Bot API client.
	API *tgbotapi.BotAPI
}

//Create returns a new Bot
// It panics if the token is rejected, since the signature has no error return.
func Create(token string) Bot {
	bot, err := tgbotapi.NewBotAPI(token)
	if err != nil {
		panic(err)
	}
	return Bot{API: bot}
}

//Init func Initializes telegram bot
// Init blocks forever: it long-polls Telegram for updates and dispatches
// each incoming message to the dialog tree processor in its own goroutine.
func (bot Bot) Init(dialogTree dialog.Tree) {
	dialogTreeProcessor = dialog.NewProcessor(&dialogTree)

	logger.Get("main").Infof("Authorized on account %#v", bot.API.Self.UserName)

	u := tgbotapi.NewUpdate(0)
	// Long-poll timeout, in seconds.
	u.Timeout = 60

	updates, err := bot.API.GetUpdatesChan(u)
	if err != nil {
		panic(err)
	}

	for update := range updates {
		// Skip updates that carry no message (edits, callbacks, etc.).
		if update.Message == nil {
			continue
		}

		command := update.Message.Command()
		if len(command) == 0 {
			// Treat plain text as the command when no /command was given.
			// NOTE(review): command is not used after this point.
			command = update.Message.Text
		}

		nextDialogNode := dialogTreeProcessor.GetNodeToMoveIn(update.Message, bot.API)

		logger.Get("main").Infof("new message from %v: %v", update.Message.From.UserName, update.Message.Text)
		// NOTE(review): handler goroutines are unbounded and untracked.
		go dialogTreeProcessor.RunNodeHandler(nextDialogNode, update.Message, bot.API)
	}
}

//GetTelegramBotLink returns a telegram bot t.me link
// built from the TELEGRAM_BOT_NAME environment variable.
func GetTelegramBotLink() string {
	botName := os.Getenv("TELEGRAM_BOT_NAME")
	return "http://t.me/" + botName
}
package main

import (
	"log"
	"net"

	"github.com/pietern/pductl/watchdog"
)

// Monitor wraps a watchdog and kicks it when it receives a packet on
// a UDP socket that it listens on.
type Monitor struct {
	*watchdog.Watchdog
}

// NewMonitor binds a UDP socket on outlet.UDP and starts a goroutine that
// kicks the embedded watchdog whenever any datagram arrives. It returns an
// error if the address cannot be resolved or bound.
//
// NOTE(review): the listener goroutine has no shutdown path (the socket is
// never closed), and a read error calls log.Fatal, terminating the whole
// process.
func NewMonitor(outlet Outlet) (*Monitor, error) {
	addr, err := net.ResolveUDPAddr("udp", outlet.UDP)
	if err != nil {
		return nil, err
	}

	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		return nil, err
	}

	log.Printf(
		"New watchdog for %s with delay %v",
		outlet.Name,
		outlet.Delay.Duration)

	m := Monitor{
		Watchdog: watchdog.NewWatchdog(outlet.Delay.Duration),
	}

	go func() {
		// Assume packets are never bigger...
		// We don't care what they contain anyway.
		buf := make([]byte, 1024)
		for {
			_, _, err := conn.ReadFrom(buf)
			if err != nil {
				log.Fatal(err)
			}

			// Received packet, kick watchdog.
			m.Watchdog.Kick()
		}
	}()

	return &m, nil
}
package http import ( "net/http" "github.com/b2wdigital/goignite/pkg/config" ) // NewServer returns a pointer with new Server func NewServer(handler http.Handler) *http.Server { return &http.Server{ Addr: config.String(ServerAddress), Handler: handler, MaxHeaderBytes: config.Int(MaxHeaderBytes), ReadTimeout: config.Duration(ReadTimeout), ReadHeaderTimeout: config.Duration(ReadHeaderTimeout), WriteTimeout: config.Duration(WriteTimeout), IdleTimeout: config.Duration(IdleTimeout), } }
package pulsar

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/apache/pulsar-client-go/pulsar"
	"github.com/pkg/errors"
	uuid "github.com/satori/go.uuid"

	"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
	"github.com/batchcorp/plumber-schemas/build/go/protos/records"
	"github.com/batchcorp/plumber/util"
	"github.com/batchcorp/plumber/validate"
)

// Read subscribes to the configured Pulsar topic and forwards each received
// message on resultsChan (errors on errorChan). It blocks until the context
// is cancelled, or — when readOpts.Continuous is false — until the first
// message (or first read error) has been handled.
func (p *Pulsar) Read(ctx context.Context, readOpts *opts.ReadOptions, resultsChan chan *records.ReadRecord, errorChan chan *records.ErrorRecord) error {
	if err := validateReadOptions(readOpts); err != nil {
		return errors.Wrap(err, "invalid read options")
	}

	consumer, err := p.client.Subscribe(pulsar.ConsumerOptions{
		Topic:                       readOpts.Pulsar.Args.Topic,
		SubscriptionName:            readOpts.Pulsar.Args.SubscriptionName,
		Type:                        getSubscriptionType(readOpts),
		SubscriptionInitialPosition: getSubscriptionInitialPosition(readOpts),
	})
	if err != nil {
		return errors.Wrap(err, "unable to create pulsar subscription")
	}

	// Deferred in this order so Unsubscribe runs before Close (LIFO).
	// NOTE(review): Unsubscribe's error return is discarded.
	defer consumer.Close()
	defer consumer.Unsubscribe()

	p.log.Info("Listening for message(s) ...")

	var count int64

	for {
		msg, err := consumer.Receive(ctx)
		if err != nil {
			// Context cancellation is the normal shutdown path.
			if err == context.Canceled {
				p.log.Debug("context cancelled")
				return nil
			}

			util.WriteError(nil, errorChan, errors.Wrap(err, "unable to read pulsar message"))

			if !readOpts.Continuous {
				return nil
			}

			continue
		}

		// Ack before publishing the record downstream.
		consumer.Ack(msg)

		count++

		// Raw JSON form of the whole message, attached as XRaw below.
		// NOTE(review): on marshal failure an error record is emitted but
		// the read record is still sent with a nil XRaw.
		serializedMsg, err := json.Marshal(msg)
		if err != nil {
			errorChan <- &records.ErrorRecord{
				OccurredAtUnixTsUtc: time.Now().UTC().Unix(),
				Error:               errors.Wrap(err, "unable to serialize message into JSON").Error(),
			}
		}

		resultsChan <- &records.ReadRecord{
			MessageId:           uuid.NewV4().String(),
			Num:                 count,
			ReceivedAtUnixTsUtc: time.Now().UTC().Unix(),
			Payload:             msg.Payload(),
			XRaw:                serializedMsg,
			Record: &records.ReadRecord_Pulsar{
				Pulsar: &records.Pulsar{
					// NOTE(review): %s on msg.ID() relies on its formatting;
					// go vet may prefer %v here.
					Id:              fmt.Sprintf("%s", msg.ID()),
					Key:             msg.Key(),
					Topic:           msg.Topic(),
					Properties:      msg.Properties(),
					RedeliveryCount: msg.RedeliveryCount(),
					EventTime:       msg.EventTime().Format(time.RFC3339),
					IsReplicated:    msg.IsReplicated(),
					OrderingKey:     msg.OrderingKey(),
					ProducerName:    msg.ProducerName(),
					PublishTime:     msg.PublishTime().Format(time.RFC3339),
					Timestamp:       time.Now().UTC().Unix(),
					Value:           msg.Payload(),
				},
			},
		}

		if !readOpts.Continuous {
			return nil
		}
	}

	// Unreachable: the loop above only exits via return.
	return nil
}

// getSubscriptionType converts string input of the subscription type to pulsar library's equivalent
// (unknown values fall back to Shared).
func getSubscriptionType(readOpts *opts.ReadOptions) pulsar.SubscriptionType {
	switch readOpts.Pulsar.Args.SubscriptionType.String() {
	case "EXCLUSIVE":
		return pulsar.Exclusive
	case "FAILOVER":
		return pulsar.Failover
	case "KEYSHARED":
		return pulsar.KeyShared
	default:
		return pulsar.Shared
	}
}

// getSubscriptionInitialPosition maps the configured initial position to the
// pulsar library's equivalent (default: latest).
func getSubscriptionInitialPosition(readOpts *opts.ReadOptions) pulsar.SubscriptionInitialPosition {
	switch readOpts.Pulsar.Args.InitialPosition.String() {
	case "PULSAR_EARLIEST":
		return pulsar.SubscriptionPositionEarliest
	default:
		return pulsar.SubscriptionPositionLatest
	}
}

// validateReadOptions checks that every option required for a Pulsar read is
// present, returning a sentinel validation error for the first missing piece.
func validateReadOptions(readOpts *opts.ReadOptions) error {
	if readOpts == nil {
		return validate.ErrMissingReadOptions
	}

	if readOpts.Pulsar == nil {
		return validate.ErrEmptyBackendGroup
	}

	args := readOpts.Pulsar.Args
	if args == nil {
		return validate.ErrEmptyBackendArgs
	}

	if args.Topic == "" {
		return ErrEmptyTopic
	}

	if args.SubscriptionName == "" {
		return ErrEmptySubscriptionName
	}

	return nil
}
// CLI to convert CM to Feet & Inches. sample usage: go run 09_distange.go 48
package main

import (
	"fmt"
	"os"
	"strconv"
)

// Inch is a length in inches.
type Inch float64

// Centimetre is a length in centimetres.
type Centimetre float64

// String renders an Inch value as feet'inches" (e.g. 27 -> 2'3"), or just
// inches when under one foot. Fractions of an inch are truncated.
func (i Inch) String() string {
	inches := uint(i) % 12
	feet := (uint(i) - inches) / 12
	if feet > 0 {
		return fmt.Sprintf("%d'%d\"", feet, inches)
	}
	return fmt.Sprintf("%d\"", inches)
}

// String renders a Centimetre value in metres (two decimals) when at least
// one metre, otherwise in whole centimetres.
func (c Centimetre) String() string {
	cm := uint(c) % 100
	m := (uint(c) - cm) / 100
	if m > 0 {
		return fmt.Sprintf("%.2fm", c/100)
	}
	return fmt.Sprintf("%dcm", uint(c))
}

// CmToI converts centimetres to inches.
// BUG FIX: the conversion factor was 2.49; an inch is defined as exactly
// 2.54 cm, so the previous results were ~2% too large.
func CmToI(c Centimetre) Inch {
	return Inch(c / 2.54)
}

func main() {
	// First CLI argument is the length in centimetres.
	num, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Println(err)
		return
	}
	cm := Centimetre(num)
	fmt.Println(cm)
	fmt.Println(CmToI(cm))
}
package analyser

import (
	"bytes"
	"errors"
	"htmlparser/models"
	"io"
	"strconv"
	"strings"

	"golang.org/x/net/html"

	"fmt"
)

// ParseSparkDashboard extracts the "completed batches" table from a Spark
// streaming dashboard HTML page and returns it as a models.Report.
func ParseSparkDashboard(content string) (*models.Report, error) {
	// html.Parse is forgiving; its error is intentionally ignored here.
	doc, _ := html.Parse(strings.NewReader(content))
	table, err := FindTagWithId(doc, "table", "completed-batches-table")
	if err != nil {
		return &models.Report{}, err
	}
	tbody, err := FindFirstChild(table, "tbody")
	if err != nil {
		return &models.Report{}, err
	}
	res, err := browseTr(tbody)
	if err == nil {
		return res, nil
	} else {
		return &models.Report{}, err
	}
}

// FindTagWithId depth-first searches the node tree for an element of the
// given tag type whose "id" attribute equals tagId.
func FindTagWithId(doc *html.Node, tagType string, tagId string) (*html.Node, error) {
	var res *html.Node
	var f func(*html.Node)
	f = func(node *html.Node) {
		if node.Type == html.ElementNode && node.Data == tagType {
			for i := 0; i < len(node.Attr); i++ {
				if node.Attr[i].Key == "id" && node.Attr[i].Val == tagId {
					res = node
				}
			}
		}
		// Stop descending once a match has been recorded.
		if res == nil {
			for child := node.FirstChild; child != nil; child = child.NextSibling {
				f(child)
			}
		}
	}
	f(doc)
	if res != nil {
		return res, nil
	}
	return nil, errors.New(fmt.Sprintf("Missing <%s> with id %s in the node tree", tagType, tagId))
}

// FindFirstChild returns the first descendant element (depth-first) whose
// tag name equals tagName.
func FindFirstChild(doc *html.Node, tagName string) (*html.Node, error) {
	var res *html.Node
	var f func(*html.Node)
	f = func(node *html.Node) {
		if node.Type == html.ElementNode && node.Data == tagName {
			res = node
		}
		if res == nil {
			for child := node.FirstChild; child != nil; child = child.NextSibling {
				f(child)
				if res != nil {
					break
				}
			}
		}
	}
	f(doc)
	if res != nil {
		return res, nil
	}
	return nil, errors.New("Missing <" + tagName + "> in the node tree")
}

// FindTagWithContent returns the first element of the given tag type whose
// rendered HTML contains the given substring.
func FindTagWithContent(doc *html.Node, tagType string, content string) (*html.Node, error) {
	var res *html.Node
	var f func(*html.Node)
	f = func(node *html.Node) {
		if node.Type == html.ElementNode && node.Data == tagType {
			nodeContent := renderNode(node)
			if strings.Contains(nodeContent, content) {
				res = node
			}
		}
		if res == nil {
			for child := node.FirstChild; child != nil; child = child.NextSibling {
				f(child)
			}
		}
	}
	f(doc)
	if res != nil {
		return res, nil
	}
	return nil, errors.New(fmt.Sprintf("No tag <%s> found with the requested content '%s'", tagType, content))
}

// browseTr walks the <tr> children of a <tbody> and converts each row into
// a models.Batch, collecting them into a Report.
func browseTr(tr *html.Node) (*models.Report, error) {
	var batches []models.Batch
	for child := tr.FirstChild; child != nil; child = child.NextSibling {
		if child.Data == "tr" {
			td, err := FindFirstChild(child, "td")
			if err != nil {
				return nil, err
			}
			batch, err := browseTd(td)
			if err != nil {
				return nil, err
			}
			batches = append(batches, batch)
			batch = models.Batch{}
		}
	}
	report := models.Report{
		Batches: batches,
	}
	return &report, nil
}

// genericTRBrowser is the schema-agnostic counterpart of browseTr: each row
// becomes a models.ArrayLine of raw cell strings.
func genericTRBrowser(tr *html.Node) (*[]models.ArrayLine, error) {
	var lines []models.ArrayLine
	for child := tr.FirstChild; child != nil; child = child.NextSibling {
		if child.Data == "tr" {
			td, err := FindFirstChild(child, "td")
			if err != nil {
				return nil, err
			}
			line, err := genericTDBrowser(td)
			if err != nil {
				return nil, err
			}
			lines = append(lines, *line)
			line = &models.ArrayLine{}
		}
	}
	return &lines, nil
}

// browseTd converts the <td> cells of one completed-batch row into a
// models.Batch. Column layout (by position): 0 batch time, 1 input size,
// 2 scheduling delay, 3 processing time, 4 total delay.
//
// NOTE(review): the inner `val, _ :=` declarations deliberately shadow the
// outer string `val` with the parsed numeric value; conversion errors are
// ignored, leaving zero values. ParseFloat is called with bitSize 4 —
// confirm against strconv docs (expected values are 32 or 64).
func browseTd(td *html.Node) (models.Batch, error) {
	cols := 0
	batch := models.Batch{}
	for child := td; child != nil; child = child.NextSibling {
		if child.Data == "td" {
			// Normalize the raw cell text.
			val := strings.Trim(renderNode(child.FirstChild), " ")
			val = strings.Replace(val, " ", "", -1)
			val = strings.Replace(val, "\n", "", -1)
			val = strings.Replace(val, "\r", "", -1)
			switch cols {
			case 0:
				// Batch time lives in a nested anchor; re-extract and
				// re-normalize from there.
				val := strings.Trim(renderNode(child.FirstChild.NextSibling.FirstChild), " ")
				val = strings.Replace(val, "\r", "", -1)
				val = strings.Replace(val, "\n", "", -1)
				val = strings.Replace(val, " ", "", -1)
				val = strings.Trim(val, "")
				val = strings.Replace(val, " ", "_", -1)
				batch.BatchTime = val
			case 1:
				val = strings.Replace(val, "events", "", 1)
				val, _ := strconv.Atoi(val)
				batch.InputSize = val
			case 2:
				// Scheduling delay may be reported in ms or s; store ms.
				if strings.Index(val, "ms") > 0 {
					val = strings.Replace(val, "ms", "", 1)
					val, _ := strconv.Atoi(val)
					batch.SchedulingDelay = val
				} else {
					val = strings.Replace(val, "s", "", 1)
					val, _ := strconv.Atoi(val)
					batch.SchedulingDelay = (val * 1000)
				}
			case 3:
				val = strings.Replace(val, "s", "", 1)
				val, _ := strconv.ParseFloat(val, 4)
				batch.ProcessingTime = float32(val)
			case 4:
				val = strings.Replace(val, "s", "", 1)
				val, _ := strconv.ParseFloat(val, 4)
				batch.TotalDelay = float32(val)
			}
			cols++
		}
	}
	return batch, nil
}

// genericTDBrowser collects the normalized text of each <td> cell into a
// models.ArrayLine, preferring the content of a following <a> sibling when
// one is present.
func genericTDBrowser(td *html.Node) (*models.ArrayLine, error) {
	cols := 0
	line := models.ArrayLine{}
	for child := td; child != nil; child = child.NextSibling {
		if child.Data == "td" {
			tdChild := child.FirstChild
			val := strings.Trim(renderNode(tdChild), " ")
			if tdChild.NextSibling != nil && tdChild.NextSibling.Data == "a" {
				val = strings.Trim(renderNode(tdChild.NextSibling), " ")
			}
			val = strings.Replace(val, " ", "", -1)
			val = strings.Replace(val, "\n", "", -1)
			val = strings.Replace(val, "\r", "", -1)
			line.Cells = append(line.Cells, val)
			cols++
		}
	}
	return &line, nil
}

// renderNode serializes a node back to its HTML text; a nil node renders
// as the empty string.
func renderNode(node *html.Node) string {
	var buffer bytes.Buffer
	writer := io.Writer(&buffer)
	if node != nil {
		html.Render(writer, node)
	}
	return buffer.String()
}

// FindWorkerLinkForApp scans a Spark master page for the "Running
// Applications" table and extracts the href of the row whose second cell
// contains appName.
//
// NOTE(review): errors from FindFirstChild/genericTRBrowser are shadowed and
// not checked before dereferencing their results; IndexAny matches any
// character of its set, not the substring "href=" — verify against a real
// master page.
func FindWorkerLinkForApp(appName string, content string) (string, error) {
	doc, _ := html.Parse(strings.NewReader(content))
	node, err := FindTagWithContent(doc, "h4", "<h4> Running Applications </h4>")
	if err != nil {
		return "", err
	}
	// at this stage, node refers to "<h4> Running Applications </h4>" tag, search sibling 2 times to get following <table>
	if node.NextSibling == nil || node.NextSibling.NextSibling == nil {
		return "", errors.New("Unexpected spark response, expected a <table>")
	}
	table := node.NextSibling.NextSibling
	tbody, err := FindFirstChild(table, "tbody")
	res, err := genericTRBrowser(tbody)
	link := ""
	for l := 0; l < len(*res); l++ {
		line := (*res)[l]
		// app name is contained in the second index of cells
		if strings.Contains(line.Cells[1], appName) {
			start := strings.IndexAny(line.Cells[1], "href=") + len("href=\"")
			end := strings.IndexAny(line.Cells[1], ">")
			link = line.Cells[1][start : end-1]
			break
		}
	}
	if link != "" {
		return link, nil
	} else {
		return "", errors.New(fmt.Sprintf("Link not found for application %s", appName))
	}
}

// IsActiveSparkMaster reports whether the given master page HTML declares
// the master's status as ALIVE.
func IsActiveSparkMaster(htmlContent string) bool {
	return strings.Contains(htmlContent, "<li><strong>Status:</strong> ALIVE</li>")
}
package kafka import ( "github.com/anchorfree/data-go/pkg/promutils" "github.com/prometheus/client_golang/prometheus" "github.com/valyala/fastjson" ) var ( rdHistoMetrics = []string{"min", "max", "avg", "p50", "p95", "p99"} rdGlobalMetrics = []string{"replyq", "msg_cnt", "msg_size", "tx", "tx_bytes", "rx", "rx_bytes", "txmsgs", "txmsgs_bytes", "rxmsgs", "rxmsgs_bytes"} rdBrokerMetrics = []string{"outbuf_cnt", "outbuf_msg_cnt", "waitresp_cnt", "waitresp_msg_cnt", "tx", "txbytes", "req_timeouts", "rx", "rxbytes", "connects", "disconnects"} rdBrokerHistoMetrics = []string{"int_latency", "outbuf_latency", "rtt"} rdTopicMetrics = []string{"batchsize", "batchcnt"} rdPartitionMetrics = []string{"msgq_cnt", "msgq_bytes", "xmit_msgq_cnt", "msgs_inflight"} ) type MetricVec interface { prometheus.Collector Delete(labels prometheus.Labels) bool } func B2f(b bool) float64 { if b { return float64(1) } return float64(0) } func (p *T) dropProducerMetrics(producer_id string) { for _, m := range []MetricVec{msgInTransit, activeProducer, metricRDKafkaGlobal, metricRDKafkaBroker, metricRDKafkaTopic, metricRDKafkaPartition} { for _, l := range promutils.GetVectorLabels(m, prometheus.Labels{"producer_id": producer_id}) { m.Delete(l) } } } func populateRDKafkaMetrics(stats string) error { var parser fastjson.Parser values, err := parser.Parse(stats) if err != nil { return err } producerID := string(values.GetStringBytes("name")) //librdkafka global metrics for _, m := range rdGlobalMetrics { metricRDKafkaGlobal.With(prometheus.Labels{ "metric": m, "producer_id": producerID, }).Set(values.GetFloat64(m)) } //librdkafka broker metrics values.GetObject("brokers").Visit(func(key []byte, v *fastjson.Value) { brokerID := string(v.GetStringBytes("name")) metricRDKafkaBroker.With(prometheus.Labels{ "metric": "state_up", "producer_id": producerID, "broker": brokerID, "window": "", }).Set(B2f(string(v.GetStringBytes("state")) == "UP")) for _, m := range rdBrokerMetrics { 
metricRDKafkaBroker.With(prometheus.Labels{ "metric": m, "producer_id": producerID, "broker": brokerID, "window": "", }).Set(v.GetFloat64(m)) } for _, m := range rdBrokerHistoMetrics { for _, window := range rdHistoMetrics { metricRDKafkaBroker.With(prometheus.Labels{ "metric": m, "producer_id": producerID, "broker": brokerID, "window": window, }).Set(v.GetFloat64(m, window)) } } }) //librdkafka topic metrics values.GetObject("topics").Visit(func(key []byte, v *fastjson.Value) { topic := string(v.GetStringBytes("topic")) for _, m := range rdTopicMetrics { for _, window := range rdHistoMetrics { metricRDKafkaTopic.With(prometheus.Labels{ "metric": m, "producer_id": producerID, "topic": topic, "window": window, }).Set(v.GetFloat64(m, window)) } } //librdkafka topic-partition metrics for _, m := range rdPartitionMetrics { v.GetObject("partitions").Visit(func(key []byte, pv *fastjson.Value) { metricRDKafkaPartition.With(prometheus.Labels{ "metric": m, "producer_id": producerID, "topic": topic, "partition": string(key), }).Set(pv.GetFloat64(m)) }) } }) return nil } var ( msgSent = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "kafka_client_requests_cnt", Help: "Number of kafka requests sent", }, []string{"topic"}, ) msgOK = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "kafka_client_ack_cnt", Help: "Number of kafka ACKed requests received", }, []string{"topic"}, ) msgNOK = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "kafka_client_err_cnt", Help: "Number of kafka Errored requests", }, []string{"topic", "error"}, ) msgInTransit = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "kafka_client_messages_in_transit", Help: "Number of kafka messages in transit", }, []string{"producer_id"}, ) msgDropped = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "kafka_client_dropped_cnt", Help: "Number of kafka Errored requests which are dropped", }, []string{"topic", "error"}, ) eventIgnored = prometheus.NewCounter( prometheus.CounterOpts{ 
Name: "kafka_client_events_ignored_cnt", Help: "Number of kafka events which are ignored", }, ) cbState = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "producer_cb_state", Help: "Circuit Breaker state of Kafka", }, []string{"name", "state"}, ) cbCurrentState = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "kafka_cb_current_state", Help: "Circuit Breaker current state of Kafka", }, ) producerQueueLen = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "producer_kafka_queue_len", Help: "Number of messages and requests waiting to be transmitted to the broker as well as delivery reports queued for the application", }, ) libVersion = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "producer_kafka_librdkafka_version", Help: "Version of underlying librdkafka library", }, []string{"version"}, ) activeProducer = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "kafka_active_producer", Help: "Current active producer", }, []string{"producer_id"}, ) lastProducerStartTime = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "kafka_last_producer_start_time", Help: "Time when the freshest producer was started", }, ) metricCertExpirationTime = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "kafka_cert_expiration_time", Help: "Kafka producer certificat NotAfter", }, ) metricCaExpirationTime = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "kafka_ca_expiration_time", Help: "Kafka producer CA NotAfter", }, ) metricKafkaEventsQueueLen = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "kafka_events_queue_len", Help: "Kafka driver events queue length", }, ) metricRDKafkaGlobal = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "rdkafka_global", Help: "librdkafka internal global metrics", }, []string{"producer_id", "metric"}, ) metricRDKafkaBroker = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "rdkafka_broker", Help: "librdkafka internal broker metrics", }, []string{"producer_id", "metric", "broker", "window"}, ) metricRDKafkaTopic = 
prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "rdkafka_topic", Help: "librdkafka internal topic metrics", }, []string{"producer_id", "metric", "topic", "window"}, ) metricRDKafkaPartition = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "rdkafka_partition", Help: "librdkafka internal partition metrics", }, []string{"producer_id", "metric", "topic", "partition"}, ) ) func registerMetrics(prom *prometheus.Registry) { prom.MustRegister(msgSent, msgOK, msgNOK, msgDropped, cbState, cbCurrentState, producerQueueLen, eventIgnored, msgInTransit, libVersion, activeProducer, lastProducerStartTime, metricCertExpirationTime, metricCaExpirationTime, metricKafkaEventsQueueLen, metricRDKafkaGlobal, metricRDKafkaBroker, metricRDKafkaTopic, metricRDKafkaPartition, ) }
func intersect(nums1 []int, nums2 []int) []int { sort.Ints(nums1) sort.Ints(nums2) p1 := 0 p2 := 0 res := []int{} for p1 < len(nums1) && p2 < len(nums2) { n1 := nums1[p1] n2 := nums2[p2] if n1 == n2 { res = append(res, n1) p1 += 1 p2 += 1 } else if (n1 > n2) { p2 += 1 } else { p1 += 1 } } return res }
package wx type WxpayReq struct { Appid string `xml:"appid"` BankType string `xml:"bank_type"` CashFee string `xml:"cash_fee"` FeeType string `xml:"fee_type"` IsSubscribe string `xml:"is_subscribe"` MchId string `xml:"mch_id"` NonceStr string `xml:"nonce_str"` Openid string `xml:"openid"` OutTradeNo string `xml:"out_trade_no"` ResultCode string `xml:"result_code"` ReturnCode string `xml:"return_code"` Sign string `xml:"sign"` TimeEnd string `xml:"time_end"` TotalFee int `xml:"total_fee"` TradeType string `xml:"trade_type"` TransactionId string `xml:"transaction_id"` } type WxpayRes struct { ReturnCode string `xml:"return_code"` ReturnMsg string `xml:"return_msg"` }
/* Tencent is pleased to support the open source community by making Basic Service Configuration Platform available. Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "as IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package service import ( "bytes" "fmt" "io/ioutil" "sync" "bscp.io/pkg/dal/table" "bscp.io/pkg/kit" "bscp.io/pkg/logs" "bscp.io/pkg/types" ) // getAppTmplRevisions get app template revision details func (s *Service) getAppTmplRevisions(kt *kit.Kit, bizID, appID uint32) ([]*table.TemplateRevision, error) { opt := &types.BasePage{All: true} details, _, err := s.dao.AppTemplateBinding().List(kt, bizID, appID, opt) if err != nil { logs.Errorf("get app template revisions failed, err: %v, rid: %s", err, kt.Rid) return nil, err } // so far, no any template config item exists for the app if len(details) == 0 { return nil, nil } // get template revision details tmplRevisions, err := s.dao.TemplateRevision(). ListByIDs(kt, details[0].Spec.TemplateRevisionIDs) if err != nil { logs.Errorf("get app template revisions failed, err: %v, rid: %s", err, kt.Rid) return nil, err } return tmplRevisions, nil } // downloadTmplContent download template config item content from repo. 
// the order of elements in slice contents and slice tmplRevisions is consistent func (s *Service) downloadTmplContent(kt *kit.Kit, tmplRevisions []*table.TemplateRevision) ([][]byte, error) { contents := make([][]byte, len(tmplRevisions)) var hitError error pipe := make(chan struct{}, 10) wg := sync.WaitGroup{} for idx, r := range tmplRevisions { wg.Add(1) pipe <- struct{}{} go func(idx int, r *table.TemplateRevision) { defer func() { wg.Done() <-pipe }() k := kt.GetKitForRepoTmpl(r.Attachment.TemplateSpaceID) body, _, err := s.repo.Download(k, r.Spec.ContentSpec.Signature) if err != nil { hitError = fmt.Errorf("download template config content from repo failed, "+ "template id: %d, name: %s, path: %s, error: %v", r.Attachment.TemplateID, r.Spec.Name, r.Spec.Path, err) return } content, err := ioutil.ReadAll(body) if err != nil { hitError = fmt.Errorf("read template config content from body failed, "+ "template id: %d, name: %s, path: %s, error: %v", r.Attachment.TemplateID, r.Spec.Name, r.Spec.Path, err) return } contents[idx] = content }(idx, r) } wg.Wait() if hitError != nil { logs.Errorf("download template config content failed, err: %v, rid: %s", hitError, kt.Rid) return nil, hitError } return contents, nil } // uploadRenderedTmplContent upload rendered template config item content to repo. 
func (s *Service) uploadRenderedTmplContent(kt *kit.Kit, renderedContentMap map[uint32][]byte, signatureMap map[uint32]string, revisionMap map[uint32]*table.TemplateRevision) error { var hitError error pipe := make(chan struct{}, 10) wg := sync.WaitGroup{} for revisionID := range renderedContentMap { wg.Add(1) pipe <- struct{}{} go func(revisionID uint32) { defer func() { wg.Done() <-pipe }() r := revisionMap[revisionID] k := kt.GetKitForRepoCfg() _, err := s.repo.Upload(k, signatureMap[revisionID], bytes.NewReader(renderedContentMap[revisionID])) if err != nil { hitError = fmt.Errorf("upload rendered template config content to repo failed, "+ "template id: %d, name: %s, path: %s, error: %v", r.Attachment.TemplateID, r.Spec.Name, r.Spec.Path, err) return } }(revisionID) } wg.Wait() if hitError != nil { logs.Errorf("upload rendered template config content failed, err: %v, rid: %s", hitError, kt.Rid) return hitError } return nil }
package middleware import ( log "github.com/best-expendables/logger" "bytes" "context" "io/ioutil" "net/http" ) // loggable structure helper type logger struct { logger log.Entry } // get context-dependent logger. // If logger not presented into context then returns "base" logger from property. func (l *logger) get(ctx context.Context) log.Entry { if logger := log.EntryFromContext(ctx); logger != nil { return logger } return l.logger } func cloneRequest(r *http.Request) *http.Request { r2 := new(http.Request) *r2 = *r // deep copy of the Header r2.Header = make(http.Header) for k, s := range r.Header { r2.Header[k] = s } return r2 } func recoverRequestBody(request *http.Request, originalBody []byte) { if originalBody != nil { request.Body = ioutil.NopCloser(bytes.NewBuffer(originalBody)) } }
// Copyright 2018 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package core import ( "fmt" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/mock" ) func newLongType() types.FieldType { return *(types.NewFieldType(mysql.TypeLong)) } func newStringType() types.FieldType { ft := types.NewFieldType(mysql.TypeVarchar) charset, collate := types.DefaultCharsetForType(mysql.TypeVarchar) ft.SetCharset(charset) ft.SetCollate(collate) return *ft } func newDateType() types.FieldType { ft := types.NewFieldType(mysql.TypeDate) return *ft } // MockSignedTable is only used for plan related tests. 
func MockSignedTable() *model.TableInfo {
	// column: a, b, c, d, e, c_str, d_str, e_str, f, g, h, i_date
	// PK: a
	// indices: c_d_e, e, f, g, f_g, c_d_e_str, e_d_c_str_prefix
	// NOTE(review): the single-column index over "e" is actually named
	// "x" below and is in StateWriteOnly (not public) — presumably
	// intentional so plans see a not-yet-visible index; confirm.
	indices := []*model.IndexInfo{
		{
			Name: model.NewCIStr("c_d_e"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("c"),
					Length: types.UnspecifiedLength,
					Offset: 2,
				},
				{
					Name:   model.NewCIStr("d"),
					Length: types.UnspecifiedLength,
					Offset: 3,
				},
				{
					Name:   model.NewCIStr("e"),
					Length: types.UnspecifiedLength,
					Offset: 4,
				},
			},
			State:  model.StatePublic,
			Unique: true,
		},
		{
			// Single-column unique index over "e", still write-only.
			Name: model.NewCIStr("x"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("e"),
					Length: types.UnspecifiedLength,
					Offset: 4,
				},
			},
			State:  model.StateWriteOnly,
			Unique: true,
		},
		{
			Name: model.NewCIStr("f"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("f"),
					Length: types.UnspecifiedLength,
					Offset: 8,
				},
			},
			State:  model.StatePublic,
			Unique: true,
		},
		{
			Name: model.NewCIStr("g"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("g"),
					Length: types.UnspecifiedLength,
					Offset: 9,
				},
			},
			State: model.StatePublic,
		},
		{
			Name: model.NewCIStr("f_g"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("f"),
					Length: types.UnspecifiedLength,
					Offset: 8,
				},
				{
					Name:   model.NewCIStr("g"),
					Length: types.UnspecifiedLength,
					Offset: 9,
				},
			},
			State:  model.StatePublic,
			Unique: true,
		},
		{
			Name: model.NewCIStr("c_d_e_str"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("c_str"),
					Length: types.UnspecifiedLength,
					Offset: 5,
				},
				{
					Name:   model.NewCIStr("d_str"),
					Length: types.UnspecifiedLength,
					Offset: 6,
				},
				{
					Name:   model.NewCIStr("e_str"),
					Length: types.UnspecifiedLength,
					Offset: 7,
				},
			},
			State: model.StatePublic,
		},
		{
			// Reversed column order; c_str is a prefix index (length 10).
			Name: model.NewCIStr("e_d_c_str_prefix"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("e_str"),
					Length: types.UnspecifiedLength,
					Offset: 7,
				},
				{
					Name:   model.NewCIStr("d_str"),
					Length: types.UnspecifiedLength,
					Offset: 6,
				},
				{
					Name:   model.NewCIStr("c_str"),
					Length: 10,
					Offset: 5,
				},
			},
			State: model.StatePublic,
		},
	}
	// Columns: IDs are 1-based and run parallel to 0-based offsets.
	pkColumn := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    0,
		Name:      model.NewCIStr("a"),
		FieldType: newLongType(),
		ID:        1,
	}
	col0 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    1,
		Name:      model.NewCIStr("b"),
		FieldType: newLongType(),
		ID:        2,
	}
	col1 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    2,
		Name:      model.NewCIStr("c"),
		FieldType: newLongType(),
		ID:        3,
	}
	col2 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    3,
		Name:      model.NewCIStr("d"),
		FieldType: newLongType(),
		ID:        4,
	}
	col3 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    4,
		Name:      model.NewCIStr("e"),
		FieldType: newLongType(),
		ID:        5,
	}
	colStr1 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    5,
		Name:      model.NewCIStr("c_str"),
		FieldType: newStringType(),
		ID:        6,
	}
	colStr2 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    6,
		Name:      model.NewCIStr("d_str"),
		FieldType: newStringType(),
		ID:        7,
	}
	colStr3 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    7,
		Name:      model.NewCIStr("e_str"),
		FieldType: newStringType(),
		ID:        8,
	}
	col4 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    8,
		Name:      model.NewCIStr("f"),
		FieldType: newLongType(),
		ID:        9,
	}
	col5 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    9,
		Name:      model.NewCIStr("g"),
		FieldType: newLongType(),
		ID:        10,
	}
	col6 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    10,
		Name:      model.NewCIStr("h"),
		FieldType: newLongType(),
		ID:        11,
	}
	col7 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    11,
		Name:      model.NewCIStr("i_date"),
		FieldType: newDateType(),
		ID:        12,
	}
	pkColumn.SetFlag(mysql.PriKeyFlag | mysql.NotNullFlag)
	// Column 'b', 'c', 'd', 'f', 'g' is not null.
	col0.SetFlag(mysql.NotNullFlag)
	col1.SetFlag(mysql.NotNullFlag)
	col2.SetFlag(mysql.NotNullFlag)
	col4.SetFlag(mysql.NotNullFlag)
	col5.SetFlag(mysql.NotNullFlag)
	// 'h' additionally has no default value.
	col6.SetFlag(mysql.NoDefaultValueFlag)
	table := &model.TableInfo{
		Columns:    []*model.ColumnInfo{pkColumn, col0, col1, col2, col3, colStr1, colStr2, colStr3, col4, col5, col6, col7},
		Indices:    indices,
		Name:       model.NewCIStr("t"),
		PKIsHandle: true,
	}
	return table
}

// MockUnsignedTable is only used for plan related tests.
func MockUnsignedTable() *model.TableInfo {
	// column: a, b, c
	// PK: a (unsigned)
	// indices: b, b_c
	indices := []*model.IndexInfo{
		{
			Name: model.NewCIStr("b"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("b"),
					Length: types.UnspecifiedLength,
					Offset: 1,
				},
			},
			State:  model.StatePublic,
			Unique: true,
		},
		{
			Name: model.NewCIStr("b_c"),
			Columns: []*model.IndexColumn{
				{
					Name:   model.NewCIStr("b"),
					Length: types.UnspecifiedLength,
					Offset: 1,
				},
				{
					Name:   model.NewCIStr("c"),
					Length: types.UnspecifiedLength,
					Offset: 2,
				},
			},
			State: model.StatePublic,
		},
	}
	pkColumn := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    0,
		Name:      model.NewCIStr("a"),
		FieldType: newLongType(),
		ID:        1,
	}
	col0 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    1,
		Name:      model.NewCIStr("b"),
		FieldType: newLongType(),
		ID:        2,
	}
	col1 := &model.ColumnInfo{
		State:     model.StatePublic,
		Offset:    2,
		Name:      model.NewCIStr("c"),
		FieldType: newLongType(),
		ID:        3,
	}
	pkColumn.SetFlag(mysql.PriKeyFlag | mysql.NotNullFlag | mysql.UnsignedFlag)
	// Column 'b' is not null.
	col0.SetFlag(mysql.NotNullFlag)
	// 'c' is unsigned (but nullable).
	col1.SetFlag(mysql.UnsignedFlag)
	table := &model.TableInfo{
		Columns:    []*model.ColumnInfo{pkColumn, col0, col1},
		Indices:    indices,
		Name:       model.NewCIStr("t2"),
		PKIsHandle: true,
	}
	return table
}

// MockNoPKTable is only used for plan related tests.
func MockNoPKTable() *model.TableInfo { // column: a, b col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, Name: model.NewCIStr("a"), FieldType: newLongType(), ID: 2, } col1 := &model.ColumnInfo{ State: model.StatePublic, Offset: 2, Name: model.NewCIStr("b"), FieldType: newLongType(), ID: 3, } // Column 'a', 'b' is not null. col0.SetFlag(mysql.NotNullFlag) col1.SetFlag(mysql.UnsignedFlag) table := &model.TableInfo{ Columns: []*model.ColumnInfo{col0, col1}, Name: model.NewCIStr("t3"), PKIsHandle: true, } return table } // MockView is only used for plan related tests. func MockView() *model.TableInfo { selectStmt := "select b,c,d from t" col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 0, Name: model.NewCIStr("b"), ID: 1, } col1 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, Name: model.NewCIStr("c"), ID: 2, } col2 := &model.ColumnInfo{ State: model.StatePublic, Offset: 2, Name: model.NewCIStr("d"), ID: 3, } view := &model.ViewInfo{SelectStmt: selectStmt, Security: model.SecurityDefiner, Definer: &auth.UserIdentity{Username: "root", Hostname: ""}, Cols: []model.CIStr{col0.Name, col1.Name, col2.Name}} table := &model.TableInfo{ Name: model.NewCIStr("v"), Columns: []*model.ColumnInfo{col0, col1, col2}, View: view, } return table } // MockContext is only used for plan related tests. func MockContext() sessionctx.Context { ctx := mock.NewContext() ctx.Store = &mock.Store{ Client: &mock.Client{}, } initStatsCtx := mock.NewContext() initStatsCtx.Store = &mock.Store{ Client: &mock.Client{}, } ctx.GetSessionVars().CurrentDB = "test" do := domain.NewMockDomain() if err := do.CreateStatsHandle(ctx, initStatsCtx); err != nil { panic(fmt.Sprintf("create mock context panic: %+v", err)) } domain.BindDomain(ctx, do) return ctx } // MockPartitionInfoSchema mocks an info schema for partition table. 
func MockPartitionInfoSchema(definitions []model.PartitionDefinition) infoschema.InfoSchema { tableInfo := MockSignedTable() cols := make([]*model.ColumnInfo, 0, len(tableInfo.Columns)) cols = append(cols, tableInfo.Columns...) last := tableInfo.Columns[len(tableInfo.Columns)-1] cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, Name: model.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ Type: model.PartitionTypeRange, Expr: "ptn", Enable: true, Definitions: definitions, } tableInfo.Columns = cols tableInfo.Partition = partition is := infoschema.MockInfoSchema([]*model.TableInfo{tableInfo}) return is } // MockRangePartitionTable mocks a range partition table for test func MockRangePartitionTable() *model.TableInfo { definitions := []model.PartitionDefinition{ { ID: 41, Name: model.NewCIStr("p1"), LessThan: []string{"16"}, }, { ID: 42, Name: model.NewCIStr("p2"), LessThan: []string{"32"}, }, } tableInfo := MockSignedTable() tableInfo.Name = model.NewCIStr("pt1") cols := make([]*model.ColumnInfo, 0, len(tableInfo.Columns)) cols = append(cols, tableInfo.Columns...) last := tableInfo.Columns[len(tableInfo.Columns)-1] cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, Name: model.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ Type: model.PartitionTypeRange, Expr: "ptn", Enable: true, Definitions: definitions, } tableInfo.Columns = cols tableInfo.Partition = partition return tableInfo } // MockHashPartitionTable mocks a hash partition table for test func MockHashPartitionTable() *model.TableInfo { definitions := []model.PartitionDefinition{ { ID: 51, Name: model.NewCIStr("p1"), }, { ID: 52, Name: model.NewCIStr("p2"), }, } tableInfo := MockSignedTable() tableInfo.Name = model.NewCIStr("pt2") cols := make([]*model.ColumnInfo, 0, len(tableInfo.Columns)) cols = append(cols, tableInfo.Columns...) 
last := tableInfo.Columns[len(tableInfo.Columns)-1] cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, Name: model.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ Type: model.PartitionTypeHash, Expr: "ptn", Enable: true, Definitions: definitions, Num: 2, } tableInfo.Columns = cols tableInfo.Partition = partition return tableInfo } // MockListPartitionTable mocks a list partition table for test func MockListPartitionTable() *model.TableInfo { definitions := []model.PartitionDefinition{ { ID: 61, Name: model.NewCIStr("p1"), InValues: [][]string{ { "1", }, }, }, { ID: 62, Name: model.NewCIStr("p2"), InValues: [][]string{ { "2", }, }, }, } tableInfo := MockSignedTable() tableInfo.Name = model.NewCIStr("pt3") cols := make([]*model.ColumnInfo, 0, len(tableInfo.Columns)) cols = append(cols, tableInfo.Columns...) last := tableInfo.Columns[len(tableInfo.Columns)-1] cols = append(cols, &model.ColumnInfo{ State: model.StatePublic, Offset: last.Offset + 1, Name: model.NewCIStr("ptn"), FieldType: newLongType(), ID: last.ID + 1, }) partition := &model.PartitionInfo{ Type: model.PartitionTypeList, Expr: "ptn", Enable: true, Definitions: definitions, Num: 2, } tableInfo.Columns = cols tableInfo.Partition = partition return tableInfo } // MockStateNoneColumnTable is only used for plan related tests. 
func MockStateNoneColumnTable() *model.TableInfo { // column: a, b // PK: a // indeices: b indices := []*model.IndexInfo{ { Name: model.NewCIStr("b"), Columns: []*model.IndexColumn{ { Name: model.NewCIStr("b"), Length: types.UnspecifiedLength, Offset: 1, }, }, State: model.StatePublic, Unique: true, }, } pkColumn := &model.ColumnInfo{ State: model.StatePublic, Offset: 0, Name: model.NewCIStr("a"), FieldType: newLongType(), ID: 1, } col0 := &model.ColumnInfo{ State: model.StatePublic, Offset: 1, Name: model.NewCIStr("b"), FieldType: newLongType(), ID: 2, } col1 := &model.ColumnInfo{ State: model.StateNone, Offset: 2, Name: model.NewCIStr("c"), FieldType: newLongType(), ID: 3, } pkColumn.SetFlag(mysql.PriKeyFlag | mysql.NotNullFlag | mysql.UnsignedFlag) col0.SetFlag(mysql.NotNullFlag) col1.SetFlag(mysql.UnsignedFlag) table := &model.TableInfo{ Columns: []*model.ColumnInfo{pkColumn, col0, col1}, Indices: indices, Name: model.NewCIStr("T_StateNoneColumn"), PKIsHandle: true, } return table }
// Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package opsmanager

import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
	"github.com/mongodb/mongocli/internal/cli"
	"github.com/mongodb/mongocli/internal/config"
	"github.com/mongodb/mongocli/internal/convert"
	"github.com/mongodb/mongocli/internal/description"
	"github.com/mongodb/mongocli/internal/flag"
	"github.com/mongodb/mongocli/internal/store"
	"github.com/mongodb/mongocli/internal/usage"
	"github.com/spf13/cobra"
	"go.mongodb.org/ops-manager/atmcfg"
	"go.mongodb.org/ops-manager/opsmngr"
)

// scransha1 is the default authentication mechanism for new DB users.
const scransha1 = "SCRAM-SHA-1"

// DBUsersCreateOpts carries the flag values and the automation-config
// store used by the "om dbuser create" command.
type DBUsersCreateOpts struct {
	cli.GlobalOpts
	username   string
	password   string
	authDB     string // authentication database for the new user
	roles      []string
	mechanisms []string
	store      store.AutomationPatcher
}

// initStore wires the opts to a store built from the default CLI profile.
func (opts *DBUsersCreateOpts) initStore() error {
	var err error
	opts.store, err = store.New(config.Default())
	return err
}

// Run fetches the project's automation config, adds the new user to it,
// pushes the updated config back, and prints the deployment status URL.
func (opts *DBUsersCreateOpts) Run() error {
	current, err := opts.store.GetAutomationConfig(opts.ConfigProjectID())

	if err != nil {
		return err
	}

	atmcfg.AddUser(current, opts.newDBUser())

	if err := opts.store.UpdateAutomationConfig(opts.ConfigProjectID(), current); err != nil {
		return err
	}

	fmt.Print(cli.DeploymentStatus(config.OpsManagerURL(), opts.ConfigProjectID()))

	return nil
}

// newDBUser maps the collected flag values onto an Ops Manager user
// document, converting the role strings to OM role objects.
func (opts *DBUsersCreateOpts) newDBUser() *opsmngr.MongoDBUser {
	return &opsmngr.MongoDBUser{
		Database:                   opts.authDB,
		Username:                   opts.username,
		InitPassword:               opts.password,
		Roles:                      convert.BuildOMRoles(opts.roles),
		AuthenticationRestrictions: []string{},
		Mechanisms:                 opts.mechanisms,
	}
}

// Prompt asks interactively for a password when none was supplied via flag.
func (opts *DBUsersCreateOpts) Prompt() error {
	if opts.password != "" {
		return nil
	}
	prompt := &survey.Password{
		Message: "Password:",
	}
	return survey.AskOne(prompt, &opts.password)
}

// mongocli atlas dbuser(s) create --username username --password password --role roleName@dbName [--projectId projectId]
// DBUsersCreateBuilder assembles the cobra command, its flags, and the
// pre-run wiring (store init plus interactive password prompt).
func DBUsersCreateBuilder() *cobra.Command {
	opts := &DBUsersCreateOpts{}
	cmd := &cobra.Command{
		Use:   "create",
		Short: description.CreateDBUser,
		Example: `
  Create a user with readWriteAnyDatabase and clusterMonitor access
  $ mongocli om dbuser create --username <username>  --role readWriteAnyDatabase,clusterMonitor --mechanisms SCRAM-SHA-256 --projectId <projectId>`,
		Args: cobra.NoArgs,
		PreRunE: func(cmd *cobra.Command, args []string) error {
			if err := opts.PreRunE(opts.initStore); err != nil {
				return err
			}
			return opts.Prompt()
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return opts.Run()
		},
	}

	cmd.Flags().StringVarP(&opts.username, flag.Username, flag.UsernameShort, "", usage.Username)
	cmd.Flags().StringVarP(&opts.password, flag.Password, flag.PasswordShort, "", usage.Password)
	cmd.Flags().StringVar(&opts.authDB, flag.AuthDB, convert.AdminDB, usage.AuthDB)
	cmd.Flags().StringSliceVar(&opts.roles, flag.Role, []string{}, usage.Roles)
	cmd.Flags().StringSliceVar(&opts.mechanisms, flag.Mechanisms, []string{scransha1}, usage.Mechanisms)

	cmd.Flags().StringVar(&opts.ProjectID, flag.ProjectID, "", usage.ProjectID)

	_ = cmd.MarkFlagRequired(flag.Username)

	return cmd
}
package main import ( "compress/gzip" "fmt" "io" "math" "net/http" "net/http/httptest" "os" "strings" "testing" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) func TestCLIDownloadServer(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "CLI Download server") } var _ = Describe("Test CLI Download Server", func() { origFileServerDir := fileServerDir mux = setupMux() BeforeSuite(func() { files, err := os.Open("testFiles/files.json") Expect(err).ToNot(HaveOccurred()) fileMetadataList, err = getMetadata(files) Expect(err).ToNot(HaveOccurred()) }) AfterSuite(func() { fileServerDir = origFileServerDir }) Context("Test CompressFiles", func() { tempDir := "" BeforeEach(func() { var err error tempDir, err = os.MkdirTemp(".", "cliDlTemp.") Expect(err).ToNot(HaveOccurred()) tempDir = strings.TrimPrefix(tempDir, "./") fileServerDir = tempDir }) AfterEach(func() { if tempDir != "" { _ = os.RemoveAll(tempDir) } }) It("should create compressed files", func() { err := copyTestFiles("a.txt", tempDir) Expect(err).ToNot(HaveOccurred()) err = copyTestFiles("b.txt", tempDir) Expect(err).ToNot(HaveOccurred()) err = compressFiles() Expect(err).ToNot(HaveOccurred()) files, err := os.ReadDir(tempDir) Expect(files).To(HaveLen(2)) fileNames := make([]string, len(files)) for i, f := range files { fileNames[i] = f.Name() } Expect(err).ToNot(HaveOccurred()) Expect(fileNames).To(ContainElements("a.txt.gz", "b.txt.gz")) Expect(fileNames).ToNot(ContainElement("a.txt")) Expect(fileNames).ToNot(ContainElement("b.txt")) checkUncompressedFile("testFiles/a.txt", tempDir+"/a.txt.gz") checkUncompressedFile("testFiles/b.txt", tempDir+"/b.txt.gz") }) It("should skip compressed files", func() { err := copyTestFiles("a.txt.gz", tempDir) Expect(err).ToNot(HaveOccurred()) err = copyTestFiles("b.txt", tempDir) Expect(err).ToNot(HaveOccurred()) origFiles, err := os.ReadDir(tempDir) Expect(err).ToNot(HaveOccurred()) Expect(origFiles).To(HaveLen(2)) origAInfo, err := origFiles[0].Info() 
Expect(err).ToNot(HaveOccurred()) origBInfo, err := origFiles[1].Info() Expect(err).ToNot(HaveOccurred()) // to create file mod time differences time.Sleep(time.Second) err = compressFiles() Expect(err).ToNot(HaveOccurred()) files, err := os.ReadDir(tempDir) Expect(err).ToNot(HaveOccurred()) Expect(files).To(HaveLen(2)) By("checking that the compressed file was not changed") aInfo, err := files[0].Info() Expect(err).ToNot(HaveOccurred()) Expect(aInfo.ModTime()).Should(Equal(origAInfo.ModTime())) By("checking that the uncompressed file was changed") bInfo, err := files[1].Info() Expect(err).ToNot(HaveOccurred()) Expect(bInfo.ModTime().After(origBInfo.ModTime())).Should(BeTrue()) }) It("should skip directories", func() { err := os.Mkdir(tempDir+"/a", 0644) Expect(err).ToNot(HaveOccurred()) err = copyTestFiles("b.txt", tempDir) Expect(err).ToNot(HaveOccurred()) origFiles, err := os.ReadDir(tempDir) Expect(err).ToNot(HaveOccurred()) Expect(origFiles).To(HaveLen(2)) origAInfo, err := origFiles[0].Info() Expect(err).ToNot(HaveOccurred()) origBInfo, err := origFiles[1].Info() Expect(err).ToNot(HaveOccurred()) // to create file mod time differences time.Sleep(time.Second) err = compressFiles() Expect(err).ToNot(HaveOccurred()) files, err := os.ReadDir(tempDir) Expect(err).ToNot(HaveOccurred()) Expect(files).To(HaveLen(2)) By("checking that the directory was not changed") Expect(files[0].IsDir()).Should(BeTrue()) aInfo, err := files[0].Info() Expect(err).ToNot(HaveOccurred()) Expect(aInfo.ModTime()).Should(Equal(origAInfo.ModTime())) By("checking that the uncompressed file was changed") Expect(files[1].IsDir()).Should(BeFalse()) bInfo, err := files[1].Info() Expect(err).ToNot(HaveOccurred()) Expect(bInfo.ModTime().After(origBInfo.ModTime())).Should(BeTrue()) }) It("should not compress files in sub directories", func() { subDir := tempDir + "/files" err := os.Mkdir(subDir, 0744) Expect(err).ToNot(HaveOccurred()) err = copyTestFiles("a.txt", subDir) 
Expect(err).ToNot(HaveOccurred()) origFiles, err := os.ReadDir(subDir) Expect(err).ToNot(HaveOccurred()) Expect(origFiles).To(HaveLen(1)) origAInfo, err := origFiles[0].Info() Expect(err).ToNot(HaveOccurred()) // to create file mod time differences time.Sleep(time.Second) err = compressFiles() Expect(err).ToNot(HaveOccurred()) files, err := os.ReadDir(subDir) Expect(err).ToNot(HaveOccurred()) Expect(files).To(HaveLen(1)) By("checking that the directory was not changed") Expect(origAInfo.Name()).Should(Equal(files[0].Name())) aInfo, err := files[0].Info() Expect(err).ToNot(HaveOccurred()) Expect(aInfo.ModTime()).Should(Equal(origAInfo.ModTime())) }) }) Context("Test validatePort", func() { It("Should accept valid port number", func() { Expect(validatePort("1234")).ShouldNot(HaveOccurred()) Expect(validatePort("8080")).ShouldNot(HaveOccurred()) }) It("Should reject alphabetic strings", func() { Expect(validatePort("ABCD")).Should(HaveOccurred()) }) It("Should reject alphanumeric strings", func() { Expect(validatePort("AB12")).Should(HaveOccurred()) Expect(validatePort("12AB")).Should(HaveOccurred()) }) It("Should reject floating points number strings", func() { Expect(validatePort("12.34")).Should(HaveOccurred()) }) It("Should reject port number of zero", func() { Expect(validatePort("0")).Should(HaveOccurred()) }) It("Should reject negative port numbers", func() { Expect(validatePort("-1234")).Should(HaveOccurred()) }) It("Should reject too large port numbers", func() { var port uint32 = math.MaxUint16 Expect(validatePort(fmt.Sprint(port))).ShouldNot(HaveOccurred()) port++ Expect(validatePort(fmt.Sprint(port))).Should(HaveOccurred()) }) }) Context("Test http", func() { tempDir := "" BeforeEach(func() { var err error tempDir, err = os.MkdirTemp(".", "cliDlTemp.") Expect(err).ToNot(HaveOccurred()) fileServerDir = tempDir err = copyTestFiles("a.txt.gz", tempDir) Expect(err).ShouldNot(HaveOccurred()) err = copyTestFiles("b.txt.gz", tempDir) 
Expect(err).ShouldNot(HaveOccurred()) }) AfterEach(func() { if tempDir != "" { _ = os.RemoveAll(tempDir) } }) Context("getGzipFile - Positive Tests", func() { It("should serve a compressed file", func() { req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"a.txt", nil) req.Header.Set("Accept-Encoding", "gzip") w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusOK)) Expect(w.Header().Get("Content-Encoding")).Should(Equal("gzip")) a, err := os.Open("testFiles/a.txt") Expect(err).ShouldNot(HaveOccurred()) zw, err := gzip.NewReader(w.Body) Expect(err).ShouldNot(HaveOccurred()) compareFiles(1, a, zw) }) It("should serve a compressed file as uncompressed", func() { req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"a.txt", nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusOK)) Expect(w.Header().Get("Content-Encoding")).Should(BeEmpty()) a, err := os.Open("testFiles/a.txt") Expect(err).ShouldNot(HaveOccurred()) compareFiles(1, a, w.Body) }) It("should serve an uncompressed", func() { req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"a.txt.gz", nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusOK)) Expect(w.Header().Get("Content-Encoding")).Should(BeEmpty()) a, err := os.Open("testFiles/a.txt") Expect(err).ShouldNot(HaveOccurred()) zw, err := gzip.NewReader(w.Body) Expect(err).ShouldNot(HaveOccurred()) compareFiles(1, a, zw) }) }) Context("getGzipFile - Negative Tests", func() { It("should not found if the path is in sub directory", func() { req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"subdir/a.txt", nil) req.Header.Set("Accept-Encoding", "gzip") w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusNotFound)) }) It("should not found if not found", func() { req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"notFound.txt", nil) 
req.Header.Set("Accept-Encoding", "gzip") w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusNotFound)) }) It("should not found if it's a directory", func() { Expect(os.Mkdir(tempDir+"/dir.gz", 0644)).ToNot(HaveOccurred()) req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"dir", nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusNotFound)) }) It("should not found if it's a directory (gzip header)", func() { Expect(os.Mkdir(tempDir+"/dir.gz", 0644)).ToNot(HaveOccurred()) req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"dir", nil) req.Header.Set("Accept-Encoding", "gzip") w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusNotFound)) }) It("should not found if it's a directory (request gz)", func() { Expect(os.Mkdir(tempDir+"/dir.gz", 0644)).ToNot(HaveOccurred()) req := httptest.NewRequest(http.MethodGet, FILE_SERVER_API_PATH+"dir.gz", nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusNotFound)) }) }) Context("test methodFilter", func() { It("should reject for non-GET methods", func() { req := httptest.NewRequest(http.MethodPut, FILE_SERVER_API_PATH+"a.txt", nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusMethodNotAllowed)) }) It("should allow HEAD method", func() { req := httptest.NewRequest(http.MethodHead, FILE_SERVER_API_PATH+"a.txt", nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusOK)) }) It("should allow OPTIONS method", func() { req := httptest.NewRequest(http.MethodOptions, FILE_SERVER_API_PATH+"a.txt", nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusNoContent)) }) }) Context("test health", func() { req := httptest.NewRequest(http.MethodGet, HEALTH_API_PATH, nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) 
Expect(w.Code).Should(Equal(http.StatusOK)) }) Context("test ready", func() { req := httptest.NewRequest(http.MethodGet, READY_API_PATH, nil) w := httptest.NewRecorder() mux.ServeHTTP(w, req) Expect(w.Code).Should(Equal(http.StatusOK)) }) }) }) func copyTestFiles(fileName, tempDir string) error { in, err := os.Open("testFiles/" + fileName) if err != nil { return err } defer in.Close() out, err := os.OpenFile(tempDir+"/"+fileName, os.O_CREATE|os.O_WRONLY, 0466) if err != nil { return err } defer out.Close() fileCopier := io.TeeReader(in, out) _, err = io.ReadAll(fileCopier) if err != nil { return err } return nil } func checkUncompressedFile(orig, compressed string) { origReader, err := os.Open(orig) ExpectWithOffset(1, err).ShouldNot(HaveOccurred()) defer origReader.Close() compReader, err := os.Open(compressed) ExpectWithOffset(1, err).ShouldNot(HaveOccurred()) defer compReader.Close() zreader, err := gzip.NewReader(compReader) ExpectWithOffset(1, err).ShouldNot(HaveOccurred()) compareFiles(2, origReader, zreader) } func compareFiles(offet int, f1, f2 io.Reader) { f1Bytes, err := io.ReadAll(f1) ExpectWithOffset(offet, err).ShouldNot(HaveOccurred()) f2Bytes, err := io.ReadAll(f2) ExpectWithOffset(offet, err).ShouldNot(HaveOccurred()) ExpectWithOffset(offet, f1Bytes).Should(Equal(f2Bytes)) }
package main

import (
	"html/template"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// templates holds every parsed template under templates/; loaded once at
// startup so handlers never re-parse per request.
var templates *template.Template

// main parses the templates, wires the router, and serves on :8000.
// ListenAndServe only returns on failure, so its error is fatal.
func main() {
	templates = template.Must(template.ParseGlob("templates/*.html"))
	r := mux.NewRouter()
	r.HandleFunc("/", handler).Methods("GET")
	http.Handle("/", r)
	// Bug fix: the listen error was previously discarded, so a failed
	// bind (e.g. port in use) exited silently with status 0.
	log.Fatal(http.ListenAndServe(":8000", nil))
}

// handler renders index.html. Render errors are logged rather than
// ignored; by the time ExecuteTemplate fails, part of the response may
// already be written, so we cannot send a clean error status.
func handler(w http.ResponseWriter, r *http.Request) {
	if err := templates.ExecuteTemplate(w, "index.html", nil); err != nil {
		log.Printf("rendering index.html: %v", err)
	}
}
// Copyright 2015 Google Inc. All rights reserved. // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. package main import ( "testing" "github.com/GoogleCloudPlatform/golang-samples/internal/testutil" ) func TestListBuckets(t *testing.T) { tc := testutil.SystemTest(t) buckets, err := ListBuckets(tc.ProjectID) if err != nil { t.Errorf("error while listing buckets: %s", err) } if len(buckets) <= 0 { t.Error("want non-empty list of buckets") } }
package expsocket

import (
	"fmt"
	"html/template"
	"log"
	"net/http"
	"strings"
	"sync"

	"golang.org/x/net/websocket"
)

const host = ":8080"

// message is a single chat message exchanged as JSON over the websocket.
type message struct {
	Text   string `json:"text"`
	Author string `json:"author"`
}

var (
	// connMu guards connections: the map is mutated by each connection's
	// handler goroutine (echo) and iterated by the broadcast goroutine
	// (handleMessage). Unsynchronized access was a data race.
	connMu      sync.Mutex
	connections = make(map[*websocket.Conn]bool)
	broadcast   = make(chan *message)
)

// ServeWebSimpleSocket starts the broadcast fan-out goroutine and serves
// the chat page on "/" and the websocket endpoint on "/ws".
// It blocks forever; ListenAndServe failures are fatal.
func ServeWebSimpleSocket() {
	go handleMessage()
	http.Handle("/ws", websocket.Handler(echo))
	http.HandleFunc("/", index)
	log.Fatal(http.ListenAndServe(host, nil))
}

// echo registers a new websocket connection, announces it, pumps its
// messages until it disconnects, then unregisters it.
func echo(ws *websocket.Conn) {
	connMu.Lock()
	connections[ws] = true
	connMu.Unlock()

	remAdd := ws.Request().RemoteAddr
	sendBroadcast(&message{
		Text:   fmt.Sprintf("new user connected %s", remAdd),
		Author: "System",
	})

	defer ws.Close()
	handleConnection(ws)

	connMu.Lock()
	delete(connections, ws)
	connMu.Unlock()
}

// handleConnection reads messages from one client and rebroadcasts them,
// skipping blank messages, until a read error (e.g. disconnect) occurs.
func handleConnection(ws *websocket.Conn) {
	for {
		req, err := receive(ws)
		if err != nil {
			fmt.Println(err)
			break
		}

		if strings.Trim(req.Text, " ") == "" {
			continue
		}
		fmt.Println(req)
		sendBroadcast(req)
	}
}

// sendBroadcast hands a message to the fan-out goroutine.
func sendBroadcast(message *message) {
	broadcast <- message
}

// receive decodes one JSON message from the websocket.
func receive(ws *websocket.Conn) (*message, error) {
	var mes message
	if err := websocket.JSON.Receive(ws, &mes); err != nil {
		return nil, err
	}
	return &mes, nil
}

// handleMessage fans each broadcast message out to every registered
// connection. The map is locked for the duration of the fan-out so
// registrations cannot race the iteration.
func handleMessage() {
	for {
		mes := <-broadcast
		connMu.Lock()
		for connection := range connections {
			if err := websocket.JSON.Send(connection, mes); err != nil {
				// NOTE(review): a failed connection stays registered and
				// will be retried on the next broadcast — consider pruning.
				fmt.Println(err)
				continue
			}
		}
		connMu.Unlock()
	}
}

// index renders the chat page. Bug fix: a template parse failure used to
// call log.Fatal inside the handler, killing the whole server; now it
// returns a 500 to the one affected client instead.
func index(w http.ResponseWriter, r *http.Request) {
	t, err := template.ParseFiles("./exphttp/socket/index.html")
	if err != nil {
		log.Println(err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
		return
	}
	if err := t.ExecuteTemplate(w, "index", nil); err != nil {
		log.Println(err)
	}
}
package types

// User is an account record as serialized over the JSON API.
// Note that the password field marshals under the key "passwd",
// not "password".
type User struct {
	Username string `json:"username"` // login name
	Password string `json:"passwd"`   // NOTE(review): appears to round-trip in plain text — confirm callers hash it
	Active   bool   `json:"active"`   // whether the account is enabled
}
// Copyright (c) 2018-2020 The qitmeer developers // Copyright (c) 2013-2017 The btcsuite developers // Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wallet import ( "encoding/json" "errors" "fmt" "github.com/Qitmeer/qitmeer-wallet/json/qitmeerjson" "github.com/Qitmeer/qitmeer/common/marshal" "github.com/Qitmeer/qitmeer/core/message" "github.com/Qitmeer/qitmeer/crypto/ecc" "os" "sort" "strconv" "strings" "sync" "time" "github.com/Qitmeer/qitmeer-wallet/util" wt "github.com/Qitmeer/qitmeer-wallet/types" "github.com/Qitmeer/qitmeer/common/hash" "github.com/Qitmeer/qitmeer/core/address" corejson "github.com/Qitmeer/qitmeer/core/json" "github.com/Qitmeer/qitmeer/core/types" "github.com/Qitmeer/qitmeer/engine/txscript" "github.com/Qitmeer/qitmeer/log" chaincfg "github.com/Qitmeer/qitmeer/params" "github.com/Qitmeer/qitmeer-wallet/config" clijson "github.com/Qitmeer/qitmeer-wallet/json" "github.com/Qitmeer/qitmeer-wallet/utils" waddrmgr "github.com/Qitmeer/qitmeer-wallet/waddrmgs" "github.com/Qitmeer/qitmeer-wallet/wallet/txrules" "github.com/Qitmeer/qitmeer-wallet/walletdb" "github.com/Qitmeer/qitmeer-wallet/wtxmgr" ) const ( // InsecurePubPassphrase is the default outer encryption passphrase used // for public data (everything but private keys). Using a non-default // public passphrase can prevent an attacker without the public // passphrase from discovering all past and future wallet addresses if // they gain access to the wallet database. // // NOTE: at time of writing, public encryption only applies to public // data in the waddrmgr namespace. Transactions are not yet encrypted. InsecurePubPassphrase = "public" webUpdateBlockTicker = 30 defaultNewAddressNumber = 1 ) var ( // Namespace bucket keys. 
waddrmgrNamespaceKey = []byte("waddrmgr") wtxmgrNamespaceKey = []byte("wtxmgr") ) var UploadRun = false type Wallet struct { cfg *config.Config // Data stores db walletdb.DB Manager *waddrmgr.Manager TxStore *wtxmgr.Store HttpClient *httpConfig // Channels for the manager locker. unlockRequests chan unlockRequest lockRequests chan struct{} lockState chan bool chainParams *chaincfg.Params wg sync.WaitGroup started bool quit chan struct{} quitMu sync.Mutex SyncHeight int32 } // Start starts the goroutines necessary to manage a wallet. func (w *Wallet) Start() { w.quitMu.Lock() select { case <-w.quit: // Restart the wallet goroutines after shutdown finishes. w.WaitForShutdown() w.quit = make(chan struct{}) default: // Ignore when the wallet is still running. if w.started { w.quitMu.Unlock() return } w.started = true } w.quitMu.Unlock() w.wg.Add(1) go w.walletLocker() go func() { updateBlockTicker := time.NewTicker(webUpdateBlockTicker * time.Second) for { select { case <-updateBlockTicker.C: if UploadRun == false { log.Trace("Updateblock start") UploadRun = true err := w.UpdateBlock(0) if err != nil { log.Error("Start.Updateblock err", "err", err.Error()) } UploadRun = false } } } }() } // quitChan atomically reads the quit channel. func (w *Wallet) quitChan() <-chan struct{} { w.quitMu.Lock() c := w.quit w.quitMu.Unlock() return c } // Stop signals all wallet goroutines to shutdown. func (w *Wallet) Stop() { w.quitMu.Lock() quit := w.quit w.quitMu.Unlock() select { case <-quit: default: close(quit) } } // ShuttingDown returns whether the wallet is currently in the process of // shutting down or not. func (w *Wallet) ShuttingDown() bool { select { case <-w.quitChan(): return true default: return false } } // WaitForShutdown blocks until all wallet goroutines have finished executing. func (w *Wallet) WaitForShutdown() { w.wg.Wait() } type ( unlockRequest struct { passphrase []byte lockAfter <-chan time.Time // nil prevents the timeout. 
err chan error } ) type Balance struct { TotalAmount types.Amount // 总数 SpendAmount types.Amount // 已花费 UnspendAmount types.Amount //未花费 ConfirmAmount types.Amount //待确认 } // AccountBalanceResult is a single result for the Wallet.AccountBalances method. type AccountBalanceResult struct { AccountNumber uint32 AccountName string AccountBalance types.Amount } type AccountAndAddressResult struct { AccountNumber uint32 AccountName string AddrsOutput []AddrAndAddrTxOutput } type AddrAndAddrTxOutput struct { Addr string balance Balance Txoutput []wtxmgr.AddrTxOutput } // ImportPrivateKey imports a private key to the wallet and writes the new // wallet to disk. // // NOTE: If a block stamp is not provided, then the wallet's birthday will be // set to the genesis block of the corresponding chain. func (w *Wallet) ImportPrivateKey(scope waddrmgr.KeyScope, wif *utils.WIF) (string, error) { manager, err := w.Manager.FetchScopedKeyManager(scope) if err != nil { return "", err } // Attempt to import private key into wallet. var addr types.Address err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error { addrMgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey) maddr, err := manager.ImportPrivateKey(addrMgrNs, wif) if err != nil { return err } addr = maddr.Address() _, err = manager.AccountProperties( addrMgrNs, waddrmgr.ImportedAddrAccount, ) if err != nil { return err } return nil }) if err != nil { return "", err } addrStr := addr.Encode() log.Trace("ImportPrivateKey succ", "address", addrStr) // Return the payment address string of the imported private key. return addrStr, nil } // ChainParams returns the network parameters for the blockchain the wallet // belongs to. func (w *Wallet) ChainParams() *chaincfg.Params { return w.chainParams } // Database returns the underlying walletdb database. This method is provided // in order to allow applications wrapping btcwallet to store app-specific data // with the wallet's database. 
func (w *Wallet) Database() walletdb.DB { return w.db } func Create(db walletdb.DB, pubPass, privPass, seed []byte, params *chaincfg.Params, birthday time.Time) error { // If a seed was provided, ensure that it is of valid length. Otherwise, // we generate a random seed for the wallet with the recommended seed // length. return walletdb.Update(db, func(tx walletdb.ReadWriteTx) error { addrMgrNs, err := tx.CreateTopLevelBucket(waddrmgrNamespaceKey) if err != nil { return err } txmgrNs, err := tx.CreateTopLevelBucket(wtxmgrNamespaceKey) if err != nil { return err } err = waddrmgr.Create( addrMgrNs, seed, pubPass, privPass, params, nil, birthday, ) if err != nil { return err } return wtxmgr.Create(txmgrNs) }) } // Open loads an already-created wallet from the passed database and namespaces. func Open(db walletdb.DB, pubPass []byte, _ *waddrmgr.OpenCallbacks, params *chaincfg.Params, _ uint32, cfg *config.Config) (*Wallet, error) { var ( addrMgr *waddrmgr.Manager txMgr *wtxmgr.Store ) // Before attempting to open the wallet, we'll check if there are any // database upgrades for us to proceed. We'll also create our references // to the address and transaction managers, as they are backed by the // database. 
err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error { addrMgrBucket := tx.ReadWriteBucket(waddrmgrNamespaceKey) if addrMgrBucket == nil { return errors.New("missing address manager namespace") } txMgrBucket := tx.ReadWriteBucket(wtxmgrNamespaceKey) if txMgrBucket == nil { return errors.New("missing transaction manager namespace") } var err error addrMgr, err = waddrmgr.Open(addrMgrBucket, pubPass, params) if err != nil { return err } txMgr, err = wtxmgr.Open(txMgrBucket, params) if err != nil { return err } return nil }) if err != nil { return nil, err } log.Trace("Opened wallet") w := &Wallet{ cfg: cfg, db: db, Manager: addrMgr, TxStore: txMgr, unlockRequests: make(chan unlockRequest), lockRequests: make(chan struct{}), lockState: make(chan bool), chainParams: params, quit: make(chan struct{}), } return w, nil } func (w *Wallet) GetTx(txId string) (corejson.TxRawResult, error) { trx := corejson.TxRawResult{} err := walletdb.View(w.db, func(tx walletdb.ReadTx) error { ns := tx.ReadBucket(wtxmgrNamespaceKey) txNs := ns.NestedReadBucket(wtxmgr.BucketTxJson) k, err := hash.NewHashFromStr(txId) if err != nil { return err } v := txNs.Get(k.Bytes()) if v != nil { err := json.Unmarshal(v, &trx) if err != nil { return err } } else { return errors.New("GetTx fail ") } return nil }) if err != nil { return trx, err } return trx, nil } func (w *Wallet) GetAccountAndAddress(scope waddrmgr.KeyScope) ([]AccountAndAddressResult, error) { manager, err := w.Manager.FetchScopedKeyManager(scope) if err != nil { return nil, err } var results []AccountAndAddressResult err = walletdb.View(w.db, func(tx walletdb.ReadTx) error { addrNs := tx.ReadBucket(waddrmgrNamespaceKey) lastAcct, err := manager.LastAccount(addrNs) if err != nil { return err } results = make([]AccountAndAddressResult, lastAcct+2) for i := range results[:len(results)-1] { accountName, err := manager.AccountName(addrNs, uint32(i)) if err != nil { return err } results[i].AccountNumber = uint32(i) 
results[i].AccountName = accountName } results[len(results)-1].AccountNumber = waddrmgr.ImportedAddrAccount results[len(results)-1].AccountName = waddrmgr.ImportedAddrAccountName for k := range results { adds, err := w.AccountAddresses(results[k].AccountNumber) if err != nil { return err } var addrOutputs []AddrAndAddrTxOutput for _, addr := range adds { addrOutput, err := w.getAddrAndAddrTxOutputByAddr(addr.Encode()) if err != nil { return err } addrOutputs = append(addrOutputs, *addrOutput) } results[k].AddrsOutput = addrOutputs } return nil }) if err != nil { return nil, err } return results, err } func (w *Wallet) getAddrAndAddrTxOutputByAddr(addr string) (*AddrAndAddrTxOutput, error) { ato := AddrAndAddrTxOutput{} b := Balance{} var txOuts wtxmgr.AddrTxOutputs err := walletdb.View(w.db, func(tx walletdb.ReadTx) error { hs := []byte(addr) ns := tx.ReadBucket(wtxmgrNamespaceKey) outNs := ns.NestedReadBucket(wtxmgr.BucketAddrtxout) hsOutNs := outNs.NestedReadBucket(hs) if hsOutNs != nil { err := hsOutNs.ForEach(func(k, v []byte) error { to := wtxmgr.AddrTxOutput{} err := wtxmgr.ReadAddrTxOutput(v, &to) if err != nil { return err } txOuts = append(txOuts, to) return nil }) if err != nil { return err } } return nil }) if err != nil { return nil, err } sort.Sort(sort.Reverse(txOuts)) var spendAmount types.Amount var unspentAmount types.Amount var totalAmount types.Amount var confirmAmount types.Amount for _, txOut := range txOuts { if txOut.Spend == wtxmgr.SpendStatusSpend { spendAmount += txOut.Amount } else if txOut.Spend == wtxmgr.SpendStatusUnconfirmed { totalAmount += txOut.Amount confirmAmount += txOut.Amount } else { totalAmount += txOut.Amount unspentAmount += txOut.Amount } } b.UnspendAmount = unspentAmount b.SpendAmount = spendAmount b.TotalAmount = totalAmount b.ConfirmAmount = confirmAmount ato.Addr = addr ato.balance = b ato.Txoutput = txOuts return &ato, nil } const ( PageUseDefault = -1 PageDefaultNo = 1 PageDefaultSize = 10 PageMaxSize = 1000000000 
FilterIn = 0 FilterOut = 1 FilterAll = 2 ) /** request all the transactions that affect a specific address, a transaction can have MULTIPLE payments and affect MULTIPLE addresses sType 0 Turn in 1 Turn out 2 all no page */ func (w *Wallet) GetListTxByAddr(addr string, sType int, pageNo int, pageSize int) (*clijson.PageTxRawResult, error) { bill, err := w.getPagedBillByAddr(addr, sType, pageNo, pageSize) if err != nil { return nil, err } result := clijson.PageTxRawResult{} result.Page = int32(pageNo) result.PageSize = int32(pageSize) result.Total = int32(bill.Len()) var transactions []corejson.TxRawResult err = walletdb.View(w.db, func(tx walletdb.ReadTx) error { ns := tx.ReadBucket(wtxmgrNamespaceKey) txNs := ns.NestedReadBucket(wtxmgr.BucketTxJson) for _, b := range *bill { txHs := b.TxID v := txNs.Get(txHs.Bytes()) if v == nil { return fmt.Errorf("db uploadblock err tx:%s non-existent", txHs.String()) } var txr corejson.TxRawResult err := json.Unmarshal(v, &txr) if err != nil { return err } transactions = append(transactions, txr) } return nil }) if err != nil { return nil, err } result.Transactions = transactions return &result, nil } // request the bill of a specific address, a bill is the log of payments, // which are the effects that a transaction makes on a specific address // a payment can affect only ONE address func (w *Wallet) GetBillByAddr(addr string, filter int, pageNo int, pageSize int) (*clijson.PagedBillResult, error) { bill, err := w.getPagedBillByAddr(addr, filter, pageNo, pageSize) if err != nil { return nil, err } res := clijson.PagedBillResult{} res.PageNo = int32(pageNo) res.PageSize = int32(pageSize) res.Total = int32(bill.Len()) for _, p := range *bill { res.Bill = append(res.Bill, clijson.PaymentResult{ TxID: p.TxID.String(), Variation: p.Variation, }) } return &res, nil } func (w *Wallet) getPagedBillByAddr(addr string, filter int, pageNo int, pageSize int) (*wt.Bill, error) { at, err := w.getAddrAndAddrTxOutputByAddr(addr) if err != nil 
{ return nil, err } if pageNo == 0 { pageNo = PageDefaultNo } if pageSize == 0 { pageSize = PageDefaultSize } startIndex := (pageNo - 1) * pageSize var endIndex int var allTxs wt.Bill var inTxs wt.Bill var outTxs wt.Bill var dataLen int allMap := make(map[hash.Hash]wt.Payment) for _, o := range at.Txoutput { txOut, found := allMap[o.TxId] if found { txOut.Variation += int64(o.Amount) } else { txOut.TxID = o.TxId txOut.Variation = int64(o.Amount) txOut.BlockHash = o.Block.Hash txOut.BlockOrder = uint32(o.Block.Height) } //log.Debug(fmt.Sprintf("%s %v %v", o.TxId.String(), float64(o.Amount)/math.Pow10(8), float64(txOut.Amount)/math.Pow10(8))) allMap[o.TxId] = txOut if o.SpendTo != nil { txOut, found := allMap[o.SpendTo.TxHash] if found { txOut.Variation -= int64(o.Amount) } else { txOut.TxID = o.SpendTo.TxHash txOut.Variation = -int64(o.Amount) // ToDo: add Block to SpendTo txOut.BlockHash = o.Block.Hash txOut.BlockOrder = uint32(o.Block.Height) } allMap[o.SpendTo.TxHash] = txOut //log.Debug(fmt.Sprintf("%s %v %v", o.SpendTo.TxHash.String(), float64(-o.Amount)/math.Pow10(8), float64(txOut.Amount)/math.Pow10(8))) } } for _, out := range allMap { if out.Variation > 0 { inTxs = append(inTxs, out) } else { outTxs = append(outTxs, out) } } switch filter { case FilterIn: allTxs = inTxs case FilterOut: allTxs = outTxs case FilterAll: allTxs = append(inTxs, outTxs...) 
// NOTE(review): the lines down to the first closing brace are the tail of a
// paging/filter function whose beginning lies outside this chunk; code is
// unchanged, comments only.
default:
		return nil, fmt.Errorf("err filter:%d", filter)
	}
	sort.Sort(allTxs)
	dataLen = len(allTxs)
	if pageNo < 0 {
		// Negative page number means "no paging": reset to defaults and
		// return the whole (sorted) result set.
		pageNo = PageDefaultNo
		pageSize = PageMaxSize
	} else {
		if startIndex > dataLen {
			return nil, fmt.Errorf("no data, index:%d len:%d", startIndex, dataLen)
		} else {
			// Clamp the page window to the end of the data.
			if (startIndex + pageSize) > dataLen {
				endIndex = dataLen
			} else {
				endIndex = startIndex + pageSize
			}
			allTxs = allTxs[startIndex:endIndex]
		}
	}
	return &allTxs, nil
}

// GetBalance returns the cached balance for addr by delegating to
// getAddrAndAddrTxOutputByAddr. An empty addr is rejected.
func (w *Wallet) GetBalance(addr string) (*Balance, error) {
	if addr == "" {
		return nil, errors.New("addr is nil")
	}
	res, err := w.getAddrAndAddrTxOutputByAddr(addr)
	if err != nil {
		return nil, err
	}
	return &res.balance, nil
}

// GetTxSpendInfo looks up the stored raw transaction for txId and returns one
// AddrTxOutput (spend status record) per output of that transaction.
// NOTE(review): this only reads from the DB but opens a ReadWrite
// transaction — presumably for bucket-API convenience; confirm.
func (w *Wallet) GetTxSpendInfo(txId string) ([]*wtxmgr.AddrTxOutput, error) {
	var atos []*wtxmgr.AddrTxOutput
	txHash, err := hash.NewHashFromStr(txId)
	if err != nil {
		return nil, err
	}
	err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
		rb := tx.ReadWriteBucket(wtxmgrNamespaceKey)
		txNrb := rb.NestedReadWriteBucket(wtxmgr.BucketTxJson)
		outNrb := rb.NestedReadWriteBucket(wtxmgr.BucketAddrtxout)
		v := txNrb.Get(txHash.Bytes())
		if v == nil {
			return fmt.Errorf("txid does not exist")
		}
		var txr corejson.TxRawResult
		err := json.Unmarshal(v, &txr)
		if err != nil {
			return err
		}
		for i, vOut := range txr.Vout {
			// Assumes every output has at least one address —
			// TODO confirm for non-standard scripts.
			addr := vOut.ScriptPubKey.Addresses[0]
			top := types.TxOutPoint{
				Hash:     *txHash,
				OutIndex: uint32(i),
			}
			var ato, err = w.TxStore.GetAddrTxOut(outNrb, addr, top)
			if err != nil {
				return err
			}
			ato.Address = addr
			atos = append(atos, ato)
		}
		return err
	})
	if err != nil {
		return nil, err
	}
	return atos, nil
}

// insertTx atomically stores raw transactions (by tx hash), inserts their
// outputs, and marks any outputs referenced by txins as spent.
func (w *Wallet) insertTx(txins []wtxmgr.TxInputPoint, txouts []wtxmgr.AddrTxOutput, trrs []corejson.TxRawResult) error {
	err := walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
		ns := tx.ReadWriteBucket(wtxmgrNamespaceKey)
		txNs := ns.NestedReadWriteBucket(wtxmgr.BucketTxJson)
		outNs := ns.NestedReadWriteBucket(wtxmgr.BucketAddrtxout)
		// 1) Persist each raw transaction JSON keyed by its hash.
		for _, tr := range trrs {
			k, err := hash.NewHashFromStr(tr.Txid)
			if err != nil {
				return err
			}
			v, err := json.Marshal(tr)
			if err != nil {
				return err
			}
			ks := k.Bytes()
			err = txNs.Put(ks, v)
			if err != nil {
				return err
			}
		}
		// 2) Insert all new outputs.
		for _, txo := range txouts {
			err := w.TxStore.InsertAddrTxOut(outNs, &txo)
			if err != nil {
				return err
			}
		}
		// 3) Mark outputs consumed by the new inputs as spent. Inputs whose
		// referenced transaction is unknown to this wallet are skipped.
		for _, txi := range txins {
			v := txNs.Get(txi.TxOutPoint.Hash.Bytes())
			if v == nil {
				continue
			}
			var txr corejson.TxRawResult
			err := json.Unmarshal(v, &txr)
			if err != nil {
				return err
			}
			addr := txr.Vout[txi.TxOutPoint.OutIndex].ScriptPubKey.Addresses[0]
			spendOut, err := w.TxStore.GetAddrTxOut(outNs, addr, txi.TxOutPoint)
			if err != nil {
				return err
			}
			spendOut.Spend = wtxmgr.SpendStatusSpend
			spendOut.Address = addr
			spendOut.SpendTo = &txi.SpendTo
			err = w.TxStore.UpdateAddrTxOut(outNs, spendOut)
			if err != nil {
				return err
			}
		}
		return nil
	})
	return err
}

// SyncTx fetches the block at the given order from the node, parses its
// transactions, and stores them locally. Blocks with Txsvalid == false are
// skipped without error.
func (w *Wallet) SyncTx(order int64) (clijson.BlockHttpResult, error) {
	var block clijson.BlockHttpResult
	blockByte, err := w.HttpClient.getBlockByOrder(order)
	if err != nil {
		return block, err
	}
	if err := json.Unmarshal(blockByte, &block); err == nil {
		if !block.Txsvalid {
			log.Trace(fmt.Sprintf("block:%v err,txsvalid is false", block.Hash))
			return block, nil
		}
		// "Blue" blocks are the ones on the honest DAG subgraph; non-blue
		// blocks are still recorded, only logged here.
		isBlue, err := w.HttpClient.isBlue(block.Hash)
		if err != nil {
			return block, err
		}
		block.IsBlue = isBlue
		if !block.IsBlue {
			log.Trace(fmt.Sprintf("block:%v is not blue", block.Hash))
		}
		txIns, txOuts, trRs, err := parseBlockTxs(block)
		if err != nil {
			return block, err
		}
		err = w.insertTx(txIns, txOuts, trRs)
		if err != nil {
			return block, err
		}
	} else {
		log.Error(err.Error())
		return block, err
	}
	return block, nil
}

// parseTx converts one raw transaction into wallet input points (spends) and
// address outputs. Coinbase inputs are skipped; outputs with no decodable
// address are ignored.
func parseTx(tr corejson.TxRawResult, height int32, isBlue bool) ([]wtxmgr.TxInputPoint, []wtxmgr.AddrTxOutput, error) {
	var txins []wtxmgr.TxInputPoint
	var txouts []wtxmgr.AddrTxOutput
	blockhash, err := hash.NewHashFromStr(tr.BlockHash)
	if err != nil {
		return nil, nil, err
	}
	block := wtxmgr.Block{
		Hash:   *blockhash,
		Height: height,
	}
	txId, err := hash.NewHashFromStr(tr.Txid)
	if err != nil {
		return nil, nil, err
	}
	// Outputs start unspent; downgrade to unconfirmed below the
	// configured confirmation threshold.
	spend := wtxmgr.SpendStatusUnspent
	if tr.Confirmations < config.Cfg.Confirmations {
		spend = wtxmgr.SpendStatusUnconfirmed
	}
	for i, vi := range tr.Vin {
		if vi.Coinbase != "" {
			continue
		}
		if vi.Txid == "" && vi.Vout == 0 {
			continue
		} else {
			hs, err := hash.NewHashFromStr(vi.Txid)
			if err != nil {
				return nil, nil, err
			} else {
				txOutPoint := types.TxOutPoint{
					Hash:     *hs,
					OutIndex: vi.Vout,
				}
				spendTo := wtxmgr.SpendTo{
					Index:  uint32(i),
					TxHash: *txId,
				}
				txIn := wtxmgr.TxInputPoint{
					TxOutPoint: txOutPoint,
					SpendTo:    spendTo,
				}
				txins = append(txins, txIn)
				// NOTE(review): this resets spend for ANY tx with a
				// real input, overriding the confirmation check above —
				// confirm this is intentional.
				spend = wtxmgr.SpendStatusUnspent
			}
		}
	}
	for index, vo := range tr.Vout {
		if len(vo.ScriptPubKey.Addresses) == 0 {
			continue
		} else {
			txOut := wtxmgr.AddrTxOutput{
				Address: vo.ScriptPubKey.Addresses[0],
				TxId:    *txId,
				Index:   uint32(index),
				Amount:  types.Amount(vo.Amount),
				Block:   block,
				Spend:   spend,
				IsBlue:  isBlue,
			}
			txouts = append(txouts, txOut)
		}
	}
	return txins, txouts, nil
}

// parseBlockTxs runs parseTx over every transaction in the block and
// concatenates the results, also returning the raw transactions themselves.
func parseBlockTxs(block clijson.BlockHttpResult) ([]wtxmgr.TxInputPoint, []wtxmgr.AddrTxOutput, []corejson.TxRawResult, error) {
	var txIns []wtxmgr.TxInputPoint
	var txOuts []wtxmgr.AddrTxOutput
	var tx []corejson.TxRawResult
	for _, tr := range block.Transactions {
		tx = append(tx, tr)
		tin, tout, err := parseTx(tr, block.Order, block.IsBlue)
		if err != nil {
			return nil, nil, nil, err
		} else {
			txIns = append(txIns, tin...)
			txOuts = append(txOuts, tout...)
		}
	}
	return txIns, txOuts, tx, nil
}

// GetSyncBlockHeight returns the height the address manager has synced to.
func (w *Wallet) GetSyncBlockHeight() int32 {
	height := w.Manager.SyncedTo().Height
	return height
}

// SetSyncedToNum force-sets the manager's synced-to stamp to the block at
// the given order (fetched from the node), without syncing transactions.
func (w *Wallet) SetSyncedToNum(order int64) error {
	var block clijson.BlockHttpResult
	blockByte, err := w.HttpClient.getBlockByOrder(order)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(blockByte, &block); err == nil {
		if !block.Txsvalid {
			log.Trace(fmt.Sprintf("block:%v err,txsvalid is false", block.Hash))
			return nil
		}
		hs, err := hash.NewHashFromStr(block.Hash)
		if err != nil {
			return fmt.Errorf("blockhash string to hash err:%s", err.Error())
		}
		stamp := &waddrmgr.BlockStamp{Hash: *hs, Height: block.Order}
		err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
			ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
			err := w.Manager.SetSyncedTo(ns, stamp)
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}
		return nil
	} else {
		log.Error(err.Error())
		return err
	}
}

// handleBlockSynced syncs one block's transactions and, once the block has
// enough confirmations, advances the manager's synced-to stamp.
func (w *Wallet) handleBlockSynced(order int64) error {
	br, er := w.SyncTx(order)
	if er != nil {
		return er
	}
	hs, err := hash.NewHashFromStr(br.Hash)
	if err != nil {
		return fmt.Errorf("blockhash string to hash err:%s", err.Error())
	}
	if br.Confirmations > config.Cfg.Confirmations {
		stamp := &waddrmgr.BlockStamp{Hash: *hs, Height: br.Order}
		err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
			ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
			err := w.Manager.SetSyncedTo(ns, stamp)
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}

// UpdateBlock syncs blocks from the current local height up to toHeight
// (0 means "up to the node's current block count"), printing progress to
// stdout as it goes.
func (w *Wallet) UpdateBlock(toHeight int64) error {
	var blockCount string
	var err error
	if toHeight == 0 {
		blockCount, err = w.HttpClient.getblockCount()
		if err != nil {
			return err
		}
	} else {
		blockCount = strconv.FormatInt(toHeight, strIntBase)
	}
	blockHeight, err := strconv.ParseInt(blockCount, strIntBase, strIntBitSize32)
	if err != nil {
		return err
	}
	h := int64(w.Manager.SyncedTo().Height)
	if h < blockHeight {
		log.Trace(fmt.Sprintf("localheight:%d,blockHeight:%d", h, blockHeight))
		for h < blockHeight {
			err := w.handleBlockSynced(h)
			if err != nil {
				return err
			} else {
				w.SyncHeight = int32(h)
				_, _ = fmt.Fprintf(os.Stdout, "update blcok:%s/%s\r", strconv.FormatInt(h, 10), strconv.FormatInt(blockHeight-1, 10))
				h++
			}
		}
		fmt.Print("\nsucc\n")
	} else {
		fmt.Println("Block data is up to date")
	}
	return nil
}

// NextAccount creates the next account and returns its account number. The
// name must be unique to the account. In order to support automatic seed
// restoring, new accounts may not be created when all of the previous 100
// accounts have no transaction history (this is a deviation from the BIP0044
// spec, which allows no unused account gaps).
func (w *Wallet) NextAccount(scope waddrmgr.KeyScope, name string) (uint32, error) {
	manager, err := w.Manager.FetchScopedKeyManager(scope)
	if err != nil {
		return 0, err
	}
	var (
		account uint32
	)
	err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
		addrMgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
		var err error
		account, err = manager.NewAccount(addrMgrNs, name)
		if err != nil {
			return err
		}
		_, err = manager.AccountProperties(addrMgrNs, account)
		return err
	})
	if err != nil {
		log.Error("Cannot fetch new account properties for notification "+
			"after account creation", "err", err)
		return account, err
	}
	return account, err
}

// AccountBalances returns all accounts in the wallet and their balances.
// Balances are determined by excluding transactions that have not met
// requiredConfs confirmations.
func (w *Wallet) AccountBalances(scope waddrmgr.KeyScope) ([]AccountBalanceResult, error) {
	aaaRs, err := w.GetAccountAndAddress(scope)
	if err != nil {
		return nil, err
	}
	results := make([]AccountBalanceResult, len(aaaRs))
	for index, aaa := range aaaRs {
		results[index].AccountNumber = aaa.AccountNumber
		results[index].AccountName = aaa.AccountName
		// Sum the unspent amount across all addresses of the account.
		unSpendAmount := types.Amount(0)
		for _, addr := range aaa.AddrsOutput {
			unSpendAmount = unSpendAmount + addr.balance.UnspendAmount
		}
		results[index].AccountBalance = unSpendAmount
	}
	return results, nil
}

// AccountNumber returns the account number for an account name under a
// particular key scope.
func (w *Wallet) AccountNumber(scope waddrmgr.KeyScope, accountName string) (uint32, error) {
	manager, err := w.Manager.FetchScopedKeyManager(scope)
	if err != nil {
		return 0, err
	}
	var account uint32
	err = walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		addrMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
		var err error
		account, err = manager.LookupAccount(addrMgrNs, accountName)
		return err
	})
	return account, err
}

// NewAddress returns the next external chained address for a wallet.
func (w *Wallet) NewAddress(
	scope waddrmgr.KeyScope,
	account uint32) (types.Address, error) {
	var (
		addr types.Address
	)
	err := walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
		addrMgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
		var err error
		addr, _, err = w.newAddress(addrMgrNs, account, scope)
		return err
	})
	if err != nil {
		return nil, err
	}
	return addr, nil
}

// newAddress derives the next external address for account within scope and
// returns it together with the account's refreshed properties. Must run
// inside an open read-write walletdb transaction.
func (w *Wallet) newAddress(addrMgrNs walletdb.ReadWriteBucket, account uint32,
	scope waddrmgr.KeyScope) (types.Address, *waddrmgr.AccountProperties, error) {
	manager, err := w.Manager.FetchScopedKeyManager(scope)
	if err != nil {
		return nil, nil, err
	}
	// Get next address from wallet.
	addr, err := manager.NextExternalAddresses(addrMgrNs, account, defaultNewAddressNumber)
	if err != nil {
		return nil, nil, err
	}
	props, err := manager.AccountProperties(addrMgrNs, account)
	if err != nil {
		log.Error(fmt.Sprintf("Cannot fetch account properties for notification "+
			"after deriving next external address: %v", err))
		return nil, nil, err
	}
	return addr[0].Address(), props, nil
}

// DumpWIFPrivateKey returns the WIF encoded private key for a
// single wallet address.
func (w *Wallet) DumpWIFPrivateKey(addr types.Address) (string, error) {
	var maddr waddrmgr.ManagedAddress
	err := walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		waddrMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
		// Get private key from wallet if it exists.
		var err error
		maddr, err = w.Manager.Address(waddrMgrNs, addr)
		return err
	})
	if err != nil {
		return "", err
	}
	// Only pubkey-backed addresses can export a private key.
	pka, ok := maddr.(waddrmgr.ManagedPubKeyAddress)
	if !ok {
		return "", fmt.Errorf("address %s is not a key type", addr)
	}
	wif, err := pka.ExportPrivKey()
	if err != nil {
		return "", err
	}
	return wif.String(), nil
}

// getPrivateKey returns the managed pubkey address record for addr, from
// which the caller can obtain the private key (wallet must be unlocked).
func (w *Wallet) getPrivateKey(addr types.Address) (waddrmgr.ManagedPubKeyAddress, error) {
	var maddr waddrmgr.ManagedAddress
	err := walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		waddrMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
		// Get private key from wallet if it exists.
		var err error
		maddr, err = w.Manager.Address(waddrMgrNs, addr)
		return err
	})
	if err != nil {
		return nil, err
	}
	pka, ok := maddr.(waddrmgr.ManagedPubKeyAddress)
	if !ok {
		return nil, fmt.Errorf("address %s is not a key type", addr)
	}
	return pka, nil
}

// Unlock unlocks the wallet's address manager and relocks it after timeout has
// expired. If the wallet is already unlocked and the new passphrase is
// correct, the current timeout is replaced with the new one. The wallet will
// be locked if the passphrase is incorrect or any other error occurs during the
// unlock.
func (w *Wallet) Unlock(passphrase []byte, lock <-chan time.Time) error {
	log.Trace("wallet Unlock")
	// The request is serviced by the walletLocker goroutine; the buffered
	// channel receives exactly one error result.
	err := make(chan error, 1)
	w.unlockRequests <- unlockRequest{
		passphrase: passphrase,
		lockAfter:  lock,
		err:        err,
	}
	log.Trace("wallet Unlock end")
	return <-err
}

//// Lock locks the wallet's address manager.
func (w *Wallet) Lock() {
	w.lockRequests <- struct{}{}
}

//// Locked returns whether the account manager for a wallet is locked.
func (w *Wallet) Locked() bool {
	return <-w.lockState
}

// UnLockManager unlocks the address manager directly, bypassing the
// walletLocker goroutine (no relock timeout is armed).
func (w *Wallet) UnLockManager(passphrase []byte) error {
	err := walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		addrMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
		return w.Manager.Unlock(addrMgrNs, passphrase)
	})
	if err != nil {
		return err
	}
	return nil
}

// walletLocker manages the locked/unlocked state of a wallet.
// It services unlock requests, lock requests, lock-state queries, and the
// optional relock timeout from a single goroutine so the manager's
// locked/unlocked transitions are serialized.
func (w *Wallet) walletLocker() {
	var timeout <-chan time.Time
	quit := w.quitChan()
out:
	for {
		select {
		case req := <-w.unlockRequests:
			err := walletdb.View(w.db, func(tx walletdb.ReadTx) error {
				addMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
				return w.Manager.Unlock(addMgrNs, req.passphrase)
			})
			if err != nil {
				req.err <- err
				continue
			}
			// A nil lockAfter means the wallet stays unlocked until an
			// explicit Lock request.
			timeout = req.lockAfter
			if timeout == nil {
				log.Info("The wallet has been unlocked without a time limit")
			} else {
				log.Info("The wallet has been temporarily unlocked")
			}
			req.err <- nil
			continue
		case w.lockState <- w.Manager.IsLocked():
			continue
		case <-quit:
			break out
		case <-w.lockRequests:
		case <-timeout:
		}
		// Select statement fell through by an explicit lock or the
		// timer expiring. Lock the manager here.
		timeout = nil
		err := w.Manager.Lock()
		if err != nil && !waddrmgr.IsError(err, waddrmgr.ErrLocked) {
			log.Error("Could not lock wallet: ", err.Error())
		} else {
			log.Info("The wallet has been locked")
		}
	}
	w.wg.Done()
}

// AccountAddresses returns the addresses for every created address for an
// account.
func (w *Wallet) AccountAddresses(account uint32) (addrs []types.Address, err error) {
	err = walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		addrMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
		return w.Manager.ForEachAccountAddress(addrMgrNs, account, func(mAddr waddrmgr.ManagedAddress) error {
			addrs = append(addrs, mAddr.Address())
			return nil
		})
	})
	return
}

// AccountOfAddress finds the account that an address is associated with.
func (w *Wallet) AccountOfAddress(a types.Address) (uint32, error) {
	var account uint32
	err := walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		addrMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
		var err error
		_, account, err = w.Manager.AddrAccount(addrMgrNs, a)
		return err
	})
	return account, err
}

// AccountName returns the name of an account.
func (w *Wallet) AccountName(scope waddrmgr.KeyScope, accountNumber uint32) (string, error) {
	manager, err := w.Manager.FetchScopedKeyManager(scope)
	if err != nil {
		return "", err
	}
	var accountName string
	err = walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		addrMgrNs := tx.ReadBucket(waddrmgrNamespaceKey)
		var err error
		accountName, err = manager.AccountName(addrMgrNs, accountNumber)
		return err
	})
	return accountName, err
}

// GetUtxo returns the unspent outputs recorded for addr. It first collects
// all stored outputs for the address, then filters to unspent ones.
func (w *Wallet) GetUtxo(addr string) ([]wtxmgr.UTxo, error) {
	var txouts []wtxmgr.AddrTxOutput
	var utxos []wtxmgr.UTxo
	err := walletdb.View(w.db, func(tx walletdb.ReadTx) error {
		hs := []byte(addr)
		ns := tx.ReadBucket(wtxmgrNamespaceKey)
		outns := ns.NestedReadBucket(wtxmgr.BucketAddrtxout)
		hsoutns := outns.NestedReadBucket(hs)
		if hsoutns != nil {
			_ = hsoutns.ForEach(func(k, v []byte) error {
				to := wtxmgr.AddrTxOutput{}
				err := wtxmgr.ReadAddrTxOutput(v, &to)
				if err != nil {
					log.Error("readAddrTxOutput err", "err", err.Error())
					return err
				}
				txouts = append(txouts, to)
				return nil
			})
		}
		return nil
	})
	if err != nil {
		log.Error("ReadAddrTxOutput err", "err", err)
		return nil, err
	}
	for _, txout := range txouts {
		uo := wtxmgr.UTxo{}
		if txout.Spend == wtxmgr.SpendStatusUnspent {
			uo.TxId = txout.TxId.String()
			uo.Index = txout.Index
			uo.Amount = txout.Amount
			utxos = append(utxos, uo)
		}
	}
	return utxos, nil
}

// Sendoutputs can only be accessed by a single thread at the same time to prevent the referenced utxo from being referenced again under the concurrency
var syncSendOutputs = new(sync.Mutex)

// SendOutputs creates and sends payment transactions. It returns the
// transaction upon success.
// Coin selection walks all (or, for AccountMergePayNum, every) account's
// unspent mature outputs, first covering the payment amount, then the relay
// fee, emitting change back to the funding address as it goes.
func (w *Wallet) SendOutputs(outputs []*types.TxOutput, account int64, satPerKb types.Amount) (*string, error) {
	// Ensure the outputs to be created adhere to the network's consensus
	// rules.
	syncSendOutputs.Lock()
	defer syncSendOutputs.Unlock()
	tx := types.NewTransaction()
	payAmount := types.Amount(0)
	feeAmount := int64(0)
	for _, output := range outputs {
		if err := txrules.CheckOutput(output, satPerKb); err != nil {
			return nil, err
		}
		payAmount = payAmount + types.Amount(output.Amount)
		tx.AddTxOut(output)
	}
	aaars, err := w.GetAccountAndAddress(waddrmgr.KeyScopeBIP0044)
	if err != nil {
		return nil, err
	}
	var sendAddrTxOutput []wtxmgr.AddrTxOutput
	//var prk string
b:
	for _, aaar := range aaars {
		// AccountMergePayNum spends from every account; otherwise only the
		// requested account participates.
		if int64(aaar.AccountNumber) != account && account != waddrmgr.AccountMergePayNum {
			continue
		}
		for _, addroutput := range aaar.AddrsOutput {
			log.Trace(fmt.Sprintf("addr:%s,unspend:%v", addroutput.Addr, addroutput.balance.UnspendAmount))
			if addroutput.balance.UnspendAmount > 0 {
				addr, err := address.DecodeAddress(addroutput.Addr)
				if err != nil {
					return nil, err
				}
				frompkscipt, err := txscript.PayToAddrScript(addr)
				if err != nil {
					return nil, err
				}
				addrByte := []byte(addroutput.Addr)
				for _, output := range addroutput.Txoutput {
					output.Address = addroutput.Addr
					// Coinbase outputs only become spendable after
					// CoinbaseMaturity confirmations.
					mature := false
					if outTx, err := w.GetTx(output.TxId.String()); err == nil {
						if outTx.Vin[0].IsCoinBase() {
							if blockByte, err := w.HttpClient.getBlockByOrder(int64(output.Block.Height)); err == nil {
								var block clijson.BlockHttpResult
								if err := json.Unmarshal(blockByte, &block); err == nil {
									if block.Confirmations >= int64(w.chainParams.CoinbaseMaturity) {
										mature = true
									}
								}
							}
						} else {
							mature = true
						}
					}
					if output.Spend == wtxmgr.SpendStatusUnspent && mature {
						// Phase 1: still gathering funds for the payment.
						if payAmount > 0 && feeAmount == 0 {
							if output.Amount > payAmount {
								// This single output covers the payment:
								// add it, compute the fee, and try to take
								// the fee out of the change.
								input := types.NewOutPoint(&output.TxId, output.Index)
								tx.AddTxIn(types.NewTxInput(input, addrByte))
								selfTxOut := types.NewTxOutput(uint64(output.Amount-payAmount), frompkscipt)
								feeAmount = util.CalcMinRequiredTxRelayFee(int64(tx.SerializeSize()+selfTxOut.SerializeSize()), types.Amount(config.Cfg.MinTxFee))
								sendAddrTxOutput = append(sendAddrTxOutput, output)
								if (output.Amount - payAmount - types.Amount(feeAmount)) >= 0 {
									selfTxOut.Amount = uint64(output.Amount - payAmount - types.Amount(feeAmount))
									if selfTxOut.Amount > 0 {
										tx.AddTxOut(selfTxOut)
									}
									payAmount = 0
									feeAmount = 0
									break b
								} else {
									// Change cannot cover the fee; emit the
									// change and keep collecting for the fee.
									selfTxOut.Amount = uint64(output.Amount - payAmount)
									payAmount = 0
									tx.AddTxOut(selfTxOut)
								}
							} else {
								// Output only partially covers the payment.
								input := types.NewOutPoint(&output.TxId, output.Index)
								tx.AddTxIn(types.NewTxInput(input, addrByte))
								sendAddrTxOutput = append(sendAddrTxOutput, output)
								payAmount = payAmount - output.Amount
								if payAmount == 0 {
									feeAmount = util.CalcMinRequiredTxRelayFee(int64(tx.SerializeSize()), types.Amount(config.Cfg.MinTxFee))
								}
							}
						} else if payAmount == 0 && feeAmount > 0 {
							// Phase 2: payment covered, now fund the fee.
							if output.Amount >= types.Amount(feeAmount) {
								input := types.NewOutPoint(&output.TxId, output.Index)
								tx.AddTxIn(types.NewTxInput(input, addrByte))
								selfTxOut := types.NewTxOutput(uint64(output.Amount-types.Amount(feeAmount)), frompkscipt)
								if selfTxOut.Amount > 0 {
									tx.AddTxOut(selfTxOut)
								}
								sendAddrTxOutput = append(sendAddrTxOutput, output)
								feeAmount = 0
								break b
							} else {
								log.Trace("utxo < feeAmount")
							}
						} else {
							log.Trace(fmt.Sprintf("system err payAmount :%v ,feeAmount :%v\n", payAmount, feeAmount))
							return nil, fmt.Errorf("system err payAmount :%v ,feeAmount :%v\n", payAmount, feeAmount)
						}
					}
				}
			}
		}
		//}
	}
	// Anything left over means the wallet could not fully fund payment+fee.
	if payAmount.ToCoin() != types.Amount(0).ToCoin() || feeAmount != 0 {
		log.Trace("payAmount", "payAmount", payAmount)
		log.Trace("feeAmount", "feeAmount", feeAmount)
		return nil, fmt.Errorf("balance is not enough,please deduct the service charge:%v", types.Amount(feeAmount).ToCoin())
	}
	signTx, err := w.multiAddressMergeSign(*tx, w.chainParams.Name)
	if err != nil {
		return nil, err
	}
	log.Trace(fmt.Sprintf("signTx size:%v", len(signTx)), "signTx", signTx)
	msg, err := w.HttpClient.SendRawTransaction(signTx, false)
	if err != nil {
		log.Trace("SendRawTransaction txSign err ", "err", err.Error())
		return nil, err
	} else {
		msg = strings.ReplaceAll(msg, "\"", "")
		log.Trace("SendRawTransaction txSign response msg", "msg", msg)
	}
	// Mark the consumed outputs as spent locally so they are not selected
	// again before the network confirms the spend.
	err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
		ns := tx.ReadWriteBucket(wtxmgrNamespaceKey)
		outns := ns.NestedReadWriteBucket(wtxmgr.BucketAddrtxout)
		for _, txoutput := range sendAddrTxOutput {
			txoutput.Spend = wtxmgr.SpendStatusSpend
			err = w.TxStore.UpdateAddrTxOut(outns, &txoutput)
			if err != nil {
				log.Error("UpdateAddrTxOut to spend err", "err", err.Error())
				return err
			}
		}
		log.Trace("UpdateAddrTxOut to spend succ ")
		return nil
	})
	if err != nil {
		log.Error("UpdateAddrTxOut to spend err", "err", err.Error())
		return nil, err
	}
	return &msg, nil
}

// Multi address merge signature
// multiAddressMergeSign signs each input of redeemTx with the private key of
// the address stored in that input's SignScript, then serializes the signed
// transaction to hex.
func (w *Wallet) multiAddressMergeSign(redeemTx types.Transaction, network string) (string, error) {
	var param *chaincfg.Params
	switch network {
	case "mainnet":
		param = &chaincfg.MainNetParams
	case "testnet":
		param = &chaincfg.TestNetParams
	case "privnet":
		param = &chaincfg.PrivNetParams
	case "mixnet":
		param = &chaincfg.MixNetParams
	}
	var sigScripts [][]byte
	for i := range redeemTx.TxIn {
		// By convention the unsigned input carries the funding address
		// bytes in SignScript (set in SendOutputs).
		addrByte := redeemTx.TxIn[i].SignScript
		addr, err := address.DecodeAddress(string(addrByte))
		if err != nil {
			return "", err
		}
		pri, err := w.getPrivateKey(addr)
		if err != nil {
			return "", err
		}
		priKey, err := pri.PrivKey()
		if err != nil {
			return "", err
		}
		// Create a new script which pays to the provided address.
		pkScript, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return "", err
		}
		var kdb txscript.KeyClosure = func(types.Address) (ecc.PrivateKey, bool, error) {
			return priKey, true, nil // compressed is true
		}
		sigScript, err := txscript.SignTxOutput(param, &redeemTx, i, pkScript, txscript.SigHashAll, kdb, nil, nil, ecc.ECDSA_Secp256k1)
		if err != nil {
			return "", err
		}
		sigScripts = append(sigScripts, sigScript)
	}
	// Install the signatures only after all inputs signed successfully.
	for i2 := range sigScripts {
		redeemTx.TxIn[i2].SignScript = sigScripts[i2]
	}
	mtxHex, err := marshal.MessageToHex(&message.MsgTx{Tx: &redeemTx})
	if err != nil {
		return "", err
	}
	return mtxHex, nil
}

//sendPairs creates and sends payment transactions.
//It returns the transaction hash in string format upon success
//All errors are returned in btcjson.RPCError format
func (w *Wallet) SendPairs(amounts map[string]types.Amount, account int64,
	feeSatPerKb types.Amount) (string, error) {
	// Refuse to spend from a wallet that is not synced with the chain.
	check, err := w.HttpClient.CheckSyncUpdate(int64(w.Manager.SyncedTo().Height))
	if check == false {
		return "", err
	}
	outputs, err := makeOutputs(amounts)
	if err != nil {
		return "", err
	}
	tx, err := w.SendOutputs(outputs, account, feeSatPerKb)
	if err != nil {
		if err == txrules.ErrAmountNegative {
			return "", qitmeerjson.ErrNeedPositiveAmount
		}
		if waddrmgr.IsError(err, waddrmgr.ErrLocked) {
			return "", &qitmeerjson.ErrWalletUnlockNeeded
		}
		switch err.(type) {
		case qitmeerjson.RPCError:
			return "", err
		}
		return "", &qitmeerjson.RPCError{
			Code:    qitmeerjson.ErrRPCInternal.Code,
			Message: err.Error(),
		}
	}
	return *tx, nil
}

// makeOutputs creates a slice of transaction outputs from a pair of address
// strings to amounts. This is used to create the outputs to include in newly
// created transactions from a JSON object describing the output destinations
// and amounts.
func makeOutputs(pairs map[string]types.Amount) ([]*types.TxOutput, error) { outputs := make([]*types.TxOutput, 0, len(pairs)) for addrStr, amt := range pairs { addr, err := address.DecodeAddress(addrStr) if err != nil { return nil, fmt.Errorf("cannot decode address: %s,address:%s", err, addrStr) } pkScript, err := txscript.PayToAddrScript(addr) if err != nil { return nil, fmt.Errorf("cannot create txout script: %s", err) } outputs = append(outputs, types.NewTxOutput(uint64(amt), pkScript)) } return outputs, nil }
package cmd

import (
	"log"

	"github.com/grrtrr/exit"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// init wires up the `nic` command tree: `nic add` and `nic rm` manage a
// server's secondary network interface.
func init() {
	var (
		nic = &cobra.Command{ // Top-level NIC command
			Use:   "nic",
			Short: "Manage server NICs",
			Long:  "Add or remove server secondary network interface",
			PersistentPreRunE: func(cmd *cobra.Command, args []string) error { // Both commands take <serverName> <net ID | name | CIDR>
				if len(args) < 2 {
					return errors.Errorf("Need a server name and a network specifier " +
						"(network ID/name/CIDR, or IP on the network) for the secondary NIC")
				}
				// Side effect: selects the datacenter location from the server name.
				setLocationBasedOnServerName(args[0])
				return nil
			},
		}

		// Flags shared by the add sub-command.
		addNICFlags struct {
			ip string // IP address to assign to the secondary NIC
		}

		addNIC = &cobra.Command{
			Use:   "add <serverName> <net (ID | Name | CIDR | IP)>",
			Short: "Add a secondary NIC to server",
			Long:  "Add a secondary NIC to @server on network @net (using network ID, name, CIDR, or and IP on the network)",
			Run: func(cmd *cobra.Command, args []string) {
				var server, netID = args[0], args[1]

				// Resolve the user-supplied specifier to a network ID where possible;
				// a nil network with no error means the specifier is passed through as-is.
				network, err := resolveNet(netID, conf.Location)
				if err != nil {
					exit.Errorf("failed to resolve %s: %s", netID, err)
				} else if network != nil {
					netID = network.Id
				}
				log.Printf("Adding %s NIC on network %s ...", server, netID)
				if err = client.ServerAddNic(server, netID, addNICFlags.ip); err != nil {
					log.Fatalf("failed to add NIC to %s: %s", server, err)
				}
				log.Printf("Successfully added NIC to server %s", server)
			},
		}

		removeNIC = &cobra.Command{
			Use:     "rm <serverName> <net (ID | Name | CIDR | IP)>",
			Aliases: []string{"remove", "del"},
			Short:   "Remove secondary NIC from server",
			Long:    "Remove secondary NIC identified by @net (network ID, name, CIDR, or an IP on the network) from @serverName",
			Run: func(cmd *cobra.Command, args []string) {
				var server, netID = args[0], args[1]

				network, err := resolveNet(netID, conf.Location)
				if err != nil {
					exit.Errorf("failed to resolve %s: %s", netID, err)
				} else if network != nil {
					netID = network.Id
				}
				log.Printf("Deleting %s NIC on network %s ...", server, netID)
				if err = client.ServerDelNic(server, netID); err != nil {
					log.Fatalf("failed to remove NIC from %s: %s", server, err)
				}
				log.Printf("Successfully removed NIC from server %s", server)
			},
		}
	)
	addNIC.Flags().StringVar(&addNICFlags.ip, "ip", "", "IP address to use with NIC (optional, default is automatic assignment)")
	nic.AddCommand(addNIC)
	nic.AddCommand(removeNIC)
	Root.AddCommand(nic)
}
package main

import (
	"database/sql"
	"os"

	_ "github.com/lib/pq"
)

// db is the shared connection pool, configured from DATABASE_URL.
var db *sql.DB

func init() {
	var err error
	// sql.Open only validates its arguments; the first real connection is
	// established lazily on first use.
	db, err = sql.Open("postgres", os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
}

// retrieveAll loads every row of the users table into a slice.
func retrieveAll() (users []User, err error) {
	rows, err := db.Query("SELECT id, name, is_paid FROM users")
	if err != nil {
		return
	}
	// BUG FIX: close the result set even when Scan fails part-way; the
	// original only called Close on the success path, leaking the rows
	// (and its connection) on early return.
	defer rows.Close()
	for rows.Next() {
		user := User{}
		err = rows.Scan(&user.Id, &user.Name, &user.IsPaid)
		if err != nil {
			return
		}
		users = append(users, user)
	}
	// BUG FIX: surface any iteration error that terminated the loop; the
	// original never checked rows.Err(), silently truncating results.
	err = rows.Err()
	return
}
package resolver

import (
	"github.com/dalloriam/synthia/core"
	"github.com/dalloriam/websynth/app/audio"
)

// KnobResolver exposes a synthia core.Knob through the resolver API.
type KnobResolver struct {
	sys  *audio.System // owning audio system, passed through to sub-resolvers
	knob *core.Knob    // the knob being resolved
}

// Value returns the knob's current value.
func (r *KnobResolver) Value() float64 {
	return r.knob.GetValue()
}

// Set stores args.Value on the knob and returns the value read back from it.
func (r *KnobResolver) Set(args struct{ Value float64 }) float64 {
	r.knob.SetValue(args.Value)
	return r.Value()
}

// Line wraps the knob's signal line in a SignalResolver.
func (r *KnobResolver) Line() *SignalResolver {
	return &SignalResolver{r.sys, &r.knob.Line}
}
package main

import "fmt"

// main demonstrates the %T verb: it prints the dynamic type of an
// integer variable ("int").
func main() {
	var age = 56
	fmt.Printf("%T", age)
}
package main

import (
	"log"

	"github.com/codegangsta/cli"
	"github.com/fsouza/go-dockerclient"
	"github.com/mcuadros/go-version"
)

// doNetworks prunes unused Docker networks on the endpoint configured via
// the CLI context. Requires Docker API 1.10+.
func doNetworks(c *cli.Context) {
	client, err := docker.NewClient(c.GlobalString("endpoint"))
	if err != nil {
		log.Fatal(err)
	}
	ver, err := client.Version()
	if err != nil {
		// BUG FIX: the original ignored this error and dereferenced ver,
		// crashing with a nil pointer when the daemon is unreachable.
		log.Fatal(err)
	}
	if version.Compare(ver.Get("ApiVersion"), "1.10", "<") {
		log.Fatal("Network clean only works on Docker 1.10 and newer")
	}
	// BUG FIX: report prune failures instead of silently discarding them.
	if _, err := client.PruneNetworks(docker.PruneNetworksOptions{}); err != nil {
		log.Fatal(err)
	}
}
package bucket

import "testing"

const (
	BucketSize     = 5
	BucketListSize = 5
	MaxIdxProduct  = 5
)

// myBucket is constructed once so the benchmarks measure only the cost of
// the ReceiveOrder* operations, not construction.
var myBucket = New(BucketSize, BucketListSize, MaxIdxProduct)

// BenchmarkBucket_ReceiveOrderSlow measures the baseline implementation.
// FIX: dropped the redundant b.StartTimer() that followed b.ResetTimer() —
// the timer is already running when a benchmark begins, and ResetTimer only
// zeroes the elapsed time without stopping it.
func BenchmarkBucket_ReceiveOrderSlow(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		myBucket.ReceiveOrderSlow(1, 2, 3, 4, 5)
	}
}

// BenchmarkBucket_ReceiveOrderFast measures the optimized implementation.
func BenchmarkBucket_ReceiveOrderFast(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		myBucket.ReceiveOrderFast(1, 2, 3, 4, 5)
	}
}

// BenchmarkBucket_ReceiveOrder measures the default implementation.
func BenchmarkBucket_ReceiveOrder(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		myBucket.ReceiveOrder(1, 2, 3, 4, 5)
	}
}
// Copyright 2020 MongoDB Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "fmt" "os" "github.com/mitchellh/go-homedir" ) func configHome() (string, error) { if home := os.Getenv("XDG_CONFIG_HOME"); home != "" { return home, nil } home, err := homedir.Dir() if err != nil { return "", err } return fmt.Sprintf("%s/.config", home), nil }
package main

import (
	"github.com/redisTesting/deployment/analysis"
	cfg "github.com/redisTesting/internal/config"
	"github.com/redisTesting/roles/client"
	"os"
)

// main wipes any logs left from a previous run, drives the configured
// number of concurrent clients, then analyzes the logs they produced.
func main() {
	// Remove logs
	err := os.RemoveAll(cfg.Conf.LogDir)
	if err != nil {
		panic(err)
	}

	// Run n clients
	client.StartNClients(cfg.Conf.NClients)

	// Analysis
	analysis.RunAnalysis(cfg.Conf.LogDir)
}
package main

// FIX: removed the unused `import "fmt"` — it was referenced only from
// commented-out debug code, which makes the file fail to compile.

// rows[i], cols[i] and blocks[i] record which digit bytes are already
// present in row i, column i and 3x3 box i of the board.
var rows, cols, blocks []map[uint8]bool

// Node is a board coordinate that still needs a digit.
type Node struct {
	x, y int
}

// solveSudoku fills board in place so every row, column and 3x3 box
// contains '1'..'9' exactly once. Empty cells are marked '.'.
func solveSudoku(board [][]byte) {
	rows = make([]map[uint8]bool, 9)
	cols = make([]map[uint8]bool, 9)
	blocks = make([]map[uint8]bool, 9)
	for i := 0; i < 9; i++ {
		rows[i] = make(map[uint8]bool)
		cols[i] = make(map[uint8]bool)
		blocks[i] = make(map[uint8]bool)
	}
	// Seed the occupancy maps and collect the empty cells to fill.
	queue := make([]Node, 0)
	for i := 0; i < len(board); i++ {
		for t := 0; t < len(board[i]); t++ {
			char := board[i][t]
			mark(i, t, char, true)
			if char == '.' {
				queue = append(queue, Node{i, t})
			}
		}
	}
	solveSudokuExec(board, queue)
}

// mark records (flag=true) or clears (flag=false) digit char at cell (i,t)
// in the row/column/box occupancy maps.
func mark(i, t int, char uint8, flag bool) {
	rows[i][char] = flag
	cols[t][char] = flag
	blocks[i/3*3+t/3][char] = flag
}

// solveSudokuExec backtracks over the remaining empty cells: it tries each
// digit for the first cell in queue and recurses on the rest, returning true
// once a consistent assignment is found.
// (Translated from the original Chinese note: queuing the empty coordinates
// up front lets each step find the next cell to fill in O(1).)
func solveSudokuExec(board [][]byte, queue []Node) bool {
	if len(queue) == 0 {
		return true
	}
	x, y := queue[0].x, queue[0].y
	for num := uint8(1 + '0'); num <= uint8(9+'0'); num++ {
		if rows[x][num] || cols[y][num] || blocks[x/3*3+y/3][num] {
			continue
		}
		mark(x, y, num, true)
		board[x][y] = num
		if solveSudokuExec(board, queue[1:]) {
			return true
		}
		// Undo and try the next digit.
		board[x][y] = '.'
		mark(x, y, num, false)
	}
	return false
}
package rogue

import (
	"fmt"
	"log"
	// "time"

	"github.com/I82Much/rogue/combat"
	"github.com/I82Much/rogue/dungeon"
	"github.com/I82Much/rogue/gameover"
	"github.com/I82Much/rogue/monster"
	"github.com/I82Much/rogue/player"
	"github.com/I82Much/rogue/stats"
	"github.com/I82Much/rogue/title"
)

// Game is the top-level controller: it owns the currently active module
// (title, dungeon, combat, game-over) and the player state.
type Game struct {
	curModule     Module
	dungeonModule *dungeon.Controller
	player        *player.Player
	playerWpm     int // player's max typing speed (words per minute)
}

// Difficulty levels expressed as target words-per-minute.
const (
	EasyWpm         = 15
	MediumWpm       = 40
	HardWpm         = 70
	InsaneWpm       = 100
	StenographerWpm = 300
)

var (
	// difficultyMap maps title-screen difficulty choices to their WPM.
	difficultyMap = map[string]int{
		title.Easy:         EasyWpm,
		title.Medium:       MediumWpm,
		title.Hard:         HardWpm,
		title.Insane:       InsaneWpm,
		title.Stenographer: StenographerWpm,
	}
)

// lifeForMonster returns the hit points to give a monster of type t.
func (g *Game) lifeForMonster(t monster.Type) int {
	// TODO use player's level
	// TODO change it based on the monster
	return 20
}

// makeCombat builds a combat module for the given monster types.
// Panics if t is empty.
func (g *Game) makeCombat(t []monster.Type) Module {
	if len(t) == 0 {
		panic("need >= 1 monster")
	}
	player := g.player
	var monsters []*combat.Monster
	for _, m := range t {
		life := g.lifeForMonster(m)
		m1 := combat.NewMonster(life, player.MaxWPM, m)
		monsters = append(monsters, m1)
	}
	module := combat.NewModule(player, monsters)
	return module
}

// makeDungeon builds a fresh randomly generated 3x3 dungeon for p.
func makeDungeon(p *player.Player) *dungeon.Controller {
	return dungeon.NewModule(dungeon.RandomWorld(3, 3), p)
}

// NewGame creates a game showing the title screen at medium difficulty.
func NewGame() *Game {
	d := title.NewModule()
	wpm := MediumWpm
	g := &Game{
		curModule: d,
		playerWpm: wpm,
		player:    player.WithName("Player 1", wpm),
	}
	d.AddListener(g)
	return g
}

// Start starts the currently active module.
func (g *Game) Start() {
	g.curModule.Start()
}

// Stop stops the currently active module.
func (g *Game) Stop() {
	g.curModule.Stop()
}

// setWpm updates both the game's and the player's typing speed.
func (g *Game) setWpm(wpm int) {
	g.playerWpm = wpm
	g.player.MaxWPM = wpm
}

// updateStats merges combat stats into the player's running totals.
func (g *Game) updateStats(stats stats.Stats) {
	g.player.Stats.Add(stats)
}

// restart recreates the player and dungeon at the current difficulty and
// switches to the dungeon module.
func (g *Game) restart() {
	g.Stop()
	g.player = player.WithName("Player 1", g.playerWpm)
	dm := makeDungeon(g.player)
	dm.AddListener(g)
	g.dungeonModule = dm
	g.curModule = dm
	g.Start()
}

// Listen handles the state transitions between the different modules.
func (g *Game) Listen(e string, extra interface{}) { log.Printf("got event %v", e) switch e { case gameover.Restart: g.restart() // Title screen case title.Easy, title.Medium, title.Hard, title.Insane, title.Stenographer: g.setWpm(difficultyMap[e]) g.restart() case dungeon.EnterCombat: g.Stop() types := extra.([]monster.Type) c := g.makeCombat(types) c.AddListener(g) g.curModule = c g.Start() // Combat case combat.PlayerDied: g.Stop() c := gameover.NewModule() c.AddListener(g) g.curModule = c g.Start() case combat.AllMonstersDied: g.updateStats(extra.(stats.Stats)) g.Stop() // Check to see if we've completed the game g.dungeonModule.ReplaceMonsterWithPlayer() g.dungeonModule.MaybeUnlockCurrentRoom() if g.dungeonModule.HasWon() { win := gameover.NewWinModule(g.player) win.AddListener(g) g.curModule = win } else { g.curModule = g.dungeonModule } g.Start() default: fmt.Errorf("unknown event: %v\n", e) g.Stop() } }
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Person pairs a name with any number of e-mail addresses.
type Person struct {
	Name  Name
	Email []Email
}

// Name holds a first and last name.
type Name struct {
	First string
	Last  string
}

// Email is a labelled e-mail address (e.g. personal / work).
type Email struct {
	Kind    string
	Address string
}

// main serializes a sample Person to person.json in the current directory.
func main() {
	person := Person{
		Name: Name{First: "Ууганбаяр", Last: "Сүхбаатар"},
		Email: []Email{Email{Kind: "хувийн", Address: "ubs121@gmail.com"},
			Email{Kind: "ажлын", Address: "ub@hotmail.com"}}}

	saveJSON("person.json", person)
}

// saveJSON writes key as JSON to fileName, exiting the process on any error.
func saveJSON(fileName string, key interface{}) {
	outFile, err := os.Create(fileName)
	checkError(err)
	// BUG FIX: close via defer so the file handle is released even when
	// Encode fails (the original skipped Close on the error path).
	defer outFile.Close()
	encoder := json.NewEncoder(outFile)
	err = encoder.Encode(key)
	checkError(err)
}

// checkError prints err and exits with status 1 when err is non-nil.
func checkError(err error) {
	if err != nil {
		fmt.Println("Fatal error ", err.Error())
		os.Exit(1)
	}
}
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
)

// main demonstrates that reading from a bytes.Buffer consumes it, how
// io.TeeReader mirrors reads to a writer, and byte-by-byte teeing into a
// strings.Builder. The exact print order IS the program's behavior.
func main() {
	var b bytes.Buffer // A Buffer needs no initialization.
	b.Write([]byte("Hello "))
	fmt.Fprintf(&b, "world!\n")
	// WriteTo drains b completely: both String() calls below print ''.
	_, _ = b.WriteTo(os.Stdout)
	fmt.Printf("'%s'\n", b.String())
	fmt.Printf("'%s'\n", b.String())

	// Second example with an io.TeeReader
	var c bytes.Buffer
	c.WriteString("Another buffer...\n")
	// Every read from t is also echoed to os.Stdout.
	t := io.TeeReader(&c, os.Stdout)
	fmt.Println("before ReadAll(t)")
	xc, _ := ioutil.ReadAll(t)
	fmt.Println("after ReadAll(t)")
	fmt.Printf("Contents of 'xc' as string: '%s'\n", xc)
	// c was fully consumed by ReadAll, so this prints ''.
	fmt.Printf("Re-read buffer 'c': '%s'\n", c.String())

	// Third example which is really convoluted and also features a strings.Builder
	var d bytes.Buffer
	d.WriteRune('h')
	d.WriteRune('e')
	d.WriteRune('l')
	d.WriteRune('l')
	d.WriteRune('o')

	sb := strings.Builder{}
	fmt.Printf("sb: '%#v'\n", sb)
	fmt.Printf("sb.String(): '%#v'\n", sb.String())
	// Tee d into sb one byte at a time, printing sb after each read.
	t2 := io.TeeReader(&d, &sb)
	t2p := make([]byte, 1)
	for {
		n, errRead := t2.Read(t2p)
		if errRead != nil {
			if errRead == io.EOF {
				break
			}
			log.Fatal(errRead)
		}
		fmt.Printf("n: '%d'\n", n)
		fmt.Printf("sb: '%#v'\n", sb)
		fmt.Printf("sb.String(): '%s'\n", sb.String())
		fmt.Printf("sb.String(): '%s'\n", sb.String())
	}
}
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

// Read-only (immutable) proxy objects for values kept in the host's sandboxed
// key/value object store. Each proxy addresses one value through a host
// object id plus a key id; the data itself stays on the host side and is
// fetched on demand via the Exists/GetBytes/GetObjectId/GetLength host calls.

package wasmlib

import (
	"encoding/binary"
	"strconv"
)

// ScImmutableAddress is a read-only proxy to an address value.
type ScImmutableAddress struct {
	objId int32 // id of the host object containing the value
	keyId Key32 // key under which the value is stored in that object
}

// Exists reports whether the host object holds an address under this key.
func (o ScImmutableAddress) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_ADDRESS)
}

// String implements Stringer by delegating to the proxied value.
func (o ScImmutableAddress) String() string {
	return o.Value().String()
}

// Value fetches the raw bytes from the host and decodes them into an address.
func (o ScImmutableAddress) Value() *ScAddress {
	return NewScAddressFromBytes(GetBytes(o.objId, o.keyId, TYPE_ADDRESS))
}

// ScImmutableAddressArray is a read-only proxy to a host array of addresses.
// Array elements are addressed by using the element index as the key id.
type ScImmutableAddressArray struct {
	objId int32
}

// GetAddress returns a proxy to the address at the given index.
func (o ScImmutableAddressArray) GetAddress(index int32) ScImmutableAddress {
	return ScImmutableAddress{objId: o.objId, keyId: Key32(index)}
}

// Length returns the number of elements in the host array.
func (o ScImmutableAddressArray) Length() int32 {
	return GetLength(o.objId)
}

// ScImmutableAgentId is a read-only proxy to an agent id value.
// The remaining proxy types below all follow the pattern established by
// ScImmutableAddress / ScImmutableAddressArray, differing only in the
// TYPE_* tag and the decoding applied in Value().
type ScImmutableAgentId struct {
	objId int32
	keyId Key32
}

func (o ScImmutableAgentId) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_AGENT_ID)
}

func (o ScImmutableAgentId) String() string {
	return o.Value().String()
}

func (o ScImmutableAgentId) Value() *ScAgentId {
	return NewScAgentIdFromBytes(GetBytes(o.objId, o.keyId, TYPE_AGENT_ID))
}

// ScImmutableAgentArray is a read-only proxy to a host array of agent ids.
type ScImmutableAgentArray struct {
	objId int32
}

func (o ScImmutableAgentArray) GetAgentId(index int32) ScImmutableAgentId {
	return ScImmutableAgentId{objId: o.objId, keyId: Key32(index)}
}

func (o ScImmutableAgentArray) Length() int32 {
	return GetLength(o.objId)
}

// ScImmutableBytes is a read-only proxy to a raw byte blob.
type ScImmutableBytes struct {
	objId int32
	keyId Key32
}

func (o ScImmutableBytes) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_BYTES)
}

// String renders the blob in base58, the human-readable form used here.
func (o ScImmutableBytes) String() string {
	return base58Encode(o.Value())
}

func (o ScImmutableBytes) Value() []byte {
	return GetBytes(o.objId, o.keyId, TYPE_BYTES)
}

// ScImmutableBytesArray is a read-only proxy to a host array of byte blobs.
type ScImmutableBytesArray struct {
	objId int32
}

func (o ScImmutableBytesArray) GetBytes(index int32) ScImmutableBytes {
	return ScImmutableBytes{objId: o.objId, keyId: Key32(index)}
}

func (o ScImmutableBytesArray) Length() int32 {
	return GetLength(o.objId)
}

// ScImmutableChainId is a read-only proxy to a chain id value.
type ScImmutableChainId struct {
	objId int32
	keyId Key32
}

func (o ScImmutableChainId) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_CHAIN_ID)
}

func (o ScImmutableChainId) String() string {
	return o.Value().String()
}

func (o ScImmutableChainId) Value() *ScChainId {
	return NewScChainIdFromBytes(GetBytes(o.objId, o.keyId, TYPE_CHAIN_ID))
}

// ScImmutableColor is a read-only proxy to a (token) color value.
type ScImmutableColor struct {
	objId int32
	keyId Key32
}

func (o ScImmutableColor) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_COLOR)
}

func (o ScImmutableColor) String() string {
	return o.Value().String()
}

func (o ScImmutableColor) Value() *ScColor {
	return NewScColorFromBytes(GetBytes(o.objId, o.keyId, TYPE_COLOR))
}

// ScImmutableColorArray is a read-only proxy to a host array of colors.
type ScImmutableColorArray struct {
	objId int32
}

func (o ScImmutableColorArray) GetColor(index int32) ScImmutableColor {
	return ScImmutableColor{objId: o.objId, keyId: Key32(index)}
}

func (o ScImmutableColorArray) Length() int32 {
	return GetLength(o.objId)
}

// ScImmutableContractId is a read-only proxy to a contract id value.
type ScImmutableContractId struct {
	objId int32
	keyId Key32
}

func (o ScImmutableContractId) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_CONTRACT_ID)
}

func (o ScImmutableContractId) String() string {
	return o.Value().String()
}

func (o ScImmutableContractId) Value() *ScContractId {
	return NewScContractIdFromBytes(GetBytes(o.objId, o.keyId, TYPE_CONTRACT_ID))
}

// ScImmutableHash is a read-only proxy to a hash value.
type ScImmutableHash struct {
	objId int32
	keyId Key32
}

func (o ScImmutableHash) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_HASH)
}

func (o ScImmutableHash) String() string {
	return o.Value().String()
}

func (o ScImmutableHash) Value() *ScHash {
	return NewScHashFromBytes(GetBytes(o.objId, o.keyId, TYPE_HASH))
}

// ScImmutableHashArray is a read-only proxy to a host array of hashes.
type ScImmutableHashArray struct {
	objId int32
}

func (o ScImmutableHashArray) GetHash(index int32) ScImmutableHash {
	return ScImmutableHash{objId: o.objId, keyId: Key32(index)}
}

func (o ScImmutableHashArray) Length() int32 {
	return GetLength(o.objId)
}

// ScImmutableHname is a read-only proxy to an hname (hashed name) value.
// Note: Value returns ScHname by value, not by pointer.
type ScImmutableHname struct {
	objId int32
	keyId Key32
}

func (o ScImmutableHname) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_HNAME)
}

func (o ScImmutableHname) String() string {
	return o.Value().String()
}

func (o ScImmutableHname) Value() ScHname {
	return NewScHnameFromBytes(GetBytes(o.objId, o.keyId, TYPE_HNAME))
}

// ScImmutableInt is a read-only proxy to a 64-bit integer value.
type ScImmutableInt struct {
	objId int32
	keyId Key32
}

func (o ScImmutableInt) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_INT)
}

func (o ScImmutableInt) String() string {
	return strconv.FormatInt(o.Value(), 10)
}

// Value decodes the host bytes as a little-endian 64-bit integer.
func (o ScImmutableInt) Value() int64 {
	bytes := GetBytes(o.objId, o.keyId, TYPE_INT)
	return int64(binary.LittleEndian.Uint64(bytes))
}

// ScImmutableIntArray is a read-only proxy to a host array of integers.
type ScImmutableIntArray struct {
	objId int32
}

func (o ScImmutableIntArray) GetInt(index int32) ScImmutableInt {
	return ScImmutableInt{objId: o.objId, keyId: Key32(index)}
}

func (o ScImmutableIntArray) Length() int32 {
	return GetLength(o.objId)
}

// ScImmutableMap is a read-only proxy to a host map object. Scalar getters
// simply pair this map's object id with the requested key; array and map
// getters resolve (or create) a nested host object via GetObjectId, tagging
// arrays with TYPE_*|TYPE_ARRAY and nested maps with TYPE_MAP.
type ScImmutableMap struct {
	objId int32
}

func (o ScImmutableMap) GetAddress(key MapKey) ScImmutableAddress {
	return ScImmutableAddress{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetAddressArray(key MapKey) ScImmutableAddressArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_ADDRESS|TYPE_ARRAY)
	return ScImmutableAddressArray{objId: arrId}
}

func (o ScImmutableMap) GetAgentId(key MapKey) ScImmutableAgentId {
	return ScImmutableAgentId{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetAgentIdArray(key MapKey) ScImmutableAgentArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_AGENT_ID|TYPE_ARRAY)
	return ScImmutableAgentArray{objId: arrId}
}

func (o ScImmutableMap) GetBytes(key MapKey) ScImmutableBytes {
	return ScImmutableBytes{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetBytesArray(key MapKey) ScImmutableBytesArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_BYTES|TYPE_ARRAY)
	return ScImmutableBytesArray{objId: arrId}
}

func (o ScImmutableMap) GetChainId(key MapKey) ScImmutableChainId {
	return ScImmutableChainId{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetColor(key MapKey) ScImmutableColor {
	return ScImmutableColor{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetColorArray(key MapKey) ScImmutableColorArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_COLOR|TYPE_ARRAY)
	return ScImmutableColorArray{objId: arrId}
}

func (o ScImmutableMap) GetContractId(key MapKey) ScImmutableContractId {
	return ScImmutableContractId{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetHash(key MapKey) ScImmutableHash {
	return ScImmutableHash{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetHashArray(key MapKey) ScImmutableHashArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_HASH|TYPE_ARRAY)
	return ScImmutableHashArray{objId: arrId}
}

func (o ScImmutableMap) GetHname(key MapKey) ScImmutableHname {
	return ScImmutableHname{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetInt(key MapKey) ScImmutableInt {
	return ScImmutableInt{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetIntArray(key MapKey) ScImmutableIntArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_INT|TYPE_ARRAY)
	return ScImmutableIntArray{objId: arrId}
}

func (o ScImmutableMap) GetMap(key MapKey) ScImmutableMap {
	mapId := GetObjectId(o.objId, key.KeyId(), TYPE_MAP)
	return ScImmutableMap{objId: mapId}
}

func (o ScImmutableMap) GetMapArray(key MapKey) ScImmutableMapArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_MAP|TYPE_ARRAY)
	return ScImmutableMapArray{objId: arrId}
}

func (o ScImmutableMap) GetString(key MapKey) ScImmutableString {
	return ScImmutableString{objId: o.objId, keyId: key.KeyId()}
}

func (o ScImmutableMap) GetStringArray(key MapKey) ScImmutableStringArray {
	arrId := GetObjectId(o.objId, key.KeyId(), TYPE_STRING|TYPE_ARRAY)
	return ScImmutableStringArray{objId: arrId}
}

// ScImmutableMapArray is a read-only proxy to a host array of maps.
type ScImmutableMapArray struct {
	objId int32
}

func (o ScImmutableMapArray) GetMap(index int32) ScImmutableMap {
	mapId := GetObjectId(o.objId, Key32(index), TYPE_MAP)
	return ScImmutableMap{objId: mapId}
}

func (o ScImmutableMapArray) Length() int32 {
	return GetLength(o.objId)
}

// ScImmutableString is a read-only proxy to a string value.
type ScImmutableString struct {
	objId int32
	keyId Key32
}

func (o ScImmutableString) Exists() bool {
	return Exists(o.objId, o.keyId, TYPE_STRING)
}

func (o ScImmutableString) String() string {
	return o.Value()
}

// Value returns the stored string; a missing value (nil bytes from the
// host) is normalized to the empty string.
func (o ScImmutableString) Value() string {
	bytes := GetBytes(o.objId, o.keyId, TYPE_STRING)
	if bytes == nil {
		return ""
	}
	return string(bytes)
}

// ScImmutableStringArray is a read-only proxy to a host array of strings.
type ScImmutableStringArray struct {
	objId int32
}

func (o ScImmutableStringArray) GetString(index int32) ScImmutableString {
	return ScImmutableString{objId: o.objId, keyId: Key32(index)}
}

func (o ScImmutableStringArray) Length() int32 {
	return GetLength(o.objId)
}
// A small single-binary file upload/download web server ("godw"): static
// assets come from ./template, uploaded files live in ./files, behavior is
// tuned by an optional godw.conf JSON file.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"html/template"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"
)

// CONFIG holds the process-wide configuration loaded at startup.
var CONFIG *Config

func main() {
	// Use all available CPU cores.
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Configure the log line format.
	log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
	// -------------------------------------------------------- //
	CONFIG = readConfig()
	// -------------------------------------------------------- //
	// Static assets are all served out of the template directory.
	http.Handle("/css/", http.FileServer(http.Dir("template")))
	http.Handle("/js/", http.FileServer(http.Dir("template")))
	http.Handle("/files/", http.FileServer(http.Dir("template")))
	http.Handle("/images/", http.FileServer(http.Dir("template")))
	// -------------------------------------------------------- //
	http.HandleFunc("/", index)
	http.HandleFunc("/rmfile", rmfile)
	http.HandleFunc("/upload", upload)
	http.HandleFunc("/upload/f", upload) // "/upload/f" forces overwrite, see upload()
	http.HandleFunc("/download", download)
	// -------------------------------------------------------- //
	// Serve until a fatal listener error.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Panic(err)
	}
}

var (
	currPath = GetCurrentPath() // absolute path of the project directory
)

// Size is satisfied by uploaded multipart files that can report their size.
type Size interface {
	Size() int64
}

// upload handles POST file uploads. Route "/upload/f" replaces an existing
// file of the same name; plain "/upload" instead stores the new copy under
// "name.1", "name.2", ... (up to 99).
func upload(w http.ResponseWriter, r *http.Request) {
	log.Println(r.URL.String())
	// Parse request parameters.
	r.ParseForm()
	if "POST" == r.Method {
		file, multi, err := r.FormFile("file")
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		defer file.Close()
		// Enforce the configured maximum upload size when the part
		// exposes its size.
		if sizeInterface, ok := file.(Size); ok {
			if float64(sizeInterface.Size()) > CONFIG.Size {
				http.Error(w, "超过文件大小限制", 500)
				return
			}
		}
		// Original file name as sent by the client.
		Filename := multi.Filename
		// Check whether a file with this name already exists.
		// Paths are built from currPath, the project's absolute path;
		// to use relative paths instead (files/ next to the binary),
		// "files/%s" alone would suffice — in that case main.go,
		// godw.conf, template and files must share one directory.
		if Exists(fmt.Sprintf(currPath+"/files/%s", Filename)) {
			if r.URL != nil && strings.HasSuffix(r.URL.String(), "upload/f") {
				// Overwrite mode: remove the existing file first.
				if err := os.Remove(fmt.Sprintf(currPath+"/files/%s", Filename)); err != nil {
					http.Error(w, fmt.Sprintf("WARN: [%s] %s ...", Filename, err.Error()), 500)
					return
				}
			} else {
				// Keep mode: find the first free numeric suffix.
				for i := 1; i < 100; i++ {
					if !Exists(fmt.Sprintf(currPath+"/files/%s.%d", Filename, i)) {
						Filename = fmt.Sprintf("%s.%d", Filename, i)
						break
					}
				}
			}
		}
		// Create the files directory before writing, otherwise the
		// create below would fail when it does not exist yet.
		if !Exists(currPath + "/files") {
			err = os.Mkdir(currPath+"/files", os.ModePerm)
			if err != nil {
				http.Error(w, err.Error(), 500)
				return
			}
		}
		// Stream the uploaded part into the destination file.
		f, err := os.Create(fmt.Sprintf(currPath+"/files/%s", Filename))
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		defer f.Close()
		_, err = io.Copy(f, file)
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		// Tell the client via a response header when the file was renamed.
		if Filename != multi.Filename {
			w.Header().Set("rename", Filename)
		}
	}
	// Redirect back to the index page.
	http.Redirect(w, r, "/", http.StatusFound)
	return
}

// download streams a stored file (query parameter "f") as an attachment.
func download(w http.ResponseWriter, r *http.Request) {
	// Parse request parameters.
	r.ParseForm()
	// Requested file name.
	fname := Trim(r.FormValue("f"))
	// Attachment headers.
	w.Header().Set("Content-Type", "multipart/form-data")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", fname))
	// Verify the file exists.
	if !Exists(fmt.Sprintf(currPath+"/files/%s", fname)) {
		http.Error(w, fmt.Sprintf("WARN: [%s] file not exists ...", fname), 500)
		return
	}
	// Copy the file contents into the response.
	FileRF(fmt.Sprintf(currPath+"/files/%s", fname), func(f *os.File) {
		_, err := io.Copy(w, bufio.NewReader(f))
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
	})
	return
}

// rmfile deletes a stored file (query parameter "f"); only requests that
// carry the "username" cookie (set for the admin on the index page) may
// delete. NOTE(review): this checks only cookie presence, not its value.
func rmfile(w http.ResponseWriter, r *http.Request) {
	// Require the login cookie.
	if _, err := r.Cookie("username"); err != nil {
		// Not logged in: back to the index page.
		http.Redirect(w, r, "/", http.StatusFound)
		return
	}
	// Parse request parameters.
	r.ParseForm()
	// Requested file name.
	fname := Trim(r.FormValue("f"))
	// Delete only when the file exists and the name is non-blank.
	// NOTE(review): this uses the relative "files/%s" path while upload and
	// download use currPath-based absolute paths — confirm the intended base.
	if Exists(fmt.Sprintf("files/%s", fname)) && !IsBlank(fname) {
		Fremove(fmt.Sprintf("files/%s", fname))
	}
	// Redirect back to the index page.
	http.Redirect(w, r, "/", http.StatusFound)
	return
}

/*
Shortcut taken here: file metadata should be recorded in a database or an
index file; instead the files directory is rescanned on every request,
which wastes resources.
*/

// FileInfo is one row in the index page's file table.
type FileInfo struct {
	Id   int    // sequence number within the listing
	Name string // file name
	Size string // human-readable file size
	Date string // modification (upload) date
	Stat string // permission marker (admin name when logged in)
}

// Data is the template payload for the index page.
type Data struct {
	// Permission marker.
	Stat string
	// Listed files.
	Files []*FileInfo
}

// NewData returns an empty Data ready for appending file rows.
func NewData() *Data {
	data := new(Data)
	data.Files = make([]*FileInfo, 0)
	return data
}

// index renders the main page: handles admin login via a query parameter
// and cookie, then lists the files directory (optionally filtered by the
// case-insensitive substring in query parameter "f").
func index(w http.ResponseWriter, r *http.Request) {
	// Parse request parameters.
	r.ParseForm()
	// Admin marker, empty when not logged in.
	var admin string
	// Presence of a query parameter named after the admin user logs in:
	// set the login cookie for one day.
	if _, ok := r.Form[CONFIG.Admin]; ok {
		cookie := http.Cookie{Name: "username", Value: CONFIG.Admin, Expires: time.Now().Add(24 * time.Hour)}
		http.SetCookie(w, &cookie)
		admin = CONFIG.Admin
	}
	// An existing cookie with the admin name also logs in.
	if cookie, err := r.Cookie("username"); err == nil {
		if cookie.Value == CONFIG.Admin {
			admin = cookie.Value
		}
	}
	// Optional filename filter.
	fname := Trim(r.FormValue("f"))
	// Build the template payload.
	data := NewData()
	data.Stat = admin
	// Row counter.
	var id int
	// Scan the local files directory.
	filepath.Walk("files", func(ph string, f os.FileInfo, err error) error {
		// Directory missing or entry unavailable.
		if f == nil {
			return nil
		}
		// Skip directories.
		if f.IsDir() {
			return nil
		}
		if IsBlank(fname) {
			// No filter: list every file.
			id++
			data.Files = append(data.Files, &FileInfo{id, f.Name(), unitCapacity(f.Size()), f.ModTime().String(), admin})
		} else {
			// Filter: case-insensitive substring match.
			if strings.Contains(strings.ToLower(f.Name()), strings.ToLower(fname)) {
				id++
				data.Files = append(data.Files, &FileInfo{id, f.Name(), unitCapacity(f.Size()), f.ModTime().String(), admin})
			}
		}
		return nil
	})
	// Parse the page template.
	t, err := template.ParseFiles("template/default.html")
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	// Render.
	t.Execute(w, data)
	return
}

// unitCapacity formats a byte count with the largest fitting unit (G/M/K/B).
func unitCapacity(size int64) string {
	if g := float64(size) / (1024 * 1024 * 1024); int64(g) > 0 {
		return fmt.Sprintf("%.2fG", g)
	} else if m := float64(size) / (1024 * 1024); int64(m) > 0 {
		return fmt.Sprintf("%.2fM", m)
	} else if k := float64(size) / (1024); int64(k) > 0 {
		return fmt.Sprintf("%.2fK", k)
	} else {
		return fmt.Sprintf("%dB", size)
	}
}

// Config is the JSON structure of godw.conf.
type Config struct {
	Size  float64 `json:"size"`  // maximum upload size in bytes
	Admin string  `json:"admin"` // admin user name
}

// readConfig loads godw.conf from the working directory, falling back to
// built-in defaults (1 GiB limit, admin "admin") on any failure.
func readConfig() *Config {
	// Defaults.
	conf := new(Config)
	conf.Size = 1073741824
	conf.Admin = "admin"
	if !Exists("godw.conf") {
		log.Println("use default")
		log.Println("not found godw.conf")
		return conf
	}
	f, err := os.Open("godw.conf")
	if err != nil {
		log.Println("use default")
		log.Println(err.Error())
		return conf
	}
	bs, err := ioutil.ReadAll(bufio.NewReader(f))
	if err != nil {
		log.Println("use default")
		log.Println(err.Error())
		return conf
	}
	err = json.Unmarshal(bs, &conf)
	if err != nil {
		log.Println("use default")
		log.Println(err.Error())
		return conf
	}
	return conf
}

// Exists reports whether a path exists on disk.
func Exists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}
	return true
}

// Trim strips leading and trailing whitespace (any byte in 0x00-0x20),
// equivalent in effect to strings.TrimSpace for ASCII input.
func Trim(s string) string {
	size := len(s)
	if size <= 0 {
		return s
	}
	l := 0
	for ; l < size; l++ {
		b := s[l]
		if !IsSpace(b) {
			break
		}
	}
	r := size - 1
	for ; r >= l; r-- {
		b := s[r]
		if !IsSpace(b) {
			break
		}
	}
	return string(s[l : r+1])
}

// Fremove removes the file at ph.
func Fremove(ph string) (err error) {
	err = os.Remove(ph)
	return err
}

/*
FileR opens a file from disk read-only. If the file does not exist or the
open fails it returns nil. The caller is responsible for closing the file.
*/
func FileR(ph string) *os.File {
	f, err := os.Open(ph)
	if nil != err {
		return nil
	}
	return f
}

// FileRF opens a file and hands it to callback; the callback does not need
// to worry about closing the file.
func FileRF(ph string, callback func(*os.File)) {
	f := FileR(ph)
	if nil != f {
		defer f.Close()
		callback(f)
	}
}

// IsSpace reports whether c is a whitespace byte (0x00-0x20).
func IsSpace(c byte) bool {
	if c >= 0x00 && c <= 0x20 {
		return true
	}
	return false
}

// IsBlank reports whether s consists only of whitespace bytes (0x00-0x20).
func IsBlank(s string) bool {
	for i := 0; i < len(s); i++ {
		b := s[i]
		if !IsSpace(b) {
			return false
		}
	}
	return true
}

// GetCurrentPath returns the process working directory with forward
// slashes, e.g. C:/Users/.../godw on Windows.
func GetCurrentPath() string {
	dir, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	return strings.Replace(dir, "\\", "/", -1)
}
/********************************** / Sedgewick's algorithm edition 4 / Chapter 1 Quick Find *********************************/ package quick_find type Sites struct { id []int number int } func Init(n int) *Sites { sites := &Sites{make([]int, n), n} for i := range sites.id { sites.id[i] = i } return sites } func (s *Sites) Union(p, q int) { pId := s.Find(p) qId := s.Find(q) if pId == qId { return } for i := range s.id { if s.Find(i) == pId { s.id[i] = qId } } s.number = s.number - 1 } func (s Sites) Find(p int) int { return s.id[p] } func (s Sites) Connected(p, q int) bool { return s.Find(p) == s.Find(q) } func (s Sites) Count() int { return s.number }
package main

import (
	"fmt"
	"sort"
)

// main sorts a fixed list of player names lexicographically in place
// and prints the result.
func main() {
	names := []string{"Ali", "Sancho", "Messi", "Bale", "Ronaldo"}
	sort.Strings(names)
	fmt.Println(names)
}

// Output: [Ali Bale Messi Ronaldo Sancho]
package main import ( "fmt" "io/ioutil" "net/http" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/viper" "github.com/tidwall/gjson" ) var ( ticker *time.Ticker network string prometheusURL string totalVotingPowerGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // Namespace: "our_company", // Subsystem: "blob_storage", Name: "gaia_total_voting_power", Help: "Total network voting power", }, []string{"chainID"}) individualVotingPowerGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // Namespace: "our_company", // Subsystem: "blob_storage", Name: "gaia_validators_voting_power", Help: "Voting power of each validator", }, []string{"address", "chainID"}) ) func startDataRetrieval() { baseURL := viper.GetString("baseURL") validatorsURLEndpoint := baseURL + "/validators" statusURLEndpoint := baseURL + "/status" freq := time.Duration(viper.GetDuration("queryFrequency") * time.Second) fmt.Println("Query frequency:", freq) ticker = time.NewTicker(freq) // Retrieve chain-id chainID, err := getChainID(statusURLEndpoint) if err != nil { panic(err) } fmt.Println("Chain-ID:", chainID) go func() { for { retrieveValidatorData(validatorsURLEndpoint, chainID) <-ticker.C } }() } func getChainID(statusURLEndpoint string) (string, error) { client := http.Client{ Timeout: time.Duration(time.Second), } response, err := client.Get(statusURLEndpoint) if err != nil { return "", err } responseBody, err := ioutil.ReadAll(response.Body) if err != nil { return "", err } defer response.Body.Close() value := gjson.GetBytes(responseBody, "result.node_info.network") return value.Str, nil } func retrieveValidatorData(validatorsURLEndpoint, chainID string) { client := http.Client{ Timeout: time.Duration(time.Second), } response, err := client.Get(validatorsURLEndpoint) if err != nil { fmt.Println(err) return } responseBody, err := ioutil.ReadAll(response.Body) if err != nil { fmt.Println(err) return } defer 
response.Body.Close() var totalVotingPower uint64 { value := gjson.GetBytes(responseBody, "result.validators.#.voting_power") for _, v := range value.Array() { totalVotingPower += v.Uint() } } labels := prometheus.Labels{"chainID": chainID} totalVotingPowerGauge.With(labels).Set(float64(totalVotingPower)) { value := gjson.GetBytes(responseBody, "result.validators") for _, v := range value.Array() { address := v.Get("address").String() votingPower := v.Get("voting_power").Uint() labels["address"] = address individualVotingPowerGauge.With(labels).Set(float64(votingPower)) } } } func readConfig() { viper.SetConfigName("config") viper.AddConfigPath(".") viper.SetConfigType("yaml") if err := viper.ReadInConfig(); err != nil { panic(fmt.Errorf("fatal error config file: %s", err)) } // TODO Ensure all configuration keys exist viper.SetDefault("queryFrequency", 30) viper.SetDefault("prometheusURL", "[::]:26662") prometheusURL = viper.GetString("prometheusURL") } func init() { prometheus.MustRegister(totalVotingPowerGauge) prometheus.MustRegister(individualVotingPowerGauge) } func main() { readConfig() startDataRetrieval() http.Handle("/metrics", promhttp.Handler()) fmt.Println("Prometheus listening endpoint:", prometheusURL) err := http.ListenAndServe(prometheusURL, nil) if err != nil { panic(err) } }
// Thin wrapper around go-playground/validator that converts its error type
// into a locally-owned ValidationErrors slice with friendly messages.
package data

import (
	"fmt"

	"gopkg.in/go-playground/validator.v9"
)

// Validation wraps a configured validator instance.
type Validation struct {
	validation *validator.Validate // underlying go-playground validator
}

// NewValidation initialize and return the Validation struct
func NewValidation() *Validation {
	validation := validator.New()
	return &Validation{
		validation: validation,
	}
}

// ValidationError wraps a single field-level failure from the validator.
type ValidationError struct {
	validator.FieldError // embedded validator.FieldError
}

// ValidationErrors collects all field failures for one validated value.
type ValidationErrors []ValidationError

// Validate - Validate the request data from client then return the result
// in the form ValidationErrors; returns nil when i passes all struct tags.
func (validation *Validation) Validate(i interface{}) ValidationErrors {
	var errs validator.ValidationErrors
	validationErrs := validation.validation.Struct(i)
	if validationErrs != nil {
		// Struct() returns validator.ValidationErrors on tag failures.
		errs = validationErrs.(validator.ValidationErrors)
	}
	if len(errs) == 0 {
		return nil
	}
	// Re-wrap each field error in the local type.
	var returnArrs []ValidationError
	for _, err := range errs {
		ve := ValidationError{err.(validator.FieldError)}
		returnArrs = append(returnArrs, ve)
	}
	return returnArrs
}

// Error - Return the formatted validation error string
func (validationError ValidationError) Error() string {
	return fmt.Sprintf(
		"Key: '%s' Error: Field validation for '%s' failed on the '%s' tag",
		validationError.Namespace(),
		validationError.Field(),
		validationError.Tag(),
	)
}

// Errors - Return the list of formatted validation error strings,
// one entry per failed field.
func (validationErrors ValidationErrors) Errors() []string {
	errs := []string{}
	for _, err := range validationErrors {
		errs = append(errs, err.Error())
	}
	return errs
}
// Copyright 2020 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v2

import (
	"github.com/spf13/pflag"
	"sigs.k8s.io/kubebuilder/pkg/model/config"
	"sigs.k8s.io/kubebuilder/pkg/plugin"

	"github.com/operator-framework/operator-sdk/internal/plugins/manifests"
)

// createAPIPlugin wraps kubebuilder's CreateAPI plugin and runs SDK-specific
// "phase 2" steps (manifest generation) after the wrapped plugin finishes.
type createAPIPlugin struct {
	plugin.CreateAPI

	config *config.Config // captured in InjectConfig for use in Run
}

// Compile-time check that the wrapper still satisfies plugin.CreateAPI.
var _ plugin.CreateAPI = &createAPIPlugin{}

// UpdateContext delegates to the wrapped plugin.
func (p *createAPIPlugin) UpdateContext(ctx *plugin.Context) { p.CreateAPI.UpdateContext(ctx) }

// BindFlags delegates to the wrapped plugin.
func (p *createAPIPlugin) BindFlags(fs *pflag.FlagSet) { p.CreateAPI.BindFlags(fs) }

// InjectConfig delegates to the wrapped plugin and keeps a reference to the
// project config for the post-run step.
func (p *createAPIPlugin) InjectConfig(c *config.Config) {
	p.CreateAPI.InjectConfig(c)
	p.config = c
}

// Run executes the wrapped CreateAPI plugin, then detects which resource it
// added and feeds that resource to the SDK phase 2 plugins.
func (p *createAPIPlugin) Run() error {
	// Run() may add a new resource to the config, so we can compare resources before/after to get the new resource.
	oldResources := make(map[config.GVK]struct{}, len(p.config.Resources))
	for _, r := range p.config.Resources {
		oldResources[r] = struct{}{}
	}

	if err := p.CreateAPI.Run(); err != nil {
		return err
	}

	// Emulate plugins phase 2 behavior by checking the config for this plugin's config object.
	if !hasPluginConfig(p.config) {
		return nil
	}

	// Find the new resource. Here we shouldn't worry about checking if one was found,
	// since downstream plugins will do so.
	var newResource config.GVK
	for _, r := range p.config.Resources {
		if _, hasResource := oldResources[r]; !hasResource {
			newResource = r
			break
		}
	}

	// Run SDK phase 2 plugins.
	return p.runPhase2(newResource)
}

// runPhase2 runs the SDK phase 2 plugins for the given resource.
func (p *createAPIPlugin) runPhase2(gvk config.GVK) error {
	return manifests.RunCreateAPI(p.config, gvk)
}
/*Package income depicts current situation Imaginary organisation has income from two kinds of projects viz. fixed billing and time and material. The net income of the organisation is calculated by the sum of the incomes from these projects. Assume that the currency is dollars and we will not deal with cents. It will be represented using int. /**/ package income import "fmt" //Income interface contains two methods calculate() which calculates and returns the income from the source and source() which returns the name of the source. type Income interface { calculate() int //calculates and returns the income from the source source() string //returns the name of the source } //FixedBilling project has two fields "projectName" which represents the name of the project and "biddedAmount" which is the amount that the organisation has bid for the project. type FixedBilling struct { projectName string biddedAmount int } //Advertisement new income stream type Advertisement struct { adName string cPC int noOfClicks int } //NewAdvertisement new func of Advertisement struct func NewAdvertisement(adName string, CPC, noOfClicks int) *Advertisement { return &Advertisement{adName, CPC, noOfClicks} } //NewFixedBilling new func of FixedBilling struct func NewFixedBilling(projectName string, biddedAmount int) *FixedBilling { return &FixedBilling{projectName, biddedAmount} } //NewTimeAndMaterial new func of TimeAndMaterial struct func NewTimeAndMaterial(projectName string, noOfHours, hourlyRate int) *TimeAndMaterial { return &TimeAndMaterial{projectName, noOfHours, hourlyRate} } //TimeAndMaterial represent projects of Time and Material type. 
type TimeAndMaterial struct { projectName string noOfHours int hourlyRate int } func (fb FixedBilling) calculate() int { return fb.biddedAmount } func (fb FixedBilling) source() string { return fb.projectName } func (tam TimeAndMaterial) calculate() int { return tam.noOfHours * tam.hourlyRate } func (tam TimeAndMaterial) source() string { return tam.projectName } func (a Advertisement) calculate() int { return a.cPC * a.noOfClicks } func (a Advertisement) source() string { return a.adName } //CalculateNetIncome calculates and prints the total income func CalculateNetIncome(ic []Income) { var netincome int for _, v := range ic { fmt.Printf("Income from %s is %d\n", v.source(), v.calculate()) netincome += v.calculate() } fmt.Println("Total net income is ", netincome) }
package rules

import (
	"strings"
)

// Execute1 : all ComplexType\Sequence\Element names in the document must
// start with a lowercase character. Sets r.result to "OK" when every
// element conforms (or the file is not an XSD), "NOK" otherwise, and
// appends one detail line per offending element name.
func (r *Rule) Execute1(xsd Schema) {
	r.result = "OK"
	if !r.isXSD(xsd.XMLFile) {
		return
	}
	for _, complexType := range xsd.ComplexType {
		for _, element := range complexType.Sequence.Element {
			// BUG FIX: the original re-assigned r.result = "OK" for every
			// conforming element, so a valid element seen after an invalid
			// one erased the "NOK" verdict. Only ever downgrade to "NOK".
			// Also treat an empty name as invalid instead of panicking on
			// the element.Name[0:1] slice.
			if element.Name == "" || element.Name[0:1] != strings.ToLower(element.Name[0:1]) {
				r.result = "NOK"
				r.detail = r.detail + "Nombre no valido " + element.Name + "\n"
			}
		}
	}
}
package backtracking import ( "fmt" "testing" ) func Test_permuteUnique(t *testing.T) { res := permuteUnique([]int{1, 1, 2}) if len(res) != 3 { t.Error(res) } fmt.Println(res) }
package utils

import (
	"bytes"
	"context"
	"os/exec"
	"time"
)

/*
Command utilities.
*/

// RunWithTimeout runs cmd, waiting at most timeout for it to finish.
// It returns the command's combined stdout+stderr output together with the
// command's error, or ctx.Err() (context.DeadlineExceeded) when the
// deadline is hit. On timeout the child process is killed so it does not
// outlive the call.
func RunWithTimeout(cmd *exec.Cmd, timeout time.Duration) (string, error) {
	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &out

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	if err := cmd.Start(); err != nil {
		return out.String(), err
	}

	// Wait in a separate goroutine so we can race completion vs. timeout.
	errCh := make(chan error, 1)
	go func() {
		errCh <- cmd.Wait()
	}()

	// Both branches return, so no loop is needed (the original wrapped this
	// select in a pointless for).
	select {
	case <-ctx.Done():
		// BUG FIX: the original returned here without stopping the child,
		// leaking both the process and the Wait goroutine above.
		_ = cmd.Process.Kill()
		<-errCh // reap the killed process; its error is superseded by ctx.Err()
		return out.String(), ctx.Err()
	case err := <-errCh:
		return out.String(), err
	}
}
package mt

// SoundID identifies an individual playing sound instance.
type SoundID int32

// SoundSrcType describes what a playing sound is attached to.
// NOTE: the trailing line comments on the constants below are consumed by
// `stringer -linecomment` and become the String() values — do not edit them.
type SoundSrcType uint8

const (
	NoSrc  SoundSrcType = iota // nowhere
	PosSrc                     // pos
	AOSrc                      // ao
)

//go:generate stringer -linecomment -type SoundSrcType

// SoundDef describes a sound to play: its name plus gain, pitch and fade
// parameters.
type SoundDef struct {
	Name              string
	Gain, Pitch, Fade float32
}
// Package database is a plugin that manages the badger database (e.g. garbage collection).
package database

import (
	"errors"
	"github.com/iotaledger/hive.go/kvstore"
	"github.com/iotaledger/wasp/packages/coretypes"
	"github.com/iotaledger/wasp/packages/dbprovider"
	"github.com/iotaledger/wasp/packages/parameters"
	"sync"

	"github.com/iotaledger/hive.go/daemon"
	"github.com/iotaledger/hive.go/logger"
	"github.com/iotaledger/hive.go/node"
)

const pluginName = "Database"

var (
	log        *logger.Logger
	dbProvider *dbprovider.DBProvider
	doOnce     sync.Once // guards lazy creation of dbProvider in GetInstance
)

// Init is an entry point for the plugin.
func Init() *node.Plugin {
	return node.NewPlugin(pluginName, node.Enabled, configure, run)
}

// configure verifies the on-disk database version and registers a shutdown
// worker that flushes and closes the database when the node stops.
func configure(_ *node.Plugin) {
	log = logger.NewLogger(pluginName)

	err := checkDatabaseVersion()
	if errors.Is(err, ErrDBVersionIncompatible) {
		log.Panicf("The database scheme was updated. Please delete the database folder.\n%s", err)
	}
	if err != nil {
		log.Panicf("Failed to check database version: %s", err)
	}

	// we open the database in the configure, so we must also make sure it's closed here
	err = daemon.BackgroundWorker(pluginName, func(shutdownSignal <-chan struct{}) {
		<-shutdownSignal // block until node shutdown is signalled
		log.Infof("syncing database to disk...")
		dbProvider.Close()
		log.Infof("syncing database to disk... done")
	}, parameters.PriorityDatabase)
	if err != nil {
		log.Panicf("failed to start a daemon: %s", err)
	}
}

// run starts the background garbage-collection worker for the database.
func run(_ *node.Plugin) {
	err := daemon.BackgroundWorker(pluginName+"[GC]", dbProvider.RunGC, parameters.PriorityBadgerGarbageCollection)
	if err != nil {
		log.Errorf("failed to start as daemon: %s", err)
	}
}

// GetInstance returns the process-wide DBProvider, creating it on first use.
func GetInstance() *dbprovider.DBProvider {
	doOnce.Do(createInstance)
	return dbProvider
}

// createInstance builds either an in-memory or a persistent provider,
// depending on configuration.
func createInstance() {
	if parameters.GetBool(parameters.DatabaseInMemory) {
		log.Infof("IN MEMORY DATABASE")
		dbProvider = dbprovider.NewInMemoryDBProvider(log)
	} else {
		dbDir := parameters.GetString(parameters.DatabaseDir)
		dbProvider = dbprovider.NewPersistentDBProvider(dbDir, log)
	}
}

// each key in DB is prefixed with `chainID` | `SC index` | `object type byte`

// GetPartition returns a Partition, which is a KVStore prefixed with the chain ID.
func GetPartition(chainID *coretypes.ChainID) kvstore.KVStore {
	return GetInstance().GetPartition(chainID)
}

// GetRegistryPartition returns the KVStore partition reserved for the registry.
func GetRegistryPartition() kvstore.KVStore {
	return GetInstance().GetRegistryPartition()
}
package main

import (
	"fmt"
)

// main demonstrates calling a variadic function with several arguments.
func main() {
	imprimirMessage("hola", "mundo", "!")
}

// imprimirMessage prints each argument on its own line.
func imprimirMessage(messages ...string) {
	for i := 0; i < len(messages); i++ {
		fmt.Println(messages[i])
	}
}
// Copyright 2020 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 package solo import "time" // LogicalTime return current logical clock time on the 'solo' instance func (env *Solo) LogicalTime() time.Time { env.glbMutex.Lock() defer env.glbMutex.Unlock() return env.logicalTime } // AdvanceClockTo advances logical clock to the specific time moment in the (logical) future func (env *Solo) AdvanceClockTo(ts time.Time) { env.glbMutex.Lock() defer env.glbMutex.Unlock() env.advanceClockTo(ts) } func (env *Solo) advanceClockTo(ts time.Time) { if !env.logicalTime.Before(ts) { env.logger.Panic("can'T advance clock to the past") } env.logicalTime = ts } // AdvanceClockBy advances logical clock by time step func (env *Solo) AdvanceClockBy(step time.Duration) { env.glbMutex.Lock() defer env.glbMutex.Unlock() env.advanceClockTo(env.logicalTime.Add(step)) env.logger.Infof("AdvanceClockBy: logical clock advanced by %v", step) } // ClockStep advances logical clock by time step set by SetTimeStep func (env *Solo) ClockStep() { env.glbMutex.Lock() defer env.glbMutex.Unlock() env.advanceClockTo(env.logicalTime.Add(env.timeStep)) env.logger.Infof("ClockStep: logical clock advanced by %v", env.timeStep) } // SetTimeStep sets default time step for the 'solo' instance func (env *Solo) SetTimeStep(step time.Duration) { env.glbMutex.Lock() defer env.glbMutex.Unlock() env.timeStep = step }
package main type Priority int const ( EMERGENCY Priority = 0 ALERT Priority = 1 CRITICAL Priority = 2 ERROR Priority = 3 WARNING Priority = 4 NOTICE Priority = 5 INFO Priority = 6 DEBUG Priority = 7 ) var PriorityName = map[Priority]string{ EMERGENCY: "EMERG", ALERT: "ALERT", CRITICAL: "CRITICAL", ERROR: "ERROR", WARNING: "WARNING", NOTICE: "NOTICE", INFO: "INFO", DEBUG: "DEBUG", } type Record struct { InstanceId string TimeUsec int64 PID int `journald:"_PID"` UID int `journald:"_UID"` GID int `journald:"_GID"` Command string `journald:"_COMM"` Executable string `journald:"_EXE"` CommandLine string `journald:"_CMDLINE"` SystemdUnit string `journald:"_SYSTEMD_UNIT"` BootId string `journald:"_BOOT_ID"` MachineId string `journald:"_MACHINE_ID"` Hostname string `journald:"_HOSTNAME"` Transport string `journald:"_TRANSPORT"` Priority Priority `journald:"PRIORITY"` Message string `journald:"MESSAGE"` MessageId string `journald:"MESSAGE_ID"` Errno int `journald:"ERRNO"` Syslog RecordSyslog Kernel RecordKernel } type RecordSyslog struct { Facility int `journald:"SYSLOG_FACILITY"` Identifier string `journald:"SYSLOG_IDENTIFIER"` PID int `journald:"SYSLOG_PID"` } type RecordKernel struct { Device string `journald:"_KERNEL_DEVICE"` Subsystem string `journald:"_KERNEL_SUBSYSTEM"` SysName string `journald:"_UDEV_SYSNAME"` DevNode string `journald:"_UDEV_DEVNODE"` }
package recursion // Fac return n's factorial by recursion func Fac(n int) int { if n < 2 { return n } return n * Fac(n-1) } // Fac2 return n's factorial by foreach func Fac2(n int) (ret int) { ret = 1 for i := 2; i <= n; i++ { ret = ret * i } return }
package main import ( "bufio" "fmt" "io" "os" "strings" ) func ReadConfig(filepath string) map[string]string { res := map[string]string{} file, err := os.Open(filepath) if err != nil { return res } defer file.Close() buf := bufio.NewReader(file) for { l, err := buf.ReadString('\n') line := strings.TrimSpace(l) if err != nil { if err != io.EOF { return res } if len(line) == 0 { break } } if len(line) == 0 || line == "#" || line == "\r\n" { //break continue } if line[0] == '/' { //fmt.Println("line[0] =", line[0]) continue } //fmt.Println(line) //fmt.Println("len(line) =", len(line)) i := strings.IndexAny(line, "=") //fmt.Println("i = ", i) value := strings.TrimSpace(line[i+1 : len(line)]) //fmt.Println("value =", value) res[strings.TrimSpace(line[0:i])] = value //fmt.Printf("res[strings.TrimSpace(line[0:%d])] = %s\n", i, value) } return res } func main() { var User, Host, Port, Touser string conf := ReadConfig("F:\\Coding\\Golang\\src\\learngo\\goReadConfig\\app.cnf") User = conf["user"] Port = conf["port"] Host = conf["host"] Touser = conf["touser"] fmt.Printf("User is %s\nPort is %s\nHost is %s\nTouser is %s\n", User, Port, Host, Touser) }
package leetcode import "testing" func TestSubtractProductAndSum(t *testing.T) { if subtractProductAndSum(234) != 15 { t.Fatal() } if subtractProductAndSum(4421) != 21 { t.Fatal() } if subtractProductAndSum(114) != -2 { t.Fatal() } }
package api import ( "fmt" "github.com/kalifun/gin-template/global" "github.com/kalifun/gin-template/middleware/config" "github.com/kalifun/gin-template/middleware/logs" "github.com/kalifun/gin-template/router" "github.com/spf13/cobra" "net/http" ) var Api = &cobra.Command{ Use: "server", Short: "Start Server", Long: "Start Api Server", Example: "gin-template server", Run: func(cmd *cobra.Command, args []string) { RunServer() }, } func RunServer() { // 初始化中间件 config.Init() logs.InitLog() router := router.InitRouter() s := &http.Server{ Addr: fmt.Sprintf(":%d", global.ConfigSvr.System.Port), Handler: router, //ReadTimeout: conf.ReadTimeout, //WriteTimeout: conf.WriteTimeout, MaxHeaderBytes: 1 << 20, } err := s.ListenAndServe() if err != nil { fmt.Println(err) } }
package main import ( "errors" "flag" "net" "os" "os/signal" "syscall" "protocol" "tun" "github.com/golang/glog" ) func main() { var network, secret, listenAddr, ipnet, upScript, downScript string flag.StringVar(&network, "network", "udp", "network of transport layer") flag.StringVar(&secret, "secret", "", "secret") flag.StringVar(&listenAddr, "listen-addr", "0.0.0.0:9525", "listening address") flag.StringVar(&ipnet, "ipnet", "10.0.200.1/24", "internal ip net") flag.StringVar(&upScript, "up-script", "./if-up.sh", "up shell script file path") flag.StringVar(&downScript, "down-script", "./if-down.sh", "down shell script file path") flag.Parse() ip, ipNet, err := net.ParseCIDR(ipnet) if err != nil { glog.Fatalln(err) } ln, err := protocol.Listen(network, secret, listenAddr, ip, ipNet) if err != nil { glog.Fatalln(err) } glog.Infoln("start listening") tun, err := tun.NewTUN("", &ip, &ipNet.Mask) if err != nil { glog.Fatalln(err) } defer tun.Close() err = tun.Up(upScript, listenAddr) if err != nil { glog.Fatalln(err) } defer tun.Down(downScript, listenAddr) glog.Infoln(tun.Name(), " is ready") s := NewServer(tun, ipNet) defer s.Close() errc := make(chan error) go func() { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, os.Kill, syscall.SIGTERM) s := <-c errc <- errors.New(s.String()) }() go func() { for { c, err := ln.Accept() if err != nil { glog.Errorln("fail to accept", err) } go s.Handle(c) } }() glog.Infoln("waiting client") err = <-errc glog.Info("process quit", err) return }
// Matt Behrens <askedrelic@gmail.com> // 2013/04/08 16:57:42 package main import "strconv" // import "fmt" func IterativeFizz(max int) { for i:= 1; i <= max; i++ { if (i % 3 == 0 && i % 5 == 0) { println("fizzbuzz") } else if (i % 3 == 0) { println("fizz") } else if (i % 5 == 0) { println("buzz") } else { println(i) } } } func RecursiveFizz(max int, vals []string) { if max == 0 { return } val := "" if (max % 15 == 0) { val = "fizzbuzz" } else if (max % 3 == 0) { val = "fizz" } else if (max % 5 == 0) { val = "buzz" } else { val = strconv.Itoa(max) } vals[max-1] = val RecursiveFizz(max - 1, vals) } // Writeaprogramthatprintsthenumbersfrom1 to 100. But for multiples of three // print "Fizz" in- stead of the number and for the multiples of five print // "Buzz". For numbers which are multiples of both three and five print // "FizzBuzz". func main() { IterativeFizz(30) println(); vals := make([]string, 30) RecursiveFizz(30, vals) for x := 0; x < len(vals); x++ { println(vals[x]) } }
package main import "fmt" type Node struct { Left *Node Value rune Right *Node } type Queue struct { nodes []*Node head int tail int count int } func walk(n *Node) { if n == nil { return } q := &Queue{nodes: make([]*Node, 11)} q.push(n) for q.count != 0 { current := q.pop() fmt.Printf("%c", current.Value) if current.Left != nil { q.push(current.Left) } if current.Right != nil { q.push(current.Right) } } } func main() { root := createNode('F') root.Left = createNode('D') root.Left.Left = createNode('B') root.Left.Right = createNode('E') root.Left.Left.Left = createNode('A') root.Left.Left.Right = createNode('C') root.Right = createNode('J') root.Right.Left = createNode('G') root.Right.Right = createNode('K') root.Right.Left.Right = createNode('I') root.Right.Left.Right.Left = createNode('H') walk(root) } func (q *Queue) push(n *Node) { if q.head == q.tail && q.count > 0 { nodes := make([]*Node, len(q.nodes)*2) copy(nodes, q.nodes[q.head:]) copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.head]) q.head = 0 q.tail = len(q.nodes) q.nodes = nodes } q.nodes[q.tail] = n q.tail = (q.tail + 1) % len(q.nodes) q.count++ } func (q *Queue) pop() *Node { if q.count == 0 { return nil } node := q.nodes[q.head] q.head = (q.head + 1) % len(q.nodes) q.count-- return node } func createNode(value rune) *Node { node := new(Node) node.Left = nil node.Value = value node.Right = nil return node }
package gocmd import ( "os/exec" "strings" ) func ExecCmd(cmdStr string) (res string, err error) { args := strings.Split(cmdStr, " ") resb,err := exec.Command(args[0], args[1:]...).Output() if err != nil { return "", err } return string(resb), nil }
package main

import (
	"bytes"
	"crypto/rand"
	"database/sql"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/gorilla/mux"
	_ "github.com/lib/pq"
	"github.com/satori/go.uuid"
	"github.com/tobyjsullivan/log-sdk/reader"
	"github.com/tobyjsullivan/ues-auth-svc/projection"
	"github.com/urfave/negroni"

	"github.com/rs/cors"
)

const (
	// AUTH_TOKEN_BYTES is the length of a generated auth token in bytes.
	AUTH_TOKEN_BYTES = 64
	// AUTH_TOKEN_EXPIRY_SECONDS is how long an issued token stays valid.
	AUTH_TOKEN_EXPIRY_SECONDS = 86400 // 24 hours
)

var (
	logger            *log.Logger
	db                *sql.DB                // Postgres connection pool (token storage)
	logId             reader.LogID           // event-log id this service projects from
	client            *reader.Client         // log-reader client used to hydrate state
	state             *projection.Projection // in-memory account projection
	corsAllowedOrigin string                 // origin allowed by CORS, from FRONTEND_URL
)

// init wires up all process-wide dependencies from environment variables:
// the Postgres connection, the CORS origin, the log-reader client, and the
// account projection (hydrated synchronously before main runs).
func init() {
	logger = log.New(os.Stdout, "[service] ", 0)

	pgHostname := os.Getenv("PG_HOSTNAME")
	pgUsername := os.Getenv("PG_USERNAME")
	pgPassword := os.Getenv("PG_PASSWORD")
	pgDatabase := os.Getenv("PG_DATABASE")

	dbConnOpts := fmt.Sprintf("host='%s' user='%s' dbname='%s' password='%s' sslmode=disable",
		pgHostname, pgUsername, pgDatabase, pgPassword)
	logger.Println("Connecting to DB...")
	var err error
	db, err = sql.Open("postgres", dbConnOpts)
	if err != nil {
		logger.Println("Error initializing connection to Postgres DB.", err.Error())
		panic(err.Error())
	}

	corsAllowedOrigin = os.Getenv("FRONTEND_URL")
	if corsAllowedOrigin != "" {
		parsed, err := url.Parse(corsAllowedOrigin)
		if err != nil {
			// NOTE(review): message says "FRONTENT_URL" but the env var read
			// above is FRONTEND_URL — looks like a typo in the message.
			panic("Failed to parse FRONTENT_URL. " + err.Error())
		}
		corsAllowedOrigin = parsed.String()
	}

	readerSvc := os.Getenv("LOG_READER_API")
	client, err = reader.New(&reader.ClientConfig{
		ServiceAddress: readerSvc,
		Logger:         logger,
	})
	if err != nil {
		panic("Error creating reader client. " + err.Error())
	}

	logId = reader.LogID{}
	err = logId.Parse(os.Getenv("SERVICE_LOG_ID"))
	if err != nil {
		panic("Error parsing LogID. " + err.Error())
	}

	state = projection.NewProjection()

	// Replay the event log from the beginning so the in-memory projection is
	// fully hydrated before the HTTP server starts accepting requests.
	logger.Println("Subscribing projection to log.", logId.String())
	start := reader.EventID{}
	client.Subscribe(logId, start, state.Apply, true)
	logger.Println("Hydration complete.", logId.String())
}

// main builds the router, wraps it with CORS, and serves on $PORT (default 3000).
func main() {
	r := buildRoutes()

	c := cors.New(cors.Options{
		AllowedOrigins: []string{corsAllowedOrigin},
	})

	n := negroni.New()
	n.UseHandler(c.Handler(r))

	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}

	n.Run(":" + port)
}

// buildRoutes declares the three HTTP endpoints of the service.
func buildRoutes() http.Handler {
	r := mux.NewRouter()
	r.HandleFunc("/", statusHandler).Methods("GET")
	r.HandleFunc("/authorize", authHandler).Methods("POST")
	r.HandleFunc("/verify", verifyTokenHandler).Methods("POST")

	return r
}

// statusHandler is a trivial liveness endpoint.
func statusHandler(w http.ResponseWriter, _ *http.Request) {
	fmt.Fprint(w, "The ues-auth-svc service is online!\n")
}

// authHandler authenticates an account (email+password) on behalf of a
// client application. It validates the client id and callback URL, looks the
// account up in the projection, then issues a random auth token, stores it
// in the DB, and returns the token with its remaining lifetime in seconds.
func authHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	paramEmail := r.Form.Get("email")
	paramPassword := r.Form.Get("password")
	paramClientId := r.Form.Get("client-id")
	paramCallbackUrl := r.Form.Get("callback")

	if paramEmail == "" {
		http.Error(w, "Must provide email.", http.StatusBadRequest)
		return
	}
	if paramPassword == "" {
		http.Error(w, "Must provide password.", http.StatusBadRequest)
		return
	}
	if paramClientId == "" {
		http.Error(w, "Must provide client-id.", http.StatusBadRequest)
		return
	}
	if paramCallbackUrl == "" {
		http.Error(w, "Must provide callback.", http.StatusBadRequest)
		return
	}

	clientId, err := parseClientId(paramClientId)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	callbackUrl, err := url.Parse(paramCallbackUrl)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	valid, err := validateClientCallbackUrl(clientId, callbackUrl)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	} else if !valid {
		http.Error(w, "Invalid client-id or callback-url.", http.StatusUnauthorized)
		return
	}

	acct, err := state.FindAccount(paramEmail, paramPassword)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if acct == nil {
		http.Error(w, "Account not found.", http.StatusUnauthorized)
		return
	}

	// Generate an auth token (crypto/rand, AUTH_TOKEN_BYTES random bytes).
	token := make([]byte, AUTH_TOKEN_BYTES)
	_, err = rand.Read(token)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	expiry := time.Now().Add(AUTH_TOKEN_EXPIRY_SECONDS * time.Second)

	t := &tokenIssue{
		clientId:  clientId,
		accountId: acct.ID,
		token:     token,
		expiry:    expiry,
	}

	err = commitToken(t)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	timeToExpire := expiry.Sub(time.Now())
	resp := responseFmt{
		Data: struct {
			Token   string `json:"token"`
			Expires int    `json:"expires"`
		}{
			Token:   hex.EncodeToString(token),
			Expires: int(timeToExpire.Seconds()),
		},
	}

	encoder := json.NewEncoder(w)
	if err = encoder.Encode(&resp); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}

// verifyTokenHandler lets a client application exchange a token (plus its
// client id/secret) for the account id the token was issued to, along with
// the token's remaining lifetime in seconds.
func verifyTokenHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	paramToken := r.Form.Get("token")
	paramClientId := r.Form.Get("client-id")
	paramClientSecret := r.Form.Get("client-secret")

	clientId, err := parseClientId(paramClientId)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	clientSecret, err := base64.StdEncoding.DecodeString(paramClientSecret)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	valid, err := validateClientSecret(clientId, clientSecret)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	} else if !valid {
		http.Error(w, "Invalid client-id or client-secret.", http.StatusUnauthorized)
		return
	}

	token, err := hex.DecodeString(paramToken)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// NOTE(review): an unknown/expired token surfaces here as a DB error
	// (sql.ErrNoRows) and is reported as a 500 rather than a 401 — verify
	// whether that is intended.
	issue, err := lookupToken(clientId, token)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	timeToExpire := issue.expiry.Sub(time.Now())
	resp := responseFmt{
		Data: struct {
			AccountID string `json:"accountId"`
			Expires   int    `json:"expires"`
		}{
			AccountID: issue.accountId.String(),
			Expires:   int(timeToExpire.Seconds()),
		},
	}

	encoder := json.NewEncoder(w)
	if err = encoder.Encode(&resp); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}

// responseFmt is the uniform JSON envelope for successful responses.
type responseFmt struct {
	Data interface{} `json:"data"`
}

// parseClientId decodes a hex-encoded client id into a fixed 32-byte array.
// Short input is zero-padded; input longer than 32 bytes is truncated.
func parseClientId(s string) ([32]byte, error) {
	bytesClientId, err := hex.DecodeString(s)
	if err != nil {
		return [32]byte{}, err
	}
	var clientId [32]byte
	copy(clientId[:], bytesClientId)

	return clientId, nil
}

// validateClientCallbackUrl checks the supplied client id and callback URL
// against the single client configured via CLIENT_ID / CLIENT_CALLBACK_URL.
func validateClientCallbackUrl(clientId [32]byte, callbackUrl *url.URL) (bool, error) {
	if callbackUrl == nil {
		return false, errors.New("No callbackUrl value present")
	}

	expectedId, err := parseClientId(os.Getenv("CLIENT_ID"))
	if err != nil {
		return false, err
	}
	expectedUrl, err := url.Parse(os.Getenv("CLIENT_CALLBACK_URL"))
	if err != nil {
		return false, err
	}

	return clientId == expectedId && callbackUrl.String() == expectedUrl.String(), nil
}

// validateClientSecret checks the supplied client id and secret against the
// single client configured via CLIENT_ID / CLIENT_SECRET (base64).
func validateClientSecret(clientId [32]byte, clientSecret []byte) (bool, error) {
	expectedId, err := parseClientId(os.Getenv("CLIENT_ID"))
	if err != nil {
		return false, err
	}

	expectedSecret, err := base64.StdEncoding.DecodeString(os.Getenv("CLIENT_SECRET"))
	if err != nil {
		return false, err
	}

	return clientId == expectedId && bytes.Equal(clientSecret, expectedSecret), nil
}

// commitToken persists an issued token to the Tokens table.
func commitToken(t *tokenIssue) error {
	// TODO Add Client-ID to record
	res, err := db.Exec(`INSERT INTO Tokens(ACCOUNT_ID, TOKEN, EXPIRES) VALUES ($1, $2, $3)`,
		t.accountId.Bytes(), t.token, t.expiry)
	if err != nil {
		logger.Println("Error inserting new log record.", err.Error())
		return err
	}

	numRows, err := res.RowsAffected()
	if err != nil {
		logger.Println("Error reading RowsAffected.", err.Error())
		return err
	}

	logger.Println("Rows affected:", numRows)

	return nil
}

// lookupToken fetches a non-expired token row and reconstructs the issue.
// A missing or expired token is returned as the DB error from Scan.
func lookupToken(clientId [32]byte, token []byte) (*tokenIssue, error) {
	// TODO Add clientID filter to where clause
	var accountIdBytes []byte
	var expires time.Time
	err := db.QueryRow(`SELECT ACCOUNT_ID, EXPIRES FROM Tokens WHERE token=$1 AND expires>NOW()`,
		token).Scan(&accountIdBytes, &expires)
	if err != nil {
		return nil, err
	}

	accountId := uuid.UUID{}
	copy(accountId[:], accountIdBytes)

	return &tokenIssue{
		clientId:  clientId,
		accountId: accountId,
		token:     token,
		expiry:    expires,
	}, nil
}

// tokenIssue is an issued auth token together with its owner and expiry.
type tokenIssue struct {
	clientId  [32]byte
	accountId uuid.UUID
	token     []byte
	expiry    time.Time
}
package api

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"

	MQTT "github.com/eclipse/paho.mqtt.golang"
	"github.com/johannesrohwer/redfish/core"
)

// APIBASE is the common topic prefix for all API topics.
const APIBASE = "redfish/api/v1.0"

// ReplyChannelMessage pairs a request id with the payload to publish back
// on that request's reply topic.
type ReplyChannelMessage struct {
	RequestID string
	Payload   interface{}
}

// MQTTFacade exposes the service registry over MQTT topics and forwards
// requests to a core.Dispatcher. Replies flow through replyCh.
type MQTTFacade struct {
	broker        string
	clientOptions *MQTT.ClientOptions
	client        MQTT.Client
	dispatcher    core.Dispatcher
	replyCh       chan ReplyChannelMessage
}

// NewMQTTFacade creates a facade bound to the given dispatcher with a
// buffered reply channel.
func NewMQTTFacade(d core.Dispatcher) *MQTTFacade {
	return &MQTTFacade{dispatcher: d, replyCh: make(chan ReplyChannelMessage, 8)}
}

// processReplyCh drains replyCh, JSON-encodes each payload and publishes it
// on the per-request reply topic. Runs until replyCh is closed.
func (mqtt *MQTTFacade) processReplyCh() {
	for reply := range mqtt.replyCh {
		msgBytes, _ := json.Marshal(reply.Payload)
		message := string(msgBytes[:])
		mqtt.publish(APIBASE+"/request/"+reply.RequestID, message)
	}
}

// Start connects to the broker (hard-coded localhost:1883), starts the reply
// processor and subscribes to the service topics. It then blocks forever.
func (mqtt *MQTTFacade) Start() {
	// Initialize connection
	broker := "tcp://localhost:1883"
	mqtt.connect(broker)
	mqtt.publish("redfish/api/v1.0/log", "Redfish v1.0")

	// Initialize replyCh processor
	go mqtt.processReplyCh()

	// Initialize subscriptions
	mqtt.subscribe("redfish/api/v1.0/service", mqtt.serviceHandler)
	mqtt.subscribe("redfish/api/v1.0/service/+", mqtt.serviceIDHandler)

	select {}
}

// connect dials the broker and panics on failure.
func (mqtt *MQTTFacade) connect(broker string) {
	mqtt.clientOptions = MQTT.NewClientOptions()
	mqtt.clientOptions.AddBroker(broker)
	mqtt.client = MQTT.NewClient(mqtt.clientOptions)

	if token := mqtt.client.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
}

// publish sends message on topic at QoS 0 and waits for completion.
func (mqtt *MQTTFacade) publish(topic string, message string) {
	token := mqtt.client.Publish(topic, 0, false, message)
	token.Wait()
}

// subscribe registers handler for topic at QoS 0; returns false on error.
func (mqtt *MQTTFacade) subscribe(topic string, handler MQTT.MessageHandler) bool {
	if token := mqtt.client.Subscribe(topic, 0, handler); token.Wait() && token.Error() != nil {
		fmt.Println(token.Error())
		return false
	}
	return true
}

// unsubscribe removes the subscription for topic; returns false on error.
func (mqtt *MQTTFacade) unsubscribe(topic string) bool {
	if token := mqtt.client.Unsubscribe(topic); token.Wait() && token.Error() != nil {
		fmt.Println(token.Error())
		return false
	}
	return true
}

// MQTTRequest is the JSON shape of an incoming API request over MQTT.
type MQTTRequest struct {
	RequestID         string            `json:"requestID,omitempty"`
	Method            string            `json:"method,omitempty"`
	QueryParameters   map[string]string `json:"queryParameters,omitempty"`
	Service           *core.Service     `json:"service,omitempty"`
	ServiceCollection *[]core.Service   `json:"serviceCollection,omitempty"`
}

func (req MQTTRequest) String() string {
	return fmt.Sprintf("requestID: %s; method: %s", req.RequestID, req.Method)
}

// process dispatches the request by HTTP-style method (GET/POST/DELETE) to
// the dispatcher and sends the result on replyCh tagged with the request id.
func (req MQTTRequest) process(d core.Dispatcher, replyCh chan ReplyChannelMessage) {
	var payload interface{}

	switch req.Method {
	case "GET":
		if id, exists := req.QueryParameters["id"]; exists {
			payload = d.GetService(id)
		} else {
			payload = d.GetServiceSlice()
		}

	case "POST":
		if service := req.Service; service != nil {
			payload = d.Register([]core.Service{*service})
		} else if services := req.ServiceCollection; services != nil {
			payload = d.Register(*services)
		} else {
			payload = error("no service specified")
		}

	case "DELETE":
		if id, exists := req.QueryParameters["id"]; exists {
			payload = d.Deregister(id)
		}

	default:
		fmt.Println(req)
		payload = error("unknown method")
	}

	replyCh <- ReplyChannelMessage{req.RequestID, payload}
}

// error wraps msg in a JSON-serializable error map.
// NOTE(review): this package-level function shadows Go's builtin `error`
// type within this package — consider renaming (e.g. errorResponse).
func error(msg string) map[string]string {
	return map[string]string{"error": msg}
}

// serviceHandler handles messages on the collection topic (.../service).
func (mqtt MQTTFacade) serviceHandler(client MQTT.Client, msg MQTT.Message) {
	fmt.Println(msg.Payload())
	req := parseToMQTTRequest(msg.Payload())
	go req.process(mqtt.dispatcher, mqtt.replyCh)
}

// parseToMQTTRequest decodes a raw MQTT payload into an MQTTRequest,
// guaranteeing a non-nil QueryParameters map. Decode errors are ignored and
// yield a zero-valued request.
func parseToMQTTRequest(rawMessage []byte) MQTTRequest {
	// Parse and process to MQTTRequest object
	//TODO: this allows service and serviceCollection at the same time
	var req MQTTRequest
	buf := bytes.NewBuffer(rawMessage)
	_ = json.NewDecoder(buf).Decode(&req)

	if req.QueryParameters == nil {
		req.QueryParameters = make(map[string]string)
	}

	return req
}

// serviceIDHandler handles messages on the per-service topic
// (.../service/<id>), injecting the id from the topic into the request.
func (mqtt MQTTFacade) serviceIDHandler(client MQTT.Client, msg MQTT.Message) {
	// Parse URL
	urlSplit := strings.Split(msg.Topic(), "/")
	id := urlSplit[len(urlSplit)-1]

	fmt.Println(string(msg.Payload()[:]))

	req := parseToMQTTRequest(msg.Payload())
	req.QueryParameters["id"] = id
	go req.process(mqtt.dispatcher, mqtt.replyCh)
}
package main import ( "errors" "fmt" "math/rand" "net" "os" "os/exec" "os/signal" "strconv" "strings" "time" "github.com/sirupsen/logrus" "github.com/Logiase/gomirai/bot" "github.com/Logiase/gomirai/message" ) type KEY struct { word string reply string } type KEYS struct { data []KEY } type COUNTER struct { ID uint REMARK string BEGIN time.Time COUNT []time.Time } const ( qq uint = 2540719484 helpKey = "help" execKey = "exec" colorKey = "color" tpinKey = "tpin" counterKey = "cod" help = `Starx's bot - command list 获取此帮助: /help 执行终端命令: /exec [cmd] (admin only) 来一份色图: /color 获取TCP延迟: /tpin [ip,hostname] [port] 启动计时器: /cod [start,stop,count] [remark] ` //wordsPath = "words.json" ) var ( master = []uint {1787074172} // counter []COUNTER // global KEYS //unknow KEYS ) func isTrusted(id uint) bool{ for _,p := range master { if p == id{ return true } } return false } func getCounted(id uint) (COUNTER,int){ for i,p := range counter{ if p.ID == id{ return p,i } } return COUNTER{ ID: 0, REMARK: "", BEGIN: time.Time{}, COUNT: []time.Time{}, },-1 } func delCounted(i int) { counter = append(counter[:i],counter[i+1:]...) 
} func getARG(full string,key string)([]string,error){ key = "/" + key if !strings.HasPrefix(full,key){ return []string{},errors.New("获取命令参数错误: 未知的命令") } return strings.Fields(strings.TrimSpace(strings.Trim(full,key)) ),nil } func safeExec(ucmd string,group uint,id uint,b *bot.Bot) { if len(ucmd) == 0 { _,_ = b.SendGroupMessage(group,0,message.AtMessage(id),message.PlainMessage("命令执行参数不完整")) return } if isTrusted(id){ cmd := strings.Fields(ucmd) app := cmd[0] args := cmd[1:] out,_ := exec.Command(app,args...).Output() _,_ = b.SendGroupMessage(group,0,message.AtMessage(id),message.PlainMessage(strings.TrimSpace(string(out)))) } else { _,_ = b.SendGroupMessage(group,0,message.AtMessage(id),message.PlainMessage("命令执行未受信任")) } } func tcping(pat []string,group uint,id uint,b *bot.Bot) { // ip/host [0] // port [1] args := len(pat) if args == 0 || args < 2{ _,_ = b.SendGroupMessage(group,0,message.AtMessage(id),message.PlainMessage("TCPING 参数不完整")) return } addr := net.ParseIP(pat[0]) if addr == nil { addr_,err := net.LookupIP(pat[0]) if err != nil{ _,_ = b.SendGroupMessage(group,0,message.AtMessage(id),message.PlainMessage("未找到DNS记录")) return } addr = addr_[0] } start := time.Now() var conn net.Conn var err error if !strings.Contains(addr.String(),":"){ conn,err = net.Dial("tcp",addr.String()+":"+pat[1]) } else { conn,err = net.Dial("tcp","[" + addr.String() + "]" + ":" +pat[1]) } if err != nil { _,_ = b.SendGroupMessage(group,0,message.AtMessage(id),message.PlainMessage("建立TCP连接时出错: "+err.Error())) return } _,_ = b.SendGroupMessage(group,0,message.AtMessage(id),message.PlainMessage(strings.Join([]string{"TCPING",pat[0],":",pat[1],time.Now().Sub(start).String()}," "))) _ = conn.Close() } func count(pat []string,group uint,id uint,b *bot.Bot){ if len(pat) == 0{ _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage("计时器参数不完整")) return } switch pat[0] { case "start": var remark string if len(pat) == 1{ remark = "未命名记录" } else { remark = 
strings.Join(pat[1:]," ") } counter = append(counter, COUNTER{ ID: id, REMARK: remark, BEGIN: time.Now(), COUNT: nil, }) _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage(remark+"\n已开始计时")) case "stop": endTime := time.Now() _,index := getCounted(id) if index != -1 { counts := len(counter[index].COUNT) if counts <= 1 { _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage(counter[index].REMARK+"\n计时: "+endTime.Sub(counter[index].BEGIN).String())) delCounted(index) } else { var rp string for i,cts := range counter[index].COUNT{ rp += "计次: " + strconv.Itoa(i+1)+"\n计时: "+ cts.Sub(counter[index].BEGIN).String() + "\n" } _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage(counter[index].REMARK + "\n" + rp)) delCounted(index) } } else { _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage("未找到你的计时记录")) } case "count": endTime := time.Now() _,index := getCounted(id) if index != -1 { counter[index].COUNT=append(counter[index].COUNT,endTime) _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage(counter[index].REMARK + "\n计次" +strconv.Itoa(len(counter[index].COUNT))+"已开始")) } else { _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage("未找到你的计时记录")) } default: _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage("未知的计时器命令")) } } func findkey(full string,group uint,id uint,b *bot.Bot){ var matchs []int for i,sw := range global.data{ if strings.Contains(full,sw.word){ matchs = append(matchs,i) } } matchsN := len(matchs) // 3 if matchsN == 1{ _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage(global.data[0].reply)) }else if matchsN > 1{ _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage(global.data[rand.Intn(matchsN)].reply)) } //else { // _, _ = b.SendGroupMessage(group, 0, message.AtMessage(id), message.PlainMessage("words not 
found")) //} } func main() { global = KEYS{data: []KEY{{ word: "测试", reply: "测试成功啦", }, { word: "测试", reply: "测试2也成功啦", }, { word: "测试", reply: "随机测试啦", }, { word: "测试", reply: "给我滚去吃饭!!", }}} // Catch interrupt interrupt := make(chan os.Signal, 1) signal.Notify(interrupt, os.Interrupt) // Create a bot instance c := bot.NewClient("default", "http://192.168.31.99:8888", "starxiyf") // Setup Log Level c.Logger.Level = logrus.TraceLevel // If able to connect to api key, err := c.Auth() if err != nil { c.Logger.Fatal(err) } // Check if id is mismatch b, err := c.Verify(qq, key) if err != nil { c.Logger.Fatal(err) } //defer c.Release(qq) go func() { err = b.FetchMessages() if err != nil { c.Logger.Fatal(err) } }() for { select { case e := <-b.Chan: switch e.Type { case message.EventReceiveGroupMessage: quote := e.MessageChain[0].Id var text string for _,mp := range e.MessageChain{ pt := mp.Text if len(pt) != 0 { text += pt } } //fmt.Printf("%+v\n", e.MessageChain) if len(text) != 0 { // HELP _,errHelp := getARG(text,helpKey) if errHelp == nil { _,_ = b.SendGroupMessage(e.Sender.Group.Id,quote,message.PlainMessage(strings.TrimSpace(help))) break } // EXEC argsExec,errExec := getARG(text,execKey) if errExec == nil { go safeExec(strings.Join(argsExec," ") ,e.Sender.Group.Id,e.Sender.Id,b) break } // Colorful pic _, errColor := getARG(text, colorKey) if errColor == nil { _,_ = b.SendGroupMessage(e.Sender.Group.Id,quote,message.ImageMessage("url","https://i.xinger.ink:4443/images.php")) break } // TCPING argsTpin,errTpin := getARG(text,tpinKey) if errTpin == nil { go tcping(argsTpin,e.Sender.Group.Id,e.Sender.Id,b) break } // COUNTER argsCoun,errCoun := getARG(text,counterKey) if errCoun == nil { go count(argsCoun,e.Sender.Group.Id,e.Sender.Id,b) break } // KEYWORD TODO go findkey(text,e.Sender.Group.Id,e.Sender.Id,b) } } case <-interrupt: fmt.Println("######") fmt.Println("interrupt") fmt.Println("######") //c.Release(qq) _ = c.Release(qq) return } } }
package jsonv import ( "bytes" "io" "reflect" "testing" ) func Test_scannerTokens(t *testing.T) { cases := []struct { json string tok TokenType val []byte }{ {"{", TokenObjectBegin, []byte("{")}, {" {", TokenObjectBegin, []byte("{")}, {"\t{", TokenObjectBegin, []byte("{")}, {"\n{", TokenObjectBegin, []byte("{")}, {"\r{", TokenObjectBegin, []byte("{")}, {" \t\n\n\r\t { \t\t", TokenObjectBegin, []byte("{")}, {"}", TokenObjectEnd, []byte("}")}, {"[", TokenArrayBegin, []byte("[")}, {"]", TokenArrayEnd, []byte("]")}, {` , `, TokenItemSep, []byte(",")}, {` :, `, TokenPropSep, []byte(":")}, {"true", TokenTrue, []byte("true")}, {"false,", TokenFalse, []byte("false")}, {"null", TokenNull, []byte("null")}, {"0 ", TokenNumber, []byte("0")}, {"5 ", TokenNumber, []byte("5")}, {"-5,", TokenNumber, []byte("-5")}, {"0.1,", TokenNumber, []byte("0.1")}, {"-0.1 ", TokenNumber, []byte("-0.1")}, {"0.123 ", TokenNumber, []byte("0.123")}, {"1234567890 ", TokenNumber, []byte("1234567890")}, {"2e+12", TokenNumber, []byte("2e+12")}, {"2e-12", TokenNumber, []byte("2e-12")}, {"2e12", TokenNumber, []byte("2e12")}, {"2.3e+9", TokenNumber, []byte("2.3e+9")}, {"0.2e-5", TokenNumber, []byte("0.2e-5")}, {"0.2e5", TokenNumber, []byte("0.2e5")}, {",", TokenItemSep, []byte(",")}, {`""`, TokenString, []byte(`""`)}, {`"Abc"`, TokenString, []byte(`"Abc"`)}, {`"A\"b\\c"`, TokenString, []byte(`"A\"b\\c"`)}, {`"\"A\"b\\c"`, TokenString, []byte(`"\"A\"b\\c"`)}, {` "Abc" `, TokenString, []byte(`"Abc"`)}, } for i, c := range cases { t.Logf("Starting case: %d\n", i) s := NewScanner(bytes.NewBufferString(c.json)) tok, b, err := s.ReadToken() if err != nil { t.Errorf("Case %d error: %v", i, err) } else if tok != c.tok { t.Errorf("Case %d token: Got %v, want %v", i, tok, c.tok) } else if !reflect.DeepEqual(b, c.val) { t.Errorf("Case %d val: Got \"%s\", want \"%s\"", i, b, c.val) } } } // test skipValue // Used by Object when it needs to jump an unneeded property. 
// Test skipping values of every JSON kind:
// null, string, number, bool, array, object {}, object {props}, object {{},{},{}}.
//
// Each case is an object whose first property ("fake") must be skipped by
// SkipValue and whose second property ("actual") must still be readable
// afterwards -- proving SkipValue consumed exactly one value.
func Test_scannerSkipValue(t *testing.T) {
	cases := []string{
		`{"fake": null, "actual": "test"}`,
		`{"fake": false, "actual": "test"}`,
		`{"fake": true, "actual": "test"}`,
		`{"fake": "a string", "actual": "test"}`,
		`{"fake": "\"", "actual": "test"}`,
		`{"fake": 123123123, "actual": "test"}`,
		`{"fake": 12.2, "actual": "test"}`,
		`{"fake": -12.2e23, "actual": "test"}`,
		`{"fake": [], "actual": "test"}`,
		`{"fake": [{},{}], "actual": "test"}`,
		`{"fake": [1,true, null], "actual": "test"}`,
		`{"fake": {}, "actual": "test"}`,
		`{"fake": {"diff": "val", "age": 42}, "actual": "test"}`,
		`{"fake": {"diff": "val", "age": 42, "sub": {}}, "actual": "test"}`,
		`{"fake": {"diff": "val", "age": 42, "sub": {"has": null}}, "actual": "test"}`,
	}
	// Tokens before the skipped value: `{ "fake" :`
	want1 := []TokenType{TokenObjectBegin, TokenString, TokenPropSep}
	// Tokens after the skipped value: `, "actual" : "test" }`
	want2 := []TokenType{TokenItemSep, TokenString, TokenPropSep, TokenString, TokenObjectEnd}

	for i, json := range cases {
		t.Logf("Starting case %d: %s\n", i, json)
		s := NewScanner(bytes.NewBufferString(json))

		// read the first bits
		for _, w := range want1 {
			if tok, _, err := s.ReadToken(); tok != w {
				if err != nil {
					t.Fatal(err)
				} else {
					t.Fatalf("Got token: %v, want %v", tok, w)
				}
				return
			}
		}

		// skip a value (complex or whatever)
		if err := s.SkipValue(); err != nil {
			t.Fatal(err)
		}

		// finish up
		for _, w := range want2 {
			if tok, _, err := s.ReadToken(); tok != w {
				if err != nil {
					t.Fatal(err)
				} else {
					t.Fatalf("Got token: %v, want %v", tok, w)
				}
				return
			}
		}

		// make sure we're at the end
		if tok, buf, err := s.ReadToken(); err != io.EOF {
			t.Fatalf("Got token: %v, buf %v, err %v, want EOF", tok, buf, err)
		}
	}
}

// Test_scannerLargeSource streams a ~60 KiB synthetic document (1024 copies
// of a small object inside one array) through the scanner, verifying the
// token sequence repeats correctly across internal buffer refills.
func Test_scannerLargeSource(t *testing.T) {
	data1 := []byte(`{"Name": "Angelo","Age":24,"Friends":["Bob","Jim","Jenny"]}`)
	// Layout: '[' + 1024 x (object + ',') with the final ',' overwritten
	// by ']' -- total size = 1 + 1024*(len+1) ... trailing bytes reserved.
	data := make([]byte, len(data1)*1024+2+1023)
	for i := 0; i < 1024; i++ {
		offset := 1 + (len(data1)+1)*i
		copy(data[offset:], data1)
		data[offset+len(data1)] = ','
	}
	data[0] = '['
	data[len(data)-1] = ']'

	// Expected token sequence for one object plus its trailing item separator.
	wantToks := []TokenType{TokenObjectBegin,
		TokenString, TokenPropSep, TokenString, TokenItemSep,
		TokenString, TokenPropSep, TokenNumber, TokenItemSep,
		TokenString, TokenPropSep, TokenArrayBegin, TokenString,
		TokenItemSep, TokenString, TokenItemSep, TokenString,
		TokenArrayEnd, TokenObjectEnd, TokenItemSep,
	}
	lenWantToks := len(wantToks)

	// read 1024 objects + ',' chars without a trailing ',' char
	toksToRead := lenWantToks*1024 - 1

	// start scanning
	s := NewScanner(bytes.NewReader(data))

	// read array start
	tok, _, err := s.ReadToken()
	if tok != TokenArrayBegin {
		t.Fatalf("Got %v, err %v. Want %v", tok, err, TokenArrayBegin)
	}

	for i := 0; i < toksToRead; i++ {
		got, buf, err := s.ReadToken()
		if got == TokenError {
			t.Fatal(err)
		}
		t.Logf("token: %v %s", got, buf)
		want := wantToks[i%lenWantToks]
		if got != want {
			t.Fatalf("Token %d: Got %v, want %v", i, got, want)
		}
	}

	// read the array end
	if tok, _, err := s.ReadToken(); tok != TokenArrayEnd {
		t.Fatalf("Got %v, err %v. Want %v", tok, err, TokenArrayEnd)
	}
}
package services_test

import (
	"errors"
	"reflect"
	"strconv"
	"testing"

	"github.com/mrdulin/go-rpc-cnode/mocks"
	"github.com/mrdulin/go-rpc-cnode/models"
	"github.com/mrdulin/go-rpc-cnode/services"
	"github.com/stretchr/testify/mock"
)

const (
	// baseurl is the fake API root used by all mocked HTTP expectations.
	baseurl string = "http://localhost:3000"
	// accesstoken is a dummy credential passed through the request payloads.
	accesstoken string = "123"
)

// TestMessageService_MarkOneMessage verifies that MarkOneMessage POSTs to
// /message/mark_one/<id> and copies the marked-message id from the mocked
// response into its output parameter.
func TestMessageService_MarkOneMessage(t *testing.T) {
	markedMsgId := "666"
	t.Run("should mark one message", func(t *testing.T) {
		testHttp := new(mocks.MockedHttp)
		var r models.MarkOneMessageResponse
		var res string
		args := services.MarkOneMessageArgs{ID: "1", Accesstoken: accesstoken}
		// The Run callback fills the response pointer (3rd Post argument),
		// simulating the server's JSON body being decoded into it.
		testHttp.
			On("Post", baseurl+"/message/mark_one/"+args.ID, &services.MarkOneMessageRequestPayload{Accesstoken: accesstoken}, &r).
			Return(nil).
			Run(func(args mock.Arguments) {
				arg := args.Get(2).(*models.MarkOneMessageResponse)
				arg.MarkedMsgId = &markedMsgId
			})
		svc := services.NewMessageService(testHttp, baseurl)
		err := svc.MarkOneMessage(&args, &res)
		if err != nil {
			t.Error(err)
		}
		if !reflect.DeepEqual(res, markedMsgId) {
			t.Errorf("got %#v, want: %#v", res, markedMsgId)
		}
	})
}

// TestMessageService_MarkAllMessages covers both the success path (marked
// messages copied to the result slice) and the failure path (HTTP error is
// mapped to services.ErrMarkAllMessages).
func TestMessageService_MarkAllMessages(t *testing.T) {
	var testMarkedMessage []models.MarkedMessage
	for i := 1; i <= 3; i++ {
		testMarkedMessage = append(testMarkedMessage, models.MarkedMessage{ID: strconv.Itoa(i)})
	}
	t.Run("should mark all messages", func(t *testing.T) {
		testHttp := new(mocks.MockedHttp)
		var r models.MarkAllMessagesResponse
		var res []models.MarkedMessage
		args := services.MarkAllMessagesArgs{Accesstoken: accesstoken}
		testHttp.
			On("Post", baseurl+"/message/mark_all", &services.MarkAllMessagesRequestPayload{Accesstoken: args.Accesstoken}, &r).
			Return(nil).
			Run(func(args mock.Arguments) {
				arg := args.Get(2).(*models.MarkAllMessagesResponse)
				arg.MarkedMsgs = &testMarkedMessage
			})
		svc := services.NewMessageService(testHttp, baseurl)
		err := svc.MarkAllMessages(&args, &res)
		testHttp.AssertExpectations(t)
		if !reflect.DeepEqual(err, nil) {
			t.Error(err)
		}
		if !reflect.DeepEqual(res, testMarkedMessage) {
			t.Errorf("got: %+v, want: %+v", res, testMarkedMessage)
		}
	})
	t.Run("should return error", func(t *testing.T) {
		testHttp := new(mocks.MockedHttp)
		var r models.MarkAllMessagesResponse
		var res []models.MarkedMessage
		args := services.MarkAllMessagesArgs{Accesstoken: accesstoken}
		// Simulate a transport failure; the service is expected to wrap it
		// into its sentinel error.
		testHttp.
			On("Post", baseurl+"/message/mark_all", &services.MarkAllMessagesRequestPayload{Accesstoken: args.Accesstoken}, &r).
			Return(errors.New("network"))
		svc := services.NewMessageService(testHttp, baseurl)
		err := svc.MarkAllMessages(&args, &res)
		testHttp.AssertExpectations(t)
		if !reflect.DeepEqual(err, services.ErrMarkAllMessages) {
			t.Errorf("got: %+v, want: %+v", err, services.ErrMarkAllMessages)
		}
	})
}
// Package global holds application-wide build and database settings.
package global

const (
	// Version go-admin version info
	Version = "2.1.0"
)

var (
	// Source is the database connection source (DSN); set at startup.
	Source string
	// Driver is the database driver name (e.g. mysql, postgres).
	Driver string
	// DBName is the name of the database in use.
	DBName string
)
// Package persistence: data-access code for the ORDERS / ORDERSTATUS tables.
package persistence

import (
	"database/sql"
	"errors"
	"gopetstore/src/domain"
	"gopetstore/src/util"
	"log"
	"time"
)

const (
	// getOrderByOrderIdSQL joins ORDERS with ORDERSTATUS for a single order id.
	getOrderByOrderIdSQL = `select BILLADDR1 AS billAddress1,BILLADDR2 AS billAddress2,BILLCITY,BILLCOUNTRY,BILLSTATE,BILLTOFIRSTNAME,BILLTOLASTNAME,BILLZIP,
SHIPADDR1 AS shipAddress1,SHIPADDR2 AS shipAddress2,SHIPCITY,SHIPCOUNTRY,SHIPSTATE,SHIPTOFIRSTNAME,SHIPTOLASTNAME,SHIPZIP,CARDTYPE,COURIER,CREDITCARD,
EXPRDATE AS expiryDate,LOCALE,ORDERDATE,ORDERS.ORDERID,TOTALPRICE,USERID AS username,STATUS FROM ORDERS, ORDERSTATUS WHERE ORDERS.ORDERID = ? AND ORDERS.ORDERID = ORDERSTATUS.ORDERID`
	// getOrdersByUsernameSQL returns all orders of one user, oldest first.
	getOrdersByUsernameSQL = `SELECT BILLADDR1 AS billAddress1, BILLADDR2 AS billAddress2, BILLCITY, BILLCOUNTRY, BILLSTATE, BILLTOFIRSTNAME, BILLTOLASTNAME,
BILLZIP, SHIPADDR1 AS shipAddress1, SHIPADDR2 AS shipAddress2, SHIPCITY, SHIPCOUNTRY, SHIPSTATE, SHIPTOFIRSTNAME, SHIPTOLASTNAME, SHIPZIP, CARDTYPE, COURIER,
CREDITCARD, EXPRDATE AS expiryDate,LOCALE, ORDERDATE, ORDERS.ORDERID, TOTALPRICE, USERID AS username,STATUS FROM ORDERS, ORDERSTATUS WHERE ORDERS.USERID = ?
AND ORDERS.ORDERID = ORDERSTATUS.ORDERID ORDER BY ORDERDATE`
	// insertOrderSQL inserts one row into ORDERS (25 placeholders).
	insertOrderSQL = `INSERT INTO ORDERS (ORDERID, USERID, ORDERDATE, SHIPADDR1, SHIPADDR2, SHIPCITY, SHIPSTATE, SHIPZIP, SHIPCOUNTRY, BILLADDR1, BILLADDR2,
BILLCITY, BILLSTATE, BILLZIP, BILLCOUNTRY, COURIER, TOTALPRICE, BILLTOFIRSTNAME, BILLTOLASTNAME, SHIPTOFIRSTNAME, SHIPTOLASTNAME, CREDITCARD, EXPRDATE,
CARDTYPE, LOCALE) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
	// insertOrderStatusSQL inserts one row into ORDERSTATUS.
	insertOrderStatusSQL = `INSERT INTO ORDERSTATUS (ORDERID, LINENUM, TIMESTAMP, STATUS) VALUES (?, ?, ?, ?)`
)

// scanOrder maps the current row of r (columns in the SELECT order above)
// into a domain.Order.
//
// NOTE(review): billState and shipZip are scanned from the row but never
// copied into the returned Order (the literal below has no BillState or
// ShipZip fields) -- confirm whether those fields exist on domain.Order
// and are meant to be populated.
func scanOrder(r *sql.Rows) (*domain.Order, error) {
	var billAddr1, billAddr2, billCity, billCountry, billState, billToFirstName, billToLastName, billZip string
	var shipAddr1, shipAddr2, shipCity, shipCountry, shipState, shipFirstName, shipLastName, shipZip string
	var cardType, courier, creditCard string
	var expiryDate, locale, userName, status string
	var totalPrice float64
	var orderDate time.Time
	var orderId int
	err := r.Scan(&billAddr1, &billAddr2, &billCity, &billCountry, &billState, &billToFirstName, &billToLastName, &billZip,
		&shipAddr1, &shipAddr2, &shipCity, &shipCountry, &shipState, &shipFirstName, &shipLastName, &shipZip,
		&cardType, &courier, &creditCard, &expiryDate, &locale, &orderDate, &orderId, &totalPrice, &userName, &status)
	if err != nil {
		return nil, err
	}
	return &domain.Order{
		OrderId:         orderId,
		OrderDate:       orderDate,
		UserName:        userName,
		ShipAddress1:    shipAddr1,
		ShipAddress2:    shipAddr2,
		ShipCity:        shipCity,
		ShipState:       shipState,
		ShipCountry:     shipCountry,
		ShipToFirstName: shipFirstName,
		ShipToLastName:  shipLastName,
		BillAddress1:    billAddr1,
		BillAddress2:    billAddr2,
		BillCity:        billCity,
		BillZip:         billZip,
		BillCountry:     billCountry,
		BillToFirstName: billToFirstName,
		BillToLastName:  billToLastName,
		Courier:         courier,
		CreditCard:      creditCard,
		CardType:        cardType,
		TotalPrice:      totalPrice,
		ExpiryDate:      expiryDate,
		Locale:          locale,
		Status:          status,
	}, nil
}

// GetOrderByOrderId returns the single order with the given id, or an error
// if the query fails or no row matches.
func GetOrderByOrderId(orderId int) (*domain.Order, error) {
	d, err := util.GetConnection()
	// Close the connection on exit; the nil check makes the defer safe even
	// when GetConnection failed.
	defer func() {
		if d != nil {
			_ = d.Close()
		}
	}()
	if err != nil {
		return nil, err
	}
	r, err := d.Query(getOrderByOrderIdSQL, orderId)
	defer func() {
		if r != nil {
			_ = r.Close()
		}
	}()
	if err != nil {
		return nil, err
	}
	if r.Next() {
		order, err := scanOrder(r)
		if err != nil {
			return nil, err
		}
		order.OrderId = orderId
		return order, nil
	}
	// NOTE(review): this defer is redundant -- r.Close is already deferred above.
	defer r.Close()
	err = r.Err()
	if err != nil {
		return nil, err
	}
	return nil, errors.New("can not get a order by this orderId")
}

// GetOrdersByUserName returns every order belonging to userName, ordered by
// order date.  Rows that fail to scan are logged and skipped rather than
// aborting the whole result.
func GetOrdersByUserName(userName string) ([]*domain.Order, error) {
	d, err := util.GetConnection()
	defer func() {
		if d != nil {
			_ = d.Close()
		}
	}()
	if err != nil {
		return nil, err
	}
	r, err := d.Query(getOrdersByUsernameSQL, userName)
	defer func() {
		if r != nil {
			_ = r.Close()
		}
	}()
	if err != nil {
		return nil, err
	}
	var result []*domain.Order
	for r.Next() {
		order, err := scanOrder(r)
		if err != nil {
			// best-effort: skip unscannable rows but keep the rest
			log.Printf("GetOrdersByUserName scanOrder error: %v for userName: %v", err.Error(), userName)
			continue
		}
		result = append(result, order)
	}
	// NOTE(review): redundant defer -- r.Close is already deferred above.
	defer r.Close()
	err = r.Err()
	if err != nil {
		return result, err
	}
	return result, nil
}

// InsertOrder persists an order inside a single transaction: it decrements
// inventory for each line item, inserts the ORDERS row, the LINEITEM rows,
// and finally the ORDERSTATUS row.  Failures of the ORDERS/ORDERSTATUS
// inserts roll the transaction back.
func InsertOrder(o *domain.Order) error {
	// 这里的插入使用事务,插入订单出错则回滚报错
	// (translation: inserts run in a transaction; an order-insert error rolls back)
	return util.ExecTransaction(func(tx *sql.Tx) error {
		for _, li := range o.LineItems {
			// update inventory by item id
			// NOTE(review): a failed inventory update is only logged and the
			// loop continues -- confirm this best-effort behaviour is intended
			// inside a transaction.
			_, err := tx.Exec(updateInventoryByItemIdSQl, li.ItemId, li.Quantity)
			if err != nil {
				log.Printf("service InsertOrder UpdateInventoryQuantity error: %v", err.Error())
				continue
			}
		}
		// insert order info
		_, err := tx.Exec(insertOrderSQL, o.OrderId, o.UserName, o.OrderDate, o.ShipAddress1, o.ShipAddress2,
			o.ShipCity, o.ShipState, o.ShipZip, o.ShipCountry, o.BillAddress1, o.BillAddress2, o.BillCity,
			o.BillState, o.BillZip, o.BillCountry, o.Courier, o.TotalPrice, o.BillToFirstName, o.BillToLastName,
			o.ShipToFirstName, o.ShipToLastName, o.CreditCard, o.ExpiryDate, o.CardType, o.Locale)
		if err != nil {
			tx.Rollback()
			return err
		}
		// 总物品数 (total number of line items)
		var totalLineNum int
		for _, li := range o.LineItems {
			li.OrderId = o.OrderId
			totalLineNum += li.LineNumber
			// insert line item
			_, err := tx.Exec(insertLineItemSQL, li.OrderId, li.LineNumber, li.ItemId, li.Quantity, li.UnitPrice)
			if err != nil {
				log.Printf("service InsertOrder InsertLineItem error: %v", err.Error())
				continue
			}
		}
		// insert order status
		_, err = tx.Exec(insertOrderStatusSQL, o.OrderId, totalLineNum, o.OrderDate, o.Status)
		if err != nil {
			tx.Rollback()
			return err
		}
		return nil
	})
}
// Package configuration loads application settings from a YAML file and
// provides a fixed in-memory implementation for tests.
package configuration

import (
	"io/ioutil"
	"launchpad.net/goyaml"
	"log"
	"os"
	"path/filepath"
)

// Configuration exposes the application's settings: database credentials,
// key pair locations, mail addresses and the public URL.
type Configuration interface {
	GetDatabase() Database
	GetKeys() Keys
	GetMail() Mails
	GetUrl() string
	loadConfiguration()
	GetFilePath(filename string) string
}

// FileConfiguration is the production Configuration, populated from the
// YAML file named by CONF_FILENAME next to the executable.
type FileConfiguration struct {
	Db   Database
	Key  Keys
	Mail Mails
	Url  string
}

// loadConfiguration reads and unmarshals the YAML configuration file,
// aborting the process on any failure.
func (conf *FileConfiguration) loadConfiguration() {
	buffer, err := ioutil.ReadFile(conf.GetFilePath(CONF_FILENAME))
	if err != nil {
		// FIX: the original format string had no verb for err.Error(),
		// producing "%!(EXTRA string=...)" output.
		log.Fatalf("Erreur dans le chargement de la configuration. %s", err.Error())
	}
	// FIX: the unmarshalling error was previously ignored, letting the app
	// continue with a half-empty configuration.
	if err := goyaml.Unmarshal(buffer, conf); err != nil {
		log.Fatalf("Erreur dans le chargement de la configuration. %s", err.Error())
	}
}

// GetFilePath returns the absolute path of filename, resolved relative to
// the directory containing the running executable.
func (conf *FileConfiguration) GetFilePath(filename string) string {
	folder, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Fatalf("Erreur dans le path du fichier. %s", err.Error())
	}
	// filepath.Join builds an OS-correct path (the original concatenated "/").
	return filepath.Join(folder, filename)
}

// GetDatabase returns the database settings.
func (conf *FileConfiguration) GetDatabase() Database {
	return conf.Db
}

// GetKeys returns the token key-pair locations.
func (conf *FileConfiguration) GetKeys() Keys {
	return conf.Key
}

// GetMail returns the mail addresses.
func (conf *FileConfiguration) GetMail() Mails {
	return conf.Mail
}

// GetUrl returns the public base URL.
func (conf *FileConfiguration) GetUrl() string {
	return conf.Url
}

// TestConfiguration is a fixed, file-less Configuration used in tests.
type TestConfiguration struct{}

// loadConfiguration is a no-op: test settings are hard-coded.
func (conf *TestConfiguration) loadConfiguration() {}

// GetFilePath returns the filename unchanged (no resolution in tests).
func (conf *TestConfiguration) GetFilePath(filename string) string {
	return filename
}

// GetDatabase returns fixed local test database credentials.
func (conf *TestConfiguration) GetDatabase() Database {
	return Database{"localhost", "reunion", "test", "test"}
}

// GetMail returns fixed test mail addresses.
func (conf *TestConfiguration) GetMail() Mails {
	return Mails{"test@test", "contact@animalove.re"}
}

// GetUrl returns the local test URL.
func (conf *TestConfiguration) GetUrl() string {
	return "localhost:8080"
}

// GetKeys returns the test key-pair paths.
func (conf *TestConfiguration) GetKeys() Keys {
	return Keys{Private: "../token/private_key", Public: "../token/public_key"}
}
package util

import (
	"bytes"
	"io"
	"testing"
)

// makeStreams builds a connected pipe whose read end is wrapped in a
// ReadSeekCloseWrapper, returning the wrapped reader and the raw writer.
func makeStreams() (*ReadSeekCloseWrapper, io.WriteCloser) {
	r, w := io.Pipe()
	s := WrapReadSeekClose(r)
	return s, w
}

// TestRead checks that bytes written to the pipe arrive through Read.
func TestRead(t *testing.T) {
	r, w := makeStreams()
	buf := make([]byte, 10)
	// Writer runs concurrently because an io.Pipe write blocks until read.
	go func() {
		if _, err := w.Write([]byte("hello")); err != nil {
			t.Error(err)
		}
		if err := w.Close(); err != nil {
			t.Error(err)
		}
	}()
	count, err := r.Read(buf)
	if err != nil {
		t.Error(err)
	}
	if count != 5 {
		t.Errorf("Read returned %v, expected %v", count, 5)
	}
}

// TestClose verifies that closing the wrapper propagates to the pipe, so
// subsequent writes fail.
func TestClose(t *testing.T) {
	r, w := makeStreams()
	if err := r.Close(); err != nil {
		t.Error(err)
	}
	if count, err := w.Write([]byte("hello")); err == nil {
		t.Errorf("Successfully wrote %v bytes after closing", count)
	}
}

// TestSeekSet: absolute seeking is unsupported on a stream.
func TestSeekSet(t *testing.T) {
	r, w := makeStreams()
	if _, err := w.Write([]byte("hello")); err != nil {
		t.Error(err)
	}
	if pos, err := r.Seek(2, 0); err == nil {
		t.Errorf("Seek via SEEK_SET should not be implemented; got %v", pos)
	}
}

// TestSeekCur: relative seeking is unsupported on a stream.
func TestSeekCur(t *testing.T) {
	r, w := makeStreams()
	if _, err := w.Write([]byte("hello")); err != nil {
		t.Error(err)
	}
	if pos, err := r.Seek(2, 1); err == nil {
		t.Errorf("Seek via SEEK_CUR should not be implemented; got %v", pos)
	}
}

// TestSeekEndZero: Seek(0, SEEK_END) discards buffered data so the next
// Read returns only bytes written after the seek.
func TestSeekEndZero(t *testing.T) {
	r, w := makeStreams()
	if _, err := w.Write([]byte("hello")); err != nil {
		t.Error(err)
	}
	_, err := r.Seek(0, 2)
	if err != nil {
		t.Error(err)
	}
	if _, err := w.Write([]byte("world")); err != nil {
		t.Error(err)
	}
	buf := make([]byte, 10)
	count, err := r.Read(buf)
	if err != nil {
		t.Error(err)
	}
	if count != 5 {
		t.Errorf("Got %v bytes in %v, expected 5", count, buf)
	}
	expected := []byte("world\x00\x00\x00\x00\x00")
	if bytes.Compare(buf, expected) != 0 {
		t.Errorf("Got unexpected content %v, expected %v", buf, expected)
	}
}

// TestSeekEndNonZero: a negative offset from the end keeps the trailing
// bytes of the buffered data.
func TestSeekEndNonZero(t *testing.T) {
	r, w := makeStreams()
	if _, err := w.Write([]byte("hello")); err != nil {
		t.Error(err)
	}
	_, err := r.Seek(-2, 2)
	if err != nil {
		t.Error(err)
	}
	buf := make([]byte, 10)
	count, err := r.Read(buf)
	if err != nil {
		t.Error(err)
	}
	if count != 2 {
		t.Errorf("Got %v bytes in %v, expected 5", count, buf)
	}
	expected := []byte("lo\x00\x00\x00\x00\x00\x00\x00\x00")
	if bytes.Compare(buf, expected) != 0 {
		t.Errorf("Got unexpected content %v, expected %v", buf, expected)
	}
}

// TestSeekEndPastEOF: seeking beyond the end must fail.
func TestSeekEndPastEOF(t *testing.T) {
	r, w := makeStreams()
	if _, err := w.Write([]byte("hello")); err != nil {
		t.Error(err)
	}
	_, err := r.Seek(2, 2)
	if err == nil {
		t.Errorf("Seeking past EOF should not be supported")
	}
}
// Package lib implements the job plumbing that finds and deletes every
// Sakura Cloud resource of a given type, per zone, via the jobq queue.
package lib

import (
	"fmt"
	"github.com/sacloud/libsacloud/api"
	"github.com/sacloud/libsacloud/sacloud"
	"github.com/yamamoto-febc/jobq"
	"github.com/yamamoto-febc/sacloud-delete-all/version"
	"strings"
	"sync"
	"time"
)

// ParallelJobPayload carries the route name plus the set of resource types
// to delete concurrently.
type ParallelJobPayload struct {
	RouteName string
	Targets   []string
}

// doActionPerZone runs sacloudAPIFunc once per configured zone, stopping at
// the first error.
func doActionPerZone(option *Option, sacloudAPIFunc func(*api.Client) error) error {
	for _, zone := range option.Zones {
		client := getClient(option, zone)
		// call API func per zone.
		err := sacloudAPIFunc(client)
		if err != nil {
			return err
		}
	}
	return nil
}

// FindAndDeleteJob returns a jobq job factory that deletes all resources of
// a single target type.
func FindAndDeleteJob(target string) func(interface{}) jobq.JobAPI {
	return func(p interface{}) jobq.JobAPI {
		return jobq.NewJob(fmt.Sprintf("FindAndDelete:%s", target), findAndDelete, target)
	}
}

// FindAndDeleteJobParallel returns a job factory that deletes several target
// types concurrently under one route name.
func FindAndDeleteJobParallel(routeName string, targets ...string) func(interface{}) jobq.JobAPI {
	payload := ParallelJobPayload{
		RouteName: routeName,
		Targets:   targets,
	}
	targetName := strings.Join(targets, ",")
	return func(p interface{}) jobq.JobAPI {
		return jobq.NewJob(fmt.Sprintf("FindAndDeleteParallel:%s", targetName), findAndDeleteParallel, payload)
	}
}

// findAndDeleteParallel runs doFindAndDelete for each target in its own
// goroutine, waits for all of them, then signals the route as done.
//
// NOTE(review): the goroutine reads the loop variable `target` inside
// Sprintf instead of its parameter `t`; before Go 1.22 every goroutine may
// observe the last value of `target`.  Also wg.Done/resourceWaitGroup.Done
// are skipped on the error path, so wg.Wait can block forever after a
// StopByError -- both look unintended; confirm.
func findAndDeleteParallel(queue *jobq.Queue, option *jobq.Option, job jobq.JobAPI) {
	targets := job.GetPayload().(ParallelJobPayload)

	var wg sync.WaitGroup
	wg.Add(len(targets.Targets))

	for _, target := range targets.Targets {
		go func(t string) {
			err := doFindAndDelete(queue, option, t)
			if err != nil {
				queue.StopByError(err)
			} else {
				queue.PushRequest(fmt.Sprintf("%s:done", target), nil)
				resourceWaitGroup.Done()
				wg.Done()
			}
		}(target)
	}
	wg.Wait()
	queue.PushRequest(fmt.Sprintf("%s:done", targets.RouteName), nil)
}

// findAndDelete deletes all resources of one target type, then signals
// completion on the queue and the package-level resourceWaitGroup.
func findAndDelete(queue *jobq.Queue, option *jobq.Option, job jobq.JobAPI) {
	target := job.GetPayload().(string)
	err := doFindAndDelete(queue, option, target)
	if err != nil {
		queue.StopByError(err)
	} else {
		queue.PushRequest(fmt.Sprintf("%s:done", target), nil)
		resourceWaitGroup.Done()
	}
}

// doFindAndDelete finds every resource of the target type in each zone and
// deletes it (powering it off first when the API exposes power state).
//
// NOTE(review): the `option` parameter is unused -- the package-level
// `currentOption` is passed to doActionPerZone instead; confirm intended.
// NOTE(review): the per-resource goroutines only call wg.Done on the happy
// path; every StopByError return leaves wg.Wait blocked (a `defer wg.Done()`
// at the top of the goroutine would fix this).
func doFindAndDelete(queue *jobq.Queue, option *jobq.Option, target string) error {
	return doActionPerZone(currentOption, func(client *api.Client) error {
		apiWrapper := getSacloudAPIWrapper(client, target)
		resources, err := apiWrapper.findFunc()
		if err != nil {
			return fmt.Errorf("target[%s](%s) : %s", target, client.Zone, err)
		}

		var wg sync.WaitGroup
		wg.Add(len(resources))

		for _, resource := range resources {
			go func(r sacloudResourceWrapper) {
				id := r.id
				name := r.name
				// Power down running resources before deleting them.
				if apiWrapper.isAvaiableFunc != nil {
					isPowerOn, err := apiWrapper.isAvaiableFunc(id)
					if err != nil {
						queue.StopByError(fmt.Errorf("%-26s : resource(id:%d,name:%s) : %s", fmt.Sprintf("target[%s/%s]", target, client.Zone), id, name, err))
						return
					}
					if isPowerOn {
						_, err := apiWrapper.powerOffFunc(id)
						if err != nil {
							queue.StopByError(fmt.Errorf("%-26s : resource(id:%d,name:%s) : %s", fmt.Sprintf("target[%s/%s]", target, client.Zone), id, name, err))
							return
						}
						err = apiWrapper.waitForPoweroffFunc(id, client.DefaultTimeoutDuration)
						if err != nil {
							queue.StopByError(fmt.Errorf("%-26s : resource(id:%d,name:%s) : %s", fmt.Sprintf("target[%s/%s]", target, client.Zone), id, name, err))
							return
						}
					}
				}
				err := apiWrapper.deleteFunc(id)
				if err != nil {
					queue.StopByError(fmt.Errorf("%-26s : resource(id:%d,name:%s) : %s", fmt.Sprintf("target[%s/%s]", target, client.Zone), id, name, err))
					return
				}
				queue.PushInfo(fmt.Sprintf("%-26s : resource(id:%d,name:%s) deleted.", fmt.Sprintf("target[%s/%s]", target, client.Zone), id, name))
				wg.Done()
			}(resource)
		}
		wg.Wait()
		return nil
	})
}

// getClient builds an authenticated API client for one zone.
func getClient(o *Option, zone string) *api.Client {
	client := api.NewClient(o.AccessToken, o.AccessTokenSecret, zone)
	client.TraceMode = o.TraceMode
	client.UserAgent = fmt.Sprintf("sacloud-delete-all/%s", version.Version)
	return client
}

// sacloudAPIWrapper adapts one resource type's API to a uniform shape.
// Power-related funcs are nil for resource types with no power state.
// (Note: "isAvaiable" is a pre-existing spelling kept for compatibility.)
type sacloudAPIWrapper struct {
	findFunc            func() ([]sacloudResourceWrapper, error)
	isAvaiableFunc      func(int64) (bool, error)
	powerOffFunc        func(int64) (bool, error)
	waitForPoweroffFunc func(int64, time.Duration) error
	deleteFunc          func(int64) error
}

// sacloudResourceWrapper is the minimal (id, name) view of any resource.
type sacloudResourceWrapper struct {
	id   int64
	name string
}

// getSacloudAPIWrapper returns the wrapper for one target type, or nil for
// an unknown target.
func getSacloudAPIWrapper(client *api.Client, target string) *sacloudAPIWrapper {
	switch target {
	case "archive":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.Archive.Find),
			deleteFunc: func(id int64) error { _, err := client.Archive.Delete(id); return err },
		}
	case "bridge":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.Bridge.Find),
			deleteFunc: func(id int64) error { _, err := client.Bridge.Delete(id); return err },
		}
	case "cdrom":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.CDROM.Find),
			deleteFunc: func(id int64) error { _, err := client.CDROM.Delete(id); return err },
		}
	case "disk":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.Disk.Find),
			deleteFunc: func(id int64) error { _, err := client.Disk.Delete(id); return err },
		}
	case "icon":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.Icon.Find),
			deleteFunc: func(id int64) error { _, err := client.Icon.Delete(id); return err },
		}
	case "internet":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.Internet.Find),
			deleteFunc: func(id int64) error { _, err := client.Internet.Delete(id); return err },
		}
	case "license":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.License.Find),
			deleteFunc: func(id int64) error { _, err := client.License.Delete(id); return err },
		}
	case "note":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.Note.Find),
			deleteFunc: func(id int64) error { _, err := client.Note.Delete(id); return err },
		}
	case "packetfilter":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.PacketFilter.Find),
			deleteFunc: func(id int64) error { _, err := client.PacketFilter.Delete(id); return err },
		}
	case "sshkey":
		return &sacloudAPIWrapper{
			findFunc:   createFindFunc(client.SSHKey.Find),
			deleteFunc: func(id int64) error { _, err := client.SSHKey.Delete(id); return err },
		}
	case "switch":
		return &sacloudAPIWrapper{
			findFunc: createFindFunc(client.Switch.Find),
			deleteFunc: func(id int64) error {
				// Runs before bridge deletion.
				// Disconnect from the bridge first, if one is attached.
				sw, err := client.Switch.Read(id)
				if err != nil {
					return err
				}
				if sw.Bridge != nil {
					_, err := client.Switch.DisconnectFromBridge(id)
					if err != nil {
						return err
					}
				}
				_, err = client.Switch.Delete(id)
				return err
			},
		}
	case "autobackup":
		return &sacloudAPIWrapper{
			findFunc: func() ([]sacloudResourceWrapper, error) {
				result, err := client.AutoBackup.Find()
				return toResourceList(result.CommonServiceAutoBackupItems), err
			},
			deleteFunc: func(id int64) error { _, err := client.AutoBackup.Delete(id); return err },
		}
	case "gslb":
		return &sacloudAPIWrapper{
			findFunc: func() ([]sacloudResourceWrapper, error) {
				result, err := client.GSLB.Find()
				return toResourceList(result.CommonServiceGSLBItems), err
			},
			deleteFunc: func(id int64) error { _, err := client.GSLB.Delete(id); return err },
		}
	case "dns":
		return &sacloudAPIWrapper{
			findFunc: func() ([]sacloudResourceWrapper, error) {
				result, err := client.DNS.Find()
				return toResourceList(result.CommonServiceDNSItems), err
			},
			deleteFunc: func(id int64) error { _, err := client.DNS.Delete(id); return err },
		}
	case "simplemonitor":
		return &sacloudAPIWrapper{
			findFunc: func() ([]sacloudResourceWrapper, error) {
				result, err := client.SimpleMonitor.Find()
				return toResourceList(result.SimpleMonitors), err
			},
			deleteFunc: func(id int64) error { _, err := client.SimpleMonitor.Delete(id); return err },
		}
	case "server":
		return &sacloudAPIWrapper{
			findFunc:            createFindFunc(client.Server.Find),
			isAvaiableFunc:      client.Server.IsUp,
			powerOffFunc:        client.Server.Stop,
			waitForPoweroffFunc: client.Server.SleepUntilDown,
			deleteFunc:          func(id int64) error { _, err := client.Server.Delete(id); return err },
		}
	case "database":
		return &sacloudAPIWrapper{
			findFunc: func() ([]sacloudResourceWrapper, error) {
				result, err := client.Database.Find()
				return toResourceList(result.Databases), err
			},
			isAvaiableFunc:      client.Database.IsUp,
			powerOffFunc:        client.Database.Stop,
			waitForPoweroffFunc: client.Database.SleepUntilDown,
			deleteFunc:          func(id int64) error { _, err := client.Database.Delete(id); return err },
		}
	case "loadbalancer":
		return &sacloudAPIWrapper{
			findFunc: func() ([]sacloudResourceWrapper, error) {
				result, err := client.LoadBalancer.Find()
				return toResourceList(result.LoadBalancers), err
			},
			isAvaiableFunc:      client.LoadBalancer.IsUp,
			powerOffFunc:        client.LoadBalancer.Stop,
			waitForPoweroffFunc: client.LoadBalancer.SleepUntilDown,
			deleteFunc:          func(id int64) error { _, err := client.LoadBalancer.Delete(id); return err },
		}
	case "vpcrouter":
		return &sacloudAPIWrapper{
			findFunc: func() ([]sacloudResourceWrapper, error) {
				result, err := client.VPCRouter.Find()
				return toResourceList(result.VPCRouters), err
			},
			isAvaiableFunc:      client.VPCRouter.IsUp,
			powerOffFunc:        client.VPCRouter.Stop,
			waitForPoweroffFunc: client.VPCRouter.SleepUntilDown,
			deleteFunc:          func(id int64) error { _, err := client.VPCRouter.Delete(id); return err },
		}
	}
	return nil
}

// createFindFunc adapts a generic SearchResponse finder into a findFunc that
// flattens every possible resource collection into one list.  Only the
// collections actually populated by the underlying Find call are non-empty.
func createFindFunc(f func() (*sacloud.SearchResponse, error)) func() ([]sacloudResourceWrapper, error) {
	return func() ([]sacloudResourceWrapper, error) {
		result, err := f()
		var res []sacloudResourceWrapper
		if err != nil {
			return res, err
		}
		res = append(res, toResourceList(result.Archives)...)
		res = append(res, toResourceList(result.Bridges)...)
		res = append(res, toResourceList(result.CDROMs)...)
		res = append(res, toResourceList(result.Disks)...)
		res = append(res, toResourceList(result.Icons)...)
		res = append(res, toResourceList(result.Internet)...)
		res = append(res, toResourceList(result.Licenses)...)
		res = append(res, toResourceList(result.Notes)...)
		res = append(res, toResourceList(result.PacketFilters)...)
		res = append(res, toResourceList(result.Servers)...)
		res = append(res, toResourceList(result.SSHKeys)...)
		res = append(res, toResourceList(result.Switches)...)
		return res, err
	}
}

// toResourceList converts any supported typed resource slice into the
// uniform (id, name) wrapper list.  For shared/scoped resource types
// (archive, cdrom, icon, note) only user-scoped entries are kept.
// The `break` statements are redundant in Go switches but harmless.
func toResourceList(arr interface{}) []sacloudResourceWrapper {
	var res []sacloudResourceWrapper = []sacloudResourceWrapper{}
	switch sl := arr.(type) {
	case []sacloud.Archive:
		for _, s := range sl {
			if s.Scope != string(sacloud.ESCopeUser) {
				continue
			}
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Bridge:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.CDROM:
		for _, s := range sl {
			if s.Scope != string(sacloud.ESCopeUser) {
				continue
			}
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Disk:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Icon:
		for _, s := range sl {
			if s.Scope != string(sacloud.ESCopeUser) {
				continue
			}
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Internet:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.License:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Note:
		for _, s := range sl {
			if s.Scope != string(sacloud.ESCopeUser) {
				continue
			}
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.PacketFilter:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Server:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.SSHKey:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Switch:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.AutoBackup:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.DNS:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.GSLB:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.SimpleMonitor:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.Database:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.LoadBalancer:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	case []sacloud.VPCRouter:
		for _, s := range sl {
			res = append(res, sacloudResourceWrapper{id: s.GetID(), name: s.Name})
		}
		break
	}
	return res
}
package scraper

import (
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"

	"github.com/yevhenshymotiuk/ekatalog-scraper/items"
)

// newTestServer serves a fixed e-katalog-style product page at /html so the
// scraper can be exercised without network access.  The markup mirrors the
// real site's structure: category link, product title, and one configuration
// row with titled <span> cells plus a price range.
func newTestServer() *httptest.Server {
	mux := http.NewServeMux()
	mux.HandleFunc("/html", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")
		w.Write([]byte(`<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Test Page</title>
</head>
<body>
<a href="/k298.htm" class="path_lnk">Ноутбуки</a>
<div id="top-page-title">
<b class="ib">Apple MacBook Pro 13 (2020)</b>
</div>
<table>
<tbody>
<tr class="conf-tr">
<td class="conf-td c21"><span title="Серия процессора">Core i5&nbsp;</span></td>
<td class="conf-td c21"><span title="Модель процессора">8257U&nbsp;</span></td>
<td class="conf-td c21"><span title="Объем оперативной памяти">8&nbsp;ГБ</span></td>
<td class="conf-td c21"><span title="Модель видеокарты">Iris Plus Graphics 645&nbsp;</span></td>
<td class="conf-td c21"><span title="Тип накопителя">SSD&nbsp;</span></td>
<td class="conf-td c21"><span title="Емкость накопителя">256&nbsp;ГБ</span></td>
<td class="conf-td conf-price" align="right"><span class="price-int"><span>36&nbsp;949&nbsp;</span>.. <span>43&nbsp;176&nbsp;</span>грн.</span></a></td>
</tr>
</tbody>
</table>
</body>
</html>`))
	})
	return httptest.NewServer(mux)
}

// TestScrapeLaptops scrapes the fixture page and checks the parsed product:
// name, CPU/RAM/GPU/drive fields (with units stripped) and the min/max of
// the price range.
func TestScrapeLaptops(t *testing.T) {
	ts := newTestServer()
	defer ts.Close()

	products, err := ScrapeProducts([]string{ts.URL + "/html"})
	if err != nil {
		t.Error("Failed to scrape products")
	}

	wantProducts := []items.Product{
		{
			Name: "Apple MacBook Pro 13 (2020)",
			Modifications: []items.ModificationType{
				items.Laptop{
					CPU: items.CPU{
						Series: "Core i5",
						Model:  "8257U",
					},
					RAM: items.RAM{
						Capacity: 8,
					},
					GPU: items.GPU{
						Model: "Iris Plus Graphics 645",
					},
					Drive: items.Drive{
						Type:     "SSD",
						Capacity: 256,
					},
					Price: items.Price{
						Min: 36949,
						Max: 43176,
					},
				},
			},
		},
	}

	for i, got := range products {
		want := wantProducts[i]
		if !reflect.DeepEqual(got, want) {
			t.Errorf("products are not equal: got: %+v, want: %+v", got, want)
		}
	}
}
/** * day 02 2020 * https://adventofcode.com/2020/day/2 * * compile: go build main.go * run: ./main < input * compile & run: go run main.go < input **/ package main import ( "bufio" "fmt" "os" "strings" ) func sled(min int, max int, c byte, pass string) int { count := 0 for i, _ := range pass { if pass[i] == c { count++ i++ } } if count >= min && count <= max { return 1 } return 0 } func toboggan(min int, max int, c byte, pass string) int { // no XOR in go but this is the same logic if (pass[min-1] == c) != (pass[max-1] == c) { return 1 } return 0 } func main() { var ( min int max int c byte pass string ) validSled := 0 validToboggan := 0 scan := bufio.NewScanner(os.Stdin) for scan.Scan() { f := strings.NewReader(scan.Text()) fmt.Fscanf(f, "%d-%d %c: %s", &min, &max, &c, &pass) validSled += sled(min, max, c, pass) validToboggan += toboggan(min, max, c, pass) } fmt.Println("part 1:", validSled) fmt.Println("part 2:", validToboggan) }
package main import ( "github.com/antonybudianto/go-starter/app" ) func main() { a := app.App{} a.Initialize("root", "hello", "rest_api_example") a.Run(":8000") }
package common

import (
	"github.com/asim/go-micro/config"
	"github.com/asim/go-micro/plugins/config/source/consul"
	"strconv"
)

// GetConsulConfig builds a go-micro config backed by a Consul KV source at
// host:port, reading keys under prefix (which is stripped from key paths).
// The returned config is usable only when err is nil.
func GetConsulConfig(host string, port int64, prefix string) (config.Config, error) {
	consulSource := consul.NewSource(
		// Consul server address, e.g. "127.0.0.1:8500".
		consul.WithAddress(host+":"+strconv.FormatInt(port, 10)),
		// Only keys under this prefix are loaded.
		consul.WithPrefix(prefix),
		// Strip the prefix from the loaded key paths.
		consul.StripPrefix(true),
	)
	// FIX: the local variable was named `config`, shadowing the imported
	// `config` package for the rest of the function.
	conf, err := config.NewConfig()
	if err != nil {
		return conf, err
	}
	err = conf.Load(consulSource)
	return conf, err
}
package types

import (
	"github.com/graphql-go/graphql"
)

// Owner type definition: a pet owner row, tagged for both the database
// layer (db) and JSON serialization (json).
type Owner struct {
	ID        int    `db:"id" json:"id"`
	FirstName string `db:"first_name" json:"first_name"`
	LastName  string `db:"last_name" json:"last_name"`
}

// OwnerType is graphql schema for type owner; field names mirror the JSON
// tags of Owner so resolvers can map structs directly.
var OwnerType = graphql.NewObject(graphql.ObjectConfig{
	Name: "Owner",
	Fields: graphql.Fields{
		"id":         &graphql.Field{Type: graphql.Int},
		"first_name": &graphql.Field{Type: graphql.String},
		"last_name":  &graphql.Field{Type: graphql.String},
	},
})
// Package templates holds generated template code.
package templates

// The directive below regenerates templates.gen.go from the text
// templates in this directory; run `go generate ./...` after editing
// any template source.
//go:generate go run github.com/pseudo-su/templates -t text -s . -o templates.gen.go
package pget import ( "bytes" "fmt" "github.com/Code-Hex/updater" "github.com/jessevdk/go-flags" "github.com/pkg/errors" ) // Options struct for parse command line arguments type Options struct { Help bool `short:"h" long:"help"` NumConnection int `short:"p" long:"procs" default:"1"` Output string `short:"o" long:"output"` Timeout int `short:"t" long:"timeout" default:"10"` UserAgent string `short:"u" long:"user-agent"` Referer string `short:"r" long:"referer"` Update bool `long:"check-update"` Trace bool `long:"trace"` } func (opts *Options) parse(argv []string, version string) ([]string, error) { p := flags.NewParser(opts, flags.PrintErrors) args, err := p.ParseArgs(argv) if err != nil { stdout.Write(opts.usage(version)) return nil, errors.Wrap(err, "invalid command line options") } return args, nil } func (opts Options) usage(version string) []byte { buf := bytes.Buffer{} msg := "Pget %s, The fastest file download client\n" fmt.Fprintf(&buf, msg+ `Usage: pget [options] URL Options: -h, --help print usage and exit -p, --procs <num> the number of connections for a single URL (default 1) -o, --output <filename> output file to <filename> -t, --timeout <seconds> timeout of checking request in seconds (default 10s) -u, --user-agent <agent> identify as <agent> -r, --referer <referer> identify as <referer> --check-update check if there is update available --trace display detail error messages `, version) return buf.Bytes() } func (opts Options) isupdate(version string) ([]byte, error) { buf := bytes.Buffer{} result, err := updater.CheckWithTag("Code-Hex", "pget", version) if err != nil { return nil, err } fmt.Fprintf(&buf, result+"\n") return buf.Bytes(), nil }
package sweet import ( "fmt" "strings" ) type nameChain struct { fmt.Stringer names []string } func newNameChain(names ...string) *nameChain { return &nameChain{ names: names, } } func (n *nameChain) String() string { return strings.Join(n.names, " => ") } func (n *nameChain) append(name string) *nameChain { newArr := append(n.names, sanitizeTestName(name)) return newNameChain(newArr...) } func sanitizeTestName(name string) string { ret := strings.Replace(name, "=>", "_", -1) return ret }
package main

import (
	"fmt"

	tl "github.com/JoelOtter/termloop"
)

// GameOverLevel is displayed when the game has ended.
type GameOverLevel struct {
	*tl.BaseLevel
}

// gameOver swaps the game onto a fresh "game over" screen showing the
// final score and the retry/quit key bindings.
//
// Relies on the package-level `game` and `score` variables defined
// elsewhere in this file set.
func gameOver() {
	game.Log("Game ended :(")
	screen := tl.NewScreen()
	lvl := &GameOverLevel{
		BaseLevel: tl.NewBaseLevel(tl.Cell{
			Bg: tl.ColorDefault,
			Fg: tl.ColorBlack,
			Ch: '.',
		}),
	}
	screen.SetLevel(lvl)
	// Static text entities; x=0, rows 1-5 of the screen.
	lvl.AddEntity(tl.NewText(0, 1, "GAME OVER", tl.ColorWhite, tl.ColorDefault))
	lvl.AddEntity(tl.NewText(0, 2, fmt.Sprintf("Final Score: %d", score), tl.ColorWhite, tl.ColorDefault))
	lvl.AddEntity(tl.NewText(0, 4, "Press Return to Retry", tl.ColorWhite, tl.ColorDefault))
	lvl.AddEntity(tl.NewText(0, 5, "Press Ctrl+C to Close", tl.ColorWhite, tl.ColorDefault))
	game.SetScreen(screen)
}

// Tick is triggered in response to user input.
// Return restarts the game via newGame (defined elsewhere); Ctrl+C is
// handled by termloop itself.
func (l *GameOverLevel) Tick(event tl.Event) {
	if event.Type == tl.EventKey { // Is it a keyboard event?
		switch event.Key { // If so, switch on the pressed key.
		case tl.KeyEnter:
			game.Log("User initiated new game")
			newGame()
		}
	}
}
package auth import ( "github.com/dghubble/go-twitter/twitter" "github.com/dghubble/oauth1" "log" "os" ) var twitterClient *twitter.Client = nil func grantNewClientAccess(){ consumerKey := os.Getenv("TWITTER_CONSUMER_KEY") consumerSecret := os.Getenv("TWITTER_CONSUMER_SECRET") accessToken := os.Getenv("TWITTER_ACCESS_TOKEN") accessSecret := os.Getenv("TWITTER_ACCESS_SECRET") if consumerKey == "" || consumerSecret == "" || accessToken == "" || accessSecret == "" { log.Fatal("Consumer key/secret and Access token/secret required") } config := oauth1.NewConfig(consumerKey, consumerSecret) token := oauth1.NewToken(accessToken, accessSecret) httpClient := config.Client(oauth1.NoContext, token) twitterClient = twitter.NewClient(httpClient) } func RequireTwitterClient() *twitter.Client{ if twitterClient == nil { grantNewClientAccess(); } return twitterClient }
package main // modified from HelloWorld example at https://github.com/GoogleCloudPlatform/golang-samples/blob/master/appengine/go11x/helloworld/helloworld.go import ( "fmt" "log" "net/http" "os" "time" "cloud.google.com/go/datastore" ) func main() { http.HandleFunc("/", indexHandler) port := os.Getenv("PORT") if port == "" { port = "8080" log.Printf("Defaulting to port %s", port) } log.Printf("Listening on port %s", port) log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil)) } type MyStruct struct { Name string Created time.Time } func indexHandler(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/" { http.NotFound(w, r) return } ctx := r.Context() project := os.Getenv("GOOGLE_CLOUD_PROJECT") dsClient, err := datastore.NewClient(ctx, project) if err != nil { _, _ = fmt.Fprint(w, "Creating Client", err) return } for i := 0; i < 5; i++ { k := datastore.IncompleteKey("MyStruct", nil) k, err := dsClient.Put(ctx, k, &MyStruct{ Name: fmt.Sprintf("Struct-%d", i), Created: time.Now().Add(time.Duration(-1 * i) * time.Hour), }) if err != nil { _, _ = fmt.Fprint(w, "Creating MyStruct", err) return } defer func() { _ = dsClient.Delete(ctx, k) }() } k := datastore.IncompleteKey("MyStruct", nil) k, err = dsClient.Put(ctx, k, &MyStruct{ Name: fmt.Sprintf("Struct-%d", 10), Created: time.Time{}, }) if err != nil { _, _ = fmt.Fprint(w, "Creating MyStruct", err) return } defer func() { _ = dsClient.Delete(ctx, k) }() res1 := make([]MyStruct, 0) badQuery := datastore.NewQuery("MyStruct").Filter("Created > ", time.Now().Add(-6 * time.Hour)) keys, err := dsClient.GetAll(ctx, badQuery, &res1) if err != nil { _, _ = fmt.Fprint(w, "BadQuery", err) return } _, _ = fmt.Fprintf(w, "BadQuery:\nKeys: %+v\nValues: %+v\n", keys, res1) }
// +build !windows

package process

// postStart runs platform-specific work after the process has started.
// On non-Windows platforms there is nothing to do, so it always
// succeeds.
func (p *Process) postStart() error {
	return nil
}
package main /* #cgo pkg-config #include "Python.h" #include <stdio.h> #include <stdlib.h> extern void c_msg(char*); */ import "C" import "unsafe" import ( "fmt" "strings" ) const Version = `v0.0.0` //export blahblah func blahblah(cStr *C.char, cCnt C.int) *C.char { // Convert our cStr into a Go string s := C.GoString(cStr) // Convert our C int into a Go integer cnt := int(cCnt) fmt.Printf("GO MSG: %q cCnt: %d\n", s, cnt) C.c_msg(cStr) sArray := []string{} for i := 1; i <= cnt; i++ { sArray = append(sArray, fmt.Sprintf("%d: %s", i, s)) } // Write our array of strings back to s then convert to hand back // to our C shared library. s = strings.Join(sArray, ", ") // Now copy back into C space with a deferred free... cResult := C.CString(s) defer C.free(unsafe.Pointer(cResult)) fmt.Printf("GO MSG: %q\n", s) C.c_msg(cResult) // Now return our C String to C's processing space return cResult } func main() {}
package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"os"
)

const (
	filename = "test.txt"
)

// main demonstrates writing a small text file and reading it back two
// different ways.
func main() {
	write()
	read()
}

// fail reports err and terminates with a failure status.
func fail(err error) {
	fmt.Println(err)
	os.Exit(1)
}

// write writes two sample lines to filename, creating the file if
// needed (mode 0666).
//
// NOTE(review): O_TRUNC is not set, so a longer pre-existing file
// would keep its tail beyond the bytes written here — confirm that is
// intended.
func write() {
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		fail(err)
	}
	defer file.Close()

	if _, err := file.WriteString("metaprogramming ruby\n"); err != nil {
		fail(err)
	}
	if _, err := fmt.Fprintf(file, "angularjs reference\n"); err != nil {
		fail(err)
	}
}

// read prints the file twice: line by line via bufio.Scanner, then in
// one shot via ioutil.ReadFile.
func read() {
	fmt.Println("=== read by using bufio.Scanner()")
	file, err := os.Open(filename)
	if err != nil {
		fail(err)
	}
	defer file.Close()

	sc := bufio.NewScanner(file)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
	if err := sc.Err(); err != nil {
		fail(err)
	}

	fmt.Println("\n=== read by using ioutil.ReadFile()")
	dat, err := ioutil.ReadFile(filename)
	if err != nil {
		// Was os.Exit(0), which wrongly reported success on failure.
		fail(err)
	}
	fmt.Println(string(dat))
}
package main

import "fmt"

// main is a hand-rolled driver: it replays a query string against the
// checker and compares each result with the expected res slice.
func main() {
	sc := Constructor([]string{
		"ab",
		"ba",
		"aaab",
		"abab",
		"baa",
	})
	res := []bool{false, false, false, false, false, true, true, true, true, true, false, false, true, true, true, true, false, false, false, true, true, true, true, true, true, false, true, true, true, false}
	query := "aaaaaba"
	//query := "aaaaabababbbababbbbababaaabaaa"
	fmt.Println(len(res))
	fmt.Println(len(query))
	for i := 0; i < len(query); i++ {
		fmt.Println(res[i] == sc.Query(query[i]))
	}
}

// StreamChecker reports, letter by letter, whether some word in the
// dictionary has just been completed by the stream of queried letters.
type StreamChecker struct {
	words  []string // the original dictionary of words
	words2 []string // candidate words that still match the current prefix s
	s      string   // letters matched so far along the surviving candidates
}

// Constructor builds a checker over the given dictionary.
func Constructor(words []string) StreamChecker {
	return StreamChecker{
		words:  words,
		words2: make([]string, 0),
	}
}

// Query feeds one letter into the stream and reports whether a
// dictionary word ends exactly at this letter.
//
// It narrows the candidate set (words2, or the full dictionary when no
// candidates are active) to words whose next character matches letter;
// if no candidate survives, the matched prefix s is reset.
//
// NOTE(review): resetting s entirely on a mismatch discards suffixes
// that could start a new match (the commented-out longer query hints at
// known failures) — a suffix-based (e.g. reversed-trie) approach would
// be needed for full correctness; confirm intended scope.
func (this *StreamChecker) Query(letter byte) bool {
	tmp := make([]string, 0)
	ret := false
	if len(this.words2) != 0 {
		for i := 0; i < len(this.words2); i++ {
			cur := this.words2[i]
			if len(this.s) < len(cur) && cur[len(this.s)] == letter {
				tmp = append(tmp, cur)
				if cur == this.s+string(letter) {
					ret = true
				}
			}
		}
	} else {
		for i := 0; i < len(this.words); i++ {
			cur := this.words[i]
			if len(this.s) < len(cur) && cur[len(this.s)] == letter {
				tmp = append(tmp, cur)
				if cur == this.s+string(letter) {
					ret = true
				}
			}
		}
	}
	if len(tmp) > 0 {
		this.s += string(letter)
	} else {
		this.s = ""
	}
	this.words2 = tmp
	return ret
}

/**
 * Your StreamChecker object will be instantiated and called as such:
 * obj := Constructor(words);
 * param_1 := obj.Query(letter);
 */
// Package cluster: Ginkgo specs exercising versions.ValidateVersion for
// both hosted (HyperShift) and classic cluster creation paths.
package cluster

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/openshift/rosa/pkg/helper/versions"
)

var _ = Describe("Validates OCP version", func() {
	// Release channel groups accepted by ValidateVersion.
	const (
		nightly = "nightly"
		stable  = "stable"
		fast    = "fast"
	)
	var _ = Context("when creating a hosted cluster", func() {
		It("OK: Validates successfully a cluster for hosted clusters with a supported version", func() {
			v, err := versions.ValidateVersion("4.12.0", []string{"4.12.0"}, stable, false, true)
			Expect(err).NotTo(HaveOccurred())
			Expect(v).To(Equal("openshift-v4.12.0"))
		})
		It("OK: Validates successfully a nightly version of OCP for hosted clusters "+
			"with a supported version", func() {
			v, err := versions.ValidateVersion("4.12.0-0.nightly-2022-11-25-185455-nightly",
				[]string{"4.12.0-0.nightly-2022-11-25-185455-nightly"}, nightly, false, true)
			Expect(err).NotTo(HaveOccurred())
			Expect(v).To(Equal("openshift-v4.12.0-0.nightly-2022-11-25-185455-nightly-nightly"))
		})
		It("KO: Fails with a nightly version of OCP for hosted clusters "+
			"in a not supported version", func() {
			v, err := versions.ValidateVersion("4.11.0-0.nightly-2022-10-17-040259-nightly",
				[]string{"4.11.0-0.nightly-2022-10-17-040259-nightly"}, nightly, false, true)
			Expect(err).To(BeEquivalentTo(
				fmt.Errorf("version '4.11.0-0.nightly-2022-10-17-040259-nightly' " +
					"is not supported for hosted clusters")))
			Expect(v).To(Equal(""))
		})
		It("OK: Validates successfully the next major release of OCP for hosted clusters "+
			"with a supported version", func() {
			v, err := versions.ValidateVersion("4.13.0", []string{"4.13.0"}, fast, false, true)
			Expect(err).NotTo(HaveOccurred())
			Expect(v).To(Equal("openshift-v4.13.0-fast"))
		})
		It(`KO: Fails to validate a cluster for a hosted cluster when the user provides an unsupported version`,
			func() {
				v, err := versions.ValidateVersion("4.11.5", []string{"4.11.5"}, stable, false, true)
				Expect(err).To(BeEquivalentTo(fmt.Errorf("version '4.11.5' is not supported for hosted clusters")))
				Expect(v).To(BeEmpty())
			})
		It(`KO: Fails to validate a cluster for a hosted cluster when the user provides an invalid or malformed version`,
			func() {
				v, err := versions.ValidateVersion("foo.bar", []string{"foo.bar"}, stable, false, true)
				Expect(err).To(BeEquivalentTo(
					fmt.Errorf("error while parsing OCP version 'foo.bar': Malformed version: foo.bar")))
				Expect(v).To(BeEmpty())
			})
	})
	var _ = Context("when creating a classic cluster", func() {
		It("OK: Validates successfully a cluster with a supported version", func() {
			v, err := versions.ValidateVersion("4.11.0", []string{"4.11.0"}, stable, true, false)
			Expect(err).NotTo(HaveOccurred())
			Expect(v).To(Equal("openshift-v4.11.0"))
		})
	})
})
package account import ( "database/sql" "errors" _ "github.com/go-sql-driver/mysql" ) type Account struct { Login string Password string LastActive string AccessLevel uint8 Banned bool CharacterSlot uint8 // CharacterList []Character } // Update the database with all these fun values func (a *Account) Create(db *sql.DB) error { createAccount, err := db.Prepare("INSERT INTO accounts VALUES ( ?, ?, ?, ?, ?, ?)") if err != nil { return err } _, err = createAccount.Exec(a.Login, a.Password, a.LastActive, a.AccessLevel, a.Banned, a.CharacterSlot) if err != nil { return err } return nil } // We'll probably want to send this as part of a client type func (a *Account) Authenticate(db *sql.DB, pass string) error { // Load in account information identifed by login accountPass, err := db.Prepare("SELECT password FROM accounts WHERE login = ?") if err != nil { return err } row := accountPass.QueryRow(a.Login) var apass string err = row.Scan(&apass) if err != nil { return err } // Compare passwords if apass != pass { // if not equal, return err err := errors.New("Failed to authenticate") return err } return nil }
package memory import ( "context" "errors" "go.uber.org/zap" "github.com/silverspase/todo/internal/modules/auth" "github.com/silverspase/todo/internal/modules/auth/model" ) type memoryStorage struct { users map[string]model.User // TODO change to sync.Map // usersArray []model.User // TODO use this for pagination in GetAllUsers logger *zap.Logger } func NewMemoryStorage(logger *zap.Logger) auth.Repository { return &memoryStorage{ users: make(map[string]model.User), logger: logger, } } func (m memoryStorage) CreateUser(ctx context.Context, entry model.User) (string, error) { m.logger.Debug("CreateUser") m.users[entry.ID] = entry return entry.ID, nil } func (m memoryStorage) GetAllUsers(ctx context.Context, page int) (res []model.User, err error) { for _, user := range m.users { res = append(res, user) } return res, nil } func (m memoryStorage) GetUser(ctx context.Context, id string) (model.User, error) { m.logger.Debug("GetItem") item, ok := m.users[id] if !ok { return item, errors.New("not found") } return item, nil } func (m memoryStorage) UpdateUser(ctx context.Context, item model.User) (string, error) { _, ok := m.users[item.ID] if !ok { return "", errors.New("entry with given id not found, nothing to update") } m.users[item.ID] = item return item.ID, nil } func (m memoryStorage) DeleteUser(ctx context.Context, id string) (string, error) { _, ok := m.users[id] if !ok { return "", errors.New("item with given id not found, nothing to delete") } delete(m.users, id) return id, nil }
package model

// StreamFilterList wraps a collection of stream filters as it appears
// on the wire (JSON key "filters", omitted when empty).
type StreamFilterList struct {

	// List of stream filters
	Filters []StreamFilter `json:"filters,omitempty"`
}
package pathfileops

// DirectoryStatsDto accumulates summary statistics for a directory:
// counts of files, subdirectories and total bytes. Values are exposed
// through read-only getter methods.
type DirectoryStatsDto struct {
	numOfFiles uint64

	numOfSubDirs uint64

	numOfBytes uint64

	isInitialized bool
}

// IsInitialized reports whether the statistics have been populated.
func (dirStats *DirectoryStatsDto) IsInitialized() bool {
	return dirStats.isInitialized
}

// NumOfFiles returns the number of files counted.
func (dirStats *DirectoryStatsDto) NumOfFiles() uint64 {
	return dirStats.numOfFiles
}

// NumOfSubDirs returns the number of subdirectories counted.
func (dirStats *DirectoryStatsDto) NumOfSubDirs() uint64 {
	return dirStats.numOfSubDirs
}

// NumOfBytes returns the total size in bytes counted.
func (dirStats *DirectoryStatsDto) NumOfBytes() uint64 {
	return dirStats.numOfBytes
}

// DirTreeCopyStats reports the outcome of a directory-tree copy
// operation. ComputeError records any error hit while computing the
// statistics themselves.
type DirTreeCopyStats struct {
	TotalDirsScanned    uint64
	DirsCopied          uint64
	DirsCreated         uint64
	TotalFilesProcessed uint64
	FilesCopied         uint64
	FileBytesCopied     uint64
	FilesNotCopied      uint64
	FileBytesNotCopied  uint64
	ComputeError        error
}

// DirectoryCopyStats reports the outcome of a single-directory copy
// operation.
type DirectoryCopyStats struct {
	DirCreated          uint64
	TotalFilesProcessed uint64
	FilesCopied         uint64
	FileBytesCopied     uint64
	FilesNotCopied      uint64
	FileBytesNotCopied  uint64
	ComputeError        error
}

// DirectoryMoveStats reports the outcome of a directory move,
// including whether the source directory itself was deleted.
type DirectoryMoveStats struct {
	TotalSrcFilesProcessed   uint64
	SourceFilesMoved         uint64
	SourceFileBytesMoved     uint64
	SourceFilesRemaining     uint64
	SourceFileBytesRemaining uint64
	TotalDirsProcessed       uint64
	DirsCreated              uint64
	NumOfSubDirectories      uint64
	SourceDirWasDeleted      bool
	ComputeError             error
}

// DeleteDirFilesStats reports the outcome of a file-deletion sweep
// across a directory tree.
type DeleteDirFilesStats struct {
	TotalFilesProcessed        uint64
	FilesDeleted               uint64
	FilesDeletedBytes          uint64
	FilesRemaining             uint64
	FilesRemainingBytes        uint64
	TotalSubDirectories        uint64
	TotalDirsScanned           uint64
	NumOfDirsWhereFilesDeleted uint64
	DirectoriesDeleted         uint64
}
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cardinality

import (
	"math"

	"github.com/pingcap/tidb/expression"
	"github.com/pingcap/tidb/planner/property"
	"github.com/pingcap/tidb/statistics"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
)

// distinctFactor is the default NDV/row-count ratio assumed when no
// histogram statistics are available for a column.
const distinctFactor = 0.8

// EstimateColumnNDV computes estimated NDV of specified column using the original
// histogram of `DataSource` which is retrieved from storage(not the derived one).
func EstimateColumnNDV(tbl *statistics.Table, colID int64) (ndv float64) {
	hist, ok := tbl.Columns[colID]
	if ok && hist.IsStatsInitialized() {
		ndv = float64(hist.Histogram.NDV)
		// TODO: a better way to get the total row count derived from the last analyze.
		analyzeCount := getTotalRowCount(tbl, hist)
		if analyzeCount > 0 {
			// Scale the analyze-time NDV by how much the table has
			// grown (or shrunk) since the last analyze.
			factor := float64(tbl.RealtimeCount) / float64(analyzeCount)
			ndv *= factor
		}
	} else {
		// No usable histogram: fall back to a fixed fraction of rows.
		ndv = float64(tbl.RealtimeCount) * distinctFactor
	}
	return ndv
}

// getTotalRowCount returns the total row count, which is obtained when collecting colHist.
func getTotalRowCount(statsTbl *statistics.Table, colHist *statistics.Column) int64 {
	if colHist.IsFullLoad() {
		return int64(colHist.TotalRowCount())
	}
	// If colHist is not fully loaded, we may still get its total row count from other index/column stats.
	// A matching LastUpdateVersion means the stats were collected by the
	// same analyze run, so their total row counts are interchangeable.
	for _, idx := range statsTbl.Indices {
		if idx.IsFullLoad() && idx.LastUpdateVersion == colHist.LastUpdateVersion {
			return int64(idx.TotalRowCount())
		}
	}
	for _, col := range statsTbl.Columns {
		if col.IsFullLoad() && col.LastUpdateVersion == colHist.LastUpdateVersion {
			return int64(col.TotalRowCount())
		}
	}
	return 0
}

// EstimateColsNDVWithMatchedLen returns the NDV of a couple of columns.
// If the columns match any GroupNDV maintained by child operator, we can get an accurate NDV.
// Otherwise, we simply return the max NDV among the columns, which is a lower bound.
func EstimateColsNDVWithMatchedLen(cols []*expression.Column, schema *expression.Schema, profile *property.StatsInfo) (float64, int) {
	// NDV is at least 1; the second return value is how many of the
	// requested columns the estimate actually covers.
	ndv := 1.0
	if groupNDV := profile.GetGroupNDV4Cols(cols); groupNDV != nil {
		return math.Max(groupNDV.NDV, ndv), len(groupNDV.Cols)
	}
	indices := schema.ColumnsIndices(cols)
	if indices == nil {
		logutil.BgLogger().Error("column not found in schema", zap.Any("columns", cols), zap.String("schema", schema.String()))
		return ndv, 1
	}
	for _, idx := range indices {
		// It is a very naive estimation.
		col := schema.Columns[idx]
		ndv = math.Max(ndv, profile.ColNDVs[col.UniqueID])
	}
	return ndv, 1
}

// EstimateColsDNVWithMatchedLenFromUniqueIDs is similar to EstimateColsNDVWithMatchedLen, but it receives UniqueIDs instead of Columns.
func EstimateColsDNVWithMatchedLenFromUniqueIDs(ids []int64, schema *expression.Schema, profile *property.StatsInfo) (float64, int) {
	cols := make([]*expression.Column, 0, len(ids))
	for _, id := range ids {
		cols = append(cols, &expression.Column{
			UniqueID: id,
		})
	}
	return EstimateColsNDVWithMatchedLen(cols, schema, profile)
}
package main import ( "fmt" "github.com/gorilla/mux" "log" "net/http" ) // The BIND Port const PORT = 8000 // Local SystemInfo Cache var SystemInfo *SysInfo func main() { //Initialize System Info SystemInfo = NewSystemInfo() //Initialize a new router object router := mux.NewRouter() //Set header content-type: application/json globally via middleware router.Use(commonMiddleware) //Add the Info Route router.HandleFunc("/info", HandleInfo).Methods("GET") //Add the VolumeSet Route router.HandleFunc("/volume/set/{level}", HandleVolumeSet).Methods("GET") // Handle Mute / Unmute router.HandleFunc("/volume/mute", HandleMute).Methods("GET") router.HandleFunc("/volume/unmute", HandleUnmute).Methods("GET") //Add the Keyboard Route router.HandleFunc("/keyboard/{action}", HandleKeyboard).Methods("GET") //Print Info fmt.Printf("The Volume-Ctrl Server has been started\n") fmt.Printf("Your local IP address is %s\n", SystemInfo.IpAddress) fmt.Printf("Your hostname is %s\n", SystemInfo.Hostname) fmt.Printf("You can connect either via %s:%d or %s:%d\n", SystemInfo.IpAddress, PORT, SystemInfo.Hostname, PORT) //Start the REST Server log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", PORT), router)) } func commonMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") next.ServeHTTP(w, r) }) }
// Package iban implements validation of IBAN as defined in ISO 13616
package iban

import (
	"errors"
	"math/big"
	"regexp"
	"strings"
)

// Validate performs a sanity check on the IBAN number provided.
// The check follows ISO 13616 / ISO 7064 MOD-97-10:
//  1. normalize (uppercase, strip spaces and hyphens) and verify the
//     character set and overall length (15-34 alphanumerics),
//  2. verify the two-letter country code and the country-specific
//     total length from the format table,
//  3. move the first four characters to the end, expand letters to
//     numbers (A=10 ... Z=35) and verify big-integer mod 97 == 1.
func Validate(iban string) error {
	iban = normalizeIBAN(iban)

	if !validIBANChars(iban) {
		return errors.New("invalid characters")
	}

	if f, ok := format[iban[:2]]; !ok {
		return errors.New("invalid country code")
	} else {
		// Validating the BBAN number
		if len(iban) != f.size {
			return errors.New("invalid BBAN length")
		}
	}

	// Step 3: rearrange, then expand each character to its numeric
	// string form before joining into one large decimal number.
	rearranged := []byte(iban[4:] + iban[:4])
	t := make([]string, 0, len(rearranged))
	for _, c := range rearranged {
		t = append(t, alphaToNum[c])
	}

	// The expanded number exceeds int64 range, hence math/big.
	x := big.NewInt(0)
	x, ok := x.SetString(strings.Join(t, ""), 10)
	if !ok {
		return errors.New("internal error")
	}

	modulo := big.NewInt(0)
	modulo.Mod(x, big.NewInt(97))
	if modulo.Int64() != 1 {
		return errors.New("invalid IBAN")
	}
	return nil
}

// normalizeIBAN uppercases the input and strips whitespace and
// hyphens, the separators commonly used in printed IBANs.
func normalizeIBAN(iban string) string {
	iban = strings.ToUpper(iban)
	return replaceIgnoredChars(iban, "")
}

var (
	// replaceIgnoredChars removes runs of whitespace and hyphens.
	replaceIgnoredChars = regexp.MustCompile(`[\s\-]+`).ReplaceAllString
	// validIBANChars matches a fully-normalized IBAN: 15-34 digits or
	// uppercase letters.
	validIBANChars = regexp.MustCompile(`^[0-9A-Z]{15,34}$`).MatchString
)

// alphaToNum maps IBAN characters to their ISO 7064 numeric expansion
// (digits map to themselves, A=10 ... Z=35).
var alphaToNum = map[byte]string{
	'0': "0", '1': "1", '2': "2", '3': "3", '4': "4", '5': "5", '6': "6", '7': "7", '8': "8", '9': "9",
	'A': "10", 'B': "11", 'C': "12", 'D': "13", 'E': "14", 'F': "15", 'G': "16", 'H': "17", 'I': "18",
	'J': "19", 'K': "20", 'L': "21", 'M': "22", 'N': "23", 'O': "24", 'P': "25", 'Q': "26", 'R': "27",
	'S': "28", 'T': "29", 'U': "30", 'V': "31", 'W': "32", 'X': "33", 'Y': "34", 'Z': "35",
}

// country describes one national IBAN layout: its total size, BBAN
// composition, display format and an explanatory comment; standard
// marks officially registered layouts.
type country struct {
	name     string
	size     int
	bban     string
	code     string
	format   string
	comment  string
	standard bool
}

// format indexes the per-country IBAN layouts by two-letter country
// code; Validate uses it for country-code and length checks.
var format = map[string]country{
	"AL": country{name: "Albania", size: 28, bban: "8n, 16c", code: "AL", format: "ALkk bbbs sssx cccc cccc cccc cccc", comment: "b = National bank code s = Branch code x = National check digit c = Account number", standard: true},
	"AD": country{name: "Andorra", size: 24, bban: 
"8n,12c", code: "AD", format: "ADkk bbbb ssss cccc cccc cccc", comment: "b = National bank code s = Branch code c = Account number", standard: true}, "AT": country{name: "Austria", size: 20, bban: "16n", code: "AT", format: "ATkk bbbb bccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, "AZ": country{name: "Azerbaijan", size: 28, bban: "4c,20n", code: "AZ", format: "AZkk bbbb cccc cccc cccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, "BH": country{name: "Bahrain", size: 22, bban: "4a,14c", code: "BH", format: "BHkk bbbb cccc cccc cccc cc", comment: "b = National bank code c = Account number", standard: true}, "BE": country{name: "Belgium", size: 16, bban: "12n", code: "BE", format: "BEkk bbbc cccc ccxx", comment: "b = National bank code c = Account number x = National check digits", standard: true}, "BA": country{name: "Bosnia and Herzegovina", size: 20, bban: "16n", code: "BA", format: "BAkk bbbs sscc cccc ccxx", comment: "k = IBAN check digits (always 39) b = National bank code s = Branch code c = Account number x = National check digits", standard: true}, "BR": country{name: "Brazil", size: 29, bban: "23n, 1a, 1c", code: "BR", format: "BRkk bbbb bbbb ssss sccc cccc ccct n", comment: "k = IBAN check digits (Calculated by MOD 97-10) b = National bank code s = Branch code c = Account Number t = Account type (Cheque account, Savings account etc.) 
n = Owner account number (1, 2 etc.)[31]", standard: true}, "BG": country{name: "Bulgaria", size: 22, bban: "4a,6n,8c", code: "BG", format: "BGkk bbbb ssss ddcc cccc cc", comment: "b = BIC bank code s = Branch (BAE) number d = Account type c = Account number", standard: true}, "CR": country{name: "Costa Rica", size: 21, bban: "17n", code: "CR", format: "CRkk bbbc cccc cccc cccc c", comment: "b = bank code c = Account number", standard: true}, "HR": country{name: "Croatia", size: 21, bban: "17n", code: "HR", format: "HRkk bbbb bbbc cccc cccc c", comment: "b = Bank code c = Account number", standard: true}, "CY": country{name: "Cyprus", size: 28, bban: "8n,16c", code: "CY", format: "CYkk bbbs ssss cccc cccc cccc cccc", comment: "b = National bank code s = Branch code c = Account number", standard: true}, "CZ": country{name: "Czech Republic", size: 24, bban: "20n", code: "CZ", format: "CZkk bbbb ssss sscc cccc cccc", comment: "b = National bank code s = Account number prefix c = Account number", standard: true}, "DK": country{name: "Denmark", size: 18, bban: "14n", code: "DK", format: "DKkk bbbb cccc cccc cc", comment: "b = National bank code c = Account number", standard: true}, "DO": country{name: "Dominican Republic", size: 28, bban: "4a,20n", code: "DO", format: "DOkk bbbb cccc cccc cccc cccc cccc", comment: "b = Bank identifier c = Account number", standard: true}, "TL": country{name: "East Timor", size: 23, bban: "19n", code: "TL", format: "TLkk bbbc cccc cccc cccc cxx", comment: "k = IBAN check digits (always = '38') b = Bank identifier c = Account number x = National check digit", standard: true}, "EE": country{name: "Estonia", size: 20, bban: "16n", code: "EE", format: "EEkk bbss cccc cccc cccx", comment: "b = National bank code s = Branch code c = Account number x = National check digit", standard: true}, "FO": country{name: "Faroe Islands", size: 18, bban: "14n", code: "FO", format: "FOkk bbbb cccc cccc cx", comment: "b = National bank code c = Account 
number x = National check digit", standard: true}, "FI": country{name: "Finland", size: 18, bban: "14n", code: "FI", format: "FIkk bbbb bbcc cccc cx", comment: "b = Bank and branch code c = Account number x = National check digit", standard: true}, "FR": country{name: "France", size: 27, bban: "10n,11c,2n", code: "FR", format: "FRkk bbbb bggg ggcc cccc cccc cxx", comment: "b = National bank code g = Branch code (fr:code guichet) c = Account number x = National check digits (fr:clé RIB)", standard: true}, "GE": country{name: "Georgia", size: 22, bban: "2c,16n", code: "GE", format: "GEkk bbcc cccc cccc cccc cc", comment: "b = National bank code c = Account number", standard: true}, "DE": country{name: "Germany", size: 22, bban: "18n", code: "DE", format: "DEkk bbbb bbbb cccc cccc cc", comment: "b = Bank and branch identifier (de:Bankleitzahl or BLZ) c = Account number", standard: true}, "GI": country{name: "Gibraltar", size: 23, bban: "4a,15c", code: "GI", format: "GIkk bbbb cccc cccc cccc ccc", comment: "b = BIC bank code c = Account number", standard: true}, "GR": country{name: "Greece", size: 27, bban: "7n,16c", code: "GR", format: "GRkk bbbs sssc cccc cccc cccc ccc", comment: "b = National bank code s = Branch code c = Account number", standard: true}, "GL": country{name: "Greenland", size: 18, bban: "14n", code: "GL", format: "GLkk bbbb cccc cccc cc", comment: "b = National bank code c = Account number", standard: true}, "GT": country{name: "Guatemala", size: 28, bban: "4c,20c", code: "GT", format: "GTkk bbbb mmtt cccc cccc cccc cccc", comment: "b = National bank code c = Account number m = Currency t = Account type ", standard: true}, "HU": country{name: "Hungary", size: 28, bban: "24n", code: "HU", format: "HUkk bbbs sssk cccc cccc cccc cccx", comment: "b = National bank code s = Branch code c = Account number x = National check digit", standard: true}, "IS": country{name: "Iceland", size: 26, bban: "22n", code: "IS", format: "ISkk bbbb sscc cccc iiii iiii 
ii", comment: "b = National bank code s = Branch code c = Account number i = holder's kennitala (national identification number).", standard: true}, "IE": country{name: "Ireland", size: 22, bban: "4c,14n", code: "IE", format: "IEkk aaaa bbbb bbcc cccc cc", comment: "a = BIC bank code b = Bank/branch code (sort code) c = Account number", standard: true}, "IK": country{name: "Israel", size: 23, bban: "19n", code: "IK", format: "ILkk bbbn nncc cccc cccc ccc", comment: "b = National bank code n = Branch number c = Account number 13 digits (padded with zeros)", standard: true}, "IT": country{name: "Italy", size: 27, bban: "1a,10n,12c", code: "IT", format: "ITkk xaaa aabb bbbc cccc cccc ccc", comment: "x = Check char (CIN) a = National bank code (it:Associazione bancaria italiana or Codice ABI ) b = Branch code (it:Coordinate bancarie or CAB – Codice d'Avviamento Bancario) c = Account number", standard: true}, "JO": country{name: "Jordan", size: 30, bban: "4a, 22n", code: "JO", format: "JOkk bbbb nnnn cccc cccc cccc cccc cc", comment: "b = National bank code n = Branch code c = Account number ", standard: true}, "KZ": country{name: "Kazakhstan", size: 20, bban: "3n,13c", code: "KZ", format: "KZkk bbbc cccc cccc cccc", comment: "b = National bank code c = Account number ", standard: true}, "XK": country{name: "Kosovo", size: 20, bban: "4n,10n,2n", code: "XK", format: "XKkk bbbb cccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, "KW": country{name: "Kuwait", size: 30, bban: "4a, 22c", code: "KW", format: "KWkk bbbb cccc cccc cccc cccc cccc cc", comment: "b = National bank code c = Account number.", standard: true}, "LV": country{name: "Latvia", size: 21, bban: "4a,13c", code: "LV", format: "LVkk bbbb cccc cccc cccc c", comment: "b = BIC Bank code c = Account number", standard: true}, "LB": country{name: "Lebanon", size: 28, bban: "4n,20c", code: "LB", format: "LBkk bbbb cccc cccc cccc cccc cccc", comment: "b = National bank code c = 
Account number", standard: true}, "LI": country{name: "Liechtenstein", size: 21, bban: "5n,12c", code: "LI", format: "LIkk bbbb bccc cccc cccc c", comment: "b = National bank code c = Account number", standard: true}, "LT": country{name: "Lithuania", size: 20, bban: "16n", code: "LT", format: "LTkk bbbb bccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, "LU": country{name: "Luxembourg", size: 20, bban: "3n,13c", code: "LU", format: "LUkk bbbc cccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, "MK": country{name: "Macedonia", size: 19, bban: "3n,10c,2n", code: "MK", format: "MKkk bbbc cccc cccc cxx", comment: "k = IBAN check digits (always = '07') b = National bank code c = Account number x = National check digits", standard: true}, "MT": country{name: "Malta", size: 31, bban: "4a,5n,18c", code: "MT", format: "MTkk bbbb ssss sccc cccc cccc cccc ccc", comment: "b = BIC bank code s = Branch code c = Account number", standard: true}, "MR": country{name: "Mauritania", size: 27, bban: "23n", code: "MR", format: "MRkk bbbb bsss sscc cccc cccc cxx", comment: "k = IBAN check digits (always 13) b = National bank code s = Branch code (fr:code guichet) c = Account number x = National check digits (fr:clé RIB)", standard: true}, "MU": country{name: "Mauritius", size: 30, bban: "4a,19n,3a", code: "MU", format: "MUkk bbbb bbss cccc cccc cccc 000d dd", comment: "b = National bank code s = Branch identifier c = Account number 0 = Zeroes d = Currency Symbol ", standard: true}, "MC": country{name: "Monaco", size: 27, bban: "10n,11c,2n", code: "MC", format: "MCkk bbbb bsss sscc cccc cccc cxx", comment: "b = National bank code s = Branch code (fr:code guichet) c = Account number x = National check digits (fr:clé RIB). 
", standard: true}, "MD": country{name: "Moldova", size: 24, bban: "2c,18c", code: "MD", format: "MDkk bbcc cccc cccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, "ME": country{name: "Montenegro", size: 22, bban: "18n", code: "ME", format: "MEkk bbbc cccc cccc cccc xx", comment: "k = IBAN check digits (always = '25') b = Bank code c = Account number x = National check digits", standard: true}, "NL": country{name: "Netherlands", size: 18, bban: "4a,10n", code: "NL", format: "NLkk bbbb cccc cccc cc", comment: "b = BIC Bank code c = Account number", standard: true}, "NO": country{name: "Norway", size: 15, bban: "11n", code: "NO", format: "NOkk bbbb cccc ccx", comment: "b = National bank code c = Account number x = Modulo-11 national check digit", standard: true}, "PK": country{name: "Pakistan", size: 24, bban: "4c,16n", code: "PK", format: "PKkk bbbb cccc cccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, "PS": country{name: "Palestinian territories", size: 29, bban: "4c,21n", code: "PS", format: "PSkk bbbb xxxx xxxx xccc cccc cccc c", comment: "b = National bank code c = Account number x = Not specified", standard: true}, "PL": country{name: "Poland", size: 28, bban: "24n", code: "PL", format: "PLkk bbbs sssx cccc cccc cccc cccc", comment: "b = National bank code s = Branch code x = National check digit c = Account number, ", standard: true}, "PT": country{name: "Portugal", size: 25, bban: "21n", code: "PT", format: "PTkk bbbb ssss cccc cccc cccx x", comment: "k = IBAN check digits (always = '50') b = National bank code s = Branch code C = Account number x = National check digit", standard: true}, "QA": country{name: "Qatar", size: 29, bban: "4a, 21c", code: "QA", format: "QAkk bbbb cccc cccc cccc cccc cccc c", comment: "b = National bank code c = Account number[34]", standard: true}, "RO": country{name: "Romania", size: 24, bban: "4a,16c", code: "RO", format: "ROkk bbbb cccc cccc cccc cccc", 
comment: "b = BIC Bank code c = Branch code and account number (bank-specific format) ", standard: true}, "SM": country{name: "San Marino", size: 27, bban: "1a,10n,12c", code: "SM", format: "SMkk xaaa aabb bbbc cccc cccc ccc", comment: "x = Check char (it:CIN) a = National bank code (it:Associazione bancaria italiana or Codice ABI) b = Branch code (it:Coordinate bancarie or CAB – Codice d'Avviamento Bancario) c = Account number", standard: true}, "SA": country{name: "Saudi Arabia", size: 24, bban: "2n,18c", code: "SA", format: "SAkk bbcc cccc cccc cccc cccc", comment: "b = National bank code c = Account number preceded by zeros, if required", standard: true}, "RS": country{name: "Serbia", size: 22, bban: "18n", code: "RS", format: "RSkk bbbc cccc cccc cccc xx", comment: "b = National bank code c = Account number x = Account check digits", standard: true}, "SK": country{name: "Slovakia", size: 24, bban: "20n", code: "SK", format: "SKkk bbbb ssss sscc cccc cccc", comment: "b = National bank code s = Account number prefix c = Account number", standard: true}, "SI": country{name: "Slovenia", size: 19, bban: "15n", code: "SI", format: "SIkk bbss sccc cccc cxx", comment: "k = IBAN check digits (always = '56') b = National bank code s = Branch code c = Account number x = National check digits", standard: true}, "ES": country{name: "Spain", size: 24, bban: "20n", code: "ES", format: "ESkk bbbb gggg xxcc cccc cccc", comment: "b = National bank code g = Branch code x = Check digits c = Account number", standard: true}, "SE": country{name: "Sweden", size: 24, bban: "20n", code: "SE", format: "SEkk bbbc cccc cccc cccc cccc", comment: "b = National bank code c = Account number ", standard: true}, "CH": country{name: "Switzerland", size: 21, bban: "5n,12c", code: "CH", format: "CHkk bbbb bccc cccc cccc c", comment: "b = National bank code c = Account number", standard: true}, "TN": country{name: "Tunisia", size: 24, bban: "20n", code: "TN", format: "TNkk bbss sccc cccc cccc 
cccc", comment: "k = IBAN check digits (always 59) b = National bank code s = Branch code c = Account number", standard: true}, "TR": country{name: "Turkey", size: 26, bban: "5n,17c", code: "TR", format: "TRkk bbbb bxcc cccc cccc cccc cc", comment: "b = National bank code x = Reserved for future use (currently '0') c = Account number", standard: true}, "GB": country{name: "United Kingdom", size: 22, bban: "4a,14n", code: "GB", format: "GBkk bbbb ssss sscc cccc cc", comment: "b = BIC bank code s = Bank and branch code (sort code) c = Account number", standard: true}, "AE": country{name: "United Arab Emirates", size: 23, bban: "3n,16n", code: "AE", format: "AEkk bbbc cccc cccc cccc ccc", comment: "b = National bank code c = Account number ", standard: true}, "VG": country{name: "Virgin Islands, British", size: 24, bban: "4c,16n", code: "VG", format: "VGkk bbbb cccc cccc cccc cccc", comment: "b = National bank code c = Account number", standard: true}, }
package start import ( . "github.com/zond/godip/variants/classical/common" "github.com/zond/godip/common" ) func SupplyCenters() map[common.Province]common.Nation { return map[common.Province]common.Nation{ "edi": England, "lvp": England, "lon": England, "bre": France, "par": France, "mar": France, "kie": Germany, "ber": Germany, "mun": Germany, "ven": Italy, "rom": Italy, "nap": Italy, "tri": Austria, "vie": Austria, "bud": Austria, "con": Turkey, "ank": Turkey, "smy": Turkey, "sev": Russia, "mos": Russia, "stp": Russia, "war": Russia, } }
package application import ( "net/http" log "github.com/sirupsen/logrus" "github.com/spf13/viper" ) type Application struct { ListenAddr string MongoAddr string } func NewApplication() *Application { viper.SetConfigName("config") viper.AddConfigPath(".") if err := viper.ReadInConfig(); err != nil { log.Fatal(err) } app := &Application{} app.ListenAddr = viper.GetString("addr") app.MongoAddr = viper.GetString("mongo_addr") return app } func Logger(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { log.Info(r.Method, r.URL) h.ServeHTTP(w, r) }) }
package main

import (
	"fmt"
)

// genParens performs a depth-first enumeration of all well-formed
// parenthesis strings, appending each completed string to results.
//
//	strSoFar         - the prefix built so far
//	remainingLParens - how many '(' may still be opened
//	unpairedLParens  - how many opened '(' still lack a matching ')'
//	results          - accumulator the finished strings are appended to
//
// Opening brackets are tried before closing ones, so the output comes out
// in lexicographic order ('(' < ')').
func genParens(strSoFar string, remainingLParens, unpairedLParens int, results []string) []string {
	if remainingLParens == 0 {
		// Nothing was ever opened (n == 0): contribute no strings.
		if len(strSoFar) == 0 {
			return results
		}
		// Close every bracket that is still open, then record the string.
		if unpairedLParens > 0 {
			return genParens(strSoFar+")", remainingLParens, unpairedLParens-1, results)
		}
		return append(results, strSoFar)
	}
	// Branch 1: open another bracket.
	results = genParens(strSoFar+"(", remainingLParens-1, unpairedLParens+1, results)
	// Branch 2: close one; valid only while an unmatched '(' exists.
	if unpairedLParens > 0 {
		results = genParens(strSoFar+")", remainingLParens, unpairedLParens-1, results)
	}
	return results
}

// generateParenthesis returns every well-formed combination of n pairs of
// parentheses (LeetCode 22).
func generateParenthesis(n int) []string {
	// Bug fix: the original passed `results` into genParens AND appended the
	// returned slice back onto that same `results` — correct only because
	// both aliased one backing array. Return the recursion result directly.
	return genParens("", n, 0, make([]string, 0, 10))
}

func main() {
	ans1 := generateParenthesis(3)
	fmt.Printf("ans: %v\n", ans1)
}
/**
 * Definition for singly-linked list.
 * type ListNode struct {
 *     Val int
 *     Next *ListNode
 * }
 */

// reorderList rearranges the list L0->L1->...->Ln-1->Ln in place into
// L0->Ln->L1->Ln-1->L2->..., i.e. it interleaves the first half with the
// reversed second half (LeetCode 143). It allocates no new nodes.
func reorderList(head *ListNode) {
	// Lists of length 0 or 1 are already in reordered form.
	if head==nil || head.Next==nil{
		return
	}
	// Two-pointer scan: fast advances two nodes per step, slow one, so when
	// fast runs out, slow sits on the last node of the first half (the
	// middle node is kept in the first half for odd lengths).
	var fast, slow *ListNode
	fast=head
	slow=head
	for ;fast.Next != nil && fast.Next.Next !=nil;{
		fast = fast.Next.Next
		slow=slow.Next
	}
	// Detach the second half and reverse it; `last` is non-nil here because
	// the length->=2 guard above guarantees slow has a successor.
	last:=slow.Next
	slow.Next = nil
	last = reverse(last)
	// Merge: after each first-half node, splice in one node from the
	// reversed second half. next/next2 save the successors before the
	// pointers are rewired.
	var next,next2 *ListNode
	for tmp:=head;;{
		if tmp==nil || last==nil {
			break
		}
		next = tmp.Next
		next2 = last.Next
		tmp.Next = last
		tmp.Next.Next = next
		tmp = next
		last = next2
	}
}

// reverse reverses the list starting at node in place and returns the new
// head. It dereferences node.Next unconditionally, so node must be non-nil
// (reorderList only ever calls it with at least one node).
func reverse(node *ListNode) *ListNode{
	var pre,next *ListNode
	for ;;{
		// Last node: point it back at the already-reversed prefix and stop.
		if node.Next==nil{
			node.Next = pre
			break
		}
		next = node.Next
		node.Next = pre
		pre = node
		node = next
	}
	return node
}

// plist prints every node from head to the end of the list (debug helper).
// NOTE(review): uses fmt, but this snippet carries no import block —
// presumably pasted from a larger file; confirm fmt is imported there.
func plist(head *ListNode){
	for tmp:=head;tmp!=nil;{
		fmt.Println(tmp)
		tmp = tmp.Next
	}
}
package github import ( "archive/tar" "compress/gzip" "errors" "fmt" "io" "net/http" "os" "path/filepath" "github.com/gomods/athens/pkg/repo" ) const ( fetchRepoURI string = "https://api.github.com/repos/%s/%s/tarball/%s" tmpFileName = "%s-%s-%s" // owner-repo-ref ) type gitCrawler struct { owner string repoName string tag string } // NewGitCrawler creates a new Crawler for repositories hosted on github func NewGitCrawler(owner string, repoName string, tag string) (repo.Crawler, error) { if owner == "" || repoName == "" { return nil, errors.New("invalid repository identifier") } return &gitCrawler{ owner: owner, repoName: repoName, tag: tag, }, nil } // Fetches a tarball of a repo and untars it into a temp dir which is used later in the workflow. func (g gitCrawler) DownloadRepo() (string, error) { uri := fmt.Sprintf(fetchRepoURI, g.owner, g.repoName, g.tag) resp, err := http.Get(uri) if err != nil { return "", err } defer resp.Body.Close() tmpDir := os.TempDir() dirName, err := untar(resp.Body, tmpDir) if err != nil { os.Remove(tmpDir) return "", err } return dirName, nil } func untar(content io.Reader, tmpDir string) (string, error) { gzr, err := gzip.NewReader(content) defer gzr.Close() if err != nil { return "", err } tr := tar.NewReader(gzr) var dirName string for { hdr, err := tr.Next() if err != nil { if err == io.EOF { break } return "", err } if hdr == nil { continue } target := filepath.Join(tmpDir, hdr.Name) switch hdr.Typeflag { case tar.TypeDir: if dirName == "" { dirName = target } if _, err := os.Stat(target); err != nil { if err := os.MkdirAll(target, 0755); err != nil { return "", err } } case tar.TypeReg: f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode)) if err != nil { return "", err } defer f.Close() if _, err := io.Copy(f, tr); err != nil { return "", err } } } return dirName, nil }