text
stringlengths
11
4.05M
// Package models defines JSON-RPC response payload types.
package models

// Responce is a generic JSON-RPC 2.0 response envelope.
//
// NOTE(review): the name is a misspelling of "Response" but is kept
// unchanged because external callers reference this exported type.
type Responce struct {
	// JsonRPC is the protocol version string, e.g. "2.0".
	//
	// BUG fix: the tags previously read `json: "jsonrpc"` — the space
	// after the colon makes them invalid struct-tag syntax, so
	// encoding/json silently fell back to the Go field names
	// (JsonRPC, ID, Result) when marshalling/unmarshalling.
	JsonRPC string `json:"jsonrpc"`
	// ID echoes the identifier of the request being answered.
	ID string `json:"id"`
	// Result holds the arbitrary result object of the call.
	Result map[string]interface{} `json:"result"`
}
// Package virtualmachinevolume: ginkgo specs for syncVolumePvc.
package virtualmachinevolume

import (
	"context"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"

	hc "kubevirt-image-service/pkg/apis/hypercloud/v1alpha1"
	"kubevirt-image-service/pkg/util"
)

// Test matrix for syncVolumePvc:
//
//   #  pvc  pvc phase  volume condition  volume state
//   1   X   -          -                 -  (pvc gets created)
//   2   O   bound      true              available
//   3   O   lost       -                 -  (error expected)
//   4   O   pending    -                 -
var _ = Describe("syncVolumePvc", func() {
	Context("1. with no pvc", func() {
		// No PVC exists yet: syncVolumePvc is expected to create one.
		r := createFakeReconcileVolumeWithImage()
		err := r.syncVolumePvc()
		It("Should not return error", func() {
			Expect(err).Should(BeNil())
		})
		It("Should create pvc", func() {
			pvc := &corev1.PersistentVolumeClaim{}
			err := r.client.Get(context.TODO(), types.NamespacedName{Name: GetVolumePvcName(r.volume.Name), Namespace: r.volume.Namespace}, pvc)
			Expect(err).Should(BeNil())
		})
	})
	Context("2. with bound pvc", func() {
		// A bound PVC should flip the volume to Available and mark the
		// ReadyToUse condition true.
		pvc := newTestPvc()
		pvc.Status.Phase = corev1.ClaimBound
		r := createFakeReconcileVolumeWithImage(pvc)
		err := r.syncVolumePvc()
		It("Should not return error", func() {
			Expect(err).Should(BeNil())
		})
		It("Should update state to available", func() {
			volume := &hc.VirtualMachineVolume{}
			err = r.client.Get(context.TODO(), testVolumeNamespacedName, volume)
			Expect(err).Should(BeNil())
			Expect(volume.Status.State).Should(Equal(hc.VirtualMachineVolumeStateAvailable))
		})
		It("Should update condition readyToUse to true", func() {
			volume := &hc.VirtualMachineVolume{}
			err = r.client.Get(context.TODO(), testVolumeNamespacedName, volume)
			Expect(err).Should(BeNil())
			found, cond := util.GetConditionByType(volume.Status.Conditions, hc.VirtualMachineVolumeConditionReadyToUse)
			Expect(found).Should(BeTrue())
			Expect(cond.Status).Should(Equal(corev1.ConditionTrue))
		})
	})
	Context("3. with lost pvc", func() {
		// A lost PVC is unrecoverable: syncVolumePvc must surface an error.
		pvc := newTestPvc()
		pvc.Status.Phase = corev1.ClaimLost
		r := createFakeReconcileVolumeWithImage(pvc)
		err := r.syncVolumePvc()
		It("Should return error", func() {
			Expect(err).ShouldNot(BeNil())
		})
	})
	Context("4. with pending pvc", func() {
		// A pending PVC is a normal transient state; no error expected.
		pvc := newTestPvc()
		pvc.Status.Phase = corev1.ClaimPending
		r := createFakeReconcileVolumeWithImage(pvc)
		err := r.syncVolumePvc()
		It("Should not return error", func() {
			Expect(err).Should(BeNil())
		})
	})
})
// Package config defines the application configuration schema.
package config

// Config models the application configuration.
type Config struct {
	// Version is the configuration schema version.
	Version int
	// Buttons lists button entries, each pairing a display Label with
	// its internal Id.
	// NOTE(review): "Id" would be spelled "ID" in idiomatic Go, but the
	// field is exported so renaming it would break callers.
	Buttons []struct {
		Label string
		Id    string
	}
}
/*
 * Copyright 2021 American Express
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

// Package core holds the top-level Earlybird types and configuration holders.
package core

import (
	cfgReader "github.com/americanexpress/earlybird/pkg/config"
)

// EarlybirdCore is the main interface for interacting with Earlybird as a package.
type EarlybirdCore interface {
	ConfigInit() cfgReader.EarlybirdConfig
	StartHTTP()
	Scan()
}

// EarlybirdCfg is the global Earlybird configuration.
type EarlybirdCfg struct {
	Config cfgReader.EarlybirdConfig
}

// PTRHTTPConfig is the configuration for the Earlybird REST API.
// NOTE(review): fields are *string, presumably flag-package pointers — confirm at the call site.
type PTRHTTPConfig struct {
	HTTP       *string
	HTTPConfig *string
	HTTPS      *string
	HTTPSCert  *string
	HTTPSKey   *string
}

// PTRGitConfig is the configuration definition for Earlybird git scans.
type PTRGitConfig struct {
	Repo       *string
	RepoUser   *string
	RepoBranch *string
	Project    *string
}
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tcp_test

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/brewlin/net-protocol/protocol/header"
	"github.com/brewlin/net-protocol/pkg/seqnum"
	"github.com/brewlin/net-protocol/protocol/transport/tcp"
	"github.com/brewlin/net-protocol/protocol/transport/tcp/testing/context"
)

// createConnectedWithSACKPermittedOption creates and connects c.ep with the
// SACKPermitted option enabled if the stack in the context has the SACK
// support enabled.
func createConnectedWithSACKPermittedOption(c *context.Context) *context.RawEndpoint {
	return c.CreateConnectedWithOptions(header.TCPSynOptions{SACKPermitted: c.SACKEnabled()})
}

// setStackSACKPermitted toggles SACK support on the context's stack,
// failing the test immediately on error.
func setStackSACKPermitted(t *testing.T, c *context.Context, enable bool) {
	t.Helper()
	if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(enable)); err != nil {
		t.Fatalf("c.s.SetTransportProtocolOption(tcp.ProtocolNumber, SACKEnabled(%v) = %v", enable, err)
	}
}

// TestSackPermittedConnect establishes a connection with the SACK option
// enabled.
func TestSackPermittedConnect(t *testing.T) {
	for _, sackEnabled := range []bool{false, true} {
		t.Run(fmt.Sprintf("stack.sackEnabled: %v", sackEnabled), func(t *testing.T) {
			c := context.New(t, defaultMTU)
			defer c.Cleanup()
			setStackSACKPermitted(t, c, sackEnabled)
			rep := createConnectedWithSACKPermittedOption(c)
			data := []byte{1, 2, 3}
			rep.SendPacket(data, nil)
			savedSeqNum := rep.NextSeqNum
			rep.VerifyACKNoSACK()
			// Make an out of order packet and send it.
			rep.NextSeqNum += 3
			sackBlocks := []header.SACKBlock{
				{rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))},
			}
			rep.SendPacket(data, nil)
			// Restore the saved sequence number so that the
			// VerifyXXX calls use the right sequence number for
			// checking ACK numbers.
			rep.NextSeqNum = savedSeqNum
			if sackEnabled {
				rep.VerifyACKHasSACK(sackBlocks)
			} else {
				rep.VerifyACKNoSACK()
			}
			// Send the missing segment.
			rep.SendPacket(data, nil)
			// The ACK should contain the cumulative ACK for all 9
			// bytes sent and no SACK blocks.
			rep.NextSeqNum += 3
			// Check that no SACK block is returned in the ACK.
			rep.VerifyACKNoSACK()
		})
	}
}

// TestSackDisabledConnect establishes a connection with the SACK option
// disabled and verifies that no SACKs are sent for out of order segments.
func TestSackDisabledConnect(t *testing.T) {
	for _, sackEnabled := range []bool{false, true} {
		t.Run(fmt.Sprintf("sackEnabled: %v", sackEnabled), func(t *testing.T) {
			c := context.New(t, defaultMTU)
			defer c.Cleanup()
			setStackSACKPermitted(t, c, sackEnabled)
			// The SYN options omit SACKPermitted, so SACK must stay
			// off regardless of whether the stack supports it.
			rep := c.CreateConnectedWithOptions(header.TCPSynOptions{})
			data := []byte{1, 2, 3}
			rep.SendPacket(data, nil)
			savedSeqNum := rep.NextSeqNum
			rep.VerifyACKNoSACK()
			// Make an out of order packet and send it.
			rep.NextSeqNum += 3
			rep.SendPacket(data, nil)
			// The ACK should contain the older sequence number and
			// no SACK blocks.
			rep.NextSeqNum = savedSeqNum
			rep.VerifyACKNoSACK()
			// Send the missing segment.
			rep.SendPacket(data, nil)
			// The ACK should contain the cumulative ACK for all 9
			// bytes sent and no SACK blocks.
			rep.NextSeqNum += 3
			// Check that no SACK block is returned in the ACK.
			rep.VerifyACKNoSACK()
		})
	}
}

// TestSackPermittedAccept accepts and establishes a connection with the
// SACKPermitted option enabled if the connection request specifies the
// SACKPermitted option. In case of SYN cookies SACK should be disabled as we
// don't encode the SACK information in the cookie.
func TestSackPermittedAccept(t *testing.T) {
	type testCase struct {
		cookieEnabled bool
		sackPermitted bool
		wndScale      int
		wndSize       uint16
	}
	testCases := []testCase{
		// When cookie is used window scaling is disabled.
		{true, false, -1, 0xffff},
		// When cookie is used window scaling is disabled.
		{false, true, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default).
	}
	// Force (or restore) SYN-cookie behavior by adjusting the threshold,
	// restoring the package-level value when the test finishes.
	savedSynCountThreshold := tcp.SynRcvdCountThreshold
	defer func() {
		tcp.SynRcvdCountThreshold = savedSynCountThreshold
	}()
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) {
			if tc.cookieEnabled {
				tcp.SynRcvdCountThreshold = 0
			} else {
				tcp.SynRcvdCountThreshold = savedSynCountThreshold
			}
			for _, sackEnabled := range []bool{false, true} {
				t.Run(fmt.Sprintf("test stack.sackEnabled: %v", sackEnabled), func(t *testing.T) {
					c := context.New(t, defaultMTU)
					defer c.Cleanup()
					setStackSACKPermitted(t, c, sackEnabled)
					rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS, SACKPermitted: tc.sackPermitted})
					// Now verify no SACK blocks are
					// received when sack is disabled.
					data := []byte{1, 2, 3}
					rep.SendPacket(data, nil)
					rep.VerifyACKNoSACK()
					savedSeqNum := rep.NextSeqNum
					// Make an out of order packet and send
					// it.
					rep.NextSeqNum += 3
					sackBlocks := []header.SACKBlock{
						{rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))},
					}
					rep.SendPacket(data, nil)
					// The ACK should contain the older
					// sequence number.
					rep.NextSeqNum = savedSeqNum
					if sackEnabled && tc.sackPermitted {
						rep.VerifyACKHasSACK(sackBlocks)
					} else {
						rep.VerifyACKNoSACK()
					}
					// Send the missing segment.
					rep.SendPacket(data, nil)
					// The ACK should contain the cumulative
					// ACK for all 9 bytes sent and no SACK
					// blocks.
					rep.NextSeqNum += 3
					// Check that no SACK block is returned
					// in the ACK.
					rep.VerifyACKNoSACK()
				})
			}
		})
	}
}

// TestSackDisabledAccept accepts and establishes a connection with
// the SACKPermitted option disabled and verifies that no SACKs are
// sent for out of order packets.
func TestSackDisabledAccept(t *testing.T) {
	type testCase struct {
		cookieEnabled bool
		wndScale      int
		wndSize       uint16
	}
	testCases := []testCase{
		// When cookie is used window scaling is disabled.
		{true, -1, 0xffff},
		// When cookie is used window scaling is disabled.
		{false, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default).
	}
	savedSynCountThreshold := tcp.SynRcvdCountThreshold
	defer func() {
		tcp.SynRcvdCountThreshold = savedSynCountThreshold
	}()
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) {
			if tc.cookieEnabled {
				tcp.SynRcvdCountThreshold = 0
			} else {
				tcp.SynRcvdCountThreshold = savedSynCountThreshold
			}
			for _, sackEnabled := range []bool{false, true} {
				t.Run(fmt.Sprintf("test: sackEnabled: %v", sackEnabled), func(t *testing.T) {
					c := context.New(t, defaultMTU)
					defer c.Cleanup()
					setStackSACKPermitted(t, c, sackEnabled)
					rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS})
					// Now verify no SACK blocks are
					// received when sack is disabled.
					data := []byte{1, 2, 3}
					rep.SendPacket(data, nil)
					rep.VerifyACKNoSACK()
					savedSeqNum := rep.NextSeqNum
					// Make an out of order packet and send
					// it.
					rep.NextSeqNum += 3
					rep.SendPacket(data, nil)
					// The ACK should contain the older
					// sequence number and no SACK blocks.
					rep.NextSeqNum = savedSeqNum
					rep.VerifyACKNoSACK()
					// Send the missing segment.
					rep.SendPacket(data, nil)
					// The ACK should contain the cumulative
					// ACK for all 9 bytes sent and no SACK
					// blocks.
					rep.NextSeqNum += 3
					// Check that no SACK block is returned
					// in the ACK.
					rep.VerifyACKNoSACK()
				})
			}
		})
	}
}

// TestUpdateSACKBlocks exercises tcp.UpdateSACKBlocks over a table of
// (segment, rcvNxt, existing blocks) cases.
func TestUpdateSACKBlocks(t *testing.T) {
	testCases := []struct {
		segStart   seqnum.Value
		segEnd     seqnum.Value
		rcvNxt     seqnum.Value
		sackBlocks []header.SACKBlock
		updated    []header.SACKBlock
	}{
		// Trivial cases where current SACK block list is empty and we
		// have an out of order delivery.
		{10, 11, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 11}}},
		{10, 12, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 12}}},
		{10, 20, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 20}}},
		// Cases where current SACK block list is not empty and we have
		// an out of order delivery. Tests that the updated SACK block
		// list has the first block as the one that contains the new
		// SACK block representing the segment that was just delivered.
		{10, 11, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 11}, {12, 20}}},
		{24, 30, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{24, 30}, {12, 20}}},
		{24, 30, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{24, 30}, {12, 20}, {32, 40}}},
		// Ensure that we only retain header.MaxSACKBlocks and drop the
		// oldest one if adding a new block exceeds
		// header.MaxSACKBlocks.
		{24, 30, 9,
			[]header.SACKBlock{{12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}, {72, 80}},
			[]header.SACKBlock{{24, 30}, {12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}},
		// Cases where segment extends an existing SACK block.
		{10, 12, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 20}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}},
		{15, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 22}}},
		{15, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 25}}},
		{11, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{11, 25}}},
		{10, 12, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 20}, {32, 40}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}},
		{10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}},
		{15, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 22}, {32, 40}}},
		{15, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 25}, {32, 40}}},
		{11, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{11, 25}, {32, 40}}},
		// Cases where segment contains rcvNxt.
		{10, 20, 15, []header.SACKBlock{{20, 30}, {40, 50}}, []header.SACKBlock{{40, 50}}},
	}
	for _, tc := range testCases {
		var sack tcp.SACKInfo
		copy(sack.Blocks[:], tc.sackBlocks)
		sack.NumBlocks = len(tc.sackBlocks)
		tcp.UpdateSACKBlocks(&sack, tc.segStart, tc.segEnd, tc.rcvNxt)
		if got, want := sack.Blocks[:sack.NumBlocks], tc.updated; !reflect.DeepEqual(got, want) {
			t.Errorf("UpdateSACKBlocks(%v, %v, %v, %v), got: %v, want: %v", tc.sackBlocks, tc.segStart, tc.segEnd, tc.rcvNxt, got, want)
		}
	}
}

// TestTrimSackBlockList exercises tcp.TrimSACKBlockList, which drops or
// shrinks blocks fully or partially covered by rcvNxt.
func TestTrimSackBlockList(t *testing.T) {
	testCases := []struct {
		rcvNxt     seqnum.Value
		sackBlocks []header.SACKBlock
		trimmed    []header.SACKBlock
	}{
		// Simple cases where we trim whole entries.
		{2, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}},
		{21, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{22, 30}, {32, 40}}},
		{31, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{32, 40}}},
		{40, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}},
		// Cases where we need to update a block.
		{12, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{12, 20}, {22, 30}, {32, 40}}},
		{23, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{23, 30}, {32, 40}}},
		{33, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{33, 40}}},
		{41, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}},
	}
	for _, tc := range testCases {
		var sack tcp.SACKInfo
		copy(sack.Blocks[:], tc.sackBlocks)
		sack.NumBlocks = len(tc.sackBlocks)
		tcp.TrimSACKBlockList(&sack, tc.rcvNxt)
		if got, want := sack.Blocks[:sack.NumBlocks], tc.trimmed; !reflect.DeepEqual(got, want) {
			t.Errorf("TrimSackBlockList(%v, %v), got: %v, want: %v", tc.sackBlocks, tc.rcvNxt, got, want)
		}
	}
}
// Package birdwatcher provides a small HTTP client for a birdwatcher API.
package birdwatcher

import (
	"encoding/json"
	"io/ioutil"
	"net/http"
)

// ClientResponse is the decoded JSON document returned by the API.
type ClientResponse map[string]interface{}

// Client talks to a birdwatcher API at a fixed base URL.
type Client struct {
	Api string // base URL, joined with the endpoint path on each request
}

// NewClient creates a Client for the given API base URL.
func NewClient(api string) *Client {
	return &Client{Api: api}
}

// GetJson performs a GET request against Api+endpoint, reads the whole
// response body and returns it decoded into a ClientResponse map.
// On any error an empty (non-nil) ClientResponse is returned with the error.
//
// NOTE(review): http.Get uses http.DefaultClient, which has no timeout, and
// the HTTP status code is never checked — non-2xx bodies are decoded like
// any other response. Confirm whether both are intended.
//
// Receiver renamed from the non-idiomatic "self" to "c".
func (c *Client) GetJson(endpoint string) (ClientResponse, error) {
	res, err := http.Get(c.Api + endpoint)
	if err != nil {
		return ClientResponse{}, err
	}
	// Read body.
	defer res.Body.Close()
	payload, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return ClientResponse{}, err
	}

	// Decode json payload.
	result := make(ClientResponse)
	if err := json.Unmarshal(payload, &result); err != nil {
		return ClientResponse{}, err
	}
	return result, nil
}
// Package pools implements the account-wiper plugin that deletes every
// Exoscale instance pool in every zone.
package pools

import (
	"context"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/exoscale/egoscale"
	"github.com/janoszen/exoscale-account-wiper/plugin"
)

// Plugin deletes all instance pools of an Exoscale account.
type Plugin struct {
}

// GetKey returns the plugin identifier.
func (p *Plugin) GetKey() string {
	return "pools"
}

// GetParameters returns the supported configuration parameters (none).
func (p *Plugin) GetParameters() map[string]string {
	return make(map[string]string)
}

// SetParameter always fails because this plugin accepts no options.
func (p *Plugin) SetParameter(_ string, _ string) error {
	return fmt.Errorf("instance pool deletion has no options")
}

// Run lists every zone, deletes every instance pool it finds, and waits for
// each pool to disappear. At most 10 deletions run concurrently. A cancelled
// context stops scheduling new deletions; already-started ones are awaited.
func (p *Plugin) Run(clientFactory *plugin.ClientFactory, ctx context.Context) error {
	log.Printf("deleting instance pools...")
	client := clientFactory.GetExoscaleClient()

	var wg sync.WaitGroup
	// Semaphore bounding concurrent pool deletions to 10.
	poolBlocker := make(chan bool, 10)

	if ctx.Err() != nil {
		log.Printf("aborting...")
		return nil
	}

	resp, err := client.RequestWithContext(ctx, egoscale.ListZones{})
	if err != nil {
		return err
	}
zones:
	for _, z := range resp.(*egoscale.ListZonesResponse).Zone {
		// BUG fix: the original used `select { case <-ctx.Done(): break ... }`,
		// where `break` only leaves the select statement, so cancellation
		// never actually stopped these loops. Check ctx.Err() and break the
		// labeled loop instead.
		if ctx.Err() != nil {
			break zones
		}
		resp, err := client.RequestWithContext(ctx, egoscale.ListInstancePools{ZoneID: z.ID})
		if err != nil {
			return err
		}
		for _, i := range resp.(*egoscale.ListInstancePoolsResponse).InstancePools {
			if ctx.Err() != nil {
				break zones
			}
			wg.Add(1)
			// Copy loop-scoped values for the goroutine below.
			instancePoolId := i.ID
			zoneId := z.ID
			currentState := i.State
			go func() {
				defer wg.Done()
				// Acquire a semaphore slot for the duration of this deletion.
				poolBlocker <- true
				defer func() {
					<-poolBlocker
				}()
				log.Printf("deleting instance pool %s...", instancePoolId)
				var err error
				if currentState != egoscale.InstancePoolDestroying {
					request := egoscale.DestroyInstancePool{
						ID:     instancePoolId,
						ZoneID: zoneId,
					}
					err = client.BooleanRequestWithContext(ctx, request)
				} else {
					log.Printf("instance pool %s is already being destroyed...", instancePoolId)
				}
				if err != nil {
					log.Printf("error deleting instance pool %s (%v)", instancePoolId, err)
					return
				}
				// Poll until the pool can no longer be fetched.
				for {
					log.Printf("waiting for complete removal of instance pool %s...", instancePoolId)
					getRequest := egoscale.GetInstancePool{
						ID:     instancePoolId,
						ZoneID: zoneId,
					}
					if _, err := client.RequestWithContext(ctx, getRequest); err != nil {
						// Wait for the instance pool to be completely destroyed:
						// a failing GET means the pool is gone.
						log.Printf("deleted instance pool %s", instancePoolId)
						break
					}
					time.Sleep(time.Second * 10)
				}
			}()
		}
	}
	wg.Wait()
	log.Printf("deleted instance pools.")
	return nil
}
/*
Copyright 2021 The KodeRover Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package updater

import (
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// DeleteRoles deletes all RBAC Roles in namespace ns whose labels match selector.
func DeleteRoles(ns string, selector labels.Selector, cl client.Client) error {
	return deleteObjectsWithDefaultOptions(ns, selector, &rbacv1beta1.Role{}, cl)
}

// CreateRole creates the given RBAC Role through the controller-runtime client.
func CreateRole(role *rbacv1beta1.Role, cl client.Client) error {
	return createObject(role, cl)
}

// UpdateRole updates the given RBAC Role through the controller-runtime client.
func UpdateRole(role *rbacv1beta1.Role, cl client.Client) error {
	return updateObject(role, cl)
}
// Package trains holds the persistence models and ticketing domain logic for
// trains, trips, and orders.
package trains

import (
	"errors"
	"strconv"
	"time"
)

// Train maps to the trains table.
type Train struct {
	ID string
}

// TrainStaion maps to the train_station table: one stop on a train's route.
// NOTE(review): "Staion" is a misspelling of "Station", kept because the
// exported name is referenced by callers.
type TrainStaion struct {
	StationNo          uint   `gorm:"column:station_no"`
	StationName        string `gorm:"column:station_name"`
	ArriveTime         string `gorm:"column:arrive_time"`
	DepartureTime      string `gorm:"column:departure_time"`
	TodayArriveTime    string `gorm:"column:today_arrive_time"`
	TodayDepartureTime string `gorm:"column:today_departure_time"`
}

// TableName fixes the gorm table name for TrainStaion.
func (TrainStaion) TableName() string {
	return "train_station"
}

// TrainStaionPair maps to the train_station_pair table: an ordered
// (start station, end station) pair served by one train.
type TrainStaionPair struct {
	// StationNo 1 marks the train's origin station.
	StartStationNo   uint   `gorm:"column:start_station_no"`
	EndStationNo     uint   `gorm:"column:end_station_no"`
	StartStationName string `gorm:"column:start_station_name"`
	EndStationName   string `gorm:"column:end_station_name"`
	StartTime        string `gorm:"column:start_time"`
	EndTime          string `gorm:"column:end_time"`
	// Associated with the trains table.
	TrainID          string `gorm:"column:train_id"`
	TrainType        string `gorm:"column:train_type"`
	TrainStationNums uint   `gorm:"column:train_staion_nums"`
}

// Trip maps to the trip table.
type Trip struct {
	TripID string
}

// TripSegment maps to the trip_segment table: seat availability for one
// segment of a trip, encoded as a byte array.
type TripSegment struct {
	ID uint `gorm:"primary_key;column:id"`
	// StationNo 1 marks the train's origin station.
	// SegmentNo uint `gorm:"primary_key;column:segment_no"`
	SeatCatogory string  `gorm:"column:seat_catogory"`
	SeatBytes    []uint8 `gorm:"column:seat_bytes"`
}

// Order maps to the orders table.
type Order struct {
	ID             uint      `gorm:"primary_key;auto_increment" json:"id"`
	TripID         string    `json:"trip_id"`
	StartStationNo uint      `json:"start_station_no"`
	EndStationNo   uint      `json:"end_station_no"`
	SeatNo         int64     `json:"seat_no"`
	SeatCatogory   string    `json:"seat_catogory"`
	UserID         uint      `json:"user_id"`
	PassangerID    uint64    `json:"passangerID_id"`
	StartStation   string    `json:"startStation"`
	EndStation     string    `json:"endStation"`
	Date           time.Time `json:"date"`
	Status         string    `json:"status"`
}

// TableName implements gorm's TableName interface so the struct maps to
// "trip_segment"; without it (and without global pluralization disabled)
// gorm would derive the table name by pluralizing the struct name.
func (TripSegment) TableName() string {
	return "trip_segment"
}

// getTrainStaions returns every station this train passes through.
func (s *Train) getTrainStaions() ([]TrainStaion, error) {
	repository := NewTicketRepository()
	models, err := repository.FindTrainStaions(s.ID)
	return models, err
}

// ListTrainStaionPair returns the TrainStaionPair list matching the given
// startCity, endCity, date and isFast filter.
func ListTrainStaionPair(startCity, endCity, date string, isFast bool) ([]TrainStaionPair, error) {
	repository := NewTicketRepository()
	// Resolve both city names to IDs.
	// BUG fix: the first FindCityID error was previously overwritten by the
	// second call before being checked; now each lookup is checked.
	startCityID, err := repository.FindCityID(startCity)
	if err != nil {
		return nil, err
	}
	endCityID, err := repository.FindCityID(endCity)
	if err != nil {
		return nil, err
	}
	// Build the pair-id pattern "<startID>-<endID>-%".
	s1 := strconv.FormatUint(uint64(startCityID), 10)
	s2 := strconv.FormatUint(uint64(endCityID), 10)
	id := s1 + "-" + s2 + "-" + "%"
	// Whether the requested date is today.
	nowDate := time.Now().Format("2006-01-02")
	isToday := date == nowDate
	models, err := repository.FindTrainStationPairs(id, date, isToday, isFast)
	return models, err
}

// getRemainSeats returns remaining seat counts per seat category for the
// segment [startStationNo, endStationNo]. On lookup failure it returns a
// pointer to an empty map (pre-existing behavior, kept for compatibility).
func (s *Trip) getRemainSeats(startStationNo, endStationNo uint) *map[string]uint {
	resMap := make(map[string]uint)
	repository := NewTicketRepository()
	// Load the TripSegment rows covering the requested span.
	seats, err := repository.FindTripSegments(s.TripID, startStationNo, endStationNo)
	if err != nil {
		return &resMap
	}
	// Compute remaining capacity from the segment bitmaps.
	tripSegments := TripSegments{seats}
	res := tripSegments.calculasRemainSeats()
	return res
}

// orderSomeSeat finds `count` free seats of the given category on the segment
// [startStationNo, endStationNo] and creates one order per passenger, all
// inside a single repository transaction.
func (s *Trip) orderSomeSeat(count int32, startStationNo, endStationNo uint, catogory string, passangerIDs []uint64) error {
	repository := NewTicketRepositoryTX()
	// 1. Load seat information.
	seats, err := repository.FindTripSegments(s.TripID, startStationNo, endStationNo, catogory)
	if err != nil {
		return err
	}
	tripSegments := TripSegments{seats}
	tripSegments.printBytes1()
	// 2. Compute valid seat numbers locally.
	validSeatNos, ok := tripSegments.calculasValidSeatNos(count)
	if !ok {
		repository.Rollback()
		// BUG fix: previously returned the (nil) err here, so a failed
		// allocation looked like success to the caller.
		return errors.New("not enough valid seats for the requested count")
	}
	// 3. Mark the chosen seats as taken locally.
	tripSegments.discountSeats(validSeatNos)
	// 4. Write the modified seat information back.
	err = repository.UpdateTripSegment(seats)
	if err != nil {
		repository.Rollback()
		return err
	}
	// 5. Create one order per passenger.
	// UserID: taken from middleware (hard-coded to 1 here).
	for i := 0; i < len(validSeatNos); i++ {
		order := Order{UserID: 1, PassangerID: passangerIDs[i], TripID: s.TripID, StartStationNo: startStationNo,
			EndStationNo: endStationNo, SeatNo: validSeatNos[i], SeatCatogory: catogory, Date: time.Now(), Status: "未支付"}
		err = repository.CreateOrder(&order)
		if err != nil {
			repository.Rollback()
			return err
		}
	}
	// 6. Commit.
	err = repository.Commit()
	if err != nil {
		repository.Rollback()
		return err
	}
	return nil
}

// cancleOneOrder refunds (if paid) and cancels the given order, releasing its
// seat, inside a single repository transaction.
func (s *Trip) cancleOneOrder(orderID uint) error {
	repository := NewTicketRepositoryTX()
	// 1. Load the order and check it belongs to this user.
	userID := uint(1)
	order, err := repository.FindValidOrder(orderID, userID)
	if err != nil {
		repository.Rollback()
		return err
	}
	// 2. Refund the user if the order was paid.
	if order.Status == "已支付" {
		// fmt.Print("refund the user")
	}
	// 3. Release the seat locally.
	seats, err := repository.FindTripSegments(order.TripID, order.StartStationNo, order.EndStationNo, order.SeatCatogory)
	if err != nil {
		repository.Rollback()
		return err
	}
	tripSegments := TripSegments{seats}
	tripSegments.addOneSeat(order.SeatNo)
	err = repository.UpdateTripSegment(seats)
	if err != nil {
		repository.Rollback()
		return err
	}
	// 4. Update the order status.
	err = repository.UpdateOrderStatus(&order, "已退票")
	if err != nil {
		repository.Rollback()
		return err
	}
	// 5. Commit.
	err = repository.Commit()
	if err != nil {
		repository.Rollback()
		return err
	}
	return nil
}

// changeOneOrder rebooks an existing order onto a new segment/category of
// this trip inside a single repository transaction.
func (s *Trip) changeOneOrder(orderID uint, startStationNo, endStationNo uint, catogory string) error {
	repository := NewTicketRepositoryTX()
	// 1. Load the order and check it belongs to this user.
	userID := uint(1)
	order, err := repository.FindValidOrder(orderID, userID)
	if err != nil {
		repository.Rollback()
		return err
	}
	// 2. Find and occupy a seat on the new segment.
	newSeats, err := repository.FindTripSegments(s.TripID, startStationNo, endStationNo, catogory)
	if err != nil {
		repository.Rollback()
		return err
	}
	newTripSegments := TripSegments{newSeats}
	// BUG fix: the errors from calculasOneValidSeatNo and the first
	// UpdateTripSegment were previously ignored.
	validSeatNo, err := newTripSegments.calculasOneValidSeatNo()
	if err != nil {
		repository.Rollback()
		return err
	}
	newTripSegments.addOneSeat(validSeatNo)
	err = repository.UpdateTripSegment(newSeats)
	if err != nil {
		repository.Rollback()
		return err
	}
	// 3. Release the seat on the old segment.
	seats, err := repository.FindTripSegments(order.TripID, order.StartStationNo, order.EndStationNo, order.SeatCatogory)
	if err != nil {
		repository.Rollback()
		return err
	}
	tripSegments := TripSegments{seats}
	tripSegments.discountOneSeat(order.SeatNo)
	err = repository.UpdateTripSegment(seats)
	if err != nil {
		repository.Rollback()
		return err
	}
	// 4. Update the order with the new booking details.
	newMap := map[string]interface{}{"status": "已改票", "trip_id": s.TripID, "start_station_no": startStationNo,
		"end_station_no": endStationNo, "seat_catogory": catogory, "seat_no": validSeatNo}
	err = repository.UpdateOrder(&order, newMap)
	if err != nil {
		repository.Rollback()
		return err
	}
	// 5. Commit.
	err = repository.Commit()
	if err != nil {
		repository.Rollback()
		return err
	}
	return nil
}
// Package postgres implements the Driver interface.
package postgres

import (
	"database/sql"
	"fmt"
	"strconv"
	"strings"

	"github.com/db-journey/migrate/v2/direction"
	"github.com/db-journey/migrate/v2/driver"
	"github.com/db-journey/migrate/v2/file"
	"github.com/lib/pq"
)

// fileTemplate is the scaffold written into newly created migration files.
var fileTemplate = []byte(``) // TODO

// Driver is the postgres driver for journey.
type Driver struct {
	db *sql.DB
}

// tableName is the fully qualified migrations bookkeeping table.
const tableName = "public.schema_migrations"

// txDisabledOption is the per-file option that makes Migrate run the file's
// SQL outside the bookkeeping transaction.
const txDisabledOption = "disable_ddl_transaction"

// Open opens and verifies the database handle.
func Open(url string) (driver.Driver, error) {
	driver := &Driver{}
	db, err := sql.Open("postgres", url)
	if err != nil {
		return nil, err
	}
	// sql.Open is lazy; Ping forces a real connection so bad URLs fail here.
	if err := db.Ping(); err != nil {
		return nil, err
	}
	driver.db = db
	return driver, driver.ensureVersionTableExists()
}

// SetDB replaces the current database handle.
func (driver *Driver) SetDB(db *sql.DB) {
	driver.db = db
}

// Close closes the database handle.
func (driver *Driver) Close() error {
	return driver.db.Close()
}

// ensureVersionTableExists creates the schema_migrations table if needed and
// widens its version column to bigint on older installations.
func (driver *Driver) ensureVersionTableExists() error {
	// avoid DDL statements if possible for BDR (see #23)
	var c int
	// NOTE(review): tableName carries the "public." prefix, but
	// information_schema.tables.table_name stores the bare table name, so
	// this lookup may never match and the CREATE below always runs (it is
	// IF NOT EXISTS, so harmless) — confirm against a live database.
	if err := driver.db.QueryRow("SELECT count(*) FROM information_schema.tables WHERE table_name = $1", tableName).Scan(&c); err != nil {
		return err
	}
	if c <= 0 {
		_, err := driver.db.Exec("CREATE TABLE IF NOT EXISTS " + tableName + " (version bigint not null primary key)")
		return err
	}
	// table schema_migrations already exists, check if the schema is correct, ie: version is a bigint
	var dataType string
	if err := driver.db.QueryRow("SELECT data_type FROM information_schema.columns where table_name = $1 and column_name = 'version'", tableName).Scan(&dataType); err != nil {
		return err
	}
	if dataType == "bigint" {
		return nil
	}
	_, err := driver.db.Exec("ALTER TABLE " + tableName + " ALTER COLUMN version TYPE bigint USING version::bigint")
	return err
}

// Migrate performs the migration of any one file.
func (driver *Driver) Migrate(f file.File) (err error) { var tx *sql.Tx tx, err = driver.db.Begin() if err != nil { return err } defer func() { if err != nil { tx.Rollback() } }() if f.Direction == direction.Up { if _, err = tx.Exec("INSERT INTO "+tableName+" (version) VALUES ($1)", f.Version); err != nil { return err } } else if f.Direction == direction.Down { if _, err = tx.Exec("DELETE FROM "+tableName+" WHERE version=$1", f.Version); err != nil { return err } } if err = f.ReadContent(); err != nil { return err } if txDisabled(fileOptions(f.Content)) { _, err = driver.db.Exec(string(f.Content)) } else { _, err = tx.Exec(string(f.Content)) } if err != nil { pqErr := err.(*pq.Error) offset, err := strconv.Atoi(pqErr.Position) if err == nil && offset >= 0 { lineNo, columnNo := file.LineColumnFromOffset(f.Content, offset-1) errorPart := file.LinesBeforeAndAfter(f.Content, lineNo, 5, 5, true) return fmt.Errorf("%s %v: %s in line %v, column %v:\n\n%s", pqErr.Severity, pqErr.Code, pqErr.Message, lineNo, columnNo, string(errorPart)) } return fmt.Errorf("%s %v: %s", pqErr.Severity, pqErr.Code, pqErr.Message) } return tx.Commit() } // Version returns the current migration version. func (driver *Driver) Version() (file.Version, error) { var version file.Version err := driver.db.QueryRow("SELECT version FROM " + tableName + " ORDER BY version DESC LIMIT 1").Scan(&version) if err == sql.ErrNoRows { return version, nil } return version, err } // Versions returns the list of applied migrations. 
func (driver *Driver) Versions() (file.Versions, error) { rows, err := driver.db.Query("SELECT version FROM " + tableName + " ORDER BY version DESC") if err != nil { return nil, err } defer rows.Close() versions := file.Versions{} for rows.Next() { var version file.Version if err = rows.Scan(&version); err != nil { return nil, err } versions = append(versions, version) } if err = rows.Err(); err != nil { return nil, err } return versions, err } // Execute a SQL statement func (driver *Driver) Execute(statement string) error { _, err := driver.db.Exec(statement) return err } // fileOptions returns the list of options extracted from the first line of the file content. // Format: "-- <option1> <option2> <...>" func fileOptions(content []byte) []string { firstLine := strings.SplitN(string(content), "\n", 2)[0] if !strings.HasPrefix(firstLine, "-- ") { return []string{} } opts := strings.TrimPrefix(firstLine, "-- ") return strings.Split(opts, " ") } func txDisabled(opts []string) bool { for _, v := range opts { if v == txDisabledOption { return true } } return false } func init() { // According to the PostgreSQL documentation (section 32.1.1.2), postgres // library supports two URI schemes: postgresql:// and postgres:// // https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING driver.Register("postgres", "sql", fileTemplate, Open) driver.Register("postgresql", "sql", fileTemplate, Open) }
// Copyright © 2019 Michael. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fetch

import (
	"container/list"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"

	"skygo/runbook"
	"skygo/runbook/xsync"
	"skygo/utils/log"
)

// Resource represent state of fetch
type Resource struct {
	// resource maps a version string to the collection of source URLs
	// registered for that version.
	resource map[string]SrcURL

	// preferred version
	// print error log if prefer version is set again
	prefer string
	m      sync.Mutex
	done   uint32 // 1 once prefer has been set; read/written via sync/atomic

	selected string // indicated which version is selected
}

// SrcURL holds a collection of Source URL in specific version
type SrcURL struct {
	head *list.List // elements are *fetchCmd
}

// fetchCmd pairs one source URL with the function that knows how to
// download it (file copy, VCS clone, HTTP get, ...).
type fetchCmd struct {
	fetch func(ctx runbook.Context, from string, notify func(bool)) error
	url   string
}

// NewFetch create fetch state
func NewFetch() *Resource {
	fetch := new(Resource)
	fetch.resource = make(map[string]SrcURL)
	return fetch
}

// ByVersion get SrcURL by version
// If not found, create empty holder
// The returned pointer addresses a copy of the map value; both share
// the same underlying *list.List, so pushes through it are visible.
func (fetch *Resource) ByVersion(version string) *SrcURL {
	if res, ok := fetch.resource[version]; ok {
		return &res
	}
	res := SrcURL{head: list.New()}
	fetch.resource[version] = res
	return &res
}

// Versions sort all SrcURL from latest to older, then return in slice
func (fetch *Resource) Versions() []string {
	num := len(fetch.resource)
	versions := make([]string, num)
	i := 0
	for v := range fetch.resource {
		versions[i] = v
		i++
	}

	min := func(x, y int) int {
		if x < y {
			return x
		}
		return y
	}

	// example version sorting result: 2.0 > 1.0.1 > 1.0 > HEAD
	// Non-numeric components (e.g. "HEAD") sort after numeric ones
	// because Atoi failure on the left side reports "not less".
	sort.Slice(versions, func(i, j int) bool {
		a := strings.Split(versions[i], ".")
		b := strings.Split(versions[j], ".")
		num := min(len(a), len(b))
		for i := 0; i < num; i++ {
			na, e := strconv.Atoi(a[i])
			if e != nil {
				return false
			}
			nb, _ := strconv.Atoi(b[i])
			if na > nb {
				return true
			}
			if na < nb {
				return false
			}
		}
		// Equal common prefix: the longer version string is "newer".
		return len(a) > len(b)
	})
	return versions
}

// Prefer set preferred version of SrcURL
// Only the first call takes effect; later calls log a warning.
func (fetch *Resource) Prefer(version string) {
	// Fast path: already set, no lock needed.
	if atomic.LoadUint32(&fetch.done) == 1 {
		log.Warning("Try to set preferred version again!")
		return
	}

	fetch.m.Lock()
	defer fetch.m.Unlock()
	// Re-check under the lock (double-checked with the atomic above).
	if fetch.done == 0 {
		defer atomic.StoreUint32(&fetch.done, 1)
		fetch.prefer = version
	}
}

// Selected return selected SrcURL and its version
// select preferred then latest version of SrcURL
func (fetch *Resource) Selected() (*SrcURL, string) {
	if fetch.selected == "" {
		if fetch.prefer != "" {
			fetch.selected = fetch.prefer
		} else {
			versions := fetch.Versions()
			if len(versions) > 0 {
				fetch.selected = versions[0]
			}
		}
	}

	if res, ok := fetch.resource[fetch.selected]; ok {
		return &res, fetch.selected
	}
	return nil, ""
}

// Download download all source URL held by selected SrcURL
// Extract automatically if source URL is an archiver, like tar.bz2
// if source code is updated, it calls notify
// All URLs are fetched concurrently; the first failure cancels the
// group and is returned. notify fires at most once (sync.Once).
func (fetch *Resource) Download(ctx runbook.Context, notify func(ctx runbook.Context)) error {
	res, _ := fetch.Selected()
	if res == nil {
		log.Warning("%s don't hold any source URL", ctx.Owner())
		return nil
	}

	log.Trace("Start downloading source URLs owned by %s", ctx.Owner())
	h := res.head
	var once sync.Once
	g, _ := xsync.WithContext(ctx.Ctx())
	for e := h.Front(); e != nil; e = e.Next() {
		e := e // https://golang.org/doc/faq#closures_and_goroutines
		g.Go(func() error {
			fetchCmd := e.Value.(*fetchCmd)
			url := strings.TrimSpace(fetchCmd.url)
			if err := fetchCmd.fetch(ctx, url, func(updated bool) {
				if notify != nil && updated {
					once.Do(func() { notify(ctx) })
				}
			}); err != nil {
				return fmt.Errorf("failed to fetch %s. Reason: \n\t %s", fetchCmd.url, err)
			}
			return nil
		})
	}
	return g.Wait()
}

// Push push source URL srcurl to SrcURL
// srcurl can hold multiple URL with delimeter space
// Push try to detect scheme by order:
// file:// find locally under FilesPath
// vcs, pls refer to PushVcs
// http:// https:// grab from network
func (src *SrcURL) Push(srcurl string) *SrcURL {
	url := strings.Fields(srcurl)
	for _, u := range url {
		if strings.HasPrefix(u, "file://") {
			src.pushFile(u)
			continue
		}

		// bySuffix is presumably a VCS detector keyed on URL suffix
		// (e.g. ".git") — TODO confirm against its definition.
		if bySuffix(u) != nil {
			src.PushVcs(u)
			continue
		}

		if strings.HasPrefix(u, "http://") || strings.HasPrefix(u, "https://") {
			src.PushHTTP(u, nil)
			continue
		}
		panic(fmt.Sprintf("Unknown source URL: %s", u))
	}
	return src
}

// Pushfile push one scheme file:// to SrcURL
func (src *SrcURL) pushFile(srcurl string) *SrcURL {
	url := fetchCmd{
		fetch: file,
		url:   srcurl,
	}
	src.head.PushBack(&url)
	return src
}

// PushVcs push one vcs repository to SrcURL
// srcurl is repository or repository@revision
// repository must be known by vcs utility like git
// revision identifier for the underlying source repository, such as a commit
// hash prefix, revision tag, or branch name, selects that specific code revision.
// valid srcurl example:
// https://github.com:foo/bar.git
// https://github.com:foo/bar.git@v1.1
// https://github.com:foo/bar.git@c198403
// Mostly, Push can push vcs repository URL, reserved this API for fallback
func (src *SrcURL) PushVcs(srcurl string) *SrcURL {
	if strings.Contains(srcurl, " ") {
		panic(fmt.Sprintf("repository %s has SPACE", srcurl))
	}
	url := fetchCmd{
		fetch: vcsFetch,
		url:   srcurl,
	}
	src.head.PushBack(&url)
	return src
}

// PushHTTP push Http or Https URL to SrcURL
// srcurl's scheme must be https:// or http://, and sha256 checksum must be
// append at the end with delimeter #
// e.g. http://x.y.z/foo.tar.bz2#sha256
//
// httpGet is the caller own get function, it's optional(value is nil).
// httpGet does not need to handle checksum, since parameter from does not
// contain checksum. Example implementation:
// func wget(ctx context.Context, from, to string) error {
//	arg := "-t 2 -T 30 -nv --no-check-certificate"
//	args := strings.Fields(fmt.Sprintf("%s %s", arg, from))
//	cmd := runbook.NewCommand(ctx, "wget", args...)
//	cmd.Cmd.Dir = filepath.Dir(to)
//	if err := cmd.Cmd.Run(); err != nil {
//		return err
//	}
//	return nil
// }
func (src *SrcURL) PushHTTP(srcurl string,
	httpGet func(ctx runbook.Context, from, to string) error) *SrcURL {
	url := fetchCmd{
		fetch: func(ctx runbook.Context, url string, notify func(bool)) error {
			return httpAndUnpack(ctx, url, httpGet, notify)
		},
		url: srcurl,
	}
	src.head.PushBack(&url)
	return src
}
package jobs

import (
	"crypto/md5"
	"fmt"

	"github.com/m7shapan/my-http/models"
	"github.com/m7shapan/my-http/repositories"
)

// hashingJob fetches the body of a URL through a ResponseRepository and
// produces its MD5 hex digest.
type hashingJob struct {
	responseRepository repositories.ResponseRepository
	url                string
}

// NewHashingJob builds a hashingJob bound to the given repository and URL.
func NewHashingJob(r repositories.ResponseRepository, url string) *hashingJob {
	return &hashingJob{responseRepository: r, url: url}
}

// Execute fetches the URL body and returns either {"err": error} on
// failure or {"response": models.Response} carrying the MD5 hex hash.
func (j *hashingJob) Execute() interface{} {
	body, err := j.responseRepository.Get(j.url)
	if err != nil {
		return map[string]interface{}{"err": err}
	}

	response := models.Response{
		URL:  j.url,
		Hash: fmt.Sprintf("%x", md5.Sum(body)),
	}
	return map[string]interface{}{"response": response}
}
package main

import "fmt"

// reverse reverses s in place using the idiomatic tuple swap instead of
// a temporary variable.
func reverse(s []int) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}

func main() {
	s := []int{4, 3, 6, -7, 2, 8}
	reverse(s)
	fmt.Print(s)
}
// Copyright 2020 The Gitea Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repository import ( "context" "fmt" "time" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" "github.com/unknwon/com" "xorm.io/builder" ) // GitFsck calls 'git fsck' to check repository health. func GitFsck(ctx context.Context) error { log.Trace("Doing: GitFsck") if err := models.Iterate( models.DefaultDBContext(), new(models.Repository), builder.Expr("id>0 AND is_fsck_enabled=?", true), func(idx int, bean interface{}) error { select { case <-ctx.Done(): return fmt.Errorf("Aborted due to shutdown") default: } repo := bean.(*models.Repository) repoPath := repo.RepoPath() log.Trace("Running health check on repository %s", repoPath) if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil { desc := fmt.Sprintf("Failed to health check repository (%s): %v", repoPath, err) log.Warn(desc) if err = models.CreateRepositoryNotice(desc); err != nil { log.Error("CreateRepositoryNotice: %v", err) } } return nil }, ); err != nil { return err } log.Trace("Finished: GitFsck") return nil } // GitGcRepos calls 'git gc' to remove unnecessary files and optimize the local repository func GitGcRepos(ctx context.Context) error { log.Trace("Doing: GitGcRepos") args := append([]string{"gc"}, setting.Git.GCArgs...) if err := models.Iterate( models.DefaultDBContext(), new(models.Repository), builder.Gt{"id": 0}, func(idx int, bean interface{}) error { select { case <-ctx.Done(): return fmt.Errorf("Aborted due to shutdown") default: } repo := bean.(*models.Repository) if err := repo.GetOwner(); err != nil { return err } if stdout, err := git.NewCommand(args...). SetDescription(fmt.Sprintf("Repository Garbage Collection: %s", repo.FullName())). 
RunInDirTimeout( time.Duration(setting.Git.Timeout.GC)*time.Second, repo.RepoPath()); err != nil { log.Error("Repository garbage collection failed for %v. Stdout: %s\nError: %v", repo, stdout, err) return fmt.Errorf("Repository garbage collection failed: Error: %v", err) } return nil }, ); err != nil { return err } log.Trace("Finished: GitGcRepos") return nil } func gatherMissingRepoRecords() ([]*models.Repository, error) { repos := make([]*models.Repository, 0, 10) if err := models.Iterate( models.DefaultDBContext(), new(models.Repository), builder.Gt{"id": 0}, func(idx int, bean interface{}) error { repo := bean.(*models.Repository) if !com.IsDir(repo.RepoPath()) { repos = append(repos, repo) } return nil }, ); err != nil { if err2 := models.CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil { return nil, fmt.Errorf("CreateRepositoryNotice: %v", err) } } return repos, nil } // DeleteMissingRepositories deletes all repository records that lost Git files. func DeleteMissingRepositories(doer *models.User) error { repos, err := gatherMissingRepoRecords() if err != nil { return fmt.Errorf("gatherMissingRepoRecords: %v", err) } if len(repos) == 0 { return nil } for _, repo := range repos { log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID) if err := models.DeleteRepository(doer, repo.OwnerID, repo.ID); err != nil { if err2 := models.CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil { return fmt.Errorf("CreateRepositoryNotice: %v", err) } } } return nil } // ReinitMissingRepositories reinitializes all repository records that lost Git files. 
func ReinitMissingRepositories() error { repos, err := gatherMissingRepoRecords() if err != nil { return fmt.Errorf("gatherMissingRepoRecords: %v", err) } if len(repos) == 0 { return nil } for _, repo := range repos { log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID) if err := git.InitRepository(repo.RepoPath(), true); err != nil { if err2 := models.CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil { return fmt.Errorf("CreateRepositoryNotice: %v", err) } } } return nil }
package main

import (
	"reflect"
	"testing"
)

// Test_Graph exercises graph construction, the weight table, edge
// lookup, closest-node selection, and Dijkstra's algorithm on a small
// fixed graph.
func Test_Graph(t *testing.T) {
	a := &Node{Name: "Kruthika's abode"}
	b := &Node{Name: "Brian's apartment"}
	c := &Node{Name: "Greg's casa"}
	d := &Node{Name: "Wesley's condo"}

	g := Graph{}
	g.AddEdge(a, b, 1)
	g.AddEdge(a, c, 2)
	g.AddEdge(b, d, 9)
	g.AddEdge(c, b, 10)
	g.AddEdge(c, d, 5)

	t.Run("AddNode", func(t *testing.T) {
		if len(g.Nodes) != 4 {
			t.Errorf("expected %d, got %d", 4, len(g.Nodes))
		}
		nodes := map[*Node]bool{a: true, b: true, c: true, d: true}
		if !reflect.DeepEqual(g.Nodes, nodes) {
			t.Errorf("expected %v, got %v", nodes, g.Nodes)
		}
	})

	t.Run("AddEdge", func(t *testing.T) {
		if len(g.Edges) != 5 {
			t.Errorf("expected %d, got %d", 5, len(g.Edges))
		}
		edges := []*Edge{
			{Source: a, Destination: b, Weight: 1},
			{Source: a, Destination: c, Weight: 2},
			{Source: b, Destination: d, Weight: 9},
			{Source: c, Destination: b, Weight: 10},
			{Source: c, Destination: d, Weight: 5},
		}
		if !reflect.DeepEqual(g.Edges, edges) {
			t.Errorf("expected %v, got %v", edges, g.Edges)
		}
	})

	t.Run("NewWeightTable", func(t *testing.T) {
		got := g.NewWeightTable(a)
		want := map[*Node]int{a: 0, b: Infinity, c: Infinity, d: Infinity}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("expected %v, got %v", want, got)
		}
	})

	// Subtest name fixed: was the typo "GedNodeEdges".
	t.Run("GetNodeEdges", func(t *testing.T) {
		edges := g.GetNodeEdges(a)
		expectedEdges := []*Edge{
			{Source: a, Destination: b, Weight: 1},
			{Source: a, Destination: c, Weight: 2},
		}
		if !reflect.DeepEqual(edges, expectedEdges) {
			t.Errorf("expected %v, got %v", expectedEdges, edges)
		}
	})

	// Subtest name fixed to match the function under test
	// (was "getClosestVisitedNode").
	t.Run("getClosestNonVisitedNode", func(t *testing.T) {
		weightTable := g.NewWeightTable(a)
		node := getClosestNonVisitedNode(weightTable, []*Node{})
		if node != a {
			t.Errorf("expected %v, got %v", a, node)
		}
	})

	t.Run("Dijkstra", func(t *testing.T) {
		weightTable := g.Dijkstra(a)
		if weightTable[a] != 0 {
			t.Errorf("expected %d, got %d", 0, weightTable[a])
		}
		if weightTable[b] != 1 {
			t.Errorf("expected %d, got %d", 1, weightTable[b])
		}
		if weightTable[c] != 2 {
			t.Errorf("expected %d, got %d", 2, weightTable[c])
		}
		if weightTable[d] != 7 {
			t.Errorf("expected %d, got %d", 7, weightTable[d])
		}
	})
}
package main

import (
	"fmt"
)

// Reads n from stdin, then n*n integers, and prints their sum.
func main() {
	n := 0
	fmt.Scan(&n)

	total := 0
	// Same number of Scan calls as the original nested loops: n*n reads.
	for i := 0; i < n*n; i++ {
		v := 0
		fmt.Scan(&v)
		total += v
	}
	fmt.Printf("%d\n", total)
}
package main

import "fmt"

// Demonstrates slicing expressions and iteration over a slice.
func main() {
	x := []int{2, 3, 4, 5}

	fmt.Println(x[1])   // 3
	fmt.Println(x[1:])  // [3 4 5]
	fmt.Println(x[1:3]) // [3 4]

	for _, v := range x {
		fmt.Println(v)
	}
}
package echologrus

import (
	"io"
	"time"

	"github.com/labstack/echo"
	"github.com/labstack/echo/middleware"
	"github.com/labstack/gommon/log"
	"github.com/sirupsen/logrus"
)

// Logger : implement logrus Logger
// It adapts an embedded *logrus.Logger to echo's logger interface.
// Skipper, when set, decides which requests the middleware skips.
type Logger struct {
	*logrus.Logger
	Skipper middleware.Skipper
}

// Level delegate echo.Logger
// Maps the logrus level to echo's log.Lvl; any unmapped level panics.
func (l Logger) Level() log.Lvl {
	switch l.Logger.Level {
	case logrus.DebugLevel:
		return log.DEBUG
	case logrus.WarnLevel:
		return log.WARN
	case logrus.ErrorLevel:
		return log.ERROR
	case logrus.InfoLevel:
		return log.INFO
	default:
		l.Panic("Invalid level")
	}
	// Unreachable: Panic above does not return normally.
	return log.OFF
}

// SetHeader delegate echo.Logger
// No-op: logrus formats its own output.
func (l Logger) SetHeader(_ string) {}

// SetPrefix delegate echo.Logger
// No-op: prefixes are not supported by this adapter.
func (l Logger) SetPrefix(s string) {}

// Prefix delegate echo.Logger
// Always empty; see SetPrefix.
func (l Logger) Prefix() string {
	return ""
}

// SetLevel delegate echo.Logger
// Maps echo's log.Lvl back onto the logrus level; unmapped levels panic.
func (l Logger) SetLevel(lvl log.Lvl) {
	switch lvl {
	case log.DEBUG:
		l.Logger.SetLevel(logrus.DebugLevel)
	case log.WARN:
		l.Logger.SetLevel(logrus.WarnLevel)
	case log.ERROR:
		l.Logger.SetLevel(logrus.ErrorLevel)
	case log.INFO:
		l.Logger.SetLevel(logrus.InfoLevel)
	default:
		l.Panic("Invalid level")
	}
}

// Output delegate echo.Logger
func (l Logger) Output() io.Writer {
	return l.Out
}

// SetOutput delegate echo.Logger
func (l Logger) SetOutput(w io.Writer) {
	l.Logger.SetOutput(w)
}

// Printj delegate echo.Logger
func (l Logger) Printj(j log.JSON) {
	l.Logger.WithFields(logrus.Fields(j)).Print()
}

// Debugj delegate echo.Logger
func (l Logger) Debugj(j log.JSON) {
	l.Logger.WithFields(logrus.Fields(j)).Debug()
}

// Infoj delegate echo.Logger
func (l Logger) Infoj(j log.JSON) {
	l.Logger.WithFields(logrus.Fields(j)).Info()
}

// Warnj delegate echo.Logger
func (l Logger) Warnj(j log.JSON) {
	l.Logger.WithFields(logrus.Fields(j)).Warn()
}

// Errorj delegate echo.Logger
func (l Logger) Errorj(j log.JSON) {
	l.Logger.WithFields(logrus.Fields(j)).Error()
}

// Fatalj delegate echo.Logger
func (l Logger) Fatalj(j log.JSON) {
	l.Logger.WithFields(logrus.Fields(j)).Fatal()
}
// Panicj delegate echo.Logger func (l Logger) Panicj(j log.JSON) { l.Logger.WithFields(logrus.Fields(j)).Panic() } func (l Logger) logrusMiddlewareHandler(c echo.Context, next echo.HandlerFunc) error { req := c.Request() res := c.Response() start := time.Now() if err := next(c); err != nil { c.Error(err) } stop := time.Now() p := req.URL.Path //bytesIn := req.Header.Get(echo.HeaderContentLength) l.Logger.WithFields(map[string]interface{}{ //"@timestamp": time.Now().Format(time.RFC3339), "remote_ip": c.RealIP(), "status": res.Status, //"host": req.Host, //"uri": req.RequestURI, "method": req.Method, "path": p, "referer": req.Referer(), "user_agent": req.UserAgent(), //"latency": strconv.FormatInt(stop.Sub(start).Nanoseconds()/1000, 10), "latency": stop.Sub(start).String(), //"bytes_in": bytesIn, //"bytes_out": strconv.FormatInt(res.Size, 10), }).Info("Handled request") return nil } func (l Logger) logger(next echo.HandlerFunc) echo.HandlerFunc { if l.Skipper == nil { l.Skipper = middleware.DefaultSkipper } return func(c echo.Context) error { if l.Skipper(c) { return next(c) } return l.logrusMiddlewareHandler(c, next) } } // Hook is a function to process middleware. func (l Logger) Hook() echo.MiddlewareFunc { return l.logger }
package balance

import (
	"encoding/hex"
	"fmt"

	"github.com/alethio/web3-multicall-go/multicall"
	"github.com/avast/retry-go"
	"golang.org/x/sync/errgroup"
)

// multicallLoader resolves balance requests by batching them into
// Multicall view calls, one batch per default-block parameter.
type multicallLoader struct {
	mc *multicall.Multicall
}

// fetchRequests resolves requests concurrently: successful balances are
// written to results (hex-encoded), per-request failures are collected,
// and exactly one value is finally sent on done (nil, the group error,
// or a CollectError wrapping the per-request errors).
func (loader multicallLoader) fetchRequests(b *Bookkeeper, requests []*Request, results chan *RawResponse, done chan error) {
	// Group view calls by block parameter so each block height needs
	// only a single multicall round-trip.
	viewCallsByBlock := make(map[string]multicall.ViewCalls)
	// Keys are "block-index" so raw results map back to their request.
	reqMap := make(map[string]*Request)
	for index, req := range requests {
		key := fmt.Sprintf("%s-%d", req.DefaultBlockParam, index)
		reqMap[key] = req
		viewCalls, ok := viewCallsByBlock[req.DefaultBlockParam]
		if !ok {
			viewCalls = make(multicall.ViewCalls, 0, 0)
		}
		var viewCall multicall.ViewCall
		if req.Currency == ETH {
			// Native ETH balance comes from the multicall contract itself.
			viewCall = multicall.ViewCall{
				Key:       key,
				Target:    loader.mc.Contract(),
				Method:    "getEthBalance(address)(uint256)",
				Arguments: []interface{}{req.Address},
			}
		} else {
			// ERC-20 balance; Currency presumably holds the token
			// contract address — TODO confirm against Request's docs.
			viewCall = multicall.ViewCall{
				Key:       key,
				Target:    string(req.Currency),
				Method:    "balanceOf(address)(uint256)",
				Arguments: []interface{}{req.Address},
			}
		}
		viewCalls = append(viewCalls, viewCall)
		viewCallsByBlock[req.DefaultBlockParam] = viewCalls
	}
	group := errgroup.Group{}
	// Buffered to capacity so workers never block when recording failures.
	failed := make(chan *RequestError, len(requests))
	for defaultBlockParam, viewCalls := range viewCallsByBlock {
		// Shadow the loop variables captured by the goroutine below.
		defaultBlockParam := defaultBlockParam
		viewCalls := viewCalls
		group.Go(func() error {
			err := retry.Do(
				func() error {
					res, err := loader.mc.CallRaw(viewCalls, defaultBlockParam)
					if err != nil {
						fmt.Println(err)
						return err
					}
					for key, result := range res.Calls {
						if result.Success {
							balance := result.ReturnValues[0].([]byte)
							hexBalance := hex.EncodeToString(balance)
							results <- &RawResponse{
								Request: reqMap[key],
								Balance: hexBalance,
							}
						} else {
							// Call executed but the EVM reverted.
							failed <- &RequestError{reqMap[key], fmt.Errorf("VM Error")}
						}
					}
					return nil
				},
				retry.Attempts(b.config.Attempts),
			)
			return err
		})
	}
	err := group.Wait()
	// All workers are finished; safe to close and drain failed.
	close(failed)
	if err != nil {
		reqErrors := make([]*RequestError, 0, len(requests))
		for reqError := range failed {
			reqErrors = append(reqErrors, reqError)
		}
		if len(reqErrors) == 0 {
			done <- err
		} else {
			done <- CollectError{reqErrors}
		}
	} else {
		done <- nil
	}
}
package addbinary func addBinary(a string, b string) string { la := len(a) lb := len(b) lmax := max(la, lb) result := make([]byte, lmax+1) // big enough to hold a flowed answer carry := off sum := off // iterate backwards over the input strings and the result byte slice concurrently for pos, posA, posB := lmax, la-1, lb-1; pos > 0; pos, posA, posB = pos-1, posA-1, posB-1 { bitA := off bitB := off if posA >= 0 { bitA = a[posA] } if posB >= 0 { bitB = b[posB] } sum, carry = addBits(bitA, bitB, carry) result[pos] = sum } result[0] = carry if carry == on { // We need the overflow bit too return string(result) } return string(result[1:]) } // to facilitate conversion from bits to bytes const ( on = byte('1') off = byte('0') ) func max(a, b int) int { if a > b { return a } return b } // add three bits together and return 2 bits, sum is LSB and flow is MSB func addBits(a, b byte, carry byte) (sum byte, flow byte) { const off3 = 3 * off sum = a + b + carry - off3 if sum > 1 { sum = (sum & 1) + off flow = on return } flow = off sum = sum + off return }
package main

import (
	"errors"
	"fmt"
	"net/http"

	log "github.com/cihub/seelog"
	"github.com/getsentry/raven-go"
	"github.com/gin-gonic/gin"
)

// httpServer builds the Gin router and serves forever on cfg.Server.
// Logging middleware is enabled in dev mode; panic recovery reports to
// Sentry when a Raven client is configured, else uses gin.Recovery.
func httpServer() {
	//gin.SetMode(gin.ReleaseMode)
	router := gin.New()
	if cfg.DevMode {
		log.Info("Use Gin Logger")
		router.Use(gin.Logger())
	}
	if cfg.Sentry != "" && Raven != nil {
		log.Info("Use Custom Recovery")
		router.Use(recovery(Raven))
	} else {
		log.Info("Use Gin Recovery")
		router.Use(gin.Recovery())
	}
	router.GET("/", handler)
	log.Info("Start Gin Http Server at: ", cfg.Server)
	// router.Run blocks; reaching the log call means the server failed.
	log.Error("Gin Http Server Error: ", router.Run(cfg.Server))
}

// handler echoes the request headers (plus the remote address) back as JSON.
// Reference: http://golang.org/pkg/net/http/#Request
// HTTP defines that header names are case-insensitive.
// The request parser implements this by canonicalizing the
// name, making the first character and any characters
// following a hyphen uppercase and the rest lowercase.
func handler(c *gin.Context) {
	r := c.Request // *http.Request
	h := r.Header  // http.Header map[string][]string
	h.Add("RemoteAddr", r.RemoteAddr)
	c.JSON(http.StatusOK, h)
}

// recovery returns a middleware that forwards accumulated gin errors
// and any panic (with stack trace) to Sentry, answering 500 on panic.
func recovery(client *raven.Client) gin.HandlerFunc {
	return func(c *gin.Context) {
		defer func() {
			// Report handler-level errors collected during this request.
			for _, item := range c.Errors {
				packet := raven.NewPacket(item.Err.Error(),
					&raven.Message{Message: item.Err.Error(), Params: []interface{}{item.Meta}},
					raven.NewHttp(c.Request))
				_, ch := client.Capture(packet, nil)
				if err := <-ch; err != nil {
					log.Error("Gin Error: ", err)
				}
			}
			// Recover from panics, answer 500 and send the stack trace.
			if rval := recover(); rval != nil {
				c.Writer.WriteHeader(http.StatusInternalServerError)
				message := fmt.Sprint(rval)
				trace := raven.NewStacktrace(0, 2, nil)
				packet := raven.NewPacket(message,
					raven.NewException(errors.New(message), trace),
					raven.NewHttp(c.Request))
				client.Capture(packet, nil)
				log.Error("Gin Error: ", message)
			}
		}()
		c.Next()
	}
}
package clients

import (
	"io/ioutil"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"regexp"
	"strings"
	"sync"

	"videocrawler/common/comm"
	"videocrawler/common/util"
	"videocrawler/env"
)

// crossdomainRe matches a crossdomain.xml policy that allows any domain.
// Compiled once at package scope instead of per call (the original also
// ignored the Compile error).
var crossdomainRe = regexp.MustCompile(`.+allow-access-from domain="\*".+`)

// charsetRe extracts the charset from an HTML content-type meta tag.
var charsetRe = regexp.MustCompile(`content="text/html;\s*?charset=(.+?)"`)

// Entry ties a cookie jar to the domain it was created for. ready is
// closed once the jar has been initialized and its cookies loaded.
type Entry struct {
	jar    *cookiejar.Jar
	domain string
	ready  chan struct{}
}

// Jars is a lazily populated, mutex-guarded table of cookie jars keyed
// by host.
type Jars struct {
	JarTable map[string]*Entry
	mu       *sync.Mutex
	comm     *comm.Comm
}

// GetJar returns the Entry for the host of domain, creating and
// initializing it on first use. Concurrent callers for the same host
// block on ready until the first caller finishes initialization.
// Returns nil for an unparsable domain.
func (j *Jars) GetJar(domain string) *Entry {
	// Parse before taking the lock: the original locked first and
	// returned early on a parse error WITHOUT unlocking, leaving the
	// mutex held forever.
	u, err := url.Parse(domain)
	if err != nil {
		return nil
	}
	host := u.Host

	j.mu.Lock()
	e := j.JarTable[host]
	if e == nil {
		e = &Entry{ready: make(chan struct{})}
		j.JarTable[host] = e
		// Release the lock before the slow cookie load; latecomers
		// wait on e.ready instead.
		j.mu.Unlock()
		e.domain = domain
		// cookiejar.New(nil) never fails today, but don't ignore err.
		e.jar, err = cookiejar.New(nil)
		if err == nil {
			j.LoadCookie(e)
		}
		close(e.ready)
	} else {
		j.mu.Unlock()
		<-e.ready
	}
	return e
}

// LoadCookie populates the entry's jar from the on-disk cookie store.
// (A leftover debug print of the domain was removed.)
func (j *Jars) LoadCookie(e *Entry) {
	j.comm.LoadCookie(env.CookieDir, e.domain, e.jar)
}

// ExportCookie persists the entry's jar back to the on-disk store.
func (j *Jars) ExportCookie(e *Entry) {
	j.comm.ExportCookie(env.CookieDir, e.domain, e.jar)
}

// CrClient is an HTTP client bound to one domain, sharing that domain's
// cookie jar and carrying a default header set.
type CrClient struct {
	Comm     *comm.Comm
	JarEntry *Entry
	Client   *http.Client
	Headers  map[string]string
	Domain   string
}

// SetHeaders merges headers into the client's default header set.
func (c *CrClient) SetHeaders(headers map[string]string) {
	for key, value := range headers {
		c.Headers[key] = value
	}
}

// SetCookies injects the given cookies into this client's jar.
func (c *CrClient) SetCookies(cookies []comm.CookieItem) {
	c.Comm.LoadCookieFromList(c.JarEntry.domain, c.JarEntry.jar, cookies)
}

// GetUrlContent GETs requestUrl with the client's headers and returns
// the (decompressed) body bytes.
func (c *CrClient) GetUrlContent(requestUrl string) ([]byte, error) {
	req, err := http.NewRequest("GET", requestUrl, nil)
	if err != nil {
		return nil, err
	}
	c.Headers = c.Comm.SetHeader(req, c.Headers)
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	reader := c.Comm.Unzip(resp)
	body, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	//jars.ExportCookie(c.JarEntry)
	return body, nil
}

// CrossdomainCheck reports whether the site hosting pageUrl publishes a
// crossdomain.xml that allows access from any domain.
func (c *CrClient) CrossdomainCheck(pageUrl string) bool {
	u, err := url.Parse(pageUrl)
	if err != nil {
		return false
	}
	// Fix: the policy file lives at the site root. The original glued
	// scheme+host+"crossdomain.xml" with no "://" or "/" separators and
	// then fetched pageUrl instead of the policy URL it had built.
	fileUrl := u.Scheme + "://" + u.Host + "/crossdomain.xml"
	body, err := c.GetUrlContent(fileUrl)
	if err != nil || body == nil {
		return false
	}
	return crossdomainRe.MatchString(string(body))
}

// ConvertContentEncode converts content declared with a non-UTF-8
// charset in its meta tag to UTF-8; other content is returned unchanged.
func (c *CrClient) ConvertContentEncode(content string) string {
	matches := charsetRe.FindStringSubmatch(content)
	if matches != nil {
		encode := strings.ToUpper(matches[1])
		if encode != "" && encode != "UTF-8" {
			content = util.CovertEncode(content, encode)
		}
	}
	return content
}

// jars is the process-wide cookie-jar table shared by all CrClients.
var jars *Jars

func init() {
	jars = &Jars{
		JarTable: make(map[string]*Entry),
		mu:       new(sync.Mutex),
		comm:     comm.New(),
	}
}

// NewClient builds a CrClient for domain, wired to the shared cookie
// jar for that domain's host.
func NewClient(domain string, headers map[string]string) *CrClient {
	jarEntry := jars.GetJar(domain)
	return &CrClient{
		Comm:     comm.New(),
		JarEntry: jarEntry,
		Client: &http.Client{
			Jar: jarEntry.jar,
		},
		Headers: headers,
		Domain:  domain,
	}
}
package generator

// GenerateGinFunc is a placeholder for Gin handler-code generation.
// TODO(review): currently does nothing — implementation pending.
func GenerateGinFunc() {

}
package main

import (
	"bufio"
	"encoding/json"
	"io"
	"log"
	"net"
	"net/http"
	"os"
	"rihuo-up/util"
	"runtime"
	"strings"
	"time"
)

const (
	START          = 0
	LOGIN_INFO_END = 20           // total number of user-login-info lines
	URLS_END       = 4            // number of BOSS endpoints
	STARTTIME      = "2018-11-11" // payment-history window start
	STARTEND       = "2018-11-12" // payment-history window end
	FUNCCLICKURL   = "http://192.168.1.110/biz-orange/SA/funcClickNew/printLog"
	CONF           = "conf/config.json"
)

var (
	// URLS are the BOSS query endpoints exercised at random.
	URLS = []string{
		"http://192.168.1.110/leadeon-app-bossbiz/pb/userFlow/getNewComboMealResource",
		"http://192.168.1.110/leadeon-app-bossbiz/pb/userFee/getRealFee",
		"http://192.168.1.110/leadeon-app-bossbiz/pb/queryBuinessService/getQueryBusiness",
		"http://192.168.1.110/leadeon-app-bossbiz/pb/payFeesHistory/getPayFeesHistory",
	}
	Login_info    = util.Login_info // user login-info lines
	BusinessCodes = []string{"ZF10000-CN00052", "ZF10000-BF00801", "ZF10001-BF00501"}
	logFile       *os.File    // execution log file
	logErr        error       // error from opening the log file
	Log           *log.Logger // application logger backed by logFile
	grNum         int         // goroutine-pool size (bounds concurrency)
	logDir        string      // log-path prefix
)

// configuration mirrors conf/config.json. Fields are exported because
// config.json lives outside this package and the JSON decoder needs
// exported fields. Logdir is the log-path prefix; Grnum limits the
// number of concurrent goroutines per pool.
type configuration struct {
	Logdir string `json:"Logdir"`
	Grnum  int    `json:"Grnum"`
}

// BossReqBody1 is the request body for the simple BOSS queries.
type BossReqBody1 struct {
	CellNum string `json:"cellNum"`
}

// BossReqBody2 adds the time window required by the payment-history query.
type BossReqBody2 struct {
	CellNum   string `json:"cellNum"`
	StartTime string `json:"startTime"`
	StartEnd  string `json:"startEnd"`
}

// BossReqdata1 is the full BOSS request envelope for BossReqBody1.
type BossReqdata1 struct {
	AppKey    string       `json:"appKey"`
	TimeStamp int          `json:"timeStamp"`
	Sig       string       `json:"sig"`
	Channel   string       `json:"channel"`
	Version   string       `json:"version"`
	Cv        string       `json:"cv"`
	St        string       `json:"st"`
	Cid       string       `json:"cid"`
	Xk        string       `json:"xk"`
	Xc        string       `json:"xc"`
	Imei      string       `json:"imei"`
	Sn        string       `json:"sn"`
	Sp        string       `json:"sp"`
	Sb        string       `json:"sb"`
	Sv        string       `json:"sv"`
	Nt        string       `json:"nt"`
	Provcode  string       `json:"provCode"`
	ReqBody   BossReqBody1 `json:"reqBody"`
}

// BossReqdata2 is the full BOSS request envelope for BossReqBody2.
type BossReqdata2 struct {
	AppKey    string       `json:"appKey"`
	TimeStamp int          `json:"timeStamp"`
	Sig       string       `json:"sig"`
	Channel   string       `json:"channel"`
	Version   string       `json:"version"`
	Cv        string       `json:"cv"`
	St        string       `json:"st"`
	Cid       string       `json:"cid"`
	Xk        string       `json:"xk"`
	Xc        string       `json:"xc"`
	Imei      string       `json:"imei"`
	Sn        string       `json:"sn"`
	Sp        string       `json:"sp"`
	Sb        string       `json:"sb"`
	Sv        string       `json:"sv"`
	Nt        string       `json:"nt"`
	Provcode  string       `json:"provCode"`
	ReqBody   BossReqBody2 `json:"reqBody"`
}

// netTransport bounds connect/TLS/header times so a stuck backend
// cannot pin goroutines indefinitely.
var netTransport = &http.Transport{
	Dial: (&net.Dialer{
		Timeout:   10 * time.Second,
		KeepAlive: 30 * time.Second,
	}).Dial,
	TLSHandshakeTimeout:   5 * time.Second,
	ResponseHeaderTimeout: 10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
}

// netClient is shared by all request goroutines.
var netClient = &http.Client{
	Timeout:   time.Second * 30,
	Transport: netTransport,
}

func init() {
	runtime.GOMAXPROCS(5) // use multiple cores

	conf, err := jsonDecode(CONF)
	if err != nil {
		// Fall back to defaults. conf is nil on this path, so it must
		// not be dereferenced — the original read conf.Grnum
		// unconditionally and panicked on a bad config file. It also
		// discarded the result of fmt.Errorf (and never imported fmt).
		grNum = 10
		logDir = "/data/logs/rihuo_up/rihuo_up_"
		log.Printf("config load error, using defaults: %v", err)
	} else {
		grNum = conf.Grnum
		logDir = conf.Logdir
	}

	logFile, logErr = os.OpenFile(logDir+time.Now().Format("20060102")+".log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if logErr != nil {
		panic(logErr)
	}
	Log = log.New(logFile, "[INFO]", log.LstdFlags|log.Lshortfile)
	Log.Println("Job start at : " + time.Now().Format("20060102 15:04:05"))
}

func main() {
	ParallelRequest(grNum)
}

// ParallelRequest streams the phone-number file and fires one BOSS
// request plus one func-click request per line, with at most pnum
// in-flight goroutines per pool.
func ParallelRequest(pnum int) {
	bossPool := util.New(pnum)      // BOSS goroutine pool
	funcPrintPool := util.New(pnum) // func-click goroutine pool

	f, err := os.Open(util.TelFile)
	if err != nil {
		// Fatalf formats and exits; the original passed a format string
		// to Fatal (printed verbatim) and followed with unreachable
		// os.Exit(1).
		Log.Fatalf("Error: %v\n", err)
	}
	defer f.Close()
	defer logFile.Close()

	reader := bufio.NewReader(f)
	for {
		line_bytes, _, err := reader.ReadLine()
		if err == io.EOF {
			break
		}
		line := string(line_bytes)
		line_split := strings.Fields(line)
		// Assumes each line is "tel tel_en prov city" — TODO confirm
		// the input file format; short lines would panic here.
		tel := line_split[0]
		tel_en := line_split[1]
		prov := line_split[2]
		city := line_split[3]

		// Pick a random pre-captured login-info record.
		randNum1 := util.PseudoRandint(START, LOGIN_INFO_END)
		login_info_line := Login_info[randNum1]
		login_info_line_split := strings.Split(login_info_line, "|#$")
		st := login_info_line_split[0]  // platform: 1 Android, 2 iOS
		cv := login_info_line_split[1]  // client version
		cid := login_info_line_split[3]
		xk := login_info_line_split[4]
		xc := login_info_line_split[5]  // channel code
		imei := login_info_line_split[6]
		sn := login_info_line_split[7]  // phone model
		sp := login_info_line_split[8]  // screen resolution
		sb := login_info_line_split[9]  // phone brand
		sv := login_info_line_split[10] // OS version
		nt := login_info_line_split[11] // network type

		bossreqdata1 := BossReqdata1{
			"7952BA0F", 1498036009614, "b03eea761a7034d2d4c16abafb4eff41", "CH01", "1.0",
			cv, st, cid, xk, xc, imei, sn, sp, sb, sv, nt, "",
			BossReqBody1{tel_en},
		}

		randNum2 := util.PseudoRandint(START, URLS_END)
		url := URLS[randNum2]
		b1, _ := json.Marshal(&bossreqdata1)
		bossReqData := string(b1)
		// The payment-history endpoint needs the extended body with a
		// time window.
		if url == "http://192.168.1.110/leadeon-app-bossbiz/pb/payFeesHistory/getPayFeesHistory" {
			bossreqdata2 := BossReqdata2{
				"7952BA0F", 1498036009614, "b03eea761a7034d2d4c16abafb4eff41", "CH01", "1.0",
				cv, st, cid, xk, xc, imei, sn, sp, sb, sv, nt, "",
				BossReqBody2{tel_en, STARTTIME, STARTEND},
			}
			b2, _ := json.Marshal(&bossreqdata2)
			bossReqData = string(b2)
		}

		randNum3 := util.PseudoRandint(START, URLS_END-1)
		businessSteps := BusinessCodes[randNum3]
		funcclickreqdata := `{"cv":"` + cv + `","st":"` + st + `","cid":"` + cid + `","xk":"` + xk + `","xc":"` + xc + `","imei":"` + imei + `","sn":"` + sn + `","sp":"` + sp + `","sb":"` + sb + `","sv":"` + sv + `","nt":"` + nt + `","city":"` + city + `","prov":"` + prov + `","reqBody":{"funcList":[{"businessSteps":"` + businessSteps + `","channel":"` + xc + `","cityCode":"` + city + `","phoneNumber":"` + tel + `","provinceCode":"` + prov + `"}]}}`

		bossPool.Add(1)
		go request(url, bossPool, bossReqData)
		funcPrintPool.Add(1)
		go request(FUNCCLICKURL, funcPrintPool, funcclickreqdata)
	}
	bossPool.Wait()
	funcPrintPool.Wait()
}

// request POSTs reqBody to url and logs the outcome. It always releases
// its slot in grpool, even when the request fails.
func request(url string, grpool *util.Pool, reqBody string) {
	// Done must run on every exit path: the original returned early on
	// error without calling it, which deadlocked Pool.Wait.
	defer grpool.Done()

	reqData := strings.NewReader(reqBody)
	req, err := http.NewRequest("POST", url, reqData)
	if err != nil {
		Log.Printf("Error building request: %v\n", err)
		return
	}
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	req.Header.Set("X-Req-Flag", "rihuoUp")

	rsp, err := netClient.Do(req)
	if err != nil {
		// The original used Log.Fatalf here, which would terminate the
		// whole process on a single failed request; log and continue.
		Log.Printf("Error: %v\n", err)
		return
	}
	// Drain/close so the transport can reuse the connection; the
	// original never closed the body and leaked connections.
	defer rsp.Body.Close()

	Log.Printf("url: %s, body: %s, status: %d\n", url, reqBody, rsp.StatusCode)
}

// jsonDecode loads and decodes the JSON configuration file.
func jsonDecode(conf string) (*configuration, error) {
	// The original ignored the Open error and decoded from a nil file.
	file, err := os.Open(conf)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	config := configuration{}
	if err := json.NewDecoder(file).Decode(&config); err != nil {
		return nil, err
	}
	return &config, nil
}
package main

import "fmt"

// main demonstrates the two slice views of a Go string: []byte exposes the
// raw UTF-8 encoding, while []rune exposes one value per Unicode code point.
func main() {
	words := []string{"abcd", "中文"}

	// Byte view: ASCII is one byte per character, but each of the two
	// Chinese characters occupies three UTF-8 bytes.
	for _, w := range words {
		fmt.Println([]byte(w)) // [97 98 99 100] / [228 184 173 230 150 135]
	}

	// Rune view: exactly one numeric value per character in both cases.
	for _, w := range words {
		fmt.Println([]rune(w)) // [97 98 99 100] / [20013 25991]
	}
}
package main import ( "fmt" "net" "net/http" "os" "time" libhoney "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" statsd "gopkg.in/alexcesaro/statsd.v2" "github.com/facebookgo/inject" "github.com/facebookgo/startstop" flag "github.com/jessevdk/go-flags" "github.com/sirupsen/logrus" "github.com/honeycombio/samproxy/app" "github.com/honeycombio/samproxy/collect" "github.com/honeycombio/samproxy/config" "github.com/honeycombio/samproxy/logger" "github.com/honeycombio/samproxy/metrics" "github.com/honeycombio/samproxy/sample" "github.com/honeycombio/samproxy/sharder" "github.com/honeycombio/samproxy/transmit" ) // set by travis. var BuildID string var version string type Options struct { ConfiFile string `short:"c" long:"config" description:"Path to config file" default:"/etc/samproxy/samproxy.toml"` PeerType string `short:"p" long:"peer_type" description:"Peer type - should be redis or file" default:"file"` Version bool `short:"v" long:"version" description:"Print version number and exit"` } func main() { var opts Options flagParser := flag.NewParser(&opts, flag.Default) if extraArgs, err := flagParser.Parse(); err != nil || len(extraArgs) != 0 { fmt.Println("command line parsing error - call with --help for usage") os.Exit(1) } if BuildID == "" { version = "dev" } else { version = BuildID } if opts.Version { fmt.Println("Version: " + version) os.Exit(0) } var c config.Config var err error // either the flag or the env var will kick us in to redis mode if opts.PeerType == "redis" || os.Getenv(config.RedisHostEnvVarName) != "" { c = &config.RedisPeerFileConfig{} c.(*config.RedisPeerFileConfig).Path = opts.ConfiFile err = c.(*config.RedisPeerFileConfig).Start() } else { c = &config.FileConfig{Path: opts.ConfiFile} err = c.(*config.FileConfig).Start() } if err != nil { fmt.Printf("unable to load config: %+v\n", err) os.Exit(1) } a := app.App{ Version: version, } // get desired implementation for each dependency to inject lgr := 
logger.GetLoggerImplementation(c) collector := collect.GetCollectorImplementation(c) metricsr := metrics.GetMetricsImplementation(c) shrdr := sharder.GetSharderImplementation(c) samplerFactory := &sample.SamplerFactory{} // set log level logLevel, err := c.GetLoggingLevel() if err != nil { fmt.Printf("unable to get logging level from config: %v\n", err) os.Exit(1) } if err := lgr.SetLevel(logLevel); err != nil { fmt.Printf("unable to set logging level: %v\n", err) os.Exit(1) } // upstreamTransport is the http transport used to send things on to Honeycomb upstreamTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 10 * time.Second, }).Dial, TLSHandshakeTimeout: 15 * time.Second, } // peerTransport is the http transport used to send things to a local peer peerTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 3 * time.Second, }).Dial, TLSHandshakeTimeout: 1200 * time.Millisecond, } sdUpstream, _ := statsd.New(statsd.Prefix("samproxy.upstream")) sdPeer, _ := statsd.New(statsd.Prefix("samproxy.peer")) userAgentAddition := "samproxy/" + version upstreamClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ MaxBatchSize: 500, BatchTimeout: libhoney.DefaultBatchTimeout, MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetUpstreamBufferSize()), UserAgentAddition: userAgentAddition, Transport: upstreamTransport, BlockOnSend: true, Metrics: sdUpstream, }, }) if err != nil { fmt.Printf("unable to initialize upstream libhoney client") os.Exit(1) } peerClient, err := libhoney.NewClient(libhoney.ClientConfig{ Transmission: &transmission.Honeycomb{ MaxBatchSize: 500, BatchTimeout: libhoney.DefaultBatchTimeout, MaxConcurrentBatches: libhoney.DefaultMaxConcurrentBatches, PendingWorkCapacity: uint(c.GetPeerBufferSize()), UserAgentAddition: userAgentAddition, Transport: peerTransport, BlockOnSend: true, // gzip compression 
is expensive, and peers are most likely close to each other // so we can turn off gzip when forwarding to peers DisableGzipCompression: true, Metrics: sdPeer, }, }) if err != nil { fmt.Printf("unable to initialize upstream libhoney client") os.Exit(1) } var g inject.Graph err = g.Provide( &inject.Object{Value: c}, &inject.Object{Value: lgr}, &inject.Object{Value: upstreamTransport, Name: "upstreamTransport"}, &inject.Object{Value: peerTransport, Name: "peerTransport"}, &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: upstreamClient, Name: "upstream_"}, Name: "upstreamTransmission"}, &inject.Object{Value: &transmit.DefaultTransmission{LibhClient: peerClient, Name: "peer_"}, Name: "peerTransmission"}, &inject.Object{Value: shrdr}, &inject.Object{Value: collector}, &inject.Object{Value: metricsr}, &inject.Object{Value: version, Name: "version"}, &inject.Object{Value: samplerFactory}, &inject.Object{Value: &a}, ) if err != nil { fmt.Printf("failed to provide injection graph. error: %+v\n", err) os.Exit(1) } if err := g.Populate(); err != nil { fmt.Printf("failed to populate injection graph. error: %+v\n", err) os.Exit(1) } // the logger provided to startstop must be valid before any service is // started, meaning it can't rely on injected configs. make a custom logger // just for this step ststLogger := logrus.New() level, _ := logrus.ParseLevel(logLevel) ststLogger.SetLevel(level) defer startstop.Stop(g.Objects(), ststLogger) if err := startstop.Start(g.Objects(), ststLogger); err != nil { fmt.Printf("failed to start injected dependencies. error: %+v\n", err) os.Exit(1) } }
package main

import "fmt"

// Exercise fixtures: two sorted input slices and the output slice that
// realMain is expected to fill.
var (
	a = []int{1, 3, 5, 7, 9}
	b = []int{2, 4, 6, 8, 10}
	c = []int{}
)

// main runs the exercise and prints the resulting slice.
func main() {
	realMain(a, b, &c)
	fmt.Println(c)
}

// realMain is the exercise stub: it receives the two input slices and a
// pointer to the result slice to populate.
func realMain(a, b []int, c *[]int) {
	// TODO: write the code here
}
// This file is part of CycloneDX GoMod
//
// Licensed under the Apache License, Version 2.0 (the “License”);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an “AS IS” BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) OWASP Foundation. All Rights Reserved.

package cli

import (
	"context"
	"flag"

	appCmd "github.com/CycloneDX/cyclonedx-gomod/internal/cli/cmd/app"
	binCmd "github.com/CycloneDX/cyclonedx-gomod/internal/cli/cmd/bin"
	modCmd "github.com/CycloneDX/cyclonedx-gomod/internal/cli/cmd/mod"
	versionCmd "github.com/CycloneDX/cyclonedx-gomod/internal/cli/cmd/version"
	"github.com/peterbourgon/ff/v3/ffcli"
)

// New constructs the root cyclonedx-gomod command with all subcommands
// (app, bin, mod, version) attached. Invoking the root command without a
// subcommand prints usage (see execRootCmd).
func New() *ffcli.Command {
	return &ffcli.Command{
		Name:       "cyclonedx-gomod",
		ShortUsage: "cyclonedx-gomod <SUBCOMMAND> [FLAGS...] [<ARG>...]",
		LongHelp: `cyclonedx-gomod creates CycloneDX Software Bill of Materials (SBOM) from Go modules.

Multiple subcommands are offered, each targeting different use cases:

- SBOMs generated with "app" include only those modules that the target
  application actually depends on. Modules required by tests or packages
  that are not imported by the application are not included. Build
  constraints are evaluated, which enables a very detailed view of what's
  really compiled into an application's binary.
- SBOMs generated with "mod" include the aggregate of modules required by
  all packages in the target module. This optionally includes modules
  required by tests and test packages. Build constraints are NOT evaluated,
  allowing for a "whole picture" view on the target module's dependencies.
- "bin" offers support of generating rudimentary SBOMs from binaries built
  with Go modules.

Distributors of applications will typically use "app" and provide the
resulting SBOMs alongside their application's binaries. This enables users
to only consume SBOMs for artifacts that they actually use. For example, a
Go module may include "server" and "client" applications, of which only the
"client" is distributed to users. Additionally, modules included in
"client" may differ, depending on which platform it was compiled for.

Vendors or maintainers may choose to use "mod" for internal use, where it's
too cumbersome to deal with many SBOMs for the same product. Possible use
cases are:

- Tracking of component inventory
- Tracking of third party component licenses
- Continuous monitoring for vulnerabilities

"mod" may also be used to generate SBOMs for libraries.`,
		Subcommands: []*ffcli.Command{
			appCmd.New(),
			binCmd.New(),
			modCmd.New(),
			versionCmd.New(),
		},
		Exec: func(_ context.Context, _ []string) error {
			return execRootCmd()
		},
	}
}

// execRootCmd is the Exec handler of the root command: returning
// flag.ErrHelp makes ffcli print the usage text instead of running anything.
func execRootCmd() error {
	return flag.ErrHelp
}
// Copyright 2020 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package video import ( "bytes" "context" "encoding/base64" "image" "image/color" "net/http" "net/http/httptest" "os" "path" "strings" "chromiumos/tast/common/media/caps" "chromiumos/tast/common/perf" "chromiumos/tast/local/bundles/cros/video/play" "chromiumos/tast/local/chrome" "chromiumos/tast/testing" ) type drawOnCanvasParams struct { fileName string refFileName string } func init() { testing.AddTest(&testing.Test{ Func: DrawOnCanvas, LacrosStatus: testing.LacrosVariantUnknown, Desc: "Verifies that a video can be drawn once onto a 2D canvas", Contacts: []string{ "andrescj@chromium.org", "chromeos-gfx-video@google.com", }, SoftwareDeps: []string{"chrome"}, Params: []testing.Param{{ Name: "h264_360p_hw", Val: drawOnCanvasParams{ fileName: "still-colors-360p.h264.mp4", refFileName: "still-colors-360p.ref.png", }, ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_perbuild"}, ExtraData: []string{"video-on-canvas.html", "still-colors-360p.h264.mp4", "still-colors-360p.ref.png"}, ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"}, Fixture: "chromeVideo", }, { // TODO(andrescj): move to graphics_nightly after the test is stabilized. 
Name: "h264_360p_exotic_crop_hw", Val: drawOnCanvasParams{ fileName: "still-colors-720x480-cropped-to-640x360.h264.mp4", refFileName: "still-colors-360p.ref.png", }, ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_perbuild"}, ExtraData: []string{"video-on-canvas.html", "still-colors-720x480-cropped-to-640x360.h264.mp4", "still-colors-360p.ref.png"}, ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"}, Fixture: "chromeVideo", }, { Name: "h264_480p_hw", Val: drawOnCanvasParams{ fileName: "still-colors-480p.h264.mp4", refFileName: "still-colors-480p.ref.png", }, ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_perbuild"}, ExtraData: []string{"video-on-canvas.html", "still-colors-480p.h264.mp4", "still-colors-480p.ref.png"}, ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"}, Fixture: "chromeVideo", }, { Name: "h264_720p_hw", Val: drawOnCanvasParams{ fileName: "still-colors-720p.h264.mp4", refFileName: "still-colors-720p.ref.png", }, ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_perbuild"}, ExtraData: []string{"video-on-canvas.html", "still-colors-720p.h264.mp4", "still-colors-720p.ref.png"}, ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"}, Fixture: "chromeVideo", }, { Name: "h264_1080p_hw", Val: drawOnCanvasParams{ fileName: "still-colors-1080p.h264.mp4", refFileName: "still-colors-1080p.ref.png", }, ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_perbuild"}, ExtraData: []string{"video-on-canvas.html", "still-colors-1080p.h264.mp4", "still-colors-1080p.ref.png"}, ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"}, Fixture: "chromeVideo", }}, // TODO(andrescj): add tests for VP8 and VP9. }) } // DrawOnCanvas starts playing a video, draws it on a canvas, and checks a few interesting pixels. 
func DrawOnCanvas(ctx context.Context, s *testing.State) { server := httptest.NewServer(http.FileServer(s.DataFileSystem())) defer server.Close() cr := s.FixtValue().(*chrome.Chrome) url := path.Join(server.URL, "video-on-canvas.html") conn, err := cr.NewConn(ctx, url) if err != nil { s.Fatalf("Failed to open %v: %v", url, err) } defer conn.Close() // Open the reference file to set the canvas size equal to the expected size of the rendered video. // This is done in order to prevent scaling filtering artifacts from interfering with our color // checks later. params := s.Param().(drawOnCanvasParams) refPath := s.DataPath(params.refFileName) f, err := os.Open(refPath) if err != nil { s.Fatalf("Failed to open %v: %v", refPath, err) } defer f.Close() refImg, _, err := image.Decode(f) if err != nil { s.Fatalf("Could not decode %v: %v", refPath, err) } videoW := refImg.Bounds().Dx() videoH := refImg.Bounds().Dy() // Note that we set the size of the canvas to 5px more than the video on each dimension. // This is so that we can later check that nothing was drawn outside of the expected // bounds. if err := conn.Call(ctx, nil, "initializeCanvas", videoW+5, videoH+5); err != nil { s.Fatal("initializeCanvas() failed: ", err) } // Now we can play the video and draw it on the canvas. if err := conn.Call(ctx, nil, "drawFirstFrameOnCanvas", params.fileName); err != nil { s.Fatal("playAndDrawOnCanvas() failed: ", err) } // Get the contents of the canvas as a PNG image and decode it. 
var canvasPNGB64 string if err = conn.Eval(ctx, "getCanvasAsPNG()", &canvasPNGB64); err != nil { s.Fatal("getCanvasAsPNG() failed: ", err) } if !strings.HasPrefix(canvasPNGB64, "data:image/png;base64,") { s.Fatal("getCanvasAsPNG() returned data in an unknown format") } canvasPNG, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(canvasPNGB64, "data:image/png;base64,")) if err != nil { s.Fatal("Could not base64-decode the data returned by getCanvasAsPNG(): ", err) } canvasImg, _, err := image.Decode(bytes.NewReader(canvasPNG)) if err != nil { s.Fatal("Could not decode the image returned by getCanvasAsPNG(): ", err) } // A simple check first: the intrinsic dimensions of the video should match the dimensions of the reference image. var intrinsicVideoW, intrinsicVideoH int if err = conn.Eval(ctx, "document.getElementById('video').videoWidth", &intrinsicVideoW); err != nil { s.Fatal("Could not get the intrinsic video width: ", err) } if err = conn.Eval(ctx, "document.getElementById('video').videoHeight", &intrinsicVideoH); err != nil { s.Fatal("Could not get the intrinsic video height: ", err) } if intrinsicVideoW != videoW || intrinsicVideoH != videoH { s.Fatalf("Unexpected intrinsic dimensions: expected %dx%d; got %dx%d", videoW, videoH, intrinsicVideoW, intrinsicVideoH) } // Another simple check: nothing should have been drawn at (videoW, videoH). c := canvasImg.At(videoW, videoH) if play.ColorDistance(color.Black, c) != 0 { s.Fatalf("At (%d, %d): expected RGBA = %v; got RGBA = %v", videoW, videoH, color.Black, c) } // Measurement 1: // We'll sample a few interesting pixels and report the color distance with // respect to the reference image. 
samples := play.ColorSamplingPointsForStillColorsVideo(videoW, videoH) p := perf.NewValues() for k, v := range samples { expectedColor := refImg.At(v.X, v.Y) actualColor := canvasImg.At(v.X, v.Y) distance := play.ColorDistance(expectedColor, actualColor) // The distance threshold was decided by analyzing the data reported across // many devices. Note that: // // 1) We still report the distances as perf values so we can continue to // analyze and improve. // 2) We don't bother to report a total distance if this threshold is // exceeded because it would just make email alerts very noisy. if distance > 25 { s.Errorf("The color distance for %v = %d exceeds the threshold (25)", k, distance) } if distance != 0 { s.Logf("At %v (%d, %d): expected RGBA = %v; got RGBA = %v; distance = %d", k, v.X, v.Y, expectedColor, actualColor, distance) } p.Set(perf.Metric{ Name: k, Unit: "None", Direction: perf.SmallerIsBetter, }, float64(distance)) } if s.HasError() { p.Save(s.OutDir()) return } // Measurement 2: // We report an aggregate distance for the image: we go through all the pixels // in the canvas video to add up all the distances and then normalize by the // number of pixels at the end. totalDistance := 0.0 for y := 0; y < videoH; y++ { for x := 0; x < videoW; x++ { expectedColor := refImg.At(x, y) actualColor := canvasImg.At(x, y) totalDistance += float64(play.ColorDistance(expectedColor, actualColor)) } } totalDistance /= float64(videoW * videoH) s.Log("The total distance for the entire image is ", totalDistance) p.Set(perf.Metric{ Name: "total_distance", Unit: "None", Direction: perf.SmallerIsBetter, }, totalDistance) p.Save(s.OutDir()) }
package main import ( "net/url" ) type ClusterState struct { ClusterName string `json:"cluster_name"` MasterNode string `json:"master_node"` Nodes map[string]struct { Name string `json:"name"` TransportAddress string `json:"transport_address"` Attributes NodeAttributes `json:"attributes"` } `json:"nodes"` } func (cs *ClusterState) Fetch(u url.URL) error { u.Path = "/_cluster/state" return Unmarshal(u, cs) } // // // type NodeAttributes map[string]interface{} func (na NodeAttributes) IsData() bool { v, ok := na["data"] if !ok { return true // default true } if s, ok := v.(string); ok && s == "false" { return false } return true } func (na NodeAttributes) IsClient() bool { v, ok := na["client"] if !ok { return false // default false } if s, ok := v.(string); ok && s == "true" { return true } return false }
// Copyright (C) 2018 Satoshi Konno. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* uechosearch is a search utility for Echonet Lite. NAME uechosearch SYNOPSIS uechosearch [OPTIONS] DESCRIPTION uechosearch is a search utility for Echonet Lite. RETURN VALUE Return EXIT_SUCCESS or EXIT_FAILURE */ package main import ( "flag" "fmt" "time" "github.com/cybergarage/uecho-go/net/echonet/log" ) func main() { verbose := flag.Bool("v", false, "Enable verbose output") flag.Parse() // Setup logger if *verbose { log.SetSharedLogger(log.NewStdoutLogger(log.LevelTrace)) } // Start a controller for Echonet Lite node ctrl := NewSearchController() if *verbose { ctrl.SetListener(ctrl) } err := ctrl.Start() if err != nil { return } err = ctrl.SearchAllObjects() if err != nil { return } // Wait node responses in the local network time.Sleep(time.Second * 1) // Output all found nodes for _, node := range ctrl.GetNodes() { objs := node.GetObjects() if len(objs) <= 0 { fmt.Printf("%-15s\n", node.GetAddress()) continue } for _, obj := range objs { fmt.Printf("%-15s : %06X\n", node.GetAddress(), obj.GetCode()) } } // Stop the controller err = ctrl.Stop() if err != nil { return } }
package models import ( "io/ioutil" "os" "strconv" "strings" "testing" "time" log "github.com/sirupsen/logrus" . "github.com/smartystreets/goconvey/convey" ) func TestRateSchedule(t *testing.T) { log.SetOutput(os.Stdout) log.SetLevel(log.DebugLevel) SetDefaultFailureMode(FailureContinues) defer SetDefaultFailureMode(FailureHalts) var starts time.Time starts, _ = time.Parse(time.RFC3339, "2016-08-29T08:00:00.000Z") Convey("UnmarshalRateSchedule", t, func() { log.Debug(starts.Weekday()) var rateSchedule RateSchedule Convey("Read the rate schedule data", func() { file, err := ioutil.ReadFile("../data/rate_schedule.json") if err != nil { panic(err) } example := string(file) rateSchedule = UnmarshalRateSchedule(&example) for i := 0; i < len(rateSchedule.Rates); i++ { log.Debug("{{{") log.Debug(rateSchedule.Rates[i]) shortDays := strings.Split(rateSchedule.Rates[i].Days, ",") log.Debug(shortDays) days := make([]string, len(shortDays)) for i, day := range shortDays { days[i] = strings.ToUpper(day) } log.Debug(days) times := strings.Split(rateSchedule.Rates[i].Times, "-") log.Debug(times) hours := make([]int32, len(times)) for i, v := range times { x64, _ := strconv.Atoi(v) hours[i] = int32(x64) } log.Debug(hours) log.Debug(rateSchedule.Rates[i].Price) log.Debug("}}}") } }) }) }
package operations // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "fmt" "github.com/go-openapi/runtime" strfmt "github.com/go-openapi/strfmt" ) // GetConnectionDetailsReader is a Reader for the GetConnectionDetails structure. type GetConnectionDetailsReader struct { formats strfmt.Registry } // ReadResponse reads a server response into the recieved o. func (o *GetConnectionDetailsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewGetConnectionDetailsOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) } } // NewGetConnectionDetailsOK creates a GetConnectionDetailsOK with default headers values func NewGetConnectionDetailsOK() *GetConnectionDetailsOK { return &GetConnectionDetailsOK{} } /*GetConnectionDetailsOK handles this case with default header values. GetConnectionDetailsOK get connection details o k */ type GetConnectionDetailsOK struct { } func (o *GetConnectionDetailsOK) Error() string { return fmt.Sprintf("[GET /{uuid}][%d] getConnectionDetailsOK ", 200) } func (o *GetConnectionDetailsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { return nil }
package models import( "encoding/json" ) /** * Type definition for Type16Enum enum */ type Type16Enum int /** * Value collection for Type16Enum enum */ const ( Type16_KSTORAGEARRAY Type16Enum = 1 + iota Type16_KVOLUME ) func (r Type16Enum) MarshalJSON() ([]byte, error) { s := Type16EnumToValue(r) return json.Marshal(s) } func (r *Type16Enum) UnmarshalJSON(data []byte) error { var s string json.Unmarshal(data, &s) v := Type16EnumFromValue(s) *r = v return nil } /** * Converts Type16Enum to its string representation */ func Type16EnumToValue(type16Enum Type16Enum) string { switch type16Enum { case Type16_KSTORAGEARRAY: return "kStorageArray" case Type16_KVOLUME: return "kVolume" default: return "kStorageArray" } } /** * Converts Type16Enum Array to its string Array representation */ func Type16EnumArrayToValue(type16Enum []Type16Enum) []string { convArray := make([]string,len( type16Enum)) for i:=0; i<len(type16Enum);i++ { convArray[i] = Type16EnumToValue(type16Enum[i]) } return convArray } /** * Converts given value to its enum representation */ func Type16EnumFromValue(value string) Type16Enum { switch value { case "kStorageArray": return Type16_KSTORAGEARRAY case "kVolume": return Type16_KVOLUME default: return Type16_KSTORAGEARRAY } }
/* * @lc app=leetcode.cn id=102 lang=golang * * [102] 二叉树的层序遍历 */ package leetcode // @lc code=start /** * Definition for a binary tree node. * type TreeNode struct { * Val int * Left *TreeNode * Right *TreeNode * } */ type TreeNode struct { Val int Left *TreeNode Right *TreeNode } func levelOrder(root *TreeNode) [][]int { var res [][]int var queue []*TreeNode if root == nil { return res } p := root queue = append(queue, p) for len(queue) > 0 { size := len(queue) var temp []int for i := 0; i < size; i++ { p = queue[0] queue = queue[1:] temp = append(temp, p.Val) if p.Left != nil { queue = append(queue, p.Left) } if p.Right != nil { queue = append(queue, p.Right) } } res = append(res, temp) } return res } // @lc code=end
package main

import (
	zmq "github.com/pebbe/zmq4"
	"log"
	"time"
	"os"
)

// MessageHandler transforms an incoming Message into the reply Message.
type MessageHandler func(msg Message) Message

var (
	// context is the process-wide ZeroMQ context, created by Open().
	context *zmq.Context
	// handlers maps a message action to its handler.
	// NOTE(review): read and written without synchronization; this is only
	// safe if Handle() is always called before any dispatching goroutine
	// starts — confirm against callers.
	handlers map[string] MessageHandler = make(map[string] MessageHandler)
	// clients caches one request client per target endpoint (also
	// unsynchronized; see note above).
	clients map[string] *client = make(map[string] *client)
	// pub carries serialized messages to the publisher goroutine.
	pub chan []byte = make(chan []byte)
	// haltpub signals shutdown.
	// NOTE(review): up to three goroutines receive from this channel (the
	// publisher, the subscriber connect loop, and the subscription monitor),
	// but a single send wakes only one of them — verify the intended
	// shutdown semantics.
	haltpub chan bool = make(chan bool)
	// subConnect carries endpoint URLs for the subscriber socket to connect to.
	subConnect chan string = make(chan string)
	// sub carries raw messages received by the subscriber socket.
	sub chan []byte = make(chan []byte)
)

// Open initializes the shared ZeroMQ context; it must be called before any
// other function in this file.
func Open() {
	debug("opening context")
	var err error
	context, err = zmq.NewContext()
	if err != nil {
		log.Fatal(err)
	}
}

// Send delivers msg to target over a cached client and returns the parsed
// reply, or EmptyMessage when no reply payload comes back.
func Send(target string, msg Message) Message {
	debug("sending message", target, msg)
	if _, ok := clients[target]; !ok {
		clients[target] = newClient(target)
	}
	res := clients[target].send(msg.serialize())
	if res != nil {
		return ParseMessage(res)
	} else {
		return EmptyMessage
	}
}

// Publish queues msg for the publisher goroutine (blocks until it is taken,
// since the channel is unbuffered).
func Publish(msg Message) {
	pub <- msg.serialize()
}

// StartPublisher binds a PUB socket on target and forwards everything sent
// through Publish() until ClosePublisher() is called.
func StartPublisher(target string) {
	go func() {
		publisher, err := context.NewSocket(zmq.PUB)
		if err != nil {
			log.Fatal(err)
		}
		publisher.Bind(target)
		for {
			select {
			case msg := <- pub:
				publisher.SendBytes(msg, 0)
			case <- haltpub:
				publisher.Close()
				return
			}
		}
	}()
}

// ClosePublisher signals the publisher goroutine to close its socket and exit.
func ClosePublisher() {
	haltpub <- true
}

// ConnectSubscriber asks the subscriber goroutine to connect to url.
func ConnectSubscriber(url string) {
	subConnect <- url
}

// Subscribe opens a SUB socket filtered by topic, services connect requests,
// and feeds every received message into the dispatch loop started by
// monitorSubscriptions().
func Subscribe(topic string) {
	go func() {
		subscriber, err := context.NewSocket(zmq.SUB)
		if err != nil {
			log.Fatal(err)
		}
		subscriber.SetSubscribe(topic)
		go func() {
			for {
				select {
				case url := <- subConnect:
					subscriber.Connect(url)
				case <- haltpub:
					subscriber.Close()
					return
				}
			}
		}()
		// NOTE(review): there is no clean exit from this receive loop; once
		// the socket is closed by the goroutine above, RecvBytes errors and
		// log.Fatal terminates the whole process.
		for {
			msg, err := subscriber.RecvBytes(0)
			if err != nil {
				log.Fatal(err)
			}
			sub <- msg
		}
	}()
	monitorSubscriptions()
}

// monitorSubscriptions starts the goroutine that dispatches messages arriving
// on the sub channel to their registered handlers.
func monitorSubscriptions() {
	go func() {
		for {
			select {
			case msg := <- sub:
				m := ParseMessage(msg)
				debug("[sub] message", msg)
				onMessage(m)
			case <- haltpub:
				return
			}
		}
	}()
}

// onMessage routes msg to its registered handler and returns the serialized
// reply; unknown actions are logged and answered with EmptyMessage.
func onMessage(msg Message) []byte {
	if handler, ok := handlers[msg.action]; ok {
		return handler(msg).serialize()
	} else {
		log.Printf("[onMessage] No handler found %s", msg.action)
		return EmptyMessage.serialize()
	}
}

// Handle registers handler for the given action.
func Handle(action string, handler MessageHandler) {
	handlers[action] = handler
}

// Serve starts a reply server on target that dispatches incoming requests
// through onMessage.
func Serve(target string) {
	s := NewServer(target, func(msg []byte) []byte {
		debug("[server] message", target, msg)
		m := ParseMessage(msg)
		return onMessage(m)
	})
	go s.serve()
}

// main wires up either the server role (first CLI arg "server") or the
// publisher demo role.
func main() {
	Open()

	if os.Args[1] == "server" {
		Serve("tcp://*:3000")
		Subscribe("")
		ConnectSubscriber("tcp://localhost:3001")
	} else {
		//Send("tcp://localhost:3000", NewMessage("hello"))
		StartPublisher("tcp://*:3001")
		for {
			Publish(NewMessage("hello"))
			time.Sleep(1 * time.Second)
		}
	}

	// NOTE(review): the publisher branch above loops forever, so everything
	// below only ever runs in the server branch — confirm the Handle
	// registration is not meant for the publisher side too.
	Handle("hello", func(msg Message) Message {
		return EmptyMessage
	})

	time.Sleep(1 * time.Second)
	time.Sleep(20 * time.Second)
}
package Gas_Station func canCompleteCircuit(gas []int, cost []int) int { start, remain, debt := 0, 0, 0 for k, v := range gas { remain += v - cost[k] if remain < 0 { start = k+1 debt += remain remain = 0 } } if remain+debt < 0 { return -1 } return start }
package confsvr

import (
	"github.com/oceanho/gw"
	"github.com/oceanho/gw/contrib/apps/confsvr/api"
	"gorm.io/gorm"
)

// App is the gw application for the configuration server.
type App struct {
}

// New creates a confsvr App.
func New() App {
	return App{}
}

// Name returns the unique application name.
func (a App) Name() string {
	return "gw.confsvr"
}

// Router returns the route prefix under which this app is mounted.
func (a App) Router() string {
	return "confsvr"
}

// Register wires up the HTTP routes for the auth, env, and namespace services.
func (a App) Register(router *gw.RouterGroup) {
	// Auth service routers.
	router.GET("auth/auth", api.GetAuth)
	// NOTE(review): "auth/create" is bound to api.CreateEnv, which looks like
	// a copy/paste from the Env section below — presumably api.CreateAuth was
	// intended. Confirm against the api package before changing.
	router.GET("auth/create", api.CreateEnv)
	router.GET("auth/modify", api.ModifyAuth)
	router.GET("auth/destroy", api.DestroyAuth)

	// Env service routers.
	router.GET("env/get", api.GetEnv)
	router.GET("env/create", api.CreateEnv)
	router.GET("env/modify", api.ModifyEnv)
	router.GET("env/destroy", api.DestroyEnv)

	// NameSpace service routers.
	router.GET("ns/get", api.GetNS)
	router.GET("ns/create", api.CreateNS)
	router.GET("ns/modify", api.ModifyNS)
	router.GET("ns/destroy", api.DestroyNS)
}

// Migrate is the startup migration hook; currently it only pings the store.
// NOTE(review): both the db.DB() and Ping() errors are discarded, so a dead
// database goes unnoticed here.
func (a App) Migrate(state *gw.ServerState) {
	db := state.Store().GetDbStore()
	d, _ := db.DB()
	d.Ping()
}

// Use customizes server options; the DB setup handler is a pass-through.
func (a App) Use(opt *gw.ServerOption) {
	opt.StoreDbSetupHandler = func(ctx gw.Context, db *gorm.DB) *gorm.DB {
		return db
	}
}

// OnStart is called when the server starts; no-op.
func (a App) OnStart(state *gw.ServerState) {
}

// OnShutDown is called when the server shuts down; no-op.
func (a App) OnShutDown(state *gw.ServerState) {
}
// Tomato static website generator // Copyright Quentin Ribac, 2018 // Free software license can be found in the LICENSE file. package main import ( "fmt" ) // Author is the type for an author of the website. type Author struct { Name string `json: "name"` Email string `json: "email"` } // Helper prints a html link to an author. func (author *Author) Helper() string { return fmt.Sprintf("<address><a href=\"mailto:%s\">%s</a></address>", author.Email, author.Name) }
package odoo

import (
	"fmt"
)

// SaleOrderLine represents sale.order.line model.
//
// NOTE(review): the xmlrpc tags consistently spell "omptempty". This matches
// the go-odoo generator's output across all generated models; verify whether
// the xmlrpc marshaler keys on that exact spelling before "fixing" it to
// "omitempty".
type SaleOrderLine struct {
	LastUpdate             *Time      `xmlrpc:"__last_update,omptempty"`
	AmtInvoiced            *Float     `xmlrpc:"amt_invoiced,omptempty"`
	AmtToInvoice           *Float     `xmlrpc:"amt_to_invoice,omptempty"`
	AnalyticTagIds         *Relation  `xmlrpc:"analytic_tag_ids,omptempty"`
	AutosalesBaseOrderLine *Many2One  `xmlrpc:"autosales_base_order_line,omptempty"`
	AutosalesLine          *Bool      `xmlrpc:"autosales_line,omptempty"`
	CompanyId              *Many2One  `xmlrpc:"company_id,omptempty"`
	CreateDate             *Time      `xmlrpc:"create_date,omptempty"`
	CreateUid              *Many2One  `xmlrpc:"create_uid,omptempty"`
	CurrencyId             *Many2One  `xmlrpc:"currency_id,omptempty"`
	CustomerLead           *Float     `xmlrpc:"customer_lead,omptempty"`
	Discount               *Float     `xmlrpc:"discount,omptempty"`
	DisplayName            *String    `xmlrpc:"display_name,omptempty"`
	Id                     *Int       `xmlrpc:"id,omptempty"`
	InvoiceLines           *Relation  `xmlrpc:"invoice_lines,omptempty"`
	InvoiceStatus          *Selection `xmlrpc:"invoice_status,omptempty"`
	IsDownpayment          *Bool      `xmlrpc:"is_downpayment,omptempty"`
	IsService              *Bool      `xmlrpc:"is_service,omptempty"`
	LayoutCategoryId       *Many2One  `xmlrpc:"layout_category_id,omptempty"`
	LayoutCategorySequence *Int       `xmlrpc:"layout_category_sequence,omptempty"`
	MoveIds                *Relation  `xmlrpc:"move_ids,omptempty"`
	Name                   *String    `xmlrpc:"name,omptempty"`
	OrderId                *Many2One  `xmlrpc:"order_id,omptempty"`
	OrderPartnerId         *Many2One  `xmlrpc:"order_partner_id,omptempty"`
	PriceReduce            *Float     `xmlrpc:"price_reduce,omptempty"`
	PriceReduceTaxexcl     *Float     `xmlrpc:"price_reduce_taxexcl,omptempty"`
	PriceReduceTaxinc      *Float     `xmlrpc:"price_reduce_taxinc,omptempty"`
	PriceSubtotal          *Float     `xmlrpc:"price_subtotal,omptempty"`
	PriceTax               *Float     `xmlrpc:"price_tax,omptempty"`
	PriceTotal             *Float     `xmlrpc:"price_total,omptempty"`
	PriceUnit              *Float     `xmlrpc:"price_unit,omptempty"`
	ProductId              *Many2One  `xmlrpc:"product_id,omptempty"`
	ProductImage           *String    `xmlrpc:"product_image,omptempty"`
	ProductPackaging       *Many2One  `xmlrpc:"product_packaging,omptempty"`
	ProductUom             *Many2One  `xmlrpc:"product_uom,omptempty"`
	ProductUomQty          *Float     `xmlrpc:"product_uom_qty,omptempty"`
	ProductUpdatable       *Bool      `xmlrpc:"product_updatable,omptempty"`
	QtyDelivered           *Float     `xmlrpc:"qty_delivered,omptempty"`
	QtyDeliveredUpdateable *Bool      `xmlrpc:"qty_delivered_updateable,omptempty"`
	QtyInvoiced            *Float     `xmlrpc:"qty_invoiced,omptempty"`
	QtyToInvoice           *Float     `xmlrpc:"qty_to_invoice,omptempty"`
	RouteId                *Many2One  `xmlrpc:"route_id,omptempty"`
	SalesmanId             *Many2One  `xmlrpc:"salesman_id,omptempty"`
	Sequence               *Int       `xmlrpc:"sequence,omptempty"`
	State                  *Selection `xmlrpc:"state,omptempty"`
	TaskId                 *Many2One  `xmlrpc:"task_id,omptempty"`
	TaxId                  *Relation  `xmlrpc:"tax_id,omptempty"`
	WriteDate              *Time      `xmlrpc:"write_date,omptempty"`
	WriteUid               *Many2One  `xmlrpc:"write_uid,omptempty"`
}

// SaleOrderLines represents array of sale.order.line model.
type SaleOrderLines []SaleOrderLine

// SaleOrderLineModel is the odoo model name.
const SaleOrderLineModel = "sale.order.line"

// Many2One convert SaleOrderLine to *Many2One.
func (sol *SaleOrderLine) Many2One() *Many2One {
	return NewMany2One(sol.Id.Get(), "")
}

// CreateSaleOrderLine creates a new sale.order.line model and returns its id.
func (c *Client) CreateSaleOrderLine(sol *SaleOrderLine) (int64, error) {
	ids, err := c.CreateSaleOrderLines([]*SaleOrderLine{sol})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateSaleOrderLines creates new sale.order.line models and returns their ids.
func (c *Client) CreateSaleOrderLines(sols []*SaleOrderLine) ([]int64, error) {
	var vv []interface{}
	for _, v := range sols {
		vv = append(vv, v)
	}
	return c.Create(SaleOrderLineModel, vv)
}

// UpdateSaleOrderLine updates an existing sale.order.line record.
func (c *Client) UpdateSaleOrderLine(sol *SaleOrderLine) error {
	return c.UpdateSaleOrderLines([]int64{sol.Id.Get()}, sol)
}

// UpdateSaleOrderLines updates existing sale.order.line records.
// All records (represented by ids) will be updated by sol values. func (c *Client) UpdateSaleOrderLines(ids []int64, sol *SaleOrderLine) error { return c.Update(SaleOrderLineModel, ids, sol) } // DeleteSaleOrderLine deletes an existing sale.order.line record. func (c *Client) DeleteSaleOrderLine(id int64) error { return c.DeleteSaleOrderLines([]int64{id}) } // DeleteSaleOrderLines deletes existing sale.order.line records. func (c *Client) DeleteSaleOrderLines(ids []int64) error { return c.Delete(SaleOrderLineModel, ids) } // GetSaleOrderLine gets sale.order.line existing record. func (c *Client) GetSaleOrderLine(id int64) (*SaleOrderLine, error) { sols, err := c.GetSaleOrderLines([]int64{id}) if err != nil { return nil, err } if sols != nil && len(*sols) > 0 { return &((*sols)[0]), nil } return nil, fmt.Errorf("id %v of sale.order.line not found", id) } // GetSaleOrderLines gets sale.order.line existing records. func (c *Client) GetSaleOrderLines(ids []int64) (*SaleOrderLines, error) { sols := &SaleOrderLines{} if err := c.Read(SaleOrderLineModel, ids, nil, sols); err != nil { return nil, err } return sols, nil } // FindSaleOrderLine finds sale.order.line record by querying it with criteria. func (c *Client) FindSaleOrderLine(criteria *Criteria) (*SaleOrderLine, error) { sols := &SaleOrderLines{} if err := c.SearchRead(SaleOrderLineModel, criteria, NewOptions().Limit(1), sols); err != nil { return nil, err } if sols != nil && len(*sols) > 0 { return &((*sols)[0]), nil } return nil, fmt.Errorf("sale.order.line was not found with criteria %v", criteria) } // FindSaleOrderLines finds sale.order.line records by querying it // and filtering it with criteria and options. 
func (c *Client) FindSaleOrderLines(criteria *Criteria, options *Options) (*SaleOrderLines, error) { sols := &SaleOrderLines{} if err := c.SearchRead(SaleOrderLineModel, criteria, options, sols); err != nil { return nil, err } return sols, nil } // FindSaleOrderLineIds finds records ids by querying it // and filtering it with criteria and options. func (c *Client) FindSaleOrderLineIds(criteria *Criteria, options *Options) ([]int64, error) { ids, err := c.Search(SaleOrderLineModel, criteria, options) if err != nil { return []int64{}, err } return ids, nil } // FindSaleOrderLineId finds record id by querying it with criteria. func (c *Client) FindSaleOrderLineId(criteria *Criteria, options *Options) (int64, error) { ids, err := c.Search(SaleOrderLineModel, criteria, options) if err != nil { return -1, err } if len(ids) > 0 { return ids[0], nil } return -1, fmt.Errorf("sale.order.line was not found with criteria %v and options %v", criteria, options) }
package summary_ranges import ( "fmt" ) type Range struct { from *int cur *int to *int } func (r Range) String() string { if r.to == nil { return fmt.Sprintf("%d", *r.from) } return fmt.Sprintf("%d->%d", *r.from, *r.to) } func summaryRanges(nums []int) []string { if len(nums) == 0 { return []string{} } ranges := make([]Range, 1) cur := &ranges[0] for _, num := range nums { num := num if cur.from == nil { cur.from, cur.cur = &num, &num continue } if num-*cur.cur == 1 { cur.cur, cur.to = &num, &num continue } ranges = append(ranges, Range{}) cur = &ranges[len(ranges)-1] cur.from, cur.cur = &num, &num } var res []string for _, r := range ranges { res = append(res, r.String()) } return res }
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"

	"github.com/golang/glog"
	"github.com/spf13/cobra"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"

	"github.com/openshift/machine-config-operator/pkg/operator"
	"github.com/openshift/machine-config-operator/pkg/version"
)

var (
	// bootstrapCmd renders the MCO manifests needed during cluster bootstrap.
	bootstrapCmd = &cobra.Command{
		Use:   "bootstrap",
		Short: "Machine Config Operator in bootstrap mode",
		Long:  "",
		Run:   runBootstrapCmd,
	}

	// bootstrapOpts collects the flag values for the bootstrap command.
	bootstrapOpts struct {
		etcdCAFile          string
		rootCAFile          string
		pullSecretFile      string
		configFile          string
		oscontentImage      string
		imagesConfigMapFile string
		mccImage            string
		mcsImage            string
		mcdImage            string
		etcdImage           string
		setupEtcdEnvImage   string
		destinationDir      string
	}
)

func init() {
	rootCmd.AddCommand(bootstrapCmd)
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.etcdCAFile, "etcd-ca", "/etc/ssl/etcd/ca.crt", "path to etcd CA certificate")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.rootCAFile, "root-ca", "/etc/ssl/kubernetes/ca.crt", "path to root CA certificate")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.pullSecretFile, "pull-secret", "/assets/manifests/pull.json", "path to secret manifest that contains pull secret.")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.destinationDir, "dest-dir", "", "The destination directory where MCO writes the manifests.")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.imagesConfigMapFile, "images-json-configmap", "", "ConfigMap that contains images.json for MCO.")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.mccImage, "machine-config-controller-image", "", "Image for Machine Config Controller. (this overrides the image from --images-json-configmap)")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.mcsImage, "machine-config-server-image", "", "Image for Machine Config Server. (this overrides the image from --images-json-configmap)")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.mcdImage, "machine-config-daemon-image", "", "Image for Machine Config Daemon. (this overrides the image from --images-json-configmap)")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.oscontentImage, "machine-config-oscontent-image", "", "Image for osImageURL")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.etcdImage, "etcd-image", "", "Image for Etcd. (this overrides the image from --images-json-configmap)")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.setupEtcdEnvImage, "setup-etcd-env-image", "", "Image for Setup Etcd Environment. (this overrides the image from --images-json-configmap)")
	bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.configFile, "config-file", "", "ClusterConfig ConfigMap file.")
}

// runBootstrapCmd validates the flags, resolves the operator images
// (defaults, then optional images.json ConfigMap, then per-image flag
// overrides) and renders the bootstrap manifests into --dest-dir.
// Any failure is fatal: the process exits via glog.Fatal*.
func runBootstrapCmd(cmd *cobra.Command, args []string) {
	// flag.Set's error is deliberately ignored: "logtostderr" is a known
	// glog flag and the value is a constant.
	flag.Set("logtostderr", "true")
	flag.Parse()

	// To help debugging, immediately log version
	glog.Infof("Version: %+v", version.Version)

	if bootstrapOpts.destinationDir == "" {
		glog.Fatal("--dest-dir cannot be empty")
	}
	if bootstrapOpts.configFile == "" {
		glog.Fatal("--config-file cannot be empty")
	}

	// Start from built-in defaults, then overlay the images.json payload
	// if a ConfigMap file was supplied.
	imgs := operator.DefaultImages()
	if bootstrapOpts.imagesConfigMapFile != "" {
		imgsRaw, err := rawImagesFromConfigMapOnDisk(bootstrapOpts.imagesConfigMapFile)
		if err != nil {
			glog.Fatal(err)
		}
		// imgsRaw is already []byte; no conversion needed.
		if err := json.Unmarshal(imgsRaw, &imgs); err != nil {
			glog.Fatal(err)
		}
	}

	// Explicit per-image flags take precedence over the ConfigMap contents.
	if bootstrapOpts.mccImage != "" {
		imgs.MachineConfigController = bootstrapOpts.mccImage
	}
	if bootstrapOpts.mcsImage != "" {
		imgs.MachineConfigServer = bootstrapOpts.mcsImage
	}
	if bootstrapOpts.mcdImage != "" {
		imgs.MachineConfigDaemon = bootstrapOpts.mcdImage
	}
	if bootstrapOpts.etcdImage != "" {
		imgs.Etcd = bootstrapOpts.etcdImage
	}
	if bootstrapOpts.setupEtcdEnvImage != "" {
		imgs.SetupEtcdEnv = bootstrapOpts.setupEtcdEnvImage
	}
	if bootstrapOpts.oscontentImage != "" {
		imgs.MachineOSContent = bootstrapOpts.oscontentImage
	}

	if err := operator.RenderBootstrap(
		bootstrapOpts.configFile,
		bootstrapOpts.etcdCAFile, bootstrapOpts.rootCAFile, bootstrapOpts.pullSecretFile,
		imgs,
		bootstrapOpts.destinationDir,
	); err != nil {
		glog.Fatalf("error rendering bootstrap manifests: %v", err)
	}
}

// rawImagesFromConfigMapOnDisk reads a serialized corev1.ConfigMap from
// file and returns the raw bytes of its "images.json" entry. A missing
// key yields an empty (non-nil) slice, not an error.
func rawImagesFromConfigMapOnDisk(file string) ([]byte, error) {
	// FIX: previously this read bootstrapOpts.imagesConfigMapFile and
	// silently ignored the file parameter. The only current caller passes
	// that same global, so behavior at the existing call site is unchanged,
	// but the function now honors its contract.
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}

	obji, err := runtime.Decode(scheme.Codecs.UniversalDecoder(corev1.SchemeGroupVersion), data)
	if err != nil {
		return nil, err
	}
	cm, ok := obji.(*corev1.ConfigMap)
	if !ok {
		return nil, fmt.Errorf("expected *corev1.ConfigMap found %T", obji)
	}
	return []byte(cm.Data["images.json"]), nil
}
package repository import ( "context" "errors" "github.com/CyganFx/snippetBox-microservice/user_details/pkg/domain" "github.com/jackc/pgconn" "github.com/jackc/pgx/v4/pgxpool" "golang.org/x/crypto/bcrypt" "strings" "time" ) type UserRepository struct { Pool *pgxpool.Pool } func NewUserRepository(Pool *pgxpool.Pool) UserRepositoryInterface { return &UserRepository{Pool: Pool} } func (r *UserRepository) Insert(name, email, password string) error { hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), 12) if err != nil { return err } stmt := `INSERT INTO users (name, email, hashed_password, created) VALUES($1, $2, $3, $4)` _, err = r.Pool.Exec(context.Background(), stmt, name, email, string(hashedPassword), time.Now()) if err != nil { postgresError := err.(*pgconn.PgError) if errors.As(err, &postgresError) { if postgresError.Code == "23505" && strings.Contains(postgresError.Message, "users_uc_email") { return domain.ErrDuplicateEmail } } return err } return nil } func (r *UserRepository) Authenticate(email, password string) (*domain.User, error) { var id int var username string var hashedPassword []byte stmt := "SELECT id, hashed_password, name FROM users WHERE email = $1 AND active = TRUE" row := r.Pool.QueryRow(context.Background(), stmt, email) err := row.Scan(&id, &hashedPassword, &username) if err != nil { if err.Error() == "no rows in result set" { return nil, domain.ErrInvalidCredentials } else { return nil, err } } err = bcrypt.CompareHashAndPassword(hashedPassword, []byte(password)) if err != nil { if errors.Is(err, bcrypt.ErrMismatchedHashAndPassword) { return nil, domain.ErrInvalidCredentials } else { return nil, err } } user := &domain.User{ ID: id, Name: username, } return user, nil }
package code import "errors" // ErrUnusableCode unusable code error. var ErrUnusableCode = errors.New("unusable code")
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//

package aws

import (
	"context"
	"fmt"
	"os"
	"strings"
	"sync"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	kmsTypes "github.com/aws/aws-sdk-go-v2/service/kms/types"
	"github.com/aws/aws-sdk-go-v2/service/rds"
	rdsTypes "github.com/aws/aws-sdk-go-v2/service/rds/types"
	gt "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi"
	gtTypes "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
	"github.com/golang/mock/gomock"
	"github.com/mattermost/mattermost-cloud/internal/testlib"
	"github.com/mattermost/mattermost-cloud/model"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
)

// Tests provisioning database acceptance path. Use this test for deriving other tests.
// If tests are broken, this should be the first test to get fixed.
//
// The gomock.InOrder block below encodes the exact AWS call sequence
// Provision is expected to make; reordering expectations will fail the test.
func (a *AWSTestSuite) TestProvisioningRDSAcceptance() {
	database := RDSDatabase{
		databaseType:   model.DatabaseEngineTypeMySQL,
		installationID: a.InstallationA.ID,
		client:         a.Mocks.AWS,
	}

	gomock.InOrder(
		a.Mocks.Log.Logger.EXPECT().
			WithFields(log.Fields{
				"db-cluster-name": CloudID(a.InstallationA.ID),
				"database-type":   database.databaseType,
			}).
			Return(testlib.NewLoggerEntry()).
			Times(1),

		// Get cluster installations from data store.
		a.Mocks.Model.DatabaseInstallationStore.EXPECT().
			GetClusterInstallations(gomock.Any()).
			Do(func(input *model.ClusterInstallationFilter) {
				a.Assert().Equal(input.InstallationID, a.InstallationA.ID)
			}).
			Return([]*model.ClusterInstallation{{ID: a.ClusterA.ID}}, nil).
			Times(1),

		// Find the VPC which the installation belongs to.
		a.Mocks.API.EC2.EXPECT().DescribeVpcs(context.TODO(), gomock.Any()).
			Return(&ec2.DescribeVpcsOutput{Vpcs: []ec2Types.Vpc{{VpcId: &a.VPCa}}}, nil).
			Times(1),

		// Create a database secret.
		a.Mocks.API.SecretsManager.EXPECT().
			GetSecretValue(gomock.Any(), gomock.Any()).
			Return(&secretsmanager.GetSecretValueOutput{SecretString: &a.SecretString}, nil).
			Times(1),

		// Create encryption key since none has been created yet.
		a.Mocks.API.ResourceGroupsTagging.EXPECT().
			GetResources(gomock.Any(), gomock.Any()).
			Return(&gt.GetResourcesOutput{}, nil).
			Do(func(ctx context.Context, input *gt.GetResourcesInput, optFns ...func(*gt.Options)) {
				a.Assert().Equal(DefaultRDSEncryptionTagKey, *input.TagFilters[0].Key)
				a.Assert().Equal(CloudID(a.InstallationA.ID), input.TagFilters[0].Values[0])
				a.Assert().Nil(input.PaginationToken)
			}).
			Times(1),

		a.Mocks.API.KMS.EXPECT().
			CreateKey(gomock.Any(), gomock.Any()).
			Do(func(ctx context.Context, input *kms.CreateKeyInput, optFns ...func(*kms.Options)) {
				a.Assert().Equal(*input.Tags[0].TagKey, DefaultRDSEncryptionTagKey)
				a.Assert().Equal(*input.Tags[0].TagValue, CloudID(a.InstallationA.ID))
			}).
			Return(&kms.CreateKeyOutput{
				KeyMetadata: &kmsTypes.KeyMetadata{
					Arn:      aws.String(a.ResourceARN),
					KeyId:    aws.String(a.RDSEncryptionKeyID),
					KeyState: kmsTypes.KeyStateEnabled,
				},
			}, nil).
			Times(1),

		// Get single tenant database configuration.
		a.Mocks.Model.DatabaseInstallationStore.EXPECT().GetSingleTenantDatabaseConfigForInstallation(a.InstallationA.ID).
			Return(&model.SingleTenantDatabaseConfig{PrimaryInstanceType: "db.r5.large", ReplicaInstanceType: "db.r5.small", ReplicasCount: 1}, nil).
			Times(1),

		// Retrieve the Availability Zones.
		a.Mocks.API.EC2.EXPECT().DescribeAvailabilityZones(context.TODO(), gomock.Any()).
			Return(&ec2.DescribeAvailabilityZonesOutput{AvailabilityZones: []ec2Types.AvailabilityZone{{ZoneName: aws.String("us-honk-1a")}, {ZoneName: aws.String("us-honk-1b")}}}, nil).
			Times(1),
	)

	a.SetExpectCreateDBCluster()
	a.SetExpectCreateDBInstance()

	err := database.Provision(a.Mocks.Model.DatabaseInstallationStore, a.Mocks.Log.Logger)
	a.Assert().NoError(err)
}

// Tests provisioning database assuming that an encryption key already exists.
// Identical to the acceptance test above except the ResourceGroupsTagging
// lookup returns an existing key, so DescribeKey is called instead of CreateKey.
func (a *AWSTestSuite) TestProvisioningRDSWithExistentEncryptionKey() {
	database := RDSDatabase{
		databaseType:   model.DatabaseEngineTypeMySQL,
		installationID: a.InstallationA.ID,
		client:         a.Mocks.AWS,
	}

	gomock.InOrder(
		a.Mocks.Log.Logger.EXPECT().
			WithFields(log.Fields{
				"db-cluster-name": CloudID(a.InstallationA.ID),
				"database-type":   database.databaseType,
			}).
			Return(testlib.NewLoggerEntry()).
			Times(1),

		// Get cluster installations from data store.
		a.Mocks.Model.DatabaseInstallationStore.EXPECT().
			GetClusterInstallations(gomock.Any()).
			Do(func(input *model.ClusterInstallationFilter) {
				a.Assert().Equal(input.InstallationID, a.InstallationA.ID)
			}).
			Return([]*model.ClusterInstallation{{ID: a.ClusterA.ID}}, nil).
			Times(1),

		// Find the VPC which the installation belongs to.
		a.Mocks.API.EC2.EXPECT().DescribeVpcs(context.TODO(), gomock.Any()).
			Return(&ec2.DescribeVpcsOutput{Vpcs: []ec2Types.Vpc{{VpcId: &a.VPCa}}}, nil).
			Times(1),

		// Create a database secret.
		a.Mocks.API.SecretsManager.EXPECT().
			GetSecretValue(gomock.Any(), gomock.Any()).
			Return(&secretsmanager.GetSecretValueOutput{SecretString: &a.SecretString}, nil).
			Times(1),

		// Get encryption key associated with this installation. This step assumes that
		// the key already exists.
		a.Mocks.API.ResourceGroupsTagging.EXPECT().
			GetResources(gomock.Any(), gomock.Any()).
			Do(func(ctx context.Context, input *gt.GetResourcesInput, optFns ...func(*gt.Options)) {
				a.Assert().Equal(DefaultRDSEncryptionTagKey, *input.TagFilters[0].Key)
				a.Assert().Equal(CloudID(a.InstallationA.ID), input.TagFilters[0].Values[0])
				a.Assert().Nil(input.PaginationToken)
			}).
			Return(&gt.GetResourcesOutput{
				ResourceTagMappingList: []gtTypes.ResourceTagMapping{
					{
						ResourceARN: aws.String(a.ResourceARN),
					},
				},
			}, nil).
			Times(1),

		a.Mocks.API.KMS.EXPECT().
			DescribeKey(gomock.Any(), gomock.Any()).
			Return(&kms.DescribeKeyOutput{
				KeyMetadata: &kmsTypes.KeyMetadata{
					Arn:      aws.String(a.ResourceARN),
					KeyId:    aws.String(a.RDSEncryptionKeyID),
					KeyState: kmsTypes.KeyStateEnabled,
				},
			}, nil).
			Do(func(ctx context.Context, input *kms.DescribeKeyInput, optFns ...func(*kms.Options)) {
				a.Assert().Equal(*input.KeyId, a.ResourceARN)
			}).
			Times(1),

		// Get single tenant database configuration.
		a.Mocks.Model.DatabaseInstallationStore.EXPECT().GetSingleTenantDatabaseConfigForInstallation(a.InstallationA.ID).
			Return(&model.SingleTenantDatabaseConfig{PrimaryInstanceType: "db.r5.large", ReplicaInstanceType: "db.r5.small", ReplicasCount: 1}, nil).
			Times(1),

		// Retrieve the Availability Zones.
		a.Mocks.API.EC2.EXPECT().DescribeAvailabilityZones(context.TODO(), gomock.Any()).
			Return(&ec2.DescribeAvailabilityZonesOutput{AvailabilityZones: []ec2Types.AvailabilityZone{{ZoneName: aws.String("us-honk-1a")}, {ZoneName: aws.String("us-honk-1b")}}}, nil).
			Times(1),
	)

	a.SetExpectCreateDBCluster()
	a.SetExpectCreateDBInstance()

	err := database.Provision(a.Mocks.Model.DatabaseInstallationStore, a.Mocks.Log.Logger)
	a.Assert().NoError(err)
}

// TestSnapshot verifies that Snapshot issues CreateDBClusterSnapshot with
// the expected identifier and tags.
func (a *AWSTestSuite) TestSnapshot() {
	database := RDSDatabase{
		databaseType:   model.DatabaseEngineTypeMySQL,
		installationID: a.InstallationA.ID,
		client:         a.Mocks.AWS,
	}

	gomock.InOrder(
		a.Mocks.Log.Logger.EXPECT().
			WithFields(log.Fields{
				"db-cluster-name": CloudID(a.InstallationA.ID),
				"database-type":   database.databaseType,
			}).
			Return(testlib.NewLoggerEntry()).
			Times(1),

		a.Mocks.API.RDS.EXPECT().CreateDBClusterSnapshot(gomock.Any(), gomock.Any()).
			Return(&rds.CreateDBClusterSnapshotOutput{}, nil).
			Do(func(ctx context.Context, input *rds.CreateDBClusterSnapshotInput, optFns ...func(*rds.Options)) {
				a.Assert().Equal(*input.DBClusterIdentifier, CloudID(a.ClusterA.ID))
				a.Assert().True(strings.Contains(*input.DBClusterSnapshotIdentifier, fmt.Sprintf("%s-snapshot-", a.ClusterA.ID)))
				a.Assert().Greater(len(input.Tags), 0)
				a.Assert().Equal(*input.Tags[0].Key, DefaultClusterInstallationSnapshotTagKey)
				a.Assert().Equal(*input.Tags[0].Value, RDSSnapshotTagValue(CloudID(a.ClusterA.ID)))
			}).Times(1),
	)

	err := database.Snapshot(a.Mocks.AWS.store, a.Mocks.Log.Logger)
	a.Assert().NoError(err)
}

// TestSnapshotError verifies that a CreateDBClusterSnapshot failure is
// wrapped and surfaced by Snapshot.
func (a *AWSTestSuite) TestSnapshotError() {
	database := RDSDatabase{
		databaseType:   model.DatabaseEngineTypeMySQL,
		installationID: a.InstallationA.ID,
		client:         a.Mocks.AWS,
	}

	gomock.InOrder(
		a.Mocks.Log.Logger.EXPECT().
			WithFields(log.Fields{
				"db-cluster-name": CloudID(a.InstallationA.ID),
				"database-type":   database.databaseType,
			}).
			Return(testlib.NewLoggerEntry()).
			Times(1),

		a.Mocks.API.RDS.EXPECT().
			CreateDBClusterSnapshot(gomock.Any(), gomock.Any()).
			Return(nil, errors.New("database is not stable")).
			Times(1),
	)

	err := database.Snapshot(a.Mocks.AWS.store, a.Mocks.Log.Logger)
	a.Assert().Error(err)
	a.Assert().Equal("failed to create a DB cluster snapshot: database is not stable", err.Error())
}

// Helpers

// This whole block deals with RDS DB Cluster creation.
// The first DescribeDBClusters deliberately errors so the code under test
// takes the "cluster does not exist yet" creation path.
func (a *AWSTestSuite) SetExpectCreateDBCluster() {
	gomock.InOrder(
		a.Mocks.API.RDS.EXPECT().
			DescribeDBClusters(gomock.Any(), gomock.Any()).
			Return(nil, errors.New("db cluster does not exist")).
			Times(1),

		a.Mocks.API.EC2.EXPECT().
			DescribeSecurityGroups(context.TODO(), gomock.Any()).
			Return(&ec2.DescribeSecurityGroupsOutput{
				SecurityGroups: []ec2Types.SecurityGroup{{GroupId: &a.GroupID}},
			}, nil).
			Times(1),

		a.Mocks.API.RDS.EXPECT().
			DescribeDBSubnetGroups(gomock.Any(), gomock.Any()).
			Return(&rds.DescribeDBSubnetGroupsOutput{
				DBSubnetGroups: []rdsTypes.DBSubnetGroup{
					{
						DBSubnetGroupName: aws.String(DBSubnetGroupName(a.VPCa)),
					},
				},
			}, nil).
			Times(1),

		a.Mocks.API.RDS.EXPECT().
			CreateDBCluster(gomock.Any(), gomock.Any()).
			Do(func(ctx context.Context, input *rds.CreateDBClusterInput, optFns ...func(*rds.Options)) {
				for _, zone := range input.AvailabilityZones {
					a.Assert().Contains(a.RDSAvailabilityZones, zone)
				}
				a.Assert().Equal(*input.BackupRetentionPeriod, int32(7))
				a.Assert().Equal(*input.DBClusterIdentifier, CloudID(a.InstallationA.ID))
				a.Assert().Equal(*input.DatabaseName, a.DBName)
				a.Assert().Equal(input.VpcSecurityGroupIds[0], a.GroupID)
			}).
			Times(1),
	)
}

// This whole block deals with RDS Instance creation.
// Each DescribeDBInstances errors on purpose so both the master and the
// first replica instance go through the creation path.
func (a *AWSTestSuite) SetExpectCreateDBInstance() {
	gomock.InOrder(
		a.Mocks.API.RDS.EXPECT().
			DescribeDBInstances(gomock.Any(), gomock.Any()).
			Return(nil, errors.New("db cluster instance does not exist")).
			Do(func(ctx context.Context, input *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) {
				a.Assert().Equal(*input.DBInstanceIdentifier, RDSMasterInstanceID(a.InstallationA.ID))
			}),

		a.Mocks.API.RDS.EXPECT().
			CreateDBInstance(gomock.Any(), gomock.Any()).Return(nil, nil).
			Do(func(ctx context.Context, input *rds.CreateDBInstanceInput, optFns ...func(*rds.Options)) {
				a.Assert().Equal(*input.DBClusterIdentifier, CloudID(a.InstallationA.ID))
				a.Assert().Equal(*input.DBInstanceIdentifier, RDSMasterInstanceID(a.InstallationA.ID))
			}).
			Times(1),

		a.Mocks.API.RDS.EXPECT().
			DescribeDBInstances(gomock.Any(), gomock.Any()).
			Return(nil, errors.New("db cluster instance does not exist")).
			Do(func(ctx context.Context, input *rds.DescribeDBInstancesInput, optFns ...func(*rds.Options)) {
				a.Assert().Equal(*input.DBInstanceIdentifier, RDSReplicaInstanceID(a.InstallationA.ID, 0))
			}),

		a.Mocks.API.RDS.EXPECT().
			CreateDBInstance(gomock.Any(), gomock.Any()).Return(nil, nil).
			Do(func(ctx context.Context, input *rds.CreateDBInstanceInput, optFns ...func(*rds.Options)) {
				a.Assert().Equal(*input.DBClusterIdentifier, CloudID(a.InstallationA.ID))
				a.Assert().Equal(*input.DBInstanceIdentifier, RDSReplicaInstanceID(a.InstallationA.ID, 0))
			}),
	)
}

// WARNING:
// This test is meant to exercise the provisioning and teardown of an AWS RDS
// database in a real AWS account. Only set the test env vars below if you wish
// to test this process with real AWS resources.
func TestDatabaseProvision(t *testing.T) {
	id := os.Getenv("SUPER_AWS_DATABASE_TEST")
	if id == "" {
		// Env var not set: skip the live-AWS exercise entirely.
		return
	}

	logger := log.New()
	database := NewRDSDatabase(model.DatabaseEngineTypeMySQL, id, &Client{
		mux: &sync.Mutex{},
	}, false)

	err := database.Provision(nil, logger)
	require.NoError(t, err)
}

// TestDatabaseTeardown is the companion of TestDatabaseProvision and is
// likewise gated on SUPER_AWS_DATABASE_TEST being set.
func TestDatabaseTeardown(t *testing.T) {
	id := os.Getenv("SUPER_AWS_DATABASE_TEST")
	if id == "" {
		return
	}

	logger := log.New()
	database := NewRDSDatabase(model.DatabaseEngineTypeMySQL, id, &Client{
		mux: &sync.Mutex{},
	}, false)

	err := database.Teardown(nil, false, logger)
	require.NoError(t, err)
}
package sendwithus import ( "context" "encoding/json" "os" "testing" "github.com/stretchr/testify/require" ) func TestSend(t *testing.T) { client, err := NewClient(os.Getenv("SENDWITHUS_TEST_API_KEY"), nil) require.NoError(t, err) sendPayload := SendPayload{} sendPayload.Template = os.Getenv("SENDWITHUS_TEST_TEMPLATE") sendPayload.Recipient = &Recipient{ Name: "Test Recipient", Address: os.Getenv("SENDWITHUS_TEST_SENDER"), } sendPayload.CC = Recipients{ { Name: "Test CC", Address: "kareem@joinpara.com", }, } sendPayload.BCC = Recipients{ { Name: "Test BCC", Address: os.Getenv("SENDWITHUS_TEST_RECEIPIENT"), }, } sendPayload.Sender = &Sender{ Recipient: Recipient{ Name: "Test Sender", Address: os.Getenv("SENDWITHUS_TEST_RECEIPIENT"), }, } td := map[string]interface{}{ "test": "test", } b, err := json.Marshal(td) require.NoError(t, err) require.NotNil(t, b) resp, err := client.Send(context.Background(), &sendPayload) require.NoError(t, err) require.NotNil(t, resp) }
/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

package main

import (
	"flag"
	"fmt"
	"net"
	"os"
	"testing"
	"time"

	datastore "github.com/01org/ciao/ciao-controller/internal/datastore"
	"github.com/01org/ciao/ciao-controller/types"
	"github.com/01org/ciao/payloads"
	"github.com/01org/ciao/ssntp"
	"github.com/01org/ciao/ssntp/uuid"
	"github.com/01org/ciao/testutil"
)

// NOTE(review): `context` and `server` used throughout these tests are
// package-level test fixtures (the controller context and SSNTP test
// server), not the standard library context package — presumably set up
// in TestMain elsewhere in this file; confirm.

// addTestTenant creates a tenant with a random UUID plus a fake CNCI and
// CNCI IP, returning the tenant for use by other tests.
func addTestTenant() (tenant *types.Tenant, err error) {
	/* add a new tenant */
	tuuid := uuid.Generate()
	tenant, err = context.ds.AddTenant(tuuid.String())
	if err != nil {
		return
	}

	// Add fake CNCI
	err = context.ds.AddTenantCNCI(tuuid.String(), uuid.Generate().String(), tenant.CNCIMAC)
	if err != nil {
		return
	}
	err = context.ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1")
	if err != nil {
		return
	}
	return
}

// addComputeTestTenant mirrors addTestTenant but uses the fixed compute
// test user id and a distinct CNCI IP.
func addComputeTestTenant() (tenant *types.Tenant, err error) {
	/* add a new tenant */
	tenant, err = context.ds.AddTenant(testutil.ComputeUser)
	if err != nil {
		return
	}

	// Add fake CNCI
	err = context.ds.AddTenantCNCI(testutil.ComputeUser, uuid.Generate().String(), tenant.CNCIMAC)
	if err != nil {
		return
	}

	err = context.ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.2")
	if err != nil {
		return
	}
	return
}

// BenchmarkStartSingleWorkload measures starting one instance of the first
// available workload per iteration.
func BenchmarkStartSingleWorkload(b *testing.B) {
	var err error

	/* add a new tenant */
	tuuid := uuid.Generate()
	tenant, err := context.ds.AddTenant(tuuid.String())
	if err != nil {
		b.Error(err)
	}

	// Add fake CNCI
	err = context.ds.AddTenantCNCI(tuuid.String(), uuid.Generate().String(), tenant.CNCIMAC)
	if err != nil {
		b.Error(err)
	}

	err = context.ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1")
	if err != nil {
		b.Error(err)
	}

	// get workload ID
	wls, err := context.ds.GetWorkloads()
	if err != nil || len(wls) == 0 {
		b.Fatal(err)
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, err = context.startWorkload(wls[0].ID, tuuid.String(), 1, false, "")
		if err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkStart1000Workload is the same setup as the single-workload
// benchmark but requests 1000 instances per iteration.
func BenchmarkStart1000Workload(b *testing.B) {
	var err error

	/* add a new tenant */
	tuuid := uuid.Generate()
	tenant, err := context.ds.AddTenant(tuuid.String())
	if err != nil {
		b.Error(err)
	}

	// Add fake CNCI
	err = context.ds.AddTenantCNCI(tuuid.String(), uuid.Generate().String(), tenant.CNCIMAC)
	if err != nil {
		b.Error(err)
	}

	err = context.ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1")
	if err != nil {
		b.Error(err)
	}

	// get workload ID
	wls, err := context.ds.GetWorkloads()
	if err != nil || len(wls) == 0 {
		b.Fatal(err)
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, err = context.startWorkload(wls[0].ID, tuuid.String(), 1000, false, "")
		if err != nil {
			b.Error(err)
		}
	}
}

// BenchmarkNewConfig measures building a workload config for a fresh
// instance id.
func BenchmarkNewConfig(b *testing.B) {
	var err error

	tenant, err := addTestTenant()
	if err != nil {
		b.Error(err)
	}

	// get workload ID
	wls, err := context.ds.GetWorkloads()
	if err != nil || len(wls) == 0 {
		b.Fatal(err)
	}

	id := uuid.Generate()

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, err := newConfig(context, wls[0], id.String(), tenant.ID)
		if err != nil {
			b.Error(err)
		}
	}
}

// TestTenantWithinBounds verifies that a start within the tenant's
// instance limit succeeds.
func TestTenantWithinBounds(t *testing.T) {
	var err error

	tenant, err := addTestTenant()
	if err != nil {
		t.Fatal(err)
	}

	/* put tenant limit of 1 instance */
	err = context.ds.AddLimit(tenant.ID, 1, 1)
	if err != nil {
		t.Fatal(err)
	}

	wls, err := context.ds.GetWorkloads()
	if err != nil || len(wls) == 0 {
		t.Fatal(err)
	}

	_, err = context.startWorkload(wls[0].ID, tenant.ID, 1, false, "")
	if err != nil {
		t.Fatal(err)
	}
}

// TestTenantOutOfBounds verifies that exceeding the tenant's instance
// limit is rejected.
func TestTenantOutOfBounds(t *testing.T) {
	var err error

	/* add a new tenant */
	tenant, err := addTestTenant()
	if err != nil {
		t.Error(err)
	}

	/* put tenant limit of 1 instance */
	// NOTE(review): AddLimit's error is deliberately discarded here —
	// confirm this is intentional (the sibling test checks it).
	_ = context.ds.AddLimit(tenant.ID, 1, 1)

	wls, err := context.ds.GetWorkloads()
	if err != nil || len(wls) == 0 {
		t.Fatal(err)
	}

	/* try to send 2 workload start commands */
	_, err = context.startWorkload(wls[0].ID, tenant.ID, 2, false, "")
	if err == nil {
		t.Errorf("Not tracking limits correctly")
	}
}

// TestNewTenantHardwareAddr
// Confirm that the mac addresses generated from a given
// IP address is as expected.
func TestNewTenantHardwareAddr(t *testing.T) {
	ip := net.ParseIP("172.16.0.2")
	expectedMAC := "02:00:ac:10:00:02"
	hw := newTenantHardwareAddr(ip)
	if hw.String() != expectedMAC {
		t.Error("Expected: ", expectedMAC, " Received: ", hw.String())
	}
}

// TestStartWorkload starts a single untraced workload via the shared helper.
func TestStartWorkload(t *testing.T) {
	var reason payloads.StartFailureReason

	client, _ := testStartWorkload(t, 1, false, reason)
	defer client.Shutdown()
}

// TestStartTracedWorkload starts a traced workload via the shared helper.
func TestStartTracedWorkload(t *testing.T) {
	client := testStartTracedWorkload(t)
	defer client.Shutdown()
}

// TestStartWorkloadLaunchCNCI confirms the tenant's CNCI info is recorded
// after a workload launch that required a CNCI.
func TestStartWorkloadLaunchCNCI(t *testing.T) {
	netClient, instances := testStartWorkloadLaunchCNCI(t, 1)
	defer netClient.Shutdown()

	id := instances[0].TenantID

	tenant, err := context.ds.GetTenant(id)
	if err != nil {
		t.Fatal(err)
	}

	if tenant.CNCIIP == "" {
		t.Fatal("CNCI Info not updated")
	}
}

// sendTraceReportEvent pushes a trace report from the test client and
// waits for both client and server to observe it.
func sendTraceReportEvent(client *testutil.SsntpTestClient, t *testing.T) {
	clientCh := client.AddEventChan(ssntp.TraceReport)
	serverCh := server.AddEventChan(ssntp.TraceReport)
	go client.SendTrace()
	_, err := client.GetEventChanResult(clientCh, ssntp.TraceReport)
	if err != nil {
		t.Fatal(err)
	}
	_, err = server.GetEventChanResult(serverCh, ssntp.TraceReport)
	if err != nil {
		t.Fatal(err)
	}
}

// sendStatsCmd pushes a STATS command from the test client and waits for
// both client and server to observe it.
func sendStatsCmd(client *testutil.SsntpTestClient, t *testing.T) {
	clientCh := client.AddCmdChan(ssntp.STATS)
	serverCh := server.AddCmdChan(ssntp.STATS)
	go client.SendStatsCmd()
	_, err := client.GetCmdChanResult(clientCh, ssntp.STATS)
	if err != nil {
		t.Fatal(err)
	}
	_, err = server.GetCmdChanResult(serverCh, ssntp.STATS)
	if err !=
nil {
		t.Fatal(err)
	}
}

// TBD: for the launch CNCI tests, I really need to create a fake
// network node and test that way.

// TestDeleteInstance starts one instance, deletes it, and checks the
// DELETE command reaching the server names the right instance.
func TestDeleteInstance(t *testing.T) {
	var reason payloads.StartFailureReason

	client, instances := testStartWorkload(t, 1, false, reason)
	defer client.Shutdown()

	sendStatsCmd(client, t)

	serverCh := server.AddCmdChan(ssntp.DELETE)

	// Sleeps throughout these tests give the async SSNTP plumbing time to
	// settle before issuing the next command.
	time.Sleep(1 * time.Second)

	err := context.deleteInstance(instances[0].ID)
	if err != nil {
		t.Fatal(err)
	}

	result, err := server.GetCmdChanResult(serverCh, ssntp.DELETE)
	if err != nil {
		t.Fatal(err)
	}

	if result.InstanceUUID != instances[0].ID {
		t.Fatal("Did not get correct Instance ID")
	}
}

// TestStopInstance starts one instance, stops it, and checks the STOP
// command reaching the server names the right instance.
func TestStopInstance(t *testing.T) {
	var reason payloads.StartFailureReason

	client, instances := testStartWorkload(t, 1, false, reason)
	defer client.Shutdown()

	sendStatsCmd(client, t)

	serverCh := server.AddCmdChan(ssntp.STOP)

	time.Sleep(1 * time.Second)

	err := context.stopInstance(instances[0].ID)
	if err != nil {
		t.Fatal(err)
	}

	result, err := server.GetCmdChanResult(serverCh, ssntp.STOP)
	if err != nil {
		t.Fatal(err)
	}

	if result.InstanceUUID != instances[0].ID {
		t.Fatal("Did not get correct Instance ID")
	}
}

// TestRestartInstance stops a running instance and then restarts it,
// verifying both the STOP and RESTART commands target the right instance.
func TestRestartInstance(t *testing.T) {
	var reason payloads.StartFailureReason

	client, instances := testStartWorkload(t, 1, false, reason)
	defer client.Shutdown()

	time.Sleep(1 * time.Second)

	sendStatsCmd(client, t)

	serverCh := server.AddCmdChan(ssntp.STOP)
	clientCh := client.AddCmdChan(ssntp.STOP)

	time.Sleep(1 * time.Second)

	err := context.stopInstance(instances[0].ID)
	if err != nil {
		t.Fatal(err)
	}

	result, err := server.GetCmdChanResult(serverCh, ssntp.STOP)
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.GetCmdChanResult(clientCh, ssntp.STOP)
	if err != nil {
		t.Fatal(err)
	}

	if result.InstanceUUID != instances[0].ID {
		t.Fatal("Did not get correct Instance ID")
	}

	// now attempt to restart
	sendStatsCmd(client, t)

	serverCh = server.AddCmdChan(ssntp.RESTART)

	time.Sleep(1 * time.Second)

	err = context.restartInstance(instances[0].ID)
	if err != nil {
		t.Fatal(err)
	}

	result, err = server.GetCmdChanResult(serverCh, ssntp.RESTART)
	if err != nil {
		t.Fatal(err)
	}

	if result.InstanceUUID != instances[0].ID {
		t.Fatal("Did not get correct Instance ID")
	}
}

// TestEvacuateNode issues an EVACUATE for a freshly connected agent and
// checks the command reaches the server with the right node id.
func TestEvacuateNode(t *testing.T) {
	client, err := testutil.NewSsntpTestClientConnection("EvacuateNode", ssntp.AGENT, testutil.AgentUUID)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Shutdown()

	serverCh := server.AddCmdChan(ssntp.EVACUATE)

	// ok to not send workload first?
	err = context.evacuateNode(client.UUID)
	if err != nil {
		t.Error(err)
	}

	result, err := server.GetCmdChanResult(serverCh, ssntp.EVACUATE)
	if err != nil {
		t.Fatal(err)
	}

	if result.NodeUUID != client.UUID {
		t.Fatal("Did not get node ID")
	}
}

// TestInstanceDeletedEvent deletes an instance, replays the client's
// InstanceDeleted event, and checks the datastore entry is gone.
func TestInstanceDeletedEvent(t *testing.T) {
	var reason payloads.StartFailureReason

	client, instances := testStartWorkload(t, 1, false, reason)
	defer client.Shutdown()

	sendStatsCmd(client, t)

	serverCh := server.AddCmdChan(ssntp.DELETE)

	time.Sleep(1 * time.Second)

	err := context.deleteInstance(instances[0].ID)
	if err != nil {
		t.Fatal(err)
	}

	_, err = server.GetCmdChanResult(serverCh, ssntp.DELETE)
	if err != nil {
		t.Fatal(err)
	}

	clientEvtCh := client.AddEventChan(ssntp.InstanceDeleted)
	serverEvtCh := server.AddEventChan(ssntp.InstanceDeleted)
	go client.SendDeleteEvent(instances[0].ID)
	_, err = client.GetEventChanResult(clientEvtCh, ssntp.InstanceDeleted)
	if err != nil {
		t.Fatal(err)
	}
	_, err = server.GetEventChanResult(serverEvtCh, ssntp.InstanceDeleted)
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(1 * time.Second)

	// try to get instance info
	_, err = context.ds.GetInstance(instances[0].ID)
	if err == nil {
		t.Error("Instance not deleted")
	}
}

// TestStartFailure drives the failing-start path of the shared helper.
func TestStartFailure(t *testing.T) {
	reason := payloads.FullCloud

	client, _ := testStartWorkload(t, 1, true, reason)
	defer client.Shutdown()

	// since we had a start failure, we should confirm that the
	// instance is no longer pending in the database
}

// TestStopFailure forces a stop failure and (continuing past this chunk)
// checks that the failure is recorded in the event log.
func TestStopFailure(t *testing.T) {
	context.ds.ClearLog()

	var
reason payloads.StartFailureReason client, instances := testStartWorkload(t, 1, false, reason) defer client.Shutdown() client.StopFail = true client.StopFailReason = payloads.StopNoInstance sendStatsCmd(client, t) serverCh := server.AddCmdChan(ssntp.STOP) time.Sleep(1 * time.Second) err := context.stopInstance(instances[0].ID) if err != nil { t.Fatal(err) } result, err := server.GetCmdChanResult(serverCh, ssntp.STOP) if err != nil { t.Fatal(err) } if result.InstanceUUID != instances[0].ID { t.Fatal("Did not get correct Instance ID") } time.Sleep(1 * time.Second) // the response to a stop failure is to log the failure entries, err := context.ds.GetEventLog() if err != nil { t.Fatal(err) } expectedMsg := fmt.Sprintf("Stop Failure %s: %s", instances[0].ID, client.StopFailReason.String()) for i := range entries { if entries[i].Message == expectedMsg { return } } t.Error("Did not find failure message in Log") } func TestRestartFailure(t *testing.T) { context.ds.ClearLog() var reason payloads.StartFailureReason client, instances := testStartWorkload(t, 1, false, reason) defer client.Shutdown() client.RestartFail = true client.RestartFailReason = payloads.RestartLaunchFailure sendStatsCmd(client, t) time.Sleep(1 * time.Second) serverCh := server.AddCmdChan(ssntp.STOP) clientCh := client.AddCmdChan(ssntp.STOP) err := context.stopInstance(instances[0].ID) if err != nil { t.Fatal(err) } _, err = client.GetCmdChanResult(clientCh, ssntp.STOP) if err != nil { t.Fatal(err) } result, err := server.GetCmdChanResult(serverCh, ssntp.STOP) if err != nil { t.Fatal(err) } if result.InstanceUUID != instances[0].ID { t.Fatal("Did not get correct Instance ID") } sendStatsCmd(client, t) time.Sleep(1 * time.Second) serverCh = server.AddCmdChan(ssntp.RESTART) err = context.restartInstance(instances[0].ID) if err != nil { t.Fatal(err) } result, err = server.GetCmdChanResult(serverCh, ssntp.RESTART) if err != nil { t.Fatal(err) } if result.InstanceUUID != instances[0].ID { t.Fatal("Did not get 
correct Instance ID") } time.Sleep(1 * time.Second) // the response to a restart failure is to log the failure entries, err := context.ds.GetEventLog() if err != nil { t.Fatal(err) } expectedMsg := fmt.Sprintf("Restart Failure %s: %s", instances[0].ID, client.RestartFailReason.String()) for i := range entries { if entries[i].Message == expectedMsg { return } } t.Error("Did not find failure message in Log") } func TestNoNetwork(t *testing.T) { nn := true noNetwork = &nn var reason payloads.StartFailureReason client, _ := testStartWorkload(t, 1, false, reason) defer client.Shutdown() } // NOTE: the caller is responsible for calling Shutdown() on the *SsntpTestClient func testStartTracedWorkload(t *testing.T) *testutil.SsntpTestClient { tenant, err := addTestTenant() if err != nil { t.Fatal(err) } client, err := testutil.NewSsntpTestClientConnection("StartTracedWorkload", ssntp.AGENT, testutil.AgentUUID) if err != nil { t.Fatal(err) } // caller of TestStartTracedWorkload() owns doing the close //defer client.Shutdown() wls, err := context.ds.GetWorkloads() if err != nil { t.Fatal(err) } if len(wls) == 0 { t.Fatal("No workloads, expected len(wls) > 0, got len(wls) == 0") } clientCh := client.AddCmdChan(ssntp.START) serverCh := server.AddCmdChan(ssntp.START) instances, err := context.startWorkload(wls[0].ID, tenant.ID, 1, true, "testtrace1") if err != nil { t.Fatal(err) } if len(instances) != 1 { t.Fatalf("Wrong number of instances, expected 1, got %d", len(instances)) } _, err = client.GetCmdChanResult(clientCh, ssntp.START) if err != nil { t.Fatal(err) } result, err := server.GetCmdChanResult(serverCh, ssntp.START) if err != nil { t.Fatal(err) } if result.InstanceUUID != instances[0].ID { t.Fatal("Did not get correct Instance ID") } return client } // NOTE: the caller is responsible for calling Shutdown() on the *SsntpTestClient func testStartWorkload(t *testing.T, num int, fail bool, reason payloads.StartFailureReason) (*testutil.SsntpTestClient, []*types.Instance) { 
tenant, err := addTestTenant() if err != nil { t.Fatal(err) } client, err := testutil.NewSsntpTestClientConnection("StartWorkload", ssntp.AGENT, testutil.AgentUUID) if err != nil { t.Fatal(err) } // caller of TestStartWorkload() owns doing the close //defer client.Shutdown() wls, err := context.ds.GetWorkloads() if err != nil { t.Fatal(err) } if len(wls) == 0 { t.Fatal("No workloads, expected len(wls) > 0, got len(wls) == 0") } clientCmdCh := client.AddCmdChan(ssntp.START) clientErrCh := client.AddErrorChan(ssntp.StartFailure) client.StartFail = fail client.StartFailReason = reason instances, err := context.startWorkload(wls[0].ID, tenant.ID, num, false, "") if err != nil { t.Fatal(err) } if len(instances) != num { t.Fatalf("Wrong number of instances, expected %d, got %d", len(instances), num) } if fail == true { _, err := client.GetErrorChanResult(clientErrCh, ssntp.StartFailure) if err == nil { // unexpected success t.Fatal(err) } } result, err := client.GetCmdChanResult(clientCmdCh, ssntp.START) if fail == true && err == nil { // unexpected success t.Fatal(err) } if fail == false && err != nil { // unexpected failure t.Fatal(err) } if result.InstanceUUID != instances[0].ID { t.Fatal("Did not get correct Instance ID") } return client, instances } // NOTE: the caller is responsible for calling Shutdown() on the *SsntpTestClient func testStartWorkloadLaunchCNCI(t *testing.T, num int) (*testutil.SsntpTestClient, []*types.Instance) { netClient, err := testutil.NewSsntpTestClientConnection("StartWorkloadLaunchCNCI", ssntp.NETAGENT, testutil.NetAgentUUID) if err != nil { t.Fatal(err) } // caller of testStartWorkloadLaunchCNCI() owns doing the close //defer netClient.Shutdown() wls, err := context.ds.GetWorkloads() if err != nil { t.Fatal(err) } if len(wls) == 0 { t.Fatal("No workloads, expected len(wls) > 0, got len(wls) == 0") } serverCmdCh := server.AddCmdChan(ssntp.START) netClientCmdCh := netClient.AddCmdChan(ssntp.START) newTenant := uuid.Generate().String() // 
random ~= new tenant and thus triggers start of a CNCI // trigger the START command flow, and await results instanceCh := make(chan []*types.Instance) go func() { instances, err := context.startWorkload(wls[0].ID, newTenant, 1, false, "") if err != nil { t.Fatal(err) } if len(instances) != 1 { t.Fatalf("Wrong number of instances, expected 1, got %d", len(instances)) } instanceCh <- instances }() _, err = netClient.GetCmdChanResult(netClientCmdCh, ssntp.START) if err != nil { t.Fatal(err) } result, err := server.GetCmdChanResult(serverCmdCh, ssntp.START) if err != nil { t.Fatal(err) } if result.TenantUUID != newTenant { t.Fatal("Did not get correct tenant ID") } if !result.CNCI { t.Fatal("this is not a CNCI launch request") } // start a test CNCI client cnciClient, err := testutil.NewSsntpTestClientConnection("StartWorkloadLaunchCNCI", ssntp.CNCIAGENT, newTenant) if err != nil { t.Fatal(err) } // make CNCI send an ssntp.ConcentratorInstanceAdded event, and await results cnciEventCh := cnciClient.AddEventChan(ssntp.ConcentratorInstanceAdded) serverEventCh := server.AddEventChan(ssntp.ConcentratorInstanceAdded) tenantCNCI, _ := context.ds.GetTenantCNCISummary(result.InstanceUUID) go cnciClient.SendConcentratorAddedEvent(result.InstanceUUID, newTenant, testutil.CNCIIP, tenantCNCI[0].MACAddress) result, err = cnciClient.GetEventChanResult(cnciEventCh, ssntp.ConcentratorInstanceAdded) if err != nil { t.Fatal(err) } _, err = server.GetEventChanResult(serverEventCh, ssntp.ConcentratorInstanceAdded) if err != nil { t.Fatal(err) } // shutdown the test CNCI client cnciClient.Shutdown() if result.InstanceUUID != tenantCNCI[0].InstanceID { t.Fatalf("Did not get correct Instance ID, got %s, expected %s", result.InstanceUUID, tenantCNCI[0].InstanceID) } instances := <-instanceCh if instances == nil { t.Fatal("did not receive instance") } return netClient, instances } var testClients []*testutil.SsntpTestClient var context *controller var server *testutil.SsntpTestServer func 
TestMain(m *testing.M) { flag.Parse() // create fake ssntp server server = testutil.StartTestServer() context = new(controller) context.ds = new(datastore.Datastore) dsConfig := datastore.Config{ PersistentURI: "./ciao-controller-test.db", TransientURI: "./ciao-controller-test-tdb.db", InitTablesPath: *tablesInitPath, InitWorkloadsPath: *workloadsPath, } err := context.ds.Init(dsConfig) if err != nil { os.Exit(1) } config := &ssntp.Config{ URI: "localhost", CAcert: ssntp.DefaultCACert, Cert: ssntp.RoleToDefaultCertName(ssntp.Controller), } context.client, err = newSSNTPClient(context, config) if err != nil { os.Exit(1) } testIdentityConfig := testutil.IdentityConfig{ ComputeURL: testutil.ComputeURL, ProjectID: testutil.ComputeUser, } id := testutil.StartIdentityServer(testIdentityConfig) idConfig := identityConfig{ endpoint: id.URL, serviceUserName: "test", servicePassword: "iheartciao", } context.id, err = newIdentityClient(idConfig) if err != nil { fmt.Println(err) // keep going anyway - any compute api tests will fail. } _, _ = addComputeTestTenant() go createComputeAPI(context) time.Sleep(1 * time.Second) code := m.Run() context.client.Disconnect() context.ds.Exit() id.Close() server.Shutdown() os.Remove("./ciao-controller-test.db") os.Remove("./ciao-controller-test.db-shm") os.Remove("./ciao-controller-test.db-wal") os.Remove("./ciao-controller-test-tdb.db") os.Remove("./ciao-controller-test-tdb.db-shm") os.Remove("./ciao-controller-test-tdb.db-wal") os.Exit(code) }
/*
 * Wager service APIs
 *
 * APIs for a wager system
 *
 * API version: 1.0.0
 * Generated by: OpenAPI Generator (https://openapi-generator.tech)
 */

package wager

// Wager is the API model for a wager offer, carrying its pricing fields and
// how much of it has been sold. Field names and JSON tags are generated from
// the OpenAPI spec and must not be changed by hand.
type Wager struct {
	// Id is the unique identifier of the wager.
	Id int64 `json:"id"`
	TotalWagerValue int32 `json:"total_wager_value"`
	Odds int32 `json:"odds"`
	SellingPercentage int32 `json:"selling_percentage"`
	SellingPrice float32 `json:"selling_price"`
	CurrentSellingPrice float32 `json:"current_selling_price"`
	PercentageSold int32 `json:"percentage_sold"`
	// NOTE(review): AmountSold is float64 while the other monetary fields are
	// float32 — presumably intentional in the generator output, but confirm.
	AmountSold float64 `json:"amount_sold"`
	// PlacedAt is a numeric timestamp; units (seconds vs milliseconds) are not
	// visible here — TODO confirm against the API spec.
	PlacedAt int64 `json:"placed_at"`
}
// Copyright © 2020 Weald Technology Trading // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package core import ( "encoding/json" "fmt" "io/ioutil" "path/filepath" "strings" "github.com/shibukawa/configdir" ) // Permissions provides information about per-client permissions. type Permissions struct { Certs []*CertificateInfo `json:"certificates"` } // CertificateInfo contains information related to client certificates. type CertificateInfo struct { Name string `json:"name"` Perms []*CertificatePerms `json:"permissions"` } // CertificatePerms contains information about the operations allowed by the certificate. type CertificatePerms struct { Path string `json:"path"` Operations []string `json:"operations"` } // FetchPermissions fetches permissions from the JSON configuration file. func FetchPermissions() (*Permissions, error) { configDirs := configdir.New("wealdtech", "walletd") configPath := configDirs.QueryFolders(configdir.Global)[0].Path path := filepath.Join(configPath, "perms.json") data, err := ioutil.ReadFile(path) if err != nil { return nil, err } perms := &Permissions{} err = json.Unmarshal(data, perms) if err != nil { return nil, err } return perms, nil } // DumpPerms dumps information about our permissions to stdout. 
func DumpPerms(perms *Permissions) { for i, certInfo := range perms.Certs { if certInfo.Name == "" { fmt.Printf("ERROR: certificate %d does not have a name\n", i) } else { fmt.Printf("Permissions for %q:\n", certInfo.Name) for _, perm := range certInfo.Perms { if len(perm.Operations) == 1 && perm.Operations[0] == "All" { fmt.Printf("\t- accounts matching the path %q can carry out all operations\n", perm.Path) } else { fmt.Printf("\t- accounts matching the path %q can carry out operations: %s\n", perm.Path, strings.Join(perm.Operations, ", ")) } } } } }
package models

import "time"

// TextMessageModel represents an outbound message (SMS or email) stored in
// the text_message_list table.
type TextMessageModel struct {
	ID uint `gorm:"primaryKey" json:"id"`
	// Title is the message subject/headline.
	Title string `json:"title"`
	// Details holds the message body; persisted as LONGTEXT.
	Details string `json:"details" gorm:"type:longtext"`
	Type int `gorm:"default:1" json:"type"` // 1=sms; 2=email;
	// Status defaults to 1. NOTE(review): the meaning of each status value is
	// not visible in this file — confirm against callers before relying on it.
	Status int `gorm:"default:1"`
	// CreatedAt/UpdatedAt are maintained by the database via DEFAULT/ON UPDATE
	// CURRENT_TIMESTAMP rather than by application code.
	CreatedAt time.Time `gorm:"type:DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP" json:"created_at"`
	UpdatedAt time.Time `gorm:"type:DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" json:"updated_at"`
}

// TableName overrides GORM's default table name for this model.
func (b *TextMessageModel) TableName() string {
	return "text_message_list"
}
package cert

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/argon2"
)

// Argon2Parameters holds the Argon2id key-derivation factors.
type Argon2Parameters struct {
	version     rune
	Memory      uint32 // KiB
	Parallelism uint8
	Iterations  uint32
	salt        []byte
}

// NewArgon2Parameters returns a new Argon2Parameters object with the current
// argon2 library version set. The salt is left nil; aes256DeriveKey generates
// one lazily on first use.
func NewArgon2Parameters(memory uint32, parallelism uint8, iterations uint32) *Argon2Parameters {
	return &Argon2Parameters{
		version:     argon2.Version,
		Memory:      memory, // KiB
		Parallelism: parallelism,
		Iterations:  iterations,
	}
}

// aes256Encrypt encrypts data using AES-256-GCM with a key derived from the
// passphrase via the Argon2id key derivation function. The returned blob is
// nonce || ciphertext (see joinNonceCiphertext).
func aes256Encrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
	key, err := aes256DeriveKey(passphrase, kdfParams)
	if err != nil {
		return nil, err
	}

	// this should never happen, but since this dictates how our calls into the
	// aes package behave and could be catastrophic, let's sanity check this
	if len(key) != 32 {
		return nil, fmt.Errorf("invalid AES-256 key length (%d) - cowardly refusing to encrypt", len(key))
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}

	ciphertext := gcm.Seal(nil, nonce, data, nil)
	blob := joinNonceCiphertext(nonce, ciphertext)
	return blob, nil
}

// aes256Decrypt decrypts data using AES-256-GCM and the Argon2id key
// derivation function. Expects the blob produced by aes256Encrypt
// (nonce followed by ciphertext).
func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte) ([]byte, error) {
	key, err := aes256DeriveKey(passphrase, kdfParams)
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		// Fix: this error was previously discarded (overwritten by the next
		// assignment), which could panic on gcm.NonceSize() with a nil gcm.
		return nil, err
	}
	nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
	if err != nil {
		return nil, err
	}
	plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		// Deliberately vague so we don't reveal whether the key or the data
		// was at fault.
		return nil, fmt.Errorf("invalid passphrase or corrupt private key")
	}
	return plaintext, nil
}

// aes256DeriveKey derives a 32-byte AES-256 key from the passphrase,
// generating a random 32-byte salt in params if one is not already set.
func aes256DeriveKey(passphrase []byte, params *Argon2Parameters) ([]byte, error) {
	if params.salt == nil {
		params.salt = make([]byte, 32)
		if _, err := rand.Read(params.salt); err != nil {
			return nil, err
		}
	}
	// keySize of 32 bytes will result in AES-256 encryption
	key, err := deriveKey(passphrase, 32, params)
	if err != nil {
		return nil, err
	}
	return key, nil
}

// deriveKey derives a key of keySize bytes from a passphrase using Argon2id.
// The parameters must carry a salt of at least 128 bits and a matching
// library version.
func deriveKey(passphrase []byte, keySize uint32, params *Argon2Parameters) ([]byte, error) {
	if params.version != argon2.Version {
		return nil, fmt.Errorf("incompatible Argon2 version: %d", params.version)
	}
	if params.salt == nil {
		return nil, fmt.Errorf("salt must be set in argon2Parameters")
	}
	if len(params.salt) < 16 {
		return nil, fmt.Errorf("salt must be at least 128 bits")
	}
	key := argon2.IDKey(passphrase, params.salt, params.Iterations, params.Memory, params.Parallelism, keySize)
	return key, nil
}

// joinNonceCiphertext prepends the nonce to the ciphertext.
func joinNonceCiphertext(nonce []byte, ciphertext []byte) []byte {
	return append(nonce, ciphertext...)
}

// splitNonceCiphertext splits the nonce from the ciphertext.
func splitNonceCiphertext(blob []byte, nonceSize int) ([]byte, []byte, error) {
	if len(blob) <= nonceSize {
		return nil, nil, fmt.Errorf("invalid ciphertext blob - blob shorter than nonce length")
	}
	return blob[:nonceSize], blob[nonceSize:], nil
}
package main

import (
	"fmt"
	"io"
	"log"
	"net"

	"github.com/johnantonusmaximus/grpc-golang/bidirectional_stream/bidirectionalpb"
	"google.golang.org/grpc"
)

type server struct{}

// GreetEveryone handles a bidirectional greeting stream: for every request
// received it sends back a greeting built from the client's first name, until
// the client closes its side of the stream.
func (s *server) GreetEveryone(stream bidirectionalpb.GreetService_GreetEveryoneServer) error {
	fmt.Printf("GreetEveryone function was invoked by streaming request...\n")
	for {
		req, err := stream.Recv()
		// Fix: the error must be checked before touching req — on io.EOF (and
		// other errors) req is nil, so the previous code dereferenced nil via
		// req.GetGreeting() when the client finished the stream.
		if err == io.EOF {
			return nil
		}
		if err != nil {
			// Fix: log.Fatalf exits the whole process (taking the server down
			// for one broken stream) and made the following return
			// unreachable; log and return the error instead.
			log.Printf("Error receiving stream: %v\n", err)
			return err
		}
		firstName := req.GetGreeting().GetFirstName()
		log.Printf("Received from Client: %v\n", firstName)

		msg := "Hello there, " + firstName + "!"
		err = stream.Send(&bidirectionalpb.GreetEveryoneResponse{
			Message: msg,
		})
		if err != nil {
			log.Printf("Error sending response: %v\n", err)
			return err
		}
	}
}

// main starts the gRPC server on port 50051 and blocks serving requests.
func main() {
	listener, err := net.Listen("tcp", "0.0.0.0:50051")
	if err != nil {
		log.Fatalf("Failed to Listen: %v", err)
	}
	s := grpc.NewServer()
	bidirectionalpb.RegisterGreetServiceServer(s, &server{})
	if err := s.Serve(listener); err != nil {
		log.Fatalf("Failed to serve: %v", err)
	}
}
// Package HttpSender provides small helpers for POSTing byte arrays, JSON
// strings and JSON files to an HTTP endpoint.
package HttpSender

import (
	"bytes"
	"io"
	"net/http"
	"os"
)

// check panics on a non-nil error. NOTE(review): the exported helpers both
// panic here AND return the error; callers never actually observe a non-nil
// error. Kept for backward compatibility.
func check(e error) {
	if e != nil {
		panic(e)
	}
}

// postFromReader takes an io.Reader and issues a POST to a destination server,
// returning the response body as a string.
func postFromReader(URL string, buffer io.Reader, mime string) (string, error) {
	r, err := http.Post(URL, mime, buffer)
	if err != nil {
		return "", err
	}
	defer r.Body.Close()
	// Fix: the read error was previously discarded with `_`, silently
	// returning a truncated body on failure.
	resp, err := io.ReadAll(r.Body)
	if err != nil {
		return "", err
	}
	return string(resp), nil
}

// PostByteArray sends a Byte Array to another server via POST.
func PostByteArray(URL string, data []byte, mime string) (string, error) {
	reader := bytes.NewReader(data)
	response, err := postFromReader(URL, reader, mime)
	check(err)
	return response, err
}

// PostJSONFile takes a file path and reads it in then sends it to server via POST.
func PostJSONFile(URL string, jsonFilePath string) (string, error) {
	// os.ReadFile replaces the deprecated ioutil.ReadFile.
	dat, err := os.ReadFile(jsonFilePath)
	check(err)
	r, serr := PostByteArray(URL, dat, "application/json")
	check(serr)
	return r, serr
}

// PostJSONString sends a JSON string to another server via POST.
func PostJSONString(URL string, json string) (string, error) {
	b := []byte(json)
	r, err := PostByteArray(URL, b, "application/json")
	check(err)
	return r, err
}
package leetcode /* * @lc app=leetcode id=88 lang=golang * * [88] Merge Sorted Array */ // @lc code=start func merge(nums1 []int, m int, nums2 []int, n int) { im := m - 1 in := n - 1 for i := m + n - 1; i > im; i-- { if in < 0 { nums1[i] = nums1[im] im-- } else if im < 0 { nums1[i] = nums2[in] in-- } else if nums1[im] > nums2[in] { nums1[i] = nums1[im] im-- } else { nums1[i] = nums2[in] in-- } } } // @lc code=end
//go:build windows
// +build windows

package wguser

import (
	"errors"
	"fmt"
	"net"
	"os"
	"testing"
	"time"

	"golang.org/x/sys/windows/registry"
	"golang.zx2c4.com/wireguard/ipc/namedpipe"
)

// isWINE determines if this test is running in WINE.
// It is evaluated once at package init by probing for the HKLM\SOFTWARE\Wine
// registry key, which WINE exposes and a real Windows install does not.
var isWINE = func() bool {
	// Reference: https://forum.winehq.org/viewtopic.php?t=4988.
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Wine`, registry.QUERY_VALUE)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// No key; the tests don't appear to be running in WINE.
			return false
		}
		// Any other registry error is unexpected; abort via the package's
		// panicf helper.
		panicf("failed to query registry for WINE: %v", err)
	}
	defer k.Close()

	return true
}()

// testFind produces a Client.find function for integration tests.
// It returns a closure that lists the named pipes under dir.
func testFind(dir string) func() ([]string, error) {
	return func() ([]string, error) {
		return findNamedPipes(dir)
	}
}

// testListen creates a userspace device listener for tests, returning the
// directory where it can be found and a function to clean up its state.
func testListen(t *testing.T, device string) (l net.Listener, dir string, done func()) {
	t.Helper()

	// It appears that some of the system calls required for full named pipe
	// tests are not implemented in WINE, so skip tests that invoke this helper
	// if this isn't a real Windows install.
	if isWINE {
		t.Skip("skipping, creating a userspace device does not work in WINE")
	}

	// Attempt to create a unique name and avoid collisions.
	// Nanosecond granularity makes concurrent test runs unlikely to clash.
	dir = fmt.Sprintf(`wguser-test%d\`, time.Now().Nanosecond())

	l, err := namedpipe.Listen(pipePrefix + dir + device)
	if err != nil {
		t.Fatalf("failed to create Windows named pipe: %v", err)
	}

	done = func() {
		_ = l.Close()
	}

	return l, dir, done
}
package webserver

import (
	"net/http"

	boardModel "github.com/joostvdg/cmg/pkg/model"
	"github.com/joostvdg/cmg/pkg/webserver/model"
	"github.com/labstack/echo/v4"
)

// GetMapLegend retrieves the map Legend, helps explain codes used within the data returned by the API
func GetMapLegend(c echo.Context) error {
	callback := c.QueryParam("callback")
	jsonp := c.QueryParam("jsonp")

	// Flatten the landscape and harbor lookup maps into slices for the
	// response payload (map iteration order is unspecified, as before).
	landscapes := make([]boardModel.Landscape, 0, len(boardModel.Landscapes))
	for _, landscape := range boardModel.Landscapes {
		landscapes = append(landscapes, landscape)
	}

	harbors := make([]boardModel.Harbor, 0, len(boardModel.Harbors))
	for _, harbor := range boardModel.Harbors {
		harbors = append(harbors, harbor)
	}

	content := model.MapLegend{
		Harbors:    harbors,
		Landscapes: landscapes,
	}

	// JSONP is only used when explicitly requested via the query string.
	if jsonp == "true" {
		return c.JSONP(http.StatusOK, callback, &content)
	}
	return c.JSON(http.StatusOK, &content)
}
package wunsch

import (
	"github.com/stretchr/testify/assert"
	"testing"
)

// TestCanAlign exercises CanAlignString on pairs of (possibly gapped) strings:
// identical inputs, compatible gap placements, and incompatible
// character/gap combinations.
func TestCanAlign(t *testing.T) {
	// Sanity check that '-' (the gap character) is ASCII 45.
	assert.Equal(t, int('-'), 45)
	assert.Equal(t, CanAlignString("ABCDEF", "ABCDEF"), true)
	assert.Equal(t, CanAlignString("ABCDEF", "ABCDEF"), true)
	assert.Equal(t, true, CanAlignString("ab--e", "-bcde"))
	assert.Equal(t, true, CanAlignString("abe--", "--ecd"))
	assert.Equal(t, false, CanAlignString("a-be", "ax-e"))
	assert.Equal(t, false, CanAlignString("a---be", "axoo-e"))
	assert.Equal(t, true, CanAlignString("abxe", "ab-e"))
	assert.Equal(t, true, CanAlignString("-a--a--a-------", "0a12a34a12abcde"))
	assert.Equal(t, false, CanAlignString("-ABCDEF", "Q-BCDEF"))
	assert.Equal(t, false, CanAlignString("-ABCDEF-----", "Q-BCDEF"))
}

// TestMultipleAlignment pins the exact gapped output of AlignManyString for
// several inputs, including all-empty and single-character corpora.
func TestMultipleAlignment(t *testing.T) {
	data := []string{"Hello", "Hallo", "Hillo", "Hwello", "lloha", "habar"}
	assert.Equal(t, AlignManyString(data), []string{
		"H---ello-----",
		"H--a-llo-----",
		"H-i--llo-----",
		"-----lloha---",
		"--------habar",
		"Hw--ello-----"})
	assert.Equal(t, AlignManyString([]string{"hhhh", "hhhh", "hhhh", "hh22"}), []string{
		"hh--hh",
		"hh--hh",
		"hh--hh",
		"hh22--"})
	assert.Equal(t, AlignManyString([]string{"", "", "", ""}), []string{
		"", "", "", ""})
	assert.Equal(t, AlignManyString([]string{"", "", "", "aaa"}), []string{
		"---", "---", "---", "aaa"})
	assert.Equal(t, AlignManyString([]string{"a", "b", "c", "d"}), []string{
		"---a", "--b-", "-c--", "d---"})
}

// TestMakeFingerPrint checks the fingerprint of a small corpus round-tripped
// through the package's item conversion helpers.
func TestMakeFingerPrint(t *testing.T) {
	data := []string{"Hello", "Hallo", "Hillo", "Hwello", "lloha", "habar"}
	assert.Equal(t, item_to_string(MakeFingerPrint(strings_to_items(data))), "Hwiaellohabar")
}
/*
Copyright 2021 The Skaffold Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package docker

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"testing"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/random"
	spec "github.com/opencontainers/image-spec/specs-go/v1"

	"github.com/GoogleContainerTools/skaffold/v2/testutil"
)

// TestGetPlatformsForImage serves a single-image manifest (with the config
// blob inlined via manifest.Config.Data) from a fake registry and checks that
// GetPlatforms reports the platform recorded in the image config.
func TestGetPlatformsForImage(t *testing.T) {
	idx, err := random.Image(1024, 1)
	testutil.CheckError(t, false, err)
	expectedRepo := "foo/bar"
	manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo)
	manifest, err := idx.Manifest()
	testutil.CheckError(t, false, err)
	configFile, err := idx.ConfigFile()
	testutil.CheckError(t, false, err)

	// Update image platform to match test
	configFile.OS = "linux"
	configFile.Architecture = "arm64"
	rawConfigFile, err := json.Marshal(configFile)
	testutil.CheckError(t, false, err)
	// Embed the edited config in the manifest and keep its digest/size
	// consistent so registry-side validation passes.
	manifest.Config.Data = rawConfigFile
	cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(rawConfigFile))
	testutil.CheckError(t, false, err)
	manifest.Config.Digest = cfgHash
	manifest.Config.Size = cfgSize
	rawManifest, err := json.Marshal(manifest)
	testutil.CheckError(t, false, err)

	// Minimal fake registry: answer the /v2/ ping and serve the manifest.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/v2/":
			w.WriteHeader(http.StatusOK)
		case manifestPath:
			if r.Method != http.MethodGet {
				t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet)
			}
			mt, err := idx.MediaType()
			testutil.CheckError(t, false, err)
			w.Header().Set("Content-Type", string(mt))
			w.Write(rawManifest)
		default:
			t.Fatalf("Unexpected path: %v", r.URL.Path)
		}
	}))
	defer server.Close()
	u, err := url.Parse(server.URL)
	testutil.CheckError(t, false, err)
	tag := fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo)

	platforms, err := GetPlatforms(tag)
	testutil.CheckError(t, false, err)
	expectedPlatforms := []spec.Platform{
		{Architecture: "arm64", OS: "linux"},
	}
	testutil.CheckDeepEqual(t, expectedPlatforms, platforms)
}

// TestGetPlatformsForIndex serves a multi-image index manifest from a fake
// registry and checks that GetPlatforms returns the platform of every entry.
func TestGetPlatformsForIndex(t *testing.T) {
	idx, err := random.Index(1024, 1, 3)
	testutil.CheckError(t, false, err)
	expectedRepo := "foo/bar"
	manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", expectedRepo)
	manifest, err := idx.IndexManifest()
	testutil.CheckError(t, false, err)

	// Update image platform to match test
	manifest.Manifests[0].Platform = &v1.Platform{Architecture: "arm64", OS: "linux"}
	manifest.Manifests[1].Platform = &v1.Platform{Architecture: "amd64", OS: "linux"}
	rawManifest, err := json.Marshal(manifest)
	testutil.CheckError(t, false, err)

	// Same fake registry shape as TestGetPlatformsForImage.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/v2/":
			w.WriteHeader(http.StatusOK)
		case manifestPath:
			if r.Method != http.MethodGet {
				t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet)
			}
			mt, err := idx.MediaType()
			testutil.CheckError(t, false, err)
			w.Header().Set("Content-Type", string(mt))
			w.Write(rawManifest)
		default:
			t.Fatalf("Unexpected path: %v", r.URL.Path)
		}
	}))
	defer server.Close()
	u, err := url.Parse(server.URL)
	testutil.CheckError(t, false, err)
	tag := fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo)

	platforms, err := GetPlatforms(tag)
	testutil.CheckError(t, false, err)
	// NOTE(review): only the first two of the three index entries are given
	// platforms above; presumably GetPlatforms skips entries with a nil
	// Platform — confirm against the implementation.
	expectedPlatforms := []spec.Platform{
		{Architecture: "arm64", OS: "linux"},
		{Architecture: "amd64", OS: "linux"},
	}
	testutil.CheckDeepEqual(t, expectedPlatforms, platforms)
}
package greedy

/*
Greedy Algorithm: a solution is constructed through a sequence of steps. At
each step, the choice that is locally optimal is made. Greedy algorithms are
generally used to solve optimization problems: the next piece of data to
process is chosen based on the data already processed, picking the locally
optimal candidate each time. Greedy algorithms do not always yield an optimal
solution.

For example:
· Minimal spanning tree: Prim's algorithm, Kruskal's algorithm
· Dijkstra's algorithm for the single-source shortest-path problem
· Greedy algorithm for the Knapsack problem
· The coin exchange problem
· Huffman trees for optimal encoding
*/
package config // RedisConfig redis 配置 type RedisConfig interface { GetEnabled() bool GetConn() string GetPassword() string GetDBNum() int } type defaultRedisConfig struct { Enabled bool `json:"enabled"` Conn string `json:"conn"` Password string `json:"password"` DBNum int `json:"dbNum"` Timeout int `json:"timeout"` } // GetEnabled redis 配置是否激活 func (r defaultRedisConfig) GetEnabled() bool { return r.Enabled } // GetConn redis 地址 func (r defaultRedisConfig) GetConn() string { return r.Conn } // GetPassword redis 密码 func (r defaultRedisConfig) GetPassword() string { return r.Password } // GetDBNum redis 数据库分区序号 func (r defaultRedisConfig) GetDBNum() int { return r.DBNum }
package models

import (
	"time"
)

// GamePlayer holds per-player game state (seat, hole cards, betting state and
// balances) backed by the tbl_game_player table.
// NOTE(review): the meanings of the State/Stage/BetType integer codes are not
// visible in this file — confirm against the game logic before documenting
// specific values.
type GamePlayer struct {
	State int `json:"state"`
	ChairIndex int `json:"chairIndex"`
	RoomIndex int `json:"roomIndex"`
	Round int `json:"round"`
	Card1 int `json:"card1"`
	Card2 int `json:"card2"`
	BuyInLeft int64 `json:"buyInLeft"`
	OrderNo int `json:"orderNo"`
	Stage int `json:"stage"`
	BetStatus int `json:"betStatus"`
	BetType int `json:"betType"`
	LastBetType int `json:"lastBetType"`
	BetCount int `json:"betCount"`
	LastBet int64 `json:"lastBet"`
	LastCall int64 `json:"lastCall"`
	LastRaise int64 `json:"lastRaise"`
	TotalBet int64 `json:"totalBet"`
	StageBet int64 `json:"stageBet"`
	LastActionDate time.Time `json:"lastActionDate"`
	NoActionCount int `json:"noActionCount"`
	Coin int64 `json:"coin"`
	NickName string `json:"nickName"`
	UserIndex int64 `json:"userIndex"`
	Result HandResult `json:"result"`
}

// TableName returns the database table name backing this model.
func (GamePlayer) TableName() string {
	return "tbl_game_player"
}
package validations

import (
	"fmt"

	"github.com/lestrrat/go-jsschema"
)

// FormatValidation represents a validation derived from a JSON-schema
// "format" keyword.
type FormatValidation struct {
	Format string
}

// NewFormatValidation builds a FormatValidation from the schema's format
// field; it fails when the schema carries no format.
func NewFormatValidation(s *schema.Schema) (FormatValidation, error) {
	format := string(s.Format)
	if format == "" {
		return FormatValidation{}, fmt.Errorf("not initialized")
	}
	return FormatValidation{Format: format}, nil
}

// Func returns the name of the validator function this validation maps to.
func (v FormatValidation) Func() string {
	return "Format"
}

// Args returns the template arguments for the validator.
func (v FormatValidation) Args() map[string]interface{} {
	args := make(map[string]interface{}, 1)
	args["Format"] = v.Format
	return args
}
package model

import (
	"errors"
)

// ErrNullField is returned when a required field value is null.
// The message is lowercase per Go convention (staticcheck ST1005); compare
// with errors.Is(err, ErrNullField), not by string.
var ErrNullField = errors.New("field value is null")

// User mirrors the users API payload. All fields are pointers so that a
// missing/null JSON value is distinguishable from the zero value.
type User struct {
	ID        *uint32 `json:"id"`
	Email     *string `json:"email"`
	FirstName *string `json:"first_name"`
	LastName  *string `json:"last_name"`
	Gender    *string `json:"gender"`
	// BirthDate is a numeric timestamp; units are not visible here — TODO
	// confirm (epoch seconds vs another encoding) against the API.
	BirthDate *int64 `json:"birth_date"`
}

// Users is the wrapper object for a list of users.
type Users struct {
	Users []User `json:"users"`
}
package http

import (
	"net/http"
	"sync"

	"github.com/go-kratos/kratos/pkg/conf/paladin"
	"github.com/go-kratos/kratos/pkg/log"
	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
	"github.com/go-kratos/kratos/pkg/net/http/blademaster/binding"
	"github.com/go-kratos/kratos/pkg/net/rpc/warden"

	utilerr "way-jasy-cron/common/util/err"
	"way-jasy-cron/user/internal/server/grpc"
	"way-jasy-cron/user/internal/service"
	pb "way-jasy-cron/user/userapi"
)

var (
	// svc is read by every handler; it must be assigned before the server
	// starts accepting requests.
	svc *service.Service
	e   *bm.Engine

	onlyOnceShutdown = make(chan struct{})
)

// initRouter wires up the account and test routes on the engine.
// openVerify and the account handlers are defined elsewhere in this package.
func initRouter(e *bm.Engine) {
	e.Ping(ping)
	g := e.Group("/account", openVerify)
	{
		g.POST("/login", verifyLogin)
		g.POST("/register", register)
		g.GET("/pubKey", getPublicKey)
		g.POST("/crypt", generateRSA)
		g.GET("/info", getUserInfo)
	}
	t := e.Group("/test")
	{
		t.POST("", test)
	}
}

// ping reports service health; a failing service check yields 503.
func ping(ctx *bm.Context) {
	if err := svc.Ping(ctx); err != nil {
		log.Error("ping error(%v)", err)
		ctx.AbortWithStatus(http.StatusServiceUnavailable)
	}
}

// MustStart loads http.toml, builds the blademaster engine and starts it.
// As the Must prefix implies, it panics if the configuration cannot be read
// (the original silently returned, leaving e nil and failing later,
// obscurely, on first use).
func MustStart(s *service.Service) {
	var c struct {
		Server *bm.ServerConfig
		Auth   *warden.ClientConfig
	}
	if err := paladin.Get("http.toml").UnmarshalTOML(&c); err != nil {
		panic(err)
	}

	// Assign svc before the server starts: handlers dereference it, and the
	// original assignment after e.Start() raced with incoming requests.
	svc = s
	e = bm.DefaultServer(c.Server)

	var wg sync.WaitGroup
	// Add must precede the goroutine: the original called Add inside the
	// goroutine, so Wait could observe a zero counter and return before the
	// server had even begun starting.
	wg.Add(1)
	// initRouter stays in a goroutine as in the original (its note says
	// initRouter recovers panics that must not propagate here).
	go func() {
		defer wg.Done()
		initRouter(e)
		// Register handlers before Start so no request can arrive first.
		pb.RegisterTokenVerifyBMServer(e, &grpc.Server{Svc: s})
		utilerr.Check(e.Start())
	}()
	wg.Wait()
}

// test echoes the result of svc.TestShow for the posted "rec" value.
func test(ctx *bm.Context) {
	var req struct {
		Rec string `json:"rec" form:"rec"`
	}
	if err := ctx.BindWith(&req, binding.JSON); err != nil {
		return
	}
	ctx.JSON(nil, svc.TestShow(ctx, req.Rec))
}
package drivers

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestNormalizeDSN exercises NormalizeDSN across every supported driver.
// Each row supplies a driver, an optional explicit database name, and an
// input DSN, and pins the expected normalized (name, dsn) pair — or that an
// error is returned for empty/whitespace input.
func TestNormalizeDSN(t *testing.T) {
	const (
		myDSN = "root@tcp(0.0.0.0:3306)/test?parseTime=true"
		pgDSN = "postgres://root:password@0.0.0.0:5432/test"
		msDSN = "sqlserver://sa:password@0.0.0.0:5432?database=test"
	)
	// dir is a fresh temp directory, used by the SQLite path-handling cases.
	var dir = t.TempDir()
	tests := []struct {
		drv     Driver
		name    string
		dsn     string
		nameOut string
		dsnOut  string
		Err     assert.ErrorAssertionFunc
	}{
		// Network drivers: empty / whitespace DSNs are rejected; a valid DSN
		// passes through unchanged with the database name extracted from it.
		{MySQL, "", "", "", "", assert.Error},
		{MySQL, "", "\n", "", "", assert.Error},
		{MySQL, "", myDSN, "test", myDSN, assert.NoError},
		{Postgres, "", "", "", "", assert.Error},
		{Postgres, "", "\n", "", "", assert.Error},
		{Postgres, "", pgDSN, "test", pgDSN, assert.NoError},
		{SQLServer, "", "", "", "", assert.Error},
		{SQLServer, "", "\n", "", "", assert.Error},
		{SQLServer, "", msDSN, "test", msDSN, assert.NoError},
		// SQLite without an explicit name: defaults to "db"; a directory DSN
		// gets "db.sqlite" appended; an existing .sqlite path wins.
		{SQLite, "", "", "db", "db.sqlite", assert.NoError},
		{SQLite, "", "\n", "", "", assert.Error},
		{SQLite, "", dir, "db", dir + "/db.sqlite", assert.NoError},
		{SQLite, "", "db", "db", "db/db.sqlite", assert.NoError},
		{SQLite, "", "db.sqlite", "db", "db.sqlite", assert.NoError},
		{SQLite, "", dir + "/db.sqlite", "db", dir + "/db.sqlite", assert.NoError},
		// SQLite with an explicit name: the name determines the file name
		// unless the DSN already points to a concrete .sqlite file.
		{SQLite, "test", "", "test", "test.sqlite", assert.NoError},
		{SQLite, "test", "\n", "", "", assert.Error},
		{SQLite, "test", dir, "test", dir + "/test.sqlite", assert.NoError},
		{SQLite, "test", "db", "test", "db/test.sqlite", assert.NoError},
		{SQLite, "test", "db.sqlite", "db", "db.sqlite", assert.NoError},
		{SQLite, "test", dir + "/db.sqlite", "db", dir + "/db.sqlite", assert.NoError},
		{SQLite, "test", "foo/db/", "test", "foo/db/test.sqlite", assert.NoError},
		{SQLite, "test", "http://", "test", "http://test.sqlite", assert.NoError},
		{SQLite, "test", "http://foo.", "test", "http://foo./test.sqlite", assert.NoError},
		// Unknown drivers: everything passes through untouched.
		{"unkn", "test", "test", "test", "test", assert.NoError},
	}
	for _, test := range tests {
		name, dsn, err := NormalizeDSN(test.name, test.drv, test.dsn)
		test.Err(t, err)
		assert.Equal(t, test.nameOut, name)
		assert.Equal(t, test.dsnOut, dsn)
	}
}
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Package (cmdline) token implements the "token" command tree for inspecting
// hardware/software token providers and the objects they hold.
package token

import (
	"errors"
	"fmt"
	"os"
	"sort"
	"strings"

	"github.com/spf13/cobra"

	"github.com/sassoftware/relic/v7/cmdline/shared"
	"github.com/sassoftware/relic/v7/config"
	"github.com/sassoftware/relic/v7/token"
	"github.com/sassoftware/relic/v7/token/open"
)

// TokenCmd is the parent "token" command; subcommands are attached in init.
var TokenCmd = &cobra.Command{
	Use:   "token",
	Short: "View and manipulate token objects",
}

// TokensCmd lists the tokens a driver exposes.
var TokensCmd = &cobra.Command{
	Use:   "list",
	Short: "List tokens provided by a driver",
	RunE:  tokensCmd,
}

// ContentsCmd lists the key objects stored in one token.
var ContentsCmd = &cobra.Command{
	Use:   "contents",
	Short: "List keys in a token",
	RunE:  contentsCmd,
}

// Flag storage. NOTE(review): argToken and argLabel are referenced below but
// declared elsewhere in this package.
var (
	argType     string
	argProvider string
	argId       string
	argValues   bool
)

func init() {
	shared.RootCmd.AddCommand(TokenCmd)
	TokenCmd.PersistentFlags().StringVarP(&argToken, "token", "t", "", "Name of token")
	TokenCmd.PersistentFlags().StringVar(&argProvider, "provider", "", "Provider module path")
	TokenCmd.AddCommand(TokensCmd)
	TokenCmd.AddCommand(ContentsCmd)
	ContentsCmd.Flags().StringVarP(&argLabel, "label", "l", "", "Display objects with this label only")
	ContentsCmd.Flags().StringVarP(&argId, "id", "i", "", "Display objects with this ID only")
	ContentsCmd.Flags().BoolVarP(&argValues, "values", "v", false, "Show contents of objects")
	shared.AddLateHook(addProviderTypeHelp) // deferred so token providers can init()
}

// addProviderTypeHelp registers the --type flag with a help string that
// enumerates every provider type registered in token.Listers, sorted for
// deterministic output.
func addProviderTypeHelp() {
	var listable []string
	for ptype := range token.Listers {
		listable = append(listable, ptype)
	}
	sort.Strings(listable)
	TokenCmd.PersistentFlags().StringVar(&argType, "type", "", fmt.Sprintf("Provider type (%s)", strings.Join(listable, ", ")))
}

// tokensCmd implements "token list". Either --token or the pair
// --type/--provider must be given; a named token fills in whichever of
// type/provider was not supplied explicitly.
func tokensCmd(cmd *cobra.Command, args []string) error {
	if argToken == "" && (argType == "" || argProvider == "") {
		return errors.New("--token, or --type and --provider, are required")
	}
	if err := shared.InitConfig(); err != nil {
		return err
	}
	if argToken != "" {
		tokenConf, err := shared.CurrentConfig.GetToken(argToken)
		if err != nil {
			return err
		}
		// Flags win over the configured token; only fill in blanks.
		if argType == "" {
			argType = tokenConf.Type
		}
		if argProvider == "" {
			argProvider = tokenConf.Provider
		}
	}
	return shared.Fail(open.List(argType, argProvider, os.Stdout))
}

// contentsCmd implements "token contents": it resolves (or synthesizes) a
// token configuration, opens the token, and lists its keys to stdout.
func contentsCmd(cmd *cobra.Command, args []string) error {
	if argToken == "" && (argType == "" || argProvider == "") {
		return errors.New("--token, or --type and --provider, are required")
	}
	if err := shared.InitConfig(); err != nil {
		return err
	}
	var tokenConf *config.TokenConfig
	if argToken != "" {
		var err error
		tokenConf, err = shared.CurrentConfig.GetToken(argToken)
		if err != nil {
			return err
		}
	} else {
		// No named token: create a placeholder config from the flags alone.
		argToken = ":new-token:"
		tokenConf = shared.CurrentConfig.NewToken(argToken)
	}
	if argType != "" {
		tokenConf.Type = argType
	}
	if argProvider != "" {
		tokenConf.Provider = argProvider
	}
	// openToken is defined elsewhere in this package.
	tok, err := openToken(argToken)
	if err != nil {
		return err
	}
	return shared.Fail(tok.ListKeys(token.ListOptions{
		Output: os.Stdout,
		Label:  argLabel,
		ID:     argId,
		Values: argValues,
	}))
}
// Copyright 2020 Ye Zi Jie. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: FishGoddess
// Email: fishgoddess@qq.com
// Created at 2020/03/03 23:39:39

// This example demonstrates logit's three file-backed handlers: a plain
// file handler, a duration-rolling handler and a size-rolling handler.
package main

import (
	"time"

	"github.com/FishGoddess/logit"
	"github.com/FishGoddess/logit/files"
)

func main() {
	// NewFileHandler creates a handler which logs to a file.
	// It just needs a file path like "D:/test.log" and a logger level.
	logger := logit.NewLogger(logit.DebugLevel, logit.NewFileHandler("D:/test.log", logit.TextEncoder(), logit.DefaultTimeFormat))
	logger.Info("I am info message!")

	// NewDurationRollingHandler creates a duration-rolling handler with the given duration.
	// You should appoint a directory to store all log files generated over time.
	// Notice that the duration must not be less than minDuration (generally time.Second), see files.minDuration.
	// Also, the default filename of a log file is like "20200304-145246-45.log", see files.NewFilename.
	// If you want to appoint another filename, check this and do it that way.
	// See files.NewDurationRollingFile (it is an implementation of io.Writer).
	logger = logit.NewLogger(logit.DebugLevel, logit.NewDurationRollingHandler("D:/", 24*time.Hour, logit.TextEncoder(), logit.DefaultTimeFormat))
	logger.Info("Rolling!!!")

	// NewSizeRollingHandler creates a file-size-rolling handler with the given limitedSize.
	// You should appoint a directory to store all log files generated over time.
	// Notice that limitedSize must not be less than minLimitedSize (generally 64 KB), see files.minLimitedSize.
	// Check files.KB, files.MB, files.GB to know what unit you are going to use.
	// Also, the default filename of a log file is like "20200304-145246-45.log", see nextFilename.
	// If you want to appoint another filename, check this and do it that way.
	// See files.NewSizeRollingFile (it is an implementation of io.Writer).
	logger = logit.NewLogger(logit.DebugLevel, logit.NewSizeRollingHandler("D:/", 64*files.KB, logit.TextEncoder(), logit.DefaultTimeFormat))
	logger.Info("file size???")
}
package sweep

import (
	"strconv"
)

// Int2 represents a 2 byte (2-digit, zero-padded ASCII) integer.
type Int2 [2]byte

// Int4 represents a 4 byte integer.
type Int4 [4]byte

// Int6 represents a 6 byte integer.
type Int6 [6]byte

// pad fills dst with ASCII '0' and writes the decimal digits of n
// right-aligned into it. The caller guarantees n fits in len(dst) digits.
//
// The original constructors only initialized the first two bytes to '0'
// ([size]byte{'0', '0'}), leaving the remaining bytes as NUL for Int4/Int6 —
// e.g. NewInt4(5) produced "00\x005" instead of "0005", which also broke
// Int() since strconv.Atoi fails on the NUL byte.
func pad(dst []byte, n int) {
	for i := range dst {
		dst[i] = '0'
	}
	s := strconv.Itoa(n)
	copy(dst[len(dst)-len(s):], s)
}

// NewInt2 returns a new 2 byte integer with the given integer.
// It panics unless 0 <= n <= 99.
func NewInt2(n int) Int2 {
	if n < 0 || n > 99 {
		panic("sweep: NewInt2: 0 <= n <= 99 must be true")
	}
	var ret Int2
	pad(ret[:], n)
	return ret
}

// NewInt4 returns a new 4 byte integer with the given integer.
// It panics unless 0 <= n <= 9999.
func NewInt4(n int) Int4 {
	if n < 0 || n > 9999 {
		panic("sweep: NewInt4: 0 <= n <= 9999 must be true")
	}
	var ret Int4
	pad(ret[:], n)
	return ret
}

// NewInt6 returns a new 6 byte integer with the given integer.
// It panics unless 0 <= n <= 999999.
func NewInt6(n int) Int6 {
	if n < 0 || n > 999999 {
		// Fixed: the original message misreported the function as NewInt4.
		panic("sweep: NewInt6: 0 <= n <= 999999 must be true")
	}
	var ret Int6
	pad(ret[:], n)
	return ret
}

// String returns the 2 byte integer in string representation.
func (n Int2) String() string {
	return string(n[:])
}

// String returns the 4 byte integer in string representation.
func (n Int4) String() string {
	return string(n[:])
}

// String returns the 6 byte integer in string representation.
func (n Int6) String() string {
	return string(n[:])
}

// Int returns the 2 byte integer in integer representation.
func (n Int2) Int() int {
	num, _ := strconv.Atoi(n.String())
	return num
}

// Int returns the 4 byte integer in integer representation.
func (n Int4) Int() int {
	num, _ := strconv.Atoi(n.String())
	return num
}

// Int returns the 6 byte integer in integer representation.
func (n Int6) Int() int {
	num, _ := strconv.Atoi(n.String())
	return num
}
// purpose // to divide and conquer a summing task package sub2 import ( "fmt" ) func sum(s []int, c chan int) { sum := 0 for _, item := range s { sum += item } c <- sum } func main() { p := fmt.Println s := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} c := make(chan int) // make is used for maps, slices and chans go sum(s[:len(s)/2], c) go sum(s[len(s)/2:], c) x, y := <-c, <-c p(x + y) }
package api

import (
	"boilerplate/pkg/config"
	"net/http"
)

// healthCheckResponse is the JSON payload returned by the health endpoint.
type healthCheckResponse struct {
	Commit  string `json:"commit"`
	Healthy bool   `json:"healthy"`
}

// healthHandler reports the build commit and a static healthy flag with 200 OK.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	resultResponseJSON(w, http.StatusOK, healthCheckResponse{
		Commit:  config.Commit,
		Healthy: true,
	})
}
package publisher

import (
	"log"

	"github.com/thisiserico/golabox/eventbus"
)

// Publisher forwards events onto a shared event-bus channel.
type Publisher struct {
	bus chan eventbus.Event
}

// NewPublisher wraps ch in a Publisher.
func NewPublisher(ch chan eventbus.Event) *Publisher {
	return &Publisher{bus: ch}
}

// Publish logs the event and pushes it onto the bus; the send blocks until
// a consumer receives it (or buffer space is available).
func (p *Publisher) Publish(ev eventbus.Event) {
	log.Printf("publishing event %s:%s\n", ev.EventName(), ev.EventID())
	p.bus <- ev
}
package http

import (
	"errors"
	error3 "github.com/payfazz/fazzkit/fazzkiterror"
	"net/http"
	"testing"
)

// Sentinel errors used as registration keys in the test below.
var error1 = errors.New(`invalid_code`)
var error2 = errors.New(`invalid_code_me`)

// Test_ErrorMapper verifies that registered errors map to their registered
// HTTP status codes, unregistered errors fall back to 500, and a wrapped
// runtime error resolves to the code of its underlying error.
func Test_ErrorMapper(t *testing.T) {
	translator := NewErrorMapper()
	translator.RegisterError(error1, http.StatusUnprocessableEntity)
	translator.RegisterError(error2, http.StatusUnauthorized)

	// Registered errors return the code they were registered with.
	httpError := translator.GetCode(error1)
	if httpError != http.StatusUnprocessableEntity {
		t.Error(`code_not_match`)
	}
	httpError = translator.GetCode(error2)
	if httpError != http.StatusUnauthorized {
		t.Error(`code_not_match`)
	}

	// An unknown error defaults to 500 Internal Server Error.
	httpError = translator.GetCode(errors.New(`new_error`))
	if httpError != http.StatusInternalServerError {
		t.Error(`code_not_match`)
	}

	// A runtime error wrapping error1 maps to error1's registered code.
	err := error3.NewRuntimeError(error1, errors.New("new_error"))
	httpError = translator.GetCode(err)
	if httpError != http.StatusUnprocessableEntity {
		t.Error(`code_not_match`)
	}
}
package main

import "fmt"

// State is anything that can be started.
type State interface {
	Start()
}

// CommonState is the ordinary state.
type CommonState struct{}

// Start announces a common start.
func (s *CommonState) Start() {
	fmt.Println("common start")
}

// InitState is the initial state.
type InitState struct{}

// Start announces an initial start.
func (s *InitState) Start() {
	fmt.Println("init start")
}

// start dispatches to the concrete state's Start method.
func start(state State) {
	state.Start()
}

// main starts an InitState followed by a CommonState, printing
// "init start" then "common start".
func main() {
	states := []State{&InitState{}, &CommonState{}}
	for _, s := range states {
		start(s)
	}
}
/*
 * Npcf_SMPolicyControl API
 *
 * Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
 *
 * API version: 1.0.4
 * Generated by: OpenAPI Generator (https://openapi-generator.tech)
 */

package openapi

// AccuUsageReport carries accumulated usage (volume and time) for one
// usage-monitoring instance. Generated code: do not edit by hand.
type AccuUsageReport struct {
	// An id referencing UsageMonitoringData objects associated with this usage report.
	RefUmIds string `json:"refUmIds"`
	// Unsigned integer identifying a volume in units of bytes.
	VolUsage int64 `json:"volUsage,omitempty"`
	// Unsigned integer identifying a volume in units of bytes.
	VolUsageUplink int64 `json:"volUsageUplink,omitempty"`
	// Unsigned integer identifying a volume in units of bytes.
	VolUsageDownlink int64 `json:"volUsageDownlink,omitempty"`
	// Accumulated time usage; unit not stated in this excerpt — see the
	// 3GPP TS 29.512 definition of TimeUsage to confirm.
	TimeUsage int32 `json:"timeUsage,omitempty"`
	// Unsigned integer identifying a volume in units of bytes.
	NextVolUsage int64 `json:"nextVolUsage,omitempty"`
	// Unsigned integer identifying a volume in units of bytes.
	NextVolUsageUplink int64 `json:"nextVolUsageUplink,omitempty"`
	// Unsigned integer identifying a volume in units of bytes.
	NextVolUsageDownlink int64 `json:"nextVolUsageDownlink,omitempty"`
	// Next time-usage threshold; unit not stated in this excerpt.
	NextTimeUsage int32 `json:"nextTimeUsage,omitempty"`
}
package lang

import (
	"bitbucket.org/pkg/inflect"
	"regexp"
	"strings"
)

// Articles lists the leading words this package treats as articles/determiners.
var Articles = []string{"the", "a", "an", "our", "some"}

var articleBar = strings.Join(Articles, "|")

// articles matches a case-insensitive article at the start of a string,
// followed by exactly one whitespace character.
var articles = regexp.MustCompile(`^((?i)` + articleBar + `)\s`)

const NewLine = "\n"
const Space = " "

// SliceArticle splits a leading article off str, returning the article (empty
// when none matched) and the remaining text, both whitespace-trimmed.
func SliceArticle(str string) (article, bare string) {
	n := strings.TrimSpace(str)
	if pair := articles.FindStringIndex(n); pair == nil {
		bare = n
	} else {
		// pair[1] sits just past the \s the pattern consumed; back up one
		// byte so the article itself excludes the separator.
		split := pair[1] - 1
		article = n[:split]
		bare = strings.TrimSpace(n[split:])
	}
	return article, bare
}

// StripArticle returns str with any leading article removed.
func StripArticle(str string) string {
	_, bare := SliceArticle(str)
	return bare
}

// var Singularize = inflect.Singularize
// var Pluralize = inflect.Pluralize

// Capitalize returns a new string, starting the first word with a capital.
var Capitalize = inflect.Capitalize

// Titleize returns a new string, starting every word with a capital.
var Titleize = inflect.Titleize

// StartsWith reports whether s begins with any of the given prefixes.
func StartsWith(s string, set ...string) (ok bool) {
	for _, x := range set {
		if strings.HasPrefix(s, x) {
			ok = true
			break
		}
	}
	return ok
}

// StartsWithVowel reports whether str begins with a vowel *sound* (for
// choosing "a" vs "an"): vowel letters minus known consonant-sound
// exceptions ("unicorn", "once", ...), plus silent-H words ("hour", ...).
//http://www.mudconnect.com/SMF/index.php?topic=74725.0
func StartsWithVowel(str string) (vowelSound bool) {
	s := strings.ToUpper(str)
	if StartsWith(s, "A", "E", "I", "O", "U") {
		if !StartsWith(s, "EU", "EW", "ONCE", "ONE", "OUI", "UBI", "UGAND", "UKRAIN", "UKULELE", "ULYSS", "UNA", "UNESCO", "UNI", "UNUM", "URA", "URE", "URI", "URO", "URU", "USA", "USE", "USI", "USU", "UTA", "UTE", "UTI", "UTO") {
			vowelSound = true
		}
	} else if StartsWith(s, "HEIR", "HERB", "HOMAGE", "HONEST", "HONOR", "HONOUR", "HORS", "HOUR") {
		vowelSound = true
	}
	return vowelSound
}
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

var (
	// DebugLogger emits debug-level entries as well
	// (original comment: "Debugが出力されるLogger").
	DebugLogger *zap.SugaredLogger
	// DefaultLogger is the everyday logger; debug entries are suppressed
	// (original comment: "通常のLogger").
	DefaultLogger *zap.SugaredLogger
)

// initLoggers builds a tee of a console core (stdout) and a JSON file core
// (/tmp/zaplog.out), wires up the package-level loggers, and returns a
// cleanup function that flushes and closes the file sink.
func initLoggers() func() {
	// Human-readable console output at debug level.
	consoleEnc := zap.NewDevelopmentEncoderConfig()
	core1 := zapcore.NewCore(zapcore.NewConsoleEncoder(consoleEnc), os.Stdout, zap.DebugLevel)

	sink, closer, err := zap.Open("/tmp/zaplog.out")
	if err != nil {
		panic(err)
	}
	// Machine-readable JSON output to the file, also at debug level.
	fileEnc := zap.NewProductionEncoderConfig()
	core2 := zapcore.NewCore(zapcore.NewJSONEncoder(fileEnc), sink, zap.DebugLevel)

	core := zapcore.NewTee(core1, core2)
	options := []zap.Option{
		zap.AddStacktrace(zap.WarnLevel),
		zap.WithCaller(true),
	}
	logger := zap.New(core, options...)
	DebugLogger = logger.Sugar()
	// DefaultLogger shares the cores but raises the floor to info level.
	DefaultLogger = logger.WithOptions(zap.IncreaseLevel(zap.InfoLevel)).Sugar()
	// Route zap's globals and the stdlib "log" package through this logger.
	zap.ReplaceGlobals(logger)
	zap.RedirectStdLog(logger)
	return func() {
		logger.Sync()
		closer()
	}
}

func main() {
	defer initLoggers()()
	DefaultLogger.Debug("debug log from default logger")
	DefaultLogger.Error("error log from default logger")
	DebugLogger.Debug("debug log from debug logger")
	DebugLogger.Error("error log from debug logger")
	zap.S().Info("info log from zap.S()")
}
package discover

import (
	"math/rand"
	"sync"
	"time"

	"github.com/iotaledger/hive.go/autopeering/peer"
	"github.com/iotaledger/hive.go/autopeering/server"
	"github.com/iotaledger/hive.go/crypto/identity"
	"github.com/iotaledger/hive.go/logger"
	"github.com/iotaledger/hive.go/runtime/timeutil"
)

const (
	// PingExpiration is the time until a peer verification expires.
	PingExpiration = 12 * time.Hour
	// MaxPeersInResponse is the maximum number of peers returned in DiscoveryResponse.
	MaxPeersInResponse = 6
	// MaxServices is the maximum number of services a peer can support.
	MaxServices = 5
)

// network abstracts the transport operations the manager needs: access to
// the local peer and the Ping/DiscoveryRequest protocol messages.
type network interface {
	local() *peer.Local
	Ping(*peer.Peer) error
	DiscoveryRequest(*peer.Peer) ([]*peer.Peer, error)
}

// manager maintains the discovered peers: a bounded list of active peers,
// ordered newest-first, plus a list of replacement candidates that take a
// slot when an active peer is deleted.
type manager struct {
	// masters are configured bootstrap peers; they are never removed.
	masters []*mpeer

	mutex        sync.Mutex // protects active and replacement
	active       []*mpeer
	replacements []*mpeer

	events *Events

	net network
	log *logger.Logger

	wg      sync.WaitGroup
	closing chan struct{}
}

func newManager(net network, masters []*peer.Peer, log *logger.Logger) *manager {
	return &manager{
		masters:      wrapPeers(masters),
		active:       make([]*mpeer, 0, maxManaged),
		replacements: make([]*mpeer, 0, maxReplacements),
		events:       newEvents(),
		net:          net,
		log:          log,
		closing:      make(chan struct{}),
	}
}

// start seeds the peer lists and launches the background loop.
func (m *manager) start() {
	m.loadInitialPeers()
	m.wg.Add(1)
	go m.loop()
}

// self returns the local peer's identity.
func (m *manager) self() identity.ID {
	return m.net.local().ID()
}

// close signals the loop to stop and blocks until it has exited.
func (m *manager) close() {
	close(m.closing)
	m.wg.Wait()
}

// loop alternates two periodic background tasks — reverification of the
// oldest peer and querying for new peers — each run in its own goroutine,
// at most one instance of each at a time.
func (m *manager) loop() {
	defer m.wg.Done()

	var (
		reverify     = time.NewTimer(0) // setting this to 0 will cause a trigger right away
		reverifyDone chan struct{}

		query     = time.NewTimer(server.ResponseTimeout) // trigger the first query after the reverify
		queryNext chan time.Duration
	)
	defer timeutil.CleanupTimer(reverify)
	defer timeutil.CleanupTimer(query)

Loop:
	for {
		select {
		// start verification, if not yet running
		case <-reverify.C:
			// if there is no reverifyDone, this means doReverify is not running
			if reverifyDone == nil {
				reverifyDone = make(chan struct{})
				go m.doReverify(reverifyDone)
			}

		// reset verification
		case <-reverifyDone:
			reverifyDone = nil
			reverify.Reset(reverifyInterval) // reverify again after the given interval

		// start requesting new peers, if no yet running
		case <-query.C:
			if queryNext == nil {
				queryNext = make(chan time.Duration)
				go m.doQuery(queryNext)
			}

		// on query done, reset time to given duration
		case d := <-queryNext:
			queryNext = nil
			query.Reset(d)

		// on close, exit the loop
		case <-m.closing:
			break Loop
		}
	}

	// wait for spawned goroutines to finish
	if reverifyDone != nil {
		<-reverifyDone
	}
	if queryNext != nil {
		<-queryNext
	}
}

// doReverify pings the oldest active peer.
func (m *manager) doReverify(done chan<- struct{}) {
	defer close(done)

	p := m.peerToReverify()
	if p == nil {
		return // nothing can be reverified
	}
	m.log.Debugw("reverifying",
		"peer", p,
	)

	// could not verify the peer
	if m.net.Ping(p) != nil {
		m.deletePeer(p.ID())
		return
	}

	// no need to do anything here, as the peer is bumped when handling the pong
}

// deletePeer deletes the peer with the given ID from the list of managed peers.
// Master peers are never dropped: they are reset and re-queued instead.
func (m *manager) deletePeer(id identity.ID) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	var mp *mpeer
	m.active, mp = deletePeerByID(m.active, id)
	if mp == nil {
		return // peer no longer exists
	}

	// master peers are never removed
	if containsPeer(m.masters, id) {
		// reset verifiedCount and re-add them to the front of the active peers
		mp.verifiedCount.Store(0)
		m.active = unshiftPeer(m.active, mp, maxManaged)
		return
	}

	m.log.Debugw("deleted",
		"peer", mp,
	)
	if mp.verifiedCount.Load() > 0 {
		m.events.PeerDeleted.Trigger(&PeerDeletedEvent{Peer: unwrapPeer(mp)})
	}

	// add a random replacement, if available
	if len(m.replacements) > 0 {
		var r *mpeer
		//nolint:gosec // we do not care about weak random numbers here
		m.replacements, r = deletePeer(m.replacements, rand.Intn(len(m.replacements)))
		m.active = pushPeer(m.active, r, maxManaged)
	}
}

// peerToReverify returns the oldest peer, or nil if empty.
func (m *manager) peerToReverify() *peer.Peer {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if len(m.active) == 0 {
		return nil
	}
	// the last peer is the oldest
	return unwrapPeer(m.active[len(m.active)-1])
}

// updatePeer moves the peer with the given ID to the front of the list of managed peers.
// It returns 0 if there was no peer with that id, otherwise the verifiedCount of the updated peer is returned.
// Callers must hold m.mutex (addVerifiedPeer does).
func (m *manager) updatePeer(update *peer.Peer) uint {
	id := update.ID()
	for i, p := range m.active {
		if p.ID() == id {
			if i > 0 {
				// move i-th peer to the front
				copy(m.active[1:], m.active[:i])
				m.active[0] = p
			}
			// update the wrapped peer and verifiedCount
			p.setPeer(update)
			return uint(p.verifiedCount.Add(1))
		}
	}
	return 0
}

// addReplacement queues p as a replacement candidate; it reports false when
// p is already queued. Callers must hold m.mutex.
func (m *manager) addReplacement(p *mpeer) bool {
	if containsPeer(m.replacements, p.ID()) {
		return false // already in the list
	}
	m.replacements = unshiftPeer(m.replacements, p, maxReplacements)
	return true
}

// loadInitialPeers seeds the manager with peers from the local database
// (when present) plus the configured master peers.
func (m *manager) loadInitialPeers() {
	var peers []*peer.Peer

	db := m.net.local().Database()
	if db != nil {
		peers = db.SeedPeers()
	}

	peers = append(peers, unwrapPeers(m.masters)...)
	for _, p := range peers {
		m.addDiscoveredPeer(p)
	}
}

// addDiscoveredPeer adds a newly discovered peer that has never been verified or pinged yet.
// It returns true, if the given peer was new and added, false otherwise.
func (m *manager) addDiscoveredPeer(p *peer.Peer) bool {
	// never add the local peer
	if p.ID() == m.self() {
		return false
	}

	m.mutex.Lock()
	defer m.mutex.Unlock()

	if containsPeer(m.active, p.ID()) {
		return false
	}
	m.log.Debugw("discovered",
		"peer", p,
	)

	mp := newMPeer(p)
	if len(m.active) >= maxManaged {
		return m.addReplacement(mp)
	}

	// unverified peers are appended, not unshifted: newest-verified order is
	// maintained at the front.
	m.active = pushPeer(m.active, mp, maxManaged)
	return true
}

// addVerifiedPeer adds a new peer that has just been successfully pinged.
// It returns true, if the given peer was new and added, false otherwise.
//
//nolint:unparam // lets keep this for now
func (m *manager) addVerifiedPeer(p *peer.Peer) bool {
	// never add the local peer
	if p.ID() == m.self() {
		return false
	}
	m.log.Debugw("verified",
		"peer", p,
		"services", p.Services(),
	)

	m.mutex.Lock()
	defer m.mutex.Unlock()

	// if already in the list, move it to the front
	if v := m.updatePeer(p); v > 0 {
		// trigger the event only for the first time the peer is updated
		if v == 1 {
			m.events.PeerDiscovered.Trigger(&PeerDiscoveredEvent{Peer: p})
		}
		return false
	}

	mp := newMPeer(p)
	mp.verifiedCount.Add(1)

	if len(m.active) >= maxManaged {
		return m.addReplacement(mp)
	}
	// trigger the event only when the peer is added to active
	m.events.PeerDiscovered.Trigger(&PeerDiscoveredEvent{Peer: p})

	// new nodes are added to the front
	m.active = unshiftPeer(m.active, mp, maxManaged)
	return true
}

// masterPeers returns the master peers.
func (m *manager) masterPeers() []*mpeer {
	return m.masters
}

// randomPeers returns a list of randomly selected peers.
// Only peers verified at least minVerified times are eligible.
func (m *manager) randomPeers(n int, minVerified uint) []*mpeer {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if n > len(m.active) {
		n = len(m.active)
	}

	peers := make([]*mpeer, 0, n)
	for _, i := range rand.Perm(len(m.active)) {
		if len(peers) == n {
			break
		}
		p := m.active[i]
		if uint(p.verifiedCount.Load()) < minVerified {
			continue
		}
		peers = append(peers, p)
	}

	return peers
}

// getVerifiedPeers returns all the currently managed peers that have been verified at least once.
func (m *manager) verifiedPeers() []*mpeer {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	peers := make([]*mpeer, 0, len(m.active))
	for _, mp := range m.active {
		if mp.verifiedCount.Load() == 0 {
			continue
		}
		peers = append(peers, mp)
	}

	return peers
}

// isKnown returns true if the manager is keeping track of that peer.
func (m *manager) isKnown(id identity.ID) bool {
	if id == m.self() {
		return true
	}

	m.mutex.Lock()
	defer m.mutex.Unlock()

	return containsPeer(m.active, id) || containsPeer(m.replacements, id)
}
package student //SplitWhiteSpaces a function that separates the words of a string and puts them in a string array. func SplitWhiteSpaces(str string) []string { for str[0] == 9 || str[0] == 32 || str[0] == 10 { str = str[1:] } for str[lenStr2(str)-1] == 9 || str[lenStr2(str)-1] == 32 || str[lenStr2(str)-1] == 10 { str = str[:lenStr2(str)-1] } count := 0 for i := range str { if str[i] == 9 || str[i] == 32 || str[i] == 10 { if str[i-1] != 9 && str[i-1] != 32 && str[i-1] != 10 { count++ if count+1 < 0 { array := make([]string, 0) return array } } } } array := make([]string, count+1) index := 0 tempStr := "" for i := range str { if str[i] == 9 || str[i] == 32 || str[i] == 10 { if tempStr != "" { array[index] = tempStr tempStr = "" index++ } else { continue } } else { tempStr = tempStr + string(rune(str[i])) } } array[index] = tempStr return array }
package event

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
)

// ApiEventBus manages per-resource, per-cluster subscriptions and delivers
// change notifications through the configured ReplicasHook.
type ApiEventBus interface {
	// Subscribe starts watching the named resource in the given clusters.
	Subscribe(gvr schema.GroupVersionResource, name types.NamespacedName, clusterNames ...string) error
	// Unsubscribe stops watching the named resource in the given clusters.
	Unsubscribe(gvr schema.GroupVersionResource, name types.NamespacedName, clusterName ...string) error
	// SetChangeHook installs the hook invoked on replica changes.
	SetChangeHook(hook ReplicasHook)
}

// ReplicasReader reads the replica count for a resource; the bool reports
// whether a value was found.
type ReplicasReader interface {
	Get(gvr schema.GroupVersionResource, name types.NamespacedName) (int, bool, error)
}

// ReplicasHook receives sync callbacks for a resource in one cluster.
// NOTE(review): the meaning of the returned bool is not visible from this
// file — confirm against the implementation.
type ReplicasHook interface {
	Sync(gvr schema.GroupVersionResource, clusterName string, nsn types.NamespacedName) bool
}
package logstream

import "io"

// LogStream publishes log data and reports liveness.
type LogStream interface {
	// Publish streams the contents of stream under the given channel name.
	Publish(channel string, stream io.ReadCloser)
	// HeartBeat signals liveness for name; quit presumably stops the beat —
	// confirm against the implementation.
	HeartBeat(name string, quit chan int)
}
package converter

import (
	"strings"
)

// FirehoseConverter routes KinesisStream aggregated record datas (LTSV
// lines) to Firehose delivery streams.
type FirehoseConverter struct {
	DeliveryStream  string      // when non-empty, every record goes to this one stream
	DefaultStream   string      // fallback when the target column is absent
	TargetColumn    string      // LTSV label whose value selects the stream
	RemovePrefix    string      // prefix stripped from the target value
	AddPrefix       string      // prefix prepended to the resulting stream name
	ReplacePatterns [][2]string // ordered {old, new} pairs, first occurrence only
}

// ConvertToFirehoseDatas converts KinesisStream AggregatedRecordDatas to a
// FirehosePutDataMap (delivery stream name -> records to put). Records whose
// target column is missing or empty go to DefaultStream.
func (converter *FirehoseConverter) ConvertToFirehoseDatas(recordDatas []string) map[string][]string {
	result := map[string][]string{}
	if len(recordDatas) == 0 {
		return result
	}
	// A fixed delivery stream short-circuits all per-record routing.
	if converter.DeliveryStream != "" {
		result[converter.DeliveryStream] = recordDatas
		return result
	}
	for _, recordData := range recordDatas {
		targetValue := extractValueFromLtsvLine(recordData, converter.TargetColumn)
		if targetValue == "" {
			addMapValue(result, converter.DefaultStream, recordData)
		} else {
			addMapValue(result, converter.convertToDeliveryStream(targetValue), recordData)
		}
	}
	return result
}

// CreateReplacePatterns converts a string setting of the form
// "old1/new1,old2/new2" to structured ReplacePatterns; entries without a
// "/" are skipped.
func CreateReplacePatterns(replaceConfig string) [][2]string {
	result := [][2]string{}
	for _, replaceSetting := range strings.Split(replaceConfig, ",") {
		setting := strings.SplitN(replaceSetting, "/", 2)
		if len(setting) < 2 {
			continue
		}
		result = append(result, [2]string{setting[0], setting[1]})
	}
	return result
}

// convertToDeliveryStream maps a target-column value to a stream name:
// strip RemovePrefix, apply each replace pattern once, prepend AddPrefix.
func (converter *FirehoseConverter) convertToDeliveryStream(targetValue string) string {
	// TrimPrefix is the idiomatic equivalent of the original
	// HasPrefix + Replace(..., 1) pair (a no-op when the prefix is absent).
	result := strings.TrimPrefix(targetValue, converter.RemovePrefix)
	for _, replacePattern := range converter.ReplacePatterns {
		result = strings.Replace(result, replacePattern[0], replacePattern[1], 1)
	}
	return converter.AddPrefix + result
}

// addMapValue appends value to targetMap[key]; append handles the missing
// key (nil slice) case, so no explicit existence check is needed.
func addMapValue(targetMap map[string][]string, key string, value string) {
	targetMap[key] = append(targetMap[key], value)
}

// extractValueFromLtsvLine returns the value of targetLabel in a
// tab-separated "label:value" (LTSV) line, or "" when absent.
func extractValueFromLtsvLine(targetLine string, targetLabel string) string {
	for _, column := range strings.Split(targetLine, "\t") {
		lv := strings.SplitN(column, ":", 2)
		if len(lv) < 2 {
			continue
		}
		if lv[0] == targetLabel {
			return lv[1]
		}
	}
	return ""
}
// +build ignore package main import ( "fmt" ) func main() { fmt.Print("火星の表面で、私の体重は") fmt.Print(149.0 * 0.3783) fmt.Print("ボンド、年齢は?") }
/*
 * ProxyEye
 *
 * Author: Suremeo (github.com/Suremeo)
 *
 */

package world

import (
	"bytes"
	"github.com/Suremeo/ProxyEye/proxy/world/chunk"
	"github.com/sandertv/gophertunnel/minecraft"
	"github.com/sandertv/gophertunnel/minecraft/protocol/packet"
	"sync"
)

// Chunk is a world chunk plus the bookkeeping needed to share it between
// viewers; a chunk with no viewers left is dropped from its world.
type Chunk struct {
	*chunk.Chunk
	mutex sync.Mutex // protects viewers
	world *World
	Pos   chunk.Pos
	// viewers is the set of runtime IDs currently viewing this chunk.
	viewers map[uint64]struct{}
}

// AddViewer registers id as a viewer of this chunk.
func (c *Chunk) AddViewer(id uint64) {
	c.mutex.Lock()
	c.viewers[id] = struct{}{}
	c.mutex.Unlock()
}

// RemoveViewer unregisters id; when the last viewer leaves, the chunk is
// removed from the world.
func (c *Chunk) RemoveViewer(id uint64) {
	c.mutex.Lock()
	delete(c.viewers, id)
	if len(c.viewers) == 0 {
		c.world.removeChunk(c.Pos)
	}
	c.mutex.Unlock()
}

// SendTo network-encodes the chunk and writes it to conn as a LevelChunk
// packet. Sub chunks above the highest non-empty one are omitted entirely;
// empty sub chunks below it are encoded as version byte + zero storages.
func (c *Chunk) SendTo(conn *minecraft.Conn) error {
	var chunkBuf bytes.Buffer
	data := chunk.NetworkEncode(c.Chunk)

	// count is one past the highest non-empty sub chunk.
	count := byte(0)
	for y := byte(0); y < 16; y++ {
		if data.SubChunks[y] != nil {
			count = y + 1
		}
	}
	for y := byte(0); y < count; y++ {
		if data.SubChunks[y] == nil {
			_ = chunkBuf.WriteByte(chunk.SubChunkVersion)
			// We write zero here, meaning the sub chunk has no block storages: The sub chunk is completely
			// empty.
			_ = chunkBuf.WriteByte(0)
			continue
		}
		_, _ = chunkBuf.Write(data.SubChunks[y])
	}
	_, _ = chunkBuf.Write(data.Data2D)
	_, _ = chunkBuf.Write(data.BlockNBT)

	return conn.WritePacket(&packet.LevelChunk{
		ChunkX:        c.Pos[0],
		ChunkZ:        c.Pos[1],
		SubChunkCount: uint32(count),
		// Copy the buffer's bytes: the packet must own its payload.
		RawPayload: append([]byte(nil), chunkBuf.Bytes()...),
	})
}
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base

import (
	"regexp"
	"strings"

	"github.com/uber/kraken/lib/store/metadata"
)

// Mock metadata implementations for tests in this package; registered with
// the metadata factory registry so suffix lookups resolve to them.
func init() {
	metadata.Register(regexp.MustCompile("_mocksuffix_\\w+"), &mockMetadataFactory{})
	metadata.Register(regexp.MustCompile("_mockmovable"), &mockMetadataFactoryMovable{})
}

// mockMetadataFactory creates the non-movable mock metadata variants.
type mockMetadataFactory struct{}

// Create returns the mock whose suffix matches, or nil when none does.
func (f mockMetadataFactory) Create(suffix string) metadata.Metadata {
	if strings.HasSuffix(suffix, getMockMetadataOne().GetSuffix()) {
		return getMockMetadataOne()
	}
	if strings.HasSuffix(suffix, getMockMetadataTwo().GetSuffix()) {
		return getMockMetadataTwo()
	}
	return nil
}

// mockMetadata is a simple in-memory metadata.Metadata implementation whose
// serialized form is just its raw content.
type mockMetadata struct {
	randomSuffix string
	content      []byte
}

func getMockMetadataOne() *mockMetadata {
	return &mockMetadata{
		randomSuffix: "_mocksuffix_one",
	}
}

func getMockMetadataTwo() *mockMetadata {
	return &mockMetadata{
		randomSuffix: "_mocksuffix_two",
	}
}

// GetSuffix returns the file suffix this metadata is stored under.
func (m *mockMetadata) GetSuffix() string {
	return m.randomSuffix
}

// Movable reports whether the metadata should follow a moved file; the
// plain mock is not movable.
func (m *mockMetadata) Movable() bool {
	return false
}

func (m *mockMetadata) Serialize() ([]byte, error) {
	return m.content, nil
}

func (m *mockMetadata) Deserialize(b []byte) error {
	m.content = b
	return nil
}

// mockMetadataFactoryMovable creates the movable mock variant regardless of
// the concrete suffix (its registration regexp is exact).
type mockMetadataFactoryMovable struct{}

func (f mockMetadataFactoryMovable) Create(suffix string) metadata.Metadata {
	return getMockMetadataMovable()
}

// mockMetadataMovable mirrors mockMetadata but reports Movable() == true.
type mockMetadataMovable struct {
	randomSuffix string
	content      []byte
}

func getMockMetadataMovable() *mockMetadataMovable {
	return &mockMetadataMovable{
		randomSuffix: "_mockmovable",
	}
}

func (m *mockMetadataMovable) GetSuffix() string {
	return m.randomSuffix
}

func (m *mockMetadataMovable) Movable() bool {
	return true
}

func (m *mockMetadataMovable) Serialize() ([]byte, error) {
	return m.content, nil
}

func (m *mockMetadataMovable) Deserialize(b []byte) error {
	m.content = b
	return nil
}
package main

import (
	"bytes"
	"errors"
	"fmt"
	"net"
	"regexp"
	"strconv"
)

// Wire-format fragments for a minimal hand-rolled HTTP/1.1 implementation.
// Parsing is regexp-based and panics on malformed input; callers are
// expected to recover (error handling helpers such as Success and the
// SendHttp writer are defined elsewhere in this package).
var HTTP_END []byte = []byte("\r\n")
var HTTP_SEMICOLON = []byte(": ")

// RSP_MAP maps the status codes this server emits to their status lines.
var RSP_MAP = map[int]string{
	200: "200 OK",
	206: "206 Partial Content",
	400: "400 Bad Request",
	500: "500 Internal Server Error"}

var REQ_FORMAT string = "%s /%s HTTP/1.1\r\n"
var RSP_FORMAT string = "HTTP/1.1 %s\r\nContent-Length: %d\r\n"

// HEADER_REG matches one "Name: value\r\n" header line.
var HEADER_REG = regexp.MustCompile("(\\S+):\\s*(\\S*)\r\n")

// RSP_REG splits a response into status code, raw header block and body.
var RSP_REG = regexp.MustCompile("HTTP/1.1 (\\d+) \\w+\r\n([\\S\\s]*)\r\n([\\S\\s]*)")

// REQ_REG splits a GET/DELETE request into method, path, header block and body.
var REQ_REG = regexp.MustCompile("^(GET|DELETE) (.*) HTTP/1.1\r\n([\\S\\s]*)\r\n([\\S\\s]*)")

// InRequest is an inbound request parsed in place from Recv.
type InRequest struct {
	Recv   []byte
	Method []byte
	Path   []byte
	Head   []byte
	Body   []byte
	// Headers maps the lower-cased header name to the full regexp submatch
	// ([whole line, name, value]).
	Headers map[string][][]byte
}

// Parse splits Recv into method/path/head/body and indexes the headers by
// lower-cased name. Panics with REQ_FORMAT_ERROR on malformed input.
func (r *InRequest) Parse() {
	m := REQ_REG.FindSubmatch(r.Recv)
	if len(m) == 0 {
		panic(errors.New("REQ_FORMAT_ERROR"))
	}
	r.Method, r.Path, r.Head, r.Body = m[1], m[2], m[3], m[4]
	r.Headers = make(map[string][][]byte)
	headers := HEADER_REG.FindAllSubmatch(r.Head, -1)
	for _, h := range headers {
		r.Headers[string(bytes.ToLower(h[1]))] = h
	}
}

// OutRequest is an outbound request assembled from pre-encoded pieces.
type OutRequest struct {
	Method  []byte
	Path    []byte
	Headers [][]byte
	Bodys   [][]byte
}

// Send writes the request line, then delegates headers and bodies to
// SendHttp. Returns the byte count reported by SendHttp.
func (r *OutRequest) Send(conn *net.TCPConn) int {
	per := []byte(fmt.Sprintf(REQ_FORMAT, r.Method, r.Path))
	return SendHttp(conn, per, r.Headers, r.Bodys)
}

// InRespose is an inbound response parsed in place from Recv.
// (Name kept as-is — exported identifier used elsewhere.)
type InRespose struct {
	Status int
	Recv   []byte
	Head   []byte
	Body   []byte
	Headers map[string][][]byte
}

// Parse splits Recv into status/head/body and indexes headers by
// lower-cased name. Panics with RSP_FORMAT_ERROR on malformed input.
func (r *InRespose) Parse() {
	var err error
	m := RSP_REG.FindSubmatch(r.Recv)
	if len(m) == 0 {
		panic(errors.New("RSP_FORMAT_ERROR"))
	}
	r.Head, r.Body = m[2], m[3]
	r.Status, err = strconv.Atoi(string(m[1]))
	// Success presumably panics on non-nil err — defined elsewhere; confirm.
	Success(err)
	r.Headers = make(map[string][][]byte)
	headers := HEADER_REG.FindAllSubmatch(r.Head, -1)
	for _, h := range headers {
		r.Headers[string(bytes.ToLower(h[1]))] = h
	}
}

// OutRespose is an outbound response; Content-Length is computed from Bodys.
type OutRespose struct {
	Status  int
	Headers [][]byte
	Bodys   [][]byte
}

// Send writes the status line with the summed body length, then delegates
// headers and bodies to SendHttp.
func (r *OutRespose) Send(conn *net.TCPConn) int {
	cl := 0
	for _, b := range r.Bodys {
		cl += len(b)
	}
	per := []byte(fmt.Sprintf(RSP_FORMAT, RSP_MAP[r.Status], cl))
	return SendHttp(conn, per, r.Headers, r.Bodys)
}
package controller import ( "fmt" "github.com/gin-gonic/gin" "github.com/holywolfchan/yuncang/model" ) type entityController struct { } var EntityController = new(entityController) func (this entityController) GetFactory(c *gin.Context) { ret, err := model.EntityService.GetFactory() if err != nil { fallback(c, err) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: ret, }) } func (this entityController) QueryFactory(c *gin.Context) { id, err1 := getID(c) if err1 != nil { fallback(c, err1) return } ret, err2 := model.EntityService.QueryFactory(id) if err2 != nil { fallback(c, err2) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: ret, }) } func (this entityController) InsertFactory(c *gin.Context) { var info model.Factory if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.InsertFactory(&info) if err != nil { fallback(c, err) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "rowaffected": cnt, }, }) } func (this entityController) UpdateFactory(c *gin.Context) { var info model.Factory if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.UpdateFactory(&info) if err != nil || cnt == 0 { fallback(c, fmt.Errorf("%d rows affected:%v", cnt, err)) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "count": cnt, }, }) } func (this entityController) DeleteFactory(c *gin.Context) { var info model.Factory if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.DeleteFactory(info.Id) if err != nil || cnt == 0 { fallback(c, fmt.Errorf("%d rows affected:%v", cnt, err)) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "count": cnt, }, }) } ////////////////// func (this entityController) GetSupplier(c *gin.Context) { ret, err := model.EntityService.GetSupplier() if err != nil { fallback(c, err) return } 
c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: ret, }) } func (this entityController) QuerySupplier(c *gin.Context) { id, err1 := getID(c) if err1 != nil { fallback(c, err1) return } ret, err2 := model.EntityService.QuerySupplier(id) if err2 != nil { fallback(c, err2) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: ret, }) } func (this entityController) InsertSupplier(c *gin.Context) { var info model.Supplier if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.InsertSupplier(&info) if err != nil { fallback(c, err) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "rowaffected": cnt, }, }) } func (this entityController) UpdateSupplier(c *gin.Context) { var info model.Supplier if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.UpdateSupplier(&info) if err != nil || cnt == 0 { fallback(c, fmt.Errorf("%d rows affected:%v", cnt, err)) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "count": cnt, }, }) } func (this entityController) DeleteSupplier(c *gin.Context) { var info model.Supplier if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.DeleteSupplier(info.Id) if err != nil || cnt == 0 { fallback(c, fmt.Errorf("%d rows affected:%v", cnt, err)) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "count": cnt, }, }) } ///////////////// func (this entityController) GetEntityStatus(c *gin.Context) { ret, err := model.EntityService.GetEntityStatus() if err != nil { fallback(c, err) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: ret, }) } func (this entityController) QueryEntityStatus(c *gin.Context) { id, err1 := getID(c) if err1 != nil { fallback(c, err1) return } ret, err2 := model.EntityService.QueryEntityStatus(id) if err2 != nil { fallback(c, err2) return } c.JSON(200, 
model.SuccessMessage{ Errcode: Error_Success, Data: ret, }) } func (this entityController) InsertEntityStatus(c *gin.Context) { var info model.EntityStatus if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.InsertEntityStatus(&info) if err != nil { fallback(c, err) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "rowaffected": cnt, }, }) } func (this entityController) UpdateEntityStatus(c *gin.Context) { var info model.EntityStatus if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.UpdateEntityStatus(&info) if err != nil || cnt == 0 { fallback(c, fmt.Errorf("%d rows affected:%v", cnt, err)) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "count": cnt, }, }) } func (this entityController) DeleteEntityStatus(c *gin.Context) { var info model.EntityStatus if err := getJson(c, &info); err != nil { fallback(c, err) return } cnt, err := model.EntityService.DeleteEntityStatus(info.Id) if err != nil || cnt == 0 { fallback(c, fmt.Errorf("%d rows affected:%v", cnt, err)) return } c.JSON(200, model.SuccessMessage{ Errcode: Error_Success, Data: gin.H{ "count": cnt, }, }) }
package main

import "fmt"

// bestRotation solves LeetCode 798 (Smallest Rotation with Highest Score):
// returns the rotation index K maximizing the number of positions i with
// A[(i+K)%N] <= i, using a difference array over rotation indices.
//
// The original used a fixed [20010]int array, which silently caps the input
// size (and panics for len(A) > 20009); the difference array is now sized
// from the input, generalizing to any length.
func bestRotation(A []int) int {
	N := len(A)
	// cnt is a difference array: cnt[k] accumulates the score change when
	// moving from rotation k-1 to rotation k. Indices produced below are in
	// [0, N), so N+1 entries are sufficient.
	cnt := make([]int, N+1)
	for i, v := range A {
		// For element v at index i, the rotations in [l, r) (cyclically)
		// score a point; record the interval endpoints, wrapping via cnt[0].
		r, l := (i-v+N+1)%N, (i-(N-1)+N)%N
		cnt[l]++
		cnt[r]--
		if l >= r {
			cnt[0]++
		}
	}
	// Prefix-sum the difference array, tracking the first rotation with the
	// strictly highest score (idx stays -1 only if every score is 0).
	cur, ans, idx := 0, 0, -1
	for i := 0; i < N; i++ {
		cur += cnt[i]
		if cur > ans {
			ans = cur
			idx = i
		}
	}
	return idx
}

func main() {
	fmt.Println(bestRotation([]int{2, 3, 1, 4, 0}))
	fmt.Println(bestRotation([]int{1, 3, 0, 2, 4}))
}
package migrations

import "github.com/jmoiron/sqlx"

// CreateCustomCommandTables creates the tables backing custom commands:
// `custom_commands` holds the command definitions (keyed by name+proc) and
// `custom_command_channels` holds per-channel enabled/restricted flags
// (keyed by name+channel). Runs inside the supplied migration transaction;
// the first error aborts the migration.
func CreateCustomCommandTables(tx *sqlx.Tx) error {
	_, err := tx.Exec("CREATE TABLE `custom_commands` (`name` varchar(255) NOT NULL, `proc` varchar(255) NOT NULL, `description` varchar(255) NOT NULL, PRIMARY KEY(`name`,`proc`))")
	if err != nil {
		return err
	}
	_, err = tx.Exec("CREATE TABLE `custom_command_channels` (`name` varchar(255) NOT NULL, `channel` varchar(255) NOT NULL, `enabled` TINYINT NOT NULL DEFAULT 1, `restricted` TINYINT NOT NULL DEFAULT 0, PRIMARY KEY(`name`,`channel`))")
	return err
}
package socket import ( . "Api-go/model" "encoding/json" "github.com/gorilla/websocket" ) func SendMessage(ws *websocket.Conn, userName string, params map[string]string) { msg := params["msg"] var message Message //反序列化 err := json.Unmarshal([]byte(msg), &message) if err != nil { err := ws.WriteMessage(websocket.TextMessage, []byte("Unmarshal Failed")) if err != nil { return } } //构造客户端方法调用实例 callBack := ClientCallBack{ Method: "ReceiveMessage", Params: message, } switch message.Type { case "chat": //向特定用户发送消息以调用客户端方法 to := ChatUsers[message.To[0]] err := to.WriteJSON(callBack) if err != nil { return } break case "broadcast": for user := range BroadcastUsers { err := BroadcastUsers[user].WriteJSON(callBack) if err != nil { return } } break case "chatroom": for user := range ChatRoomUsers { err := ChatRoomUsers[user].WriteJSON(callBack) if err != nil { return } } break default: break } }
package rwasm

import (
	"os"

	"github.com/pkg/errors"
	"github.com/suborbital/reactr/rcap"
	"github.com/wasmerio/wasmer-go/wasmer"
)

// getStaticFile builds the host function descriptor for "get_static_file",
// which Wasm guests call with (name pointer, name size, instance ident).
func getStaticFile() *HostFn {
	fn := func(args ...wasmer.Value) (interface{}, error) {
		namePointer := args[0].I32()
		nameeSize := args[1].I32()
		ident := args[2].I32()

		ret := get_static_file(namePointer, nameeSize, ident)

		return ret, nil
	}

	return newHostFn("get_static_file", 3, true, fn)
}

// get_static_file reads the filename from the guest instance's memory, loads
// the static file via the instance's FileSource and stores the contents as
// the FFI result for the guest to fetch. Returns the file length on success,
// or a negative code: -1 invalid ident, -2 file capability not configured,
// -3 file does not exist, -4 any other failure.
func get_static_file(namePtr int32, nameSize int32, ident int32) int32 {
	inst, err := instanceForIdentifier(ident, true)
	if err != nil {
		internalLogger.Error(errors.Wrap(err, "[rwasm] alert: invalid identifier used, potential malicious activity"))
		return -1
	}

	name := inst.readMemory(namePtr, nameSize)

	file, err := inst.ctx.FileSource.GetStatic(string(name))
	if err != nil {
		internalLogger.Error(errors.Wrap(err, "[rwasm] failed to GetStatic"))

		// NOTE(review): direct == comparison misses wrapped errors; if
		// GetStatic can wrap these sentinels, errors.Is would be needed —
		// confirm against the rcap implementation.
		if err == rcap.ErrFileFuncNotSet {
			return -2
		} else if err == os.ErrNotExist {
			return -3
		}

		return -4
	}

	inst.setFFIResult(file)

	return int32(len(file))
}
package main

// NOTE(review): a TLS certificate and, worse, its PRIVATE KEY are hard-coded
// into the binary. Presumably intentional for a CTF/demo ("bsides",
// CN=localhost, self-signed, valid 2020-2030) — anyone with the binary can
// impersonate this server; do not reuse outside that context.

// WebserverCertificate is the PEM-encoded self-signed server certificate.
var WebserverCertificate = []byte(`-----BEGIN CERTIFICATE-----
MIIDrTCCApWgAwIBAgIURD387nezQwUSYH7yw4x1iL6o0ecwDQYJKoZIhvcNAQEL
BQAwZjELMAkGA1UEBhMCVVMxDzANBgNVBAgMBk9yZWdvbjERMA8GA1UEBwwIUG9y
dGxhbmQxDzANBgNVBAoMBmJzaWRlczEOMAwGA1UECwwFIFVuaXQxEjAQBgNVBAMM
CWxvY2FsaG9zdDAeFw0yMDEwMTUwNDA0MjFaFw0zMDEwMTMwNDA0MjFaMGYxCzAJ
BgNVBAYTAlVTMQ8wDQYDVQQIDAZPcmVnb24xETAPBgNVBAcMCFBvcnRsYW5kMQ8w
DQYDVQQKDAZic2lkZXMxDjAMBgNVBAsMBSBVbml0MRIwEAYDVQQDDAlsb2NhbGhv
c3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpQ8VV1H1O6c5D8Fo/
h0hMzjDtoG991q/d2NWFLHPhzKZ79bTzbfZQkyEKMujsEx3tLMZNxnaVGn5MflPe
bGC+tYOmovZNW6l7wwEcFBZFXXx4QmQhgWfSD/AwQaOIWw2rOijvzx5vYJuFW1GJ
byPp3mDjNgzWzesD8Aal5MmlKmz3cJFVhcdQOesNzYhEttwVJER2uQ9xl74cYRou
jbm1eKAJSG+i26Pj5CeRCZ2kL3xn76viUXr2gS2Sg03B2LKl6fNfKphdjw9EJs3P
ji/H9USdc80UxjXC3midm6AGP2+fmrjQSBYW0kI4eU1IwfdnXJ2RaL9MhIMNJyzS
t14hAgMBAAGjUzBRMB0GA1UdDgQWBBTrwbx4sPf2VZC30xDN+oekavTxHTAfBgNV
HSMEGDAWgBTrwbx4sPf2VZC30xDN+oekavTxHTAPBgNVHRMBAf8EBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4IBAQCJjBP2vjE1LCnNsBeK0OmnCkyd26DiTJy5MRKG7SEb
bDjPX3l0ark/JQXjwgSwnPDPJ2GgMcnZD6l2xLvmkCAlX0K44HvPqjpKx6+K/2hB
/mkXMHtX/Zj6BQxawEsP3iQWt5gdwgWUFczdLyi9zc/1drq45N7NTBunx9MeriEi
eMlVoeqf4sR3LlSG6HWz/lIATxd/qCYXHGWufNDcrAtDPtG/m49VLnpXYQ+ZgtZY
3pqzjLGUwAb/EqVRWt9N24f35ene5JzSE4670+lErAG8mBtUbN2yNf36IP7thLVP
ofBLvPq2RyeAtbIXt/AmJ339Xq3b7gODwWgU85XXaacm
-----END CERTIFICATE-----`)

// WebserverPrivateKey is the PEM-encoded private key matching
// WebserverCertificate (see the security note above).
var WebserverPrivateKey = []byte(`-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCpQ8VV1H1O6c5D
8Fo/h0hMzjDtoG991q/d2NWFLHPhzKZ79bTzbfZQkyEKMujsEx3tLMZNxnaVGn5M
flPebGC+tYOmovZNW6l7wwEcFBZFXXx4QmQhgWfSD/AwQaOIWw2rOijvzx5vYJuF
W1GJbyPp3mDjNgzWzesD8Aal5MmlKmz3cJFVhcdQOesNzYhEttwVJER2uQ9xl74c
YRoujbm1eKAJSG+i26Pj5CeRCZ2kL3xn76viUXr2gS2Sg03B2LKl6fNfKphdjw9E
Js3Pji/H9USdc80UxjXC3midm6AGP2+fmrjQSBYW0kI4eU1IwfdnXJ2RaL9MhIMN
JyzSt14hAgMBAAECggEADfJfmCriCFaLgF2VkhZ7hpiWv32wshW2d/kd3tNC2SAb
t6sPNd+hpBH7bvOy7YDFvn0p+c6YhsCgF31xO6qCbjRWnKxgzw72cBQpSz78dsq1
LtOJnJklDNHzYIaICvw1rB4msVvHjjXPmrVjiod11hdUtGQHustMyg5E/fZUBx/N
fJsEOxRLepXoi4seAfVJ1i9yvKbQdhLD6iIJWco/1seKTDw43uy2O3xTkE8nEStH
WQOU5BVNLOZSFZ6ulHIT041k0HflOGIGEte7tw77XoOpP7um3dnzpiQCEB9+M8op
UQQ61VM8wCd4Z4CQWPk+ONyYV1zZR3ykMSDSOQ6yAQKBgQDRsQnXAdNP/vzurhXn
Pq95aiCjx6p+rikBUZhHIw5WPVs7jdOPv1Mj78s9D8v0SRpAr4TJIjxHy7gZkTce
9BBpe0XgD8jDGeOu2ekKIt4k4CV5gBggncPK0CvfVaN0c/j15KL6BBdw19L7IeJ8
tZgNrEZVFHN/i+hWawkq4nL7aQKBgQDOpTE2b1uvHxw4m3d17TiCXHJT4QurFHDr
NXQVnPZmE0LjWukQqgAZ7GQN73n+S2wPJL8uPSJqDizfGenBbcxx3BOPbeD+lyRu
+bSKdrR6hsRtRtgjGg1YXCXSSEZggxZyX66+uVGK6pn9Lm/CFxIOzGY/9O/oskv5
6xOb1CiN+QKBgGGz46nSp+r2HFP5uSruAkTINj0Zo08zRtfedN1wcBWusPumsZSg
yNRNvpzM53MDPMA3B9/Pm9a4DSecidpaTetYDM7BjAGb5oJp9jNK28bCybM7BALu
f3PYWxjMhkSUZMPtrCJiFan2Zc+tkiIZWqfERWEPDmG3hqH9bWAV8w1BAoGBAMYX
RR3RkipFwF/jaJj7Vnhg9pfB29WcOsK+8xDk7q7bN/uKYnE/BenT2fYh4ugKlQPP
ThWdNShFfm5AAP0TrDBZr8aGpnBnot1fMdqqAEiUeCfpCbhceP6DFx6FejF6eEIb
xv+91pU8X3F82lVMmFrnRf15Z6HBiWOCHcH7lwTBAoGAJ2enLDc7aGxBSYvJHJlW
UP8nGu8FbDtVaWJJyha12Q4bnPAQc2eElD7yTQFJ1ABuCDyHIj1WE/LkxYr8Wm0a
xFkcjwjPBkD9CyVcG3EwwA47pyTTgSGQljxHt0hfLz/yc11K2nRQ18hQoufdKvuh
LEHnsCZexdUfEQO2gnN7tLY=
-----END PRIVATE KEY-----`)
package main import ( "bufio" "io/ioutil" "log" "os/exec" "strconv" "strings" "github.com/icexin/mini-falcon/common" ) type UserMetric struct { script string } func NewUserMetric(script string) *UserMetric { return &UserMetric{ script: script, } } func (u *UserMetric) Metrics() []*common.Metric { var ret []*common.Metric cmd := exec.Command("bash", "-c", u.script) stdout, _ := cmd.StdoutPipe() stderr, _ := cmd.StderrPipe() err := cmd.Start() if err != nil { log.Print(err) return ret } r := bufio.NewReader(stdout) for { line, err := r.ReadString('\n') if err != nil { break } line = strings.TrimSpace(line) fields := strings.Fields(line) if len(fields) != 2 { continue } name := fields[0] value, _ := strconv.ParseFloat(fields[1], 64) metric := NewMetric(name, value) ret = append(ret, metric) } err = cmd.Wait() if err != nil { out, _ := ioutil.ReadAll(stderr) log.Printf("%s:%s", out, err) } return ret }
package main

import (
	"fmt"
)

// main greets, starts the demo web server on a fixed port and prints the
// returned error (nil on success).
func main() {
	fmt.Println("Welcome to function")
	const port = 3000
	_, err := startWebServer(port, 2)
	fmt.Println(err)
}

// startWebServer pretends to bring up a web server on the given port with
// the given retry budget, echoing its progress. It returns the port it
// "bound" and a nil error.
func startWebServer(port int, numberOfRetries int) (int, error) {
	fmt.Println("Starting webserver...")
	fmt.Printf("Server Started %d\n", port)
	fmt.Printf("Retries %d\n", numberOfRetries)
	return port, nil
}
package sshd

import (
	"errors"
	"fmt"
	"net"
	"sync"

	"github.com/armon/go-radix"
	"github.com/sirupsen/logrus"
	"golang.org/x/crypto/ssh"
)

// SSHServer is an embedded ssh daemon that exposes registered admin
// commands to users authenticated by public key.
type SSHServer struct {
	config *ssh.ServerConfig
	l      *logrus.Entry

	// Map of user -> authorized keys
	trustedKeys map[string]map[string]bool

	// List of available commands
	helpCommand *Command
	commands    *radix.Tree
	listener    net.Listener

	// Locks the conns/counter to avoid concurrent map access
	connsLock sync.Mutex
	conns     map[int]*session
	counter   int
}

// NewSSHServer creates a new ssh server rigged with default commands and prepares to listen
func NewSSHServer(l *logrus.Entry) (*SSHServer, error) {
	s := &SSHServer{
		trustedKeys: make(map[string]map[string]bool),
		l:           l,
		commands:    radix.New(),
		conns:       make(map[int]*session),
	}

	s.config = &ssh.ServerConfig{
		PublicKeyCallback: s.matchPubKey,
		//TODO: AuthLogCallback: s.authAttempt,
		//TODO: version string
		ServerVersion: fmt.Sprintf("SSH-2.0-Nebula???"),
	}

	s.RegisterCommand(&Command{
		Name:             "help",
		ShortDescription: "prints available commands or help <command> for specific usage info",
		Callback: func(a interface{}, args []string, w StringWriter) error {
			return helpCallback(s.commands, args, w)
		},
	})

	return s, nil
}

// SetHostKey parses hostPrivateKey and installs it as the server host key.
func (s *SSHServer) SetHostKey(hostPrivateKey []byte) error {
	private, err := ssh.ParsePrivateKey(hostPrivateKey)
	if err != nil {
		return fmt.Errorf("failed to parse private key: %s", err)
	}

	s.config.AddHostKey(private)
	return nil
}

// ClearAuthorizedKeys drops all trusted public keys for all users.
func (s *SSHServer) ClearAuthorizedKeys() {
	s.trustedKeys = make(map[string]map[string]bool)
}

// AddAuthorizedKey adds an ssh public key for a user
func (s *SSHServer) AddAuthorizedKey(user, pubKey string) error {
	pk, _, _, _, err := ssh.ParseAuthorizedKey([]byte(pubKey))
	if err != nil {
		return err
	}

	tk, ok := s.trustedKeys[user]
	if !ok {
		tk = make(map[string]bool)
		s.trustedKeys[user] = tk
	}

	// Keys are indexed by their wire-format marshalling so matchPubKey can
	// do an exact lookup.
	tk[string(pk.Marshal())] = true
	s.l.WithField("sshKey", pubKey).WithField("sshUser", user).Info("Authorized ssh key")
	return nil
}

// RegisterCommand adds a command that can be run by a user, by default only `help` is available
func (s *SSHServer) RegisterCommand(c *Command) {
	s.commands.Insert(c.Name, c)
}

// Run begins listening and accepting connections
func (s *SSHServer) Run(addr string) error {
	var err error
	s.listener, err = net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	s.l.WithField("sshListener", addr).Info("SSH server is listening")
	// Run loops until there is an error
	s.run()
	s.closeSessions()
	s.l.Info("SSH server stopped listening")
	// We don't return an error because run logs for us
	return nil
}

// run is the accept loop: it handshakes each connection, registers a
// session for it and spawns a goroutine to reap the session on exit. It
// returns when the listener is closed (see Stop).
func (s *SSHServer) run() {
	for {
		c, err := s.listener.Accept()
		if err != nil {
			// net.ErrClosed is the normal shutdown path triggered by Stop.
			if !errors.Is(err, net.ErrClosed) {
				s.l.WithError(err).Warn("Error in listener, shutting down")
			}
			return
		}

		conn, chans, reqs, err := ssh.NewServerConn(c, s.config)
		fp := ""
		if conn != nil {
			// "fp" was recorded by matchPubKey during authentication.
			fp = conn.Permissions.Extensions["fp"]
		}

		if err != nil {
			l := s.l.WithError(err).WithField("remoteAddress", c.RemoteAddr())
			if conn != nil {
				l = l.WithField("sshUser", conn.User())
				conn.Close()
			}
			if fp != "" {
				l = l.WithField("sshFingerprint", fp)
			}
			l.Warn("failed to handshake")
			continue
		}

		l := s.l.WithField("sshUser", conn.User())
		l.WithField("remoteAddress", c.RemoteAddr()).WithField("sshFingerprint", fp).Info("ssh user logged in")

		session := NewSession(s.commands, conn, chans, l.WithField("subsystem", "sshd.session"))
		s.connsLock.Lock()
		s.counter++
		counter := s.counter
		s.conns[counter] = session
		s.connsLock.Unlock()

		// Global (out-of-band) requests are unused; drain them so the
		// connection does not stall.
		go ssh.DiscardRequests(reqs)
		go func() {
			// Reap the session from the conns map once it exits.
			<-session.exitChan
			s.l.WithField("id", counter).Debug("closing conn")
			s.connsLock.Lock()
			delete(s.conns, counter)
			s.connsLock.Unlock()
		}()
	}
}

// Stop shuts down the listener; Run then closes the remaining sessions.
func (s *SSHServer) Stop() {
	// Close the listener, this will cause all session to terminate as well, see SSHServer.Run
	if s.listener != nil {
		if err := s.listener.Close(); err != nil {
			s.l.WithError(err).Warn("Failed to close the sshd listener")
		}
	}
}

// closeSessions closes every active session while holding the conns lock.
func (s *SSHServer) closeSessions() {
	s.connsLock.Lock()
	for _, c := range s.conns {
		c.Close()
	}
	s.connsLock.Unlock()
}

// matchPubKey is the ssh.ServerConfig PublicKeyCallback: it accepts a key
// only when it appears in the trusted set for the connecting user, and
// stashes the fingerprint/user in the connection permissions for later use.
func (s *SSHServer) matchPubKey(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
	pk := string(pubKey.Marshal())
	fp := ssh.FingerprintSHA256(pubKey)

	tk, ok := s.trustedKeys[c.User()]
	if !ok {
		return nil, fmt.Errorf("unknown user %s", c.User())
	}

	_, ok = tk[pk]
	if !ok {
		return nil, fmt.Errorf("unknown public key for %s (%s)", c.User(), fp)
	}

	return &ssh.Permissions{
		// Record the public key used for authentication.
		Extensions: map[string]string{
			"fp":   fp,
			"user": c.User(),
		},
	}, nil
}
package main

import "github.com/coreos/go-systemd/sdjournal"

// LogWatcher wraps an sdjournal.Journal handle for reading entries from the
// systemd journal.
type LogWatcher struct {
	journal *sdjournal.Journal
}
package main import ( "github.com/jyggen/advent-of-go/util" "strconv" "strings" ) func normalizeRanges(ipRanges [][]int) [][]int { newRanges := make([][]int, 0) for _, ipRange := range ipRanges { normalized := false for index, newRange := range newRanges { // Range is already fully covered by the existing range. if ipRange[0] >= newRange[0] && ipRange[1] <= newRange[1] { normalized = true } // Range covers the existing range. if ipRange[0] <= newRange[0] && ipRange[1] >= newRange[1] { newRanges[index][0] = ipRange[0] newRanges[index][1] = ipRange[1] normalized = true } // Range is somewhat covered by the existing range. if ipRange[0] < newRange[0] && ipRange[1] >= newRange[0] { newRanges[index][0] = ipRange[0] normalized = true } // Range is somewhat covered by the existing range. if ipRange[0] <= newRange[1] && ipRange[1] > newRange[1] { newRanges[index][1] = ipRange[1] normalized = true } } if (normalized) { continue } newRanges = append(newRanges, []int{ipRange[0], ipRange[1]}) } return newRanges } func parseInput(input string) [][]int { splitInput := strings.Split(input, "\n") ranges := make([][]int, len(splitInput)) for index, ipRange := range splitInput { ranges[index] = make([]int, 2) parts := strings.Split(ipRange, "-") ranges[index][0], _ = strconv.Atoi(parts[0]) ranges[index][1], _ = strconv.Atoi(parts[1]) } for { preLength := len(ranges) ranges = normalizeRanges(ranges) postLength := len(ranges) if (preLength == postLength) { break } } return ranges } func solvePartOne(ipRanges [][]int) int { solution := 0 for solution <= 4294967295 { okay := true for _, ipRange := range ipRanges { if (solution >= ipRange[0] && solution <= ipRange[1]) { okay = false solution = ipRange[1] + 1 break } } if okay { break } } return solution } func solvePartTwo(ipRanges [][]int) int { solution := 0 allowed := 0 for solution <= 4294967295 { okay := true for _, ipRange := range ipRanges { if (solution >= ipRange[0] && solution <= ipRange[1]) { okay = false solution = ipRange[1] 
+ 1 break } } if okay { allowed++ solution++ } } return allowed } func main() { input := parseInput(util.ReadFile("2016/20/input")) util.StartBenchmark() result := solvePartOne(input) util.StopBenchmark() util.PrintAnswer(result) util.StartBenchmark() result = solvePartTwo(input) util.StopBenchmark() util.PrintAnswer(result) }
package p2p import ( "testing" "time" "github.com/aergoio/aergo-lib/log" peer "github.com/libp2p/go-libp2p-peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) func Test_reconnectManager_AddJob(t *testing.T) { logger := log.NewLogger("test.p2p") // TODO: is it ok that this global var can be changed. durations = []time.Duration{ time.Millisecond * 100, time.Millisecond * 120, time.Millisecond * 130, time.Millisecond * 150, } trials := len(durations) maxTrial = trials dummyMeta := PeerMeta{ID: dummyPeerID} dummyMeta2 := PeerMeta{ID: dummyPeerID2} dummyMeta3 := PeerMeta{ID: dummyPeerID3} mockPm := &MockPeerManager{} dummyPeer := &remotePeerImpl{} mockPm.On("GetPeer", mock.MatchedBy(func(ID peer.ID) bool { return ID == dummyPeerID })).Return(nil, false) mockPm.On("GetPeer", mock.MatchedBy(func(ID peer.ID) bool { return ID != dummyPeerID2 })).Return(dummyPeer, true) mockPm.On("GetPeer", mock.MatchedBy(func(ID peer.ID) bool { return ID != dummyPeerID3 })).Return(nil, false) mockPm.On("AddNewPeer", mock.AnythingOfType("p2p.PeerMeta")) mockPm2 := &MockPeerManager{} mockPm2.On("GetPeer", mock.MatchedBy(func(ID peer.ID) bool { return ID != dummyPeerID })).Return(dummyPeer, true) mockPm2.On("AddNewPeer", mock.AnythingOfType("p2p.PeerMeta")) mockPm3 := &MockPeerManager{} mockPm3.On("GetPeer", mock.MatchedBy(func(ID peer.ID) bool { return ID != dummyPeerID })).Return(nil, false).Times(2) mockPm3.On("GetPeer", mock.MatchedBy(func(ID peer.ID) bool { return ID != dummyPeerID })).Return(dummyPeer, true).Once() mockPm3.On("AddNewPeer", mock.AnythingOfType("p2p.PeerMeta")) tests := []struct { name string pm PeerManager }{ {"t1", mockPm}, // TODO: Add test cases. 
} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rm := newReconnectManager(logger) rm.pm = mockPm rm.AddJob(dummyMeta) rm.AddJob(dummyMeta) rm.AddJob(dummyMeta) assert.Equal(t, 1, len(rm.jobs)) rm.AddJob(dummyMeta2) rm.AddJob(dummyMeta3) assert.Equal(t, 3, len(rm.jobs)) rm.CancelJob(dummyPeerID) rm.CancelJob(dummyPeerID) rm.CancelJob(dummyPeerID) rm.CancelJob(dummyPeerID) assert.Equal(t, 2, len(rm.jobs)) rm.Stop() assert.Equal(t, 0, len(rm.jobs)) }) } // test stop t.Run("tstop", func(t *testing.T) { rm := newReconnectManager(logger) rm.pm = mockPm rm.AddJob(dummyMeta) assert.Equal(t, 1, len(rm.jobs)) rm.AddJob(dummyMeta2) assert.Equal(t, 2, len(rm.jobs)) rm.Stop() rm.AddJob(dummyMeta3) assert.Equal(t, 0, len(rm.jobs)) }) }
package middlewares

import "github.com/gin-gonic/gin"

// BasicAuth returns a Gin BasicAuth Account.
//
// NOTE(review): the username/password pair is hard-coded; presumably fine
// for a demo, but real deployments should load credentials from
// configuration or the environment.
func BasicAuth() gin.HandlerFunc {
	return gin.BasicAuth(gin.Accounts{
		"rogeruiz": "test",
	})
}
package main

import (
	"fmt"
	"os"
	"sort"

	"github.com/spf13/pflag"
)

// Command-line flags for a wc-like counter: which quantities to count,
// where the file list comes from, and how to sort the report.
var (
	countByBytes = pflag.BoolP("bytes", "c", false, "count by bytes")
	countByChars = pflag.BoolP("chars", "m", false, "count by chars")
	countByWords = pflag.BoolP("words", "w", false, "count by words")
	countByLines = pflag.BoolP("lines", "l", false, "count by lines")
	files0From   = pflag.String("files0-from", "", "files from file, - for stdin")
	sortBy       = pflag.String("sort", "fname", "sort by: chars, words, lines, fname")
)

// init parses the flags and falls back to word counting when no explicit
// count flag was given.
func init() {
	pflag.Parse()
	if !*countByLines && !*countByBytes && !*countByChars {
		*countByWords = true
	}
}

// main counts each input file (stdin by default, or the list named by
// --files0-from), sorts the per-file stats and dumps the report.
// collectFiles, countFile and FileStat are defined elsewhere in this package.
func main() {
	var (
		files []string
		err   error
	)
	files = []string{"/dev/stdin"}
	if len(*files0From) != 0 {
		files, err = collectFiles(*files0From)
		if err != nil {
			fmt.Fprintf(os.Stderr, "collect files error: %v\n", err)
			os.Exit(1)
		}
	}

	var (
		infos = FileStat{}
	)
	for _, f := range files {
		info, err := countFile(f)
		if err != nil {
			// NOTE(review): on error, the (possibly zero-valued) info is
			// still appended below — confirm whether failed files should be
			// skipped instead.
			fmt.Fprintf(os.Stderr, "count error, file: %s, err: %v", f, err)
		}
		//fmt.Printf("info: %v\n", info)
		infos = append(infos, info)
	}
	sort.Sort(infos)
	infos.Dump()
}
package main //Invalid //Checks if init has no return value func init () int { }