text
stringlengths
11
4.05M
/* Mysterium network payment library. * * Copyright (C) 2020 BlockDev AG * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package uniswap import ( "context" "errors" "math/big" "time" uniswapv2factory "github.com/bonedaddy/go-defi/bindings/uniswap/factory" uniswapv2pair "github.com/bonedaddy/go-defi/bindings/uniswap/pair" "github.com/bonedaddy/go-defi/utils" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ) // Client allows to do operations on uniswap smart contracts. type Client struct { bc utils.Blockchain } // NewClient returns a new instance of uniswap client. func NewClient(bc utils.Blockchain) *Client { return &Client{ bc: bc, } } // GetReserves retursn the available reserves in a pair func (c *Client) GetReserves(token0, token1 common.Address) (*Reserve, error) { addr := GeneratePairAddress(token0, token1) caller, err := uniswapv2pair.NewUniswapv2pairCaller(addr, c.bc) if err != nil { return nil, err } ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() reserves, err := caller.GetReserves(&bind.CallOpts{ Context: ctx, }) if err != nil { return nil, err } // This is the tricky bit. // The reserve call returns the reserves for token0 and token1 in a sorted order. // This means we need to check if our token addresses are sorted or not and flip the reserves if they are not sorted. 
stoken0, _ := sortAddressess(token0, token1) if stoken0 != token0 { // We're not sorted, so the reserves need to be flipped to represent the actual reserves. reserves.Reserve0, reserves.Reserve1 = reserves.Reserve1, reserves.Reserve0 } return &Reserve{Reserve0: reserves.Reserve0, Reserve1: reserves.Reserve1, BlockTimestampLast: reserves.BlockTimestampLast}, nil } // GetExchangeAmount returns the amount of tokens you'd receive when exchanging the given amount of token0 to token1. func (c *Client) GetExchangeAmount(amount *big.Int, token0, token1 common.Address) (*big.Int, error) { reserves, err := c.GetReserves(token0, token1) if err != nil { return nil, err } return Quote(amount, reserves.Reserve0, reserves.Reserve1), nil } // GetExchangeAmountForPath calculates the amount for a given path. func (c *Client) GetExchangeAmountForPath(amount *big.Int, tokens ...common.Address) (*big.Int, error) { if len(tokens) <= 1 { return nil, errors.New("not enough tokens for path") } pairs := GetPathPairs(tokens) for i := range pairs { a, err := c.GetExchangeAmount(amount, pairs[i].Token0, pairs[i].Token1) if err != nil { return nil, err } amount = a } return amount, nil } // Factory returns a uniswap cactory factory binding func (c *Client) Factory() (*uniswapv2factory.Uniswapv2factory, error) { return uniswapv2factory.NewUniswapv2factory(FactoryAddress, c.bc) }
package redisLayer

import (
	"fmt"
	"os"

	"github.com/gomodule/redigo/redis"
)

var pool *redis.Pool
var connection redis.Conn
var redisURL = os.Getenv("REDIS_URL")

// Initialize creates the shared connection pool. Every exported helper calls
// it lazily, so callers normally never need to invoke it themselves.
func Initialize() {
	pool = newPool()
}

// getConn lazily initializes the pool and checks a connection out of it.
// The caller is responsible for closing the returned connection.
func getConn() redis.Conn {
	if pool == nil {
		Initialize()
	}
	return pool.Get()
}

// SetKeyBytes stores a binary value under the given key.
func SetKeyBytes(key string, value []byte) error {
	conn := getConn()
	defer conn.Close()
	_, err := conn.Do("SET", key, value)
	return err
}

// GetKeyBytes returns the value stored under the given key as bytes.
// A missing key surfaces as an error (redis.ErrNil from redigo).
func GetKeyBytes(key string) ([]byte, error) {
	conn := getConn()
	defer conn.Close()
	value, err := redis.String(conn.Do("GET", key))
	if err != nil {
		// Previously the error was only printed and nil was returned;
		// propagate it so callers can distinguish "missing" from "empty".
		return nil, fmt.Errorf("getting key %q: %w", key, err)
	}
	return []byte(value), nil
}

// SetKeyString stores a string value under the given key.
func SetKeyString(key string, value string) error {
	conn := getConn()
	defer conn.Close()
	_, err := conn.Do("SET", key, value)
	return err
}

// GetKeyString returns the value stored under the given key as a string.
// A missing key surfaces as an error (redis.ErrNil from redigo).
func GetKeyString(key string) (string, error) {
	conn := getConn()
	defer conn.Close()
	value, err := redis.String(conn.Do("GET", key))
	if err != nil {
		return "", fmt.Errorf("getting key %q: %w", key, err)
	}
	return value, nil
}

// Exists reports whether the given key exists in the database.
func Exists(key string) (bool, error) {
	conn := getConn()
	defer conn.Close()
	ok, err := redis.Bool(conn.Do("EXISTS", key))
	if err != nil {
		return ok, fmt.Errorf("checking if key %q exists: %w", key, err)
	}
	return ok, nil
}

// FlushDb removes every key from the currently selected database.
func FlushDb() error {
	conn := getConn()
	defer conn.Close()
	_, err := conn.Do("FLUSHDB")
	return err
}

// newPool builds the redigo pool. Connections dial REDIS_URL when set and
// fall back to localhost:6379 otherwise. Dial errors are returned to the
// pool (and surface on first use) instead of panicking.
func newPool() *redis.Pool {
	return &redis.Pool{
		MaxIdle:   3,
		MaxActive: 20,
		Dial: func() (redis.Conn, error) {
			if redisURL == "" {
				return redis.Dial("tcp", ":6379")
			}
			return redis.DialURL(redisURL)
		},
	}
}
package main

import "sync"

// main fans out one goroutine per slice index, passing the index as an
// argument to avoid loop-variable capture, and waits for all of them to
// finish printing before the program exits.
func main() {
	var wg sync.WaitGroup
	values := []int{1, 2, 3, 4, 5, 6, 7}
	for idx := range values {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			println(n)
		}(idx)
	}
	wg.Wait()
}
package horspool func createShiftTable(pattern string) map[byte]int { shiftTable := make(map[byte]int) for i := 0; i < len(pattern)-1; i++ { index := pattern[i] shiftTable[index] = len(pattern) - i - 1 } return shiftTable } func createReverseShiftTable(pattern string) map[byte]int { shiftTable := make(map[byte]int) for i := 1; i < len(pattern); i++ { index := pattern[i] if _, isExist := shiftTable[index]; !isExist { shiftTable[index] = i } } return shiftTable } func calculateShiftAmount(shiftTable map[byte]int, char byte, patternLength int) int { if shiftAmount, isExists := shiftTable[char]; isExists { return shiftAmount } return patternLength } func matchPatternReverse(text, pattern string) bool { patternLength := len(pattern) textLength := len(text) for i := range pattern { if text[textLength-i-1] != pattern[patternLength-i-1] { return false } } return true } // Find finds the index of first matching pattern in text using horspool algorithm // returns -1 if pattern does not exist in text func Find(text string, pattern string) int { shiftTable := createShiftTable(pattern) textLength := len(text) patternLength := len(pattern) needle := patternLength for needle <= textLength { textSlice := text[needle-patternLength : needle] isMatched := matchPatternReverse(textSlice, pattern) if !isMatched { needle += calculateShiftAmount(shiftTable, text[needle-1], patternLength) } else { return needle - patternLength } } return -1 } // FindLast finds the index of last matching pattern in text using horspool algorithm // returns -1 if pattern does not exist in text func FindLast(text string, pattern string) int { shiftTable := createReverseShiftTable(pattern) textLength := len(text) patternLength := len(pattern) needle := textLength - patternLength for needle >= 0 { textSlice := text[needle : needle+patternLength] if textSlice != pattern { needle -= calculateShiftAmount(shiftTable, text[needle], patternLength) } else { return needle } } return -1 }
package heuristics func mScore(nb int, x int, y int, size int, nbPos map[int][2]int) float32 { tmp := nbPos[nb] x1 := tmp[0] y1 := tmp[1] return float32(abs((x1 - x)) + abs((y1 - y))) } func manhattan(grid []int, size int, depth int) float32 { var score float32 for x := 0; x < size; x++ { for y := 0; y < size; y++ { val := grid[get1d(x, y, size)] if val != 0 { score += mScore(val, x, y, size, finalPos) } } } return score } func manhattanA(grid []int, size int, depth int) float32 { return manhattan(grid, size, depth) + float32(depth) }
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

package consensus

import (
	"fmt"

	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
	valuetransaction "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
	"github.com/iotaledger/wasp/packages/chain"
	"github.com/iotaledger/wasp/packages/coretypes"
	"github.com/iotaledger/wasp/packages/hashing"
	"github.com/iotaledger/wasp/packages/kv/dict"
	"github.com/iotaledger/wasp/packages/util"
	"github.com/iotaledger/wasp/packages/vm"
	"github.com/iotaledger/wasp/packages/vm/runvm"
)

// runCalculationsParams bundles everything needed to run the VM over a
// batch of requests on behalf of a particular leader.
type runCalculationsParams struct {
	requests        []*request
	leaderPeerIndex uint16
	balances        map[valuetransaction.ID][]*balance.Balance
	accrueFeesTo    coretypes.AgentID
	timestamp       int64
}

// runs the VM for requests and posts result to committee's queue.
// Bails out early when the current state is not yet known; VM errors are
// logged and dropped in OnFinish rather than propagated.
func (op *operator) runCalculationsAsync(par runCalculationsParams) {
	if op.currentState == nil {
		op.log.Debugf("runCalculationsAsync: variable currentState is not known")
		return
	}
	ctx := &vm.VMTask{
		Processors:         op.chain.Processors(),
		ChainID:            *op.chain.ID(),
		Color:              *op.chain.Color(),
		Entropy:            (hashing.HashValue)(op.stateTx.ID()),
		Balances:           par.balances,
		ValidatorFeeTarget: par.accrueFeesTo,
		Requests:           takeRefs(par.requests),
		Timestamp:          par.timestamp,
		VirtualState:       op.currentState,
		Log:                op.log,
	}
	// OnFinish runs when the VM task completes: on success the result is
	// posted back to the committee's queue tagged with the leader index.
	ctx.OnFinish = func(_ dict.Dict, _ error, vmError error) {
		if vmError != nil {
			op.log.Errorf("VM task failed: %v", vmError)
			return
		}
		op.chain.ReceiveMessage(&chain.VMResultMsg{
			Task:   ctx,
			Leader: par.leaderPeerIndex,
		})
	}
	if err := runvm.RunComputationsAsync(ctx); err != nil {
		op.log.Errorf("RunComputationsAsync: %v", err)
	}
}

// sendResultToTheLeader signs this node's VM result and sends the signature
// share (plus batch/essence hashes) to the leader peer. The result is
// dismissed if the consensus stage advanced while the VM was running.
func (op *operator) sendResultToTheLeader(result *vm.VMTask, leader uint16) {
	op.log.Debugw("sendResultToTheLeader")

	if op.consensusStage != consensusStageSubCalculationsStarted {
		op.log.Debugf("calculation result on SUB dismissed because stage changed from '%s' to '%s'",
			stages[consensusStageSubCalculationsStarted].name, stages[op.consensusStage].name)
		return
	}

	sigShare, err := op.dkshare.SignShare(result.ResultTransaction.EssenceBytes())
	if err != nil {
		op.log.Errorf("error while signing transaction %v", err)
		return
	}

	reqids := make([]coretypes.RequestID, len(result.Requests))
	for i := range reqids {
		reqids[i] = *result.Requests[i].RequestID()
	}

	essenceHash := hashing.HashData(result.ResultTransaction.EssenceBytes())
	batchHash := vm.BatchHash(reqids, result.Timestamp, leader)

	op.log.Debugw("sendResultToTheLeader",
		"leader", leader,
		"batchHash", batchHash.String(),
		"essenceHash", essenceHash.String(),
		"ts", result.Timestamp,
	)

	msgData := util.MustBytes(&chain.SignedHashMsg{
		PeerMsgHeader: chain.PeerMsgHeader{
			BlockIndex: op.mustStateIndex(),
		},
		BatchHash:     batchHash,
		OrigTimestamp: result.Timestamp,
		EssenceHash:   essenceHash,
		SigShare:      sigShare,
	})
	if err := op.chain.SendMsg(leader, chain.MsgSignedHash, msgData); err != nil {
		op.log.Error(err)
		return
	}
	// Remember what was sent so a late leader change can be detected.
	op.sentResultToLeader = result.ResultTransaction
	op.sentResultToLeaderIndex = leader

	op.setNextConsensusStage(consensusStageSubCalculationsFinished)
}

// saveOwnResult records the leader's own VM result and signature share.
// It panics on batch-hash or block-size mismatch: both indicate a broken
// internal invariant, not a recoverable runtime condition.
func (op *operator) saveOwnResult(result *vm.VMTask) {
	if op.consensusStage != consensusStageLeaderCalculationsStarted {
		op.log.Debugf("calculation result on LEADER dismissed because stage changed from '%s' to '%s'",
			stages[consensusStageLeaderCalculationsStarted].name, stages[op.consensusStage].name)
		return
	}
	sigShare, err := op.dkshare.SignShare(result.ResultTransaction.EssenceBytes())
	if err != nil {
		op.log.Errorf("error while signing transaction %v", err)
		return
	}

	reqids := make([]coretypes.RequestID, len(result.Requests))
	for i := range reqids {
		reqids[i] = *result.Requests[i].RequestID()
	}

	bh := vm.BatchHash(reqids, result.Timestamp, op.chain.OwnPeerIndex())
	if bh != op.leaderStatus.batchHash {
		panic("bh != op.leaderStatus.batchHash")
	}
	if len(result.Requests) != int(result.ResultBlock.Size()) {
		panic("len(result.RequestIDs) != int(result.ResultBlock.Size())")
	}

	essenceHash := hashing.HashData(result.ResultTransaction.EssenceBytes())

	op.log.Debugw("saveOwnResult",
		"batchHash", bh.String(),
		"ts", result.Timestamp,
		"essenceHash", essenceHash.String(),
	)

	op.leaderStatus.resultTx = result.ResultTransaction
	op.leaderStatus.batch = result.ResultBlock
	op.leaderStatus.signedResults[op.chain.OwnPeerIndex()] = &signedResult{
		essenceHash: essenceHash,
		sigShare:    sigShare,
	}
	op.setNextConsensusStage(consensusStageLeaderCalculationsFinished)
}

// aggregateSigShares recovers the full BLS signature from the collected
// shares and attaches it to the result transaction held in leaderStatus.
func (op *operator) aggregateSigShares(sigShares [][]byte) error {
	resTx := op.leaderStatus.resultTx

	finalSignature, err := op.dkshare.RecoverFullSignature(sigShares, resTx.EssenceBytes())
	if err != nil {
		return err
	}
	if err := resTx.PutSignature(finalSignature); err != nil {
		return fmt.Errorf("something wrong while aggregating final signature: %v", err)
	}
	return nil
}
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package executor_test

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http/httptest"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/gorilla/mux"
	"github.com/pingcap/failpoint"
	"github.com/pingcap/fn"
	"github.com/pingcap/tidb/config"
	"github.com/pingcap/tidb/domain"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/parser/auth"
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/server"
	"github.com/pingcap/tidb/store/helper"
	"github.com/pingcap/tidb/testkit"
	"github.com/pingcap/tidb/util"
	"github.com/pingcap/tidb/util/pdapi"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
)

// infosSchemaClusterTableSuite holds the shared fixtures for the
// information_schema cluster-table tests: a mock store/domain, a TiDB RPC
// server, and a mock PD HTTP server.
type infosSchemaClusterTableSuite struct {
	store      kv.Storage
	dom        *domain.Domain
	rpcServer  *grpc.Server
	httpServer *httptest.Server
	mockAddr   string
	listenAddr string
	startTime  time.Time
}

// createInfosSchemaClusterTableSuite spins up the fixtures and registers a
// cleanup that tears both servers down when the test finishes.
func createInfosSchemaClusterTableSuite(t *testing.T) *infosSchemaClusterTableSuite {
	s := new(infosSchemaClusterTableSuite)
	s.store, s.dom = testkit.CreateMockStoreAndDomain(t)
	s.rpcServer, s.listenAddr = setUpRPCService(t, s.dom, "127.0.0.1:0")
	s.httpServer, s.mockAddr = s.setUpMockPDHTTPServer()
	s.startTime = time.Now()
	t.Cleanup(func() {
		if s.rpcServer != nil {
			s.rpcServer.Stop()
		}
		if s.httpServer != nil {
			s.httpServer.Close()
		}
	})
	return s
}

// setUpRPCService starts TiDB's gRPC status service on the given address
// (port 0 picks a free port) and points the global status port at it.
func setUpRPCService(t *testing.T, dom *domain.Domain, addr string) (*grpc.Server, string) {
	lis, err := net.Listen("tcp", addr)
	require.NoError(t, err)

	// Fix issue 9836
	sm := &testkit.MockSessionManager{
		PS:    make([]*util.ProcessInfo, 1),
		SerID: 1,
	}
	sm.PS = append(sm.PS, &util.ProcessInfo{
		ID:      1,
		User:    "root",
		Host:    "127.0.0.1",
		Command: mysql.ComQuery,
	})
	srv := server.NewRPCServer(config.GetGlobalConfig(), dom, sm)
	port := lis.Addr().(*net.TCPAddr).Port
	addr = fmt.Sprintf("127.0.0.1:%d", port)
	go func() {
		require.NoError(t, srv.Serve(lis))
	}()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.Status.StatusPort = uint(port)
	})
	return srv, addr
}

// setUpMockPDHTTPServer serves canned PD API responses (stores, regions,
// status, config, region stats) that the cluster tables read from.
func (s *infosSchemaClusterTableSuite) setUpMockPDHTTPServer() (*httptest.Server, string) {
	// mock PD http server
	router := mux.NewRouter()
	srv := httptest.NewServer(router)
	// mock store stats stat
	mockAddr := strings.TrimPrefix(srv.URL, "http://")
	router.Handle(pdapi.Stores, fn.Wrap(func() (*helper.StoresStat, error) {
		return &helper.StoresStat{
			Count: 1,
			Stores: []helper.StoreStat{
				{
					Store: helper.StoreBaseStat{
						ID:             1,
						Address:        "127.0.0.1:20160",
						State:          0,
						StateName:      "Up",
						Version:        "4.0.0-alpha",
						StatusAddress:  mockAddr,
						GitHash:        "mock-tikv-githash",
						StartTimestamp: s.startTime.Unix(),
					},
				},
			},
		}, nil
	}))
	// mock regions
	router.Handle(pdapi.Regions, fn.Wrap(func() (*helper.RegionsInfo, error) {
		return &helper.RegionsInfo{
			Count: 1,
			Regions: []helper.RegionInfo{
				{
					ID:       1,
					StartKey: "",
					EndKey:   "",
					Epoch: helper.RegionEpoch{
						ConfVer: 1,
						Version: 2,
					},
					WrittenBytes:    10000,
					ReadBytes:       20000,
					ApproximateSize: 300000,
					ApproximateKeys: 1000,
				},
			},
		}, nil
	}))
	// mock PD API
	router.Handle(pdapi.Status, fn.Wrap(func() (interface{}, error) {
		return struct {
			Version        string `json:"version"`
			GitHash        string `json:"git_hash"`
			StartTimestamp int64  `json:"start_timestamp"`
		}{
			Version:        "4.0.0-alpha",
			GitHash:        "mock-pd-githash",
			StartTimestamp: s.startTime.Unix(),
		}, nil
	}))
	// Shared nested config payload; key3 deliberately mixes plain values
	// with a nested map to exercise the config flattening logic.
	var mockConfig = func() (map[string]interface{}, error) {
		configuration := map[string]interface{}{
			"key1": "value1",
			"key2": map[string]string{
				"nest1": "n-value1",
				"nest2": "n-value2",
			},
			"key3": map[string]interface{}{
				"nest1": "n-value1",
				"nest2": "n-value2",
				"key4": map[string]string{
					"nest3": "n-value4",
					"nest4": "n-value5",
				},
			},
		}
		return configuration, nil
	}
	// PD config.
	router.Handle(pdapi.Config, fn.Wrap(mockConfig))
	// TiDB/TiKV config.
	router.Handle("/config", fn.Wrap(mockConfig))
	// PD region.
	router.Handle("/pd/api/v1/stats/region", fn.Wrap(func() (*helper.PDRegionStats, error) {
		return &helper.PDRegionStats{
			Count:            1,
			EmptyCount:       1,
			StorageSize:      1,
			StorageKeys:      1,
			StoreLeaderCount: map[uint64]int{1: 1},
			StorePeerCount:   map[uint64]int{1: 1},
		}, nil
	}))
	return srv, mockAddr
}

// mockStore wraps a helper.Storage and overrides EtcdAddrs so the cluster
// tables resolve PD at the mock HTTP server's address.
type mockStore struct {
	helper.Storage
	host string
}

func (s *mockStore) EtcdAddrs() ([]string, error) { return []string{s.host}, nil }
func (s *mockStore) TLSConfig() *tls.Config       { panic("not implemented") }
func (s *mockStore) StartGCWorker() error         { panic("not implemented") }
func (s *mockStore) Name() string                 { return "mockStore" }
func (s *mockStore) Describe() string             { return "" }

// TestTiDBClusterInfo checks information_schema.cluster_info and
// cluster_config contents, including the tombstone-store and mocked
// cluster-topology failpoints.
func TestTiDBClusterInfo(t *testing.T) {
	s := createInfosSchemaClusterTableSuite(t)

	mockAddr := s.mockAddr
	store := &mockStore{
		s.store.(helper.Storage),
		mockAddr,
	}

	// information_schema.cluster_info
	tk := testkit.NewTestKit(t, store)
	tidbStatusAddr := fmt.Sprintf(":%d", config.GetGlobalConfig().Status.StatusPort)
	row := func(cols ...string) string { return strings.Join(cols, " ") }
	tk.MustQuery("select type, instance, status_address, version, git_hash from information_schema.cluster_info").Check(testkit.Rows(
		row("tidb", ":4000", tidbStatusAddr, "None", "None"),
		row("pd", mockAddr, mockAddr, "4.0.0-alpha", "mock-pd-githash"),
		row("tikv", "store1", "", "", ""),
	))
	startTime := s.startTime.Format(time.RFC3339)
	tk.MustQuery("select type, instance, start_time from information_schema.cluster_info where type != 'tidb'").Check(testkit.Rows(
		row("pd", mockAddr, startTime),
		row("tikv", "store1", ""),
	))

	// A tombstoned TiKV store must disappear from cluster_info.
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/infoschema/mockStoreTombstone", `return(true)`))
	tk.MustQuery("select type, instance, start_time from information_schema.cluster_info where type = 'tikv'").Check(testkit.Rows())
	require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/infoschema/mockStoreTombstone"))

	// information_schema.cluster_config
	instances := []string{
		"pd,127.0.0.1:11080," + mockAddr + ",mock-version,mock-githash,0",
		"tidb,127.0.0.1:11080," + mockAddr + ",mock-version,mock-githash,1001",
		"tikv,127.0.0.1:11080," + mockAddr + ",mock-version,mock-githash,0",
	}
	fpExpr := `return("` + strings.Join(instances, ";") + `")`
	require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/infoschema/mockClusterInfo", fpExpr))
	defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/infoschema/mockClusterInfo")) }()
	tk.MustQuery("select type, instance, status_address, version, git_hash, server_id from information_schema.cluster_info").Check(testkit.Rows(
		row("pd", "127.0.0.1:11080", mockAddr, "mock-version", "mock-githash", "0"),
		row("tidb", "127.0.0.1:11080", mockAddr, "mock-version", "mock-githash", "1001"),
		row("tikv", "127.0.0.1:11080", mockAddr, "mock-version", "mock-githash", "0"),
	))
	// Nested config keys are flattened to dotted paths per instance.
	tk.MustQuery("select * from information_schema.cluster_config").Check(testkit.Rows(
		"pd 127.0.0.1:11080 key1 value1",
		"pd 127.0.0.1:11080 key2.nest1 n-value1",
		"pd 127.0.0.1:11080 key2.nest2 n-value2",
		"pd 127.0.0.1:11080 key3.key4.nest3 n-value4",
		"pd 127.0.0.1:11080 key3.key4.nest4 n-value5",
		"pd 127.0.0.1:11080 key3.nest1 n-value1",
		"pd 127.0.0.1:11080 key3.nest2 n-value2",
		"tidb 127.0.0.1:11080 key1 value1",
		"tidb 127.0.0.1:11080 key2.nest1 n-value1",
		"tidb 127.0.0.1:11080 key2.nest2 n-value2",
		"tidb 127.0.0.1:11080 key3.key4.nest3 n-value4",
		"tidb 127.0.0.1:11080 key3.key4.nest4 n-value5",
		"tidb 127.0.0.1:11080 key3.nest1 n-value1",
		"tidb 127.0.0.1:11080 key3.nest2 n-value2",
		"tikv 127.0.0.1:11080 key1 value1",
		"tikv 127.0.0.1:11080 key2.nest1 n-value1",
		"tikv 127.0.0.1:11080 key2.nest2 n-value2",
		"tikv 127.0.0.1:11080 key3.key4.nest3 n-value4",
		"tikv 127.0.0.1:11080 key3.key4.nest4 n-value5",
		"tikv 127.0.0.1:11080 key3.nest1 n-value1",
		"tikv 127.0.0.1:11080 key3.nest2 n-value2",
	))
	tk.MustQuery("select TYPE, `KEY`, VALUE from information_schema.cluster_config where `key`='key3.key4.nest4' order by type").Check(testkit.Rows(
		"pd key3.key4.nest4 n-value5",
		"tidb key3.key4.nest4 n-value5",
		"tikv key3.key4.nest4 n-value5",
	))
}

// TestTikvRegionStatus checks TIKV_REGION_STATUS rows for plain tables,
// partitioned tables, secondary indexes, and (with EnableGlobalIndex) a
// global unique index on a partitioned table.
func TestTikvRegionStatus(t *testing.T) {
	s := createInfosSchemaClusterTableSuite(t)
	mockAddr := s.mockAddr
	store := &mockStore{
		s.store.(helper.Storage),
		mockAddr,
	}
	tk := testkit.NewTestKit(t, store)
	restoreConfig := config.RestoreFunc()
	defer restoreConfig()
	config.UpdateGlobal(func(conf *config.Config) {
		conf.EnableGlobalIndex = true
	})
	tk.MustExec("use test")
	tk.MustExec("drop table if exists test_t1")
	tk.MustExec(`CREATE TABLE test_t1 ( a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, c int(11) DEFAULT NULL)`)
	tk.MustQuery("select REGION_ID, DB_NAME, TABLE_NAME, IS_INDEX, INDEX_ID, INDEX_NAME, IS_PARTITION, PARTITION_NAME from information_schema.TIKV_REGION_STATUS where DB_NAME = 'test' and TABLE_NAME = 'test_t1'").Check(testkit.Rows(
		"1 test test_t1 0 <nil> <nil> 0 <nil>",
	))
	tk.MustExec("alter table test_t1 add index p_a (a)")
	tk.MustQuery("select REGION_ID, DB_NAME, TABLE_NAME, IS_INDEX, INDEX_NAME, IS_PARTITION, PARTITION_NAME from information_schema.TIKV_REGION_STATUS where DB_NAME = 'test' and TABLE_NAME = 'test_t1' order by IS_INDEX").Check(testkit.Rows(
		"1 test test_t1 0 <nil> 0 <nil>",
		"1 test test_t1 1 p_a 0 <nil>",
	))
	tk.MustExec("alter table test_t1 add unique p_b (b);")
	tk.MustQuery("select REGION_ID, DB_NAME, TABLE_NAME, IS_INDEX, INDEX_NAME, IS_PARTITION, PARTITION_NAME from information_schema.TIKV_REGION_STATUS where DB_NAME = 'test' and TABLE_NAME = 'test_t1' order by IS_INDEX, INDEX_NAME").Check(testkit.Rows(
		"1 test test_t1 0 <nil> 0 <nil>",
		"1 test test_t1 1 p_a 0 <nil>",
		"1 test test_t1 1 p_b 0 <nil>",
	))
	tk.MustExec("drop table if exists test_t2")
	tk.MustExec(`CREATE TABLE test_t2 ( a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, c int(11) DEFAULT NULL) PARTITION BY RANGE (c) ( PARTITION p0 VALUES LESS THAN (10), PARTITION p1 VALUES LESS THAN (MAXVALUE))`)
	tk.MustQuery("select REGION_ID, DB_NAME, TABLE_NAME, IS_INDEX, INDEX_ID, INDEX_NAME, IS_PARTITION, PARTITION_NAME from information_schema.TIKV_REGION_STATUS where DB_NAME = 'test' and TABLE_NAME = 'test_t2' order by PARTITION_NAME").Check(testkit.Rows(
		"1 test test_t2 0 <nil> <nil> 1 p0",
		"1 test test_t2 0 <nil> <nil> 1 p1",
	))
	tk.MustExec("alter table test_t2 add index p_a (a)")
	tk.MustQuery("select REGION_ID, DB_NAME, TABLE_NAME, IS_INDEX, INDEX_NAME, IS_PARTITION, PARTITION_NAME from information_schema.TIKV_REGION_STATUS where DB_NAME = 'test' and TABLE_NAME = 'test_t2' order by IS_INDEX, PARTITION_NAME").Check(testkit.Rows(
		"1 test test_t2 0 <nil> 1 p0",
		"1 test test_t2 0 <nil> 1 p1",
		"1 test test_t2 1 p_a 1 p0",
		"1 test test_t2 1 p_a 1 p1",
	))
	// With EnableGlobalIndex, the unique index lives outside any partition:
	// IS_PARTITION is 0 and PARTITION_NAME is NULL for p_b.
	tk.MustExec("alter table test_t2 add unique p_b (b);")
	tk.MustQuery("select REGION_ID, DB_NAME, TABLE_NAME, IS_INDEX, INDEX_NAME, IS_PARTITION, PARTITION_NAME from information_schema.TIKV_REGION_STATUS where DB_NAME = 'test' and TABLE_NAME = 'test_t2' order by IS_INDEX, IS_PARTITION desc, PARTITION_NAME").Check(testkit.Rows(
		"1 test test_t2 0 <nil> 1 p0",
		"1 test test_t2 0 <nil> 1 p1",
		"1 test test_t2 1 p_a 1 p0",
		"1 test test_t2 1 p_a 1 p1",
		"1 test test_t2 1 p_b 0 <nil>",
	))
}

// TestTableStorageStats covers TABLE_STORAGE_STATS: the mandatory
// TABLE_SCHEMA filter, sys-schema exclusion, per-table sizes, and
// privilege-based row visibility.
func TestTableStorageStats(t *testing.T) {
	s := createInfosSchemaClusterTableSuite(t)
	tk := testkit.NewTestKit(t, s.store)
	// Without the mock PD store the query must fail fast.
	err := tk.QueryToErr("select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'test'")
	require.EqualError(t, err, "pd unavailable")
	mockAddr := s.mockAddr
	store := &mockStore{
		s.store.(helper.Storage),
		mockAddr,
	}

	// Test information_schema.TABLE_STORAGE_STATS.
	tk = testkit.NewTestKit(t, store)

	// Test table_schema is not specified.
	err = tk.QueryToErr("select * from information_schema.TABLE_STORAGE_STATS")
	require.EqualError(t, err, "Please add where clause to filter the column TABLE_SCHEMA. "+
		"For example, where TABLE_SCHEMA = 'xxx' or where TABLE_SCHEMA in ('xxx', 'yyy')")

	// Test it would get null set when get the sys schema.
	tk.MustQuery("select TABLE_NAME from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema';").Check([][]interface{}{})
	tk.MustQuery("select TABLE_NAME from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA in ('information_schema', 'metrics_schema');").Check([][]interface{}{})
	tk.MustQuery("select TABLE_NAME from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema' and TABLE_NAME='schemata';").Check([][]interface{}{})

	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int, b int, index idx(a))")
	tk.MustQuery("select TABLE_NAME, TABLE_SIZE from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'test' and TABLE_NAME='t';").Check(testkit.Rows("t 1"))

	tk.MustExec("create table t1 (a int, b int, index idx(a))")
	tk.MustQuery("select TABLE_NAME, sum(TABLE_SIZE) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'test' group by TABLE_NAME;").Sort().Check(testkit.Rows(
		"t 1",
		"t1 1",
	))
	tk.MustQuery("select TABLE_SCHEMA, sum(TABLE_SIZE) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'test' group by TABLE_SCHEMA;").Check(testkit.Rows(
		"test 2",
	))
	// NOTE(review): 53 is the expected number of mysql-schema tables at this
	// TiDB version; bump it when system tables are added.
	rows := tk.MustQuery("select TABLE_NAME from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql';").Rows()
	result := 53
	require.Len(t, rows, result)

	// More tests about the privileges.
	tk.MustExec("create user 'testuser'@'localhost'")
	tk.MustExec("create user 'testuser2'@'localhost'")
	tk.MustExec("create user 'testuser3'@'localhost'")
	tk1 := testkit.NewTestKit(t, store)
	defer tk1.MustExec("drop user 'testuser'@'localhost'")
	defer tk1.MustExec("drop user 'testuser2'@'localhost'")
	defer tk1.MustExec("drop user 'testuser3'@'localhost'")
	tk.MustExec("grant all privileges on *.* to 'testuser2'@'localhost'")
	tk.MustExec("grant select on *.* to 'testuser3'@'localhost'")

	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{
		Username: "testuser",
		Hostname: "localhost",
	}, nil, nil, nil))

	// User has no access to this schema, so the result set is empty.
	tk.MustQuery("select count(1) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql'").Check(testkit.Rows("0"))

	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{
		Username: "testuser2",
		Hostname: "localhost",
	}, nil, nil, nil))
	tk.MustQuery("select count(1) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql'").Check(testkit.Rows(strconv.Itoa(result)))

	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{
		Username: "testuser3",
		Hostname: "localhost",
	}, nil, nil, nil))
	tk.MustQuery("select count(1) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql'").Check(testkit.Rows(strconv.Itoa(result)))
}

// TestIssue42619 checks that TABLE_STORAGE_STATS returns one row per
// partition for a range-partitioned table.
func TestIssue42619(t *testing.T) {
	s := createInfosSchemaClusterTableSuite(t)
	mockAddr := s.mockAddr
	store := &mockStore{
		s.store.(helper.Storage),
		mockAddr,
	}
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int, b int, index idx(a))")
	tk.MustQuery("SELECT TABLE_SCHEMA, TABLE_NAME, PEER_COUNT, REGION_COUNT, EMPTY_REGION_COUNT, TABLE_SIZE, TABLE_KEYS " +
		"FROM information_schema.TABLE_STORAGE_STATS " +
		"WHERE TABLE_SCHEMA = 'test' and TABLE_NAME='t'").Check(
		testkit.Rows("test t 1 1 1 1 1"))
	tk.MustExec(
		"CREATE TABLE tp (a int(11) DEFAULT NULL,b int(11) DEFAULT NULL,c int(11) DEFAULT NULL," +
			"KEY ia(a), KEY ib(b), KEY ic (c))" +
			"PARTITION BY RANGE (`a`)" +
			"(PARTITION `p0` VALUES LESS THAN (300)," +
			"PARTITION `p1` VALUES LESS THAN (600)," +
			"PARTITION `p2` VALUES LESS THAN (900)," +
			"PARTITION `p3` VALUES LESS THAN (MAXVALUE))")
	tk.MustQuery("SELECT TABLE_SCHEMA, TABLE_NAME, PEER_COUNT, REGION_COUNT, EMPTY_REGION_COUNT, TABLE_SIZE, TABLE_KEYS " +
		"FROM information_schema.TABLE_STORAGE_STATS " +
		"WHERE TABLE_SCHEMA = 'test'").Sort().Check(
		testkit.Rows(
			"test t 1 1 1 1 1",
			"test tp 1 1 1 1 1",
			"test tp 1 1 1 1 1",
			"test tp 1 1 1 1 1",
			"test tp 1 1 1 1 1",
			"test tp 1 1 1 1 1",
		))
}
package host import ( "bytes" "context" "encoding/binary" "fmt" "net" "sync" "time" "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/google/gopacket/pcap" ) // Scanner provide container for control local network scanning // process and checking results type Scanner struct { mu sync.RWMutex ctx context.Context cancelFunc context.CancelFunc found chan *Host unique map[string]bool Error error } // NewScanner will initialise new instance of Scanner func NewScanner() *Scanner { ctx, cancelFunc := context.WithCancel(context.Background()) return &Scanner{ mu: sync.RWMutex{}, ctx: ctx, cancelFunc: cancelFunc, found: make(chan *Host), unique: make(map[string]bool), } } // Ctx wrap given context and return new with cancel func func (s *Scanner) Ctx(ctx context.Context) (context.Context, context.CancelFunc) { s.ctx, s.cancelFunc = context.WithCancel(ctx) return s.ctx, s.cancelFunc } // Hosts will return a read only channel to receive found Host func (s *Scanner) Hosts() <-chan *Host { return s.found } func (s *Scanner) fail(err error) { s.mu.Lock() defer s.mu.Unlock() s.Error = err if s.ctx.Err() == nil { s.cancelFunc() } } func (s *Scanner) foundHost(host *Host) bool { s.mu.Lock() defer s.mu.Unlock() if s.ctx.Err() != nil { return false } if _, ok := s.unique[host.ID()]; !ok { s.unique[host.ID()] = true s.found <- host } return true } // Scan will detect system interfaces and go over each one to detect // IP addresses to read/write ARP packets // Blocked until every interfaces unable to write packets or stop call // so typically should be run as a goroutine func (s *Scanner) Scan() { interfaces, err := net.Interfaces() if err != nil { s.fail(err) return } var wg sync.WaitGroup for i := range interfaces { wg.Add(1) go func(iface net.Interface) { defer wg.Done() if err := s.scanInterface(&iface); err != nil { s.fail(fmt.Errorf("interface [%v] error: %w", iface.Name, err)) return } }(interfaces[i]) } wg.Wait() close(s.found) } // Scans an individual 
interface's local network for machines using ARP requests/replies. // // Loops forever, sending packets out regularly. It returns an error if // it's ever unable to write a packet. func (s *Scanner) scanInterface(iface *net.Interface) error { // We just look for IPv4 addresses, so try to find if the interface has one. var addr *net.IPNet addresses, err := iface.Addrs() if err != nil { return err } for _, a := range addresses { if IPNet, ok := a.(*net.IPNet); ok { IPv4 := IPNet.IP.To4() if IPv4 == nil { continue } addr = &net.IPNet{ IP: IPv4, Mask: IPNet.Mask[len(IPNet.Mask)-4:], } } } // Sanity-check that the interface has a good address. if addr == nil { return nil } else if addr.IP[0] == 127 { return nil } else if addr.Mask[0] != 0xff || addr.Mask[1] != 0xff { return nil } // Open up a pcap handle for packet reads/writes. handle, err := pcap.OpenLive(iface.Name, 65536, true, pcap.BlockForever) if err != nil { return err } defer handle.Close() // Start up a goroutine to read in packet data. go s.listenARP(handle, iface) for { // Write our scan packets out to the handle. if err := writeARP(handle, iface, addr); err != nil { return fmt.Errorf("error writing packets: %w", err) } // We don't know exactly how long it'll take for packets to be // sent back to us, but 10 seconds should be more than enough // time ;) timeout := time.NewTicker(10 * time.Second) select { case <-timeout.C: continue case <-s.ctx.Done(): return nil } } } // Watches a handle for incoming ARP responses we might care about. // Push new Host once any correct response received // Work until 'stop' is closed. 
// listenARP reads packets from the pcap handle until the scan context
// is cancelled or the found channel's consumer goes away, publishing a
// Host for every ARP reply that did not originate from iface itself.
func (s *Scanner) listenARP(handle *pcap.Handle, iface *net.Interface) {
	src := gopacket.NewPacketSource(handle, layers.LayerTypeEthernet)
	in := src.Packets()
	for {
		var packet gopacket.Packet
		select {
		case <-s.ctx.Done():
			return
		case packet = <-in:
			arpLayer := packet.Layer(layers.LayerTypeARP)
			if arpLayer == nil {
				// Not an ARP packet at all; ignore.
				continue
			}
			arp := arpLayer.(*layers.ARP)
			if arp.Operation != layers.ARPReply || bytes.Equal([]byte(iface.HardwareAddr), arp.SourceHwAddress) {
				// This is a packet I sent.
				continue
			}
			// Note: we might get some packets here that aren't responses to ones we've sent,
			// if for example someone else sends US an ARP request. Doesn't much matter, though...
			// all information is good information :)
			if !s.foundHost(&Host{
				IP:  fmt.Sprintf("%v", net.IP(arp.SourceProtAddress)),
				MAC: fmt.Sprintf("%v", net.HardwareAddr(arp.SourceHwAddress)),
			}) {
				// foundHost returns false once the scan is cancelled.
				return
			}
		}
	}
}

// writeARP writes an ARP request for each address on our local network to the
// pcap handle.
func writeARP(handle *pcap.Handle, iface *net.Interface, addr *net.IPNet) error {
	// Set up all the layers' fields we can.
	eth := layers.Ethernet{
		SrcMAC:       iface.HardwareAddr,
		DstMAC:       net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
		EthernetType: layers.EthernetTypeARP,
	}
	arp := layers.ARP{
		AddrType:          layers.LinkTypeEthernet,
		Protocol:          layers.EthernetTypeIPv4,
		HwAddressSize:     6,
		ProtAddressSize:   4,
		Operation:         layers.ARPRequest,
		SourceHwAddress:   []byte(iface.HardwareAddr),
		SourceProtAddress: []byte(addr.IP),
		DstHwAddress:      []byte{0, 0, 0, 0, 0, 0},
	}
	// Set up buffer and options for serialization.
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{
		FixLengths:       true,
		ComputeChecksums: true,
	}
	// Send one packet for every address.
	for _, ip := range ips(addr) {
		arp.DstProtAddress = ip
		if err := gopacket.SerializeLayers(buf, opts, &eth, &arp); err != nil {
			return err
		}
		if err := handle.WritePacketData(buf.Bytes()); err != nil {
			return err
		}
	}
	return nil
}

// ips is a simple and not very good method for getting all IPv4 addresses from a
// net.IPNet. It returns them as a slice, walking from the network
// address up to (but not including) the broadcast address.
// (The original comment claimed it sent over a channel; it never did.)
func ips(n *net.IPNet) (out []net.IP) {
	num := binary.BigEndian.Uint32([]byte(n.IP))
	mask := binary.BigEndian.Uint32([]byte(n.Mask))
	// Clear the host bits so we start at the network address.
	num &= mask
	for mask < 0xffffffff {
		var buf [4]byte
		binary.BigEndian.PutUint32(buf[:], num)
		out = append(out, net.IP(buf[:]))
		mask++
		num++
	}
	return
}
package main import "fmt" type person struct { first string last string age int } // func [reciver] functionName([params]) [return type] {} // Here we attach this function (fullName) to the type person func (p person) fullName() string { return p.first + " " + p.last } func main() { p1 := person{"James", "Bond", 44} p2 := person{"Miss", "Moneypenny", 19} fmt.Println(p1.fullName()) fmt.Println(p2.fullName()) }
package memrepo import ( "github.com/scjalliance/drivestream/commit" "github.com/scjalliance/drivestream/resource" ) var _ commit.TreeGroup = (*CommitTreeGroup)(nil) // CommitTreeGroup is an unordered group of tree changes sharing a common // parent. type CommitTreeGroup struct { repo *Repository drive resource.ID commit commit.SeqNum parent resource.ID } // Parent returns the parent resource ID of the group. func (ref CommitTreeGroup) Parent() resource.ID { return ref.parent } // Changes returns the set of changes contained in the group. func (ref CommitTreeGroup) Changes() (changes []commit.TreeChange, err error) { drv, ok := ref.repo.drives[ref.drive] if !ok { return nil, commit.NotFound{Drive: ref.drive, Commit: ref.commit} } if ref.commit >= commit.SeqNum(len(drv.Commits)) { return nil, commit.NotFound{Drive: ref.drive, Commit: ref.commit} } tree := drv.Commits[ref.commit].Tree if tree == nil { return nil, commit.TreeGroupNotFound{Drive: ref.drive, Commit: ref.commit, Parent: ref.parent} } group := tree[ref.parent] if group == nil { return nil, commit.TreeGroupNotFound{Drive: ref.drive, Commit: ref.commit, Parent: ref.parent} } for child, removed := range group { changes = append(changes, commit.TreeChange{ Parent: ref.parent, Child: child, Removed: removed, }) } return changes, nil }
package main import "fmt" type Item struct { productID int qtd int price float64 } type Order struct { userID int items []Item } func (o Order) Value() float64 { total := 0.0 for _, Item := range o.items { total += Item.price * float64(Item.qtd) } return total } func main() { order := Order{ userID: 1, items: []Item{ Item{1, 2, 12.10}, Item{2, 3, 30.2}, Item{11, 100, 3.1}, }, } fmt.Printf("Total value is %.2f", order.Value()) }
package sieve import ( "bytes" "encoding/json" "fmt" "regexp" "text/template" ) type EventHandler interface { HandleEvent(results []string, event *Event) error } // SimpleEventHandler assigns event description formatted based on regex groups type SimpleEventHandler struct { *template.Template Severity string } func NewSimpleEventHandler(severity string, descTmpl string) SimpleEventHandler { return SimpleEventHandler{simpleTemplate(descTmpl), severity} } func (handler SimpleEventHandler) HandleEvent(results []string, event *Event) error { var output bytes.Buffer err := handler.Execute(&output, results) if err != nil { return fmt.Errorf("error in custom event handler %s; %s", event.Type, err) } event.Desc = output.String() event.Severity = handler.Severity return nil } // SimpleEventHandler assigns event description formatted based on // fields of JSON extracted from the first and only regex match group type JsonEventHandler struct { *template.Template Severity string } func NewJsonEventHandler(severity string, descTmpl string) JsonEventHandler { return JsonEventHandler{template.Must(template.New("").Parse(descTmpl)), severity} } func (handler JsonEventHandler) HandleEvent(results []string, event *Event) error { if len(results) < 1 { return fmt.Errorf("no regex matches found") } err := json.Unmarshal([]byte(results[1]), &event.Info) if err != nil { return err } var output bytes.Buffer err = handler.Execute(&output, event.Info) if err != nil { return fmt.Errorf("error in known event handler %s; %s", event.Type, err) } event.Desc = output.String() event.Severity = handler.Severity return nil } var templateIndexRe *regexp.Regexp func simpleTemplate(tmpl string) *template.Template { // replace $1 with {{index . 1}} as understood by text/template tmpl = templateIndexRe.ReplaceAllString(tmpl, "{{index . $1}}") return template.Must(template.New("").Parse(tmpl)) } func init() { templateIndexRe = regexp.MustCompile(`\$(\d+)`) }
package main import ( "crypto/ed25519" "fmt" "time" "github.com/pascaldekloe/jwt" ) var JWTPrivateKey ed25519.PrivateKey var JWTPublicKey ed25519.PublicKey func initJWT() { seed := Config.Seed if seed == "" { return } for len(seed) < ed25519.SeedSize { seed = seed + seed } if len(seed) > ed25519.SeedSize { seed = seed[:ed25519.SeedSize] } JWTPrivateKey = ed25519.NewKeyFromSeed([]byte(seed)) JWTPublicKey = []byte(JWTPrivateKey)[32:] } func createUserToken(groups []string) ([]byte, error) { var claims jwt.Claims claims.Subject = "user" claims.Issued = jwt.NewNumericTime(time.Now().Round(time.Second)) claims.Set = make(map[string]interface{}, len(groups)) for _, x := range groups { claims.Set[x] = true } return claims.EdDSASign(JWTPrivateKey) } func verifyUserToken(token []byte) (map[string]bool, error) { claims, err := jwt.EdDSACheck(token, JWTPublicKey) if err != nil { return nil, err } if !claims.Valid(time.Now()) { return nil, fmt.Errorf("credential time constraints exceeded") } if claims.Subject != "user" { return nil, fmt.Errorf("wrong claims subject") } out := make(map[string]bool) for k, v := range claims.Set { out[k] = v.(bool) } return out, nil }
package main import ( "bufio" "fmt" "os" "strconv" ) func main() { var i uint64 = 4 var d float64 = 4.0 var s string = "HackerRank " scanner := bufio.NewScanner(os.Stdin) var j uint64 var e float64 var t string var texts []string for scanner.Scan() { text := scanner.Text() texts = append(texts, text) if len(texts) == 3 { break } } j, _ = strconv.ParseUint(texts[0], 10, 64) e, _ = strconv.ParseFloat(texts[1], 64) t = texts[2] i += j d += e s += t fmt.Printf("%d\n", i) fmt.Printf("%.1f\n", d) fmt.Printf("%s\n", s) }
package problems import ( "testing" ) func TestReverseBetween(t *testing.T) { tasks := []struct { list []int m, n int expect []int }{ { list: []int{1, 2, 3, 4, 5}, m: 2, n: 4, expect: []int{1, 4, 3, 2, 5}, }, { list: []int{2, 3, 5, 6, 4, 1, 8, 9, 0, 7}, m: 2, n: 6, expect: []int{2, 1, 4, 6, 5, 3, 8, 9, 0, 7}, }, { list: []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, m: 1, n: 6, expect: []int{6, 5, 4, 3, 2, 1, 7, 8, 9}, }, { list: []int{1, 2, 3}, m: 1, n: 1, expect: []int{1, 2, 3}, }, { list: []int{1, 2, 3}, m: 0, n: 1, expect: []int{1, 2, 3}, }, { list: []int{1, 2, 3}, m: 2, n: 1, expect: []int{1, 2, 3}, }, { list: []int{}, m: 1, n: 2, expect: []int{}, }, } for fIdx, f := range []func(*ListNode, int, int) *ListNode{reverseBetween} { for i, task := range tasks { h1 := makeLinkedList(task.list...) h2 := f(h1, task.m, task.n) var got []int for p := h2; p != nil; p = p.Next { got = append(got, p.Val) } if !equal(got, task.expect) { t.Errorf("func #%d, task #%d failed, output: %v, expect: %v", fIdx, i, got, task.expect) } } } } func makeLinkedList(list ...int) (head *ListNode) { var p *ListNode for _, v := range list { n := &ListNode{ Val: v, } if p != nil { p.Next = n p = p.Next } else { // first node p = n head = n } } return } func equal(a, b []int) bool { if len(a) != len(b) { return false } for i, v := range a { if v != b[i] { return false } } return true }
package stockdb import( _ "github.com/go-sql-driver/mysql" "entity" "util" ) type StockHistDataDB struct { DBBase } func (s *StockHistDataDB) Insert(code string, d entity.StockHistData) int { db := s.Open() stmt, err := db.Prepare("insert stockhistdata set code=?, date=?, open=?, close=?, highest=?, lowest=?, volume=?, money=?") util.CheckError(err) defer stmt.Close() res, err := stmt.Exec(code, d.Date, d.Open, d.Close, d.Highest, d.Lowest, d.Volume, d.Money) util.CheckError(err) _, reserr := res.LastInsertId() util.CheckError(reserr) db.Close() return 0 } func (s *StockHistDataDB) Delete(code string, date string) int { db := s.Open() stmt, err := db.Prepare("delete from stockhistdata where code=? and date=?") util.CheckError(err) res, err := stmt.Exec(code, date) util.CheckError(err) defer stmt.Close() _, reserr := res.RowsAffected() util.CheckError(reserr) db.Close() return 0 } func (s *StockHistDataDB) Update(code string, d entity.StockHistData) int { db := s.Open() stmt, err := db.Prepare("update stockhistdata set open=?, close=?, highest=?, lowest=?, volume=?, money=? where code=? and date=?") util.CheckError(err) res, err := stmt.Exec(d.Date, d.Open, d.Close, d.Highest, d.Lowest, d.Volume, d.Money, code, d.Date) util.CheckError(err) defer stmt.Close() _, reserr := res.RowsAffected() util.CheckError(reserr) db.Close() return 0 } func (s *StockHistDataDB) Query(code string, date string) entity.StockHistData { db := s.Open() stmt, err := db.Prepare("select code, date, open, close, highest, lowest, volume, money from stockhistdata where code = ? 
and date = ?") util.CheckError(err) defer stmt.Close() var newcode, newdate string var open, close, highest, lowest float32 var volume, money int err = stmt.QueryRow(code, date).Scan(&newcode, &newdate, &open, &close, &highest, &lowest, &volume, &money) util.CheckError(err) db.Close() return entity.StockHistData{ Date: newdate, Open: open, Close: close, Highest: highest, Lowest: lowest, Volume: volume, Money: money, } } func (s *StockHistDataDB) TranInsert(code string, datas [] entity.StockHistData) int { db := s.Open() tx, err := db.Begin() util.CheckError(err) for _, d := range datas { stmt, err := tx.Prepare("insert stockhistdata set code=?, date=?, open=?, close=?, highest=?, lowest=?, volume=?, money=?") util.CheckError(err) _, reserr := stmt.Exec(code, d.Date, d.Open, d.Close, d.Highest, d.Lowest, d.Volume, d.Money) util.CheckError(reserr) defer stmt.Close() } err = tx.Commit() util.CheckError(err) db.Close() return 0 } func NewStockHistDataDB(dbname string) *StockHistDataDB { stdb := new(StockHistDataDB) stdb.Init(dbname) return stdb }
package main import "fmt" func main() { numbers := [5]int{1, 2, 3, 4, 5} var s1 []int = numbers[1:3] var s2 []int = numbers[2:4] fmt.Println(numbers) fmt.Println(s1) fmt.Println(s2) // Change data s2[0] = 333 fmt.Println("numbers :", numbers) fmt.Println("s1 :", s1) fmt.Println("s2 :", s2) }
/* Given an initial array arr, every day you produce a new array using the array of the previous day. On the i-th day, you do the following operations on the array of day i-1 to produce the array of day i: If an element is smaller than both its left neighbor and its right neighbor, then this element is incremented. If an element is bigger than both its left neighbor and its right neighbor, then this element is decremented. The first and last elements never change. After some days, the array does not change. Return that final array. Example 1: Input: arr = [6,2,3,4] Output: [6,3,3,4] Explanation: On the first day, the array is changed from [6,2,3,4] to [6,3,3,4]. No more operations can be done to this array. Example 2: Input: arr = [1,6,3,4,3,5] Output: [1,4,4,4,4,5] Explanation: On the first day, the array is changed from [1,6,3,4,3,5] to [1,5,4,3,4,5]. On the second day, the array is changed from [1,5,4,3,4,5] to [1,4,4,4,4,5]. No more operations can be done to this array. Constraints: 1 <= arr.length <= 100 1 <= arr[i] <= 100 */ package main import ( "fmt" "reflect" ) func main() { test([]int{6, 2, 3, 4}, []int{6, 3, 3, 4}) test([]int{1, 6, 3, 4, 3, 5}, []int{1, 4, 4, 4, 4, 5}) } func assert(x bool) { if !x { panic("assertion failed") } } func test(a, r []int) { p := transform(a) fmt.Println(p) assert(reflect.DeepEqual(p, r)) } func transform(a []int) []int { p := append([]int{}, a...) q := append([]int{}, a...) for { f := false for i := 1; i < len(a)-1; i++ { switch { case p[i] > p[i-1] && p[i] > p[i+1]: q[i], f = p[i]-1, true case p[i] < p[i-1] && p[i] < p[i+1]: q[i], f = p[i]+1, true default: q[i] = p[i] } } p, q = q, p if !f { return p } } }
package main import ( "flag" "fmt" "github.com/nokamoto/grpc-proxy/proxy" "github.com/prometheus/client_golang/prometheus/promhttp" "net/http" ) func main() { var ( port = flag.Int("p", 9000, "gRPC server port") pb = flag.String("pb", "", "file descriptor protocol buffers filepath") yml = flag.String("yaml", "", "yaml configuration filepath") prom = flag.Int("metrics", 9001, "Prometheus exporter port") ) go func() { http.Handle("/metrics", promhttp.Handler()) fmt.Println(http.ListenAndServe(fmt.Sprintf(":%d", prom), nil)) }() flag.Parse() srv, err := proxy.NewServer(*port, *pb, *yml) if err != nil { panic(err) } srv.Serve() }
package sploit import ( "bytes" "testing" ) func TestROPDumpX8664(t *testing.T) { e, _ := NewELF(elfFile) r, err := e.ROP() if err != nil { t.Fatal(err) } r.Dump() } func TestROPInstrSearchX8664(t *testing.T) { e, _ := NewELF(elfFile) r, _ := e.ROP() gadgets, err := r.InstrSearch(".*") if err != nil { t.Fatal(err) } if len(gadgets) != 72 { t.Fatal("Number of gadgets for wildcard match != 72") } gadgets, err = r.InstrSearch("add rsp, 8 ; pop rbx ; pop rbp ; pop r12 ; pop r13 ; pop r14 ; pop r15 ; ret") if err != nil { t.Fatal(err) } if len(gadgets) != 1 || gadgets[0].Address != 0x11ae { t.Fatal("Single gadget search did not return gadget at 0x11ae") } if bytes.Compare(gadgets[0].Opcode, []byte{0x48, 0x83, 0xc4, 0x08, 0x5b, 0x5d, 0x41, 0x5c, 0x41, 0x5d, 0x41, 0x5e, 0x41, 0x5f, 0xc3}) != 0 { t.Fatal("Gadget machine code does not match expected") } } func TestROPDumpARM(t *testing.T) { e, _ := NewELF("test/prog1.arm") r, err := e.ROP() if err != nil { t.Fatal(err) } r.Dump() }
package main import ( "alignfootbot/afdb" "fmt" "github.com/Syfaro/telegram-bot-api" "github.com/vrischmann/envconfig" "log" "reflect" "strconv" "strings" ) type Config struct { DbHost string `envconfig:"DB_HOST"` DbPort string `envconfig:"DB_PORT"` DbUser string `envconfig:"DB_USER"` DbName string `envconfig:"DB_NAME"` DbPass string `envconfig:"DB_PASSW"` DbSslMode string `envconfig:"DB_SSL_MODE"` BotToken string `envconfig:"BOT_TOKEN"` } func getConfig() *Config { var conf Config if err := envconfig.Init(&conf); err != nil { log.Fatalln(err) } return &conf } func startGame(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { if db.GameExists(msg.Chat.ID) { gameInfo := db.GameInfo(msg.Chat.ID) reply := fmt.Sprintf("[%s](tg://user?id=%d) уже начал всех собирать: %s", gameInfo.Holder, gameInfo.HolderId, gameInfo.Comment) responce := tgbotapi.NewMessage(msg.Chat.ID, reply) responce.ParseMode = "Markdown" bot.Send(responce) return } strTemplate := `Всем привет, собираемся играть, деньги принимает [%s](tg://user?id=%d) (%s) Чтобы записаться ставьте "+", если сдали деньги ставьте $200 (значит сдали 200р). 
Если хотите привести друга, ставьте +2, если передумали, ставьте "-", но деньги не вернем.` comment := strings.TrimPrefix(strings.TrimPrefix(msg.Text, "/go"), "@alignfootbot") db.NewGame(msg.Chat.ID, msg.From.String(), int64(msg.From.ID), comment) reply := fmt.Sprintf(strTemplate, msg.From.String(), msg.From.ID, comment) responce := tgbotapi.NewMessage(msg.Chat.ID, reply) responce.ParseMode = "Markdown" bot.Send(responce) } func countPlayers(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { log.Println("count players") if !db.GameExists(msg.Chat.ID) { reply := fmt.Sprintf("Всего в банке: %f р.", db.HowMuchMoney(msg.Chat.ID)) responce := tgbotapi.NewMessage(msg.Chat.ID, reply) bot.Send(responce) return } players := db.ChatPlayers(msg.Chat.ID) text := "Всего сдали: %f р.\nВсего в банке: %f р.\nОтметились %d человек:\n" sum := float64(0) count := 0 for _, player := range players { text += fmt.Sprintf("[%s](tg://user?id=%d)", player.UserName, player.UserId) if player.Count > 1 { text += " +" + strconv.Itoa(player.Count-1) } text += fmt.Sprintf("(%fp.)\n", player.Money) sum += player.Money count += player.Count } reply := tgbotapi.NewMessage(msg.Chat.ID, fmt.Sprintf(text, sum, db.HowMuchMoney(msg.Chat.ID), count)) reply.ParseMode = "Markdown" bot.Send(reply) } func addPlayer(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { log.Println("add player") players := 1 fmt.Sscanf(msg.Text, "+%d", &players) text := "записал" if !db.NewPlayer(msg.Chat.ID, int64(msg.From.ID), msg.From.String(), players) { text = "пока никто не собирался играть, запишись попозже" } reply := tgbotapi.NewMessage(msg.Chat.ID, text) reply.BaseChat.ReplyToMessageID = msg.MessageID bot.Send(reply) } func removePlayer(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { log.Println("remove player") players := 1 fmt.Sscanf(msg.Text, "-%d", &players) db.DropPlayer(msg.Chat.ID, int64(msg.From.ID), players) text := fmt.Sprintf("ну ладно, в следующий раз приходи") reply := 
tgbotapi.NewMessage(msg.Chat.ID, text) reply.BaseChat.ReplyToMessageID = msg.MessageID bot.Send(reply) } func addMoney(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { log.Println("add money") var money float64 fmt.Sscanf(msg.Text, "$%f", &money) text := fmt.Sprintf("принял") if !db.PutMoney(msg.Chat.ID, int64(msg.From.ID), msg.From.String(), money) { text = "пока никто не собирался играть" } reply := tgbotapi.NewMessage(msg.Chat.ID, text) reply.BaseChat.ReplyToMessageID = msg.MessageID bot.Send(reply) } func setGameCost(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { log.Println("set game cost") var money float64 log.Printf("Text: %s", msg.Text) fmt.Sscanf(msg.CommandArguments(), "%f", &money) db.SetGameCost(msg.Chat.ID, money) text := fmt.Sprintf("запомнил") reply := tgbotapi.NewMessage(msg.Chat.ID, text) bot.Send(reply) } func finishGame(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { log.Println("finish game") if !db.GameExists(msg.Chat.ID) { reply := "никто и не собирался" responce := tgbotapi.NewMessage(msg.Chat.ID, reply) bot.Send(responce) return } playersList := "" players := db.ChatPlayers(msg.Chat.ID) for _, player := range players { playersList += fmt.Sprintf("[%s](tg://user?id=%d)", player.UserName, player.UserId) if player.Count > 1 { playersList += " +" + strconv.Itoa(player.Count - 1) } playersList += ", " } db.PayForTheGame(msg.Chat.ID) text := fmt.Sprintf("%s cпасибо за игру, в банке осталось %f", playersList, db.HowMuchMoney(msg.Chat.ID)) reply := tgbotapi.NewMessage(msg.Chat.ID, text) reply.ParseMode = "Markdown" bot.Send(reply) } func handleCommands(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) { cmds := map[string]func(*afdb.Db, *tgbotapi.BotAPI, *tgbotapi.Message){ "/go": startGame, "/cost": setGameCost, "/count": countPlayers, "/finish": finishGame, "/go@alignfootbot": startGame, "/cost@alignfootbot": setGameCost, "/count@alignfootbot": countPlayers, "/finish@alignfootbot": finishGame, } tokens 
:= strings.Fields(msg.Text) if cmd, ok := cmds[tokens[0]]; ok { cmd(db, bot, msg) } } func handleText(db *afdb.Db, bot *tgbotapi.BotAPI, msg *tgbotapi.Message) bool { actions := map[byte]func(*afdb.Db, *tgbotapi.BotAPI, *tgbotapi.Message){ '+': addPlayer, '-': removePlayer, '$': addMoney, '/': handleCommands, } if cmd, ok := actions[msg.Text[0]]; ok { cmd(db, bot, msg) return true } return false } type Service struct { db *afdb.Db botApi *tgbotapi.BotAPI } func (th *Service) Run() { var ucfg tgbotapi.UpdateConfig = tgbotapi.NewUpdate(0) ucfg.Timeout = 60 updates, err := th.botApi.GetUpdatesChan(ucfg) if err != nil { log.Panic("Can't get updates: %s", err) } th.db.Init() for update := range updates { if update.Message == nil { continue } if reflect.TypeOf(update.Message.Text).Kind() == reflect.String && update.Message.Text != "" { handleText(th.db, th.botApi, update.Message) } } } func (th *Service) Close() { th.db.Close() } func CreateService(conf *Config) Service { db, err := afdb.DbConnect(conf.DbHost, conf.DbPort, conf.DbUser, conf.DbPass, conf.DbName, conf.DbSslMode) if err != nil { log.Panic("Can't connect to database") } log.Printf("DB connection is established") bot, err := tgbotapi.NewBotAPI(conf.BotToken) if err != nil { db.Close() log.Panic("Can't get API") } return Service{ db: db, botApi: bot, } } func main() { conf := getConfig() service := CreateService(conf) defer service.Close() service.Run() }
// Pool replays previously written data to generate read load
// (translated from the original Chinese header):
//  1. Prefill at maxPutQPS.
//  2. Once enough data has been written, keep reading and writing at
//     getQPS and putQPS respectively.
//  3. Reads return data written earlier, sampled by sampleRatio and
//     randomised; if reading outruns writing, rewind to the beginning.
//  4. It also logs roughly how long the prefill will take.
package pool

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"path"
	"sync/atomic"
	"time"

	"golang.org/x/time/rate"
)

// Pool coordinates a writer feeding a Recorder and a pair of
// double-buffered key pools that feed readers.
type Pool struct {
	prefillSize float64 // number of keys to write (or replay) before steady state
	poolSize    float64 // keys served per Get period
	getSize     int64   // keys fetched per sample (1/sampleRatio)
	notShuffle  bool
	pools       [2]pool // double buffer: one side fills while the other drains
	recorder    Recorder
	get         chan []byte
	put         chan []byte
}

// New builds the pool and starts the prefill; once prefill completes it
// launches the fill/put/get workers. All workers stop when ctx ends.
func New(ctx context.Context, recorder Recorder, opt Options) *Pool {
	opt.setDefault()
	rand.Seed(int64(time.Now().Nanosecond()))
	dataCh := make(chan []byte)
	p := Pool{
		poolSize:   opt.GetQPS * opt.GetPeriod.Seconds(),
		get:        make(chan []byte),
		put:        dataCh,
		getSize:    int64(1 / opt.SampleRatio),
		recorder:   recorder,
		notShuffle: opt.NotShuffle,
	}
	// When writes are slower than reads, prefill proportionally more so
	// readers do not starve.
	if opt.PutQPS > opt.GetQPS {
		p.prefillSize = p.poolSize / opt.SampleRatio
	} else {
		p.prefillSize = p.poolSize / opt.SampleRatio * (opt.GetQPS / opt.PutQPS)
	}
	p.pools[0] = newPool(int(p.poolSize))
	p.pools[1] = newPool(int(p.poolSize))
	log.Printf("prefill or read size: %v, pool size: %v\n", p.prefillSize, p.poolSize)
	go func() {
		log.Println("start prefill", p.prefillSize)
		if err := p.prefill(ctx, dataCh, opt.MaxPutQPS, opt.MaxGetQPS, opt.Burst); err != nil {
			log.Println("failed to prefill: ", err)
			return
		}
		log.Println("finish prefill")
		go p.fillPools(ctx, opt.MaxGetQPS, opt.Burst)
		go p.putWorker(ctx, opt.PutQPS, opt.Burst, dataCh)
		go p.getWorker(ctx, opt.GetQPS, opt.Burst)
	}()
	return &p
}

// Put hands data to the put worker (or the prefill), blocking until it
// is accepted or ctx ends.
func (p *Pool) Put(ctx context.Context, data []byte) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case p.put <- data:
		return nil
	}
}

// Get returns the next replayed key, blocking until one is available or
// ctx ends.
func (p *Pool) Get(ctx context.Context) ([]byte, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case data := <-p.get:
		return data, nil
	}
}

// Options configures the replay rates. Zero values are replaced by
// setDefault.
type Options struct {
	GetPeriod   time.Duration
	SampleRatio float64
	GetQPS      float64
	PutQPS      float64
	// rate to fill the pool
	MaxGetQPS float64
	// rate to prefill the pool
	MaxPutQPS float64
	Burst     float64
	ReadOnly  bool
	ReadOnce  bool
	NotShuffle bool
}

// setDefault fills unset options; -1 for the Max rates means
// "unlimited" (rate.Every of a non-positive interval yields Inf).
func (opt *Options) setDefault() {
	if opt.GetPeriod == 0 {
		opt.GetPeriod = time.Minute
	}
	if opt.SampleRatio <= 0 {
		opt.SampleRatio = 0.01
	}
	if opt.GetQPS == 0 {
		opt.GetQPS = 10
	}
	if opt.PutQPS == 0 {
		opt.PutQPS = 10
	}
	if opt.MaxPutQPS == 0 {
		opt.MaxPutQPS = -1
	}
	if opt.MaxGetQPS == 0 {
		opt.MaxGetQPS = -1
	}
	if opt.Burst <= 0 {
		opt.Burst = 1
	}
}

// pool is one side of the double buffer. The readable/writable channels
// act as ownership tokens passed between the fill and get workers.
type pool struct {
	data               [][]byte
	readable, writable chan struct{}
}

// newPool returns a pool of the given size that starts out writable.
func newPool(size int) pool {
	p := pool{
		data:     make([][]byte, size),
		readable: make(chan struct{}, 1),
		writable: make(chan struct{}, 1),
	}
	p.writable <- struct{}{}
	return p
}

// prefill first tries to replay existing recorder contents at rqps; if
// the recorder runs out before prefillSize, it waits for fresh keys on
// ch (written at wqps) to make up the difference.
//
// NOTE(review): rate.Every(time.Second/time.Duration(qps)) truncates
// the float qps to integer nanosecond divisors — qps values in (0,1)
// collapse to an unlimited limiter. Confirm only integral or -1 rates
// are used.
func (p *Pool) prefill(ctx context.Context, ch <-chan []byte, wqps, rqps, burst float64) error {
	var counter uint64
	finish := make(chan error, 1)
	go func() (err error) {
		defer log.Println("finish real prefill")
		defer func() { finish <- err }()
		wlimiter := rate.NewLimiter(rate.Every(time.Second/time.Duration(wqps)), int(burst))
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case key, ok := <-ch:
				if !ok {
					return errors.New("channel was closed")
				}
				if err := p.recorder.Put(key); err != nil {
					return err
				}
				if c := atomic.AddUint64(&counter, 1); float64(c) >= p.prefillSize {
					return nil
				}
			}
			if err := wlimiter.Wait(ctx); err != nil {
				return err
			}
		}
	}()
	defer p.recorder.Rewind()
	var rcounter uint64
	rlimiter := rate.NewLimiter(rate.Every(time.Second/time.Duration(rqps)), int(burst))
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case err := <-finish:
			return err
		default:
		}
		bs, err := p.recorder.GetN(100)
		if err != nil {
			return err
		}
		rcounter += uint64(len(bs))
		if float64(rcounter) >= p.prefillSize {
			atomic.StoreUint64(&counter, rcounter)
			log.Println("read all from previous")
			return nil
		}
		// read to EOF: a short batch means the recorder is exhausted, so
		// fall through and wait for the writer goroutine to finish.
		if len(bs) < 100 {
			atomic.StoreUint64(&counter, rcounter)
			log.Printf("read %v from previous\n", rcounter)
			break
		}
		if err := rlimiter.Wait(ctx); err != nil {
			return err
		}
	}
	return <-finish
}

// putWorker forwards keys from ch into the recorder at putQPS,
// retrying after a pause on write errors.
func (p *Pool) putWorker(ctx context.Context, putQPS, burst float64, ch <-chan []byte) {
	limiter := rate.NewLimiter(rate.Every(time.Second/time.Duration(putQPS)), int(burst))
	for {
		select {
		case <-ctx.Done():
			return
		case data, ok := <-ch:
			if !ok {
				log.Println("finish put worker")
				return
			}
			if err := p.recorder.Put(data); err != nil {
				log.Println(err)
				time.Sleep(time.Second)
			}
		}
		limiter.Wait(ctx)
	}
}

// getWorker drains whichever pool side is readable at getQPS, then
// hands the side back to the fill worker and switches sides.
func (p *Pool) getWorker(ctx context.Context, getQPS, burst float64) {
	limiter := rate.NewLimiter(rate.Every(time.Second/time.Duration(getQPS)), int(burst))
	idx := 0
	for {
		select {
		case <-ctx.Done():
			return
		case <-p.pools[idx].readable:
		}
		log.Println("start to read", idx)
		before := time.Now()
		for _, key := range p.pools[idx].data {
			if err := limiter.Wait(ctx); err != nil {
				log.Println(err)
				return
			}
			select {
			case <-ctx.Done():
				return
			case p.get <- key:
			}
		}
		log.Println("end read", idx, time.Since(before))
		p.pools[idx].writable <- struct{}{}
		idx = (idx + 1) % 2
	}
}

// fillPools refills whichever side is writable, retrying a failed fill
// after a pause, then marks the side readable and switches sides.
func (p *Pool) fillPools(ctx context.Context, qps, burst float64) {
	idx := 0
	for {
		select {
		case <-ctx.Done():
			return
		case <-p.pools[idx].writable:
		}
		log.Println("fill the pool", idx)
		// fill the pool
		if err := p.fillPool(ctx, p.pools[idx].data, qps, burst); err != nil {
			log.Printf("failed to fill the pool %v: %v, will try later\n", idx, err)
			time.Sleep(time.Second)
			p.pools[idx].writable <- struct{}{}
			continue
		}
		// if err := p.dump(idx); err != nil {
		// 	time.Sleep(time.Second)
		// 	p.pools[idx].writable <- struct{}{}
		// 	continue
		// }
		p.pools[idx].readable <- struct{}{}
		idx = (idx + 1) % 2
	}
}

// dump writes one pool side's keys, newline separated, to a temp file
// for debugging. (Currently only referenced from commented-out code.)
func (p *Pool) dump(idx int) error {
	dir, err := ioutil.TempDir("/tmp", "oss-key")
	if err != nil {
		return err
	}
	fp := path.Join(dir, "key-file")
	f, err := os.Create(fp)
	if err != nil {
		return err
	}
	defer f.Close()
	log.Println("dump key file to ", fp, idx)
	for _, key := range p.pools[idx].data {
		if _, err := f.Write(append(key, '\n')); err != nil {
			return err
		}
	}
	return nil
}

// fillPool fills data by sampling: for each slot it reads getSize keys
// from the recorder (rewinding on EOF) and keeps one at random, then
// shuffles the slots unless notShuffle is set.
func (p *Pool) fillPool(ctx context.Context, data [][]byte, qps, burst float64) error {
	limiter := rate.NewLimiter(rate.Every(time.Second/time.Duration(qps)), int(burst))
	for i := 0; i < len(data); i++ {
		bs, err := p.recorder.GetN(p.getSize)
		if err != nil {
			return err
		}
		if int64(len(bs)) != p.getSize {
			// need rewind and get more
			if err := p.recorder.Rewind(); err != nil {
				return err
			}
			log.Println("rewind to the beginning")
			bs2, err := p.recorder.GetN(p.getSize - int64(len(bs)))
			if err != nil {
				return err
			}
			bs = append(bs, bs2...)
		}
		data[i] = bs[rand.Int63n(p.getSize)]
		if err := limiter.Wait(ctx); err != nil {
			return err
		}
	}
	if p.notShuffle {
		return nil
	}
	// shuffle the data
	dest := make([][]byte, len(data))
	perm := rand.Perm(len(data))
	for i, v := range perm {
		dest[v] = data[i]
	}
	if n := copy(data, dest); float64(n) != p.poolSize {
		return fmt.Errorf("failed to copy, got %v, want %v", n, p.poolSize)
	}
	return nil
}
package openinstrument import ( "fmt" "time" ) type DurationTimer struct { name string start_time time.Time end_time time.Time total_time time.Duration running bool } func NewNamedDurationTimer(name string) *DurationTimer { return &DurationTimer{ name: name, } } func NewDurationTimer() *DurationTimer { return &DurationTimer{} } func (this *DurationTimer) Start() { if !this.running { this.start_time = time.Now() this.running = true } } func (this *DurationTimer) Stop() { if this.running { this.end_time = time.Now() d := this.end_time.Sub(this.start_time) this.total_time = time.Duration(d.Nanoseconds() + this.total_time.Nanoseconds()) } } func (this *DurationTimer) Duration() time.Duration { if this.running { this.Stop() } return this.total_time } func (this *DurationTimer) String() string { if this.name != "" { return fmt.Sprintf("%s: %s", this.name, this.Duration()) } return this.Duration().String() }
package helpers import ( "strconv" "strings" ) func SumConverter(amount string) (float64, bool, error) { val, err := strconv.ParseFloat(amount, 64) if err != nil { s := strings.Trim(amount, "$") if val, err = strconv.ParseFloat(s, 64); err != nil { return 0, false, err } return val, false, nil } return val, true, nil }
package model // HealthComponent has variables that can be // implemented into other structs type HealthComponent struct { }
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"net"

	_ "github.com/lib/pq" // registers the "postgres" driver with database/sql

	"google.golang.org/grpc"
	pb "google.golang.org/grpc/examples/helloworld/helloworld"
)

const (
	port = ":50051"
)

// server is used to implement helloworld.GreeterServer.
type server struct{}

// SayHello implements helloworld.GreeterServer: it logs the caller's name,
// runs the PostgreSQL demo, and replies with a greeting.
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	log.Printf("Received: %v", in.Name)
	execdb()
	return &pb.HelloReply{Message: "Hello " + in.Name}, nil
}

// main serves the Greeter gRPC service on port 50051 until Serve fails.
func main() {
	lis, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	pb.RegisterGreeterServer(s, &server{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}

// execdb demonstrates INSERT / SELECT / UPDATE against a local "member"
// table. Any database error aborts via panic (see checkErr).
func execdb() {
	// Initialize the database handle. sql.Open only validates arguments;
	// the first real connection is established lazily on first use.
	db, err := sql.Open("postgres", "user=postgres password=123 dbname=testdb sslmode=disable")
	// BUG FIX: check the error before deferring Close; the original deferred
	// db.Close() first, scheduling a call on a handle whose creation may
	// have failed.
	checkErr(err)
	defer db.Close()

	// Insert a row.
	stmt, err := db.Prepare("INSERT INTO member(id,nickname,phoneNumber,email) VALUES($1,$2,$3,$4);")
	checkErr(err)
	// The insert's sql.Result is unused (the original bound it to a variable
	// it never read before the update below).
	_, err = stmt.Exec("Apple", "iPhone", "0920123456", "apple@gmail.com")
	checkErr(err)
	fmt.Println("已新增資料列。\n")

	// Query rows.
	fmt.Println("查詢資料列:")
	rows, err := db.Query("SELECT id,nickname,phoneNumber,email FROM member")
	checkErr(err)
	var id, nickName, phoneNumber, email string
	for rows.Next() {
		err = rows.Scan(&id, &nickName, &phoneNumber, &email)
		checkErr(err)
		fmt.Println("\t", id, nickName, phoneNumber, email)
	}
	// BUG FIX: the original never closed the result set, leaking the pooled
	// connection it pinned; also surface any iteration error.
	rows.Close()
	checkErr(rows.Err())

	// Update a row.
	fmt.Println("\n更改資料列中...")
	stmt, err = db.Prepare("UPDATE member SET phoneNumber=$1 WHERE id=$2")
	checkErr(err)
	res, err := stmt.Exec("iPhoneXR", "Apple")
	checkErr(err)
	affect, err := res.RowsAffected()
	checkErr(err)
	fmt.Println("已更改", affect, "個資料列。\n")

	// Query rows again to show the update took effect.
	fmt.Println("查詢資料列:")
	rows, err = db.Query("SELECT id,nickname,phoneNumber,email FROM member")
	checkErr(err)
	for rows.Next() {
		err = rows.Scan(&id, &nickName, &phoneNumber, &email)
		checkErr(err)
		fmt.Println("\t", id, nickName, phoneNumber, email)
	}
	rows.Close()
	checkErr(rows.Err())

	// Delete a row (left disabled, as in the original):
	/*
		stmt, err = db.Prepare("DELETE FROM member WHERE id=$1")
		checkErr(err)
		res, err = stmt.Exec("apple")
		checkErr(err)
		affect, err = res.RowsAffected()
		checkErr(err)
		println("\n已刪除", affect, "個資料列。\n")
	*/
}

// checkErr panics on any non-nil error, aborting the current request.
func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}
package main

import (
	"consumer-importer/io"
	"consumer-importer/model"
	"consumer-importer/service"
	"fmt"
	"time"
)

// Package-level services, all configured from dbProperties (declared
// elsewhere in this package).
var consumerService = service.ConsumerService{Props: &dbProperties}
var fileService = service.FileService{Props: &dbProperties}
var dbInitializerService = service.DatabaseInitializerService{Props: &dbProperties}

// readFiles caches names of files this process already knows are imported,
// to skip the per-file database lookup on later scans.
var readFiles = make([]string, 0)

// BeginService starts up the process to read the consumer files and import
// them to a postgres database, polling the directory every five seconds.
func BeginService() {
	fmt.Println("Starting Consumer File Importer service...")
	dbInitializerService.InitializeDBTables()
	for {
		fmt.Println("Checking for existing files in directory: " + directory)
		filenames := io.GetFilesFromDirectory(directory)
		unreadFiles := getUnreadFiles(filenames)
		if len(unreadFiles) == 0 {
			fmt.Println("No files to load!")
		} else {
			// BUG FIX: the original passed the full `filenames` list here,
			// discarding the unread-file filtering computed just above and
			// re-checking every known file against the database each cycle.
			doImportProcess(unreadFiles)
		}
		time.Sleep(5 * time.Second)
	}
}

// getUnreadFiles returns the subset of filenames not yet present in the
// in-memory readFiles cache.
func getUnreadFiles(filenames []string) []string {
	unreadFiles := make([]string, 0)
	for _, file := range filenames {
		exists := false
		for _, readFile := range readFiles {
			if file == readFile {
				exists = true
				break
			}
		}
		if !exists {
			unreadFiles = append(unreadFiles, file)
		}
	}
	return unreadFiles
}

// doImportProcess imports each given file unless the database reports it was
// already read, in which case it is cached in readFiles and skipped.
func doImportProcess(filenames []string) {
	for _, filename := range filenames {
		alreadyRead, err := fileService.IsFileAlreadyRead(filename)
		if err != nil {
			panic(err)
		}
		if alreadyRead {
			readFiles = append(readFiles, filename)
			fmt.Println("File " + filename + " has already been read!")
			continue
		}
		importFile(filename)
	}
}

// importFile reads a single file line by line, persists its rows, and marks
// the file record successful. Extracted from doImportProcess so the deferred
// CloseFile runs when each file finishes instead of piling up until the whole
// batch returns (defer-in-loop leak in the original).
func importFile(filename string) {
	path := directory + filename
	fileReader := io.FileReader{
		FilePath: path,
	}
	fileReader.Initialize()
	defer fileReader.CloseFile()

	savedFile := storeFile(filename)
	for fileReader.Next() {
		processFileLine(fileReader.ReadLine())
	}
	fileService.UpdateFileAsSuccessful(*savedFile.ID)
	fmt.Println("File " + filename + " has been successfully read!")
}

// processFileLine persists one data line, skipping header rows.
func processFileLine(line string) {
	fmt.Println("Reading line: " + line)
	if io.IsHeader(line) {
		return
	}
	consumer := io.Format(line)
	consumerService.Save(&consumer)
}
func storeFile(filename string) *model.File { file := model.File{ ImportDate: time.Now(), Name: filename, Successful: false, } fileService.Save(&file) return &file }
package util

var (
	// EnvNodeName is the name of the environment variable expected to hold
	// the current node's name (read elsewhere, presumably via os.Getenv —
	// confirm against callers).
	EnvNodeName = "NODE_NAME"
)
package main

import "strconv"

// overLoad demonstrates a poor-man's overloaded constructor: exactly one of
// its fields is populated depending on the dynamic type given to NewOverLoad.
type overLoad struct {
	Name  string
	Age   int
	Wives []string
}

// NewOverLoad builds an *overLoad from an int (Age), a string (Name) or a
// []string (Wives); any other argument type yields the zero value.
func NewOverLoad(arg interface{}) interface{} {
	o := &overLoad{}
	// Idiom: a type switch replaces the original if/else chain of assertions.
	switch val := arg.(type) {
	case int:
		o.Age = val
	case string:
		o.Name = val
	case []string:
		o.Wives = val
	}
	return o
}

// defaultLen is the slot count NewNodes falls back to for unknown inputs.
const defaultLen = 10

type nodecha struct {
	Name string
}

type nodes []*nodecha

type num int

// ToString renders the number in base 10.
func (n num) ToString() string {
	return strconv.FormatInt(int64(n), 10)
}

// add appends n to the slice.
// BUG FIX: the original used a value receiver, so the slice header returned
// by append was assigned to a copy and the element was silently lost.
func (ns *nodes) add(n *nodecha) {
	*ns = append(*ns, n)
}

// get returns the i-th element (panics if out of range, like a raw slice).
func (ns nodes) get(i int) *nodecha {
	return ns[i]
}

// NewNodes builds a nodes slice: an int gives that many nil slots, an
// existing nodes value is shallow-copied, and anything else yields
// defaultLen nil slots.
func NewNodes(val interface{}) nodes {
	switch v := val.(type) {
	case int:
		return make(nodes, v)
	case nodes:
		n := make(nodes, len(v))
		copy(n, v)
		return n
	default:
		return make(nodes, defaultLen)
	}
}
package response //HealthResponse ... type HealthResponse struct { Status string `bson:"status" json:"status"` }
package json

import (
	"encoding/json"
	"log"
)

// ToString marshals i to its JSON text. A nil input yields ""; a marshal
// failure is logged and whatever bytes were produced (usually none) are
// still returned as a string.
func ToString(i interface{}) string {
	if i == nil {
		return ""
	}
	data, err := json.Marshal(i)
	if err != nil {
		log.Println("json.ToString:", err)
	}
	return string(data)
}

// ToBytes marshals i to JSON bytes. A nil input yields a nil slice; a
// marshal failure is logged rather than returned.
func ToBytes(i interface{}) (bs []byte) {
	if i == nil {
		return
	}
	data, err := json.Marshal(i)
	if err != nil {
		log.Println("json.ToBytes:", err)
	}
	return data
}

// ToObject unmarshals the JSON text s into i (callers pass a pointer);
// failures are logged, not returned.
func ToObject(s string, i interface{}) {
	if err := json.Unmarshal([]byte(s), &i); err != nil {
		log.Println("json.ToObject:", err)
	}
}

// ToObjectByBytes unmarshals the JSON bytes b into i (callers pass a
// pointer); failures are logged together with the offending payload.
func ToObjectByBytes(b []byte, i interface{}) {
	if err := json.Unmarshal(b, &i); err != nil {
		log.Println("json.ToObjectByBytes:", string(b), err)
	}
}
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package swarming

import (
	"testing"

	"github.com/maruel/ut"
)

// TestNew asserts that constructing a client against an arbitrary local URL
// returns no error — presumably construction does not contact the server;
// confirm against New's implementation.
func TestNew(t *testing.T) {
	t.Parallel()
	// TODO(maruel): Make a fake.
	_, err := New("https://localhost:1")
	ut.AssertEqual(t, nil, err)
}
package helper

import (
	"bytes"
	"fmt"
	"regexp"
	"strings"
	"syscall"
	"text/template"

	"github.com/gookit/goutil/strutil"
	"golang.org/x/term"
)

const (
	// RegGoodName match a good option, argument name
	RegGoodName = `^[a-zA-Z][\w-]*$`
	// RegGoodCmdName match a good command name
	RegGoodCmdName = `^[a-zA-Z][\w-]*$`
	// RegGoodCmdId match command id. eg: "self:init"
	RegGoodCmdId = `^[a-zA-Z][\w:-]*$`
	// match command path. eg: "self init"
	// RegGoodCmdPath = `^[a-zA-Z][\w -]*$`
)

var (
	// goodName: compiled matcher for a good option/argument name
	// (compiled once at package init, per the regexp package idiom).
	goodName = regexp.MustCompile(RegGoodName)
	// goodCmdId: compiled matcher for a good command id
	goodCmdId = regexp.MustCompile(RegGoodCmdId)
	// goodCmdName: compiled matcher for a good command name
	goodCmdName = regexp.MustCompile(RegGoodCmdName)
)

// IsGoodName reports whether name is a valid option/argument name
// (ASCII letter first, then word characters or '-').
func IsGoodName(name string) bool {
	return goodName.MatchString(name)
}

// IsGoodCmdId reports whether name is a valid command id, eg "self:init".
func IsGoodCmdId(name string) bool {
	return goodCmdId.MatchString(name)
}

// IsGoodCmdName reports whether name is a valid command name.
func IsGoodCmdName(name string) bool {
	return goodCmdName.MatchString(name)
}

// exec: `stty -a 2>&1`
// const (
// mac: speed 9600 baud; 97 rows; 362 columns;
// macSttyMsgPattern = `(\d+)\s+rows;\s*(\d+)\s+columns;`
// linux: speed 38400 baud; rows 97; columns 362; line = 0;
// linuxSttyMsgPattern = `rows\s+(\d+);\s*columns\s+(\d+);`
// )
var (
	// cached terminal size; a zero width means "not yet queried".
	terminalWidth, terminalHeight int
	// macSttyMsgMatch = regexp.MustCompile(macSttyMsgPattern)
	// linuxSttyMsgMatch = regexp.MustCompile(linuxSttyMsgPattern)
)

// GetTerminalSize for current console terminal.
//
// NOTE(review): as written, the cache is only served when the caller passes
// an explicit `false` (len(refresh) > 0 && !refresh[0]); a plain
// GetTerminalSize() call re-queries the terminal every time. If the intent
// is cache-by-default with refresh[0] == true forcing a re-query, the
// condition should be `len(refresh) == 0 || !refresh[0]` — confirm intended
// default before changing.
func GetTerminalSize(refresh ...bool) (w int, h int) {
	if terminalWidth > 0 && len(refresh) > 0 && !refresh[0] {
		return terminalWidth, terminalHeight
	}

	var err error
	// On a failed query, the zero (w, h) named results are returned and the
	// cache is left untouched.
	w, h, err = term.GetSize(syscall.Stdin)
	if err != nil {
		return
	}

	// cache result
	terminalWidth, terminalHeight = w, h
	return
}

// Panicf message — panics with a "GCli: "-prefixed formatted message.
func Panicf(format string, v ...any) {
	panic(fmt.Sprintf("GCli: "+format, v...))
}

// RenderText render text template with data. TODO use strutil.RenderText()
// When isFile[0] is true, input is treated as a template file path;
// otherwise it is the template text itself. Parse or execute failures panic.
func RenderText(input string, data any, fns template.FuncMap, isFile ...bool) string {
	t := template.New("cli")
	t.Funcs(template.FuncMap{
		// don't escape content
		"raw": func(s string) string {
			return s
		},
		"trim": strings.TrimSpace,
		// join strings. usage {{ join .Strings ","}}
		"join": func(ss []string, sep string) string {
			return strings.Join(ss, sep)
		},
		// lower first char
		"lcFirst": strutil.LowerFirst,
		// upper first char
		"ucFirst": strutil.UpperFirst,
	})

	// custom add template functions
	if len(fns) > 0 {
		t.Funcs(fns)
	}

	if len(isFile) > 0 && isFile[0] {
		template.Must(t.ParseFiles(input))
	} else {
		template.Must(t.Parse(input))
	}

	// use buffer receive rendered content
	var buf bytes.Buffer
	if err := t.Execute(&buf, data); err != nil {
		panic(err)
	}
	return buf.String()
}
package main

import "fmt"

// Demonstrates growing a slice with append: adding single elements, several
// at once, and splatting another slice with the ... operator.
func main() {
	nums := []int{4, 7, 5, 43}
	fmt.Println(nums)

	nums = append(nums, 23, 33, 55, 44)
	fmt.Println(nums)

	extra := []int{23, 24, 54, 53}
	nums = append(nums, extra...)
	fmt.Println(nums)
}
package main

import "fmt"

// Recipe describes a dish with its cooking time and temperature.
type Recipe struct {
	Name     string
	CookTime float32
	CookTemp float32
}

// String renders the recipe as "{Name: n; CookTime: t; CookTemp: c}".
func (r Recipe) String() string {
	return fmt.Sprintf("{Name: %s; CookTime: %g; CookTemp: %g}", r.Name, r.CookTime, r.CookTemp)
}

// Lab 08. Embarassingly Parallel — demonstrates goroutines, a buffered
// channel used as a completion counter, and a parallel for loop.

// parallelFor applies fun to every recipe concurrently, writing each result
// back into its original slot. The channel is buffered with one slot per
// goroutine so workers never block on their completion signal.
func parallelFor(recipes []Recipe, fun func(recipe Recipe) Recipe) {
	type done struct{}
	completed := make(chan done, len(recipes))

	for i, r := range recipes {
		// Pass index and value as arguments so each goroutine captures its
		// own copies rather than the shared loop variables.
		go func(slot int, in Recipe) {
			recipes[slot] = fun(in)
			completed <- done{}
		}(i, r)
	}

	// Wait for exactly one signal per launched goroutine.
	for range recipes {
		<-completed
	}
}

// main doubles the cook time and bumps the temperature of ten sample recipes
// in parallel, printing the slice before and after.
func main() {
	recipes := make([]Recipe, 10)
	for i := range recipes {
		recipes[i] = Recipe{Name: "Grilll Cheese", CookTime: 5.0, CookTemp: 275}
	}
	fmt.Printf("Before: %v\n\n", recipes)

	parallelFor(recipes, func(recipe Recipe) Recipe {
		recipe.CookTime *= 2
		recipe.CookTemp += 10
		return recipe
	})
	fmt.Printf("After: %v\n", recipes)
}
// Package gcp holds the GCP integration code (presumably Google Cloud
// Platform — confirm against the rest of the package); this file declares
// nothing itself.
package gcp

// see gcp_test.go for a test that subsumes tests for the deleter
package line_login

import (
	"github.com/5hields/line-login/linethrift"
	"github.com/apache/thrift/lib/go/thrift"
	"log"
)

// newThriftClient opens an HTTP thrift transport to apiUrl, stamps the LINE
// application and user-agent headers on it, and wraps it in a standard
// client that speaks the compact protocol in both directions.
func newThriftClient(apiUrl string) (*thrift.TStandardClient, error) {
	transport, err := thrift.NewTHttpClient(apiUrl)
	if err != nil {
		return nil, err
	}

	httpTransport := transport.(*thrift.THttpClient)
	httpTransport.SetHeader("X-Line-Application", lineApp)
	httpTransport.SetHeader("User-Agent", userAgent)

	proto := thrift.NewTCompactProtocolFactory().GetProtocol(transport)
	return thrift.NewTStandardClient(proto, proto), nil
}

// LineClient bundles the talk and auth thrift service clients.
type LineClient struct {
	talkClient *linethrift.TalkServiceClient
	authClient *linethrift.AuthServiceClient
}

// NewLineClient builds clients for the register and auth-register endpoints,
// aborting the process (log.Fatal) if either transport cannot be created.
func NewLineClient() *LineClient {
	talk, err := newThriftClient(registerUrl)
	if err != nil {
		log.Fatal(err)
	}

	auth, err := newThriftClient(authRegisterUrl)
	if err != nil {
		log.Fatal(err)
	}

	return &LineClient{
		talkClient: linethrift.NewTalkServiceClient(talk),
		authClient: linethrift.NewAuthServiceClient(auth),
	}
}
// DRUNKWATER TEMPLATE(add description and prototypes) // Question Title and Description on leetcode.com // Function Declaration and Function Prototypes on leetcode.com //640. Solve the Equation //Solve a given equation and return the value of x in the form of string "x=#value". The equation contains only '+', '-' operation, the variable x and its coefficient. //If there is no solution for the equation, return "No solution". //If there are infinite solutions for the equation, return "Infinite solutions". //If there is exactly one solution for the equation, we ensure that the value of x is an integer. //Example 1: //Input: "x+5-3+x=6+x-2" //Output: "x=2" //Example 2: //Input: "x=x" //Output: "Infinite solutions" //Example 3: //Input: "2x=x" //Output: "x=0" //Example 4: //Input: "2x+3x-6x=x+2" //Output: "x=-1" //Example 5: //Input: "x=x+2" //Output: "No solution" //func solveEquation(equation string) string { //} // Time Is Money
package immortal

import (
	"io/ioutil"
	"log"
	"testing"
)

// TestNewLoggerFileNone: NewLogger must return nil when the configured log
// file path cannot be used ("/dev/null/nonexist" can never be created).
func TestNewLoggerFileNone(t *testing.T) {
	log.SetOutput(ioutil.Discard) // silence log output produced during the test
	cfg := &Config{
		Log: Log{
			File: "/dev/null/nonexist",
		},
	}
	quit := make(chan struct{})
	l := NewLogger(cfg, quit)
	expect(t, true, l == nil)
}

// TestNewLoggerBadLogger: NewLogger must return nil when the external logger
// command is not a runnable program ("any-logger" is not on PATH).
func TestNewLoggerBadLogger(t *testing.T) {
	log.SetOutput(ioutil.Discard)
	cfg := &Config{
		Logger: "any-logger",
	}
	quit := make(chan struct{})
	l := NewLogger(cfg, quit)
	expect(t, true, l == nil)
}

// TestNewLoggerLogger: a valid external logger command ("cat") must yield a
// non-nil logger; quit is closed to let the spawned logger shut down.
func TestNewLoggerLogger(t *testing.T) {
	cfg := &Config{
		Logger: "cat",
	}
	quit := make(chan struct{})
	l := NewLogger(cfg, quit)
	expect(t, true, l != nil)
	close(quit)
}
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build ignore // +build ignore package main import ( "bytes" "go/format" "log" "os" "path/filepath" "text/template" . "github.com/pingcap/tidb/expression/generator/helper" ) var addOrSubTime = template.Must(template.New("").Parse(` {{ if eq $.FuncName "AddTime" }} // Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by go generate in expression/generator; DO NOT EDIT. 
package expression import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" ) {{ end }} {{ define "SetNull" }}{{if .Output.Fixed}}result.SetNull(i, true){{else}}result.AppendNull(){{end}} // fixed: {{.Output.Fixed }}{{ end }} {{ define "ConvertStringToDuration" }} {{ if and (ne .SigName "builtinAddStringAndStringSig") (ne .SigName "builtinSubStringAndStringSig") }} if !isDuration(arg1) { {{ template "SetNull" . }} continue }{{ end }} sc := b.ctx.GetSessionVars().StmtCtx arg1Duration, _, err := types.ParseDuration(sc, arg1, {{if eq .Output.TypeName "String"}}getFsp4TimeAddSub{{else}}types.GetFsp{{end}}(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) {{ template "SetNull" . }} continue } return err } {{ end }} {{ range .Sigs }} {{ if .AllNull}} func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result *chunk.Column) error { n := input.NumRows() {{ if .Output.Fixed }} result.Resize{{ .Output.TypeNameInColumn }}(n, true) {{ else }} result.Reserve{{ .Output.TypeNameInColumn }}(n) for i := 0; i < n; i++ { result.AppendNull() } {{ end }} return nil } {{ else }} func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result *chunk.Column) error { n := input.NumRows() {{ $reuse := (and (eq .TypeA.TypeName .Output.TypeName) .TypeA.Fixed) }} {{ if $reuse }} if err := b.args[0].VecEval{{ .TypeA.TypeName }}(b.ctx, input, result); err != nil { return err } buf0 := result {{ else }} buf0, err := b.bufAllocator.get() if err != nil { return err } defer b.bufAllocator.put(buf0) if err := b.args[0].VecEval{{ .TypeA.TypeName }}(b.ctx, input, buf0); err != nil { return err } {{ end }} {{ if or (eq .SigName "builtinAddStringAndStringSig") (eq .SigName "builtinSubStringAndStringSig") }} arg1Type := b.args[1].GetType() if mysql.HasBinaryFlag(arg1Type.GetFlag()) { result.Reserve{{ 
.Output.TypeNameInColumn }}(n) for i := 0; i < n; i++ { result.AppendNull() } return nil } {{ end }} buf1, err := b.bufAllocator.get() if err != nil { return err } defer b.bufAllocator.put(buf1) if err := b.args[1].VecEval{{ .TypeB.TypeName }}(b.ctx, input, buf1); err != nil { return err } {{ if $reuse }} result.MergeNulls(buf1) {{ else if .Output.Fixed}} result.Resize{{ .Output.TypeNameInColumn }}(n, false) result.MergeNulls(buf0, buf1) {{ else }} result.Reserve{{ .Output.TypeNameInColumn}}(n) {{ end }} {{ if .TypeA.Fixed }} arg0s := buf0.{{.TypeA.TypeNameInColumn}}s() {{ end }} {{ if .TypeB.Fixed }} arg1s := buf1.{{.TypeB.TypeNameInColumn}}s() {{ end }} {{ if .Output.Fixed }} resultSlice := result.{{.Output.TypeNameInColumn}}s() {{ end }} for i := 0; i < n; i++ { {{ if .Output.Fixed }} if result.IsNull(i) { continue } {{ else }} if buf0.IsNull(i) || buf1.IsNull(i) { result.AppendNull() continue } {{ end }} // get arg0 & arg1 {{ if .TypeA.Fixed }} arg0 := arg0s[i] {{ else }} arg0 := buf0.Get{{ .TypeA.TypeNameInColumn }}(i) {{ end }} {{ if .TypeB.Fixed }} arg1 := arg1s[i] {{ else }} arg1 := buf1.Get{{ .TypeB.TypeNameInColumn }}(i) {{ end }} // calculate {{ if or (eq .SigName "builtinAddDatetimeAndDurationSig") (eq .SigName "builtinSubDatetimeAndDurationSig") }} {{ if eq $.FuncName "AddTime" }} output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx, types.Duration{Duration: arg1, Fsp: -1}) {{ else }} sc := b.ctx.GetSessionVars().StmtCtx arg1Duration := types.Duration{Duration: arg1, Fsp: -1} output, err := arg0.Add(sc, arg1Duration.Neg()) {{ end }} if err != nil { return err } {{ else if or (eq .SigName "builtinAddDatetimeAndStringSig") (eq .SigName "builtinSubDatetimeAndStringSig") }} {{ if eq $.FuncName "AddTime" }} {{ template "ConvertStringToDuration" . 
}} output, err := arg0.Add(sc, arg1Duration) {{ else }} if !isDuration(arg1) { result.SetNull(i, true) // fixed: true continue } sc := b.ctx.GetSessionVars().StmtCtx arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) result.SetNull(i, true) // fixed: true continue } return err } output, err := arg0.Add(sc, arg1Duration.Neg()) {{ end }} if err != nil { return err } {{ else if or (eq .SigName "builtinAddDurationAndDurationSig") (eq .SigName "builtinSubDurationAndDurationSig") }} {{ if eq $.FuncName "AddTime" }} output, err := types.AddDuration(arg0, arg1) if err != nil { return err } {{ else }} output, err := types.SubDuration(arg0, arg1) if err != nil { return err } {{ end }} {{ else if or (eq .SigName "builtinAddDurationAndStringSig") (eq .SigName "builtinSubDurationAndStringSig") }} {{ template "ConvertStringToDuration" . }} {{ if eq $.FuncName "AddTime" }} output, err := types.AddDuration(arg0, arg1Duration.Duration) if err != nil { return err } {{ else }} output, err := types.SubDuration(arg0, arg1Duration.Duration) if err != nil { return err } {{ end }} {{ else if or (eq .SigName "builtinAddStringAndDurationSig") (eq .SigName "builtinSubStringAndDurationSig") }} sc := b.ctx.GetSessionVars().StmtCtx fsp1 := b.args[1].GetType().GetDecimal() arg1Duration := types.Duration{Duration: arg1, Fsp: fsp1} var output string var isNull bool if isDuration(arg0) { {{ if eq $.FuncName "AddTime" }} output, err = strDurationAddDuration(sc, arg0, arg1Duration) {{ else }} output, err = strDurationSubDuration(sc, arg0, arg1Duration) {{ end }} if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) {{ template "SetNull" . 
}} continue } return err } } else { {{ if eq $.FuncName "AddTime" }} output, isNull, err = strDatetimeAddDuration(sc, arg0, arg1Duration) {{ else }} output, isNull, err = strDatetimeSubDuration(sc, arg0, arg1Duration) {{ end }} if err != nil { return err } if isNull { sc.AppendWarning(err) {{ template "SetNull" . }} continue } } {{ else if or (eq .SigName "builtinAddStringAndStringSig") (eq .SigName "builtinSubStringAndStringSig") }} {{ template "ConvertStringToDuration" . }} var output string var isNull bool if isDuration(arg0) { {{ if eq $.FuncName "AddTime" }} output, err = strDurationAddDuration(sc, arg0, arg1Duration) {{ else }} output, err = strDurationSubDuration(sc, arg0, arg1Duration) {{ end }} if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) {{ template "SetNull" . }} continue } return err } } else { {{ if eq $.FuncName "AddTime" }} output, isNull, err = strDatetimeAddDuration(sc, arg0, arg1Duration) {{ else }} output, isNull, err = strDatetimeSubDuration(sc, arg0, arg1Duration) {{ end }} if err != nil { return err } if isNull { sc.AppendWarning(err) {{ template "SetNull" . }} continue } } {{ else if or (eq .SigName "builtinAddDateAndDurationSig") (eq .SigName "builtinSubDateAndDurationSig") }} fsp0 := b.args[0].GetType().GetDecimal() fsp1 := b.args[1].GetType().GetDecimal() arg1Duration := types.Duration{Duration: arg1, Fsp: fsp1} {{ if eq $.FuncName "AddTime" }} sum, err := types.Duration{Duration: arg0, Fsp: fsp0}.Add(arg1Duration) {{ else }} sum, err := types.Duration{Duration: arg0, Fsp: fsp0}.Sub(arg1Duration) {{ end }} if err != nil { return err } output := sum.String() {{ else if or (eq .SigName "builtinAddDateAndStringSig") (eq .SigName "builtinSubDateAndStringSig") }} {{ template "ConvertStringToDuration" . 
}} fsp0 := b.args[0].GetType().GetDecimal() {{ if eq $.FuncName "AddTime" }} sum, err := types.Duration{Duration: arg0, Fsp: fsp0}.Add(arg1Duration) {{ else }} sum, err := types.Duration{Duration: arg0, Fsp: fsp0}.Sub(arg1Duration) {{ end }} if err != nil { return err } output := sum.String() {{ end }} // commit result {{ if .Output.Fixed }} resultSlice[i] = output {{ else }} result.Append{{ .Output.TypeNameInColumn }}(output) {{ end }} } return nil } {{ end }}{{/* if .AllNull */}} func (b *{{.SigName}}) vectorized() bool { return true } {{ end }}{{/* range */}} `)) var timeDiff = template.Must(template.New("").Parse(` {{ define "BufAllocator0" }} buf0, err := b.bufAllocator.get() if err != nil { return err } defer b.bufAllocator.put(buf0) {{ end }} {{ define "BufAllocator1" }} buf1, err := b.bufAllocator.get() if err != nil { return err } defer b.bufAllocator.put(buf1) {{ end }} {{ define "ArgsVecEval" }} if err := b.args[0].VecEval{{ .TypeA.TypeName }}(b.ctx, input, buf0); err != nil { return err } if err := b.args[1].VecEval{{ .TypeB.TypeName }}(b.ctx, input, buf1); err != nil { return err } {{ end }} {{ range . 
}} {{ $AIsString := (eq .TypeA.TypeName "String") }} {{ $BIsString := (eq .TypeB.TypeName "String") }} {{ $AIsTime := (eq .TypeA.TypeName "Time") }} {{ $BIsTime := (eq .TypeB.TypeName "Time") }} {{ $AIsDuration := (eq .TypeA.TypeName "Duration") }} {{ $BIsDuration := (eq .TypeB.TypeName "Duration") }} {{ $MaybeDuration := (or (or $AIsDuration $BIsDuration) (and $AIsString $AIsString)) }} {{ $reuseA := (eq .TypeA.TypeName "Duration") }} {{ $reuseB := (eq .TypeB.TypeName "Duration") }} {{ $reuse := (or $reuseA $reuseB ) }} {{ $noNull := (ne .SigName "builtinNullTimeDiffSig") }} func (b *{{.SigName}}) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { n := input.NumRows() {{- if not $noNull }} result.ResizeGoDuration(n, true) {{- else }} result.ResizeGoDuration(n, false) r64s := result.GoDurations() {{- if $reuseA }} buf0 := result {{- template "BufAllocator1" . }} {{- template "ArgsVecEval" . }} result.MergeNulls(buf1) {{- else if $reuseB }} buf1 := result {{- template "BufAllocator0" . }} {{- template "ArgsVecEval" . }} result.MergeNulls(buf0) {{- else }} {{- template "BufAllocator0" . }} {{- template "BufAllocator1" . }} {{- template "ArgsVecEval" . 
}} result.MergeNulls(buf0, buf1) {{- end }} {{- if .TypeA.Fixed }} arg0 := buf0.{{.TypeA.TypeNameInColumn}}s() {{- end }} {{- if .TypeB.Fixed }} arg1 := buf1.{{.TypeB.TypeNameInColumn}}s() {{- end }} {{- if (or $AIsDuration $BIsDuration) }} var ( lhs types.Duration rhs types.Duration ) {{- end }} {{- if or (or $AIsString $BIsString) (and $AIsTime $BIsTime) }} stmtCtx := b.ctx.GetSessionVars().StmtCtx {{- end }} for i:=0; i<n ; i++{ if result.IsNull(i) { continue } {{- if $AIsString }} {{ if $BIsDuration }} lhsDur, _, lhsIsDuration, {{- else if $BIsTime }} _, lhsTime, lhsIsDuration, {{- else if $BIsString }} lhsDur, lhsTime, lhsIsDuration, {{- end }} err := convertStringToDuration(stmtCtx, buf0.GetString(i), b.tp.GetDecimal()) if err != nil { return err } {{- if $BIsDuration }} if !lhsIsDuration { result.SetNull(i, true) continue } lhs = lhsDur {{- else if $BIsTime }} if lhsIsDuration { result.SetNull(i, true) continue } {{- end }} {{- else if $AIsTime }} lhsTime := arg0[i] {{- else }} lhs.Duration = arg0[i] {{- end }} {{- if $BIsString }} {{ if $AIsDuration }} rhsDur, _, rhsIsDuration, {{- else if $AIsTime }}_, rhsTime, rhsIsDuration, {{- else if $AIsString }} rhsDur, rhsTime, rhsIsDuration, {{- end}} err := convertStringToDuration(stmtCtx, buf1.GetString(i), b.tp.GetDecimal()) if err != nil { return err } {{- if $AIsDuration }} if !rhsIsDuration { result.SetNull(i, true) continue } rhs = rhsDur {{- else if $AIsTime }} if rhsIsDuration { result.SetNull(i, true) continue } {{- end }} {{- else if $BIsTime }} rhsTime := arg1[i] {{- else }} rhs.Duration = arg1[i] {{- end }} {{- if and $AIsString $BIsString }} if lhsIsDuration != rhsIsDuration { result.SetNull(i, true) continue } var ( d types.Duration isNull bool ) if lhsIsDuration { d, isNull, err = calculateDurationTimeDiff(b.ctx, lhsDur, rhsDur) } else { d, isNull, err = calculateTimeDiff(stmtCtx, lhsTime, rhsTime) } {{- else if or $AIsDuration $BIsDuration }} d, isNull, err := calculateDurationTimeDiff(b.ctx, lhs, 
rhs) {{- else if or $AIsTime $BIsTime }} d, isNull, err := calculateTimeDiff(stmtCtx, lhsTime, rhsTime) {{- end }} if err != nil { return err } if isNull { result.SetNull(i, true) continue } r64s[i] = d.Duration } {{- end }} {{/* if $noNull */}} return nil } func (b *{{.SigName}}) vectorized() bool { return true } {{ end }}{{/* range */}} `)) func getIntervalUnitListForDurationAsDuration() []string { return []string{ "MICROSECOND", "SECOND", "MINUTE", "HOUR", "SECOND_MICROSECOND", "MINUTE_MICROSECOND", "MINUTE_SECOND", "HOUR_MICROSECOND", "HOUR_SECOND", "HOUR_MINUTE", "DAY_MICROSECOND", } } func getIntervalUnitListForDurationAsDatetime() []string { return []string{ "DAY", "WEEK", "MONTH", "QUARTER", "YEAR", "DAY_SECOND", "DAY_MINUTE", "DAY_HOUR", "YEAR_MONTH", } } func getIntervalUnitList() []string { return append(getIntervalUnitListForDurationAsDuration(), getIntervalUnitListForDurationAsDatetime()...) } var testFileFuncs = template.FuncMap{ "getIntervalUnitListForDurationAsDuration": getIntervalUnitListForDurationAsDuration, "getIntervalUnitListForDurationAsDatetime": getIntervalUnitListForDurationAsDatetime, "getIntervalUnitList": getIntervalUnitList, } var testFile = template.Must(template.New("").Funcs(testFileFuncs).Parse(` // Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by go generate in expression/generator; DO NOT EDIT. 
package expression import ( "math" "testing" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/types" ) type gener struct { defaultGener } func (g gener) gen() interface{} { result := g.defaultGener.gen() if _, ok := result.(string); ok { dg := newDefaultGener(0, types.ETDuration) d := dg.gen().(types.Duration) if d.Duration%2 == 0 { d.Fsp = 0 } else { d.Fsp = 1 } result = d.String() } return result } {{ define "addOrSubDateCases" }} {{- range $sig := .Sigs }} // {{ $sig.SigName }} {{- if and (eq $sig.TypeA.ETName "Duration") (eq $sig.Output.ETName "Duration") -}} {{- $unitList := getIntervalUnitListForDurationAsDuration -}} {{- range $unit := $unitList }} { retEvalType: types.ET{{ $sig.Output.ETName }}, childrenTypes: []types.EvalType{types.ET{{ $sig.TypeA.ETName }}, types.ET{{ $sig.TypeB.ETName }}, types.ETString}, geners: []dataGenerator{ newDefaultGener(0.2, types.ET{{$sig.TypeA.ETName}}), {{- if eq $sig.TypeB.ETName "String" }} &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32, newDefaultRandGen()}}, {{- else }} newDefaultGener(0.2, types.ET{{$sig.TypeB.ETName}}), {{- end }} }, constants: []*Constant{nil, nil, {Value: types.NewStringDatum("{{$unit}}"), RetType: types.NewFieldType(mysql.TypeString)}}, chunkSize: 128, }, {{- end }} {{- else if and (eq $sig.TypeA.ETName "Duration") (eq $sig.Output.ETName "Datetime") -}} // TODO: Make the following cases stable, i.e., shouldn't be affected by crossing a day (date part is padded to current date). 
{{- $unitList := getIntervalUnitListForDurationAsDatetime -}} {{- range $unit := $unitList }} { retEvalType: types.ET{{ $sig.Output.ETName }}, childrenTypes: []types.EvalType{types.ET{{ $sig.TypeA.ETName }}, types.ET{{ $sig.TypeB.ETName }}, types.ETString}, geners: []dataGenerator{ newDefaultGener(0.2, types.ET{{$sig.TypeA.ETName}}), {{- if eq $sig.TypeB.ETName "String" }} &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32, newDefaultRandGen()}}, {{- else }} newDefaultGener(0.2, types.ET{{$sig.TypeB.ETName}}), {{- end }} }, constants: []*Constant{nil, nil, {Value: types.NewStringDatum("{{$unit}}"), RetType: types.NewFieldType(mysql.TypeString)}}, chunkSize: 128, }, {{- end }} {{- else -}} {{- $unitList := getIntervalUnitList -}} {{- range $unit := $unitList }} { retEvalType: types.ET{{ $sig.Output.ETName }}, childrenTypes: []types.EvalType{types.ET{{ $sig.TypeA.ETName }}, types.ET{{ $sig.TypeB.ETName }}, types.ETString}, {{- if ne $sig.FieldTypeA "" }} childrenFieldTypes: []*types.FieldType{types.NewFieldType(mysql.Type{{$sig.FieldTypeA}}), types.NewFieldType(mysql.Type{{$sig.FieldTypeB}})}, {{- end }} geners: []dataGenerator{ {{- if eq $sig.FieldTypeA "Date" }} newNullWrappedGener(0.2, dateGener{randGen: newDefaultRandGen()}), {{- else if eq $sig.TypeA.ETName "String"}} newNullWrappedGener(0.2, dateOrDatetimeStrGener{dateRatio: 0.2, dateStrGener: dateStrGener{randGen: newDefaultRandGen()}, dateTimeStrGener: dateTimeStrGener{Fsp: -1, randGen: newDefaultRandGen()}}), {{- else if eq $sig.TypeA.ETName "Int"}} newNullWrappedGener(0.2, dateOrDatetimeIntGener{dateRatio: 0.2, dateIntGener: dateIntGener{dateGener: dateGener{randGen: newDefaultRandGen()}}, dateTimeIntGener: dateTimeIntGener{dateTimeGener: dateTimeGener{randGen: newDefaultRandGen()}}}), {{- else if eq $sig.TypeA.ETName "Real"}} newNullWrappedGener(0.2, dateOrDatetimeRealGener{dateRatio: 0.2, dateRealGener: dateRealGener{fspRatio: 0.5, dateGener: dateGener{randGen: newDefaultRandGen()}}, 
dateTimeRealGener: dateTimeRealGener{fspRatio: 0.5, dateTimeGener: dateTimeGener{randGen: newDefaultRandGen()}}}), {{- else if eq $sig.TypeA.ETName "Decimal"}} newNullWrappedGener(0.2, dateOrDatetimeDecimalGener{dateRatio: 0.2, dateDecimalGener: dateDecimalGener{fspRatio: 0.5, dateGener: dateGener{randGen: newDefaultRandGen()}}, dateTimeDecimalGener: dateTimeDecimalGener{fspRatio: 0.5, dateTimeGener: dateTimeGener{randGen: newDefaultRandGen()}}}), {{- else }} newDefaultGener(0.2, types.ET{{$sig.TypeA.ETName}}), {{- end }} {{- if eq $sig.TypeB.ETName "String" }} &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32, newDefaultRandGen()}}, {{- else }} newDefaultGener(0.2, types.ET{{$sig.TypeB.ETName}}), {{- end }} }, constants: []*Constant{nil, nil, {Value: types.NewStringDatum("{{$unit}}"), RetType: types.NewFieldType(mysql.TypeString)}}, chunkSize: 128, }, {{- end }} {{- end }} {{- end }} {{ end }} {{ define "addOrSubTimeCases" }} {{- range $sig := .Sigs }} // {{ $sig.SigName }} { retEvalType: types.ET{{ .Output.ETName }}, {{- if eq .TestTypeA "" }} childrenTypes: []types.EvalType{types.ET{{ .TypeA.ETName }}, types.ET{{ .TypeB.ETName }}}, {{- else }} childrenTypes: []types.EvalType{types.ET{{ .TestTypeA }}, types.ET{{ .TestTypeB }}}, {{- end }} {{- if ne .FieldTypeA "" }} childrenFieldTypes: []*types.FieldType{types.NewFieldType(mysql.Type{{.FieldTypeA}}), types.NewFieldType(mysql.Type{{.FieldTypeB}})}, {{- end }} geners: []dataGenerator{ {{- if eq .TestTypeA "" }} gener{*newDefaultGener(0.2, types.ET{{.TypeA.ETName}})}, gener{*newDefaultGener(0.2, types.ET{{.TypeB.ETName}})}, {{- else }} gener{*newDefaultGener(0.2, types.ET{{ .TestTypeA }})}, gener{*newDefaultGener(0.2, types.ET{{ .TestTypeB }})}, {{- end }} }, }, {{- end }} {{ end }} {{/* Add more test cases here if we have more functions in this file */}} var vecBuiltin{{.Category}}GeneratedCases = map[string][]vecExprBenchCase{ {{- range .Functions }} {{- if eq .FuncName "AddTime" }} ast.AddTime: { {{- 
template "addOrSubTimeCases" . -}} }, {{ end }} {{- if eq .FuncName "SubTime" }} ast.SubTime: { {{- template "addOrSubTimeCases" . -}} }, {{ end }} {{- if eq .FuncName "TimeDiff" }} ast.TimeDiff: { // builtinNullTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETDatetime}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETTimestamp}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDatetime, types.ETDuration}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETDuration}}, // builtinDurationDurationTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETDuration}}, // builtinDurationStringTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETString}, geners: []dataGenerator{nil, &dateTimeStrGener{Year: 2019, Month: 11, Fsp: 0, randGen: newDefaultRandGen()}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETString}, geners: []dataGenerator{nil, &dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETString}, geners: []dataGenerator{nil, &dateTimeStrGener{Year: 2019, Month: 10, Fsp: 4, randGen: newDefaultRandGen()}}}, // builtinTimeTimeTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDatetime, types.ETDatetime}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}, &dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDatetime, types.ETTimestamp}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}, &dateTimeGener{Year: 2019, Month: 10, randGen: 
newDefaultRandGen()}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETTimestamp}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}, &dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETDatetime}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}, &dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}}}, // builtinTimeStringTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDatetime, types.ETString}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}, &dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETString}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}, &dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}}}, // builtinStringDurationTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDuration}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}, nil}}, // builtinStringTimeTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDatetime}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}, &dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETTimestamp}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}, &dateTimeGener{Year: 2019, Month: 10, randGen: newDefaultRandGen()}}}, // builtinStringStringTimeDiffSig 
{retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}, &dateTimeStrGener{Year: 2019, Month: 10, Fsp: 0, randGen: newDefaultRandGen()}}}, }, {{ end }} {{- if eq .FuncName "AddDate" }} ast.AddDate: { {{- template "addOrSubDateCases" . -}} }, {{ end }} {{- if eq .FuncName "SubDate" }} ast.SubDate: { {{- template "addOrSubDateCases" . -}} }, {{ end }} {{ end }} } func TestVectorizedBuiltin{{.Category}}EvalOneVecGenerated(t *testing.T) { testVectorizedEvalOneVec(t, vecBuiltin{{.Category}}GeneratedCases) } func TestVectorizedBuiltin{{.Category}}FuncGenerated(t *testing.T) { testVectorizedBuiltinFunc(t, vecBuiltin{{.Category}}GeneratedCases) } func BenchmarkVectorizedBuiltin{{.Category}}EvalOneVecGenerated(b *testing.B) { benchmarkVectorizedEvalOneVec(b, vecBuiltin{{.Category}}GeneratedCases) } func BenchmarkVectorizedBuiltin{{.Category}}FuncGenerated(b *testing.B) { benchmarkVectorizedBuiltinFunc(b, vecBuiltin{{.Category}}GeneratedCases) } `)) var addTimeSigsTmpl = []sig{ {SigName: "builtinAddDatetimeAndDurationSig", TypeA: TypeDatetime, TypeB: TypeDuration, Output: TypeDatetime}, {SigName: "builtinAddDatetimeAndStringSig", TypeA: TypeDatetime, TypeB: TypeString, Output: TypeDatetime}, {SigName: "builtinAddDurationAndDurationSig", TypeA: TypeDuration, TypeB: TypeDuration, Output: TypeDuration}, {SigName: "builtinAddDurationAndStringSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeDuration}, {SigName: "builtinAddStringAndDurationSig", TypeA: TypeString, TypeB: TypeDuration, Output: TypeString}, {SigName: "builtinAddStringAndStringSig", TypeA: TypeString, TypeB: TypeString, Output: TypeString}, {SigName: "builtinAddDateAndDurationSig", TypeA: TypeDuration, TypeB: TypeDuration, Output: TypeString, FieldTypeA: "Date", FieldTypeB: "Duration", TestTypeA: "Datetime", TestTypeB: "Duration"}, {SigName: 
"builtinAddDateAndStringSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeString, FieldTypeA: "Date", FieldTypeB: "String", TestTypeA: "Datetime", TestTypeB: "String"}, {SigName: "builtinAddTimeDateTimeNullSig", TypeA: TypeDatetime, TypeB: TypeDatetime, Output: TypeDatetime, AllNull: true}, {SigName: "builtinAddTimeStringNullSig", TypeA: TypeDatetime, TypeB: TypeDatetime, Output: TypeString, AllNull: true, FieldTypeA: "Date", FieldTypeB: "Datetime"}, {SigName: "builtinAddTimeDurationNullSig", TypeA: TypeDuration, TypeB: TypeDatetime, Output: TypeDuration, AllNull: true}, } var subTimeSigsTmpl = []sig{ {SigName: "builtinSubDatetimeAndDurationSig", TypeA: TypeDatetime, TypeB: TypeDuration, Output: TypeDatetime}, {SigName: "builtinSubDatetimeAndStringSig", TypeA: TypeDatetime, TypeB: TypeString, Output: TypeDatetime}, {SigName: "builtinSubDurationAndDurationSig", TypeA: TypeDuration, TypeB: TypeDuration, Output: TypeDuration}, {SigName: "builtinSubDurationAndStringSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeDuration}, {SigName: "builtinSubStringAndDurationSig", TypeA: TypeString, TypeB: TypeDuration, Output: TypeString}, {SigName: "builtinSubStringAndStringSig", TypeA: TypeString, TypeB: TypeString, Output: TypeString}, {SigName: "builtinSubDateAndDurationSig", TypeA: TypeDuration, TypeB: TypeDuration, Output: TypeString, FieldTypeA: "Date", FieldTypeB: "Duration", TestTypeA: "Datetime", TestTypeB: "Duration"}, {SigName: "builtinSubDateAndStringSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeString, FieldTypeA: "Date", FieldTypeB: "String", TestTypeA: "Datetime", TestTypeB: "String"}, {SigName: "builtinSubTimeDateTimeNullSig", TypeA: TypeDatetime, TypeB: TypeDatetime, Output: TypeDatetime, AllNull: true}, {SigName: "builtinSubTimeStringNullSig", TypeA: TypeDatetime, TypeB: TypeDatetime, Output: TypeString, AllNull: true, FieldTypeA: "Date", FieldTypeB: "Datetime"}, {SigName: "builtinSubTimeDurationNullSig", TypeA: TypeDuration, TypeB: 
TypeDatetime, Output: TypeDuration, AllNull: true}, } var timeDiffSigsTmpl = []sig{ {SigName: "builtinNullTimeDiffSig", Output: TypeDuration}, {SigName: "builtinTimeStringTimeDiffSig", TypeA: TypeDatetime, TypeB: TypeString, Output: TypeDuration}, {SigName: "builtinDurationStringTimeDiffSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeDuration}, {SigName: "builtinDurationDurationTimeDiffSig", TypeA: TypeDuration, TypeB: TypeDuration, Output: TypeDuration}, {SigName: "builtinStringTimeTimeDiffSig", TypeA: TypeString, TypeB: TypeDatetime, Output: TypeDuration}, {SigName: "builtinStringDurationTimeDiffSig", TypeA: TypeString, TypeB: TypeDuration, Output: TypeDuration}, {SigName: "builtinStringStringTimeDiffSig", TypeA: TypeString, TypeB: TypeString, Output: TypeDuration}, {SigName: "builtinTimeTimeTimeDiffSig", TypeA: TypeDatetime, TypeB: TypeDatetime, Output: TypeDuration}, } var addDateSigsTmpl = []sig{ {SigName: "builtinAddDateStringStringSig", TypeA: TypeString, TypeB: TypeString, Output: TypeString}, {SigName: "builtinAddDateStringIntSig", TypeA: TypeString, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinAddDateStringRealSig", TypeA: TypeString, TypeB: TypeReal, Output: TypeString}, {SigName: "builtinAddDateStringDecimalSig", TypeA: TypeString, TypeB: TypeDecimal, Output: TypeString}, {SigName: "builtinAddDateIntStringSig", TypeA: TypeInt, TypeB: TypeString, Output: TypeString}, {SigName: "builtinAddDateIntIntSig", TypeA: TypeInt, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinAddDateIntRealSig", TypeA: TypeInt, TypeB: TypeReal, Output: TypeString}, {SigName: "builtinAddDateIntDecimalSig", TypeA: TypeInt, TypeB: TypeDecimal, Output: TypeString}, {SigName: "builtinAddDateRealStringSig", TypeA: TypeReal, TypeB: TypeString, Output: TypeString}, {SigName: "builtinAddDateRealIntSig", TypeA: TypeReal, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinAddDateRealRealSig", TypeA: TypeReal, TypeB: TypeReal, Output: TypeString}, {SigName: 
"builtinAddDateRealDecimalSig", TypeA: TypeReal, TypeB: TypeDecimal, Output: TypeString}, {SigName: "builtinAddDateDecimalStringSig", TypeA: TypeDecimal, TypeB: TypeString, Output: TypeString}, {SigName: "builtinAddDateDecimalIntSig", TypeA: TypeDecimal, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinAddDateDecimalRealSig", TypeA: TypeDecimal, TypeB: TypeReal, Output: TypeString}, {SigName: "builtinAddDateDecimalDecimalSig", TypeA: TypeDecimal, TypeB: TypeDecimal, Output: TypeString}, {SigName: "builtinAddDateDatetimeStringSig", TypeA: TypeDatetime, TypeB: TypeString, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "String"}, {SigName: "builtinAddDateDatetimeIntSig", TypeA: TypeDatetime, TypeB: TypeInt, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "Longlong"}, {SigName: "builtinAddDateDatetimeRealSig", TypeA: TypeDatetime, TypeB: TypeReal, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "Double"}, {SigName: "builtinAddDateDatetimeDecimalSig", TypeA: TypeDatetime, TypeB: TypeDecimal, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "NewDecimal"}, {SigName: "builtinAddDateDatetimeStringSig", TypeA: TypeDatetime, TypeB: TypeString, Output: TypeDatetime}, {SigName: "builtinAddDateDatetimeIntSig", TypeA: TypeDatetime, TypeB: TypeInt, Output: TypeDatetime}, {SigName: "builtinAddDateDatetimeRealSig", TypeA: TypeDatetime, TypeB: TypeReal, Output: TypeDatetime}, {SigName: "builtinAddDateDatetimeDecimalSig", TypeA: TypeDatetime, TypeB: TypeDecimal, Output: TypeDatetime}, {SigName: "builtinAddDateDurationStringSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeDuration}, {SigName: "builtinAddDateDurationIntSig", TypeA: TypeDuration, TypeB: TypeInt, Output: TypeDuration}, {SigName: "builtinAddDateDurationRealSig", TypeA: TypeDuration, TypeB: TypeReal, Output: TypeDuration}, {SigName: "builtinAddDateDurationDecimalSig", TypeA: TypeDuration, TypeB: TypeDecimal, Output: TypeDuration}, {SigName: "builtinAddDateDurationStringSig", TypeA: 
TypeDuration, TypeB: TypeString, Output: TypeDatetime}, {SigName: "builtinAddDateDurationIntSig", TypeA: TypeDuration, TypeB: TypeInt, Output: TypeDatetime}, {SigName: "builtinAddDateDurationRealSig", TypeA: TypeDuration, TypeB: TypeReal, Output: TypeDatetime}, {SigName: "builtinAddDateDurationDecimalSig", TypeA: TypeDuration, TypeB: TypeDecimal, Output: TypeDatetime}, } var subDateSigsTmpl = []sig{ {SigName: "builtinSubDateStringStringSig", TypeA: TypeString, TypeB: TypeString, Output: TypeString}, {SigName: "builtinSubDateStringIntSig", TypeA: TypeString, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinSubDateStringRealSig", TypeA: TypeString, TypeB: TypeReal, Output: TypeString}, {SigName: "builtinSubDateStringDecimalSig", TypeA: TypeString, TypeB: TypeDecimal, Output: TypeString}, {SigName: "builtinSubDateIntStringSig", TypeA: TypeInt, TypeB: TypeString, Output: TypeString}, {SigName: "builtinSubDateIntIntSig", TypeA: TypeInt, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinSubDateIntRealSig", TypeA: TypeInt, TypeB: TypeReal, Output: TypeString}, {SigName: "builtinSubDateIntDecimalSig", TypeA: TypeInt, TypeB: TypeDecimal, Output: TypeString}, {SigName: "builtinSubDateRealStringSig", TypeA: TypeReal, TypeB: TypeString, Output: TypeString}, {SigName: "builtinSubDateRealIntSig", TypeA: TypeReal, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinSubDateRealRealSig", TypeA: TypeReal, TypeB: TypeReal, Output: TypeString}, {SigName: "builtinSubDateRealDecimalSig", TypeA: TypeReal, TypeB: TypeDecimal, Output: TypeString}, {SigName: "builtinSubDateDecimalStringSig", TypeA: TypeDecimal, TypeB: TypeString, Output: TypeString}, {SigName: "builtinSubDateDecimalIntSig", TypeA: TypeDecimal, TypeB: TypeInt, Output: TypeString}, {SigName: "builtinSubDateDecimalRealSig", TypeA: TypeDecimal, TypeB: TypeReal, Output: TypeString}, {SigName: "builtinSubDateDecimalDecimalSig", TypeA: TypeDecimal, TypeB: TypeDecimal, Output: TypeString}, {SigName: 
"builtinSubDateDatetimeStringSig", TypeA: TypeDatetime, TypeB: TypeString, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "String"}, {SigName: "builtinSubDateDatetimeIntSig", TypeA: TypeDatetime, TypeB: TypeInt, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "Longlong"}, {SigName: "builtinSubDateDatetimeRealSig", TypeA: TypeDatetime, TypeB: TypeReal, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "Double"}, {SigName: "builtinSubDateDatetimeDecimalSig", TypeA: TypeDatetime, TypeB: TypeDecimal, Output: TypeDatetime, FieldTypeA: "Date", FieldTypeB: "NewDecimal"}, {SigName: "builtinSubDateDatetimeStringSig", TypeA: TypeDatetime, TypeB: TypeString, Output: TypeDatetime}, {SigName: "builtinSubDateDatetimeIntSig", TypeA: TypeDatetime, TypeB: TypeInt, Output: TypeDatetime}, {SigName: "builtinSubDateDatetimeRealSig", TypeA: TypeDatetime, TypeB: TypeReal, Output: TypeDatetime}, {SigName: "builtinSubDateDatetimeDecimalSig", TypeA: TypeDatetime, TypeB: TypeDecimal, Output: TypeDatetime}, {SigName: "builtinSubDateDurationStringSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeDuration}, {SigName: "builtinSubDateDurationIntSig", TypeA: TypeDuration, TypeB: TypeInt, Output: TypeDuration}, {SigName: "builtinSubDateDurationRealSig", TypeA: TypeDuration, TypeB: TypeReal, Output: TypeDuration}, {SigName: "builtinSubDateDurationDecimalSig", TypeA: TypeDuration, TypeB: TypeDecimal, Output: TypeDuration}, {SigName: "builtinSubDateDurationStringSig", TypeA: TypeDuration, TypeB: TypeString, Output: TypeDatetime}, {SigName: "builtinSubDateDurationIntSig", TypeA: TypeDuration, TypeB: TypeInt, Output: TypeDatetime}, {SigName: "builtinSubDateDurationRealSig", TypeA: TypeDuration, TypeB: TypeReal, Output: TypeDatetime}, {SigName: "builtinSubDateDurationDecimalSig", TypeA: TypeDuration, TypeB: TypeDecimal, Output: TypeDatetime}, } type sig struct { SigName string TypeA, TypeB, Output TypeContext FieldTypeA, FieldTypeB string // Optional TestTypeA, TestTypeB string 
// Optional, specific Type for test in builtinAddDateAndDurationSig & builtinAddDateAndStringSig AllNull bool } type function struct { FuncName string Sigs []sig } var tmplVal = struct { Category string Functions []function }{ Category: "Time", Functions: []function{ {FuncName: "AddTime", Sigs: addTimeSigsTmpl}, {FuncName: "SubTime", Sigs: subTimeSigsTmpl}, {FuncName: "TimeDiff", Sigs: timeDiffSigsTmpl}, {FuncName: "AddDate", Sigs: addDateSigsTmpl}, {FuncName: "SubDate", Sigs: subDateSigsTmpl}, }, } func generateDotGo(fileName string) error { w := new(bytes.Buffer) err := addOrSubTime.Execute(w, function{FuncName: "AddTime", Sigs: addTimeSigsTmpl}) if err != nil { return err } err = addOrSubTime.Execute(w, function{FuncName: "SubTime", Sigs: subTimeSigsTmpl}) if err != nil { return err } err = timeDiff.Execute(w, timeDiffSigsTmpl) if err != nil { return err } data, err := format.Source(w.Bytes()) if err != nil { log.Println("[Warn]", fileName+": gofmt failed", err) data = w.Bytes() // write original data for debugging } return os.WriteFile(fileName, data, 0644) } func generateTestDotGo(fileName string) error { w := new(bytes.Buffer) err := testFile.Execute(w, tmplVal) if err != nil { return err } data, err := format.Source(w.Bytes()) if err != nil { log.Println("[Warn]", fileName+": gofmt failed", err) data = w.Bytes() // write original data for debugging } return os.WriteFile(fileName, data, 0644) } // generateOneFile generate one xxx.go file and the associated xxx_test.go file. func generateOneFile(fileNamePrefix string) (err error) { err = generateDotGo(fileNamePrefix + ".go") if err != nil { return } err = generateTestDotGo(fileNamePrefix + "_test.go") return } func main() { var err error outputDir := "." err = generateOneFile(filepath.Join(outputDir, "builtin_time_vec_generated")) if err != nil { log.Fatalln("generateOneFile", err) } }
package main

import (
	"fmt"
	"math"
)

// main prints math.Pi twice: once with the default %f precision
// (six decimals) and once rounded to two decimals.
func main() {
	fmt.Printf("%f\n", math.Pi)
	fmt.Printf("%.2f\n", math.Pi)
}
/*
A strobogrammatic number is a number that looks the same when rotated
180 degrees (looked at upside down).

Write a function to determine if a number is strobogrammatic. The
number is represented as a string.

For example, the numbers "69", "88", and "818" are all strobogrammatic.
*/
package main

// main checks a few known strobogrammatic numbers.
func main() {
	assert(strobogrammatic("69") == true)
	assert(strobogrammatic("88") == true)
	assert(strobogrammatic("818") == true)
}

// assert panics when its condition is false.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}

// strobogrammatic reports whether s reads the same after a 180-degree
// rotation. The only digits that survive rotation are 0, 1 and 8 (which
// map to themselves) and the pair 6/9 (which map to each other).
// https://oeis.org/A000787
func strobogrammatic(s string) bool {
	rotated := map[byte]byte{
		'0': '0',
		'1': '1',
		'8': '8',
		'6': '9',
		'9': '6',
	}
	i, j := 0, len(s)-1
	for i <= j {
		ra, rb := rotated[s[i]], rotated[s[j]]
		// Both ends must be rotatable digits, and rotating the left
		// end must yield exactly the right end.
		if ra == 0 || rb == 0 || ra != s[j] {
			return false
		}
		i++
		j--
	}
	return true
}
package main

import (
	"fmt"
)

// main emits "first second" via two successive Print calls
// (no newline is written).
func main() {
	for _, word := range []string{"first ", "second"} {
		fmt.Print(word)
	}
}
package kata func FindOdd(seq []int) int { target:=0 for i:=0;i<len(seq);i++{ target = seq[i] times:=0 for j:=0;j<len(seq);j++ { if target == seq[j]{ times++ } } if times%2==1{ return target } } return -1 }
package problems /* Follow up: 1. only use constant extra space. 2. recursive solution is fine. */ // normal binary tree func connect(root *Node) *Node { if root == nil { return nil } p := root.Next // next subtree's most left child for p != nil { if p.Left != nil { p = p.Left break } if p.Right != nil { p = p.Right break } p = p.Next } // connect root's childs if root.Right != nil { root.Right.Next = p } if root.Left != nil { if root.Right != nil { root.Left.Next = root.Right } else { root.Left.Next = p } } f := connect f(root.Right) f(root.Left) return root } // use a queue, is not good. func connect1(root *Node) *Node { if root == nil { return nil } var q []*Node q = append(q, root) for len(q) != 0 { var nq []*Node for i := 0; i+1 < len(q); i++ { q[i].Next = q[i+1] if n := q[i].Left; n != nil { nq = append(nq, n) } if n := q[i].Right; n != nil { nq = append(nq, n) } } if n := q[len(q)-1].Left; n != nil { nq = append(nq, n) } if n := q[len(q)-1].Right; n != nil { nq = append(nq, n) } q = nq } return root }
package main

import (
	"fmt"
	"time"
	"github.com/jinzhu/gorm"
	_ "github.com/lib/pq"
)

// Post is a blog post persisted through gorm; Id is the conventional
// primary key and Comments is a has-many association joined via
// Comment.PostId.
type Post struct {
	Id        int
	Content   string
	Author    string `sql:"not null"`
	Comments  []Comment
	CreatedAt time.Time
}

// Comment belongs to a Post through PostId.
type Comment struct {
	Id        int
	Content   string
	Author    string `sql:"not null"`
	PostId    int
	CreatedAt time.Time
}

// Db is the shared gorm handle opened in init.
var Db *gorm.DB

// init opens the postgres connection and auto-migrates both tables.
// NOTE(review): credentials are hard-coded; consider reading them from
// the environment.
func init() {
	var err error
	Db, err = gorm.Open("postgres", "user=gwp dbname=gwp password=gwp sslmode=disable")
	if err != nil {
		panic(err)
	}
	Db.AutoMigrate(&Post{}, &Comment{})
}

// main demonstrates creating a post, appending an associated comment,
// then reading a post and its related comments back.
func main() {
	post := Post{Content: "Hello Gooo!", Author: "Jaaaaaaaav"}
	fmt.Println(post)
	Db.Create(&post)
	fmt.Println(post)
	comment := Comment{Content: "Goood post", Author: "Ruuuuuuuby"}
	fmt.Println(comment)
	Db.Model(&post).Association("Comments").Append(comment)
	fmt.Println(post)
	var readPost Post
	// NOTE(review): gorm conventionally uses "?" placeholders; confirm
	// that "$1" binds correctly through the postgres driver, and that a
	// row with id = 10 actually exists.
	Db.Where("id = $1", 10).First(&readPost)
	var comments []Comment
	Db.Model(&readPost).Related(&comments)
	fmt.Println(comments)
	// NOTE(review): this indexes without a length check and panics when
	// readPost has no related comments.
	fmt.Println(comments[0])
}
package main

import (
	"fmt"
	"io"
	"os"
)

// main reads ./main/test.txt in 1024-byte chunks (reading method 1),
// accumulating the bytes read so far and printing the accumulated string
// after every chunk.
//
// Fixes over the original:
//   - the open error is checked BEFORE entering the read loop; the old
//     code kept going with a nil *os.File, whose Read returns a non-EOF
//     error forever (an infinite loop);
//   - strSlice is assigned with "=" instead of ":=", which used to shadow
//     the accumulator on every iteration;
//   - only the n bytes actually read are appended, not the whole
//     1024-byte buffer (which carried garbage from earlier reads);
//   - non-EOF read errors terminate the loop instead of being ignored.
func main() {
	file, err := os.Open("./main/test.txt")
	if err != nil {
		fmt.Println("打开文件出错")
		return
	}
	// Close the file stream when main returns.
	defer file.Close()

	// Read the file contents chunk by chunk.
	tempSlice := make([]byte, 1024)
	var strSlice []byte
	for {
		n, err := file.Read(tempSlice)
		if err == io.EOF {
			fmt.Printf("读取完毕")
			break
		}
		if err != nil {
			fmt.Println(err)
			break
		}
		fmt.Printf("读取到了%v 个字节 \n", n)
		strSlice = append(strSlice, tempSlice[:n]...)
		fmt.Println(string(strSlice))
	}
}
// NOTE(review): this scratch file declares kthSmallest twice (two
// alternative solutions), and has no package clause or imports (sort,
// fmt); it will not compile as-is.

/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */

// kthSmallest (version 1) returns the k-th smallest value (1-indexed) in
// the tree by collecting every value with an in-order walk and sorting.
// NOTE(review): an in-order traversal of a BST is already ascending, so
// the sort.Slice call is redundant for BST input — confirm whether the
// input is guaranteed to be a BST.
func kthSmallest(root *TreeNode, k int) int {
	elements := [] int{}
	dfs(root, &elements)
	sort.Slice(elements, func(a, b int) bool {
		return elements[a] < elements[b]
	})
	result := elements[k - 1]
	return result
}

// dfs appends the subtree's values to arr in in-order (left, node, right).
func dfs(node *TreeNode, arr *[]int) {
	if node == nil {
		return;
	}
	dfs(node.Left, arr)
	*arr = append(*arr, node.Val)
	dfs(node.Right, arr)
}

//O(N)
/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */

// kthSmallest (version 2) descends the tree using subtree sizes: if the
// left subtree holds >= k nodes the answer is there; if it holds fewer
// than k-1 the answer is in the right subtree (with k shifted); otherwise
// the current node is the k-th smallest.
// NOTE(review): duplicate function name, and a leftover debug
// fmt.Println; counting nodes at every level makes this O(n^2) in the
// worst case despite the "O(N)" comment above.
func kthSmallest(root *TreeNode, k int) int {
	count := countNodes(root.Left)
	fmt.Println(count)
	if k <= count {
		return kthSmallest(root.Left, k)
	} else if k > count + 1 {
		return kthSmallest(root.Right, k - count - 1)
	}
	return root.Val
}

// countNodes returns the number of nodes in the subtree rooted at node.
func countNodes(node *TreeNode) int {
	if node == nil {
		return 0;
	}
	return 1 + countNodes(node.Left) + countNodes(node.Right)
}
package gate

import (
	"encoding/binary"
	"flag"
	"fmt"
	"github.com/golang/protobuf/proto"
	"github.com/gorilla/websocket"
	"github.com/pkg/errors"
	"hub000.xindong.com/rookie/rookie-framework/protobuf"
	"log"
	"net/http"
	"net/url"
	"sausage-shoot-proto/protocol"
	"testing"
)

// TestHandler is a stub message handler used to drive the gate in tests.
type TestHandler struct {}

// Unmarshal always returns an error; it only records (via stdout) that
// the gate invoked it.
func (m *TestHandler)Unmarshal(t int, message []byte, clientID string) (protobuf.CSRequest, error){
	fmt.Println("in Unmarshal func")
	return protobuf.CSRequest{}, errors.New("sdf")
}

// Marshal returns an empty Message for any response.
func (m *TestHandler)Marshal(resp protobuf.CSResponse) (*Message, error){
	return &Message{}, nil
}

// TestWSGate starts a gate serving both websocket and HTTP on port 10000,
// opens a websocket connection that sends one message, then hits the
// /index HTTP route; the test finishes once the HTTP handler signals
// sigChan.
func TestWSGate(t *testing.T) {
	sigChan := make(chan string, 1)
	go func() {
		c := &Config{
			Port:10000,
			WriteWait:20,
			PongWait:60,
			PingPeriod:54,
			MaxMessageSize:512,
			MessageBufferSize:256,
		}
		g := &TestHandler{}
		gate := NewGate(c).UseWS(g).UseHttp()
		gate.RegisterHttpRouter("/index", func(writer http.ResponseWriter, r *http.Request) {
			fmt.Println("this is a http request")
			sigChan <- "over"
		})
		gate.StartServer()
	}()
	GetWSConn()
	// NOTE(review): Get is presumably a package-local HTTP helper; its
	// result/error is not checked. There is also no wait between starting
	// the server goroutine and dialing it — confirm StartServer is ready
	// in time.
	Get("http://localhost:10000/index")
	fmt.Println(<- sigChan)
}

// GetWSConn dials the gate's websocket endpoint, sends one binary
// protocol message, and closes the connection.
func GetWSConn(){
	var addr = flag.String("addr", "localhost:10000", "http service address")
	u := url.URL{Scheme: "ws", Host: *addr, Path: "/ws"}
	log.Printf("connecting to %s", u.String())
	c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer c.Close()
	// NOTE(review): the WriteMessage error is ignored.
	c.WriteMessage(websocket.BinaryMessage, createMsg())
}

// createMsg builds a wire message: a 2-byte big-endian protocol number
// (4) followed by a protobuf-encoded GetConfReq body.
func createMsg() []byte {
	// build modulemsg
	login := protocol.GetConfReq{
		PlayerID: "qinhan",
	}
	data, err := proto.Marshal(&login)
	if err != nil {
		log.Println(err)
	}
	// add protocol num
	m := make([]byte, 2+len(data))
	// big-endian byte order is used by default
	binary.BigEndian.PutUint16(m, uint16(4))
	copy(m[2:], data)
	return m
}

// getMessage splits a wire message into its 2-byte big-endian protocol
// number and the remaining body bytes.
// NOTE(review): a msg shorter than 2 bytes would panic on the slicing.
func getMessage(msg []byte) (uint16, []byte) {
	temp := make([]byte, 2)
	body := make([]byte, len(msg)-2)
	copy(temp, msg[:2])
	copy(body, msg[2:])
	top := binary.BigEndian.Uint16(temp)
	return top, body
}
// Checkdisk looks at mounted filesystems, and reports the freespace. // If the freespace is less than 10%, exit code is non-0. package main import ( "bufio" "fmt" "log" "os" "strings" "syscall" ) func main() { // capicityThreshold is the percentage when alerts are generated. capacityThreshold := 90 var alert bool var output string fs := getFs() for _, v := range fs { var statfs syscall.Statfs_t err := syscall.Statfs(v, &statfs) if err != nil { log.Printf("statfs %s: %s\n", v, err) } cap := fsCapacity(statfs) if cap >= capacityThreshold { alert = true } output = fmt.Sprintf("%s%s: %d%% full\n", output, v, cap) } if alert == true { fmt.Printf("DISK(S) CRITICAL\n%s", output) os.Exit(2) } fmt.Printf("DISK(S) OK\n%s", output) os.Exit(0) } // getFs returns the names of mounted filesystems. func getFs() []string { f, err := os.Open("/etc/mtab") if err != nil { panic(err) } defer f.Close() scanner := bufio.NewScanner(f) stopwords := []string{"/proc", "/sys", "/dev/pts", "nfs"} var fs []string for scanner.Scan() { // Get the second word in s.Text() disk := strings.Fields(scanner.Text())[1] var virtualDevice bool for _, v := range stopwords { if strings.Contains(disk, v) { virtualDevice = true } } if !virtualDevice { fs = append(fs, disk) } if err := scanner.Err(); err != nil { log.Println("scanner:", err) } } return fs } // fsCapacity returns a percentage of how full a filesystem is. func fsCapacity(fs syscall.Statfs_t) int { blocks := float64(fs.Blocks) free := float64(fs.Bfree) // Capacity is (blocks-free)/blocks * 100 dif := blocks - free quo := dif / blocks cap := quo * 100 return int(cap + 0.5) }
package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"os"
	"sort"
	"strings"
	"unicode"
	"unicode/utf8"
)

// main reads a pinyin data file from stdin and writes, after a leading
// blank line, one line per Han character: the character immediately
// followed by its comma-joined pinyins, sorted by code point.
func main() {
	reader := bufio.NewReader(os.Stdin)
	lineno := 0
	var chars []rune
	pinyinsOf := map[rune][]string{}
	for {
		lineno++
		line, err := reader.ReadString('\n')
		if err == io.EOF {
			// NOTE(review): a final line without a trailing '\n' is
			// discarded here — confirm the input always ends in a newline.
			break
		} else if err != nil {
			log.Fatalf("error when reading line %d: %v", lineno, err)
		}
		// We are looking for lines like
		// 的 4886 de:99.9671% di:0.0329%
		// or
		// 梀 4356 su yin
		// Namely, one character followed by a number (we drop this field), and
		// pinyins with optional colon and probability.
		line = line[:len(line)-1]
		fields := strings.Fields(line)
		if len(fields) < 3 {
			continue
		}
		char, size := utf8.DecodeRuneInString(fields[0])
		// Skip lines whose first field is more than a single rune.
		if size != len(fields[0]) {
			continue
		}
		if !unicode.In(char, unicode.Han) {
			log.Fatalf("%c is not a Han character\n", char)
		}
		// Collect all pinyins of this character.
		pinyins := make([]string, len(fields)-2)
		for i, f := range fields[2:] {
			// Drop the ":probability" suffix when present.
			if j := strings.Index(f, ":"); j > -1 {
				f = f[:j]
			}
			// Make sure that this only consists of small letter.
			if strings.TrimLeft(f, "abcdefghijklmnopqrstuvwxyz") != "" {
				log.Fatalf("line %d of pinyin data file has non-pinyin:", lineno)
			}
			pinyins[i] = f
		}
		chars = append(chars, char)
		pinyinsOf[char] = pinyins
	}
	sort.Sort(runes(chars))
	fmt.Println()
	for _, char := range chars {
		fmt.Printf("%c%s\n", char, strings.Join(pinyinsOf[char], ","))
	}
}

// runes implements sort.Interface over a []rune in code-point order.
type runes []rune

func (rs runes) Len() int           { return len(rs) }
func (rs runes) Less(i, j int) bool { return rs[i] < rs[j] }
func (rs runes) Swap(i, j int)      { rs[i], rs[j] = rs[j], rs[i] }
// Package generator emits Go source text for persist-layer helpers; every
// method below returns a string of generated code built with a Printer.
package generator

import (
	"fmt"
)

// PersistStringer generates the persist_lib support code (input structs,
// query handlers, default handler implementations) for a proto service.
// NOTE(review): Method, Service, Printer, TypeDesc and the New*Name helpers
// are declared elsewhere in this package.
type PersistStringer struct{}

// MessageInputDeclaration generates the persist-layer input struct for a
// method, plus Get/Set accessors for each field so the struct can be used
// as query parameters.
// TYPECHANGE
func (per *PersistStringer) MessageInputDeclaration(method *Method) string {
	printer := &Printer{}
	printer.P("type %s struct{\n", NewPLInputName(method))
	// Field types differ between the SQL and Spanner backends.
	getPersistLibTypeName := GetSqlPersistLibTypeName
	if method.IsSpanner() {
		getPersistLibTypeName = GetSpannerPersistLibTypeName
	}
	inputTypeDescs := method.GetTypeDescArrayForStruct(method.GetInputTypeStruct())
	for _, qf := range inputTypeDescs {
		typeName := getPersistLibTypeName(qf)
		printer.P("%s %s\n", qf.Name, typeName)
	}
	printer.P("}\n")
	printer.P("// this could be used in a query, so generate the getters/setters\n")
	for _, qf := range inputTypeDescs {
		typeName := getPersistLibTypeName(qf)
		plInputName := NewPLInputName(method)
		printer.P(
			"func(p *%s) Get%s() %s{ return p.%s }\n",
			plInputName, qf.Name, typeName, qf.Name,
		)
		printer.P(
			"func(p *%s) Set%s(param %s) { p.%s = param }\n",
			plInputName, qf.Name, typeName, qf.Name,
		)
	}
	return printer.String()
}

// PersistImplBuilder generates the <Service>Impl struct, the RestOf…Handlers
// interface for unimplemented methods, and the fluent <Service>ImplBuilder
// with its With…/Build/MustBuild methods.
// alreadyWrote is a cache for the method types we have already written, so
// shared type-mapping interfaces are emitted only once per file.
func (per *PersistStringer) PersistImplBuilder(service *Service, alreadyWrote map[string]bool) string {
	var dbType string
	var backend string
	if service.IsSpanner() {
		dbType = "spanner.Client"
		backend = "Spanner"
	} else {
		dbType = "sql.DB"
		backend = "Sql"
	}
	sName := service.GetName()
	printer := &Printer{}
	printer.Q(
		"type ", sName, "Impl struct{\n",
		"PERSIST *persist_lib.", NewPersistHelperName(service), "\n",
		"FORWARDED RestOf", sName, "Handlers\n",
		"HOOKS ", sName, "Hooks\n",
		"MAPPINGS ", sName, "TypeMapping\n",
		"}\n",
	)
	// Methods with no persist option (or Spanner bidi streams, which the
	// persist layer cannot implement) are forwarded to user handlers.
	printer.P("type RestOf%sHandlers interface{\n", service.GetName())
	for _, m := range *service.Methods {
		spannerBi := m.Service.IsSpanner() && m.IsBidiStreaming()
		if m.GetMethodOption() == nil || spannerBi {
			if m.IsUnary() {
				printer.P(
					"%s(ctx context.Context, req *%s) (*%s, error)\n",
					m.GetName(), m.GetInputType(), m.GetOutputType(),
				)
			} else if m.IsServerStreaming() {
				printer.P(
					"%s(req *%s, stream %s) error\n",
					m.GetName(), m.GetInputType(), NewStreamType(m),
				)
			} else {
				// Client-streaming and bidi methods share this shape.
				printer.P(
					"%s(stream %s) error\n",
					m.GetName(), NewStreamType(m),
				)
			}
		}
	}
	printer.P("}\n")
	WriteBuilderTypeMappingsInterface(printer, service)
	WriteTypeMappingsContractInterfaces(printer, service, alreadyWrote)
	WriteBuilderHookInterfaceAndFunc(printer, service)
	printer.Q(
		"type ", sName, "ImplBuilder struct {\n",
		"err error\n ",
		"rest RestOf", sName, "Handlers\n",
		"queryHandlers *persist_lib.", sName, "QueryHandlers\n",
		"i *", sName, "Impl\n",
		"db *", dbType, "\n",
		"hooks ", sName, "Hooks\n",
		"mappings ", sName, "TypeMapping\n",
		"}\n",
		"func New", sName, "Builder() *", sName, "ImplBuilder {\n",
		"return &", sName, "ImplBuilder{i: &", sName, "Impl{}}\n",
		"}\n",
	)
	WriteBuilderHooksAcceptingFunc(printer, service)
	WriteBuilderTypeMappingsAcceptingFunc(printer, service)
	printer.PA([]string{
		"func (b *%sImplBuilder) WithRestOfGrpcHandlers(r RestOf%sHandlers) *%sImplBuilder {\n",
		"b.rest = r\n return b\n}\n",
	},
		service.GetName(), service.GetName(), service.GetName(),
	)
	printer.PA([]string{
		"func (b *%sImplBuilder) WithPersistQueryHandlers(p *persist_lib.%sQueryHandlers)",
		"*%sImplBuilder {\n",
		"b.queryHandlers = p\n return b\n}\n",
	},
		service.GetName(), service.GetName(), service.GetName(),
	)
	// setup default query functions
	printer.PA([]string{
		"func (b *%sImplBuilder) WithDefaultQueryHandlers() *%sImplBuilder {\n",
		"accessor := persist_lib.New%sClientGetter(&b.db)\n",
		"queryHandlers := &persist_lib.%sQueryHandlers{\n",
	},
		service.GetName(), service.GetName(), backend, service.GetName(),
	)
	for _, m := range *service.Methods {
		if m.GetMethodOption() == nil || (m.Service.IsSpanner() && m.IsBidiStreaming()) {
			continue
		}
		printer.P(
			"%s: persist_lib.Default%s(accessor),\n",
			NewPersistHandlerName(m), NewPersistHandlerName(m),
		)
	}
	printer.P("}\n b.queryHandlers = queryHandlers\n return b\n}\n")
	// fill in holes with defaults
	printer.PA([]string{
		"func (b *%sImplBuilder) WithNilAsDefaultQueryHandlers(p *persist_lib.%sQueryHandlers)",
		"*%sImplBuilder {\n",
		"accessor := persist_lib.New%sClientGetter(&b.db)\n",
	},
		service.GetName(), service.GetName(), service.GetName(), backend,
	)
	for _, m := range *service.Methods {
		if m.GetMethodOption() == nil || (m.Service.IsSpanner() && m.IsBidiStreaming()) {
			continue
		}
		phn := NewPersistHandlerName(m)
		printer.P(
			"if p.%s == nil {\np.%s = persist_lib.Default%s(accessor)\n}\n",
			phn, phn, phn,
		)
	}
	printer.P("b.queryHandlers = p\n return b\n}\n")
	// provide the builder with a client
	printer.PA([]string{
		"func (b *%sImplBuilder) With%sClient(c *%s) *%sImplBuilder {\n",
		"b.db = c\n return b\n}\n",
	},
		service.GetName(), backend, dbType, service.GetName(),
	)
	if service.IsSpanner() {
		printer.PA([]string{
			"func (b *%sImplBuilder) WithSpannerURI(ctx context.Context, uri string) *%sImplBuilder {\n",
			"cli, err := spanner.NewClient(ctx, uri)\n b.err = err\n b.db = cli\n return b\n}\n",
		},
			service.GetName(), service.GetName(),
		)
	} else {
		printer.PA([]string{
			"func (b *%sImplBuilder) WithNewSqlDb(driverName, dataSourceName string) *%sImplBuilder {\n",
			"db, err := sql.Open(driverName, dataSourceName)\n",
			"b.err = err\n",
			"if b.err == nil {\n",
			"\tb.db = db\n",
			"}\n",
			"return b\n}\n",
		},
			service.GetName(), service.GetName(),
		)
	}
	// Build method, returns impl, err
	printer.Q(
		"func (b *", sName, "ImplBuilder) Build() (*", sName, "Impl, error) {\n",
		"if b.err != nil {\n return nil, b.err\n",
		"}\n",
		"b.i.PERSIST = &persist_lib.", NewPersistHelperName(service), "{Handlers: *b.queryHandlers}\n",
		"b.i.FORWARDED = b.rest\n",
		"b.i.HOOKS = b.hooks\n",
		"b.i.MAPPINGS = b.mappings\n",
		"return b.i, nil\n",
		"}\n",
	)
	// MustBuild method, returns impl. Can panic.
	printer.PA([]string{
		"func (b *%sImplBuilder) MustBuild() *%sImpl {\n",
		"s, err := b.Build()\n",
		"if err != nil {\n panic(\"error in builder: \" + err.Error())\n}\n",
		"return s\n}\n",
	},
		service.GetName(), service.GetName(),
	)
	return printer.String()
}

// WriteBuilderHookInterfaceAndFunc generates the <Service>Hooks interface:
// a before-hook (returning an optional short-circuit response) and/or an
// after-hook for every method that opts in via its method option.
func WriteBuilderHookInterfaceAndFunc(p *Printer, s *Service) {
	p.Q("type ", s.GetName(), "Hooks interface{\n")
	for _, m := range *s.Methods {
		opt := m.GetMethodOption()
		if opt == nil {
			continue
		}
		if opt.GetBefore() {
			// Server-streaming before-hooks may return several responses.
			sliceStarOrStar := "*"
			if m.IsServerStreaming() {
				sliceStarOrStar = "[]*"
			}
			p.Q("\t", m.GetBeforeHookName(), "(*", m.GetInputType(), ") (", sliceStarOrStar, m.GetOutputType(), ", error)\n")
		}
		if opt.GetAfter() {
			p.Q("\t", m.GetAfterHookName(), "(*", m.GetInputType(), ", *", m.GetOutputType(), ") error\n")
		}
	}
	p.Q("}\n")
}

// WriteBuilderHooksAcceptingFunc generates the builder's WithHooks setter.
func WriteBuilderHooksAcceptingFunc(p *Printer, serv *Service) {
	s := serv.GetName()
	p.Q(
		"func(b *", s, "ImplBuilder) WithHooks(hs ", s, "Hooks) *", s, "ImplBuilder {\n",
		"b.hooks = hs\n",
		"return b\n",
		"}\n",
	)
}

// WriteBuilderTypeMappingsAcceptingFunc generates the builder's
// WithTypeMapping setter.
func WriteBuilderTypeMappingsAcceptingFunc(p *Printer, serv *Service) {
	s := serv.GetName()
	p.Q("func(b *", s, "ImplBuilder) WithTypeMapping(ts ", s, "TypeMapping) *", s, "ImplBuilder {\n")
	p.Q("\tb.mappings = ts\n")
	p.Q("\treturn b\n")
	p.Q("}\n")
}

// WriteBuilderTypeMappingsInterface generates the <Service>TypeMapping
// interface with one accessor per mapped type in the service option.
func WriteBuilderTypeMappingsInterface(p *Printer, s *Service) {
	sName := s.GetName()
	// TODO google's WKT protobufs probably don't need the package prefix
	p.Q("type ", sName, "TypeMapping interface{\n")
	tms := s.GetServiceOption().GetTypes()
	for _, tm := range tms {
		// TODO implement these interfaces
		_, titled := getGoNamesForTypeMapping(tm, s.File)
		// p.Q(titled, "() ", sName, titled, "MappingImpl\n")
		p.Q(titled, "() ", titled, "MappingImpl\n")
	}
	p.Q("}\n")
}

// WriteScanValuerInterface generates the backend-appropriate ScanValuer
// interface (database/sql scan+value for SQL, Spanner equivalents otherwise).
func WriteScanValuerInterface(p *Printer, s *Service) {
	if s.IsSQL() {
		p.Q("type ScanValuer interface {\n")
		p.Q("\tsql.Scanner\n")
		p.Q("\tdriver.Valuer\n")
		p.Q("}\n")
	} else if s.IsSpanner() {
		p.Q("type ScanValuer interface {\n")
		p.Q("\tSpannerScan(src *spanner.GenericColumnValue) error\n")
		p.Q("\tSpannerValue() (interface{}, error)\n")
		p.Q("}\n")
	}
}

// WriteTypeMappingsContractInterfaces generates one <Titled>MappingImpl
// contract interface per mapped type, skipping (and recording in
// alreadyWrote) names that were emitted earlier in this run.
func WriteTypeMappingsContractInterfaces(p *Printer, s *Service, alreadyWrote map[string]bool) {
	for _, tm := range s.GetServiceOption().GetTypes() {
		name, titled := getGoNamesForTypeMapping(tm, s.File)
		if alreadyWrote[titled] {
			continue
		}
		_, maybeStar := needsExtraStar(tm)
		p.Q("type ", titled, "MappingImpl interface{\n")
		p.Q("ToProto(*", maybeStar, name, ") error\n")
		p.Q("Empty() ", titled, "MappingImpl\n")
		if s.IsSQL() {
			p.Q("ToSql(", maybeStar, name, ") sql.Scanner\n")
			p.Q("sql.Scanner\n")
			p.Q("driver.Valuer\n")
		} else if s.IsSpanner() {
			p.Q("ToSpanner(", maybeStar, name, ") ", titled, "MappingImpl\n")
			p.Q("SpannerScan(src *spanner.GenericColumnValue) error\n")
			p.Q("SpannerValue() (interface{}, error)\n")
		}
		p.Q("}\n")
		alreadyWrote[titled] = true
	}
}

// HandlersStructDeclaration generates the persist helper struct and the
// <Service>QueryHandlers struct holding one handler func per persist method;
// the handler signature depends on the method's streaming shape.
func (per *PersistStringer) HandlersStructDeclaration(service *Service) string {
	printer := &Printer{}
	// contains our query handlers struct, and is reciever of our methods
	printer.P(
		"type %s struct{\nHandlers %sQueryHandlers}\n",
		NewPersistHelperName(service), service.GetName(),
	)
	// actually runs the queries
	printer.P("type %sQueryHandlers struct {\n", service.GetName())
	for _, method := range *service.Methods {
		if method.GetMethodOption() == nil {
			continue
		}
		var rowType string
		if method.IsSpanner() {
			rowType = "*spanner.Row"
		} else {
			rowType = "Scanable"
		}
		if method.IsClientStreaming() {
			printer.P(
				"%s func(context.Context)(func(*%s)error, func() (%s, error), error)\n",
				NewPersistHandlerName(method), NewPLInputName(method), rowType,
			)
		} else if method.IsBidiStreaming() {
			printer.P(
				"%s func(context.Context) (func(*%s) (%s, error), func() error)\n",
				NewPersistHandlerName(method), NewPLInputName(method), rowType,
			)
		} else {
			printer.P(
				"%s func(context.Context, *%s, func(%s)) error\n",
				NewPersistHandlerName(method), NewPLInputName(method), rowType,
			)
		}
	}
	printer.P("}\n")
	return printer.String()
}

// HelperFunctionImpl generates thin persist-helper methods that delegate to
// the corresponding entry in the Handlers struct, one per persist method.
func (per *PersistStringer) HelperFunctionImpl(service *Service) string {
	printer := &Printer{}
	for _, method := range *service.Methods {
		if method.GetMethodOption() == nil {
			continue // we do not have any persist options
		}
		var rowType string
		if method.IsSpanner() {
			rowType = "*spanner.Row"
		} else {
			rowType = "Scanable"
		}
		if method.IsClientStreaming() {
			printer.PA([]string{
				"// given a context, returns two functions. (feed, stop)\n",
				"// feed will be called once for every row recieved by the handler\n",
				"// stop will be called when the client is done streaming. it expects\n",
				"//a row to be returned, or nil.\n",
				"func (p *%s) %s(ctx context.Context)(func(*%s) error, func() (%s, error), error) {\n",
				"return p.Handlers.%s(ctx)\n}\n",
			},
				NewPersistHelperName(service), method.GetName(), NewPLInputName(method), rowType,
				NewPersistHandlerName(method),
			)
		} else if method.IsBidiStreaming() {
			printer.PA([]string{
				"// returns two functions (feed, stop)\n",
				"// feed needs to be called for every row received. It will run the query\n",
				"// and return the result + error",
				"// stop needs to be called to signal the transaction has finished\n",
				"func (p *%s) %s(ctx context.Context)(func(*%s) (%s, error), func() error) {\n",
				"return p.Handlers.%s(ctx)\n}\n",
			},
				NewPersistHelperName(service), method.GetName(), NewPLInputName(method), rowType,
				NewPersistHandlerName(method),
			)
		} else {
			printer.PA([]string{
				"// next must be called on each result row\n",
				"func(p *%s) %s(ctx context.Context, params *%s, next func(%s)) error {\n",
				"return p.Handlers.%s(ctx, params, next)\n}\n",
			},
				NewPersistHelperName(service), method.GetName(), NewPLInputName(method), rowType,
				NewPersistHandlerName(method),
			)
		}
	}
	return printer.String()
}

// QueryInterfaceDefinition generates the <Query>Params interface: one getter
// per query field, typed for the method's backend. Returns "" when the
// method has no persist option.
func (per *PersistStringer) QueryInterfaceDefinition(method *Method) string {
	if method.GetMethodOption() == nil {
		return ""
	}
	printer := &Printer{}
	printer.P(
		"type %sParams interface{\n",
		NewPLQueryMethodName(method),
	)
	getPersistLibTypeName := GetSpannerPersistLibTypeName
	if method.IsSQL() {
		getPersistLibTypeName = GetSqlPersistLibTypeName
	}
	for _, t := range method.GetTypeDescForQueryFields() {
		interfaceType := getPersistLibTypeName(t)
		printer.P("Get%s() %s\n", t.Name, interfaceType)
	}
	printer.P("}\n")
	return printer.String()
}

// SqlQueryFunction generates the SQL query function for a method, choosing
// Exec (no result / client-streaming), Query (server-streaming), or
// QueryRow (unary) and wrapping the outcome in a *Result.
// TYPECHANGE
func (per *PersistStringer) SqlQueryFunction(method *Method) string {
	opts := method.GetMethodOption()
	if opts == nil {
		return ""
	}
	// Join query with space
	query := func() (out string) {
		for _, q := range opts.GetQuery() {
			out += q + " "
		}
		return
	}()
	args := opts.GetArguments()
	tds := method.GetTypeDescForFieldsInStructSnakeCase(method.GetInputTypeStruct())
	var argParams string
	for _, a := range args {
		argParams += fmt.Sprintf("req.Get%s(),\n", tds[a].Name)
	}
	// if we are an empty result, then perform an exec, not a query
	lenOfResult := len(method.GetTypeDescArrayForStruct(method.GetOutputTypeStruct()))
	printer := &Printer{}
	queryMethodName := NewPLQueryMethodName(method)
	printer.P("func %s(tx Runable, req %sParams) *Result {", queryMethodName, queryMethodName)
	if lenOfResult == 0 || method.IsClientStreaming() {
		// use an exec
		printer.PA([]string{
			"res, err := tx.Exec(\n\"%s\",\n%s)\n",
			"if err != nil {\n return newResultFromErr(err)\n}\n",
			"return newResultFromSqlResult(res)\n",
		},
			query, argParams,
		)
	} else if method.IsServerStreaming() {
		printer.PA([]string{
			"res, err := tx.Query(\n\"%s\",\n%s)\n",
			"if err != nil {\n return newResultFromErr(err)\n}\n",
			"return newResultFromRows(res)",
		},
			query, argParams,
		)
	} else {
		printer.PA([]string{
			"row := tx.QueryRow(\n\"%s\",\n%s)\n",
			"return newResultFromRow(row)\n",
		},
			query, argParams,
		)
	}
	printer.P("\n}\n")
	return printer.String()
}

// SpannerQueryFunction generates the Spanner query function: a
// spanner.Statement for selects, a *spanner.Mutation otherwise.
func (per *PersistStringer) SpannerQueryFunction(method *Method) string {
	// we do not have a persist query
	if method.GetMethodOption() == nil {
		return ""
	}
	printer := &Printer{}
	if method.IsSelect() {
		printer.P(
			"func %s(req %sParams) spanner.Statement {\nreturn %s\n}\n",
			NewPLQueryMethodName(method), NewPLQueryMethodName(method), method.Query,
		)
	} else {
		printer.P(
			"func %s(req %sParams) *spanner.Mutation {\nreturn %s\n}\n",
			NewPLQueryMethodName(method), NewPLQueryMethodName(method), method.Query,
		)
	}
	return printer.String()
}

// DefaultFunctionsImpl generates the default handler implementations for
// every persist method of the service, dispatching per backend.
func (per *PersistStringer) DefaultFunctionsImpl(service *Service) string {
	printer := &Printer{}
	for _, method := range *service.Methods {
		if method.GetMethodOption() == nil {
			continue
		}
		if method.IsSQL() {
			printer.P("%s", per.DefaultSqlFunctionsImpl(method))
		} else if method.IsSpanner() {
			printer.P("%s", per.DefaultSpannerFunctionsImpl(method))
		}
	}
	return printer.String()
}

// DefaultSqlFunctionsImpl generates the default SQL handler for one method.
// The shape of the generated closure depends on the method's streaming kind:
// client-streaming (transactional exec), no-output exec, server-streaming
// query, unary query-row, or bidi (per-row exec within one transaction).
func (per *PersistStringer) DefaultSqlFunctionsImpl(method *Method) string {
	printer := &Printer{}
	lenOfOutFields := len(method.GetTypeDescArrayForStruct(method.GetOutputTypeStruct()))
	if method.IsClientStreaming() {
		// use exec
		printer.PA([]string{
			"func Default%sHandler(accessor SqlClientGetter) func(context.Context) ",
			"(func(*%s) error, func() (Scanable, error), error) {\n",
			"return func(ctx context.Context) (func(*%s) error, func() (Scanable, error), error) {\n",
			"sqlDb, err := accessor()\n",
			"if err != nil {\n return nil, nil, err\n}\n",
			"tx, err := sqlDb.Begin()\n",
			"if err != nil {\n return nil, nil, err\n}\n",
			"feed := func(req *%s) error {\n",
			"if res := %s(tx, req); res.Err() != nil {\n",
			"if err := tx.Rollback(); err != nil {\n return fmt.Errorf(\"%s\", err, res.Err())\n}\n",
			"return res.Err()\n}\n",
			"return nil\n}\n",
			"done := func() (Scanable, error) {\n if err := tx.Commit();err != nil {\n",
			"return nil, err\n}\n return nil, nil\n}\n",
			"return feed, done, nil\n}\n}\n",
		},
			method.GetName(),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLQueryMethodName(method),
			"%v, %v",
		)
	} else if lenOfOutFields == 0 {
		// use exec
		printer.PA([]string{
			"func Default%sHandler(accessor SqlClientGetter) ",
			"func (context.Context, *%s, func(Scanable)) error {\n",
			"return func(ctx context.Context, req *%s, next func(Scanable)) error {\n",
			"sqlDB, err := accessor()\n if err != nil {\n return err \n}\n",
			"if res := %s(sqlDB, req); res.Err() != nil {\n return err \n}\n",
			"return nil\n}\n}\n",
		},
			method.GetName(),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLQueryMethodName(method),
		)
	} else if method.IsServerStreaming() {
		// use query
		printer.PA([]string{
			"func Default%sHandler(accessor SqlClientGetter) ",
			"func(context.Context, *%s, func(Scanable)) error {\n",
			"return func(ctx context.Context, req *%s, next func(Scanable)) error {\n",
			"sqlDB, err := accessor()\n if err != nil {\n return err\n}\n",
			"tx, err := sqlDB.Begin()\n",
			"if err != nil {\n return err\n}\n",
			"res := %s(tx, req)\n",
			"err = res.Do(func(row Scanable) error {\n",
			"next(row)\n return nil\n})\n",
			"if err != nil {\n return err \n}\n",
			"if err := tx.Commit(); err != nil { return err \n}\n",
			"return res.Err()\n}\n}\n",
		},
			method.GetName(),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLQueryMethodName(method),
		)
	} else if method.IsUnary() {
		// use queryRow
		printer.PA([]string{
			"func Default%sHandler(accessor SqlClientGetter) ",
			"func(context.Context, *%s, func(Scanable)) error {\n",
			"return func(ctx context.Context, req *%s, next func(Scanable)) error {\n",
			"sqlDB, err := accessor()\n if err != nil {\n return err\n}\n",
			"res := %s(sqlDB, req)\n",
			"err = res.Do(func(row Scanable) error {\n",
			"next(row)\nreturn nil})\n",
			"if err != nil {\n return err\n}\n",
			"return nil\n}\n",
			"}\n",
		},
			method.GetName(),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLQueryMethodName(method),
		)
	} else if method.IsBidiStreaming() {
		printer.PA([]string{
			"func Default%sHandler(accessor SqlClientGetter) ",
			"func(context.Context) (func(*%s) (Scanable, error), func() error) {\n",
			"return func(ctx context.Context) (func(*%s) (Scanable, error), func() error) {\n",
			"var feedErr error\n",
			"sqlDb, err := accessor()\n",
			"if err != nil {\n feedErr = err\n}\n",
			"tx, err := sqlDb.Begin()\n",
			"if err != nil {\n feedErr = err\n}\n",
			"feed := func(req *%s) (Scanable, error) {\n",
			"if feedErr != nil{\n return nil, feedErr\n}\n res := %s(tx, req)\n",
			"return res, nil\n}\n",
			"done := func() error {\n if feedErr != nil {\n tx.Rollback()\n} else {\n feedErr = tx.Commit()\n}\n",
			"return feedErr\n}\n",
			"return feed,done\n}\n}\n",
		},
			method.GetName(),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLQueryMethodName(method),
		)
	}
	return printer.String()
}

// DefaultSpannerFunctionsImpl generates the default Spanner handler for one
// method: a buffered-mutation feed/done pair for client-streaming, otherwise
// a single query (select) or Apply (mutation) closure.
func (per *PersistStringer) DefaultSpannerFunctionsImpl(method *Method) string {
	printer := &Printer{}
	if method.IsClientStreaming() {
		printer.PA([]string{
			"func Default%sHandler(accessor SpannerClientGetter) func(context.Context) ",
			"(func(*%s) error, func()(*spanner.Row, error), error) {\n",
			"return func(ctx context.Context) (func(*%s) error, func()(*spanner.Row, error), error) {\n",
			"var muts []*spanner.Mutation\n",
			"feed := func(req *%s) error {\nmuts = append(muts, %s(req))\nreturn nil\n}\n",
			"done := func() (*spanner.Row, error) {\n",
			"cli, err := accessor()\nif err != nil {\n return nil, err\n}\n",
			"if _, err := cli.Apply(ctx, muts); err != nil {\nreturn nil, err\n}\n",
			"return nil, nil // we dont have a row, because we are an apply\n",
			"}\n return feed, done, nil\n}\n}\n",
		},
			method.GetName(),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLInputName(method),
			NewPLQueryMethodName(method),
		)
	} else {
		printer.PA([]string{
			"func Default%sHandler(accessor SpannerClientGetter) ",
			"func(context.Context, *%s, func(*spanner.Row)) error {\n",
			"return func(ctx context.Context, req *%s, next func(*spanner.Row)) error {\n",
		},
			method.GetName(),
			NewPLInputName(method),
			NewPLInputName(method),
		)
		printer.P("cli, err := accessor()\n if err != nil {\n return err\n}\n")
		if method.IsSelect() {
			printer.PA([]string{
				"iter := cli.Single().Query(ctx, %s(req))\n",
				"if err := iter.Do(func(r *spanner.Row) error {\n",
				"next(r)\nreturn nil\n}); err != nil {\nreturn err\n}\n",
			},
				NewPLQueryMethodName(method),
			)
		} else {
			printer.PA([]string{
				"if _, err := cli.Apply(ctx, []*spanner.Mutation{%s(req)}); err != nil {\n",
				"return err\n}\n next(nil) // this is an apply, it has no result\n",
			},
				NewPLQueryMethodName(method),
			)
		}
		printer.P("return nil\n}\n}\n")
	}
	return printer.String()
}

// DeclareSpannerGetter generates the SpannerClientGetter type and its
// constructor (a closure over a **spanner.Client).
func (per *PersistStringer) DeclareSpannerGetter() string {
	printer := &Printer{}
	printer.P("type SpannerClientGetter func() (*spanner.Client, error)\n")
	printer.PA([]string{
		"func NewSpannerClientGetter(cli **spanner.Client) SpannerClientGetter {\n",
		"return func() (*spanner.Client, error) {\n return *cli, nil \n}\n}\n",
	})
	return printer.String()
}

// package level definitions for sql implemented libraries
// SqlClientGetter
// Scanable interface
// Runable interface
// Result struct
func (per *PersistStringer) DeclareSqlPackageDefs() string {
	printer := &Printer{}
	printer.P("type SqlClientGetter func() (*sql.DB, error)\n")
	printer.PA([]string{
		"func NewSqlClientGetter(cli **sql.DB) SqlClientGetter {\n",
		"return func() (*sql.DB, error) {\n return *cli, nil \n}\n}\n",
	})
	printer.P("type Scanable interface{\nScan(dest ...interface{}) error\n}\n")
	printer.PA([]string{
		"type Runable interface{\n",
		"Query(string, ...interface{}) (*sql.Rows, error)\n",
		"QueryRow(string, ...interface{}) *sql.Row\n",
		"Exec(string, ...interface{}) (sql.Result, error)\n}\n",
	})
	// Result wraps the three possible outcomes of running a SQL statement
	// (exec result, single row, row set) plus a construction error.
	printer.PA([]string{
		"type Result struct {\n",
		"result sql.Result\n",
		"row *sql.Row\n",
		"rows *sql.Rows\n",
		"err error\n",
		"}\n",
		"func newResultFromSqlResult(r sql.Result) *Result {\n",
		"return &Result{result: r}\n",
		"}\n",
		"func newResultFromRow(r *sql.Row) *Result {\n",
		"return &Result{row: r}\n",
		"}\n",
		"func newResultFromRows(r *sql.Rows) *Result {\n",
		"return &Result{rows: r}\n",
		"}\n",
		"func newResultFromErr(err error) *Result {\n",
		"return &Result{err: err}\n",
		"}\n",
		"func (r *Result) Do(fun func(Scanable) error) error {\n",
		"if r.err != nil {\n",
		"return r.err\n",
		"}\n",
		"if r.row != nil {\n",
		"if err := fun(r.row); err != nil {\n",
		"return err\n",
		"}\n",
		"}\n",
		"if r.rows != nil {\n",
		"defer r.rows.Close()\n",
		"for r.rows.Next() {\n",
		"if err := fun(r.rows); err != nil {\n",
		"return err\n",
		"}\n",
		"}\n",
		"}\n",
		"return nil\n",
		"}\n",
		"// returns sql.ErrNoRows if it did not scan into dest\n",
		"func (r *Result) Scan(dest ...interface{}) error {\n",
		"if r.result != nil {\n return sql.ErrNoRows\n}",
		"else if r.row != nil {\n return r.row.Scan(dest...)\n}",
		"else if r.rows != nil {\n",
		"err := r.rows.Scan(dest...)\n",
		"if r.rows.Next() {\n r.rows.Close()\n}\n",
		"return err\n",
		"}\n",
		"return sql.ErrNoRows\n",
		"}\n",
		"func (r *Result) Err() error {\n",
		"return r.err\n",
		"}\n",
	})
	return printer.String()
}

// IteratorHelper generates the Iter<Proto> helper that walks a backend
// iterator, converts each row to the method's protobuf output type, and
// passes it to a caller-supplied callback.
func IteratorHelper(m *Method) string {
	var iterType string
	if m.Service.IsSpanner() {
		iterType = "spanner.RowIterator"
	} else if m.Service.IsSQL() {
		iterType = "persist_lib.Result"
	}
	p := &Printer{}
	out := m.GetOutputType()
	sName := m.Service.GetName()
	p.Q("func ", IterProtoName(m), "(ms ", sName, "TypeMapping, iter *", iterType, ", next func(i *", out, ") error) error {\n")
	p.PA([]string{
		"return iter.Do(func(r %s) error {\n",
		"item, err := %s(ms, r)\n",
		"if err != nil {\n",
		"return fmt.Errorf(\"error converting %s row to protobuf message: %s\", err)\n",
		"}\n",
		"return next(item)\n})\n}\n",
	},
		m.backend.RowType(),
		FromScanableFuncName(m),
		m.GetOutputType(),
		"%s", // so our printer doesnt freak out
	)
	return p.String()
}

// GetSqlPersistLibTypeName maps a field's TypeDesc to the Go type used in
// SQL persist-layer structs: interface{} for mapped types, []byte for
// messages, otherwise the field's own Go type.
// TYPECHANGE
func GetSqlPersistLibTypeName(t TypeDesc) string {
	if t.IsMapped {
		return "interface{}"
	} else if t.IsMessage {
		return "[]byte"
	} else {
		return t.GoName
	}
}

// GetSpannerPersistLibTypeName is the Spanner variant of the mapping above;
// repeated message fields additionally become [][]byte.
// TYPECHANGE
func GetSpannerPersistLibTypeName(t TypeDesc) string {
	if t.IsMapped {
		return "interface{}"
	} else if t.IsMessage && t.IsRepeated {
		return "[][]byte"
	} else if t.IsMessage {
		return "[]byte"
	} else {
		return t.GoName
	}
}
package errors import ( "errors" "fmt" "strconv" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) type StatusError spb.Status func ErrorCode(err error) int32 { var stErr *StatusError if errors.As(err, &stErr) { return stErr.Code } if s, ok := status.FromError(err); ok { return int32(s.Code()) } return int32(codes.Unknown) } func (err *StatusError) Error() string { p := (*spb.Status)(err) return fmt.Sprintf("status error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) } func parseCode(c interface{}) int32 { switch v := c.(type) { case int32: return v case codes.Code: return int32(v) default: i, err := strconv.Atoi(fmt.Sprintf("%d", v)) if err != nil { panic("unknown error code, err: " + err.Error()) } return int32(i) } } func With(err error, msg string) error { return fmt.Errorf(msg+": %w", err) } func Withf(err error, format string, a ...interface{}) error { args := make([]interface{}, 0, len(a)+1) args = append(args, a...) args = append(args, err) return fmt.Errorf(format+": %w", args...) } func New(code interface{}, msg string) error { return &StatusError{ Code: parseCode(code), Message: msg, } } func Newf(code interface{}, format string, a ...interface{}) error { return &StatusError{ Code: parseCode(code), Message: fmt.Sprintf(format, a...), } }
package database import ( "github.com/jinzhu/gorm" "database/sql" "flag" "os" "strconv" "sync" seeds "ImaginatoGolangTestTask/seeder" ) // IConnection ITransaction is type IConnection interface { GetDB() *gorm.DB } // GormDB is type connection struct { db *gorm.DB readonly bool isTranscation bool } var db *gorm.DB var connectionOnce sync.Once func Init() { connectionOnce.Do(func() { var err error connectionString := os.Getenv("ConnectionString") dialect := os.Getenv("Dialect") logMode := os.Getenv("LogMode") db, err = gorm.Open(dialect, connectionString) if err != nil { panic(err) } boolLogMode, _ := strconv.ParseBool(logMode) db.LogMode(boolLogMode) //seeder code command execute flag.Parse() args := flag.Args() if len(args) >= 1 { switch args[0] { case "seed": conn, _ := sql.Open(dialect, connectionString) seeds.Execute(conn, args[1:]...) os.Exit(0) } } }) } func NewConnection() IConnection { return &connection{ db, false, true, } } func Close() error { if db != nil { return db.Close() } return nil } // GetDB is func (selfConn *connection) GetDB() *gorm.DB { return selfConn.db }
package entity

// UserEntity is the user record shared across the MySQL, Redis and JSON
// representations; the struct tags give each backend's field/key name.
// NOTE(review): timestamps are carried as pre-formatted strings rather than
// time.Time — presumably formatted by the storage layer; confirm with callers.
type UserEntity struct {
	Id          int64  `mysql:"id" redis:"id" json:"UserId"`
	Username    string `mysql:"username" redis:"username" json:"Username"`
	Password    string `mysql:"password" redis:"password" json:"Password"`
	Role        string `mysql:"role" redis:"role" json:"Role"`
	CreatedTime string `mysql:"created_time" redis:"created_time" json:"CreatedTime"`
	UpdatedTime string `mysql:"updated_time" redis:"updated_time" json:"UpdatedTime" `
}
// Sends a single fixed test message through the company relay using the
// low-level smtp.Client API (Dial/Mail/Rcpt/Data) so each protocol step can
// be checked individually.
package main

import (
	"bytes"
	"log"
	"net/smtp"
)

func main() {
	client, err := smtp.Dial("mail.company.com:25")
	if err != nil {
		log.Fatal(err)
	}
	// Safety net for the error paths below; the happy path ends the session
	// with Quit, after which this Close is a harmless no-op error.
	defer client.Close()

	if err := client.Mail("taf@company.com"); err != nil {
		log.Fatal(err)
	}
	if err := client.Rcpt("wcheng@company.com"); err != nil {
		log.Fatal(err)
	}

	wc, err := client.Data()
	if err != nil {
		log.Fatal(err)
	}
	buf := bytes.NewBufferString("this is body")
	if _, err := buf.WriteTo(wc); err != nil {
		log.Fatal(err)
	}
	// Close the DATA writer explicitly — a deferred Close would run after
	// the connection is torn down, and the message is only committed once
	// the DATA section is terminated.
	if err := wc.Close(); err != nil {
		log.Fatal(err)
	}
	// End the SMTP session cleanly (the original never sent QUIT).
	if err := client.Quit(); err != nil {
		log.Fatal(err)
	}
}
package main import ( "encoding/csv" "os" "reflect" "strconv" "github.com/olekukonko/tablewriter" ) type Reporter interface { Append(ps *PageStats) Render() error } func structToMap(ps *PageStats) map[string]string { values := make(map[string]string) s := reflect.ValueOf(ps).Elem() typeOfT := s.Type() for i := 0; i < s.NumField(); i++ { f := s.Field(i) var v string switch f.Interface().(type) { case int, int8, int16, int32, int64: v = strconv.FormatInt(f.Int(), 10) case uint, uint8, uint16, uint32, uint64: v = strconv.FormatUint(f.Uint(), 10) case float32: v = strconv.FormatFloat(f.Float(), 'f', 4, 32) case float64: v = strconv.FormatFloat(f.Float(), 'f', 4, 64) case []byte: v = string(f.Bytes()) case string: v = f.String() } values[typeOfT.Field(i).Name] = v } return values } func mapValues(m map[string]string, fields []string) []string { row := make([]string, 0, len(fields)) for _, k := range fields { if v, ok := m[k]; ok { row = append(row, v) } } return row } type TableReporter struct { table *tablewriter.Table fields []string } func StatHeaders() []string { statType := reflect.TypeOf(PageStats{}) header := make([]string, 0, statType.NumField()) for i := 0; i < statType.NumField(); i++ { field := statType.Field(i) header = append(header, field.Name) } return header } func NewTableReporter() *TableReporter { tr := &TableReporter{ table: tablewriter.NewWriter(os.Stdout), fields: StatHeaders(), } tr.table.SetHeader(tr.fields) return tr } func NewTSVReporter() *TableReporter { tr := NewTableReporter() tr.table.SetAutoWrapText(false) tr.table.SetAutoFormatHeaders(true) tr.table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) tr.table.SetAlignment(tablewriter.ALIGN_LEFT) tr.table.SetCenterSeparator("") tr.table.SetColumnSeparator("") tr.table.SetRowSeparator("") tr.table.SetHeaderLine(false) tr.table.SetBorder(false) tr.table.SetTablePadding("\t") // pad with tabs tr.table.SetNoWhiteSpace(true) return tr } func (tr *TableReporter) Append(ps *PageStats) { m := 
structToMap(ps) row := mapValues(m, tr.fields) tr.table.Append(row) } func (tr *TableReporter) Render() error { tr.table.Render() return nil } type CSVReporter struct { w *csv.Writer fields []string } func NewCSVReporter() *CSVReporter { cr := &CSVReporter{ w: csv.NewWriter(os.Stdout), fields: StatHeaders(), } cr.w.Write(cr.fields) return cr } func (cr *CSVReporter) Append(ps *PageStats) { m := structToMap(ps) row := mapValues(m, cr.fields) cr.w.Write(row) } func (cr *CSVReporter) Render() error { cr.w.Flush() return nil }
package models import ( "database/sql" "strconv" "time" "github.com/GoAdminGroup/go-admin/modules/db" "github.com/GoAdminGroup/go-admin/modules/db/dialect" ) // RoleModel is role model structure. type RoleModel struct { Base Id int64 Name string Slug string CreatedAt string UpdatedAt string } // Role return a default role model. func Role() RoleModel { return RoleModel{Base: Base{TableName: "goadmin_roles"}} } // RoleWithId return a default role model of given id. func RoleWithId(id string) RoleModel { idInt, _ := strconv.Atoi(id) return RoleModel{Base: Base{TableName: "goadmin_roles"}, Id: int64(idInt)} } func (t RoleModel) SetConn(con db.Connection) RoleModel { t.Conn = con return t } func (t RoleModel) WithTx(tx *sql.Tx) RoleModel { t.Tx = tx return t } // Find return a default role model of given id. func (t RoleModel) Find(id interface{}) RoleModel { item, _ := t.Table(t.TableName).Find(id) return t.MapToModel(item) } // IsSlugExist check the row exist with given slug and id. func (t RoleModel) IsSlugExist(slug string, id string) bool { if id == "" { check, _ := t.Table(t.TableName).Where("slug", "=", slug).First() return check != nil } check, _ := t.Table(t.TableName). Where("slug", "=", slug). Where("id", "!=", id). First() return check != nil } // New create a role model. func (t RoleModel) New(name, slug string) (RoleModel, error) { id, err := t.WithTx(t.Tx).Table(t.TableName).Insert(dialect.H{ "name": name, "slug": slug, }) t.Id = id t.Name = name t.Slug = slug return t, err } // Update update the role model. func (t RoleModel) Update(name, slug string) (int64, error) { return t.WithTx(t.Tx).Table(t.TableName). Where("id", "=", t.Id). Update(dialect.H{ "name": name, "slug": slug, "updated_at": time.Now().Format("2006-01-02 15:04:05"), }) } // CheckPermission check the permission of role. func (t RoleModel) CheckPermission(permissionId string) bool { checkPermission, _ := t.Table("goadmin_role_permissions"). Where("permission_id", "=", permissionId). 
Where("role_id", "=", t.Id). First() return checkPermission != nil } // DeletePermissions delete all the permissions of role. func (t RoleModel) DeletePermissions() error { return t.WithTx(t.Tx).Table("goadmin_role_permissions"). Where("role_id", "=", t.Id). Delete() } // AddPermission add the permissions to the role. func (t RoleModel) AddPermission(permissionId string) (int64, error) { if permissionId != "" { if !t.CheckPermission(permissionId) { return t.WithTx(t.Tx).Table("goadmin_role_permissions"). Insert(dialect.H{ "permission_id": permissionId, "role_id": t.Id, }) } } return 0, nil } // MapToModel get the role model from given map. func (t RoleModel) MapToModel(m map[string]interface{}) RoleModel { t.Id = m["id"].(int64) t.Name, _ = m["name"].(string) t.Slug, _ = m["slug"].(string) t.CreatedAt, _ = m["created_at"].(string) t.UpdatedAt, _ = m["updated_at"].(string) return t }
package main

import (
	"fmt"
)

// main demonstrates Go's variable-declaration forms (zero value, short
// declaration, multi-assignment with inference, octal and hexadecimal
// literals) and prints each value with its inferred type.
//
// Fix: removed the superfluous trailing semicolons — Go's formatter (gofmt)
// never emits them and they are non-idiomatic; output is unchanged.
func main() {
	var a int                     // zero value: 0
	b := 2                        // short declaration, type int inferred
	var c, d, e = 1, 2.0, "hello" // inferred as int, float64, string
	f := 010                      // octal literal (leading 0) == 8
	g := 0xa                      // hexadecimal literal == 10

	fmt.Println("Hello-Go")
	fmt.Printf("a=%d type(a)=%T\n", a, a)
	fmt.Printf("b=%d type(b)=%T\n", b, b)
	fmt.Printf("c=%d type(c)=%T\n", c, c)
	fmt.Printf("d=%f type(d)=%T\n", d, d)
	fmt.Printf("e=%s type(e)=%T\n", e, e)
	fmt.Printf("f=%d type(f)=%T\n", f, f)
	fmt.Printf("g=%d type(g)=%T\n", g, g)
}
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cascades

import (
	"math"

	"github.com/pingcap/tidb/expression"
	plannercore "github.com/pingcap/tidb/planner/core"
	impl "github.com/pingcap/tidb/planner/implementation"
	"github.com/pingcap/tidb/planner/memo"
	"github.com/pingcap/tidb/planner/property"
)

// ImplementationRule defines the interface for implementation rules.
type ImplementationRule interface {
	// Match checks if current GroupExpr matches this rule under required physical property.
	Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool)
	// OnImplement generates physical plan using this rule for current GroupExpr. Note that
	// childrenReqProps of generated physical plan should be set correspondingly in this function.
	OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error)
}

// defaultImplementationMap lists, per logical operand, the implementation
// rules that may fire to produce candidate physical plans for it.
var defaultImplementationMap = map[memo.Operand][]ImplementationRule{
	memo.OperandTableDual: {
		&ImplTableDual{},
	},
	memo.OperandMemTableScan: {
		&ImplMemTableScan{},
	},
	memo.OperandProjection: {
		&ImplProjection{},
	},
	memo.OperandTableScan: {
		&ImplTableScan{},
	},
	memo.OperandIndexScan: {
		&ImplIndexScan{},
	},
	memo.OperandTiKVSingleGather: {
		&ImplTiKVSingleReadGather{},
	},
	memo.OperandShow: {
		&ImplShow{},
	},
	memo.OperandSelection: {
		&ImplSelection{},
	},
	memo.OperandSort: {
		&ImplSort{},
	},
	memo.OperandAggregation: {
		&ImplHashAgg{},
	},
	memo.OperandLimit: {
		&ImplLimit{},
	},
	memo.OperandTopN: {
		&ImplTopN{},
		&ImplTopNAsLimit{},
	},
	memo.OperandJoin: {
		&ImplHashJoinBuildLeft{},
		&ImplHashJoinBuildRight{},
		&ImplMergeJoin{},
	},
	memo.OperandUnionAll: {
		&ImplUnionAll{},
	},
	memo.OperandApply: {
		&ImplApply{},
	},
	memo.OperandMaxOneRow: {
		&ImplMaxOneRow{},
	},
	memo.OperandWindow: {
		&ImplWindow{},
	},
}

// ImplTableDual implements LogicalTableDual as PhysicalTableDual.
type ImplTableDual struct {
}

// Match implements ImplementationRule Match interface.
// TableDual produces no meaningful order, so only an empty sort requirement matches.
func (*ImplTableDual) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplTableDual) OnImplement(expr *memo.GroupExpr, _ *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicProp := expr.Group.Prop
	logicDual := expr.ExprNode.(*plannercore.LogicalTableDual)
	dual := plannercore.PhysicalTableDual{RowCount: logicDual.RowCount}.Init(logicDual.SCtx(), logicProp.Stats, logicDual.SelectBlockOffset())
	dual.SetSchema(logicProp.Schema)
	return []memo.Implementation{impl.NewTableDualImpl(dual)}, nil
}

// ImplMemTableScan implements LogicalMemTable as PhysicalMemTable.
type ImplMemTableScan struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplMemTableScan) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplMemTableScan) OnImplement(
	expr *memo.GroupExpr,
	reqProp *property.PhysicalProperty,
) ([]memo.Implementation, error) {
	logic := expr.ExprNode.(*plannercore.LogicalMemTable)
	logicProp := expr.Group.Prop
	physical := plannercore.PhysicalMemTable{
		DBName:    logic.DBName,
		Table:     logic.TableInfo,
		Columns:   logic.TableInfo.Columns,
		Extractor: logic.Extractor,
	}.Init(logic.SCtx(), logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), logic.SelectBlockOffset())
	physical.SetSchema(logicProp.Schema)
	return []memo.Implementation{impl.NewMemTableScanImpl(physical)}, nil
}

// ImplProjection implements LogicalProjection as PhysicalProjection.
type ImplProjection struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplProjection) Match(_ *memo.GroupExpr, _ *property.PhysicalProperty) (matched bool) {
	return true
}

// OnImplement implements ImplementationRule OnImplement interface.
// Returns (nil, nil) — no implementation, no error — when the required
// property cannot be pushed to the child.
func (*ImplProjection) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicProp := expr.Group.Prop
	logicProj := expr.ExprNode.(*plannercore.LogicalProjection)
	childProp, ok := logicProj.TryToGetChildProp(reqProp)
	if !ok {
		return nil, nil
	}
	proj := plannercore.PhysicalProjection{
		Exprs:                logicProj.Exprs,
		CalculateNoDelay:     logicProj.CalculateNoDelay,
		AvoidColumnEvaluator: logicProj.AvoidColumnEvaluator,
	}.Init(logicProj.SCtx(), logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), logicProj.SelectBlockOffset(), childProp)
	proj.SetSchema(logicProp.Schema)
	return []memo.Implementation{impl.NewProjectionImpl(proj)}, nil
}

// ImplTiKVSingleReadGather implements TiKVSingleGather
// as PhysicalTableReader or PhysicalIndexReader.
type ImplTiKVSingleReadGather struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplTiKVSingleReadGather) Match(_ *memo.GroupExpr, _ *property.PhysicalProperty) (matched bool) {
	return true
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplTiKVSingleReadGather) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicProp := expr.Group.Prop
	sg := expr.ExprNode.(*plannercore.TiKVSingleGather)
	if sg.IsIndexGather {
		reader := sg.GetPhysicalIndexReader(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp)
		return []memo.Implementation{impl.NewIndexReaderImpl(reader, sg.Source)}, nil
	}
	reader := sg.GetPhysicalTableReader(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp)
	return []memo.Implementation{impl.NewTableReaderImpl(reader, sg.Source)}, nil
}

// ImplTableScan implements TableScan as PhysicalTableScan.
type ImplTableScan struct {
}

// Match implements ImplementationRule Match interface.
// A table scan matches either no order requirement or an order on the
// single handle column (rows come back in handle order).
func (*ImplTableScan) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	ts := expr.ExprNode.(*plannercore.LogicalTableScan)
	return prop.IsSortItemEmpty() || (len(prop.SortItems) == 1 && ts.HandleCols != nil && prop.SortItems[0].Col.Equal(nil, ts.HandleCols.GetCol(0)))
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplTableScan) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicProp := expr.Group.Prop
	logicalScan := expr.ExprNode.(*plannercore.LogicalTableScan)
	ts := logicalScan.GetPhysicalScan(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt))
	if !reqProp.IsSortItemEmpty() {
		// Match guarantees the only possible sort item is the handle column.
		ts.KeepOrder = true
		ts.Desc = reqProp.SortItems[0].Desc
	}
	tblCols, tblColHists := logicalScan.Source.TblCols, logicalScan.Source.TblColHists
	return []memo.Implementation{impl.NewTableScanImpl(ts, tblCols, tblColHists)}, nil
}

// ImplIndexScan implements IndexScan as PhysicalIndexScan.
type ImplIndexScan struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplIndexScan) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	is := expr.ExprNode.(*plannercore.LogicalIndexScan)
	return is.MatchIndexProp(prop)
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplIndexScan) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicalScan := expr.ExprNode.(*plannercore.LogicalIndexScan)
	is := logicalScan.GetPhysicalIndexScan(expr.Group.Prop.Schema, expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt))
	if !reqProp.IsSortItemEmpty() {
		is.KeepOrder = true
		if reqProp.SortItems[0].Desc {
			is.Desc = true
		}
	}
	return []memo.Implementation{impl.NewIndexScanImpl(is, logicalScan.Source.TblColHists)}, nil
}

// ImplShow is the implementation rule which implements LogicalShow to
// PhysicalShow.
type ImplShow struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplShow) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplShow) OnImplement(expr *memo.GroupExpr, _ *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicProp := expr.Group.Prop
	show := expr.ExprNode.(*plannercore.LogicalShow)

	// TODO(zz-jason): unifying LogicalShow and PhysicalShow to a single
	// struct. So that we don't need to create a new PhysicalShow object, which
	// can help us to reduce the gc pressure of golang runtime and improve the
	// overall performance.
	showPhys := plannercore.PhysicalShow{
		ShowContents: show.ShowContents,
		Extractor:    show.Extractor,
	}.Init(show.SCtx())
	showPhys.SetSchema(logicProp.Schema)
	return []memo.Implementation{impl.NewShowImpl(showPhys)}, nil
}

// ImplSelection is the implementation rule which implements LogicalSelection
// to PhysicalSelection.
type ImplSelection struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplSelection) Match(_ *memo.GroupExpr, _ *property.PhysicalProperty) (matched bool) {
	return true
}

// OnImplement implements ImplementationRule OnImplement interface.
// The implementation wrapper depends on the engine the group runs on.
func (*ImplSelection) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicalSel := expr.ExprNode.(*plannercore.LogicalSelection)
	physicalSel := plannercore.PhysicalSelection{
		Conditions: logicalSel.Conditions,
	}.Init(logicalSel.SCtx(), expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), logicalSel.SelectBlockOffset(), reqProp.CloneEssentialFields())
	switch expr.Group.EngineType {
	case memo.EngineTiDB:
		return []memo.Implementation{impl.NewTiDBSelectionImpl(physicalSel)}, nil
	case memo.EngineTiKV:
		return []memo.Implementation{impl.NewTiKVSelectionImpl(physicalSel)}, nil
	default:
		return nil, plannercore.ErrInternal.GenWithStack("Unsupported EngineType '%s' for Selection.", expr.Group.EngineType.String())
	}
}

// ImplSort is the implementation rule which implements LogicalSort
// to PhysicalSort or NominalSort.
type ImplSort struct {
}

// Match implements ImplementationRule match interface.
func (*ImplSort) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	ls := expr.ExprNode.(*plannercore.LogicalSort)
	return plannercore.MatchItems(prop, ls.ByItems)
}

// OnImplement implements ImplementationRule OnImplement interface.
// If all of the sort items are columns, generate a NominalSort, otherwise
// generate a PhysicalSort.
func (*ImplSort) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	ls := expr.ExprNode.(*plannercore.LogicalSort)
	if newProp, canUseNominal := plannercore.GetPropByOrderByItems(ls.ByItems); canUseNominal {
		// NominalSort does no work itself: the order is pushed down to the
		// child as a required property.
		newProp.ExpectedCnt = reqProp.ExpectedCnt
		ns := plannercore.NominalSort{}.Init(
			ls.SCtx(), expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), ls.SelectBlockOffset(), newProp)
		return []memo.Implementation{impl.NewNominalSortImpl(ns)}, nil
	}
	ps := plannercore.PhysicalSort{ByItems: ls.ByItems}.Init(
		ls.SCtx(),
		expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt),
		ls.SelectBlockOffset(),
		&property.PhysicalProperty{ExpectedCnt: math.MaxFloat64},
	)
	return []memo.Implementation{impl.NewSortImpl(ps)}, nil
}

// ImplHashAgg is the implementation rule which implements LogicalAggregation
// to PhysicalHashAgg.
type ImplHashAgg struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplHashAgg) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	// TODO: deal with the hints when we have implemented StreamAgg.
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplHashAgg) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	la := expr.ExprNode.(*plannercore.LogicalAggregation)
	hashAgg := plannercore.NewPhysicalHashAgg(
		la,
		expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt),
		&property.PhysicalProperty{ExpectedCnt: math.MaxFloat64},
	)
	hashAgg.SetSchema(expr.Group.Prop.Schema.Clone())
	switch expr.Group.EngineType {
	case memo.EngineTiDB:
		return []memo.Implementation{impl.NewTiDBHashAggImpl(hashAgg)}, nil
	case memo.EngineTiKV:
		return []memo.Implementation{impl.NewTiKVHashAggImpl(hashAgg)}, nil
	default:
		return nil, plannercore.ErrInternal.GenWithStack("Unsupported EngineType '%s' for HashAggregation.", expr.Group.EngineType.String())
	}
}

// ImplLimit is the implementation rule which implements LogicalLimit
// to PhysicalLimit.
type ImplLimit struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplLimit) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplLimit) OnImplement(expr *memo.GroupExpr, _ *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicalLimit := expr.ExprNode.(*plannercore.LogicalLimit)
	// The child only needs to produce Count+Offset rows.
	newProp := &property.PhysicalProperty{ExpectedCnt: float64(logicalLimit.Count + logicalLimit.Offset)}
	physicalLimit := plannercore.PhysicalLimit{
		Offset: logicalLimit.Offset,
		Count:  logicalLimit.Count,
	}.Init(logicalLimit.SCtx(), expr.Group.Prop.Stats, logicalLimit.SelectBlockOffset(), newProp)
	physicalLimit.SetSchema(expr.Group.Prop.Schema.Clone())
	return []memo.Implementation{impl.NewLimitImpl(physicalLimit)}, nil
}

// ImplTopN is the implementation rule which implements LogicalTopN
// to PhysicalTopN.
type ImplTopN struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplTopN) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	topN := expr.ExprNode.(*plannercore.LogicalTopN)
	if expr.Group.EngineType != memo.EngineTiDB {
		return prop.IsSortItemEmpty()
	}
	return plannercore.MatchItems(prop, topN.ByItems)
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplTopN) OnImplement(expr *memo.GroupExpr, _ *property.PhysicalProperty) ([]memo.Implementation, error) {
	lt := expr.ExprNode.(*plannercore.LogicalTopN)
	resultProp := &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}
	topN := plannercore.PhysicalTopN{
		ByItems: lt.ByItems,
		Count:   lt.Count,
		Offset:  lt.Offset,
	}.Init(lt.SCtx(), expr.Group.Prop.Stats, lt.SelectBlockOffset(), resultProp)
	switch expr.Group.EngineType {
	case memo.EngineTiDB:
		return []memo.Implementation{impl.NewTiDBTopNImpl(topN)}, nil
	case memo.EngineTiKV:
		return []memo.Implementation{impl.NewTiKVTopNImpl(topN)}, nil
	default:
		return nil, plannercore.ErrInternal.GenWithStack("Unsupported EngineType '%s' for TopN.", expr.Group.EngineType.String())
	}
}

// ImplTopNAsLimit is the implementation rule which implements LogicalTopN
// as PhysicalLimit with required order property.
type ImplTopNAsLimit struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplTopNAsLimit) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	topN := expr.ExprNode.(*plannercore.LogicalTopN)
	_, canUseLimit := plannercore.GetPropByOrderByItems(topN.ByItems)
	return canUseLimit && plannercore.MatchItems(prop, topN.ByItems)
}

// OnImplement implements ImplementationRule OnImplement interface.
// The sort is pushed to the child as a required property; the TopN itself
// degenerates into a Limit.
func (*ImplTopNAsLimit) OnImplement(expr *memo.GroupExpr, _ *property.PhysicalProperty) ([]memo.Implementation, error) {
	lt := expr.ExprNode.(*plannercore.LogicalTopN)
	newProp := &property.PhysicalProperty{ExpectedCnt: float64(lt.Count + lt.Offset)}
	newProp.SortItems = make([]property.SortItem, len(lt.ByItems))
	for i, item := range lt.ByItems {
		newProp.SortItems[i].Col = item.Expr.(*expression.Column)
		newProp.SortItems[i].Desc = item.Desc
	}
	physicalLimit := plannercore.PhysicalLimit{
		Offset: lt.Offset,
		Count:  lt.Count,
	}.Init(lt.SCtx(), expr.Group.Prop.Stats, lt.SelectBlockOffset(), newProp)
	physicalLimit.SetSchema(expr.Group.Prop.Schema.Clone())
	return []memo.Implementation{impl.NewLimitImpl(physicalLimit)}, nil
}

// getImplForHashJoin builds a hash-join implementation where the child at
// innerIdx is the build side (unless useOuterToBuild flips that inside
// NewPhysicalHashJoin). The outer child's expected count is scaled down in
// proportion to how much of this group's output the parent actually needs.
func getImplForHashJoin(expr *memo.GroupExpr, prop *property.PhysicalProperty, innerIdx int, useOuterToBuild bool) memo.Implementation {
	join := expr.ExprNode.(*plannercore.LogicalJoin)
	chReqProps := make([]*property.PhysicalProperty, 2)
	chReqProps[0] = &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}
	chReqProps[1] = &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64}
	stats := expr.Group.Prop.Stats
	if prop.ExpectedCnt < stats.RowCount {
		expCntScale := prop.ExpectedCnt / stats.RowCount
		chReqProps[1-innerIdx].ExpectedCnt = expr.Children[1-innerIdx].Prop.Stats.RowCount * expCntScale
	}
	hashJoin := plannercore.NewPhysicalHashJoin(join, innerIdx, useOuterToBuild, stats.ScaleByExpectCnt(prop.ExpectedCnt), chReqProps...)
	hashJoin.SetSchema(expr.Group.Prop.Schema)
	return impl.NewHashJoinImpl(hashJoin)
}

// ImplHashJoinBuildLeft implements LogicalJoin to PhysicalHashJoin which uses the left child to build hash table.
type ImplHashJoinBuildLeft struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplHashJoinBuildLeft) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	switch expr.ExprNode.(*plannercore.LogicalJoin).JoinType {
	case plannercore.InnerJoin, plannercore.LeftOuterJoin, plannercore.RightOuterJoin:
		return prop.IsSortItemEmpty()
	default:
		return false
	}
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplHashJoinBuildLeft) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	join := expr.ExprNode.(*plannercore.LogicalJoin)
	switch join.JoinType {
	case plannercore.InnerJoin:
		return []memo.Implementation{getImplForHashJoin(expr, reqProp, 0, false)}, nil
	case plannercore.LeftOuterJoin:
		// Inner side is the right child (idx 1); useOuterToBuild makes the
		// left (outer) child the build side.
		return []memo.Implementation{getImplForHashJoin(expr, reqProp, 1, true)}, nil
	case plannercore.RightOuterJoin:
		return []memo.Implementation{getImplForHashJoin(expr, reqProp, 0, false)}, nil
	default:
		return nil, nil
	}
}

// ImplHashJoinBuildRight implements LogicalJoin to PhysicalHashJoin which uses the right child to build hash table.
type ImplHashJoinBuildRight struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplHashJoinBuildRight) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplHashJoinBuildRight) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	join := expr.ExprNode.(*plannercore.LogicalJoin)
	switch join.JoinType {
	case plannercore.SemiJoin, plannercore.AntiSemiJoin,
		plannercore.LeftOuterSemiJoin, plannercore.AntiLeftOuterSemiJoin:
		return []memo.Implementation{getImplForHashJoin(expr, reqProp, 1, false)}, nil
	case plannercore.InnerJoin:
		return []memo.Implementation{getImplForHashJoin(expr, reqProp, 1, false)}, nil
	case plannercore.LeftOuterJoin:
		return []memo.Implementation{getImplForHashJoin(expr, reqProp, 1, false)}, nil
	case plannercore.RightOuterJoin:
		return []memo.Implementation{getImplForHashJoin(expr, reqProp, 0, true)}, nil
	}
	return nil, nil
}

// ImplMergeJoin implements LogicalMergeJoin to PhysicalMergeJoin.
type ImplMergeJoin struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplMergeJoin) Match(_ *memo.GroupExpr, _ *property.PhysicalProperty) (matched bool) {
	return true
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplMergeJoin) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	join := expr.ExprNode.(*plannercore.LogicalJoin)
	physicalMergeJoins := join.GetMergeJoin(reqProp, expr.Schema(), expr.Group.Prop.Stats, expr.Children[0].Prop.Stats, expr.Children[1].Prop.Stats)
	mergeJoinImpls := make([]memo.Implementation, 0, len(physicalMergeJoins))
	for _, physicalPlan := range physicalMergeJoins {
		physicalMergeJoin := physicalPlan.(*plannercore.PhysicalMergeJoin)
		mergeJoinImpls = append(mergeJoinImpls, impl.NewMergeJoinImpl(physicalMergeJoin))
	}
	return mergeJoinImpls, nil
}

// ImplUnionAll implements LogicalUnionAll to PhysicalUnionAll.
type ImplUnionAll struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplUnionAll) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplUnionAll) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	logicalUnion := expr.ExprNode.(*plannercore.LogicalUnionAll)
	chReqProps := make([]*property.PhysicalProperty, len(expr.Children))
	for i := range expr.Children {
		chReqProps[i] = &property.PhysicalProperty{ExpectedCnt: reqProp.ExpectedCnt}
	}
	physicalUnion := plannercore.PhysicalUnionAll{}.Init(
		logicalUnion.SCtx(),
		expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt),
		logicalUnion.SelectBlockOffset(),
		chReqProps...,
	)
	physicalUnion.SetSchema(expr.Group.Prop.Schema)
	return []memo.Implementation{impl.NewUnionAllImpl(physicalUnion)}, nil
}

// ImplApply implements LogicalApply to PhysicalApply
type ImplApply struct {
}

// Match implements ImplementationRule Match interface.
// Apply can only keep an order that comes entirely from its outer child.
func (*ImplApply) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.AllColsFromSchema(expr.Children[0].Prop.Schema)
}

// OnImplement implements ImplementationRule OnImplement interface
func (*ImplApply) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	la := expr.ExprNode.(*plannercore.LogicalApply)
	join := la.GetHashJoin(reqProp)
	physicalApply := plannercore.PhysicalApply{
		PhysicalHashJoin: *join,
		OuterSchema:      la.CorCols,
	}.Init(
		la.SCtx(),
		expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt),
		la.SelectBlockOffset(),
		&property.PhysicalProperty{ExpectedCnt: math.MaxFloat64, SortItems: reqProp.SortItems},
		&property.PhysicalProperty{ExpectedCnt: math.MaxFloat64})
	physicalApply.SetSchema(expr.Group.Prop.Schema)
	return []memo.Implementation{impl.NewApplyImpl(physicalApply)}, nil
}

// ImplMaxOneRow implements LogicalMaxOneRow to PhysicalMaxOneRow.
type ImplMaxOneRow struct {
}

// Match implements ImplementationRule Match interface.
func (*ImplMaxOneRow) Match(_ *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	return prop.IsSortItemEmpty()
}

// OnImplement implements ImplementationRule OnImplement interface
func (*ImplMaxOneRow) OnImplement(expr *memo.GroupExpr, _ *property.PhysicalProperty) ([]memo.Implementation, error) {
	mor := expr.ExprNode.(*plannercore.LogicalMaxOneRow)
	physicalMaxOneRow := plannercore.PhysicalMaxOneRow{}.Init(
		mor.SCtx(),
		expr.Group.Prop.Stats,
		mor.SelectBlockOffset(),
		// ExpectedCnt 2: presumably the child must yield a second row (if one
		// exists) so the >1-row violation can be detected — TODO confirm.
		&property.PhysicalProperty{ExpectedCnt: 2})
	return []memo.Implementation{impl.NewMaxOneRowImpl(physicalMaxOneRow)}, nil
}

// ImplWindow implements LogicalWindow to PhysicalWindow.
type ImplWindow struct {
}

// Match implements ImplementationRule Match interface.
// The required order must be a prefix of (PartitionBy ++ OrderBy).
func (*ImplWindow) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) {
	lw := expr.ExprNode.(*plannercore.LogicalWindow)
	var byItems []property.SortItem
	byItems = append(byItems, lw.PartitionBy...)
	byItems = append(byItems, lw.OrderBy...)
	childProperty := &property.PhysicalProperty{ExpectedCnt: math.MaxFloat64, SortItems: byItems}
	return prop.IsPrefix(childProperty)
}

// OnImplement implements ImplementationRule OnImplement interface.
func (*ImplWindow) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) ([]memo.Implementation, error) {
	lw := expr.ExprNode.(*plannercore.LogicalWindow)
	var byItems []property.SortItem
	byItems = append(byItems, lw.PartitionBy...)
	byItems = append(byItems, lw.OrderBy...)
	physicalWindow := plannercore.PhysicalWindow{
		WindowFuncDescs: lw.WindowFuncDescs,
		PartitionBy:     lw.PartitionBy,
		OrderBy:         lw.OrderBy,
		Frame:           lw.Frame,
	}.Init(
		lw.SCtx(),
		expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt),
		lw.SelectBlockOffset(),
		&property.PhysicalProperty{ExpectedCnt: math.MaxFloat64, SortItems: byItems},
	)
	physicalWindow.SetSchema(expr.Group.Prop.Schema)
	return []memo.Implementation{impl.NewWindowImpl(physicalWindow)}, nil
}
package index

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"os"
	"regexp"
	"time"

	"github.com/dgrijalva/jwt-go"
	"go.mongodb.org/mongo-driver/bson"
	"golang.org/x/crypto/bcrypt"
)

// emailRe validates the login email format. Compiled once at package level
// instead of on every request (fix: the original compiled it per call).
var emailRe = regexp.MustCompile(`^[a-z0-9._%+\-]+@[a-z0-9.\-]+\.[a-z]{2,4}$`)

// LoginUser authenticates a user by email and password and, on success,
// writes a JSON response containing a signed HS256 JWT (claims: authorized,
// id, email, 1-hour expiry).
//
// Fixes over the previous version:
//   - the JSON body decode error is no longer ignored;
//   - ACCESS_SECRET is checked for presence instead of silently signing with
//     an empty key;
//   - removed the debug fmt.Println(err) that leaked lookup failures to
//     stdout (and with it the now-unused fmt import);
//   - removed dead commented-out code.
//
// responseError is presumably a package helper that writes an error JSON
// response — TODO confirm its status-code behavior.
func LoginUser(response http.ResponseWriter, request *http.Request) {
	response.Header().Add("content-type", "application/json")

	secret, ok := os.LookupEnv("ACCESS_SECRET")
	if !ok || secret == "" {
		// Server misconfiguration: never sign tokens with an empty key.
		responseError(errors.New("Unable to generate token"), response)
		return
	}

	var user Users
	var result Users
	if err := json.NewDecoder(request.Body).Decode(&user); err != nil {
		responseError(errors.New("Invalid email or password"), response)
		return
	}

	if !emailRe.MatchString(user.Email) {
		responseError(errors.New("Pls, enter a valid email address"), response)
		return
	}

	collection := getDB("users")
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Deliberately report the same generic message for "user not found" and
	// "wrong password" so the endpoint does not reveal which emails exist.
	if err := collection.FindOne(ctx, bson.D{{"email", user.Email}}).Decode(&result); err != nil {
		responseError(errors.New("Invalid email or password"), response)
		return
	}

	if err := bcrypt.CompareHashAndPassword([]byte(result.Password), []byte(user.Password)); err != nil {
		responseError(errors.New("Invalid email or password"), response)
		return
	}

	atClaims := jwt.MapClaims{
		"authorized": true,
		"id":         result.ID,
		"email":      result.Email,
		"exp":        time.Now().Add(time.Minute * 60).Unix(),
	}
	token, err := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims).SignedString([]byte(secret))
	if err != nil {
		responseError(errors.New("Unable to generate token"), response)
		return
	}

	finalResult := map[string]interface{}{
		"message": "User logged in successfully",
		"status":  200,
		"success": true,
		"token":   token,
	}
	// Best-effort write; headers are already committed at this point.
	json.NewEncoder(response).Encode(finalResult)
}
/*
Copyright © 2022 SUSE LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factoryreset

import (
	"bytes"
	"encoding/csv"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"unsafe"

	"github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/directories"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
	"golang.org/x/text/encoding/unicode"
)

// Lazy-loaded kernel32 entry point for K32EnumProcesses; used instead of
// windows.EnumProcesses (see the comment inside KillRancherDesktop).
var (
	pKernel32      = windows.NewLazySystemDLL("kernel32.dll")
	pEnumProcesses = pKernel32.NewProc("K32EnumProcesses")
)

// CheckProcessWindows - returns true if Rancher Desktop is still running, false if it isn't
// along with an error condition if there's a problem detecting that.
//
// It does this by calling `tasklist`, the Windows answer to ps(1)
func CheckProcessWindows() (bool, error) {
	cmd := exec.Command("tasklist", "/NH", "/FI", "IMAGENAME eq Rancher Desktop.exe", "/FO", "CSV")
	cmd.SysProcAttr = &syscall.SysProcAttr{CreationFlags: CREATE_NO_WINDOW}
	allOutput, err := cmd.CombinedOutput()
	if err != nil {
		return false, fmt.Errorf("Failed to run %q: %w", cmd, err)
	}
	// tasklist /FO CSV emits one CSV record per process; field 0 is the image name.
	r := csv.NewReader(bytes.NewReader(allOutput))
	for {
		record, err := r.Read()
		if err != nil {
			if !errors.Is(err, io.EOF) {
				return false, fmt.Errorf("Failed to csv-read the output for tasklist: %w", err)
			}
			break
		}
		if len(record) > 0 && record[0] == "Rancher Desktop.exe" {
			return true, nil
		}
	}
	return false, nil
}

// KillRancherDesktop terminates all processes where the executable is from the
// Rancher Desktop application, excluding the current process.
func KillRancherDesktop() error {
	err := stopPrivilegedService()
	if err != nil {
		return fmt.Errorf("failed to stop privileged service: %w", err)
	}
	appDir, err := directories.GetApplicationDirectory()
	if err != nil {
		return fmt.Errorf("could not find application directory: %w", err)
	}

	// Enumerate all PIDs, retrying with a larger buffer until it fits.
	var processes []uint32
	err = directories.InvokeWin32WithBuffer(func(size int) error {
		processes = make([]uint32, size)
		var bytesReturned uint32
		// We can't use `windows.EnumProcesses`, because it passes in an incorrect
		// value for the second argument (`cb`).
		elementSize := unsafe.Sizeof(uint32(0))
		bufferSize := uintptr(len(processes)) * elementSize
		n, _, err := pEnumProcesses.Call(
			uintptr(unsafe.Pointer(&processes[0])),
			bufferSize,
			uintptr(unsafe.Pointer(&bytesReturned)),
		)
		if n == 0 {
			return err
		}
		if uintptr(bytesReturned) >= bufferSize {
			// The buffer may have been exactly full; ask for a bigger one to
			// be sure nothing was truncated.
			return windows.ERROR_INSUFFICIENT_BUFFER
		}
		processesFound := uintptr(bytesReturned) / elementSize
		logrus.Tracef("got %d processes", processesFound)
		processes = processes[:processesFound]
		return nil
	})
	if err != nil {
		return fmt.Errorf("could not get process list: %w", err)
	}
	sort.Slice(processes, func(i, j int) bool { return processes[i] < processes[j] })

	// First pass: identify which PIDs belong to the application directory.
	var processesToKill []uint32
	for _, pid := range processes {
		// Add a scope to help with defer
		(func(pid uint32) {
			if pid == uint32(os.Getpid()) {
				// Skip the current process.
				return
			}
			hProc, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
			if err != nil {
				// We can't open privileged processes, processes that have exited since,
				// idle process, etc.; so we log this at trace level instead.
				logrus.Tracef("failed to open pid %d: %s (skipping)", pid, err)
				return
			}
			defer windows.CloseHandle(hProc)
			// Resolve the executable path, growing the name buffer on demand.
			var imageName string
			err = directories.InvokeWin32WithBuffer(func(size int) error {
				nameBuf := make([]uint16, size)
				charsWritten := uint32(size)
				err := windows.QueryFullProcessImageName(hProc, 0, &nameBuf[0], &charsWritten)
				if err != nil {
					logrus.Tracef("failed to get image name for pid %d: %s", pid, err)
					return err
				}
				if charsWritten >= uint32(size)-1 {
					logrus.Tracef("buffer too small for pid %d image name", pid)
					return windows.ERROR_INSUFFICIENT_BUFFER
				}
				imageName = windows.UTF16ToString(nameBuf)
				return nil
			})
			if err != nil {
				logrus.Debugf("failed to get process name of pid %d: %s (skipping)", pid, err)
				return
			}
			relPath, err := filepath.Rel(appDir, imageName)
			if err != nil {
				// This may be because they're on different drives, network shares, etc.
				logrus.Tracef("failed to make pid %d image %s relative to %s: %s", pid, imageName, appDir, err)
				return
			}
			if strings.HasPrefix(relPath, "..") {
				// Relative path includes "../" prefix, not a child of appDir
				logrus.Tracef("skipping pid %d (%s), not in app %s", pid, imageName, appDir)
				return
			}
			logrus.Tracef("will terminate pid %d image %s", pid, imageName)
			processesToKill = append(processesToKill, pid)
		})(pid)
	}

	// Second pass: terminate the collected PIDs; failures are logged, not fatal.
	for _, pid := range processesToKill {
		(func() {
			hProc, err := windows.OpenProcess(windows.PROCESS_TERMINATE, false, pid)
			if err != nil {
				logrus.Infof("failed to open process %d for termination, skipping", pid)
				return
			}
			defer windows.CloseHandle(hProc)
			if err = windows.TerminateProcess(hProc, 0); err != nil {
				logrus.Infof("failed to terminate process %d: %s", pid, err)
			}
		})()
	}

	return nil
}

// deleteWindowsData removes the application's data directories. Individual
// removal failures are logged and skipped so that as much as possible is
// cleaned up.
func deleteWindowsData(keepSystemImages bool, appName string) error {
	dirs, err := getDirectoriesToDelete(keepSystemImages, appName)
	if err != nil {
		return err
	}
	for _, dir := range dirs {
		logrus.WithField("path", dir).Trace("Removing directory")
		if err := os.RemoveAll(dir); err != nil {
			logrus.Errorf("Problem trying to delete %s: %s\n", dir, err)
		}
	}
	return nil
}

// getDirectoriesToDelete computes the list of directories deleteWindowsData
// should remove. When keepSystemImages is true, the cache/k3s directory and
// cache/k3s-versions.json are preserved.
func getDirectoriesToDelete(keepSystemImages bool, appName string) ([]string, error) {
	// Ordered from least important to most, so that if delete fails we
	// still keep some useful data.
	appData, err := directories.GetRoamingAppDataDirectory()
	if err != nil {
		return nil, fmt.Errorf("could not get AppData folder: %w", err)
	}
	localAppData, err := directories.GetLocalAppDataDirectory()
	if err != nil {
		return nil, fmt.Errorf("could not get LocalAppData folder: %w", err)
	}
	// NOTE(review): paths are built with path.Join (forward slashes) rather
	// than filepath.Join — confirm downstream consumers accept that on Windows.
	dirs := []string{path.Join(localAppData, fmt.Sprintf("%s-updater", appName))}
	localRDAppData := path.Join(localAppData, appName)
	if keepSystemImages {
		// We need to unpack the local appData dir, so we don't delete the main cached downloads
		// Specifically, don't delete .../cache/k3s & k3s-versions.json
		files, err := ioutil.ReadDir(localRDAppData)
		if err != nil {
			if !errors.Is(err, os.ErrNotExist) {
				return nil, fmt.Errorf("could not get files in folder %s: %w", localRDAppData, err)
			}
		} else {
			for _, file := range files {
				baseName := file.Name()
				if strings.ToLower(baseName) != "cache" {
					dirs = append(dirs, path.Join(localRDAppData, baseName))
				} else {
					cacheDir := path.Join(localRDAppData, baseName)
					cacheFiles, err := ioutil.ReadDir(cacheDir)
					if err != nil {
						logrus.Infof("could not get files in folder %s: %s", cacheDir, err)
					} else {
						for _, cacheDirFile := range cacheFiles {
							cacheDirFileName := cacheDirFile.Name()
							lcFileName := strings.ToLower(cacheDirFileName)
							if lcFileName != "k3s" && lcFileName != "k3s-versions.json" {
								dirs = append(dirs, path.Join(cacheDir, cacheDirFileName))
							}
						}
					}
				}
			}
		}
	} else {
		dirs = append(dirs, localRDAppData)
	}
	dirs = append(dirs, path.Join(appData, appName))
	return dirs, nil
}

// CREATE_NO_WINDOW is the Windows process-creation flag that suppresses the
// console window for spawned commands.
const CREATE_NO_WINDOW = 0x08000000

// unregisterWSL unregisters the rancher-desktop and rancher-desktop-data WSL
// distributions, if present. Per-distribution failures are logged, not fatal.
func unregisterWSL() error {
	cmd := exec.Command("wsl", "--list", "--quiet")
	cmd.SysProcAttr = &syscall.SysProcAttr{CreationFlags: CREATE_NO_WINDOW}
	rawBytes, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error getting current WSLs: %w", err)
	}
	// `wsl --list` output is UTF-16LE; decode before splitting into lines.
	decoder := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewDecoder()
	actualOutput, err := decoder.String(string(rawBytes))
	if err != nil {
		return fmt.Errorf("error getting current WSLs: %w", err)
	}
	actualOutput = strings.ReplaceAll(actualOutput, "\r", "")
	wsls := strings.Split(actualOutput, "\n")
	wslsToKill := []string{}
	for _, s := range wsls {
		if s == "rancher-desktop" || s == "rancher-desktop-data" {
			wslsToKill = append(wslsToKill, s)
		}
	}

	for _, wsl := range wslsToKill {
		cmd := exec.Command("wsl", "--unregister", wsl)
		cmd.SysProcAttr = &syscall.SysProcAttr{CreationFlags: CREATE_NO_WINDOW}
		if err := cmd.Run(); err != nil {
			logrus.Errorf("Error unregistering WSL %s: %s\n", wsl, err)
		}
	}

	return nil
}
package payloads

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
)

// Cipher provides methods to encrypt and decrypt values
type Cipher struct {
	cipher.Block
}

// NewCipher returns a new aes Cipher for encrypting values.
// The secret must be 16, 24 or 32 bytes long, selecting
// AES-128, AES-192 or AES-256 respectively.
func NewCipher(secret []byte) (*Cipher, error) {
	c, err := aes.NewCipher(secret)
	if err != nil {
		return nil, err
	}
	// Return a literal nil error on success (the original returned the
	// stale err variable, which obscures intent).
	return &Cipher{Block: c}, nil
}

// Encrypt a value for use in a Payload.
// The result is base64(iv || CFB(value)); a fresh random IV is generated
// on every call, so encrypting the same value twice yields different output.
func (c *Cipher) Encrypt(value string) (string, error) {
	ciphertext := make([]byte, aes.BlockSize+len(value))
	iv := ciphertext[:aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		// %w wraps the cause so callers can use errors.Is/errors.As.
		return "", fmt.Errorf("failed to create initialization vector %w", err)
	}

	stream := cipher.NewCFBEncrypter(c.Block, iv)
	stream.XORKeyStream(ciphertext[aes.BlockSize:], []byte(value))
	return base64.StdEncoding.EncodeToString(ciphertext), nil
}

// Decrypt a value from a Payload to its original string.
// The input must be base64 whose decoded form starts with the IV
// written by Encrypt.
func (c *Cipher) Decrypt(s string) (string, error) {
	encrypted, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return "", fmt.Errorf("failed to decrypt value %w", err)
	}
	// Too-short input cannot even contain the IV.
	if len(encrypted) < aes.BlockSize {
		return "", fmt.Errorf("encrypted value should be "+
			"at least %d bytes, but is only %d bytes",
			aes.BlockSize, len(encrypted))
	}

	iv := encrypted[:aes.BlockSize]
	encrypted = encrypted[aes.BlockSize:]
	stream := cipher.NewCFBDecrypter(c.Block, iv)
	// XOR in place: CFB decryption is the same keystream XOR.
	stream.XORKeyStream(encrypted, encrypted)
	return string(encrypted), nil
}
package main

import (
	"strconv"
	"strings"
)

// parsePrice parses a price string such as "1,234.56" into a float64.
// Thousands separators (commas) are stripped first. Returns 0.0 when the
// remainder is not a valid number.
func parsePrice(priceText string) float64 {
	// strings.ReplaceAll is the idiomatic single-pass form of the
	// original Split+Join round trip.
	normalized := strings.ReplaceAll(priceText, ",", "")
	price, err := strconv.ParseFloat(normalized, 64)
	if err != nil {
		return 0.0
	}
	return price
}

// parseCurrency extracts the numeric part of a currency string such as
// "$1,234.56" by skipping a leading currency symbol/prefix, then parsing
// the rest with parsePrice. If no ASCII digit is found the whole string is
// handed to parsePrice, which yields 0.0.
func parseCurrency(price string) float64 {
	price = strings.TrimSpace(price)
	numberStart := 0
	for i := 0; i < len(price); i++ {
		// Direct byte comparison replaces the original per-character
		// strconv.Atoi call; both accept exactly ASCII '0'-'9'.
		if price[i] >= '0' && price[i] <= '9' {
			numberStart = i
			break
		}
	}
	return parsePrice(price[numberStart:])
}
/*
Copyright 2022 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestGetTerraformConfigurationFromRemote checks that a Terraform variable
// file pre-seeded under ~/.vela/terraform/<name>/<path> is returned verbatim
// by GetTerraformConfigurationFromRemote.
//
// NOTE(review): the function under test takes a git URL, so it presumably
// clones the repository when the local cache is absent; these cases rely on
// the pre-written local file short-circuiting that — confirm no network
// access happens when the cache directory already exists.
func TestGetTerraformConfigurationFromRemote(t *testing.T) {
	type want struct {
		config string
		errMsg string
	}
	type args struct {
		name string
		url  string
		path string
		// data is the raw variable-file content written into the fake cache.
		data []byte
		// variableFile is the file name the content is written under.
		variableFile string
	}
	cases := map[string]struct {
		args args
		want want
	}{
		"valid": {
			args: args{
				name: "valid",
				url:  "https://github.com/kubevela-contrib/terraform-modules.git",
				path: "unittest/",
				data: []byte(`
variable "aaa" {
	type = list(object({
		type = string
		sourceArn = string
		config = string
	}))
	default = []
}`),
				variableFile: "main.tf",
			},
			want: want{
				config: `
variable "aaa" {
	type = list(object({
		type = string
		sourceArn = string
		config = string
	}))
	default = []
}`,
			},
		},
		"configuration is remote with path": {
			args: args{
				name: "aws-subnet",
				url:  "https://github.com/kubevela-contrib/terraform-modules.git",
				path: "unittest/aws/subnet",
				data: []byte(`
variable "aaa" {
	type = list(object({
		type = string
		sourceArn = string
		config = string
	}))
	default = []
}`),
				variableFile: "variables.tf",
			},
			want: want{
				config: `
variable "aaa" {
	type = list(object({
		type = string
		sourceArn = string
		config = string
	}))
	default = []
}`,
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			// Seed the on-disk cache location the implementation reads from.
			home, _ := os.UserHomeDir()
			path := filepath.Join(home, ".vela", "terraform")
			tmpPath := filepath.Join(path, tc.args.name, tc.args.path)
			if len(tc.args.data) > 0 {
				err := os.MkdirAll(tmpPath, os.ModePerm)
				assert.NoError(t, err)
				err = os.WriteFile(filepath.Clean(filepath.Join(tmpPath, tc.args.variableFile)), tc.args.data, 0644)
				assert.NoError(t, err)
			}
			// Clean up the seeded cache so runs stay independent.
			defer os.RemoveAll(tmpPath)
			conf, err := GetTerraformConfigurationFromRemote(tc.args.name, tc.args.url, tc.args.path, nil)
			if tc.want.errMsg != "" {
				if err != nil && !strings.Contains(err.Error(), tc.want.errMsg) {
					t.Errorf("\n%s\nGetTerraformConfigurationFromRemote(...): -want error %v, +got error:%s", name, err, tc.want.errMsg)
				}
			} else {
				assert.Equal(t, tc.want.config, conf)
			}
		})
	}
}
package htmlLinks

import (
	"net/http"
	"net/url"
	"sync"

	"github.com/PuerkitoBio/goquery"
)

// Links counts the categories of anchors found in a document.
type Links struct {
	Internal    int
	External    int
	Inaccesable int
}

/*
FindLinks: In a html document, finds Internal, External and Inaccesable links.

Param: doc (goquery.Document) html-document
Returns: Links
*/
func FindLinks(doc goquery.Document) Links {
	var internalLinks []string
	var externalLinks []string
	var invalidLinks []string

	doc.Find("a").Each(func(i int, s *goquery.Selection) {
		link, exists := s.Attr("href")
		if !exists {
			return
		}
		findInternalAndExternalLinks(link, &internalLinks, &externalLinks)
	})

	// Only external links are probed for accessibility.
	findInaccesibleLinks(externalLinks, &invalidLinks)

	return Links{
		Internal:    len(internalLinks),
		External:    len(externalLinks),
		Inaccesable: len(invalidLinks),
	}
}

// findInternalAndExternalLinks classifies a single href: URLs with no scheme
// and no host (relative paths, fragments) are internal, everything else is
// external. Unparseable hrefs are dropped.
func findInternalAndExternalLinks(urlToProccess string, internalLinks *[]string, externalLinks *[]string) {
	u, err := url.Parse(urlToProccess)
	if err != nil {
		return
	}
	if u.Scheme == "" && u.Host == "" {
		*internalLinks = append(*internalLinks, urlToProccess)
	} else {
		*externalLinks = append(*externalLinks, urlToProccess)
	}
}

// findInaccesibleLinks pings the given links concurrently (bounded by a
// semaphore channel) and collects the unreachable ones into invalidLinks.
//
// BUG FIX: the original goroutines appended to the shared slice without any
// synchronization — a data race under `go test -race`. Appends now happen
// under a mutex on the caller's goroutine-shared slice.
func findInaccesibleLinks(links []string, invalidLinks *[]string) {
	concurrencyLimit := 2
	if len(links) > 2 {
		concurrencyLimit = len(links) / 2
	}
	guard := make(chan struct{}, concurrencyLimit)
	var mu sync.Mutex
	wg := sync.WaitGroup{}

	for _, link := range links {
		if _, err := url.Parse(link); err != nil {
			return
		}
		guard <- struct{}{}
		wg.Add(1)
		go func(urlLink string) {
			defer wg.Done()
			defer func() { <-guard }()
			if !pingUrl(urlLink) {
				mu.Lock()
				*invalidLinks = append(*invalidLinks, urlLink)
				mu.Unlock()
			}
		}(link)
	}
	wg.Wait()
}

// pingUrl reports whether the URL answers an HTTP GET with status 200.
// NOTE(review): http.Get uses the default client with no timeout — a hung
// server will stall the whole scan; consider a client with a Timeout.
func pingUrl(urlToPing string) bool {
	res, err := http.Get(urlToPing)
	if err != nil {
		return false
	}
	defer res.Body.Close()
	return res.StatusCode == 200
}
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"os"
	"time"

	"github.com/go-jwdk/activemq-connector"
	"github.com/go-jwdk/jobworker"
	"github.com/go-stomp/stomp"
	uuid "github.com/satori/go.uuid"
)

// main runs a demo producer/consumer pair against an ActiveMQ broker.
// Connection settings are taken from the ACTIVEMQ_ADDR / ACTIVEMQ_USERNAME /
// ACTIVEMQ_PASSWORD environment variables.
func main() {
	addr := os.Getenv("ACTIVEMQ_ADDR")
	username := os.Getenv("ACTIVEMQ_USERNAME")
	password := os.Getenv("ACTIVEMQ_PASSWORD")

	s := &activemq.Setting{
		Network: "tcp",
		Addr:    addr,
		Config:  &tls.Config{},
		Opts: []func(*stomp.Conn) error{
			stomp.ConnOpt.Login(username, password),
		},
		AckMode:       stomp.AckClient,
		Persistent:    true,
		NumMaxRetries: 3,
	}

	conn, err := activemq.Open(s)
	if err != nil {
		fmt.Println("open conn error:", err)
		return
	}
	defer func() {
		err := conn.Close()
		if err != nil {
			fmt.Println("close conn error:", err)
		}
	}()

	// Producer: enqueue a uniquely-tagged job every 3 seconds, forever.
	go func() {
		for {
			_, err := conn.Enqueue(context.Background(), &jobworker.EnqueueInput{
				Queue:   "test",
				Payload: "hello: " + uuid.NewV4().String(),
			})
			if err != nil {
				fmt.Println("could not enqueue a job", err)
			}
			time.Sleep(3 * time.Second)
		}
	}()

	done := make(chan struct{})
	// Consumer: print and ack each job; main exits when the subscription
	// channel closes (or the subscribe call fails).
	go func() {
		defer close(done)
		out, err := conn.Subscribe(context.Background(), &jobworker.SubscribeInput{Queue: "test"})
		if err != nil {
			fmt.Println("receive jobs error:", err)
			// BUG FIX: the original fell through after a failed Subscribe and
			// dereferenced the nil `out`, panicking. Bail out instead.
			return
		}
		for job := range out.Subscription.Queue() {
			printJob(job)
			_, err := conn.CompleteJob(context.Background(), &jobworker.CompleteJobInput{
				Job: job,
			})
			if err != nil {
				fmt.Println("complete jobs error:", err)
			}
		}
	}()
	<-done
}

// printJob dumps a job's metadata, content and queue name to stdout.
func printJob(job *jobworker.Job) {
	fmt.Println("# ----------")
	for k, v := range job.Metadata {
		fmt.Println(k, ":", v)
	}
	fmt.Println("# ----------")
	fmt.Println("Content :", job.Content)
	fmt.Println("# ----------")
	fmt.Println("Queue :", job.QueueName)
	fmt.Println("# ----------")
}
package database

import (
	"fmt"

	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/postgres"
)

// Connection opens a gorm handle to the local Postgres instance using a
// hard-coded DSN. A failed open is printed to stdout and the (unusable)
// handle is still returned, matching the original behavior.
func Connection() *gorm.DB {
	dsn := "host=0.0.0.0 port=5432 user=postgres dbname=teste2 password=postgres sslmode=disable"

	db, err := gorm.Open("postgres", dsn)
	if err != nil {
		fmt.Println("Erro: ")
		fmt.Println(err)
	}

	return db
}
// Package main is a minimal hello-world program.
package main

import "fmt"

// main writes a greeting to standard output.
func main() {
	fmt.Println("Hello World")
}
package main

import (
	"flag"
	"fmt"
	"os"
	"runtime"
	"strings"
	"sync"

	"github.com/BurntSushi/toml"
	"github.com/agtorre/gocolorize"
	"github.com/mijia/gobuildweb/assets"
	"github.com/mijia/gobuildweb/loggers"
)

// ProjectConfig is the in-memory form of project.toml, guarded by the
// embedded RWMutex for concurrent readers.
type ProjectConfig struct {
	sync.RWMutex
	Package      *PackageConfig
	Assets       *assets.Config
	Distribution *DistributionConfig
}

// getAssetEntry returns the named asset entry under the read lock.
// FIX: pointer receiver — the original value receiver copied the embedded
// sync.RWMutex on every call (go vet: copylocks), making the lock useless.
func (pc *ProjectConfig) getAssetEntry(entryName string) (assets.Entry, bool) {
	pc.RLock()
	defer pc.RUnlock()
	return assets.GetEntryConfig(*pc.Assets, entryName)
}

// PackageConfig mirrors the [package] section of project.toml.
type PackageConfig struct {
	Name         string
	Version      string
	Authors      []string
	Dependencies []string `toml:"deps"`
	// FIX: the original tag `toml:builder` was missing quotes, so the TOML
	// decoder silently ignored it and "builder" keys never populated this field.
	Builder    string   `toml:"builder"`
	BuildOpts  []string `toml:"build_opts"`
	OmitTests  []string `toml:"omit_tests"`
	IsGraceful bool     `toml:"is_graceful"`
}

// DistributionConfig mirrors the [distribution] section of project.toml.
type DistributionConfig struct {
	BuildOpts    []string    `toml:"build_opts"`
	PackExtras   []string    `toml:"pack_extras"`
	CrossTargets [][2]string `toml:"cross_targets"`
	ExtraCmd     []string    `toml:"extra_cmd"`
}

// usage prints command-line help and exits with status 1.
func usage() {
	fmt.Println("Usage:")
	fmt.Println("  run     Will watch your file changes and run the application")
	fmt.Println("  dist    Build your web application")
	fmt.Println("  -specified-entries entry1,entry2 run -debug    Will watch your file changes and run the application, just compile the specified entry")
	os.Exit(1)
}

// getAvailableEntries filters entries down to the comma-separated names in
// specifiedEntries; an empty specification keeps every entry.
func getAvailableEntries(specifiedEntries string, entries []assets.Entry) []assets.Entry {
	var availableEntries []assets.Entry
	specifiedEntries = strings.TrimSpace(specifiedEntries)
	if len(specifiedEntries) == 0 {
		return entries
	}
	specifiedEntryList := strings.Split(specifiedEntries, ",")
	specifiedEntryMap := map[string]bool{}
	for _, specifiedEntry := range specifiedEntryList {
		specifiedEntryMap[specifiedEntry] = true
	}
	for _, entry := range entries {
		if _, ok := specifiedEntryMap[entry.Name]; ok {
			availableEntries = append(availableEntries, entry)
			loggers.Info("the available entry is %+v", entry)
		}
	}
	return availableEntries
}

// main parses flags, loads and validates project.toml into rootConfig, and
// dispatches to the selected sub-command (run / dist).
func main() {
	loggers.IsDebug = os.Getenv("GBW_DEBUG") == "1"
	fmt.Println(gocolorize.NewColor("magenta").Paint("gobuildweb > Build a Golang web application.\n"))

	cmds := map[string]Command{
		"run":  commandRun,
		"dist": commandDist,
	}
	specifiedEntries := flag.String("specified-entries", "", "the specified entries name, comma separated")
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		usage()
	}

	if cmd, ok := cmds[args[0]]; !ok {
		usage()
	} else {
		// project.toml must exist and be a regular file.
		if fi, err := os.Stat("project.toml"); os.IsNotExist(err) {
			loggers.ERROR.Fatalf("Please provide a project.toml for web project.")
		} else if err != nil {
			loggers.ERROR.Fatalf("Accessing project.toml file error, %v.", err)
		} else if fi.IsDir() {
			loggers.ERROR.Fatalf("project.toml cannot be a directory.")
		}
		if _, err := toml.DecodeFile("project.toml", &rootConfig); err != nil {
			loggers.ERROR.Fatalf("Cannot decode the project.toml into TOML format, %v", err)
		}
		// Narrow the asset entries when -specified-entries was given.
		if len(*specifiedEntries) > 0 && rootConfig.Assets != nil {
			availableEntries := getAvailableEntries(*specifiedEntries, rootConfig.Assets.Entries)
			rootConfig.Assets.Entries = availableEntries
		}
		loggers.SUCC.Printf("Loaded project.toml... %s", rootConfig.Package.Name)
		if err := cmd(args[1:]); err != nil {
			loggers.ERROR.Fatalf("Executing command [%v] error, %v", args[0], err)
		}
	}
}

// rootConfig is the process-wide project configuration loaded in main.
var rootConfig ProjectConfig

func init() {
	runtime.GOMAXPROCS(runtime.NumCPU())
}
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE(review): this file follows the DCL code-generator's converter
// patterns; keep edits mechanical and consistent with sibling converters.
package server

import (
	"context"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/beta/compute_beta_go_proto"
	emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta"
)

// Server implements the gRPC interface for Disk.
type DiskServer struct{}

// ProtoToDiskGuestOSFeatureTypeEnum converts a DiskGuestOSFeatureTypeEnum enum from its proto representation.
func ProtoToComputeBetaDiskGuestOSFeatureTypeEnum(e betapb.ComputeBetaDiskGuestOSFeatureTypeEnum) *beta.DiskGuestOSFeatureTypeEnum {
	// Proto value 0 is the "unspecified" sentinel; map it to nil.
	if e == 0 {
		return nil
	}
	// Proto enum names are prefixed with the enum type name; strip the
	// prefix to recover the bare DCL enum string.
	if n, ok := betapb.ComputeBetaDiskGuestOSFeatureTypeEnum_name[int32(e)]; ok {
		e := beta.DiskGuestOSFeatureTypeEnum(n[len("ComputeBetaDiskGuestOSFeatureTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToDiskGuestOSFeatureTypeAltEnum converts a DiskGuestOSFeatureTypeAltEnum enum from its proto representation.
// All converters below follow the same generated pattern: 0 maps to nil
// (unspecified), otherwise the enum-type-name prefix is stripped from the
// proto name to produce the DCL enum value.
func ProtoToComputeBetaDiskGuestOSFeatureTypeAltEnum(e betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum) *beta.DiskGuestOSFeatureTypeAltEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum_name[int32(e)]; ok {
		e := beta.DiskGuestOSFeatureTypeAltEnum(n[len("ComputeBetaDiskGuestOSFeatureTypeAltEnum"):])
		return &e
	}
	return nil
}

// ProtoToDiskStatusEnum converts a DiskStatusEnum enum from its proto representation.
func ProtoToComputeBetaDiskStatusEnum(e betapb.ComputeBetaDiskStatusEnum) *beta.DiskStatusEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.ComputeBetaDiskStatusEnum_name[int32(e)]; ok {
		e := beta.DiskStatusEnum(n[len("ComputeBetaDiskStatusEnum"):])
		return &e
	}
	return nil
}

// ProtoToDiskGuestOSFeaturesTypeEnum converts a DiskGuestOSFeaturesTypeEnum enum from its proto representation.
func ProtoToComputeBetaDiskGuestOSFeaturesTypeEnum(e betapb.ComputeBetaDiskGuestOSFeaturesTypeEnum) *beta.DiskGuestOSFeaturesTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.ComputeBetaDiskGuestOSFeaturesTypeEnum_name[int32(e)]; ok {
		e := beta.DiskGuestOSFeaturesTypeEnum(n[len("ComputeBetaDiskGuestOSFeaturesTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToDiskGuestOSFeaturesTypeAltsEnum converts a DiskGuestOSFeaturesTypeAltsEnum enum from its proto representation.
func ProtoToComputeBetaDiskGuestOSFeaturesTypeAltsEnum(e betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum) *beta.DiskGuestOSFeaturesTypeAltsEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum_name[int32(e)]; ok {
		e := beta.DiskGuestOSFeaturesTypeAltsEnum(n[len("ComputeBetaDiskGuestOSFeaturesTypeAltsEnum"):])
		return &e
	}
	return nil
}

// ProtoToDiskGuestOSFeature converts a DiskGuestOSFeature resource from its proto representation.
func ProtoToComputeBetaDiskGuestOSFeature(p *betapb.ComputeBetaDiskGuestOSFeature) *beta.DiskGuestOSFeature {
	if p == nil {
		return nil
	}
	obj := &beta.DiskGuestOSFeature{
		Type: ProtoToComputeBetaDiskGuestOSFeatureTypeEnum(p.GetType()),
	}
	// Dereferencing the converter's result is safe here: GetTypeAlt() only
	// yields defined (non-zero) enum values in well-formed protos.
	for _, r := range p.GetTypeAlt() {
		obj.TypeAlt = append(obj.TypeAlt, *ProtoToComputeBetaDiskGuestOSFeatureTypeAltEnum(r))
	}
	return obj
}

// ProtoToDiskEncryptionKey converts a DiskEncryptionKey resource from its proto representation.
// dcl.StringOrNil maps the proto's empty string to a nil pointer.
func ProtoToComputeBetaDiskEncryptionKey(p *betapb.ComputeBetaDiskEncryptionKey) *beta.DiskEncryptionKey {
	if p == nil {
		return nil
	}
	obj := &beta.DiskEncryptionKey{
		RawKey:               dcl.StringOrNil(p.RawKey),
		KmsKeyName:           dcl.StringOrNil(p.KmsKeyName),
		Sha256:               dcl.StringOrNil(p.Sha256),
		KmsKeyServiceAccount: dcl.StringOrNil(p.KmsKeyServiceAccount),
	}
	return obj
}

// ProtoToDiskGuestOSFeatures converts a DiskGuestOSFeatures resource from its proto representation.
func ProtoToComputeBetaDiskGuestOSFeatures(p *betapb.ComputeBetaDiskGuestOSFeatures) *beta.DiskGuestOSFeatures {
	if p == nil {
		return nil
	}
	obj := &beta.DiskGuestOSFeatures{
		Type: ProtoToComputeBetaDiskGuestOSFeaturesTypeEnum(p.GetType()),
	}
	for _, r := range p.GetTypeAlts() {
		obj.TypeAlts = append(obj.TypeAlts, *ProtoToComputeBetaDiskGuestOSFeaturesTypeAltsEnum(r))
	}
	return obj
}

// ProtoToDisk converts a Disk resource from its proto representation.
// ProtoToDisk maps every scalar field through dcl.*OrNil (empty proto value
// becomes nil pointer) and each repeated/nested field through its dedicated
// converter.
func ProtoToDisk(p *betapb.ComputeBetaDisk) *beta.Disk {
	obj := &beta.Disk{
		SelfLink:                    dcl.StringOrNil(p.SelfLink),
		Description:                 dcl.StringOrNil(p.Description),
		DiskEncryptionKey:           ProtoToComputeBetaDiskEncryptionKey(p.GetDiskEncryptionKey()),
		LabelFingerprint:            dcl.StringOrNil(p.LabelFingerprint),
		Name:                        dcl.StringOrNil(p.Name),
		Region:                      dcl.StringOrNil(p.Region),
		SizeGb:                      dcl.Int64OrNil(p.SizeGb),
		SourceImage:                 dcl.StringOrNil(p.SourceImage),
		SourceImageEncryptionKey:    ProtoToComputeBetaDiskEncryptionKey(p.GetSourceImageEncryptionKey()),
		SourceImageId:               dcl.StringOrNil(p.SourceImageId),
		SourceSnapshot:              dcl.StringOrNil(p.SourceSnapshot),
		SourceSnapshotEncryptionKey: ProtoToComputeBetaDiskEncryptionKey(p.GetSourceSnapshotEncryptionKey()),
		SourceSnapshotId:            dcl.StringOrNil(p.SourceSnapshotId),
		Type:                        dcl.StringOrNil(p.Type),
		Zone:                        dcl.StringOrNil(p.Zone),
		Project:                     dcl.StringOrNil(p.Project),
		Id:                          dcl.Int64OrNil(p.Id),
		Status:                      ProtoToComputeBetaDiskStatusEnum(p.GetStatus()),
		Options:                     dcl.StringOrNil(p.Options),
		LastAttachTimestamp:         dcl.StringOrNil(p.LastAttachTimestamp),
		LastDetachTimestamp:         dcl.StringOrNil(p.LastDetachTimestamp),
		PhysicalBlockSizeBytes:      dcl.Int64OrNil(p.PhysicalBlockSizeBytes),
		SourceDisk:                  dcl.StringOrNil(p.SourceDisk),
		SourceDiskId:                dcl.StringOrNil(p.SourceDiskId),
		Location:                    dcl.StringOrNil(p.Location),
	}
	for _, r := range p.GetGuestOsFeature() {
		obj.GuestOSFeature = append(obj.GuestOSFeature, *ProtoToComputeBetaDiskGuestOSFeature(r))
	}
	for _, r := range p.GetLicense() {
		obj.License = append(obj.License, r)
	}
	for _, r := range p.GetReplicaZones() {
		obj.ReplicaZones = append(obj.ReplicaZones, r)
	}
	for _, r := range p.GetResourcePolicy() {
		obj.ResourcePolicy = append(obj.ResourcePolicy, r)
	}
	for _, r := range p.GetLicenses() {
		obj.Licenses = append(obj.Licenses, r)
	}
	for _, r := range p.GetGuestOsFeatures() {
		obj.GuestOSFeatures = append(obj.GuestOSFeatures, *ProtoToComputeBetaDiskGuestOSFeatures(r))
	}
	for _, r := range p.GetUsers() {
		obj.Users = append(obj.Users, r)
	}
	for _, r := range p.GetLicenseCodes() {
		obj.LicenseCodes = append(obj.LicenseCodes, r)
	}
	for _, r := range p.GetResourcePolicies() {
		obj.ResourcePolicies = append(obj.ResourcePolicies, r)
	}
	return obj
}

// DiskGuestOSFeatureTypeEnumToProto converts a DiskGuestOSFeatureTypeEnum enum to its proto representation.
// The reverse of the ProtoTo* converters: the DCL enum string is prefixed
// with the enum type name and looked up in the proto value map; nil and
// unknown values map to proto value 0 (unspecified).
func ComputeBetaDiskGuestOSFeatureTypeEnumToProto(e *beta.DiskGuestOSFeatureTypeEnum) betapb.ComputeBetaDiskGuestOSFeatureTypeEnum {
	if e == nil {
		return betapb.ComputeBetaDiskGuestOSFeatureTypeEnum(0)
	}
	if v, ok := betapb.ComputeBetaDiskGuestOSFeatureTypeEnum_value["DiskGuestOSFeatureTypeEnum"+string(*e)]; ok {
		return betapb.ComputeBetaDiskGuestOSFeatureTypeEnum(v)
	}
	return betapb.ComputeBetaDiskGuestOSFeatureTypeEnum(0)
}

// DiskGuestOSFeatureTypeAltEnumToProto converts a DiskGuestOSFeatureTypeAltEnum enum to its proto representation.
func ComputeBetaDiskGuestOSFeatureTypeAltEnumToProto(e *beta.DiskGuestOSFeatureTypeAltEnum) betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum {
	if e == nil {
		return betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum(0)
	}
	if v, ok := betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum_value["DiskGuestOSFeatureTypeAltEnum"+string(*e)]; ok {
		return betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum(v)
	}
	return betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum(0)
}

// DiskStatusEnumToProto converts a DiskStatusEnum enum to its proto representation.
func ComputeBetaDiskStatusEnumToProto(e *beta.DiskStatusEnum) betapb.ComputeBetaDiskStatusEnum {
	if e == nil {
		return betapb.ComputeBetaDiskStatusEnum(0)
	}
	if v, ok := betapb.ComputeBetaDiskStatusEnum_value["DiskStatusEnum"+string(*e)]; ok {
		return betapb.ComputeBetaDiskStatusEnum(v)
	}
	return betapb.ComputeBetaDiskStatusEnum(0)
}

// DiskGuestOSFeaturesTypeEnumToProto converts a DiskGuestOSFeaturesTypeEnum enum to its proto representation.
// nil and unknown values map to proto value 0 (unspecified), mirroring the
// sibling *EnumToProto converters.
func ComputeBetaDiskGuestOSFeaturesTypeEnumToProto(e *beta.DiskGuestOSFeaturesTypeEnum) betapb.ComputeBetaDiskGuestOSFeaturesTypeEnum {
	if e == nil {
		return betapb.ComputeBetaDiskGuestOSFeaturesTypeEnum(0)
	}
	if v, ok := betapb.ComputeBetaDiskGuestOSFeaturesTypeEnum_value["DiskGuestOSFeaturesTypeEnum"+string(*e)]; ok {
		return betapb.ComputeBetaDiskGuestOSFeaturesTypeEnum(v)
	}
	return betapb.ComputeBetaDiskGuestOSFeaturesTypeEnum(0)
}

// DiskGuestOSFeaturesTypeAltsEnumToProto converts a DiskGuestOSFeaturesTypeAltsEnum enum to its proto representation.
func ComputeBetaDiskGuestOSFeaturesTypeAltsEnumToProto(e *beta.DiskGuestOSFeaturesTypeAltsEnum) betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum {
	if e == nil {
		return betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum(0)
	}
	if v, ok := betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum_value["DiskGuestOSFeaturesTypeAltsEnum"+string(*e)]; ok {
		return betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum(v)
	}
	return betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum(0)
}

// DiskGuestOSFeatureToProto converts a DiskGuestOSFeature resource to its proto representation.
func ComputeBetaDiskGuestOSFeatureToProto(o *beta.DiskGuestOSFeature) *betapb.ComputeBetaDiskGuestOSFeature {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaDiskGuestOSFeature{
		Type: ComputeBetaDiskGuestOSFeatureTypeEnumToProto(o.Type),
	}
	// Repeated enums are looked up directly in the proto value map; unknown
	// values become 0 (unspecified).
	for _, r := range o.TypeAlt {
		p.TypeAlt = append(p.TypeAlt, betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum(betapb.ComputeBetaDiskGuestOSFeatureTypeAltEnum_value[string(r)]))
	}
	return p
}

// DiskEncryptionKeyToProto converts a DiskEncryptionKey resource to its proto representation.
// dcl.ValueOrEmptyString maps a nil pointer back to the proto's empty string.
func ComputeBetaDiskEncryptionKeyToProto(o *beta.DiskEncryptionKey) *betapb.ComputeBetaDiskEncryptionKey {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaDiskEncryptionKey{
		RawKey:               dcl.ValueOrEmptyString(o.RawKey),
		KmsKeyName:           dcl.ValueOrEmptyString(o.KmsKeyName),
		Sha256:               dcl.ValueOrEmptyString(o.Sha256),
		KmsKeyServiceAccount: dcl.ValueOrEmptyString(o.KmsKeyServiceAccount),
	}
	return p
}

// DiskGuestOSFeaturesToProto converts a DiskGuestOSFeatures resource to its proto representation.
func ComputeBetaDiskGuestOSFeaturesToProto(o *beta.DiskGuestOSFeatures) *betapb.ComputeBetaDiskGuestOSFeatures {
	if o == nil {
		return nil
	}
	p := &betapb.ComputeBetaDiskGuestOSFeatures{
		Type: ComputeBetaDiskGuestOSFeaturesTypeEnumToProto(o.Type),
	}
	for _, r := range o.TypeAlts {
		p.TypeAlts = append(p.TypeAlts, betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum(betapb.ComputeBetaDiskGuestOSFeaturesTypeAltsEnum_value[string(r)]))
	}
	return p
}

// DiskToProto converts a Disk resource to its proto representation.
// DiskToProto is the inverse of ProtoToDisk: nil scalar pointers become the
// proto's zero values via dcl.ValueOrEmpty*.
func DiskToProto(resource *beta.Disk) *betapb.ComputeBetaDisk {
	p := &betapb.ComputeBetaDisk{
		SelfLink:                    dcl.ValueOrEmptyString(resource.SelfLink),
		Description:                 dcl.ValueOrEmptyString(resource.Description),
		DiskEncryptionKey:           ComputeBetaDiskEncryptionKeyToProto(resource.DiskEncryptionKey),
		LabelFingerprint:            dcl.ValueOrEmptyString(resource.LabelFingerprint),
		Name:                        dcl.ValueOrEmptyString(resource.Name),
		Region:                      dcl.ValueOrEmptyString(resource.Region),
		SizeGb:                      dcl.ValueOrEmptyInt64(resource.SizeGb),
		SourceImage:                 dcl.ValueOrEmptyString(resource.SourceImage),
		SourceImageEncryptionKey:    ComputeBetaDiskEncryptionKeyToProto(resource.SourceImageEncryptionKey),
		SourceImageId:               dcl.ValueOrEmptyString(resource.SourceImageId),
		SourceSnapshot:              dcl.ValueOrEmptyString(resource.SourceSnapshot),
		SourceSnapshotEncryptionKey: ComputeBetaDiskEncryptionKeyToProto(resource.SourceSnapshotEncryptionKey),
		SourceSnapshotId:            dcl.ValueOrEmptyString(resource.SourceSnapshotId),
		Type:                        dcl.ValueOrEmptyString(resource.Type),
		Zone:                        dcl.ValueOrEmptyString(resource.Zone),
		Project:                     dcl.ValueOrEmptyString(resource.Project),
		Id:                          dcl.ValueOrEmptyInt64(resource.Id),
		Status:                      ComputeBetaDiskStatusEnumToProto(resource.Status),
		Options:                     dcl.ValueOrEmptyString(resource.Options),
		LastAttachTimestamp:         dcl.ValueOrEmptyString(resource.LastAttachTimestamp),
		LastDetachTimestamp:         dcl.ValueOrEmptyString(resource.LastDetachTimestamp),
		PhysicalBlockSizeBytes:      dcl.ValueOrEmptyInt64(resource.PhysicalBlockSizeBytes),
		SourceDisk:                  dcl.ValueOrEmptyString(resource.SourceDisk),
		SourceDiskId:                dcl.ValueOrEmptyString(resource.SourceDiskId),
		Location:                    dcl.ValueOrEmptyString(resource.Location),
	}
	// Taking &r is safe here: the pointer is consumed immediately by the
	// ToProto call and not retained past the iteration.
	for _, r := range resource.GuestOSFeature {
		p.GuestOsFeature = append(p.GuestOsFeature, ComputeBetaDiskGuestOSFeatureToProto(&r))
	}
	for _, r := range resource.License {
		p.License = append(p.License, r)
	}
	for _, r := range resource.ReplicaZones {
		p.ReplicaZones = append(p.ReplicaZones, r)
	}
	for _, r := range resource.ResourcePolicy {
		p.ResourcePolicy = append(p.ResourcePolicy, r)
	}
	for _, r := range resource.Licenses {
		p.Licenses = append(p.Licenses, r)
	}
	for _, r := range resource.GuestOSFeatures {
		p.GuestOsFeatures = append(p.GuestOsFeatures, ComputeBetaDiskGuestOSFeaturesToProto(&r))
	}
	for _, r := range resource.Users {
		p.Users = append(p.Users, r)
	}
	for _, r := range resource.LicenseCodes {
		p.LicenseCodes = append(p.LicenseCodes, r)
	}
	for _, r := range resource.ResourcePolicies {
		p.ResourcePolicies = append(p.ResourcePolicies, r)
	}
	return p
}

// applyDisk converts the request resource to DCL form, applies it with the
// given client, and converts the result back to proto.
func (s *DiskServer) applyDisk(ctx context.Context, c *beta.Client, request *betapb.ApplyComputeBetaDiskRequest) (*betapb.ComputeBetaDisk, error) {
	p := ProtoToDisk(request.GetResource())
	res, err := c.ApplyDisk(ctx, p)
	if err != nil {
		return nil, err
	}
	r := DiskToProto(res)
	return r, nil
}

// ApplyDisk handles the gRPC request by passing it to the underlying Disk Apply() method.
func (s *DiskServer) ApplyComputeBetaDisk(ctx context.Context, request *betapb.ApplyComputeBetaDiskRequest) (*betapb.ComputeBetaDisk, error) {
	cl, err := createConfigDisk(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyDisk(ctx, cl, request)
}

// DeleteDisk handles the gRPC request by passing it to the underlying Disk Delete() method.
func (s *DiskServer) DeleteComputeBetaDisk(ctx context.Context, request *betapb.DeleteComputeBetaDiskRequest) (*emptypb.Empty, error) {
	cl, err := createConfigDisk(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteDisk(ctx, ProtoToDisk(request.GetResource()))
}

// ListComputeBetaDisk handles the gRPC request by passing it to the underlying DiskList() method.
func (s *DiskServer) ListComputeBetaDisk(ctx context.Context, request *betapb.ListComputeBetaDiskRequest) (*betapb.ListComputeBetaDiskResponse, error) {
	cl, err := createConfigDisk(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListDisk(ctx, request.Project, request.Location)
	if err != nil {
		return nil, err
	}
	// Convert each DCL resource back to its proto form for the response.
	var protos []*betapb.ComputeBetaDisk
	for _, r := range resources.Items {
		rp := DiskToProto(r)
		protos = append(protos, rp)
	}
	return &betapb.ListComputeBetaDiskResponse{Items: protos}, nil
}

// createConfigDisk builds a beta compute client authenticated with the given
// service-account credentials file.
func createConfigDisk(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
package core func NewVector(args ...Type) *Type { slice := make([]Type, 0) for _, arg := range args { slice = append(slice, arg) } return &Type{Vector: &slice} } func (node *Type) IsVector() bool { return node.Vector != nil }
package core

import (
	"fmt"
	"sync"
	"time"
)

// Space is a read-only lookup of named Sources.
type Space interface {
	Get(key string) (Source, bool)
}

// Center owns the global Source nodes and the named Sessions built on them.
// NOTE(review): the maps are never initialized here — confirm a constructor
// elsewhere allocates them before NewSession/SetGlobalNode are called.
type Center struct {
	nodes    map[string]Source
	sessions map[string]*Session
}

// NewSession registers and returns a new Session under name.
// A duplicate name is rejected with an error.
func (c *Center) NewSession(name string) (*Session, error) {
	if _, ok := c.sessions[name]; ok {
		return nil, fmt.Errorf("Session %s already exists! (duplicate named session is not allowed!)", name)
	}
	// The original assigned s.parent twice (once in the literal, once
	// after); a single assignment suffices.
	s := &Session{
		parent: c,
	}
	c.sessions[name] = s
	return s, nil
}

// Get returns the global Source registered under key.
func (c *Center) Get(key string) (Source, bool) {
	src, ok := c.nodes[key]
	return src, ok
}

// SetGlobalNode registers (or replaces) the Source stored under key.
func (c *Center) SetGlobalNode(key string, v Source) {
	c.nodes[key] = v
}

// Shutdown shuts down every session and node concurrently and waits for
// them all, giving up after timeout.
func (c *Center) Shutdown(timeout time.Duration) {
	wg := sync.WaitGroup{}
	wg.Add(len(c.nodes) + len(c.sessions))
	// BUG FIX: the loop variables are passed to the goroutines explicitly.
	// Before Go 1.22 every iteration shared a single variable, so the
	// original closures could all end up shutting down the last
	// session/node, leaving the rest running and the WaitGroup satisfied
	// by duplicate shutdowns.
	for _, session := range c.sessions {
		go func(s *Session) {
			s.Shutdown()
			wg.Done()
		}(session)
	}
	for _, node := range c.nodes {
		go func(n Source) {
			n.Shutdown()
			wg.Done()
		}(node)
	}
	cancel := make(chan struct{})
	go func() {
		wg.Wait()
		cancel <- struct{}{}
	}()
	select {
	case <-cancel:
		fmt.Println("Successfully shutdown")
	case <-time.After(timeout):
		fmt.Println("Shutdown timeout")
	}
}
package scheduler

import (
	"github.com/EmpregoLigado/cron-srv/mock"
	"github.com/EmpregoLigado/cron-srv/models"
	"testing"
)

// TestScheduleAll verifies that every event in the repository can be scheduled.
func TestScheduleAll(t *testing.T) {
	repo := mock.NewRepo()
	sched := New()

	if err := sched.ScheduleAll(repo); err != nil {
		t.Errorf("Expected to schedule all events %s", err)
	}
}

// TestSchedulerCreate verifies that a single cron event can be scheduled.
func TestSchedulerCreate(t *testing.T) {
	sched := New()
	event := &models.Event{Id: 1, Expression: "* * * * *"}

	if err := sched.Create(event); err != nil {
		t.Errorf("Expected to schedule a cron %s", err)
	}
}

// TestSchedulerFind verifies that a scheduled cron can be looked up by id.
func TestSchedulerFind(t *testing.T) {
	sched := New()
	event := &models.Event{Id: 1, Expression: "* * * * *"}

	if err := sched.Create(event); err != nil {
		t.Errorf("Expected to schedule a cron %s", err)
	}

	if _, err := sched.Find(event.Id); err != nil {
		t.Errorf("Expected to find a cron %s", err)
	}
}

// TestSchedulerUpdate verifies that a scheduled cron can be updated in place.
func TestSchedulerUpdate(t *testing.T) {
	sched := New()
	event := &models.Event{Id: 1, Expression: "* * * * *"}

	if err := sched.Create(event); err != nil {
		t.Errorf("Expected to schedule a cron %s", err)
	}

	event.Status = "active"
	if err := sched.Update(event); err != nil {
		t.Errorf("Expected to update a scheduled cron %s", err)
	}
}

// TestSchedulerDelete verifies that a scheduled cron can be removed by id.
func TestSchedulerDelete(t *testing.T) {
	sched := New()
	event := &models.Event{Id: 1, Expression: "* * * * *"}

	if err := sched.Create(event); err != nil {
		t.Errorf("Expected to schedule a cron %s", err)
	}

	if err := sched.Delete(event.Id); err != nil {
		t.Errorf("Expected to delete a scheduled cron %s", err)
	}
}
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/gorilla/mux"
)

// GetTestHandler builds a Handler backed by an in-memory database for tests.
func GetTestHandler() *Handler {
	db := newDB(":memory:")
	h := Handler{}
	h.initialise(db)
	return &h
}

// newRequest wraps http.NewRequest and fails the test on error.
func newRequest(t *testing.T, method, url string, body io.Reader) *http.Request {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		t.Fatal(err)
	}
	return req
}

// serveRequest runs the handler func against req and records the response.
func serveRequest(h http.HandlerFunc, req *http.Request) *httptest.ResponseRecorder {
	rr := httptest.NewRecorder()
	handler := http.HandlerFunc(h)
	handler.ServeHTTP(rr, req)
	return rr
}

// TestHandler_postURL posts a JSON body and expects 201 Created plus a
// response containing a "message" field.
func TestHandler_postURL(t *testing.T) {
	h := GetTestHandler()
	var jsonStr = []byte(`{"url":"http://google.com"}`)
	req := newRequest(t, "POST", "/url", bytes.NewBuffer(jsonStr))
	rr := serveRequest(h.postURL, req)
	t.Run("should return a status created", func(t *testing.T) {
		if status := rr.Code; status != http.StatusCreated {
			t.Errorf("handler returned wrong status code: got %v want %v", status, http.StatusCreated)
		}
	})
	t.Run("should return a message", func(t *testing.T) {
		expected := `{"message":`
		if !strings.Contains(rr.Body.String(), expected) {
			t.Errorf("postURL() returned an error: %s", rr.Body.String())
		}
	})
}

// TestHandler_getURL covers both the miss path (404) and the hit path
// (301 redirect to the stored URL).
func TestHandler_getURL(t *testing.T) {
	h := GetTestHandler()
	t.Run("should return a not found with wrong parameter", func(t *testing.T) {
		req := newRequest(t, "GET", "/somekey", nil)
		rr := serveRequest(h.getURL, req)
		if status := rr.Code; status != http.StatusNotFound {
			t.Errorf("handler returned wrong status code: got %v want %v", status, http.StatusNotFound)
		}
	})
	t.Run("and we make a get request", func(t *testing.T) {
		key := "randomkey"
		redirect := "http://google.com"
		err := h.storage.Set(key, redirect)
		if err != nil {
			t.Errorf("h.storage.Set() caused an error: %v", err)
		}
		req := newRequest(t, "GET", fmt.Sprintf("/%s", key), nil)
		// getURL reads the key from mux route vars, so inject them directly
		// instead of going through a router.
		req = mux.SetURLVars(req, map[string]string{
			"key": key,
		})
		rr := serveRequest(h.getURL, req)
		t.Run("should return a redirect status", func(t *testing.T) {
			if status := rr.Code; status != http.StatusMovedPermanently {
				t.Errorf("handler returned wrong status code: got %v want %v", status, http.StatusMovedPermanently)
			}
		})
		t.Run("and should be redirected correctly", func(t *testing.T) {
			loc, err := rr.Result().Location()
			if err != nil {
				t.Errorf("request location had an error: %v", err)
			}
			if str := loc.String(); str != redirect {
				t.Errorf("not corected redirect: got %s wanted %s", str, redirect)
			}
		})
	})
}
package main

/*
 * @lc app=leetcode id=718 lang=golang
 *
 * [718] Maximum Length of Repeated Subarray
 */

// Hint:
// 1 1 2 3 4
// 1 2 3 4 5

// findLength returns the length of the longest common *contiguous*
// subarray of A and B (longest-common-substring DP: dp[i][j] is the
// length of the common suffix of A[:i] and B[:j]).
func findLength(A []int, B []int) int {
	best := 0
	dp := make([][]int, len(A)+1)
	for i := range dp {
		dp[i] = make([]int, len(B)+1)
	}
	for i := 1; i <= len(A); i++ {
		for j := 1; j <= len(B); j++ {
			if A[i-1] == B[j-1] {
				dp[i][j] = dp[i-1][j-1] + 1
				if dp[i][j] > best {
					best = dp[i][j]
				}
			}
		}
	}
	return best
}

// findLength2 returns the length of the longest common (not necessarily
// contiguous) subsequence of A and B — the classic LCS recurrence.
func findLength2(A []int, B []int) int {
	dp := make([][]int, len(A)+1)
	for i := range dp {
		dp[i] = make([]int, len(B)+1)
	}
	for i := 1; i <= len(A); i++ {
		for j := 1; j <= len(B); j++ {
			switch {
			case A[i-1] == B[j-1]:
				dp[i][j] = dp[i-1][j-1] + 1
			case dp[i][j-1] > dp[i-1][j]:
				dp[i][j] = dp[i][j-1]
			default:
				dp[i][j] = dp[i-1][j]
			}
		}
	}
	return dp[len(A)][len(B)]
}
package sdl2

import (
	"fmt"

	"github.com/veandco/go-sdl2/sdl"
	"github.com/veandco/go-sdl2/ttf"

	"github.com/evelritual/goose/graphics"
)

// Font wraps an SDL TTF Font and allows drawing to screen.
type Font struct {
	renderer *sdl.Renderer // reference to renderer to use
	font     *ttf.Font
}

// NewFont opens a TTF font and sets it up for use in rendering.
func (s *SDL2) NewFont(fontPath string, size int) (graphics.Font, error) {
	f, err := ttf.OpenFont(fontPath, size)
	if err != nil {
		return nil, fmt.Errorf("error loading sdl font: %v", err)
	}
	return &Font{
		renderer: s.renderer,
		font:     f,
	}, nil
}

// SetFont loads a new font. All future calls to Texture will use the newly
// loaded font. The previously held font is closed only after the new one
// opened successfully, so a failed load leaves the old font usable.
func (f *Font) SetFont(fontPath string, size int) error {
	font, err := ttf.OpenFont(fontPath, size)
	if err != nil {
		return fmt.Errorf("error loading sdl font: %v", err)
	}
	f2 := f.font
	defer f2.Close() // release the old font after the swap below
	f.font = font
	return nil
}

// Texture loads the font as a drawable texture. Texture must be closed
// manually.
func (f *Font) Texture(text string, color graphics.Color) (graphics.Texture, error) {
	c := sdl.Color{
		R: color.R,
		G: color.G,
		B: color.B,
		A: color.A,
	}
	// Render to a temporary surface, then upload it as a texture; the
	// surface is freed as soon as the texture has been created.
	t, err := f.font.RenderUTF8Blended(text, c)
	if err != nil {
		return nil, fmt.Errorf("error rendering sdl text: %v", err)
	}
	defer t.Free()
	tex, err := f.renderer.CreateTextureFromSurface(t)
	if err != nil {
		return nil, fmt.Errorf("error creating sdl surface for font: %v", err)
	}
	_, _, w, h, err := tex.Query()
	if err != nil {
		return nil, fmt.Errorf("error querying sdl font info: %v", err)
	}
	return &Texture{
		renderer: f.renderer,
		image:    nil,
		texture:  tex,
		w:        w,
		h:        h,
	}, nil
}

// Close releases the SDL font resource.
func (f *Font) Close() error {
	f.font.Close()
	return nil
}
package algorand

import (
	"bytes"

	appComm "github.com/HNB-ECO/HNB-Blockchain/HNB/appMgr/common"
	"github.com/HNB-ECO/HNB-Blockchain/HNB/config"
	"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/bftGroup/vrf"
	"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/msgHandler"
	"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/types"
	"github.com/HNB-ECO/HNB-Blockchain/HNB/msp"
	"github.com/HNB-ECO/HNB-Blockchain/HNB/txpool"
)

// isProposerByVrf reports whether this node is the VRF-selected proposer for
// (height, round), and returns the selected validator either way.
func isProposerByVrf(h *msgHandler.TDMMsgHandler, height uint64, round int32) (bool, *types.Validator) {
	ConsLog.Debugf(LOGTABLE_CONS, "#(%v-%v) calc proposer preVrfValue %x, vals %s", height, round, h.LastCommitState.PrevVRFValue, h.Validators)
	proposer := vrf.CalcProposerByVRF(h.LastCommitState.PrevVRFValue, h.Validators, height, round)
	return bytes.Equal(h.GetDigestAddr(), proposer.Address), proposer
}

// geneProposalBlkByVrf assembles a proposal block for the handler's current
// height: it builds the commit for the previous block, pulls transactions
// from the pool, computes a fresh VRF value/proof and produces the block and
// its part set. The bare returns leave both named results nil, signalling
// that no proposal could be made.
func geneProposalBlkByVrf(h *msgHandler.TDMMsgHandler) (block *types.Block, blockParts *types.PartSet) {
	var commit *types.Commit
	if h.Height == 1 {
		// We're creating a proposal for the first block.
		// The commit is empty, but not nil.
		commit = &types.Commit{}
	} else if h.LastCommit.HasTwoThirdsMajority() {
		// Make the commit from LastCommit
		commit = h.LastCommit.MakeCommit()
	} else {
		// This shouldn't happen.
		ConsLog.Errorf(LOGTABLE_CONS, "#(%v-%v) enterPropose: Cannot propose anything: No commit for the previous block.", h.Height, h.Round)
		return
	}
	var txs []types.Tx
	txPool, err := txpool.GetTxsFromTXPool(appComm.HNB, 1000)
	if err != nil || len(txPool) == 0 {
		ConsLog.Warningf(LOGTABLE_CONS, "enterPropose: empty txs")
		if !config.Config.CreateEmptyBlocks {
			ConsLog.Infof(LOGTABLE_CONS, "config not create empty")
			return
		}
		txs = make([]types.Tx, 0)
	} else {
		txs, err = types.Tx2TDMTx(txPool)
		if err != nil {
			ConsLog.Errorf(LOGTABLE_CONS, "enterPropose: get original tx err %v", err)
			return
		}
		ConsLog.Infof(LOGTABLE_CONS, "propose tx len %v", len(txs))
	}
	_, val := h.Validators.GetByAddress(h.GetDigestAddr())
	// NOTE(review): a ComputeNewVRF failure is only logged; the block is
	// still built with the (zero) VRF values — confirm this is intended.
	VRFValue, VRFProof, err := h.ComputeNewVRF()
	if err != nil {
		ConsLog.Errorf(LOGTABLE_CONS, "%s", err)
	}
	blkMaterial := &types.BlkMaterial{
		BlkVRFProof: VRFProof,
		BlkVRFValue: VRFValue,
		Height:      h.Height,
		Proposer:    val,
		Commit:      commit,
		NumTxs:      uint64(len(txs)),
		Txs:         txs,
	}
	block, parts := h.LastCommitState.MakeBlockVRF(blkMaterial)
	return block, parts
}

// validateProposalBlkByVrf verifies a proposal block's VRF proof against the
// proposer's public key and the previous block's VRF value; it returns false
// on any verification error.
func validateProposalBlkByVrf(h *msgHandler.TDMMsgHandler, proposalBlk *types.Block) bool {
	proposer := proposalBlk.Validators.Proposer
	VRFValue := proposalBlk.BlkVRFValue
	VRFProof := proposalBlk.BlkVRFProof
	VRFBlkData := &vrf.VRFBlkData{
		PrevVrf:  h.LastCommitState.PrevVRFValue,
		BlockNum: proposalBlk.BlockNum,
	}
	_, val := h.Validators.GetByAddress(proposer.Address)
	pk := msp.StringToBccspKey(val.PubKeyStr)
	VRFVerifySuccess, err := vrf.VerifyVRF4Blk(pk, VRFBlkData, VRFValue, VRFProof, msp.GetAlgType())
	if err != nil {
		return false
	}
	if !VRFVerifySuccess {
		return false
	}
	return true
}
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
)

// ValidateInterPodAffinityArgs validates that InterPodAffinityArgs are correct.
func ValidateInterPodAffinityArgs(args config.InterPodAffinityArgs) error {
	return ValidateHardPodAffinityWeight(field.NewPath("hardPodAffinityWeight"), args.HardPodAffinityWeight)
}

// ValidateHardPodAffinityWeight validates that weight is within allowed range.
func ValidateHardPodAffinityWeight(path *field.Path, w int32) error {
	// The weight must lie in the inclusive range [0, 100].
	const (
		minHardPodAffinityWeight = 0
		maxHardPodAffinityWeight = 100
	)
	if w < minHardPodAffinityWeight || w > maxHardPodAffinityWeight {
		msg := fmt.Sprintf("not in valid range [%d-%d]", minHardPodAffinityWeight, maxHardPodAffinityWeight)
		return field.Invalid(path, w, msg)
	}
	return nil
}

// ValidateNodeLabelArgs validates that NodeLabelArgs are correct.
func ValidateNodeLabelArgs(args config.NodeLabelArgs) error {
	// A label may not be simultaneously required present and required absent,
	// for both the hard lists and the preference lists.
	if err := validateNoConflict(args.PresentLabels, args.AbsentLabels); err != nil {
		return err
	}
	if err := validateNoConflict(args.PresentLabelsPreference, args.AbsentLabelsPreference); err != nil {
		return err
	}
	return nil
}

// validateNoConflict validates that presentLabels and absentLabels do not conflict.
func validateNoConflict(presentLabels []string, absentLabels []string) error {
	// Build a set of the present labels, then reject the first absent label
	// that also appears in it.
	m := make(map[string]struct{}, len(presentLabels))
	for _, l := range presentLabels {
		m[l] = struct{}{}
	}
	for _, l := range absentLabels {
		if _, ok := m[l]; ok {
			return fmt.Errorf("detecting at least one label (e.g., %q) that exist in both the present(%+v) and absent(%+v) label list", l, presentLabels, absentLabels)
		}
	}
	return nil
}

// ValidatePodTopologySpreadArgs validates that PodTopologySpreadArgs are correct.
// It replicates the validation from pkg/apis/core/validation.validateTopologySpreadConstraints
// with an additional check for .labelSelector to be nil.
func ValidatePodTopologySpreadArgs(args *config.PodTopologySpreadArgs) error {
	var allErrs field.ErrorList
	path := field.NewPath("defaultConstraints")
	for i, c := range args.DefaultConstraints {
		p := path.Index(i)
		if c.MaxSkew <= 0 {
			f := p.Child("maxSkew")
			allErrs = append(allErrs, field.Invalid(f, c.MaxSkew, "must be greater than zero"))
		}
		allErrs = append(allErrs, validateTopologyKey(p.Child("topologyKey"), c.TopologyKey)...)
		if err := validateWhenUnsatisfiable(p.Child("whenUnsatisfiable"), c.WhenUnsatisfiable); err != nil {
			allErrs = append(allErrs, err)
		}
		if c.LabelSelector != nil {
			f := field.Forbidden(p.Child("labelSelector"), "constraint must not define a selector, as they deduced for each pod")
			allErrs = append(allErrs, f)
		}
		if err := validateConstraintNotRepeat(path, args.DefaultConstraints, i); err != nil {
			allErrs = append(allErrs, err)
		}
	}
	if len(allErrs) == 0 {
		return nil
	}
	return allErrs.ToAggregate()
}

// validateTopologyKey checks that the topology key is non-empty and a valid
// label name.
func validateTopologyKey(p *field.Path, v string) field.ErrorList {
	var allErrs field.ErrorList
	if len(v) == 0 {
		allErrs = append(allErrs, field.Required(p, "can not be empty"))
	} else {
		allErrs = append(allErrs, metav1validation.ValidateLabelName(v, p)...)
	}
	return allErrs
}

// validateWhenUnsatisfiable checks the action is one of the supported values
// (DoNotSchedule / ScheduleAnyway).
func validateWhenUnsatisfiable(p *field.Path, v v1.UnsatisfiableConstraintAction) *field.Error {
	supportedScheduleActions := sets.NewString(string(v1.DoNotSchedule), string(v1.ScheduleAnyway))
	if len(v) == 0 {
		return field.Required(p, "can not be empty")
	}
	if !supportedScheduleActions.Has(string(v)) {
		return field.NotSupported(p, v, supportedScheduleActions.List())
	}
	return nil
}

// validateConstraintNotRepeat reports a duplicate when any earlier constraint
// shares both TopologyKey and WhenUnsatisfiable with constraints[idx].
func validateConstraintNotRepeat(path *field.Path, constraints []v1.TopologySpreadConstraint, idx int) *field.Error {
	c := &constraints[idx]
	for i := range constraints[:idx] {
		other := &constraints[i]
		if c.TopologyKey == other.TopologyKey && c.WhenUnsatisfiable == other.WhenUnsatisfiable {
			return field.Duplicate(path.Index(idx), fmt.Sprintf("{%v, %v}", c.TopologyKey, c.WhenUnsatisfiable))
		}
	}
	return nil
}

// ValidateRequestedToCapacityRatioArgs validates that RequestedToCapacityRatioArgs are correct.
func ValidateRequestedToCapacityRatioArgs(args config.RequestedToCapacityRatioArgs) error {
	if err := validateFunctionShape(args.Shape); err != nil {
		return err
	}
	if err := validateResourcesNoMax(args.Resources); err != nil {
		return err
	}
	return nil
}

// validateFunctionShape checks the scoring-curve points: non-empty, strictly
// increasing utilization, and both coordinates within their allowed ranges.
func validateFunctionShape(shape []config.UtilizationShapePoint) error {
	const (
		minUtilization = 0
		maxUtilization = 100
		minScore       = 0
		maxScore       = int32(config.MaxCustomPriorityScore)
	)
	if len(shape) == 0 {
		return fmt.Errorf("at least one point must be specified")
	}
	for i := 1; i < len(shape); i++ {
		if shape[i-1].Utilization >= shape[i].Utilization {
			return fmt.Errorf("utilization values must be sorted. Utilization[%d]==%d >= Utilization[%d]==%d", i-1, shape[i-1].Utilization, i, shape[i].Utilization)
		}
	}
	for i, point := range shape {
		if point.Utilization < minUtilization {
			return fmt.Errorf("utilization values must not be less than %d. Utilization[%d]==%d", minUtilization, i, point.Utilization)
		}
		if point.Utilization > maxUtilization {
			return fmt.Errorf("utilization values must not be greater than %d. Utilization[%d]==%d", maxUtilization, i, point.Utilization)
		}
		if point.Score < minScore {
			return fmt.Errorf("score values must not be less than %d. Score[%d]==%d", minScore, i, point.Score)
		}
		if point.Score > maxScore {
			return fmt.Errorf("score values must not be greater than %d. Score[%d]==%d", maxScore, i, point.Score)
		}
	}
	return nil
}

// TODO potentially replace with validateResources
// validateResourcesNoMax enforces only the lower bound on resource weights.
func validateResourcesNoMax(resources []config.ResourceSpec) error {
	for _, r := range resources {
		if r.Weight < 1 {
			return fmt.Errorf("resource %s weight %d must not be less than 1", string(r.Name), r.Weight)
		}
	}
	return nil
}

// ValidateNodeResourcesLeastAllocatedArgs validates that NodeResourcesLeastAllocatedArgs are correct.
func ValidateNodeResourcesLeastAllocatedArgs(args *config.NodeResourcesLeastAllocatedArgs) error {
	return validateResources(args.Resources)
}

// ValidateNodeResourcesMostAllocatedArgs validates that NodeResourcesMostAllocatedArgs are correct.
func ValidateNodeResourcesMostAllocatedArgs(args *config.NodeResourcesMostAllocatedArgs) error {
	return validateResources(args.Resources)
}

// validateResources checks every resource weight lies in (0, 100].
func validateResources(resources []config.ResourceSpec) error {
	for _, resource := range resources {
		if resource.Weight <= 0 {
			return fmt.Errorf("resource Weight of %v should be a positive value, got %v", resource.Name, resource.Weight)
		}
		if resource.Weight > 100 {
			return fmt.Errorf("resource Weight of %v should be less than 100, got %v", resource.Name, resource.Weight)
		}
	}
	return nil
}
package storage

import (
	"bytes"
	"encoding/gob"
	"github.com/ActiveState/log"
	"io/ioutil"
	"os"
	"sync"
)

// exposing these for testing

// Storage persists an arbitrary gob-encodable value to a backing store.
type Storage interface {
	Encode(data interface{}) ([]byte, error)
	Load(data interface{}) error
	Write(buf []byte) error
}

// FileStorage is a Storage backed by a single file on disk.
type FileStorage struct {
	file_path string
	writeLock *sync.Mutex // serializes concurrent Write calls
}

const FILE_MODE = 0666

// NewFileStorage returns a FileStorage persisting to path.
func NewFileStorage(path string) Storage {
	return &FileStorage{
		file_path: path,
		writeLock: &sync.Mutex{},
	}
}

// Encode gob-encodes data into a byte slice.
func (s *FileStorage) Encode(data interface{}) ([]byte, error) {
	m := new(bytes.Buffer)
	enc := gob.NewEncoder(m)
	err := enc.Encode(data)
	if err != nil {
		return nil, err
	}
	return m.Bytes(), nil
}

// Write replaces the file's contents with buf.
func (s *FileStorage) Write(buf []byte) error {
	s.writeLock.Lock()
	defer s.writeLock.Unlock()
	if err := ioutil.WriteFile(s.file_path, buf, FILE_MODE); err != nil {
		return err
	}
	// this extra step to make the file accessible by stackato user
	if err := os.Chmod(s.file_path, FILE_MODE); err != nil {
		return err
	}
	return nil
}

// Load gob-decodes the file's contents into e. If the file does not exist it
// is created empty and e is left untouched.
//
// Fixes over the original: the inner `n, err :=` shadowed the outer err, so
// read/decode failures were logged but Load still returned nil; and the
// *os.File returned by os.Create was never closed (handle leak).
func (s *FileStorage) Load(e interface{}) error {
	if _, err := os.Stat(s.file_path); os.IsNotExist(err) {
		log.Infof("Creating %s since it does not exist", s.file_path)
		f, err := os.Create(s.file_path)
		if err != nil {
			return err
		}
		return f.Close()
	}
	n, err := ioutil.ReadFile(s.file_path)
	if err != nil {
		log.Error(err)
		return err
	}
	dec := gob.NewDecoder(bytes.NewBuffer(n))
	if err := dec.Decode(e); err != nil {
		log.Error(err)
		return err
	}
	return nil
}
package main

/**
 * created: 2019/7/15 15:43
 * By Will Fan
 */

// main sends a value through a channel and prints it.
//
// Bug fix: the original used an unbuffered channel, so `ch <- 1` blocked
// forever (no goroutine was receiving yet) and the program deadlocked with
// "all goroutines are asleep". A buffer of one lets the send complete
// before the subsequent receive.
func main() {
	ch := make(chan int, 1)
	ch <- 1
	println(<-ch)
}
package searching

import (
	"fmt"
)

// BinarySearch returns the index of tosearch in the sorted slice array,
// or 999 when the value is absent (sentinel preserved from the original
// API — callers compare against it).
func BinarySearch(array []int, tosearch int) int {
	fmt.Println("Binary Search")
	return searching(array, 0, len(array)-1, tosearch)
}

// searching performs an iterative binary search over array[left..right],
// replacing the original recursive formulation.
func searching(array []int, left, right, tosearch int) int {
	for left <= right {
		mid := left + (right-left)/2
		switch {
		case array[mid] == tosearch:
			return mid
		case array[mid] > tosearch:
			right = mid - 1
		default:
			left = mid + 1
		}
	}
	return 999
}
package domain

import (
	"time"

	"github.com/gofrs/uuid"
)

// Events describing changes to a crop batch. Each struct is a plain record
// of a single occurrence.

// CropBatchCreated records the creation of a crop batch.
type CropBatchCreated struct {
	UID            uuid.UUID
	BatchID        string
	Status         CropStatus
	Type           CropType
	Container      CropContainer
	InventoryUID   uuid.UUID
	FarmUID        uuid.UUID
	CreatedDate    time.Time
	InitialAreaUID uuid.UUID
	Quantity       int
}

// CropBatchTypeChanged records a change of a batch's crop type.
type CropBatchTypeChanged struct {
	UID  uuid.UUID
	Type CropType
}

// CropBatchInventoryChanged records a change of the inventory item a batch
// is linked to.
type CropBatchInventoryChanged struct {
	UID          uuid.UUID
	InventoryUID uuid.UUID
	BatchID      string
}

// CropBatchContainerChanged records a change of a batch's container.
type CropBatchContainerChanged struct {
	UID       uuid.UUID
	Container CropContainer
}

// CropBatchMoved records the movement of a quantity between two areas.
type CropBatchMoved struct {
	UID                uuid.UUID
	Quantity           int
	SrcAreaUID         uuid.UUID
	DstAreaUID         uuid.UUID
	MovedDate          time.Time
	UpdatedSrcAreaCode string // Values: INITIAL_AREA / MOVED_AREA
	UpdatedSrcArea     interface{}
	UpdatedDstAreaCode string // Values: INITIAL_AREA / MOVED_AREA
	UpdatedDstArea     interface{}
}

// CropBatchHarvested records a harvest taken from a batch.
type CropBatchHarvested struct {
	UID                     uuid.UUID
	CropStatus              string // Values: ACTIVE / ARCHIVED
	HarvestType             string
	HarvestedQuantity       int
	ProducedGramQuantity    float32
	UpdatedHarvestedStorage HarvestedStorage
	HarvestedArea           interface{}
	HarvestedAreaCode       string // Values: INITIAL_AREA / MOVED_AREA
	HarvestDate             time.Time
	Notes                   string
}

// CropBatchDumped records a quantity of a batch being discarded.
type CropBatchDumped struct {
	UID            uuid.UUID
	CropStatus     string // Values: ACTIVE / ARCHIVED
	Quantity       int
	UpdatedTrash   Trash
	DumpedArea     interface{}
	DumpedAreaCode string // Values: INITIAL_AREA / MOVED_AREA
	DumpDate       time.Time
	Notes          string
}

// CropBatchWatered records a watering of a batch in an area.
type CropBatchWatered struct {
	UID           uuid.UUID
	BatchID       string
	ContainerType string
	AreaUID       uuid.UUID
	AreaName      string
	WateringDate  time.Time
}

// CropBatchNoteCreated records a note being attached to a crop.
type CropBatchNoteCreated struct {
	UID         uuid.UUID
	CropUID     uuid.UUID
	Content     string
	CreatedDate time.Time
}

// CropBatchNoteRemoved records a note being removed from a crop.
type CropBatchNoteRemoved struct {
	UID         uuid.UUID
	CropUID     uuid.UUID
	Content     string
	CreatedDate time.Time
}

// CropBatchPhotoCreated records a photo being attached to a crop.
type CropBatchPhotoCreated struct {
	UID         uuid.UUID
	CropUID     uuid.UUID
	Filename    string
	MimeType    string
	Size        int
	Width       int
	Height      int
	Description string
}
package mergeTwoLists type ListNode struct { Val int Next *ListNode } func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode { if l1 == nil { return l2 } if l2 == nil { return l1 } var head *ListNode var tail *ListNode for l1 != nil && l2 != nil { var node *ListNode if l1.Val < l2.Val { node = l1 l1 = l1.Next } else { node = l2 l2 = l2.Next } if head == nil { head = node tail = head } else { tail.Next = node tail = tail.Next } } if l1 != nil { tail.Next = l1 } else { tail.Next = l2 } return head }
package main

import (
	"fmt"
	"log"
	"net/http"

	socketio "github.com/googollee/go-socket.io"
)

// main starts a socket.io server mounted at /socketio/ on :8001 and logs
// connect, error and disconnect events.
func main() {
	fmt.Println("test")
	server, err := socketio.NewServer(nil)
	if err != nil {
		log.Fatal(err)
	}
	server.OnConnect("/", func(s socketio.Conn) error {
		log.Println("connected:", s.ID())
		// NOTE(review): Emit's first argument is normally the event name;
		// emitting the connection ID as the event looks suspicious — confirm
		// the client actually listens for it.
		s.Emit(s.ID())
		return nil
	})
	server.OnError("/", func(s socketio.Conn, e error) {
		fmt.Println("meet error:", e)
	})
	server.OnDisconnect("/", func(s socketio.Conn, reason string) {
		fmt.Println("closed", reason)
	})
	go server.Serve()
	defer server.Close()
	http.Handle("/socketio/", server)
	log.Println("Serving at localhost:8001...")
	log.Fatal(http.ListenAndServe(":8001", nil))
}
package main

import (
	"io/ioutil"
	"os"

	"github.com/yamil-rivera/flowit/internal/command"
	"github.com/yamil-rivera/flowit/internal/config"
	"github.com/yamil-rivera/flowit/internal/fsm"
	"github.com/yamil-rivera/flowit/internal/io"
	"github.com/yamil-rivera/flowit/internal/repository"
	"github.com/yamil-rivera/flowit/internal/runtime"
	"github.com/yamil-rivera/flowit/internal/workflow"
)

// TODO: Make flowit concurrent

// main loads the workflow definition, wires the services together and runs
// the CLI, exiting with status 1 on any error.
func main() {
	// TODO: Get this from a default or from the env
	workflowDefinition, err := config.Load(io.GetProjectRootDir() + "/samples/test.yaml")
	optionalExit(err)
	repositoryService := repository.NewService()
	workflowService := workflow.NewService()
	fsmServiceFactory := fsm.NewServiceFactory()
	runtimeService := runtime.NewService(repositoryService, fsmServiceFactory, workflowService)
	commandService := command.NewService(runtimeService, fsmServiceFactory, repositoryService, workflowDefinition)
	version, err := cliVersion()
	optionalExit(err)
	optionalExit(commandService.RegisterCommands(version))
	optionalExit(commandService.Execute())
}

// cliVersion reads the CLI version string from the cmd/version file.
func cliVersion() (string, error) {
	version, err := ioutil.ReadFile(io.GetProjectRootDir() + "/cmd/version")
	return string(version), err
}

// optionalExit logs and prints err and exits with status 1; a nil err is a
// no-op.
func optionalExit(err error) {
	if err != nil {
		io.Logger.Errorf("%+v", err)
		io.Printf("%v\n", err)
		// TODO: Do not show "exit status 1"
		// TODO: Return exit status of failed command
		os.Exit(1)
	}
}
/*
 * Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
 *
 * This program and the accompanying materials are made available under
 * the terms of the under the Apache License, Version 2.0 (the "License”);
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package format_test

import (
	"fmt"

	"github.com/fatih/color"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/pivotal-cf/spring-cloud-services-cli-plugin/format"
)

// Specs for format.Table rendering: titles print in bold and the first body
// column prints in high-intensity cyan.
var _ = Describe("Table", func() {
	var tab *format.Table
	Context("when the table has no body", func() {
		BeforeEach(func() {
			tab = &format.Table{}
			tab.Entitle([]string{"a", "b"})
		})
		It("should output only the title in bold", func() {
			bold := color.New(color.Bold).SprintfFunc()
			Expect(tab.String()).To(ContainSubstring(fmt.Sprintf("%s %s \n", bold("a"), bold("b"))))
		})
	})
	Context("when the table has a body", func() {
		BeforeEach(func() {
			tab = &format.Table{}
			tab.Entitle([]string{"a", "bb", "c"})
			tab.AddRow([]string{"aa", "b", "cc"})
		})
		It("should output the title and row in the correct colors", func() {
			bold := color.New(color.Bold).SprintfFunc()
			cyan := color.New(color.FgHiCyan).SprintfFunc()
			Expect(tab.String()).To(ContainSubstring(fmt.Sprintf("%s %s %s \n%s %s %s \n", bold("a"), bold("bb"), bold("c"), cyan("aa"), "b", "cc")))
		})
	})
})
package cachingloader

import (
	"context"
	"github.com/ns1/jsonschema2go/pkg/gen"
	"log"
	"net/url"
	"sync"
)

// NewSimple returns a new thread-safe loader which caches requests by URL
// and can handle either file system or http URIs. When debug is enabled on
// the request context, cache misses are logged.
func NewSimple() gen.Loader {
	return &loader{
		cache:  make(map[string]*gen.Schema),
		loader: gen.NewLoader(),
	}
}

// loader memoizes the results of a wrapped gen.Loader.
type loader struct {
	cache  map[string]*gen.Schema // keyed by the full URL string
	mu     sync.RWMutex           // guards cache
	loader gen.Loader             // delegate used on a cache miss
}

// Close implements the Loader's closer; there is nothing to release.
func (l *loader) Close() error {
	return nil
}

// Load returns a schema for the provided URL, either filesystem or HTTP,
// serving repeated requests from the in-memory cache.
func (l *loader) Load(ctx context.Context, u *url.URL) (*gen.Schema, error) {
	k := u.String()
	l.mu.RLock()
	v := l.cache[k]
	l.mu.RUnlock()
	if v != nil {
		return v, nil
	}
	if gen.IsDebug(ctx) {
		log.Printf("cache miss -- requesting %v", u)
	}
	schema, err := l.loader.Load(ctx, u)
	if err != nil {
		return nil, err
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	// Re-check under the write lock: a concurrent goroutine may have loaded
	// the same URL in the meantime; the first stored schema wins so all
	// callers share one instance.
	if _, ok := l.cache[k]; !ok {
		l.cache[k] = schema
	}
	return l.cache[k], nil
}
package main

import (
	"fmt"
)

// main prints each index of nrs followed by " even" or " odd" depending on
// the element's parity.
func main() {
	nrs := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	for i := range nrs {
		parity := " odd"
		if nrs[i]%2 == 0 {
			parity = " even"
		}
		fmt.Println(i, parity)
	}
}
package main

import (
	"archive/tar"
	"archive/zip"
	"compress/gzip"
	"fmt"
	"github.com/gookit/color"
	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync"
	"time"
)

// main runs the interactive Sukkit installer: it downloads the PHP runtime
// and PocketMine-MP concurrently with progress bars, extracts the PHP
// archive for the current OS, then deletes the archive.
func main() {
	sukkit := " ____ _ _ _ _ \n / ___| _ _| | _| | _(_) |_ \n \\___ \\| | | | |/ / |/ / | __|\n ___) | |_| | <| <| | |_ \n |____/ \\__,_|_|\\_\\_|\\_\\_|\\__|\n "
	color.LightYellow.Println(sukkit)
	time.Sleep(500 * time.Millisecond)
	color.LightYellow.Println("Sukkit. The solar powered server software.")
	color.LightYellow.Println("Press enter to start setup.")
	fmt.Scanln()
	color.LightCyan.Println("Downloading files...")
	files := getFiles()
	var wg sync.WaitGroup
	wg.Add(len(files))
	// Create new progress container instance
	p := mpb.New(mpb.WithWaitGroup(&wg), mpb.WithWidth(60))
	for filename, url := range files {
		// filename/url are passed as arguments so each goroutine gets its
		// own copy of the loop variables.
		go func(filename string, url string) {
			defer wg.Done()
			err := downloadFile(p, url, filename)
			if err != nil {
				panic(err)
			}
		}(filename, url)
	}
	// Wait for all bars to complete
	p.Wait()
	color.LightCyan.Println("Unleashing the power...")
	if runtime.GOOS == "windows" {
		unzip("php", ".")
	} else {
		extractTarGz("php", ".")
	}
	color.LightCyan.Println("Feeding the leftovers to dogs...")
	if err := deleteFile("php"); err != nil {
		panic(err)
	}
	color.BgGreen.Println("Installation complete!")
	time.Sleep(3 * time.Second)
	color.Gray.Println("Press enter to continue...")
	fmt.Scanln()
}

// getFiles returns the set of files to download (local name -> URL) based
// on the user's OS.
func getFiles() map[string]string {
	suffix := "sh"
	php := "https://jenkins.pmmp.io/job/PHP-7.4-Aggregate/lastSuccessfulBuild/artifact/PHP-7.4-Linux-x86_64.tar.gz"
	if runtime.GOOS == "windows" {
		suffix = "ps1"
		php = "https://jenkins.pmmp.io/job/PHP-7.4-Aggregate/lastSuccessfulBuild/artifact/PHP-7.4-Windows-x64.zip"
	}
	if runtime.GOOS == "darwin" {
		php = "https://jenkins.pmmp.io/job/PHP-7.4-Aggregate/lastSuccessfulBuild/artifact/PHP-7.4-MacOS-x86_64.tar.gz"
	}
	m := make(map[string]string)
	m["php"] = php
	m["PocketMine-MP.phar"] = "https://jenkins.pmmp.io/job/PocketMine-MP/lastStableBuild/artifact/PocketMine-MP.phar"
	m["start."+suffix] = "https://jenkins.pmmp.io/job/PocketMine-MP/lastStableBuild/artifact/start." + suffix
	return m
}

// downloadFile downloads url into filename, rendering a progress bar on p.
// The payload is written to filename+".tmp" first and renamed on success.
func downloadFile(p *mpb.Progress, url string, filename string) error {
	// Create file
	out, err := os.Create(filename + ".tmp")
	if err != nil {
		return err
	}
	defer out.Close()
	// Get the data
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			panic(err)
		}
	}()
	// the Header "Content-Length" will let us know
	// the total file size to download
	size, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
	// Create new bar with filesize
	bar := p.AddBar(int64(size),
		mpb.PrependDecorators(
			decor.Percentage(),
		),
		mpb.AppendDecorators(
			decor.Name(filename, decor.WC{W: len(filename) + 1, C: decor.DidentRight}),
		),
	)
	// Create a proxy reader for the bar
	proxyReader := bar.ProxyReader(resp.Body)
	defer proxyReader.Close()
	// Start the copy action with our proxy reader
	_, err = io.Copy(out, proxyReader)
	if err != nil {
		return err
	}
	// Close it before renaming to prevent file in use error
	out.Close()
	// The bar uses the same line so print a new line once it's finished downloading
	fmt.Println()
	if err := os.Rename(filename+".tmp", filename); err != nil {
		return err
	}
	// NOTE(review): the Chmod error is deliberately ignored — presumably
	// because it is a no-op on Windows; confirm.
	os.Chmod(filename, 0755)
	return nil
}

// deleteFile erases a file from disk.
func deleteFile(file string) error {
	return os.Remove(file)
}

// unzip extracts a zip archive into dest.
// NOTE(review): entry names are joined onto dest with no containment check
// ("zip-slip"); this is only acceptable because the archive comes from the
// project's own build server — confirm that assumption holds.
func unzip(archive string, dest string) {
	r, err := zip.OpenReader(archive)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := r.Close(); err != nil {
			panic(err)
		}
	}()
	os.MkdirAll(dest, 0755)
	// extractAndWriteFile writes a single archive entry to disk, preserving
	// the entry's file mode.
	extractAndWriteFile := func(f *zip.File) error {
		rc, err := f.Open()
		if err != nil {
			return err
		}
		defer func() {
			if err := rc.Close(); err != nil {
				panic(err)
			}
		}()
		path := filepath.Join(dest, f.Name)
		if f.FileInfo().IsDir() {
			os.MkdirAll(path, f.Mode())
		} else {
			os.MkdirAll(filepath.Dir(path), f.Mode())
			f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
			if err != nil {
				return err
			}
			defer func() {
				if err := f.Close(); err != nil {
					panic(err)
				}
			}()
			_, err = io.Copy(f, rc)
			if err != nil {
				return err
			}
		}
		return nil
	}
	for _, f := range r.File {
		err := extractAndWriteFile(f)
		if err != nil {
			panic(err)
		}
	}
}

// extractTarGz extracts a gzip-compressed tar archive.
// NOTE(review): entries are created at header.Name relative to the current
// working directory — the dest parameter is never used; confirm whether
// that is intended. The opened archive file is also never closed.
func extractTarGz(archive string, dest string) {
	r, err := os.Open(archive)
	if err != nil {
		panic(err)
	}
	uncompressedStream, err := gzip.NewReader(r)
	if err != nil {
		panic(err)
	}
	tarReader := tar.NewReader(uncompressedStream)
	for true {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if header.Typeflag == tar.TypeDir {
			// NOTE(review): os.Mkdir (not MkdirAll) will fail if a parent
			// directory entry has not already been seen in the archive.
			if err := os.Mkdir(header.Name, 0755); err != nil {
				panic(err)
			}
		} else {
			outFile, err := os.Create(header.Name)
			if err != nil {
				panic(err)
			}
			if _, err := io.Copy(outFile, tarReader); err != nil {
				panic(err)
			}
			outFile.Close()
		}
	}
}
package oidc

import (
	"fmt"

	"github.com/ory/fosite"
	"github.com/ory/herodot"

	"github.com/authelia/authelia/v4/internal/configuration/schema"
	"github.com/authelia/authelia/v4/internal/storage"
	"github.com/authelia/authelia/v4/internal/templates"
)

// NewOpenIDConnectProvider new-ups a OpenIDConnectProvider. It returns nil
// when config is nil.
func NewOpenIDConnectProvider(config *schema.IdentityProvidersOpenIDConnect, store storage.Provider, templates *templates.Provider) (provider *OpenIDConnectProvider) {
	if config == nil {
		return nil
	}
	signer := NewKeyManager(config)
	provider = &OpenIDConnectProvider{
		JSONWriter: herodot.NewJSONWriter(nil),
		Store:      NewStore(config, store),
		KeyManager: signer,
		Config:     NewConfig(config, signer, templates),
	}
	provider.OAuth2Provider = fosite.NewOAuth2Provider(provider.Store, provider.Config)
	provider.Config.LoadHandlers(provider.Store)
	provider.Config.Strategy.ClientAuthentication = provider.DefaultClientAuthenticationStrategy
	// The discovery document is built once here and rebased per-issuer by
	// the Get*WellKnownConfiguration methods below.
	provider.discovery = NewOpenIDConnectWellKnownConfiguration(config)
	return provider
}

// GetOAuth2WellKnownConfiguration returns the discovery document for the OAuth Configuration.
// The cached document is copied and its endpoint URLs are rebased onto the
// supplied issuer.
func (p *OpenIDConnectProvider) GetOAuth2WellKnownConfiguration(issuer string) OAuth2WellKnownConfiguration {
	options := p.discovery.OAuth2WellKnownConfiguration.Copy()
	options.Issuer = issuer
	options.JWKSURI = fmt.Sprintf("%s%s", issuer, EndpointPathJWKs)
	options.AuthorizationEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathAuthorization)
	options.PushedAuthorizationRequestEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathPushedAuthorizationRequest)
	options.TokenEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathToken)
	options.IntrospectionEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathIntrospection)
	options.RevocationEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathRevocation)
	return options
}

// GetOpenIDConnectWellKnownConfiguration returns the discovery document for the OpenID Configuration.
// Like the OAuth variant, the cached document is copied and rebased onto the
// issuer; this one additionally sets the userinfo endpoint.
func (p *OpenIDConnectProvider) GetOpenIDConnectWellKnownConfiguration(issuer string) OpenIDConnectWellKnownConfiguration {
	options := p.discovery.Copy()
	options.Issuer = issuer
	options.JWKSURI = fmt.Sprintf("%s%s", issuer, EndpointPathJWKs)
	options.AuthorizationEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathAuthorization)
	options.PushedAuthorizationRequestEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathPushedAuthorizationRequest)
	options.TokenEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathToken)
	options.UserinfoEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathUserinfo)
	options.IntrospectionEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathIntrospection)
	options.RevocationEndpoint = fmt.Sprintf("%s%s", issuer, EndpointPathRevocation)
	return options
}
package core

import (
	"encoding/hex"
	"fmt"
	"log"
	"os"

	"github.com/boltdb/bolt"
)

const dbFile = "blockchain.db"          // bolt database file name
const blockBucket = "blocks"            // bucket holding blocks keyed by hash; key "1" maps to the tip hash
const genesisCoinbaseData = "sssssdkdk" // arbitrary data embedded in the genesis coinbase transaction

// Blockchain is a handle to the persisted chain: the hash of the newest
// block (Tip) plus the bolt database storing every block.
type Blockchain struct {
	Tip []byte   // hash of the most recent block
	DB  *bolt.DB // underlying bolt database
}

// BlockchainIterator walks the chain backwards, from the tip to genesis.
type BlockchainIterator struct {
	currentHash []byte   // hash of the block next() will return
	db          *bolt.DB // database the blocks are read from
}

// MineBlock mines a new block containing the given transactions, persists it
// and advances the chain tip.
func (blockchain *Blockchain) MineBlock(transactions []*Transaction) {
	var lastHash []byte

	// Read the hash of the current tip; key "1" always points at it.
	err := blockchain.DB.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(blockBucket))
		lastHash = bucket.Get([]byte("1"))
		return nil
	})
	if err != nil {
		log.Panic(err)
	}

	newBlock := NewBlock(lastHash, transactions)

	err = blockchain.DB.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(blockBucket))
		if err := bucket.Put(newBlock.Hash, newBlock.Serialize()); err != nil {
			log.Panic(err)
		}
		if err := bucket.Put([]byte("1"), newBlock.Hash); err != nil {
			log.Panic(err)
		}
		blockchain.Tip = newBlock.Hash
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
}

// FindUnspentTransactions returns every transaction that still has at least
// one output spendable by the given address.
func (blockchain *Blockchain) FindUnspentTransactions(address string) []Transaction {
	var unspentTXs []Transaction
	spentTXOs := make(map[string][]int) // txID -> indexes of outputs already spent

	bci := blockchain.Iterator()
	for {
		block := bci.next()
		for _, tx := range block.Transaction {
			txID := hex.EncodeToString(tx.ID)

		Outputs:
			for outIdx, out := range tx.Vout {
				// Skip outputs that a later transaction already spent.
				if spentTXOs[txID] != nil {
					for _, spentOut := range spentTXOs[txID] {
						if spentOut == outIdx {
							continue Outputs
						}
					}
				}
				if out.CanBeUnlockedWith(address) {
					unspentTXs = append(unspentTXs, *tx)
				}
			}

			// Coinbase transactions have no real inputs to mark as spent.
			if !tx.IsCoinBase() {
				for _, in := range tx.Vin {
					if in.CanUnlockOutPutWith(address) {
						inTxID := hex.EncodeToString(in.Txid)
						spentTXOs[inTxID] = append(spentTXOs[inTxID], in.Vout)
					}
				}
			}
		}

		// Genesis block has an empty previous hash: the walk is finished.
		if len(block.PrevBlockHash) == 0 {
			break
		}
	}
	return unspentTXs
}

// FindUTXO returns all unspent outputs the address can unlock.
func (blockchain *Blockchain) FindUTXO(address string) []TXOutput {
	var UTXOs []TXOutput
	unspentTransactions := blockchain.FindUnspentTransactions(address)
	for _, tx := range unspentTransactions {
		for _, out := range tx.Vout {
			if out.CanBeUnlockedWith(address) {
				UTXOs = append(UTXOs, out)
			}
		}
	}
	return UTXOs
}

// FindSpendableOutputs collects unspent outputs for the address until their
// total value reaches amount. It returns the accumulated value and a map of
// txID -> output indexes to be referenced by new transaction inputs.
func (blockchain *Blockchain) FindSpendableOutputs(address string, amount int) (int, map[string][]int) {
	unspentOutputs := make(map[string][]int)
	unspentTXs := blockchain.FindUnspentTransactions(address)
	accumulated := 0

Work:
	for _, tx := range unspentTXs {
		txID := hex.EncodeToString(tx.ID)
		for outIdx, out := range tx.Vout {
			if out.CanBeUnlockedWith(address) && accumulated < amount {
				accumulated += out.Value
				unspentOutputs[txID] = append(unspentOutputs[txID], outIdx)
				// Stop as soon as we have gathered enough value.
				if accumulated >= amount {
					break Work
				}
			}
		}
	}
	return accumulated, unspentOutputs
}

// Iterator returns an iterator positioned at the chain tip.
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
	return &BlockchainIterator{currentHash: blockchain.Tip, db: blockchain.DB}
}

// next returns the block currentHash points at and steps the iterator to its
// predecessor.
func (it *BlockchainIterator) next() *Block {
	var block *Block
	err := it.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(blockBucket))
		encodedBlock := bucket.Get(it.currentHash)
		block = DeserializeBlock(encodedBlock)
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
	it.currentHash = block.PrevBlockHash
	return block
}

// dbExists reports whether the blockchain database file already exists.
func dbExists() bool {
	if _, err := os.Stat(dbFile); os.IsNotExist(err) {
		return false
	}
	return true
}

// NewBlockchain opens the existing blockchain database. The database must
// already have been created (see createBlockChain); otherwise the process
// prints a hint and exits.
func NewBlockchain(address string) *Blockchain {
	if !dbExists() {
		fmt.Println("数据库不存在,创建一个")
		os.Exit(1)
	}

	var tip []byte
	db, err := bolt.Open(dbFile, 0600, nil)
	if err != nil {
		log.Panic(err)
	}

	// Load the tip hash stored under key "1".
	err = db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(blockBucket))
		tip = bucket.Get([]byte("1"))
		return nil
	})
	if err != nil {
		log.Panic(err)
	}

	bc := Blockchain{Tip: tip, DB: db}
	return &bc
}

// createBlockChain creates a brand-new blockchain database containing only
// the genesis block, whose coinbase transaction pays the given address.
func createBlockChain(address string) *Blockchain {
	// BUG FIX: the original tested dbExists()==false here, which made it exit
	// (claiming the database "already exists") exactly when the database was
	// missing — so creation could never succeed. Creation must be refused only
	// when the database is already present.
	if dbExists() {
		fmt.Println("数据库已经存在,无需创建")
		os.Exit(1)
	}

	var tip []byte
	db, err := bolt.Open(dbFile, 0600, nil)
	if err != nil {
		log.Panic(err)
	}

	err = db.Update(func(tx *bolt.Tx) error {
		cbtx := NewCoinBaseTX(address, genesisCoinbaseData) // coinbase transaction for the genesis block
		genesis := GenerateGenesisBlock(cbtx)

		bucket, err := tx.CreateBucket([]byte(blockBucket))
		if err != nil {
			log.Panic(err)
		}
		if err = bucket.Put(genesis.Hash, genesis.Serialize()); err != nil {
			log.Panic(err)
		}
		// Key "1" records the hash of the most recent block.
		if err = bucket.Put([]byte("1"), genesis.Hash); err != nil {
			log.Panic(err)
		}
		tip = genesis.Hash
		return nil
	})
	// BUG FIX: this error was silently discarded in the original.
	if err != nil {
		log.Panic(err)
	}

	bc := Blockchain{Tip: tip, DB: db}
	return &bc
}
package log

import (
	"time"

	"github.com/sirupsen/logrus"

	"github.com/feng/future/go-kit/microsvr/app-server/model"
	"github.com/feng/future/go-kit/microsvr/app-server/service"
)

// LoggingMiddleware returns a middleware that wraps an AppService so every
// call is logged with its input, output and elapsed time.
func LoggingMiddleware() service.SvcMiddleware {
	return func(next service.AppService) service.AppService {
		return logmw{AppService: next}
	}
}

// logmw decorates the embedded AppService with call logging.
type logmw struct {
	service.AppService
}

// GetAccount logs the call and delegates to the wrapped service.
func (mw logmw) GetAccount(userAddr string) (status uint32, msg string, userAccount model.UserAccount) {
	begin := time.Now()
	defer func() {
		logrus.Infoln(
			"method", "GetAccount",
			"input", userAddr,
			"output", status, msg, userAccount,
			"took", time.Since(begin),
		)
	}()
	return mw.AppService.GetAccount(userAddr)
}

// SetAccount logs the call and delegates to the wrapped service.
func (mw logmw) SetAccount(userKeyStore, userParse, keyString string, userAccount model.UserAccount) (status uint32, msg string) {
	begin := time.Now()
	defer func() {
		logrus.Infoln(
			"method", "SetAccount",
			"input", userKeyStore, userParse, keyString,
			"output", status, msg,
			"took", time.Since(begin),
		)
	}()
	return mw.AppService.SetAccount(userKeyStore, userParse, keyString, userAccount)
}

// GetEthBalance logs the call and delegates to the wrapped service.
func (mw logmw) GetEthBalance(userAddr string) (status uint32, msg string, balance string) {
	begin := time.Now()
	defer func() {
		logrus.Infoln(
			"method", "GetEthBalance",
			"input", userAddr,
			"output", status, msg, balance,
			"took", time.Since(begin),
		)
	}()
	return mw.AppService.GetEthBalance(userAddr)
}
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/gorilla/mux"
	"github.com/saurabmish/Coffee-Shop/data"
	"github.com/saurabmish/Coffee-Shop/handlers"
)

// main wires up the coffee-shop REST API, serves it on :8080 and shuts the
// server down gracefully on SIGINT/SIGTERM.
func main() {
	l := log.New(os.Stdout, "Coffee shop API service ", log.LstdFlags)
	v := data.NewValidation()
	coffeeHandler := handlers.NewProducts(l, v)

	serveMux := mux.NewRouter()

	getRouter := serveMux.Methods(http.MethodGet).Subrouter()
	getRouter.HandleFunc("/coffee/get/all", coffeeHandler.RetrieveAll)
	getRouter.HandleFunc("/coffee/get/{id:[0-9]+}", coffeeHandler.RetrieveSingle)

	putRouter := serveMux.Methods(http.MethodPut).Subrouter()
	putRouter.HandleFunc("/coffee/modify/{id:[0-9]+}", coffeeHandler.Modify)
	putRouter.Use(coffeeHandler.MiddlewareProductValidation)

	postRouter := serveMux.Methods(http.MethodPost).Subrouter()
	postRouter.HandleFunc("/coffee/add", coffeeHandler.Add)
	postRouter.Use(coffeeHandler.MiddlewareProductValidation)

	deleteRouter := serveMux.Methods(http.MethodDelete).Subrouter()
	deleteRouter.HandleFunc("/coffee/remove/{id:[0-9]+}", coffeeHandler.Remove)

	// Reliability pattern for the server: bound how long idle and slow
	// connections may hold resources.
	server := &http.Server{
		Addr:         ":8080",
		Handler:      serveMux,
		IdleTimeout:  120 * time.Second,
		ReadTimeout:  1 * time.Second,
		WriteTimeout: 1 * time.Second,
	}

	// Serve in a goroutine so main is free to wait for shutdown signals.
	go func() {
		// FIX: ErrServerClosed is the normal result of Shutdown and must not
		// be treated as fatal.
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			l.Fatal(err)
		}
	}()

	// Graceful shutdown on interrupt/termination.
	// FIX: os.Kill (SIGKILL) can never be delivered to a handler, so trap
	// SIGTERM instead; also avoid shadowing the os/signal package name.
	signalChannel := make(chan os.Signal, 1)
	signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM)

	sig := <-signalChannel
	l.Println("Received signal for graceful shutdown", sig)

	// FIX: keep the cancel func (vet: lostcancel) and release its resources.
	timeoutContext, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// FIX: report (rather than ignore) a failed graceful shutdown.
	if err := server.Shutdown(timeoutContext); err != nil {
		l.Println("graceful shutdown failed:", err)
	}
}
package main

import (
	"encoding/json"
	"fmt"
)

// Person models one entry of the JSON array decoded in main.
type Person struct {
	First   string
	Last    string
	Age     int
	Sayings []string
}

// main decodes a hard-coded JSON array into a []Person and prints it.
func main() {
	s := `[
	{
		"First":"James",
		"Last":"Bond",
		"Age":32,
		"Sayings":["Shaken, not stirred","Youth is no guarantee of innovation","In his majesty's royal service"]
	},{
		"First":"Miss",
		"Last":"Moneypenny",
		"Age":27,
		"Sayings":["James, it is soo good to see you","Would you like me to take care of that for you, James?","I would really prefer to be a secret agent myself."]
	},{
		"First":"M",
		"Last":"Hmmmm",
		"Age":54,
		"Sayings":["Oh, James. You didn't.","Dear God, what has James done now?","Can someone please tell me where James Bond is?"]
	}
	]`
	fmt.Println(s)

	var people []Person
	if err := json.Unmarshal([]byte(s), &people); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(people)
	for idx := range people {
		p := people[idx]
		fmt.Println("Person #", idx)
		fmt.Println("\t", p.First, p.Last, p.Age)
		for _, quote := range p.Sayings {
			fmt.Println("\t\t", quote)
		}
	}
}
package guest

import "context"

// GetAvailableSpace reports how many accompanying-guest slots remain: the
// total announced by all guests minus the slots consumed by guests that have
// already arrived.
func (s *Service) GetAvailableSpace(ctx context.Context) (int, error) {
	invited, err := s.ListGuests(ctx, false)
	if err != nil {
		return 0, err
	}

	arrived, err := s.ListGuests(ctx, true)
	if err != nil {
		return 0, err
	}

	capacity := 0
	for _, g := range invited {
		capacity += g.AccompanyingGuests
	}

	used := 0
	for _, g := range arrived {
		used += g.ActualAccompanyingGuests
	}

	return capacity - used, nil
}
// DRUNKWATER TEMPLATE(add description and prototypes) // Question Title and Description on leetcode.com // Function Declaration and Function Prototypes on leetcode.com //551. Student Attendance Record I //You are given a string representing an attendance record for a student. The record only contains the following three characters: //'A' : Absent. //'L' : Late. //'P' : Present. //A student could be rewarded if his attendance record doesn't contain more than one 'A' (absent) or more than two continuous 'L' (late). //You need to return whether the student could be rewarded according to his attendance record. //Example 1: //Input: "PPALLP" //Output: True //Example 2: //Input: "PPALLL" //Output: False //func checkRecord(s string) bool { //} // Time Is Money
// isIdealPermutation reports whether every element of A sits within one
// position of its index, i.e. |A[i]-i| <= 1 for all i. (For a permutation
// this is equivalent to the global inversion count equaling the local one.)
func isIdealPermutation(A []int) bool {
	for i := range A {
		delta := A[i] - i
		if delta > 1 || delta < -1 {
			return false
		}
	}
	return true
}
package conv import ( "github.com/badgerodon/goreify/generics" ) //go:generate goreify github.com/ElPeque/reflect-db/conv.To uint,uint8,uint16,uint32,uint64,int,int8,int16,int32,int64,float32,float64 func To(elem interface{}) generics.T1 { switch elem.(type) { // unsigned case *uint: return generics.T1(*elem.(*uint)) case *uint8: return generics.T1(*elem.(*uint8)) case *uint16: return generics.T1(*elem.(*uint16)) case *uint32: return generics.T1(*elem.(*uint32)) case *uint64: return generics.T1(*elem.(*uint64)) case uint: return generics.T1(elem.(uint)) case uint8: return generics.T1(elem.(uint8)) case uint16: return generics.T1(elem.(uint16)) case uint32: return generics.T1(elem.(uint32)) case uint64: return generics.T1(elem.(uint64)) // signed case *int: return generics.T1(*elem.(*int)) case *int8: return generics.T1(*elem.(*int8)) case *int16: return generics.T1(*elem.(*int16)) case *int32: return generics.T1(*elem.(*int32)) case *int64: return generics.T1(*elem.(*int64)) case int: return generics.T1(elem.(int)) case int8: return generics.T1(elem.(int8)) case int16: return generics.T1(elem.(int16)) case int32: return generics.T1(elem.(int32)) case int64: return generics.T1(elem.(int64)) // float case *float32: return generics.T1(*elem.(*float32)) case *float64: return generics.T1(*elem.(*float64)) case float32: return generics.T1(elem.(float32)) case float64: return generics.T1(elem.(float64)) } return generics.T1(0) }
package test

import (
	"fmt"
	"testing"
	"time"

	"gengine/builder"
	"gengine/context"
	"gengine/engine"
)

// Test_at_salience builds two rules — one with an explicit salience — and
// executes them, verifying the engine runs them without error.
func Test_at_salience(t *testing.T) {
	dataContext := context.NewDataContext()
	dataContext.Add("println", fmt.Println)

	// Build the rule set; @sal expands to the rule's salience value.
	ruleBuilder := builder.NewRuleBuilder(dataContext)
	err := ruleBuilder.BuildRuleFromString(`
	rule "1" salience 10
	begin
		println(@sal)
	end
	rule "2"
	begin
		println(@sal)
	end
	`)
	// FIX: report failures through the testing API instead of panicking.
	if err != nil {
		t.Fatalf("build rules: %v", err)
	}

	eng := engine.NewGengine()

	start := time.Now()
	// true: when there are many rules and one rule errors, continue
	// executing the rules after the failing one.
	err = eng.Execute(ruleBuilder, true)
	elapsed := time.Since(start)
	if err != nil {
		t.Fatalf("execute rules: %v", err)
	}

	// FIX: use t.Logf + time.Since rather than builtin println and manual
	// UnixNano arithmetic.
	t.Logf("execute rule cost %d ns", elapsed.Nanoseconds())
}